Diffstat (limited to '')
-rw-r--r--  arch/mips/alchemy/common/Makefile      10
-rw-r--r--  arch/mips/alchemy/common/clock.c     1116
-rw-r--r--  arch/mips/alchemy/common/dbdma.c     1092
-rw-r--r--  arch/mips/alchemy/common/dma.c        265
-rw-r--r--  arch/mips/alchemy/common/gpiolib.c    172
-rw-r--r--  arch/mips/alchemy/common/irq.c        996
-rw-r--r--  arch/mips/alchemy/common/platform.c   458
-rw-r--r--  arch/mips/alchemy/common/power.c      132
-rw-r--r--  arch/mips/alchemy/common/prom.c       145
-rw-r--r--  arch/mips/alchemy/common/setup.c      107
-rw-r--r--  arch/mips/alchemy/common/sleeper.S    266
-rw-r--r--  arch/mips/alchemy/common/time.c       155
-rw-r--r--  arch/mips/alchemy/common/usb.c        627
-rw-r--r--  arch/mips/alchemy/common/vss.c         85
14 files changed, 5626 insertions, 0 deletions
diff --git a/arch/mips/alchemy/common/Makefile b/arch/mips/alchemy/common/Makefile
new file mode 100644
index 000000000..a0e94388d
--- /dev/null
+++ b/arch/mips/alchemy/common/Makefile
@@ -0,0 +1,10 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Copyright 2000, 2008 MontaVista Software Inc.
+# Author: MontaVista Software, Inc. <source@mvista.com>
+#
+# Makefile for the Alchemy Au1xx0 CPUs, generic files.
+#
+
+obj-y += prom.o time.o clock.o platform.o power.o gpiolib.o \
+ setup.o sleeper.o dma.o dbdma.o vss.o irq.o usb.o
diff --git a/arch/mips/alchemy/common/clock.c b/arch/mips/alchemy/common/clock.c
new file mode 100644
index 000000000..c01be8c45
--- /dev/null
+++ b/arch/mips/alchemy/common/clock.c
@@ -0,0 +1,1116 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Alchemy clocks.
+ *
+ * Exposes all configurable internal clock sources to the clk framework.
+ *
+ * We have:
+ * - Root source, usually 12MHz supplied by an external crystal
+ * - 3 PLLs which generate multiples of root rate [AUX, CPU, AUX2]
+ *
+ * Dividers:
+ * - 6 clock dividers with:
+ * * selectable source [one of the PLLs],
+ * * output divided between [2 .. 512 in steps of 2] (!Au1300)
+ * or [1 .. 256 in steps of 1] (Au1300),
+ * * can be enabled individually.
+ *
+ * - up to 6 "internal" (fixed) consumers which:
+ * * take either AUXPLL or one of the above 6 dividers as input,
+ * * divide this input by 1, 2, or 4 (and 3 on Au1300).
+ * * can be disabled separately.
+ *
+ * Misc clocks:
+ * - sysbus clock: CPU core clock (CPUPLL) divided by 2, 3 or 4.
+ * depends on board design and should be set by bootloader, read-only.
+ * - peripheral clock: half the rate of sysbus clock, source for a lot
+ * of peripheral blocks, read-only.
+ * - memory clock: clk rate to main memory chips, depends on board
+ * design and is read-only,
+ * - lrclk: the static bus clock signal for synchronous operation.
+ * depends on board design, must be set by bootloader,
+ * but may be required to correctly configure devices attached to
+ * the static bus. The Au1000/1500/1100 manuals call it LCLK, on
+ * later models it's called RCLK.
+ */
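A rough worked example of how these pieces compose (register values are illustrative, not taken from any particular board): with the usual 12 MHz crystal and SYS_CPUPLL programmed to 33, the CPU core clock is 33 * 12 MHz = 396 MHz. With SYS_POWERCTRL[1:0] = 2 the sysbus clock is 396 / (2 + 2) = 99 MHz and the peripheral clock is half of that, 49.5 MHz. An AUXPLL programmed to 8 runs at 96 MHz, and a frequency divider fed by it with an effective divisor of 2 supplies 48 MHz to one of the internal consumers.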
+
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/clkdev.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+#include <asm/mach-au1x00/au1000.h>
+
+/* Base clock: 12MHz is the default in all databooks, and I haven't
+ * found any board yet which uses a different rate.
+ */
+#define ALCHEMY_ROOTCLK_RATE 12000000
+
+/*
+ * the internal sources which can be driven by the PLLs and dividers.
+ * Names taken from the databooks, refer to them for more information,
+ * especially which ones share a clock line.
+ */
+static const char * const alchemy_au1300_intclknames[] = {
+ "lcd_intclk", "gpemgp_clk", "maempe_clk", "maebsa_clk",
+ "EXTCLK0", "EXTCLK1"
+};
+
+static const char * const alchemy_au1200_intclknames[] = {
+ "lcd_intclk", NULL, NULL, NULL, "EXTCLK0", "EXTCLK1"
+};
+
+static const char * const alchemy_au1550_intclknames[] = {
+ "usb_clk", "psc0_intclk", "psc1_intclk", "pci_clko",
+ "EXTCLK0", "EXTCLK1"
+};
+
+static const char * const alchemy_au1100_intclknames[] = {
+ "usb_clk", "lcd_intclk", NULL, "i2s_clk", "EXTCLK0", "EXTCLK1"
+};
+
+static const char * const alchemy_au1500_intclknames[] = {
+ NULL, "usbd_clk", "usbh_clk", "pci_clko", "EXTCLK0", "EXTCLK1"
+};
+
+static const char * const alchemy_au1000_intclknames[] = {
+ "irda_clk", "usbd_clk", "usbh_clk", "i2s_clk", "EXTCLK0",
+ "EXTCLK1"
+};
+
+/* aliases for a few on-chip sources which are either shared
+ * or have gone through name changes.
+ */
+static struct clk_aliastable {
+ char *alias;
+ char *base;
+ int cputype;
+} alchemy_clk_aliases[] __initdata = {
+ { "usbh_clk", "usb_clk", ALCHEMY_CPU_AU1100 },
+ { "usbd_clk", "usb_clk", ALCHEMY_CPU_AU1100 },
+ { "irda_clk", "usb_clk", ALCHEMY_CPU_AU1100 },
+ { "usbh_clk", "usb_clk", ALCHEMY_CPU_AU1550 },
+ { "usbd_clk", "usb_clk", ALCHEMY_CPU_AU1550 },
+ { "psc2_intclk", "usb_clk", ALCHEMY_CPU_AU1550 },
+ { "psc3_intclk", "EXTCLK0", ALCHEMY_CPU_AU1550 },
+ { "psc0_intclk", "EXTCLK0", ALCHEMY_CPU_AU1200 },
+ { "psc1_intclk", "EXTCLK1", ALCHEMY_CPU_AU1200 },
+ { "psc0_intclk", "EXTCLK0", ALCHEMY_CPU_AU1300 },
+ { "psc2_intclk", "EXTCLK0", ALCHEMY_CPU_AU1300 },
+ { "psc1_intclk", "EXTCLK1", ALCHEMY_CPU_AU1300 },
+ { "psc3_intclk", "EXTCLK1", ALCHEMY_CPU_AU1300 },
+
+ { NULL, NULL, 0 },
+};
+
+#define IOMEM(x) ((void __iomem *)(KSEG1ADDR(CPHYSADDR(x))))
+
+/* access locks to SYS_FREQCTRL0/1 and SYS_CLKSRC registers */
+static spinlock_t alchemy_clk_fg0_lock;
+static spinlock_t alchemy_clk_fg1_lock;
+static DEFINE_SPINLOCK(alchemy_clk_csrc_lock);
+
+/* CPU Core clock *****************************************************/
+
+static unsigned long alchemy_clk_cpu_recalc(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ unsigned long t;
+
+ /*
+ * On early Au1000, sys_cpupll was write-only. Since these
+ * silicon versions of Au1000 are not sold, we don't bend
+ * over backwards trying to determine the frequency.
+ */
+ if (unlikely(au1xxx_cpu_has_pll_wo()))
+ t = 396000000;
+ else {
+ t = alchemy_rdsys(AU1000_SYS_CPUPLL) & 0x7f;
+ if (alchemy_get_cputype() < ALCHEMY_CPU_AU1300)
+ t &= 0x3f;
+ t *= parent_rate;
+ }
+
+ return t;
+}
+
+void __init alchemy_set_lpj(void)
+{
+ preset_lpj = alchemy_clk_cpu_recalc(NULL, ALCHEMY_ROOTCLK_RATE);
+ preset_lpj /= 2 * HZ;
+}
+
+static const struct clk_ops alchemy_clkops_cpu = {
+ .recalc_rate = alchemy_clk_cpu_recalc,
+};
+
+static struct clk __init *alchemy_clk_setup_cpu(const char *parent_name,
+ int ctype)
+{
+ struct clk_init_data id;
+ struct clk_hw *h;
+ struct clk *clk;
+
+ h = kzalloc(sizeof(*h), GFP_KERNEL);
+ if (!h)
+ return ERR_PTR(-ENOMEM);
+
+ id.name = ALCHEMY_CPU_CLK;
+ id.parent_names = &parent_name;
+ id.num_parents = 1;
+ id.flags = 0;
+ id.ops = &alchemy_clkops_cpu;
+ h->init = &id;
+
+ clk = clk_register(NULL, h);
+ if (IS_ERR(clk)) {
+ pr_err("failed to register clock\n");
+ kfree(h);
+ }
+
+ return clk;
+}
+
+/* AUXPLLs ************************************************************/
+
+struct alchemy_auxpll_clk {
+ struct clk_hw hw;
+ unsigned long reg; /* au1300 has also AUXPLL2 */
+ int maxmult; /* max multiplier */
+};
+#define to_auxpll_clk(x) container_of(x, struct alchemy_auxpll_clk, hw)
+
+static unsigned long alchemy_clk_aux_recalc(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct alchemy_auxpll_clk *a = to_auxpll_clk(hw);
+
+ return (alchemy_rdsys(a->reg) & 0xff) * parent_rate;
+}
+
+static int alchemy_clk_aux_setr(struct clk_hw *hw,
+ unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct alchemy_auxpll_clk *a = to_auxpll_clk(hw);
+ unsigned long d = rate;
+
+ if (rate)
+ d /= parent_rate;
+ else
+ d = 0;
+
+ /* minimum is 84MHz, max is 756-1032 depending on variant */
+ if (((d < 7) && (d != 0)) || (d > a->maxmult))
+ return -EINVAL;
+
+ alchemy_wrsys(d, a->reg);
+ return 0;
+}
+
+static long alchemy_clk_aux_roundr(struct clk_hw *hw,
+ unsigned long rate,
+ unsigned long *parent_rate)
+{
+ struct alchemy_auxpll_clk *a = to_auxpll_clk(hw);
+ unsigned long mult;
+
+ if (!rate || !*parent_rate)
+ return 0;
+
+ mult = rate / (*parent_rate);
+
+ if (mult && (mult < 7))
+ mult = 7;
+ if (mult > a->maxmult)
+ mult = a->maxmult;
+
+ return (*parent_rate) * mult;
+}
+
+static const struct clk_ops alchemy_clkops_aux = {
+ .recalc_rate = alchemy_clk_aux_recalc,
+ .set_rate = alchemy_clk_aux_setr,
+ .round_rate = alchemy_clk_aux_roundr,
+};
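A quick trace of the AUXPLL ops above, assuming the usual 12 MHz parent: asking round_rate() for 100 MHz gives a multiplier of 100000000 / 12000000 = 8, i.e. 96 MHz; set_rate(96 MHz) then writes 8 to the PLL register, and recalc_rate() reads it back as 8 * 12 MHz. The multiplier floor of 7 is where the 84 MHz minimum mentioned in set_rate comes from (7 * 12 MHz).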
+
+static struct clk __init *alchemy_clk_setup_aux(const char *parent_name,
+ char *name, int maxmult,
+ unsigned long reg)
+{
+ struct clk_init_data id;
+ struct clk *c;
+ struct alchemy_auxpll_clk *a;
+
+ a = kzalloc(sizeof(*a), GFP_KERNEL);
+ if (!a)
+ return ERR_PTR(-ENOMEM);
+
+ id.name = name;
+ id.parent_names = &parent_name;
+ id.num_parents = 1;
+ id.flags = CLK_GET_RATE_NOCACHE;
+ id.ops = &alchemy_clkops_aux;
+
+ a->reg = reg;
+ a->maxmult = maxmult;
+ a->hw.init = &id;
+
+ c = clk_register(NULL, &a->hw);
+ if (!IS_ERR(c))
+ clk_register_clkdev(c, name, NULL);
+ else
+ kfree(a);
+
+ return c;
+}
+
+/* sysbus_clk *********************************************************/
+
+static struct clk __init *alchemy_clk_setup_sysbus(const char *pn)
+{
+ unsigned long v = (alchemy_rdsys(AU1000_SYS_POWERCTRL) & 3) + 2;
+ struct clk *c;
+
+ c = clk_register_fixed_factor(NULL, ALCHEMY_SYSBUS_CLK,
+ pn, 0, 1, v);
+ if (!IS_ERR(c))
+ clk_register_clkdev(c, ALCHEMY_SYSBUS_CLK, NULL);
+ return c;
+}
+
+/* Peripheral Clock ***************************************************/
+
+static struct clk __init *alchemy_clk_setup_periph(const char *pn)
+{
+ /* Peripheral clock runs at half the rate of sysbus clk */
+ struct clk *c;
+
+ c = clk_register_fixed_factor(NULL, ALCHEMY_PERIPH_CLK,
+ pn, 0, 1, 2);
+ if (!IS_ERR(c))
+ clk_register_clkdev(c, ALCHEMY_PERIPH_CLK, NULL);
+ return c;
+}
+
+/* mem clock **********************************************************/
+
+static struct clk __init *alchemy_clk_setup_mem(const char *pn, int ct)
+{
+ void __iomem *addr = IOMEM(AU1000_MEM_PHYS_ADDR);
+ unsigned long v;
+ struct clk *c;
+ int div;
+
+ switch (ct) {
+ case ALCHEMY_CPU_AU1550:
+ case ALCHEMY_CPU_AU1200:
+ v = __raw_readl(addr + AU1550_MEM_SDCONFIGB);
+ div = (v & (1 << 15)) ? 1 : 2;
+ break;
+ case ALCHEMY_CPU_AU1300:
+ v = __raw_readl(addr + AU1550_MEM_SDCONFIGB);
+ div = (v & (1 << 31)) ? 1 : 2;
+ break;
+ case ALCHEMY_CPU_AU1000:
+ case ALCHEMY_CPU_AU1500:
+ case ALCHEMY_CPU_AU1100:
+ default:
+ div = 2;
+ break;
+ }
+
+ c = clk_register_fixed_factor(NULL, ALCHEMY_MEM_CLK, pn,
+ 0, 1, div);
+ if (!IS_ERR(c))
+ clk_register_clkdev(c, ALCHEMY_MEM_CLK, NULL);
+ return c;
+}
+
+/* lrclk: external synchronous static bus clock ***********************/
+
+static struct clk __init *alchemy_clk_setup_lrclk(const char *pn, int t)
+{
+ /* Au1000, Au1500: MEM_STCFG0[11]: If bit is set, lrclk=pclk/5,
+ * otherwise lrclk=pclk/4.
+ * All other variants: MEM_STCFG0[15:13] = divisor.
+ * L/RCLK = periph_clk / (divisor + 1)
+ * On Au1000, Au1500, Au1100 it's called LCLK,
+ * on later models it's called RCLK, but it's the same thing.
+ */
+ struct clk *c;
+ unsigned long v = alchemy_rdsmem(AU1000_MEM_STCFG0);
+
+ switch (t) {
+ case ALCHEMY_CPU_AU1000:
+ case ALCHEMY_CPU_AU1500:
+ v = 4 + ((v >> 11) & 1);
+ break;
+ default: /* all other models */
+ v = ((v >> 13) & 7) + 1;
+ }
+ c = clk_register_fixed_factor(NULL, ALCHEMY_LR_CLK,
+ pn, 0, 1, v);
+ if (!IS_ERR(c))
+ clk_register_clkdev(c, ALCHEMY_LR_CLK, NULL);
+ return c;
+}
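For example, on the later parts a MEM_STCFG0[15:13] value of 3 gives a divisor of 4, so with the 49.5 MHz peripheral clock from the earlier example lrclk comes out at roughly 12.4 MHz; Au1000/Au1500 can only choose between pclk/4 and pclk/5 via bit 11.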
+
+/* Clock dividers and muxes *******************************************/
+
+/* data for fgen and csrc mux-dividers */
+struct alchemy_fgcs_clk {
+ struct clk_hw hw;
+ spinlock_t *reglock; /* register lock */
+ unsigned long reg; /* SYS_FREQCTRL0/1 */
+ int shift; /* offset in register */
+ int parent; /* parent before disable [Au1300] */
+ int isen; /* is it enabled? */
+ int *dt; /* dividertable for csrc */
+};
+#define to_fgcs_clk(x) container_of(x, struct alchemy_fgcs_clk, hw)
+
+static long alchemy_calc_div(unsigned long rate, unsigned long prate,
+ int scale, int maxdiv, unsigned long *rv)
+{
+ long div1, div2;
+
+ div1 = prate / rate;
+ if ((prate / div1) > rate)
+ div1++;
+
+ if (scale == 2) { /* only div-by-multiple-of-2 possible */
+ if (div1 & 1)
+ div1++; /* stay <=prate */
+ }
+
+ div2 = (div1 / scale) - 1; /* value to write to register */
+
+ if (div2 > maxdiv)
+ div2 = maxdiv;
+ if (rv)
+ *rv = div2;
+
+ div1 = ((div2 + 1) * scale);
+ return div1;
+}
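Tracing alchemy_calc_div() for a 48 MHz request from a 396 MHz parent with scale = 2 and maxdiv = 512 (the pre-Au1300 case): div1 starts at 396 / 48 = 8, is bumped to 9 because 396 / 8 = 49.5 MHz overshoots the request, then to 10 to keep the divisor even; div2 = 10 / 2 - 1 = 4 is the value written to the register, and the returned effective divisor of 10 means 39.6 MHz is actually produced.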
+
+static int alchemy_clk_fgcs_detr(struct clk_hw *hw,
+ struct clk_rate_request *req,
+ int scale, int maxdiv)
+{
+ struct clk_hw *pc, *bpc, *free;
+ long tdv, tpr, pr, nr, br, bpr, diff, lastdiff;
+ int j;
+
+ lastdiff = INT_MAX;
+ bpr = 0;
+ bpc = NULL;
+ br = -EINVAL;
+ free = NULL;
+
+ /* look at the rates each enabled parent supplies and select
+ * the one that gets closest to but not over the requested rate.
+ */
+ for (j = 0; j < 7; j++) {
+ pc = clk_hw_get_parent_by_index(hw, j);
+ if (!pc)
+ break;
+
+ /* if this parent is currently unused, remember it.
+ * XXX: we would actually want clk_has_active_children()
+ * but this is a good-enough approximation for now.
+ */
+ if (!clk_hw_is_prepared(pc)) {
+ if (!free)
+ free = pc;
+ }
+
+ pr = clk_hw_get_rate(pc);
+ if (pr < req->rate)
+ continue;
+
+ /* what can hardware actually provide */
+ tdv = alchemy_calc_div(req->rate, pr, scale, maxdiv, NULL);
+ nr = pr / tdv;
+ diff = req->rate - nr;
+ if (nr > req->rate)
+ continue;
+
+ if (diff < lastdiff) {
+ lastdiff = diff;
+ bpr = pr;
+ bpc = pc;
+ br = nr;
+ }
+ if (diff == 0)
+ break;
+ }
+
+ /* if we couldn't get the exact rate we wanted from the enabled
+ * parents, maybe we can tell an available disabled/inactive one
+ * to give us a rate we can divide down to the requested rate.
+ */
+ if (lastdiff && free) {
+ for (j = (maxdiv == 4) ? 1 : scale; j <= maxdiv; j += scale) {
+ tpr = req->rate * j;
+ if (tpr < 0)
+ break;
+ pr = clk_hw_round_rate(free, tpr);
+
+ tdv = alchemy_calc_div(req->rate, pr, scale, maxdiv,
+ NULL);
+ nr = pr / tdv;
+ diff = req->rate - nr;
+ if (nr > req->rate)
+ continue;
+ if (diff < lastdiff) {
+ lastdiff = diff;
+ bpr = pr;
+ bpc = free;
+ br = nr;
+ }
+ if (diff == 0)
+ break;
+ }
+ }
+
+ if (br < 0)
+ return br;
+
+ req->best_parent_rate = bpr;
+ req->best_parent_hw = bpc;
+ req->rate = br;
+
+ return 0;
+}
+
+static int alchemy_clk_fgv1_en(struct clk_hw *hw)
+{
+ struct alchemy_fgcs_clk *c = to_fgcs_clk(hw);
+ unsigned long v, flags;
+
+ spin_lock_irqsave(c->reglock, flags);
+ v = alchemy_rdsys(c->reg);
+ v |= (1 << 1) << c->shift;
+ alchemy_wrsys(v, c->reg);
+ spin_unlock_irqrestore(c->reglock, flags);
+
+ return 0;
+}
+
+static int alchemy_clk_fgv1_isen(struct clk_hw *hw)
+{
+ struct alchemy_fgcs_clk *c = to_fgcs_clk(hw);
+ unsigned long v = alchemy_rdsys(c->reg) >> (c->shift + 1);
+
+ return v & 1;
+}
+
+static void alchemy_clk_fgv1_dis(struct clk_hw *hw)
+{
+ struct alchemy_fgcs_clk *c = to_fgcs_clk(hw);
+ unsigned long v, flags;
+
+ spin_lock_irqsave(c->reglock, flags);
+ v = alchemy_rdsys(c->reg);
+ v &= ~((1 << 1) << c->shift);
+ alchemy_wrsys(v, c->reg);
+ spin_unlock_irqrestore(c->reglock, flags);
+}
+
+static int alchemy_clk_fgv1_setp(struct clk_hw *hw, u8 index)
+{
+ struct alchemy_fgcs_clk *c = to_fgcs_clk(hw);
+ unsigned long v, flags;
+
+ spin_lock_irqsave(c->reglock, flags);
+ v = alchemy_rdsys(c->reg);
+ if (index)
+ v |= (1 << c->shift);
+ else
+ v &= ~(1 << c->shift);
+ alchemy_wrsys(v, c->reg);
+ spin_unlock_irqrestore(c->reglock, flags);
+
+ return 0;
+}
+
+static u8 alchemy_clk_fgv1_getp(struct clk_hw *hw)
+{
+ struct alchemy_fgcs_clk *c = to_fgcs_clk(hw);
+
+ return (alchemy_rdsys(c->reg) >> c->shift) & 1;
+}
+
+static int alchemy_clk_fgv1_setr(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct alchemy_fgcs_clk *c = to_fgcs_clk(hw);
+ unsigned long div, v, flags, ret;
+ int sh = c->shift + 2;
+
+ if (!rate || !parent_rate || rate > (parent_rate / 2))
+ return -EINVAL;
+ ret = alchemy_calc_div(rate, parent_rate, 2, 512, &div);
+ spin_lock_irqsave(c->reglock, flags);
+ v = alchemy_rdsys(c->reg);
+ v &= ~(0xff << sh);
+ v |= div << sh;
+ alchemy_wrsys(v, c->reg);
+ spin_unlock_irqrestore(c->reglock, flags);
+
+ return 0;
+}
+
+static unsigned long alchemy_clk_fgv1_recalc(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct alchemy_fgcs_clk *c = to_fgcs_clk(hw);
+ unsigned long v = alchemy_rdsys(c->reg) >> (c->shift + 2);
+
+ v = ((v & 0xff) + 1) * 2;
+ return parent_rate / v;
+}
+
+static int alchemy_clk_fgv1_detr(struct clk_hw *hw,
+ struct clk_rate_request *req)
+{
+ return alchemy_clk_fgcs_detr(hw, req, 2, 512);
+}
+
+/* Au1000, Au1100, Au15x0, Au12x0 */
+static const struct clk_ops alchemy_clkops_fgenv1 = {
+ .recalc_rate = alchemy_clk_fgv1_recalc,
+ .determine_rate = alchemy_clk_fgv1_detr,
+ .set_rate = alchemy_clk_fgv1_setr,
+ .set_parent = alchemy_clk_fgv1_setp,
+ .get_parent = alchemy_clk_fgv1_getp,
+ .enable = alchemy_clk_fgv1_en,
+ .disable = alchemy_clk_fgv1_dis,
+ .is_enabled = alchemy_clk_fgv1_isen,
+};
+
+static void __alchemy_clk_fgv2_en(struct alchemy_fgcs_clk *c)
+{
+ unsigned long v = alchemy_rdsys(c->reg);
+
+ v &= ~(3 << c->shift);
+ v |= (c->parent & 3) << c->shift;
+ alchemy_wrsys(v, c->reg);
+ c->isen = 1;
+}
+
+static int alchemy_clk_fgv2_en(struct clk_hw *hw)
+{
+ struct alchemy_fgcs_clk *c = to_fgcs_clk(hw);
+ unsigned long flags;
+
+ /* enable by setting the previous parent clock */
+ spin_lock_irqsave(c->reglock, flags);
+ __alchemy_clk_fgv2_en(c);
+ spin_unlock_irqrestore(c->reglock, flags);
+
+ return 0;
+}
+
+static int alchemy_clk_fgv2_isen(struct clk_hw *hw)
+{
+ struct alchemy_fgcs_clk *c = to_fgcs_clk(hw);
+
+ return ((alchemy_rdsys(c->reg) >> c->shift) & 3) != 0;
+}
+
+static void alchemy_clk_fgv2_dis(struct clk_hw *hw)
+{
+ struct alchemy_fgcs_clk *c = to_fgcs_clk(hw);
+ unsigned long v, flags;
+
+ spin_lock_irqsave(c->reglock, flags);
+ v = alchemy_rdsys(c->reg);
+ v &= ~(3 << c->shift); /* set input mux to "disabled" state */
+ alchemy_wrsys(v, c->reg);
+ c->isen = 0;
+ spin_unlock_irqrestore(c->reglock, flags);
+}
+
+static int alchemy_clk_fgv2_setp(struct clk_hw *hw, u8 index)
+{
+ struct alchemy_fgcs_clk *c = to_fgcs_clk(hw);
+ unsigned long flags;
+
+ spin_lock_irqsave(c->reglock, flags);
+ c->parent = index + 1; /* value to write to register */
+ if (c->isen)
+ __alchemy_clk_fgv2_en(c);
+ spin_unlock_irqrestore(c->reglock, flags);
+
+ return 0;
+}
+
+static u8 alchemy_clk_fgv2_getp(struct clk_hw *hw)
+{
+ struct alchemy_fgcs_clk *c = to_fgcs_clk(hw);
+ unsigned long flags, v;
+
+ spin_lock_irqsave(c->reglock, flags);
+ v = c->parent - 1;
+ spin_unlock_irqrestore(c->reglock, flags);
+ return v;
+}
+
+/* fg0-2 and fg4-6 share a "scale"-bit. With this bit cleared, the
+ * dividers behave exactly as on previous models (dividers are multiples
+ * of 2); with the bit set, dividers are multiples of 1, halving their
+ * range, but making them also much more flexible.
+ */
+static int alchemy_clk_fgv2_setr(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct alchemy_fgcs_clk *c = to_fgcs_clk(hw);
+ int sh = c->shift + 2;
+ unsigned long div, v, flags, ret;
+
+ if (!rate || !parent_rate || rate > parent_rate)
+ return -EINVAL;
+
+ v = alchemy_rdsys(c->reg) & (1 << 30); /* test "scale" bit */
+ ret = alchemy_calc_div(rate, parent_rate, v ? 1 : 2,
+ v ? 256 : 512, &div);
+
+ spin_lock_irqsave(c->reglock, flags);
+ v = alchemy_rdsys(c->reg);
+ v &= ~(0xff << sh);
+ v |= (div & 0xff) << sh;
+ alchemy_wrsys(v, c->reg);
+ spin_unlock_irqrestore(c->reglock, flags);
+
+ return 0;
+}
+
+static unsigned long alchemy_clk_fgv2_recalc(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct alchemy_fgcs_clk *c = to_fgcs_clk(hw);
+ int sh = c->shift + 2;
+ unsigned long v, t;
+
+ v = alchemy_rdsys(c->reg);
+ t = parent_rate / (((v >> sh) & 0xff) + 1);
+ if ((v & (1 << 30)) == 0) /* test scale bit */
+ t /= 2;
+
+ return t;
+}
+
+static int alchemy_clk_fgv2_detr(struct clk_hw *hw,
+ struct clk_rate_request *req)
+{
+ struct alchemy_fgcs_clk *c = to_fgcs_clk(hw);
+ int scale, maxdiv;
+
+ if (alchemy_rdsys(c->reg) & (1 << 30)) {
+ scale = 1;
+ maxdiv = 256;
+ } else {
+ scale = 2;
+ maxdiv = 512;
+ }
+
+ return alchemy_clk_fgcs_detr(hw, req, scale, maxdiv);
+}
+
+/* Au1300 larger input mux, no separate disable bit, flexible divider */
+static const struct clk_ops alchemy_clkops_fgenv2 = {
+ .recalc_rate = alchemy_clk_fgv2_recalc,
+ .determine_rate = alchemy_clk_fgv2_detr,
+ .set_rate = alchemy_clk_fgv2_setr,
+ .set_parent = alchemy_clk_fgv2_setp,
+ .get_parent = alchemy_clk_fgv2_getp,
+ .enable = alchemy_clk_fgv2_en,
+ .disable = alchemy_clk_fgv2_dis,
+ .is_enabled = alchemy_clk_fgv2_isen,
+};
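Concretely, with a 396 MHz parent and a divider field value of 3: when the scale bit (bit 30) is set the output is 396 / (3 + 1) = 99 MHz, and when it is clear the same field value means divide-by-8, i.e. 49.5 MHz, matching the behaviour of the older fgen blocks.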
+
+static const char * const alchemy_clk_fgv1_parents[] = {
+ ALCHEMY_CPU_CLK, ALCHEMY_AUXPLL_CLK
+};
+
+static const char * const alchemy_clk_fgv2_parents[] = {
+ ALCHEMY_AUXPLL2_CLK, ALCHEMY_CPU_CLK, ALCHEMY_AUXPLL_CLK
+};
+
+static const char * const alchemy_clk_fgen_names[] = {
+ ALCHEMY_FG0_CLK, ALCHEMY_FG1_CLK, ALCHEMY_FG2_CLK,
+ ALCHEMY_FG3_CLK, ALCHEMY_FG4_CLK, ALCHEMY_FG5_CLK };
+
+static int __init alchemy_clk_init_fgens(int ctype)
+{
+ struct clk *c;
+ struct clk_init_data id;
+ struct alchemy_fgcs_clk *a;
+ unsigned long v;
+ int i, ret;
+
+ switch (ctype) {
+ case ALCHEMY_CPU_AU1000...ALCHEMY_CPU_AU1200:
+ id.ops = &alchemy_clkops_fgenv1;
+ id.parent_names = alchemy_clk_fgv1_parents;
+ id.num_parents = 2;
+ break;
+ case ALCHEMY_CPU_AU1300:
+ id.ops = &alchemy_clkops_fgenv2;
+ id.parent_names = alchemy_clk_fgv2_parents;
+ id.num_parents = 3;
+ break;
+ default:
+ return -ENODEV;
+ }
+ id.flags = CLK_SET_RATE_PARENT | CLK_GET_RATE_NOCACHE;
+
+ a = kzalloc((sizeof(*a)) * 6, GFP_KERNEL);
+ if (!a)
+ return -ENOMEM;
+
+ spin_lock_init(&alchemy_clk_fg0_lock);
+ spin_lock_init(&alchemy_clk_fg1_lock);
+ ret = 0;
+ for (i = 0; i < 6; i++) {
+ id.name = alchemy_clk_fgen_names[i];
+ a->shift = 10 * (i < 3 ? i : i - 3);
+ if (i > 2) {
+ a->reg = AU1000_SYS_FREQCTRL1;
+ a->reglock = &alchemy_clk_fg1_lock;
+ } else {
+ a->reg = AU1000_SYS_FREQCTRL0;
+ a->reglock = &alchemy_clk_fg0_lock;
+ }
+
+ /* default to first parent if bootloader has set
+ * the mux to disabled state.
+ */
+ if (ctype == ALCHEMY_CPU_AU1300) {
+ v = alchemy_rdsys(a->reg);
+ a->parent = (v >> a->shift) & 3;
+ if (!a->parent) {
+ a->parent = 1;
+ a->isen = 0;
+ } else
+ a->isen = 1;
+ }
+
+ a->hw.init = &id;
+ c = clk_register(NULL, &a->hw);
+ if (IS_ERR(c))
+ ret++;
+ else
+ clk_register_clkdev(c, id.name, NULL);
+ a++;
+ }
+
+ return ret;
+}
+
+/* internal sources muxes *********************************************/
+
+static int alchemy_clk_csrc_isen(struct clk_hw *hw)
+{
+ struct alchemy_fgcs_clk *c = to_fgcs_clk(hw);
+ unsigned long v = alchemy_rdsys(c->reg);
+
+ return (((v >> c->shift) >> 2) & 7) != 0;
+}
+
+static void __alchemy_clk_csrc_en(struct alchemy_fgcs_clk *c)
+{
+ unsigned long v = alchemy_rdsys(c->reg);
+
+ v &= ~((7 << 2) << c->shift);
+ v |= ((c->parent & 7) << 2) << c->shift;
+ alchemy_wrsys(v, c->reg);
+ c->isen = 1;
+}
+
+static int alchemy_clk_csrc_en(struct clk_hw *hw)
+{
+ struct alchemy_fgcs_clk *c = to_fgcs_clk(hw);
+ unsigned long flags;
+
+ /* enable by setting the previous parent clock */
+ spin_lock_irqsave(c->reglock, flags);
+ __alchemy_clk_csrc_en(c);
+ spin_unlock_irqrestore(c->reglock, flags);
+
+ return 0;
+}
+
+static void alchemy_clk_csrc_dis(struct clk_hw *hw)
+{
+ struct alchemy_fgcs_clk *c = to_fgcs_clk(hw);
+ unsigned long v, flags;
+
+ spin_lock_irqsave(c->reglock, flags);
+ v = alchemy_rdsys(c->reg);
+ v &= ~((3 << 2) << c->shift); /* mux to "disabled" state */
+ alchemy_wrsys(v, c->reg);
+ c->isen = 0;
+ spin_unlock_irqrestore(c->reglock, flags);
+}
+
+static int alchemy_clk_csrc_setp(struct clk_hw *hw, u8 index)
+{
+ struct alchemy_fgcs_clk *c = to_fgcs_clk(hw);
+ unsigned long flags;
+
+ spin_lock_irqsave(c->reglock, flags);
+ c->parent = index + 1; /* value to write to register */
+ if (c->isen)
+ __alchemy_clk_csrc_en(c);
+ spin_unlock_irqrestore(c->reglock, flags);
+
+ return 0;
+}
+
+static u8 alchemy_clk_csrc_getp(struct clk_hw *hw)
+{
+ struct alchemy_fgcs_clk *c = to_fgcs_clk(hw);
+
+ return c->parent - 1;
+}
+
+static unsigned long alchemy_clk_csrc_recalc(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct alchemy_fgcs_clk *c = to_fgcs_clk(hw);
+ unsigned long v = (alchemy_rdsys(c->reg) >> c->shift) & 3;
+
+ return parent_rate / c->dt[v];
+}
+
+static int alchemy_clk_csrc_setr(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct alchemy_fgcs_clk *c = to_fgcs_clk(hw);
+ unsigned long d, v, flags;
+ int i;
+
+ if (!rate || !parent_rate || rate > parent_rate)
+ return -EINVAL;
+
+ d = (parent_rate + (rate / 2)) / rate;
+ if (d > 4)
+ return -EINVAL;
+ if ((d == 3) && (c->dt[2] != 3))
+ d = 4;
+
+ for (i = 0; i < 4; i++)
+ if (c->dt[i] == d)
+ break;
+
+ if (i >= 4)
+ return -EINVAL; /* oops */
+
+ spin_lock_irqsave(c->reglock, flags);
+ v = alchemy_rdsys(c->reg);
+ v &= ~(3 << c->shift);
+ v |= (i & 3) << c->shift;
+ alchemy_wrsys(v, c->reg);
+ spin_unlock_irqrestore(c->reglock, flags);
+
+ return 0;
+}
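As an example of the rounding above: requesting parent_rate / 3 on a pre-Au1300 part (divider table { 1, 4, 1, 2 }) computes d = 3, but since that table has no 3 the code bumps it to 4 and programs index 1, so the consumer actually receives parent_rate / 4; only the Au1300 table { 1, 4, 3, 2 } can deliver a true divide-by-3.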
+
+static int alchemy_clk_csrc_detr(struct clk_hw *hw,
+ struct clk_rate_request *req)
+{
+ struct alchemy_fgcs_clk *c = to_fgcs_clk(hw);
+ int scale = c->dt[2] == 3 ? 1 : 2; /* au1300 check */
+
+ return alchemy_clk_fgcs_detr(hw, req, scale, 4);
+}
+
+static const struct clk_ops alchemy_clkops_csrc = {
+ .recalc_rate = alchemy_clk_csrc_recalc,
+ .determine_rate = alchemy_clk_csrc_detr,
+ .set_rate = alchemy_clk_csrc_setr,
+ .set_parent = alchemy_clk_csrc_setp,
+ .get_parent = alchemy_clk_csrc_getp,
+ .enable = alchemy_clk_csrc_en,
+ .disable = alchemy_clk_csrc_dis,
+ .is_enabled = alchemy_clk_csrc_isen,
+};
+
+static const char * const alchemy_clk_csrc_parents[] = {
+ /* disabled at index 0 */ ALCHEMY_AUXPLL_CLK,
+ ALCHEMY_FG0_CLK, ALCHEMY_FG1_CLK, ALCHEMY_FG2_CLK,
+ ALCHEMY_FG3_CLK, ALCHEMY_FG4_CLK, ALCHEMY_FG5_CLK
+};
+
+/* divider tables */
+static int alchemy_csrc_dt1[] = { 1, 4, 1, 2 }; /* rest */
+static int alchemy_csrc_dt2[] = { 1, 4, 3, 2 }; /* Au1300 */
+
+static int __init alchemy_clk_setup_imux(int ctype)
+{
+ struct alchemy_fgcs_clk *a;
+ const char * const *names;
+ struct clk_init_data id;
+ unsigned long v;
+ int i, ret, *dt;
+ struct clk *c;
+
+ id.ops = &alchemy_clkops_csrc;
+ id.parent_names = alchemy_clk_csrc_parents;
+ id.num_parents = 7;
+ id.flags = CLK_SET_RATE_PARENT | CLK_GET_RATE_NOCACHE;
+
+ dt = alchemy_csrc_dt1;
+ switch (ctype) {
+ case ALCHEMY_CPU_AU1000:
+ names = alchemy_au1000_intclknames;
+ break;
+ case ALCHEMY_CPU_AU1500:
+ names = alchemy_au1500_intclknames;
+ break;
+ case ALCHEMY_CPU_AU1100:
+ names = alchemy_au1100_intclknames;
+ break;
+ case ALCHEMY_CPU_AU1550:
+ names = alchemy_au1550_intclknames;
+ break;
+ case ALCHEMY_CPU_AU1200:
+ names = alchemy_au1200_intclknames;
+ break;
+ case ALCHEMY_CPU_AU1300:
+ dt = alchemy_csrc_dt2;
+ names = alchemy_au1300_intclknames;
+ break;
+ default:
+ return -ENODEV;
+ }
+
+ a = kcalloc(6, sizeof(*a), GFP_KERNEL);
+ if (!a)
+ return -ENOMEM;
+
+ ret = 0;
+
+ for (i = 0; i < 6; i++) {
+ id.name = names[i];
+ if (!id.name)
+ goto next;
+
+ a->shift = i * 5;
+ a->reg = AU1000_SYS_CLKSRC;
+ a->reglock = &alchemy_clk_csrc_lock;
+ a->dt = dt;
+
+ /* default to first parent clock if mux is initially
+ * set to disabled state.
+ */
+ v = alchemy_rdsys(a->reg);
+ a->parent = ((v >> a->shift) >> 2) & 7;
+ if (!a->parent) {
+ a->parent = 1;
+ a->isen = 0;
+ } else
+ a->isen = 1;
+
+ a->hw.init = &id;
+ c = clk_register(NULL, &a->hw);
+ if (IS_ERR(c))
+ ret++;
+ else
+ clk_register_clkdev(c, id.name, NULL);
+next:
+ a++;
+ }
+
+ return ret;
+}
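Once the tree is registered, consumer drivers reach these clocks through the ordinary clk API using the names listed above. A minimal sketch, assuming <linux/clk.h> and <linux/err.h> are available and using "lcd_intclk" purely as an example name:

	static int example_enable_lcd_clock(void)	/* illustrative only */
	{
		struct clk *c = clk_get(NULL, "lcd_intclk");
		long r;

		if (IS_ERR(c))
			return PTR_ERR(c);

		/* let the fgen/csrc dividers pick the closest reachable rate */
		r = clk_round_rate(c, 48000000);
		if (r > 0)
			clk_set_rate(c, r);

		/* routes one of fg0..fg5 (or the AUXPLL) to the block and ungates it */
		return clk_prepare_enable(c);
	}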
+
+
+/**********************************************************************/
+
+
+#define ERRCK(x) \
+ if (IS_ERR(x)) { \
+ ret = PTR_ERR(x); \
+ goto out; \
+ }
+
+static int __init alchemy_clk_init(void)
+{
+ int ctype = alchemy_get_cputype(), ret, i;
+ struct clk_aliastable *t = alchemy_clk_aliases;
+ struct clk *c;
+
+ /* Root of the Alchemy clock tree: external 12MHz crystal osc */
+ c = clk_register_fixed_rate(NULL, ALCHEMY_ROOT_CLK, NULL,
+ 0, ALCHEMY_ROOTCLK_RATE);
+ ERRCK(c)
+
+ /* CPU core clock */
+ c = alchemy_clk_setup_cpu(ALCHEMY_ROOT_CLK, ctype);
+ ERRCK(c)
+
+ /* AUXPLLs: max 1GHz on Au1300, 748MHz on older models */
+ i = (ctype == ALCHEMY_CPU_AU1300) ? 84 : 63;
+ c = alchemy_clk_setup_aux(ALCHEMY_ROOT_CLK, ALCHEMY_AUXPLL_CLK,
+ i, AU1000_SYS_AUXPLL);
+ ERRCK(c)
+
+ if (ctype == ALCHEMY_CPU_AU1300) {
+ c = alchemy_clk_setup_aux(ALCHEMY_ROOT_CLK,
+ ALCHEMY_AUXPLL2_CLK, i,
+ AU1300_SYS_AUXPLL2);
+ ERRCK(c)
+ }
+
+ /* sysbus clock: cpu core clock divided by 2, 3 or 4 */
+ c = alchemy_clk_setup_sysbus(ALCHEMY_CPU_CLK);
+ ERRCK(c)
+
+ /* peripheral clock: runs at half rate of sysbus clk */
+ c = alchemy_clk_setup_periph(ALCHEMY_SYSBUS_CLK);
+ ERRCK(c)
+
+ /* SDR/DDR memory clock */
+ c = alchemy_clk_setup_mem(ALCHEMY_SYSBUS_CLK, ctype);
+ ERRCK(c)
+
+ /* L/RCLK: external static bus clock for synchronous mode */
+ c = alchemy_clk_setup_lrclk(ALCHEMY_PERIPH_CLK, ctype);
+ ERRCK(c)
+
+ /* Frequency dividers 0-5 */
+ ret = alchemy_clk_init_fgens(ctype);
+ if (ret) {
+ ret = -ENODEV;
+ goto out;
+ }
+
+ /* divider muxes for internal sources */
+ ret = alchemy_clk_setup_imux(ctype);
+ if (ret) {
+ ret = -ENODEV;
+ goto out;
+ }
+
+ /* set up aliases drivers might look for */
+ while (t->base) {
+ if (t->cputype == ctype)
+ clk_add_alias(t->alias, NULL, t->base, NULL);
+ t++;
+ }
+
+ pr_info("Alchemy clocktree installed\n");
+ return 0;
+
+out:
+ return ret;
+}
+postcore_initcall(alchemy_clk_init);
diff --git a/arch/mips/alchemy/common/dbdma.c b/arch/mips/alchemy/common/dbdma.c
new file mode 100644
index 000000000..6a3c890f7
--- /dev/null
+++ b/arch/mips/alchemy/common/dbdma.c
@@ -0,0 +1,1092 @@
+/*
+ *
+ * BRIEF MODULE DESCRIPTION
+ * The Descriptor Based DMA channel manager that first appeared
+ * on the Au1550. I started with dma.c, but I think all that is
+ * left is this initial comment :-)
+ *
+ * Copyright 2004 Embedded Edge, LLC
+ * dan@embeddededge.com
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
+ * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
+ * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+
+#include <linux/dma-map-ops.h> /* for dma_default_coherent */
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/interrupt.h>
+#include <linux/export.h>
+#include <linux/syscore_ops.h>
+#include <asm/mach-au1x00/au1000.h>
+#include <asm/mach-au1x00/au1xxx_dbdma.h>
+
+/*
+ * The Descriptor Based DMA supports up to 16 channels.
+ *
+ * There are 32 devices defined. We keep an internal structure
+ * of devices using these channels, along with additional
+ * information.
+ *
+ * We allocate the descriptors and allow access to them through various
+ * functions. The drivers allocate the data buffers and assign them
+ * to the descriptors.
+ */
+static DEFINE_SPINLOCK(au1xxx_dbdma_spin_lock);
+
+/* I couldn't find a macro that did this... */
+#define ALIGN_ADDR(x, a) ((((u32)(x)) + (a-1)) & ~(a-1))
+
+static dbdma_global_t *dbdma_gptr =
+ (dbdma_global_t *)KSEG1ADDR(AU1550_DBDMA_CONF_PHYS_ADDR);
+static int dbdma_initialized;
+
+static dbdev_tab_t *dbdev_tab;
+
+static dbdev_tab_t au1550_dbdev_tab[] __initdata = {
+ /* UARTS */
+ { AU1550_DSCR_CMD0_UART0_TX, DEV_FLAGS_OUT, 0, 8, 0x11100004, 0, 0 },
+ { AU1550_DSCR_CMD0_UART0_RX, DEV_FLAGS_IN, 0, 8, 0x11100000, 0, 0 },
+ { AU1550_DSCR_CMD0_UART3_TX, DEV_FLAGS_OUT, 0, 8, 0x11400004, 0, 0 },
+ { AU1550_DSCR_CMD0_UART3_RX, DEV_FLAGS_IN, 0, 8, 0x11400000, 0, 0 },
+
+ /* EXT DMA */
+ { AU1550_DSCR_CMD0_DMA_REQ0, 0, 0, 0, 0x00000000, 0, 0 },
+ { AU1550_DSCR_CMD0_DMA_REQ1, 0, 0, 0, 0x00000000, 0, 0 },
+ { AU1550_DSCR_CMD0_DMA_REQ2, 0, 0, 0, 0x00000000, 0, 0 },
+ { AU1550_DSCR_CMD0_DMA_REQ3, 0, 0, 0, 0x00000000, 0, 0 },
+
+ /* USB DEV */
+ { AU1550_DSCR_CMD0_USBDEV_RX0, DEV_FLAGS_IN, 4, 8, 0x10200000, 0, 0 },
+ { AU1550_DSCR_CMD0_USBDEV_TX0, DEV_FLAGS_OUT, 4, 8, 0x10200004, 0, 0 },
+ { AU1550_DSCR_CMD0_USBDEV_TX1, DEV_FLAGS_OUT, 4, 8, 0x10200008, 0, 0 },
+ { AU1550_DSCR_CMD0_USBDEV_TX2, DEV_FLAGS_OUT, 4, 8, 0x1020000c, 0, 0 },
+ { AU1550_DSCR_CMD0_USBDEV_RX3, DEV_FLAGS_IN, 4, 8, 0x10200010, 0, 0 },
+ { AU1550_DSCR_CMD0_USBDEV_RX4, DEV_FLAGS_IN, 4, 8, 0x10200014, 0, 0 },
+
+ /* PSCs */
+ { AU1550_DSCR_CMD0_PSC0_TX, DEV_FLAGS_OUT, 0, 0, 0x11a0001c, 0, 0 },
+ { AU1550_DSCR_CMD0_PSC0_RX, DEV_FLAGS_IN, 0, 0, 0x11a0001c, 0, 0 },
+ { AU1550_DSCR_CMD0_PSC1_TX, DEV_FLAGS_OUT, 0, 0, 0x11b0001c, 0, 0 },
+ { AU1550_DSCR_CMD0_PSC1_RX, DEV_FLAGS_IN, 0, 0, 0x11b0001c, 0, 0 },
+ { AU1550_DSCR_CMD0_PSC2_TX, DEV_FLAGS_OUT, 0, 0, 0x10a0001c, 0, 0 },
+ { AU1550_DSCR_CMD0_PSC2_RX, DEV_FLAGS_IN, 0, 0, 0x10a0001c, 0, 0 },
+ { AU1550_DSCR_CMD0_PSC3_TX, DEV_FLAGS_OUT, 0, 0, 0x10b0001c, 0, 0 },
+ { AU1550_DSCR_CMD0_PSC3_RX, DEV_FLAGS_IN, 0, 0, 0x10b0001c, 0, 0 },
+
+ { AU1550_DSCR_CMD0_PCI_WRITE, 0, 0, 0, 0x00000000, 0, 0 }, /* PCI */
+ { AU1550_DSCR_CMD0_NAND_FLASH, 0, 0, 0, 0x00000000, 0, 0 }, /* NAND */
+
+ /* MAC 0 */
+ { AU1550_DSCR_CMD0_MAC0_RX, DEV_FLAGS_IN, 0, 0, 0x00000000, 0, 0 },
+ { AU1550_DSCR_CMD0_MAC0_TX, DEV_FLAGS_OUT, 0, 0, 0x00000000, 0, 0 },
+
+ /* MAC 1 */
+ { AU1550_DSCR_CMD0_MAC1_RX, DEV_FLAGS_IN, 0, 0, 0x00000000, 0, 0 },
+ { AU1550_DSCR_CMD0_MAC1_TX, DEV_FLAGS_OUT, 0, 0, 0x00000000, 0, 0 },
+
+ { DSCR_CMD0_THROTTLE, DEV_FLAGS_ANYUSE, 0, 0, 0x00000000, 0, 0 },
+ { DSCR_CMD0_ALWAYS, DEV_FLAGS_ANYUSE, 0, 0, 0x00000000, 0, 0 },
+};
+
+static dbdev_tab_t au1200_dbdev_tab[] __initdata = {
+ { AU1200_DSCR_CMD0_UART0_TX, DEV_FLAGS_OUT, 0, 8, 0x11100004, 0, 0 },
+ { AU1200_DSCR_CMD0_UART0_RX, DEV_FLAGS_IN, 0, 8, 0x11100000, 0, 0 },
+ { AU1200_DSCR_CMD0_UART1_TX, DEV_FLAGS_OUT, 0, 8, 0x11200004, 0, 0 },
+ { AU1200_DSCR_CMD0_UART1_RX, DEV_FLAGS_IN, 0, 8, 0x11200000, 0, 0 },
+
+ { AU1200_DSCR_CMD0_DMA_REQ0, 0, 0, 0, 0x00000000, 0, 0 },
+ { AU1200_DSCR_CMD0_DMA_REQ1, 0, 0, 0, 0x00000000, 0, 0 },
+
+ { AU1200_DSCR_CMD0_MAE_BE, DEV_FLAGS_ANYUSE, 0, 0, 0x00000000, 0, 0 },
+ { AU1200_DSCR_CMD0_MAE_FE, DEV_FLAGS_ANYUSE, 0, 0, 0x00000000, 0, 0 },
+ { AU1200_DSCR_CMD0_MAE_BOTH, DEV_FLAGS_ANYUSE, 0, 0, 0x00000000, 0, 0 },
+ { AU1200_DSCR_CMD0_LCD, DEV_FLAGS_ANYUSE, 0, 0, 0x00000000, 0, 0 },
+
+ { AU1200_DSCR_CMD0_SDMS_TX0, DEV_FLAGS_OUT, 4, 8, 0x10600000, 0, 0 },
+ { AU1200_DSCR_CMD0_SDMS_RX0, DEV_FLAGS_IN, 4, 8, 0x10600004, 0, 0 },
+ { AU1200_DSCR_CMD0_SDMS_TX1, DEV_FLAGS_OUT, 4, 8, 0x10680000, 0, 0 },
+ { AU1200_DSCR_CMD0_SDMS_RX1, DEV_FLAGS_IN, 4, 8, 0x10680004, 0, 0 },
+
+ { AU1200_DSCR_CMD0_AES_RX, DEV_FLAGS_IN , 4, 32, 0x10300008, 0, 0 },
+ { AU1200_DSCR_CMD0_AES_TX, DEV_FLAGS_OUT, 4, 32, 0x10300004, 0, 0 },
+
+ { AU1200_DSCR_CMD0_PSC0_TX, DEV_FLAGS_OUT, 0, 16, 0x11a0001c, 0, 0 },
+ { AU1200_DSCR_CMD0_PSC0_RX, DEV_FLAGS_IN, 0, 16, 0x11a0001c, 0, 0 },
+ { AU1200_DSCR_CMD0_PSC0_SYNC, DEV_FLAGS_ANYUSE, 0, 0, 0x00000000, 0, 0 },
+ { AU1200_DSCR_CMD0_PSC1_TX, DEV_FLAGS_OUT, 0, 16, 0x11b0001c, 0, 0 },
+ { AU1200_DSCR_CMD0_PSC1_RX, DEV_FLAGS_IN, 0, 16, 0x11b0001c, 0, 0 },
+ { AU1200_DSCR_CMD0_PSC1_SYNC, DEV_FLAGS_ANYUSE, 0, 0, 0x00000000, 0, 0 },
+
+ { AU1200_DSCR_CMD0_CIM_RXA, DEV_FLAGS_IN, 0, 32, 0x14004020, 0, 0 },
+ { AU1200_DSCR_CMD0_CIM_RXB, DEV_FLAGS_IN, 0, 32, 0x14004040, 0, 0 },
+ { AU1200_DSCR_CMD0_CIM_RXC, DEV_FLAGS_IN, 0, 32, 0x14004060, 0, 0 },
+ { AU1200_DSCR_CMD0_CIM_SYNC, DEV_FLAGS_ANYUSE, 0, 0, 0x00000000, 0, 0 },
+
+ { AU1200_DSCR_CMD0_NAND_FLASH, DEV_FLAGS_IN, 0, 0, 0x00000000, 0, 0 },
+
+ { DSCR_CMD0_THROTTLE, DEV_FLAGS_ANYUSE, 0, 0, 0x00000000, 0, 0 },
+ { DSCR_CMD0_ALWAYS, DEV_FLAGS_ANYUSE, 0, 0, 0x00000000, 0, 0 },
+};
+
+static dbdev_tab_t au1300_dbdev_tab[] __initdata = {
+ { AU1300_DSCR_CMD0_UART0_TX, DEV_FLAGS_OUT, 0, 8, 0x10100004, 0, 0 },
+ { AU1300_DSCR_CMD0_UART0_RX, DEV_FLAGS_IN, 0, 8, 0x10100000, 0, 0 },
+ { AU1300_DSCR_CMD0_UART1_TX, DEV_FLAGS_OUT, 0, 8, 0x10101004, 0, 0 },
+ { AU1300_DSCR_CMD0_UART1_RX, DEV_FLAGS_IN, 0, 8, 0x10101000, 0, 0 },
+ { AU1300_DSCR_CMD0_UART2_TX, DEV_FLAGS_OUT, 0, 8, 0x10102004, 0, 0 },
+ { AU1300_DSCR_CMD0_UART2_RX, DEV_FLAGS_IN, 0, 8, 0x10102000, 0, 0 },
+ { AU1300_DSCR_CMD0_UART3_TX, DEV_FLAGS_OUT, 0, 8, 0x10103004, 0, 0 },
+ { AU1300_DSCR_CMD0_UART3_RX, DEV_FLAGS_IN, 0, 8, 0x10103000, 0, 0 },
+
+ { AU1300_DSCR_CMD0_SDMS_TX0, DEV_FLAGS_OUT, 4, 8, 0x10600000, 0, 0 },
+ { AU1300_DSCR_CMD0_SDMS_RX0, DEV_FLAGS_IN, 4, 8, 0x10600004, 0, 0 },
+ { AU1300_DSCR_CMD0_SDMS_TX1, DEV_FLAGS_OUT, 8, 8, 0x10601000, 0, 0 },
+ { AU1300_DSCR_CMD0_SDMS_RX1, DEV_FLAGS_IN, 8, 8, 0x10601004, 0, 0 },
+
+ { AU1300_DSCR_CMD0_AES_RX, DEV_FLAGS_IN , 4, 32, 0x10300008, 0, 0 },
+ { AU1300_DSCR_CMD0_AES_TX, DEV_FLAGS_OUT, 4, 32, 0x10300004, 0, 0 },
+
+ { AU1300_DSCR_CMD0_PSC0_TX, DEV_FLAGS_OUT, 0, 16, 0x10a0001c, 0, 0 },
+ { AU1300_DSCR_CMD0_PSC0_RX, DEV_FLAGS_IN, 0, 16, 0x10a0001c, 0, 0 },
+ { AU1300_DSCR_CMD0_PSC1_TX, DEV_FLAGS_OUT, 0, 16, 0x10a0101c, 0, 0 },
+ { AU1300_DSCR_CMD0_PSC1_RX, DEV_FLAGS_IN, 0, 16, 0x10a0101c, 0, 0 },
+ { AU1300_DSCR_CMD0_PSC2_TX, DEV_FLAGS_OUT, 0, 16, 0x10a0201c, 0, 0 },
+ { AU1300_DSCR_CMD0_PSC2_RX, DEV_FLAGS_IN, 0, 16, 0x10a0201c, 0, 0 },
+ { AU1300_DSCR_CMD0_PSC3_TX, DEV_FLAGS_OUT, 0, 16, 0x10a0301c, 0, 0 },
+ { AU1300_DSCR_CMD0_PSC3_RX, DEV_FLAGS_IN, 0, 16, 0x10a0301c, 0, 0 },
+
+ { AU1300_DSCR_CMD0_LCD, DEV_FLAGS_ANYUSE, 0, 0, 0x00000000, 0, 0 },
+ { AU1300_DSCR_CMD0_NAND_FLASH, DEV_FLAGS_IN, 0, 0, 0x00000000, 0, 0 },
+
+ { AU1300_DSCR_CMD0_SDMS_TX2, DEV_FLAGS_OUT, 4, 8, 0x10602000, 0, 0 },
+ { AU1300_DSCR_CMD0_SDMS_RX2, DEV_FLAGS_IN, 4, 8, 0x10602004, 0, 0 },
+
+ { AU1300_DSCR_CMD0_CIM_SYNC, DEV_FLAGS_ANYUSE, 0, 0, 0x00000000, 0, 0 },
+
+ { AU1300_DSCR_CMD0_UDMA, DEV_FLAGS_ANYUSE, 0, 32, 0x14001810, 0, 0 },
+
+ { AU1300_DSCR_CMD0_DMA_REQ0, 0, 0, 0, 0x00000000, 0, 0 },
+ { AU1300_DSCR_CMD0_DMA_REQ1, 0, 0, 0, 0x00000000, 0, 0 },
+
+ { DSCR_CMD0_THROTTLE, DEV_FLAGS_ANYUSE, 0, 0, 0x00000000, 0, 0 },
+ { DSCR_CMD0_ALWAYS, DEV_FLAGS_ANYUSE, 0, 0, 0x00000000, 0, 0 },
+};
+
+/* 32 predefined plus 32 custom */
+#define DBDEV_TAB_SIZE 64
+
+static chan_tab_t *chan_tab_ptr[NUM_DBDMA_CHANS];
+
+static dbdev_tab_t *find_dbdev_id(u32 id)
+{
+ int i;
+ dbdev_tab_t *p;
+ for (i = 0; i < DBDEV_TAB_SIZE; ++i) {
+ p = &dbdev_tab[i];
+ if (p->dev_id == id)
+ return p;
+ }
+ return NULL;
+}
+
+void *au1xxx_ddma_get_nextptr_virt(au1x_ddma_desc_t *dp)
+{
+ return phys_to_virt(DSCR_GET_NXTPTR(dp->dscr_nxtptr));
+}
+EXPORT_SYMBOL(au1xxx_ddma_get_nextptr_virt);
+
+u32 au1xxx_ddma_add_device(dbdev_tab_t *dev)
+{
+ u32 ret = 0;
+ dbdev_tab_t *p;
+ static u16 new_id = 0x1000;
+
+ p = find_dbdev_id(~0);
+ if (NULL != p) {
+ memcpy(p, dev, sizeof(dbdev_tab_t));
+ p->dev_id = DSCR_DEV2CUSTOM_ID(new_id, dev->dev_id);
+ ret = p->dev_id;
+ new_id++;
+#if 0
+ printk(KERN_DEBUG "add_device: id:%x flags:%x padd:%x\n",
+ p->dev_id, p->dev_flags, p->dev_physaddr);
+#endif
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL(au1xxx_ddma_add_device);
+
+void au1xxx_ddma_del_device(u32 devid)
+{
+ dbdev_tab_t *p = find_dbdev_id(devid);
+
+ if (p != NULL) {
+ memset(p, 0, sizeof(dbdev_tab_t));
+ p->dev_id = ~0;
+ }
+}
+EXPORT_SYMBOL(au1xxx_ddma_del_device);
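A sketch of how a driver might register its own target with the routine above; the flags, width, and template id here are assumptions for illustration (headers as at the top of this file, plus <linux/errno.h>):

	static dbdev_tab_t my_mem_dbdev = {		/* hypothetical custom entry */
		.dev_id       = DSCR_CMD0_ALWAYS,	/* template id; a DEV2CUSTOM id comes back */
		.dev_flags    = DEV_FLAGS_ANYUSE,
		.dev_devwidth = 8,			/* FIFO width in bits */
	};

	static u32 my_devid;

	static int my_register_dbdev(void)
	{
		my_devid = au1xxx_ddma_add_device(&my_mem_dbdev);
		return my_devid ? 0 : -ENODEV;		/* 0 means no free slot in dbdev_tab */
	}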
+
+/* Allocate a channel and return a non-zero descriptor if successful. */
+u32 au1xxx_dbdma_chan_alloc(u32 srcid, u32 destid,
+ void (*callback)(int, void *), void *callparam)
+{
+ unsigned long flags;
+ u32 used, chan;
+ u32 dcp;
+ int i;
+ dbdev_tab_t *stp, *dtp;
+ chan_tab_t *ctp;
+ au1x_dma_chan_t *cp;
+
+ /*
+ * We do the initialization on the first channel allocation.
+ * We have to wait because of the interrupt handler initialization
+ * which can't be done successfully during board set up.
+ */
+ if (!dbdma_initialized)
+ return 0;
+
+ stp = find_dbdev_id(srcid);
+ if (stp == NULL)
+ return 0;
+ dtp = find_dbdev_id(destid);
+ if (dtp == NULL)
+ return 0;
+
+ used = 0;
+
+ /* Check to see if we can get both channels. */
+ spin_lock_irqsave(&au1xxx_dbdma_spin_lock, flags);
+ if (!(stp->dev_flags & DEV_FLAGS_INUSE) ||
+ (stp->dev_flags & DEV_FLAGS_ANYUSE)) {
+ /* Got source */
+ stp->dev_flags |= DEV_FLAGS_INUSE;
+ if (!(dtp->dev_flags & DEV_FLAGS_INUSE) ||
+ (dtp->dev_flags & DEV_FLAGS_ANYUSE)) {
+ /* Got destination */
+ dtp->dev_flags |= DEV_FLAGS_INUSE;
+ } else {
+ /* Can't get dest. Release src. */
+ stp->dev_flags &= ~DEV_FLAGS_INUSE;
+ used++;
+ }
+ } else
+ used++;
+ spin_unlock_irqrestore(&au1xxx_dbdma_spin_lock, flags);
+
+ if (used)
+ return 0;
+
+ /* Let's see if we can allocate a channel for it. */
+ ctp = NULL;
+ chan = 0;
+ spin_lock_irqsave(&au1xxx_dbdma_spin_lock, flags);
+ for (i = 0; i < NUM_DBDMA_CHANS; i++)
+ if (chan_tab_ptr[i] == NULL) {
+ /*
+ * If kmalloc fails, it is caught below same
+ * as a channel not available.
+ */
+ ctp = kmalloc(sizeof(chan_tab_t), GFP_ATOMIC);
+ chan_tab_ptr[i] = ctp;
+ break;
+ }
+ spin_unlock_irqrestore(&au1xxx_dbdma_spin_lock, flags);
+
+ if (ctp != NULL) {
+ memset(ctp, 0, sizeof(chan_tab_t));
+ ctp->chan_index = chan = i;
+ dcp = KSEG1ADDR(AU1550_DBDMA_PHYS_ADDR);
+ dcp += (0x0100 * chan);
+ ctp->chan_ptr = (au1x_dma_chan_t *)dcp;
+ cp = (au1x_dma_chan_t *)dcp;
+ ctp->chan_src = stp;
+ ctp->chan_dest = dtp;
+ ctp->chan_callback = callback;
+ ctp->chan_callparam = callparam;
+
+ /* Initialize channel configuration. */
+ i = 0;
+ if (stp->dev_intlevel)
+ i |= DDMA_CFG_SED;
+ if (stp->dev_intpolarity)
+ i |= DDMA_CFG_SP;
+ if (dtp->dev_intlevel)
+ i |= DDMA_CFG_DED;
+ if (dtp->dev_intpolarity)
+ i |= DDMA_CFG_DP;
+ if ((stp->dev_flags & DEV_FLAGS_SYNC) ||
+ (dtp->dev_flags & DEV_FLAGS_SYNC))
+ i |= DDMA_CFG_SYNC;
+ cp->ddma_cfg = i;
+ wmb(); /* drain writebuffer */
+
+ /*
+ * Return a non-zero value that can be used to find the channel
+ * information in subsequent operations.
+ */
+ return (u32)(&chan_tab_ptr[chan]);
+ }
+
+ /* Release devices */
+ stp->dev_flags &= ~DEV_FLAGS_INUSE;
+ dtp->dev_flags &= ~DEV_FLAGS_INUSE;
+
+ return 0;
+}
+EXPORT_SYMBOL(au1xxx_dbdma_chan_alloc);
+
+/*
+ * Set the device width if source or destination is a FIFO.
+ * Should be 8, 16, or 32 bits.
+ */
+u32 au1xxx_dbdma_set_devwidth(u32 chanid, int bits)
+{
+ u32 rv;
+ chan_tab_t *ctp;
+ dbdev_tab_t *stp, *dtp;
+
+ ctp = *((chan_tab_t **)chanid);
+ stp = ctp->chan_src;
+ dtp = ctp->chan_dest;
+ rv = 0;
+
+ if (stp->dev_flags & DEV_FLAGS_IN) { /* Source in fifo */
+ rv = stp->dev_devwidth;
+ stp->dev_devwidth = bits;
+ }
+ if (dtp->dev_flags & DEV_FLAGS_OUT) { /* Destination out fifo */
+ rv = dtp->dev_devwidth;
+ dtp->dev_devwidth = bits;
+ }
+
+ return rv;
+}
+EXPORT_SYMBOL(au1xxx_dbdma_set_devwidth);
+
+/* Allocate a descriptor ring, initializing as much as possible. */
+u32 au1xxx_dbdma_ring_alloc(u32 chanid, int entries)
+{
+ int i;
+ u32 desc_base, srcid, destid;
+ u32 cmd0, cmd1, src1, dest1;
+ u32 src0, dest0;
+ chan_tab_t *ctp;
+ dbdev_tab_t *stp, *dtp;
+ au1x_ddma_desc_t *dp;
+
+ /*
+ * I guess we could check this to be within the
+ * range of the table......
+ */
+ ctp = *((chan_tab_t **)chanid);
+ stp = ctp->chan_src;
+ dtp = ctp->chan_dest;
+
+ /*
+ * The descriptors must be 32-byte aligned. There is a
+ * possibility the allocation will give us such an address,
+ * and if we try that first we are likely to not waste larger
+ * slabs of memory.
+ */
+ desc_base = (u32)kmalloc_array(entries, sizeof(au1x_ddma_desc_t),
+ GFP_KERNEL|GFP_DMA);
+ if (desc_base == 0)
+ return 0;
+
+ if (desc_base & 0x1f) {
+ /*
+ * Lost....do it again, allocate extra, and round
+ * the address base.
+ */
+ kfree((const void *)desc_base);
+ i = entries * sizeof(au1x_ddma_desc_t);
+ i += (sizeof(au1x_ddma_desc_t) - 1);
+ desc_base = (u32)kmalloc(i, GFP_KERNEL|GFP_DMA);
+ if (desc_base == 0)
+ return 0;
+
+ ctp->cdb_membase = desc_base;
+ desc_base = ALIGN_ADDR(desc_base, sizeof(au1x_ddma_desc_t));
+ } else
+ ctp->cdb_membase = desc_base;
+
+ dp = (au1x_ddma_desc_t *)desc_base;
+
+ /* Keep track of the base descriptor. */
+ ctp->chan_desc_base = dp;
+
+ /* Initialize the rings with as much information as we know. */
+ srcid = stp->dev_id;
+ destid = dtp->dev_id;
+
+ cmd0 = cmd1 = src1 = dest1 = 0;
+ src0 = dest0 = 0;
+
+ cmd0 |= DSCR_CMD0_SID(srcid);
+ cmd0 |= DSCR_CMD0_DID(destid);
+ cmd0 |= DSCR_CMD0_IE | DSCR_CMD0_CV;
+ cmd0 |= DSCR_CMD0_ST(DSCR_CMD0_ST_NOCHANGE);
+
+ /* Is it mem to mem transfer? */
+ if (((DSCR_CUSTOM2DEV_ID(srcid) == DSCR_CMD0_THROTTLE) ||
+ (DSCR_CUSTOM2DEV_ID(srcid) == DSCR_CMD0_ALWAYS)) &&
+ ((DSCR_CUSTOM2DEV_ID(destid) == DSCR_CMD0_THROTTLE) ||
+ (DSCR_CUSTOM2DEV_ID(destid) == DSCR_CMD0_ALWAYS)))
+ cmd0 |= DSCR_CMD0_MEM;
+
+ switch (stp->dev_devwidth) {
+ case 8:
+ cmd0 |= DSCR_CMD0_SW(DSCR_CMD0_BYTE);
+ break;
+ case 16:
+ cmd0 |= DSCR_CMD0_SW(DSCR_CMD0_HALFWORD);
+ break;
+ case 32:
+ default:
+ cmd0 |= DSCR_CMD0_SW(DSCR_CMD0_WORD);
+ break;
+ }
+
+ switch (dtp->dev_devwidth) {
+ case 8:
+ cmd0 |= DSCR_CMD0_DW(DSCR_CMD0_BYTE);
+ break;
+ case 16:
+ cmd0 |= DSCR_CMD0_DW(DSCR_CMD0_HALFWORD);
+ break;
+ case 32:
+ default:
+ cmd0 |= DSCR_CMD0_DW(DSCR_CMD0_WORD);
+ break;
+ }
+
+ /*
+ * If the device is marked as an in/out FIFO, ensure it is
+ * set non-coherent.
+ */
+ if (stp->dev_flags & DEV_FLAGS_IN)
+ cmd0 |= DSCR_CMD0_SN; /* Source in FIFO */
+ if (dtp->dev_flags & DEV_FLAGS_OUT)
+ cmd0 |= DSCR_CMD0_DN; /* Destination out FIFO */
+
+ /*
+ * Set up source1. For now, assume no stride and increment.
+ * A channel attribute update can change this later.
+ */
+ switch (stp->dev_tsize) {
+ case 1:
+ src1 |= DSCR_SRC1_STS(DSCR_xTS_SIZE1);
+ break;
+ case 2:
+ src1 |= DSCR_SRC1_STS(DSCR_xTS_SIZE2);
+ break;
+ case 4:
+ src1 |= DSCR_SRC1_STS(DSCR_xTS_SIZE4);
+ break;
+ case 8:
+ default:
+ src1 |= DSCR_SRC1_STS(DSCR_xTS_SIZE8);
+ break;
+ }
+
+ /* If source input is FIFO, set static address. */
+ if (stp->dev_flags & DEV_FLAGS_IN) {
+ if (stp->dev_flags & DEV_FLAGS_BURSTABLE)
+ src1 |= DSCR_SRC1_SAM(DSCR_xAM_BURST);
+ else
+ src1 |= DSCR_SRC1_SAM(DSCR_xAM_STATIC);
+ }
+
+ if (stp->dev_physaddr)
+ src0 = stp->dev_physaddr;
+
+ /*
+ * Set up dest1. For now, assume no stride and increment.
+ * A channel attribute update can change this later.
+ */
+ switch (dtp->dev_tsize) {
+ case 1:
+ dest1 |= DSCR_DEST1_DTS(DSCR_xTS_SIZE1);
+ break;
+ case 2:
+ dest1 |= DSCR_DEST1_DTS(DSCR_xTS_SIZE2);
+ break;
+ case 4:
+ dest1 |= DSCR_DEST1_DTS(DSCR_xTS_SIZE4);
+ break;
+ case 8:
+ default:
+ dest1 |= DSCR_DEST1_DTS(DSCR_xTS_SIZE8);
+ break;
+ }
+
+ /* If destination output is FIFO, set static address. */
+ if (dtp->dev_flags & DEV_FLAGS_OUT) {
+ if (dtp->dev_flags & DEV_FLAGS_BURSTABLE)
+ dest1 |= DSCR_DEST1_DAM(DSCR_xAM_BURST);
+ else
+ dest1 |= DSCR_DEST1_DAM(DSCR_xAM_STATIC);
+ }
+
+ if (dtp->dev_physaddr)
+ dest0 = dtp->dev_physaddr;
+
+#if 0
+ printk(KERN_DEBUG "did:%x sid:%x cmd0:%x cmd1:%x source0:%x "
+ "source1:%x dest0:%x dest1:%x\n",
+ dtp->dev_id, stp->dev_id, cmd0, cmd1, src0,
+ src1, dest0, dest1);
+#endif
+ for (i = 0; i < entries; i++) {
+ dp->dscr_cmd0 = cmd0;
+ dp->dscr_cmd1 = cmd1;
+ dp->dscr_source0 = src0;
+ dp->dscr_source1 = src1;
+ dp->dscr_dest0 = dest0;
+ dp->dscr_dest1 = dest1;
+ dp->dscr_stat = 0;
+ dp->sw_context = 0;
+ dp->sw_status = 0;
+ dp->dscr_nxtptr = DSCR_NXTPTR(virt_to_phys(dp + 1));
+ dp++;
+ }
+
+ /* Make last descriptor point to the first. */
+ dp--;
+ dp->dscr_nxtptr = DSCR_NXTPTR(virt_to_phys(ctp->chan_desc_base));
+ ctp->get_ptr = ctp->put_ptr = ctp->cur_ptr = ctp->chan_desc_base;
+
+ return (u32)ctp->chan_desc_base;
+}
+EXPORT_SYMBOL(au1xxx_dbdma_ring_alloc);
+
+/*
+ * Put a source buffer into the DMA ring.
+ * This updates the source pointer and byte count. Normally used
+ * for memory to fifo transfers.
+ */
+u32 au1xxx_dbdma_put_source(u32 chanid, dma_addr_t buf, int nbytes, u32 flags)
+{
+ chan_tab_t *ctp;
+ au1x_ddma_desc_t *dp;
+
+ /*
+ * I guess we could check this to be within the
+ * range of the table......
+ */
+ ctp = *(chan_tab_t **)chanid;
+
+ /*
+ * We should have multiple callers for a particular channel,
+ * an interrupt doesn't affect this pointer nor the descriptor,
+ * so no locking should be needed.
+ */
+ dp = ctp->put_ptr;
+
+ /*
+ * If the descriptor is valid, we are way ahead of the DMA
+ * engine, so just return an error condition.
+ */
+ if (dp->dscr_cmd0 & DSCR_CMD0_V)
+ return 0;
+
+ /* Load up buffer address and byte count. */
+ dp->dscr_source0 = buf & ~0UL;
+ dp->dscr_cmd1 = nbytes;
+ /* Check flags */
+ if (flags & DDMA_FLAGS_IE)
+ dp->dscr_cmd0 |= DSCR_CMD0_IE;
+ if (flags & DDMA_FLAGS_NOIE)
+ dp->dscr_cmd0 &= ~DSCR_CMD0_IE;
+
+ /*
+ * There is an erratum on certain Au1200/Au1550 revisions that could
+ * result in "stale" data being DMA'ed. It has to do with the snoop
+ * logic on the cache eviction buffer. dma_default_coherent is set
+ * to false on these parts.
+ */
+ if (!dma_default_coherent)
+ dma_cache_wback_inv(KSEG0ADDR(buf), nbytes);
+ dp->dscr_cmd0 |= DSCR_CMD0_V; /* Let it rip */
+ wmb(); /* drain writebuffer */
+ dma_cache_wback_inv((unsigned long)dp, sizeof(*dp));
+ ctp->chan_ptr->ddma_dbell = 0;
+ wmb(); /* force doorbell write out to dma engine */
+
+ /* Get next descriptor pointer. */
+ ctp->put_ptr = phys_to_virt(DSCR_GET_NXTPTR(dp->dscr_nxtptr));
+
+ /* Return something non-zero. */
+ return nbytes;
+}
+EXPORT_SYMBOL(au1xxx_dbdma_put_source);
+
+/* Put a destination buffer into the DMA ring.
+ * This updates the destination pointer and byte count. Normally used
+ * to place an empty buffer into the ring for fifo to memory transfers.
+ */
+u32 au1xxx_dbdma_put_dest(u32 chanid, dma_addr_t buf, int nbytes, u32 flags)
+{
+ chan_tab_t *ctp;
+ au1x_ddma_desc_t *dp;
+
+ /* I guess we could check this to be within the
+ * range of the table......
+ */
+ ctp = *((chan_tab_t **)chanid);
+
+ /* We should have multiple callers for a particular channel,
+ * an interrupt doesn't affect this pointer nor the descriptor,
+ * so no locking should be needed.
+ */
+ dp = ctp->put_ptr;
+
+ /* If the descriptor is valid, we are way ahead of the DMA
+ * engine, so just return an error condition.
+ */
+ if (dp->dscr_cmd0 & DSCR_CMD0_V)
+ return 0;
+
+ /* Load up buffer address and byte count */
+
+ /* Check flags */
+ if (flags & DDMA_FLAGS_IE)
+ dp->dscr_cmd0 |= DSCR_CMD0_IE;
+ if (flags & DDMA_FLAGS_NOIE)
+ dp->dscr_cmd0 &= ~DSCR_CMD0_IE;
+
+ dp->dscr_dest0 = buf & ~0UL;
+ dp->dscr_cmd1 = nbytes;
+#if 0
+ printk(KERN_DEBUG "cmd0:%x cmd1:%x source0:%x source1:%x dest0:%x dest1:%x\n",
+ dp->dscr_cmd0, dp->dscr_cmd1, dp->dscr_source0,
+ dp->dscr_source1, dp->dscr_dest0, dp->dscr_dest1);
+#endif
+ /*
+ * There is an erratum on certain Au1200/Au1550 revisions that could
+ * result in "stale" data being DMA'ed. It has to do with the snoop
+ * logic on the cache eviction buffer. dma_default_coherent is set
+ * to false on these parts.
+ */
+ if (!dma_default_coherent)
+ dma_cache_inv(KSEG0ADDR(buf), nbytes);
+ dp->dscr_cmd0 |= DSCR_CMD0_V; /* Let it rip */
+ wmb(); /* drain writebuffer */
+ dma_cache_wback_inv((unsigned long)dp, sizeof(*dp));
+ ctp->chan_ptr->ddma_dbell = 0;
+ wmb(); /* force doorbell write out to dma engine */
+
+ /* Get next descriptor pointer. */
+ ctp->put_ptr = phys_to_virt(DSCR_GET_NXTPTR(dp->dscr_nxtptr));
+
+ /* Return something non-zero. */
+ return nbytes;
+}
+EXPORT_SYMBOL(au1xxx_dbdma_put_dest);
+
+/*
+ * Get a destination buffer into the DMA ring.
+ * Normally used to get a full buffer from the ring during fifo
+ * to memory transfers. This does not set the valid bit, you will
+ * have to put another destination buffer to keep the DMA going.
+ */
+u32 au1xxx_dbdma_get_dest(u32 chanid, void **buf, int *nbytes)
+{
+ chan_tab_t *ctp;
+ au1x_ddma_desc_t *dp;
+ u32 rv;
+
+ /*
+ * I guess we could check this to be within the
+ * range of the table......
+ */
+ ctp = *((chan_tab_t **)chanid);
+
+ /*
+ * We should have multiple callers for a particular channel,
+ * an interrupt doesn't affect this pointer nor the descriptor,
+ * so no locking should be needed.
+ */
+ dp = ctp->get_ptr;
+
+ /*
+ * If the descriptor is valid, we are way ahead of the DMA
+ * engine, so just return an error condition.
+ */
+ if (dp->dscr_cmd0 & DSCR_CMD0_V)
+ return 0;
+
+ /* Return buffer address and byte count. */
+ *buf = (void *)(phys_to_virt(dp->dscr_dest0));
+ *nbytes = dp->dscr_cmd1;
+ rv = dp->dscr_stat;
+
+ /* Get next descriptor pointer. */
+ ctp->get_ptr = phys_to_virt(DSCR_GET_NXTPTR(dp->dscr_nxtptr));
+
+ /* Return something non-zero. */
+ return rv;
+}
+EXPORT_SYMBOL_GPL(au1xxx_dbdma_get_dest);
+
+void au1xxx_dbdma_stop(u32 chanid)
+{
+ chan_tab_t *ctp;
+ au1x_dma_chan_t *cp;
+ int halt_timeout = 0;
+
+ ctp = *((chan_tab_t **)chanid);
+
+ cp = ctp->chan_ptr;
+ cp->ddma_cfg &= ~DDMA_CFG_EN; /* Disable channel */
+ wmb(); /* drain writebuffer */
+ while (!(cp->ddma_stat & DDMA_STAT_H)) {
+ udelay(1);
+ halt_timeout++;
+ if (halt_timeout > 100) {
+ printk(KERN_WARNING "warning: DMA channel won't halt\n");
+ break;
+ }
+ }
+ /* clear current desc valid and doorbell */
+ cp->ddma_stat |= (DDMA_STAT_DB | DDMA_STAT_V);
+ wmb(); /* drain writebuffer */
+}
+EXPORT_SYMBOL(au1xxx_dbdma_stop);
+
+/*
+ * Start using the current descriptor pointer. If the DBDMA encounters
+ * a non-valid descriptor, it will stop. In this case, we can just
+ * continue by adding a buffer to the list and starting again.
+ */
+void au1xxx_dbdma_start(u32 chanid)
+{
+ chan_tab_t *ctp;
+ au1x_dma_chan_t *cp;
+
+ ctp = *((chan_tab_t **)chanid);
+ cp = ctp->chan_ptr;
+ cp->ddma_desptr = virt_to_phys(ctp->cur_ptr);
+ cp->ddma_cfg |= DDMA_CFG_EN; /* Enable channel */
+ wmb(); /* drain writebuffer */
+ cp->ddma_dbell = 0;
+ wmb(); /* drain writebuffer */
+}
+EXPORT_SYMBOL(au1xxx_dbdma_start);
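Putting the exported calls together, a rough memory-to-PSC transmit setup could look like the sketch below; the device ids, FIFO width, descriptor count and completion handling are illustrative assumptions, and buf is expected to be a DMA address (e.g. from dma_map_single()):

	static void my_tx_done(int irq, void *arg)
	{
		complete(arg);				/* hypothetical struct completion */
	}

	static u32 my_open_psc0_tx(struct completion *done)
	{
		u32 chan = au1xxx_dbdma_chan_alloc(DSCR_CMD0_ALWAYS,
						   AU1550_DSCR_CMD0_PSC0_TX,
						   my_tx_done, done);
		if (!chan)
			return 0;

		au1xxx_dbdma_set_devwidth(chan, 8);	/* feed the PSC FIFO bytewise */
		if (!au1xxx_dbdma_ring_alloc(chan, 8)) {	/* 8 descriptors */
			au1xxx_dbdma_chan_free(chan);
			return 0;
		}
		return chan;
	}

	/* per buffer: au1xxx_dbdma_put_source(chan, buf, len, DDMA_FLAGS_IE);
	 * then au1xxx_dbdma_start(chan) and wait for my_tx_done().
	 */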
+
+void au1xxx_dbdma_reset(u32 chanid)
+{
+ chan_tab_t *ctp;
+ au1x_ddma_desc_t *dp;
+
+ au1xxx_dbdma_stop(chanid);
+
+ ctp = *((chan_tab_t **)chanid);
+ ctp->get_ptr = ctp->put_ptr = ctp->cur_ptr = ctp->chan_desc_base;
+
+ /* Run through the descriptors and reset the valid indicator. */
+ dp = ctp->chan_desc_base;
+
+ do {
+ dp->dscr_cmd0 &= ~DSCR_CMD0_V;
+ /*
+ * Reset our software status -- this is used to determine
+ * if a descriptor is in use by upper level software, since
+ * posting can reset the 'V' bit.
+ */
+ dp->sw_status = 0;
+ dp = phys_to_virt(DSCR_GET_NXTPTR(dp->dscr_nxtptr));
+ } while (dp != ctp->chan_desc_base);
+}
+EXPORT_SYMBOL(au1xxx_dbdma_reset);
+
+u32 au1xxx_get_dma_residue(u32 chanid)
+{
+ chan_tab_t *ctp;
+ au1x_dma_chan_t *cp;
+ u32 rv;
+
+ ctp = *((chan_tab_t **)chanid);
+ cp = ctp->chan_ptr;
+
+ /* This is only valid if the channel is stopped. */
+ rv = cp->ddma_bytecnt;
+ wmb(); /* drain writebuffer */
+
+ return rv;
+}
+EXPORT_SYMBOL_GPL(au1xxx_get_dma_residue);
+
+void au1xxx_dbdma_chan_free(u32 chanid)
+{
+ chan_tab_t *ctp;
+ dbdev_tab_t *stp, *dtp;
+
+ ctp = *((chan_tab_t **)chanid);
+ stp = ctp->chan_src;
+ dtp = ctp->chan_dest;
+
+ au1xxx_dbdma_stop(chanid);
+
+ kfree((void *)ctp->cdb_membase);
+
+ stp->dev_flags &= ~DEV_FLAGS_INUSE;
+ dtp->dev_flags &= ~DEV_FLAGS_INUSE;
+ chan_tab_ptr[ctp->chan_index] = NULL;
+
+ kfree(ctp);
+}
+EXPORT_SYMBOL(au1xxx_dbdma_chan_free);
+
+static irqreturn_t dbdma_interrupt(int irq, void *dev_id)
+{
+ u32 intstat;
+ u32 chan_index;
+ chan_tab_t *ctp;
+ au1x_ddma_desc_t *dp;
+ au1x_dma_chan_t *cp;
+
+ intstat = dbdma_gptr->ddma_intstat;
+ wmb(); /* drain writebuffer */
+ chan_index = __ffs(intstat);
+
+ ctp = chan_tab_ptr[chan_index];
+ cp = ctp->chan_ptr;
+ dp = ctp->cur_ptr;
+
+ /* Reset interrupt. */
+ cp->ddma_irq = 0;
+ wmb(); /* drain writebuffer */
+
+ if (ctp->chan_callback)
+ ctp->chan_callback(irq, ctp->chan_callparam);
+
+ ctp->cur_ptr = phys_to_virt(DSCR_GET_NXTPTR(dp->dscr_nxtptr));
+ return IRQ_RETVAL(1);
+}
+
+void au1xxx_dbdma_dump(u32 chanid)
+{
+ chan_tab_t *ctp;
+ au1x_ddma_desc_t *dp;
+ dbdev_tab_t *stp, *dtp;
+ au1x_dma_chan_t *cp;
+ u32 i = 0;
+
+ ctp = *((chan_tab_t **)chanid);
+ stp = ctp->chan_src;
+ dtp = ctp->chan_dest;
+ cp = ctp->chan_ptr;
+
+ printk(KERN_DEBUG "Chan %x, stp %x (dev %d) dtp %x (dev %d)\n",
+ (u32)ctp, (u32)stp, stp - dbdev_tab, (u32)dtp,
+ dtp - dbdev_tab);
+ printk(KERN_DEBUG "desc base %x, get %x, put %x, cur %x\n",
+ (u32)(ctp->chan_desc_base), (u32)(ctp->get_ptr),
+ (u32)(ctp->put_ptr), (u32)(ctp->cur_ptr));
+
+ printk(KERN_DEBUG "dbdma chan %x\n", (u32)cp);
+ printk(KERN_DEBUG "cfg %08x, desptr %08x, statptr %08x\n",
+ cp->ddma_cfg, cp->ddma_desptr, cp->ddma_statptr);
+ printk(KERN_DEBUG "dbell %08x, irq %08x, stat %08x, bytecnt %08x\n",
+ cp->ddma_dbell, cp->ddma_irq, cp->ddma_stat,
+ cp->ddma_bytecnt);
+
+ /* Run through the descriptors */
+ dp = ctp->chan_desc_base;
+
+ do {
+ printk(KERN_DEBUG "Dp[%d]= %08x, cmd0 %08x, cmd1 %08x\n",
+ i++, (u32)dp, dp->dscr_cmd0, dp->dscr_cmd1);
+ printk(KERN_DEBUG "src0 %08x, src1 %08x, dest0 %08x, dest1 %08x\n",
+ dp->dscr_source0, dp->dscr_source1,
+ dp->dscr_dest0, dp->dscr_dest1);
+ printk(KERN_DEBUG "stat %08x, nxtptr %08x\n",
+ dp->dscr_stat, dp->dscr_nxtptr);
+ dp = phys_to_virt(DSCR_GET_NXTPTR(dp->dscr_nxtptr));
+ } while (dp != ctp->chan_desc_base);
+}
+
+/* Put a descriptor into the DMA ring.
+ * This updates the source/destination pointers and byte count.
+ */
+u32 au1xxx_dbdma_put_dscr(u32 chanid, au1x_ddma_desc_t *dscr)
+{
+ chan_tab_t *ctp;
+ au1x_ddma_desc_t *dp;
+ u32 nbytes = 0;
+
+	/*
+	 * We could sanity-check that this lies within the range of
+	 * the channel table, but currently don't.
+	 */
+ ctp = *((chan_tab_t **)chanid);
+
+	/*
+	 * We shouldn't have multiple callers for a particular channel,
+	 * and an interrupt doesn't affect this pointer or the descriptor,
+	 * so no locking should be needed.
+	 */
+ dp = ctp->put_ptr;
+
+ /*
+ * If the descriptor is valid, we are way ahead of the DMA
+ * engine, so just return an error condition.
+ */
+ if (dp->dscr_cmd0 & DSCR_CMD0_V)
+ return 0;
+
+ /* Load up buffer addresses and byte count. */
+ dp->dscr_dest0 = dscr->dscr_dest0;
+ dp->dscr_source0 = dscr->dscr_source0;
+ dp->dscr_dest1 = dscr->dscr_dest1;
+ dp->dscr_source1 = dscr->dscr_source1;
+ dp->dscr_cmd1 = dscr->dscr_cmd1;
+ nbytes = dscr->dscr_cmd1;
+ /* Allow the caller to specify if an interrupt is generated */
+ dp->dscr_cmd0 &= ~DSCR_CMD0_IE;
+ dp->dscr_cmd0 |= dscr->dscr_cmd0 | DSCR_CMD0_V;
+ ctp->chan_ptr->ddma_dbell = 0;
+
+ /* Get next descriptor pointer. */
+ ctp->put_ptr = phys_to_virt(DSCR_GET_NXTPTR(dp->dscr_nxtptr));
+
+ /* Return something non-zero. */
+ return nbytes;
+}
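+
+/*
+ * Illustrative sketch (editor's addition, not part of the original file):
+ * a caller feeding a pre-built descriptor into the ring pairs this with the
+ * start helper above; put_dscr() returns 0 when the next slot is still
+ * owned by the DMA engine. 'chanid' is assumed to come from
+ * au1xxx_dbdma_chan_alloc() and 'dscr' from the caller's own setup code:
+ *
+ *	if (au1xxx_dbdma_put_dscr(chanid, &dscr) == 0)
+ *		return -EBUSY;
+ *	au1xxx_dbdma_start(chanid);
+ */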
+
+
+static unsigned long alchemy_dbdma_pm_data[NUM_DBDMA_CHANS + 1][6];
+
+static int alchemy_dbdma_suspend(void)
+{
+ int i;
+ void __iomem *addr;
+
+ addr = (void __iomem *)KSEG1ADDR(AU1550_DBDMA_CONF_PHYS_ADDR);
+ alchemy_dbdma_pm_data[0][0] = __raw_readl(addr + 0x00);
+ alchemy_dbdma_pm_data[0][1] = __raw_readl(addr + 0x04);
+ alchemy_dbdma_pm_data[0][2] = __raw_readl(addr + 0x08);
+ alchemy_dbdma_pm_data[0][3] = __raw_readl(addr + 0x0c);
+
+ /* save channel configurations */
+ addr = (void __iomem *)KSEG1ADDR(AU1550_DBDMA_PHYS_ADDR);
+ for (i = 1; i <= NUM_DBDMA_CHANS; i++) {
+ alchemy_dbdma_pm_data[i][0] = __raw_readl(addr + 0x00);
+ alchemy_dbdma_pm_data[i][1] = __raw_readl(addr + 0x04);
+ alchemy_dbdma_pm_data[i][2] = __raw_readl(addr + 0x08);
+ alchemy_dbdma_pm_data[i][3] = __raw_readl(addr + 0x0c);
+ alchemy_dbdma_pm_data[i][4] = __raw_readl(addr + 0x10);
+ alchemy_dbdma_pm_data[i][5] = __raw_readl(addr + 0x14);
+
+ /* halt channel */
+ __raw_writel(alchemy_dbdma_pm_data[i][0] & ~1, addr + 0x00);
+ wmb();
+ while (!(__raw_readl(addr + 0x14) & 1))
+ wmb();
+
+ addr += 0x100; /* next channel base */
+ }
+ /* disable channel interrupts */
+ addr = (void __iomem *)KSEG1ADDR(AU1550_DBDMA_CONF_PHYS_ADDR);
+ __raw_writel(0, addr + 0x0c);
+ wmb();
+
+ return 0;
+}
+
+static void alchemy_dbdma_resume(void)
+{
+ int i;
+ void __iomem *addr;
+
+ addr = (void __iomem *)KSEG1ADDR(AU1550_DBDMA_CONF_PHYS_ADDR);
+ __raw_writel(alchemy_dbdma_pm_data[0][0], addr + 0x00);
+ __raw_writel(alchemy_dbdma_pm_data[0][1], addr + 0x04);
+ __raw_writel(alchemy_dbdma_pm_data[0][2], addr + 0x08);
+ __raw_writel(alchemy_dbdma_pm_data[0][3], addr + 0x0c);
+
+ /* restore channel configurations */
+ addr = (void __iomem *)KSEG1ADDR(AU1550_DBDMA_PHYS_ADDR);
+ for (i = 1; i <= NUM_DBDMA_CHANS; i++) {
+ __raw_writel(alchemy_dbdma_pm_data[i][0], addr + 0x00);
+ __raw_writel(alchemy_dbdma_pm_data[i][1], addr + 0x04);
+ __raw_writel(alchemy_dbdma_pm_data[i][2], addr + 0x08);
+ __raw_writel(alchemy_dbdma_pm_data[i][3], addr + 0x0c);
+ __raw_writel(alchemy_dbdma_pm_data[i][4], addr + 0x10);
+ __raw_writel(alchemy_dbdma_pm_data[i][5], addr + 0x14);
+ wmb();
+ addr += 0x100; /* next channel base */
+ }
+}
+
+static struct syscore_ops alchemy_dbdma_syscore_ops = {
+ .suspend = alchemy_dbdma_suspend,
+ .resume = alchemy_dbdma_resume,
+};
+
+static int __init dbdma_setup(unsigned int irq, dbdev_tab_t *idtable)
+{
+ int ret;
+
+ dbdev_tab = kcalloc(DBDEV_TAB_SIZE, sizeof(dbdev_tab_t), GFP_KERNEL);
+ if (!dbdev_tab)
+ return -ENOMEM;
+
+ memcpy(dbdev_tab, idtable, 32 * sizeof(dbdev_tab_t));
+ for (ret = 32; ret < DBDEV_TAB_SIZE; ret++)
+ dbdev_tab[ret].dev_id = ~0;
+
+ dbdma_gptr->ddma_config = 0;
+ dbdma_gptr->ddma_throttle = 0;
+ dbdma_gptr->ddma_inten = 0xffff;
+ wmb(); /* drain writebuffer */
+
+ ret = request_irq(irq, dbdma_interrupt, 0, "dbdma", (void *)dbdma_gptr);
+ if (ret)
+ printk(KERN_ERR "Cannot grab DBDMA interrupt!\n");
+ else {
+ dbdma_initialized = 1;
+ register_syscore_ops(&alchemy_dbdma_syscore_ops);
+ }
+
+ return ret;
+}
+
+static int __init alchemy_dbdma_init(void)
+{
+ switch (alchemy_get_cputype()) {
+ case ALCHEMY_CPU_AU1550:
+ return dbdma_setup(AU1550_DDMA_INT, au1550_dbdev_tab);
+ case ALCHEMY_CPU_AU1200:
+ return dbdma_setup(AU1200_DDMA_INT, au1200_dbdev_tab);
+ case ALCHEMY_CPU_AU1300:
+ return dbdma_setup(AU1300_DDMA_INT, au1300_dbdev_tab);
+ }
+ return 0;
+}
+subsys_initcall(alchemy_dbdma_init);
diff --git a/arch/mips/alchemy/common/dma.c b/arch/mips/alchemy/common/dma.c
new file mode 100644
index 000000000..973049b5b
--- /dev/null
+++ b/arch/mips/alchemy/common/dma.c
@@ -0,0 +1,265 @@
+/*
+ *
+ * BRIEF MODULE DESCRIPTION
+ * A DMA channel allocator for Au1x00. API is modeled loosely off of
+ * linux/kernel/dma.c.
+ *
+ * Copyright 2000, 2008 MontaVista Software Inc.
+ * Author: MontaVista Software, Inc. <source@mvista.com>
+ * Copyright (C) 2005 Ralf Baechle (ralf@linux-mips.org)
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
+ * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
+ * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+
+#include <linux/init.h>
+#include <linux/export.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/spinlock.h>
+#include <linux/interrupt.h>
+
+#include <asm/mach-au1x00/au1000.h>
+#include <asm/mach-au1x00/au1000_dma.h>
+
+/*
+ * A note on resource allocation:
+ *
+ * All drivers needing DMA channels should allocate and release them
+ * through the public routines `request_dma()' and `free_dma()'.
+ *
+ * In order to avoid problems, all processes should allocate resources in
+ * the same sequence and release them in the reverse order.
+ *
+ * So, when allocating DMAs and IRQs, first allocate the DMA, then the IRQ.
+ * When releasing them, first release the IRQ, then release the DMA. The
+ * main reason for this order is that, if you are requesting the DMA buffer
+ * done interrupt, you won't know the irq number until the DMA channel is
+ * returned from request_dma.
+ */
+
+/* DMA Channel register block spacing */
+#define DMA_CHANNEL_LEN 0x00000100
+
+DEFINE_SPINLOCK(au1000_dma_spin_lock);
+
+struct dma_chan au1000_dma_table[NUM_AU1000_DMA_CHANNELS] = {
+ {.dev_id = -1,},
+ {.dev_id = -1,},
+ {.dev_id = -1,},
+ {.dev_id = -1,},
+ {.dev_id = -1,},
+ {.dev_id = -1,},
+ {.dev_id = -1,},
+ {.dev_id = -1,}
+};
+EXPORT_SYMBOL(au1000_dma_table);
+
+/* Device FIFO addresses and default DMA modes */
+static const struct dma_dev {
+ unsigned int fifo_addr;
+ unsigned int dma_mode;
+} dma_dev_table[DMA_NUM_DEV] = {
+ { AU1000_UART0_PHYS_ADDR + 0x04, DMA_DW8 }, /* UART0_TX */
+ { AU1000_UART0_PHYS_ADDR + 0x00, DMA_DW8 | DMA_DR }, /* UART0_RX */
+ { 0, 0 }, /* DMA_REQ0 */
+ { 0, 0 }, /* DMA_REQ1 */
+ { AU1000_AC97_PHYS_ADDR + 0x08, DMA_DW16 }, /* AC97 TX c */
+ { AU1000_AC97_PHYS_ADDR + 0x08, DMA_DW16 | DMA_DR }, /* AC97 RX c */
+ { AU1000_UART3_PHYS_ADDR + 0x04, DMA_DW8 | DMA_NC }, /* UART3_TX */
+ { AU1000_UART3_PHYS_ADDR + 0x00, DMA_DW8 | DMA_NC | DMA_DR }, /* UART3_RX */
+ { AU1000_USB_UDC_PHYS_ADDR + 0x00, DMA_DW8 | DMA_NC | DMA_DR }, /* EP0RD */
+ { AU1000_USB_UDC_PHYS_ADDR + 0x04, DMA_DW8 | DMA_NC }, /* EP0WR */
+ { AU1000_USB_UDC_PHYS_ADDR + 0x08, DMA_DW8 | DMA_NC }, /* EP2WR */
+ { AU1000_USB_UDC_PHYS_ADDR + 0x0c, DMA_DW8 | DMA_NC }, /* EP3WR */
+ { AU1000_USB_UDC_PHYS_ADDR + 0x10, DMA_DW8 | DMA_NC | DMA_DR }, /* EP4RD */
+ { AU1000_USB_UDC_PHYS_ADDR + 0x14, DMA_DW8 | DMA_NC | DMA_DR }, /* EP5RD */
+ /* on Au1500, these 2 are DMA_REQ2/3 (GPIO208/209) instead! */
+ { AU1000_I2S_PHYS_ADDR + 0x00, DMA_DW32 | DMA_NC}, /* I2S TX */
+ { AU1000_I2S_PHYS_ADDR + 0x00, DMA_DW32 | DMA_NC | DMA_DR}, /* I2S RX */
+};
+
+int au1000_dma_read_proc(char *buf, char **start, off_t fpos,
+ int length, int *eof, void *data)
+{
+ int i, len = 0;
+ struct dma_chan *chan;
+
+ for (i = 0; i < NUM_AU1000_DMA_CHANNELS; i++) {
+ chan = get_dma_chan(i);
+ if (chan != NULL)
+ len += sprintf(buf + len, "%2d: %s\n",
+ i, chan->dev_str);
+ }
+
+ if (fpos >= len) {
+ *start = buf;
+ *eof = 1;
+ return 0;
+ }
+ *start = buf + fpos;
+ len -= fpos;
+ if (len > length)
+ return length;
+ *eof = 1;
+ return len;
+}
+
+/* Device FIFO addresses and default DMA modes - 2nd bank */
+static const struct dma_dev dma_dev_table_bank2[DMA_NUM_DEV_BANK2] = {
+ { AU1100_SD0_PHYS_ADDR + 0x00, DMA_DS | DMA_DW8 }, /* coherent */
+ { AU1100_SD0_PHYS_ADDR + 0x04, DMA_DS | DMA_DW8 | DMA_DR }, /* coherent */
+ { AU1100_SD1_PHYS_ADDR + 0x00, DMA_DS | DMA_DW8 }, /* coherent */
+ { AU1100_SD1_PHYS_ADDR + 0x04, DMA_DS | DMA_DW8 | DMA_DR } /* coherent */
+};
+
+void dump_au1000_dma_channel(unsigned int dmanr)
+{
+ struct dma_chan *chan;
+
+ if (dmanr >= NUM_AU1000_DMA_CHANNELS)
+ return;
+ chan = &au1000_dma_table[dmanr];
+
+ printk(KERN_INFO "Au1000 DMA%d Register Dump:\n", dmanr);
+ printk(KERN_INFO " mode = 0x%08x\n",
+ __raw_readl(chan->io + DMA_MODE_SET));
+ printk(KERN_INFO " addr = 0x%08x\n",
+ __raw_readl(chan->io + DMA_PERIPHERAL_ADDR));
+ printk(KERN_INFO " start0 = 0x%08x\n",
+ __raw_readl(chan->io + DMA_BUFFER0_START));
+ printk(KERN_INFO " start1 = 0x%08x\n",
+ __raw_readl(chan->io + DMA_BUFFER1_START));
+ printk(KERN_INFO " count0 = 0x%08x\n",
+ __raw_readl(chan->io + DMA_BUFFER0_COUNT));
+ printk(KERN_INFO " count1 = 0x%08x\n",
+ __raw_readl(chan->io + DMA_BUFFER1_COUNT));
+}
+
+/*
+ * Finds a free channel, and binds the requested device to it.
+ * Returns the allocated channel number, or negative on error.
+ * Requests the DMA done IRQ if irqhandler != NULL.
+ */
+int request_au1000_dma(int dev_id, const char *dev_str,
+ irq_handler_t irqhandler,
+ unsigned long irqflags,
+ void *irq_dev_id)
+{
+ struct dma_chan *chan;
+ const struct dma_dev *dev;
+ int i, ret;
+
+ if (alchemy_get_cputype() == ALCHEMY_CPU_AU1100) {
+ if (dev_id < 0 || dev_id >= (DMA_NUM_DEV + DMA_NUM_DEV_BANK2))
+ return -EINVAL;
+ } else {
+ if (dev_id < 0 || dev_id >= DMA_NUM_DEV)
+ return -EINVAL;
+ }
+
+ for (i = 0; i < NUM_AU1000_DMA_CHANNELS; i++)
+ if (au1000_dma_table[i].dev_id < 0)
+ break;
+
+ if (i == NUM_AU1000_DMA_CHANNELS)
+ return -ENODEV;
+
+ chan = &au1000_dma_table[i];
+
+ if (dev_id >= DMA_NUM_DEV) {
+ dev_id -= DMA_NUM_DEV;
+ dev = &dma_dev_table_bank2[dev_id];
+ } else
+ dev = &dma_dev_table[dev_id];
+
+ if (irqhandler) {
+ chan->irq_dev = irq_dev_id;
+ ret = request_irq(chan->irq, irqhandler, irqflags, dev_str,
+ chan->irq_dev);
+ if (ret) {
+ chan->irq_dev = NULL;
+ return ret;
+ }
+ } else {
+ chan->irq_dev = NULL;
+ }
+
+ /* fill it in */
+ chan->io = (void __iomem *)(KSEG1ADDR(AU1000_DMA_PHYS_ADDR) +
+ i * DMA_CHANNEL_LEN);
+ chan->dev_id = dev_id;
+ chan->dev_str = dev_str;
+ chan->fifo_addr = dev->fifo_addr;
+ chan->mode = dev->dma_mode;
+
+ /* initialize the channel before returning */
+ init_dma(i);
+
+ return i;
+}
+EXPORT_SYMBOL(request_au1000_dma);
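+
+/*
+ * Illustrative sketch (editor's addition, not part of the original file):
+ * a driver claims a channel, optionally passing a "buffer done" handler,
+ * and releases it again with free_au1000_dma(). MY_DMA_DEV_ID stands for
+ * one of the device IDs from au1000_dma.h, my_isr/my_dev for the caller's
+ * own handler and cookie; error handling is abbreviated:
+ *
+ *	ch = request_au1000_dma(MY_DMA_DEV_ID, "mydev", my_isr, 0, my_dev);
+ *	if (ch < 0)
+ *		return ch;
+ *	...program the buffers via the helpers in au1000_dma.h...
+ *	free_au1000_dma(ch);
+ */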
+
+void free_au1000_dma(unsigned int dmanr)
+{
+ struct dma_chan *chan = get_dma_chan(dmanr);
+
+ if (!chan) {
+ printk(KERN_ERR "Error trying to free DMA%d\n", dmanr);
+ return;
+ }
+
+ disable_dma(dmanr);
+ if (chan->irq_dev)
+ free_irq(chan->irq, chan->irq_dev);
+
+ chan->irq_dev = NULL;
+ chan->dev_id = -1;
+}
+EXPORT_SYMBOL(free_au1000_dma);
+
+static int __init au1000_dma_init(void)
+{
+ int base, i;
+
+ switch (alchemy_get_cputype()) {
+ case ALCHEMY_CPU_AU1000:
+ base = AU1000_DMA_INT_BASE;
+ break;
+ case ALCHEMY_CPU_AU1500:
+ base = AU1500_DMA_INT_BASE;
+ break;
+ case ALCHEMY_CPU_AU1100:
+ base = AU1100_DMA_INT_BASE;
+ break;
+ default:
+ goto out;
+ }
+
+ for (i = 0; i < NUM_AU1000_DMA_CHANNELS; i++)
+ au1000_dma_table[i].irq = base + i;
+
+ printk(KERN_INFO "Alchemy DMA initialized\n");
+
+out:
+ return 0;
+}
+arch_initcall(au1000_dma_init);
diff --git a/arch/mips/alchemy/common/gpiolib.c b/arch/mips/alchemy/common/gpiolib.c
new file mode 100644
index 000000000..1b16daaa8
--- /dev/null
+++ b/arch/mips/alchemy/common/gpiolib.c
@@ -0,0 +1,172 @@
+/*
+ * Copyright (C) 2007-2009, OpenWrt.org, Florian Fainelli <florian@openwrt.org>
+ * GPIOLIB support for Alchemy chips.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
+ * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
+ * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * Notes:
+ *	The Au1000 SoC has only one GPIO block: GPIO1.
+ *	Au1100, Au15x0 and Au12x0 have a second one: GPIO2.
+ *	Au1300 is totally different: one block with up to 128 GPIOs.
+ */
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/gpio/driver.h>
+#include <asm/mach-au1x00/gpio-au1000.h>
+#include <asm/mach-au1x00/gpio-au1300.h>
+
+static int gpio2_get(struct gpio_chip *chip, unsigned offset)
+{
+ return !!alchemy_gpio2_get_value(offset + ALCHEMY_GPIO2_BASE);
+}
+
+static void gpio2_set(struct gpio_chip *chip, unsigned offset, int value)
+{
+ alchemy_gpio2_set_value(offset + ALCHEMY_GPIO2_BASE, value);
+}
+
+static int gpio2_direction_input(struct gpio_chip *chip, unsigned offset)
+{
+ return alchemy_gpio2_direction_input(offset + ALCHEMY_GPIO2_BASE);
+}
+
+static int gpio2_direction_output(struct gpio_chip *chip, unsigned offset,
+ int value)
+{
+ return alchemy_gpio2_direction_output(offset + ALCHEMY_GPIO2_BASE,
+ value);
+}
+
+static int gpio2_to_irq(struct gpio_chip *chip, unsigned offset)
+{
+ return alchemy_gpio2_to_irq(offset + ALCHEMY_GPIO2_BASE);
+}
+
+
+static int gpio1_get(struct gpio_chip *chip, unsigned offset)
+{
+ return !!alchemy_gpio1_get_value(offset + ALCHEMY_GPIO1_BASE);
+}
+
+static void gpio1_set(struct gpio_chip *chip,
+ unsigned offset, int value)
+{
+ alchemy_gpio1_set_value(offset + ALCHEMY_GPIO1_BASE, value);
+}
+
+static int gpio1_direction_input(struct gpio_chip *chip, unsigned offset)
+{
+ return alchemy_gpio1_direction_input(offset + ALCHEMY_GPIO1_BASE);
+}
+
+static int gpio1_direction_output(struct gpio_chip *chip,
+ unsigned offset, int value)
+{
+ return alchemy_gpio1_direction_output(offset + ALCHEMY_GPIO1_BASE,
+ value);
+}
+
+static int gpio1_to_irq(struct gpio_chip *chip, unsigned offset)
+{
+ return alchemy_gpio1_to_irq(offset + ALCHEMY_GPIO1_BASE);
+}
+
+struct gpio_chip alchemy_gpio_chip[] = {
+ [0] = {
+ .label = "alchemy-gpio1",
+ .direction_input = gpio1_direction_input,
+ .direction_output = gpio1_direction_output,
+ .get = gpio1_get,
+ .set = gpio1_set,
+ .to_irq = gpio1_to_irq,
+ .base = ALCHEMY_GPIO1_BASE,
+ .ngpio = ALCHEMY_GPIO1_NUM,
+ },
+ [1] = {
+ .label = "alchemy-gpio2",
+ .direction_input = gpio2_direction_input,
+ .direction_output = gpio2_direction_output,
+ .get = gpio2_get,
+ .set = gpio2_set,
+ .to_irq = gpio2_to_irq,
+ .base = ALCHEMY_GPIO2_BASE,
+ .ngpio = ALCHEMY_GPIO2_NUM,
+ },
+};
+
+static int alchemy_gpic_get(struct gpio_chip *chip, unsigned int off)
+{
+ return !!au1300_gpio_get_value(off + AU1300_GPIO_BASE);
+}
+
+static void alchemy_gpic_set(struct gpio_chip *chip, unsigned int off, int v)
+{
+ au1300_gpio_set_value(off + AU1300_GPIO_BASE, v);
+}
+
+static int alchemy_gpic_dir_input(struct gpio_chip *chip, unsigned int off)
+{
+ return au1300_gpio_direction_input(off + AU1300_GPIO_BASE);
+}
+
+static int alchemy_gpic_dir_output(struct gpio_chip *chip, unsigned int off,
+ int v)
+{
+ return au1300_gpio_direction_output(off + AU1300_GPIO_BASE, v);
+}
+
+static int alchemy_gpic_gpio_to_irq(struct gpio_chip *chip, unsigned int off)
+{
+ return au1300_gpio_to_irq(off + AU1300_GPIO_BASE);
+}
+
+static struct gpio_chip au1300_gpiochip = {
+ .label = "alchemy-gpic",
+ .direction_input = alchemy_gpic_dir_input,
+ .direction_output = alchemy_gpic_dir_output,
+ .get = alchemy_gpic_get,
+ .set = alchemy_gpic_set,
+ .to_irq = alchemy_gpic_gpio_to_irq,
+ .base = AU1300_GPIO_BASE,
+ .ngpio = AU1300_GPIO_NUM,
+};
+
+static int __init alchemy_gpiochip_init(void)
+{
+ int ret = 0;
+
+ switch (alchemy_get_cputype()) {
+ case ALCHEMY_CPU_AU1000:
+ ret = gpiochip_add_data(&alchemy_gpio_chip[0], NULL);
+ break;
+	case ALCHEMY_CPU_AU1500 ... ALCHEMY_CPU_AU1200:
+ ret = gpiochip_add_data(&alchemy_gpio_chip[0], NULL);
+ ret |= gpiochip_add_data(&alchemy_gpio_chip[1], NULL);
+ break;
+ case ALCHEMY_CPU_AU1300:
+ ret = gpiochip_add_data(&au1300_gpiochip, NULL);
+ break;
+ }
+ return ret;
+}
+arch_initcall(alchemy_gpiochip_init);
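+
+/*
+ * Illustrative sketch (editor's addition, not part of the original file):
+ * once the chips above are registered, board code can drive a pin through
+ * the legacy gpiolib calls using the Alchemy GPIO numberspace, e.g. for a
+ * hypothetical LED on GPIO1 pin 5 (error checking omitted):
+ *
+ *	gpio_request(ALCHEMY_GPIO1_BASE + 5, "led");
+ *	gpio_direction_output(ALCHEMY_GPIO1_BASE + 5, 1);
+ */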
diff --git a/arch/mips/alchemy/common/irq.c b/arch/mips/alchemy/common/irq.c
new file mode 100644
index 000000000..da9f92200
--- /dev/null
+++ b/arch/mips/alchemy/common/irq.c
@@ -0,0 +1,996 @@
+/*
+ * Copyright 2001, 2007-2008 MontaVista Software Inc.
+ * Author: MontaVista Software, Inc. <source@mvista.com>
+ *
+ * Copyright (C) 2007 Ralf Baechle (ralf@linux-mips.org)
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
+ * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
+ * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/export.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/slab.h>
+#include <linux/syscore_ops.h>
+
+#include <asm/irq_cpu.h>
+#include <asm/mach-au1x00/au1000.h>
+#include <asm/mach-au1x00/gpio-au1300.h>
+
+/* Interrupt Controller register offsets */
+#define IC_CFG0RD 0x40
+#define IC_CFG0SET 0x40
+#define IC_CFG0CLR 0x44
+#define IC_CFG1RD 0x48
+#define IC_CFG1SET 0x48
+#define IC_CFG1CLR 0x4C
+#define IC_CFG2RD 0x50
+#define IC_CFG2SET 0x50
+#define IC_CFG2CLR 0x54
+#define IC_REQ0INT 0x54
+#define IC_SRCRD 0x58
+#define IC_SRCSET 0x58
+#define IC_SRCCLR 0x5C
+#define IC_REQ1INT 0x5C
+#define IC_ASSIGNRD 0x60
+#define IC_ASSIGNSET 0x60
+#define IC_ASSIGNCLR 0x64
+#define IC_WAKERD 0x68
+#define IC_WAKESET 0x68
+#define IC_WAKECLR 0x6C
+#define IC_MASKRD 0x70
+#define IC_MASKSET 0x70
+#define IC_MASKCLR 0x74
+#define IC_RISINGRD 0x78
+#define IC_RISINGCLR 0x78
+#define IC_FALLINGRD 0x7C
+#define IC_FALLINGCLR 0x7C
+#define IC_TESTBIT 0x80
+
+/* per-processor fixed function irqs */
+struct alchemy_irqmap {
+ int irq; /* linux IRQ number */
+ int type; /* IRQ_TYPE_ */
+ int prio; /* irq priority, 0 highest, 3 lowest */
+ int internal; /* GPIC: internal source (no ext. pin)? */
+};
+
+static int au1x_ic_settype(struct irq_data *d, unsigned int type);
+static int au1300_gpic_settype(struct irq_data *d, unsigned int type);
+
+
+/* NOTE on interrupt priorities: The original writers of this code said:
+ *
+ * Because of the tight timing of SETUP token to reply transactions,
+ * the USB devices-side packet complete interrupt (USB_DEV_REQ_INT)
+ * needs the highest priority.
+ */
+struct alchemy_irqmap au1000_irqmap[] __initdata = {
+ { AU1000_UART0_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1000_UART1_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1000_UART2_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1000_UART3_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1000_SSI0_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1000_SSI1_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1000_DMA_INT_BASE, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1000_DMA_INT_BASE+1, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1000_DMA_INT_BASE+2, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1000_DMA_INT_BASE+3, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1000_DMA_INT_BASE+4, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1000_DMA_INT_BASE+5, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1000_DMA_INT_BASE+6, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1000_DMA_INT_BASE+7, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1000_TOY_INT, IRQ_TYPE_EDGE_RISING, 1, 0 },
+ { AU1000_TOY_MATCH0_INT, IRQ_TYPE_EDGE_RISING, 1, 0 },
+ { AU1000_TOY_MATCH1_INT, IRQ_TYPE_EDGE_RISING, 1, 0 },
+ { AU1000_TOY_MATCH2_INT, IRQ_TYPE_EDGE_RISING, 1, 0 },
+ { AU1000_RTC_INT, IRQ_TYPE_EDGE_RISING, 1, 0 },
+ { AU1000_RTC_MATCH0_INT, IRQ_TYPE_EDGE_RISING, 1, 0 },
+ { AU1000_RTC_MATCH1_INT, IRQ_TYPE_EDGE_RISING, 1, 0 },
+ { AU1000_RTC_MATCH2_INT, IRQ_TYPE_EDGE_RISING, 0, 0 },
+ { AU1000_IRDA_TX_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1000_IRDA_RX_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1000_USB_DEV_REQ_INT, IRQ_TYPE_LEVEL_HIGH, 0, 0 },
+ { AU1000_USB_DEV_SUS_INT, IRQ_TYPE_EDGE_RISING, 1, 0 },
+ { AU1000_USB_HOST_INT, IRQ_TYPE_LEVEL_LOW, 1, 0 },
+ { AU1000_ACSYNC_INT, IRQ_TYPE_EDGE_RISING, 1, 0 },
+ { AU1000_MAC0_DMA_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1000_MAC1_DMA_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1000_AC97C_INT, IRQ_TYPE_EDGE_RISING, 1, 0 },
+ { -1, },
+};
+
+struct alchemy_irqmap au1500_irqmap[] __initdata = {
+ { AU1500_UART0_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1500_PCI_INTA, IRQ_TYPE_LEVEL_LOW, 1, 0 },
+ { AU1500_PCI_INTB, IRQ_TYPE_LEVEL_LOW, 1, 0 },
+ { AU1500_UART3_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1500_PCI_INTC, IRQ_TYPE_LEVEL_LOW, 1, 0 },
+ { AU1500_PCI_INTD, IRQ_TYPE_LEVEL_LOW, 1, 0 },
+ { AU1500_DMA_INT_BASE, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1500_DMA_INT_BASE+1, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1500_DMA_INT_BASE+2, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1500_DMA_INT_BASE+3, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1500_DMA_INT_BASE+4, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1500_DMA_INT_BASE+5, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1500_DMA_INT_BASE+6, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1500_DMA_INT_BASE+7, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1500_TOY_INT, IRQ_TYPE_EDGE_RISING, 1, 0 },
+ { AU1500_TOY_MATCH0_INT, IRQ_TYPE_EDGE_RISING, 1, 0 },
+ { AU1500_TOY_MATCH1_INT, IRQ_TYPE_EDGE_RISING, 1, 0 },
+ { AU1500_TOY_MATCH2_INT, IRQ_TYPE_EDGE_RISING, 1, 0 },
+ { AU1500_RTC_INT, IRQ_TYPE_EDGE_RISING, 1, 0 },
+ { AU1500_RTC_MATCH0_INT, IRQ_TYPE_EDGE_RISING, 1, 0 },
+ { AU1500_RTC_MATCH1_INT, IRQ_TYPE_EDGE_RISING, 1, 0 },
+ { AU1500_RTC_MATCH2_INT, IRQ_TYPE_EDGE_RISING, 0, 0 },
+ { AU1500_USB_DEV_REQ_INT, IRQ_TYPE_LEVEL_HIGH, 0, 0 },
+ { AU1500_USB_DEV_SUS_INT, IRQ_TYPE_EDGE_RISING, 1, 0 },
+ { AU1500_USB_HOST_INT, IRQ_TYPE_LEVEL_LOW, 1, 0 },
+ { AU1500_ACSYNC_INT, IRQ_TYPE_EDGE_RISING, 1, 0 },
+ { AU1500_MAC0_DMA_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1500_MAC1_DMA_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1500_AC97C_INT, IRQ_TYPE_EDGE_RISING, 1, 0 },
+ { -1, },
+};
+
+struct alchemy_irqmap au1100_irqmap[] __initdata = {
+ { AU1100_UART0_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1100_UART1_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1100_SD_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1100_UART3_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1100_SSI0_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1100_SSI1_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1100_DMA_INT_BASE, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1100_DMA_INT_BASE+1, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1100_DMA_INT_BASE+2, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1100_DMA_INT_BASE+3, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1100_DMA_INT_BASE+4, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1100_DMA_INT_BASE+5, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1100_DMA_INT_BASE+6, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1100_DMA_INT_BASE+7, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1100_TOY_INT, IRQ_TYPE_EDGE_RISING, 1, 0 },
+ { AU1100_TOY_MATCH0_INT, IRQ_TYPE_EDGE_RISING, 1, 0 },
+ { AU1100_TOY_MATCH1_INT, IRQ_TYPE_EDGE_RISING, 1, 0 },
+ { AU1100_TOY_MATCH2_INT, IRQ_TYPE_EDGE_RISING, 1, 0 },
+ { AU1100_RTC_INT, IRQ_TYPE_EDGE_RISING, 1, 0 },
+ { AU1100_RTC_MATCH0_INT, IRQ_TYPE_EDGE_RISING, 1, 0 },
+ { AU1100_RTC_MATCH1_INT, IRQ_TYPE_EDGE_RISING, 1, 0 },
+ { AU1100_RTC_MATCH2_INT, IRQ_TYPE_EDGE_RISING, 0, 0 },
+ { AU1100_IRDA_TX_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1100_IRDA_RX_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1100_USB_DEV_REQ_INT, IRQ_TYPE_LEVEL_HIGH, 0, 0 },
+ { AU1100_USB_DEV_SUS_INT, IRQ_TYPE_EDGE_RISING, 1, 0 },
+ { AU1100_USB_HOST_INT, IRQ_TYPE_LEVEL_LOW, 1, 0 },
+ { AU1100_ACSYNC_INT, IRQ_TYPE_EDGE_RISING, 1, 0 },
+ { AU1100_MAC0_DMA_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1100_LCD_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1100_AC97C_INT, IRQ_TYPE_EDGE_RISING, 1, 0 },
+ { -1, },
+};
+
+struct alchemy_irqmap au1550_irqmap[] __initdata = {
+ { AU1550_UART0_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1550_PCI_INTA, IRQ_TYPE_LEVEL_LOW, 1, 0 },
+ { AU1550_PCI_INTB, IRQ_TYPE_LEVEL_LOW, 1, 0 },
+ { AU1550_DDMA_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1550_CRYPTO_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1550_PCI_INTC, IRQ_TYPE_LEVEL_LOW, 1, 0 },
+ { AU1550_PCI_INTD, IRQ_TYPE_LEVEL_LOW, 1, 0 },
+ { AU1550_PCI_RST_INT, IRQ_TYPE_LEVEL_LOW, 1, 0 },
+ { AU1550_UART1_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1550_UART3_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1550_PSC0_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1550_PSC1_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1550_PSC2_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1550_PSC3_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1550_TOY_INT, IRQ_TYPE_EDGE_RISING, 1, 0 },
+ { AU1550_TOY_MATCH0_INT, IRQ_TYPE_EDGE_RISING, 1, 0 },
+ { AU1550_TOY_MATCH1_INT, IRQ_TYPE_EDGE_RISING, 1, 0 },
+ { AU1550_TOY_MATCH2_INT, IRQ_TYPE_EDGE_RISING, 1, 0 },
+ { AU1550_RTC_INT, IRQ_TYPE_EDGE_RISING, 1, 0 },
+ { AU1550_RTC_MATCH0_INT, IRQ_TYPE_EDGE_RISING, 1, 0 },
+ { AU1550_RTC_MATCH1_INT, IRQ_TYPE_EDGE_RISING, 1, 0 },
+ { AU1550_RTC_MATCH2_INT, IRQ_TYPE_EDGE_RISING, 0, 0 },
+ { AU1550_NAND_INT, IRQ_TYPE_EDGE_RISING, 1, 0 },
+ { AU1550_USB_DEV_REQ_INT, IRQ_TYPE_LEVEL_HIGH, 0, 0 },
+ { AU1550_USB_DEV_SUS_INT, IRQ_TYPE_EDGE_RISING, 1, 0 },
+ { AU1550_USB_HOST_INT, IRQ_TYPE_LEVEL_LOW, 1, 0 },
+ { AU1550_MAC0_DMA_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1550_MAC1_DMA_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { -1, },
+};
+
+struct alchemy_irqmap au1200_irqmap[] __initdata = {
+ { AU1200_UART0_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1200_SWT_INT, IRQ_TYPE_EDGE_RISING, 1, 0 },
+ { AU1200_SD_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1200_DDMA_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1200_MAE_BE_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1200_UART1_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1200_MAE_FE_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1200_PSC0_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1200_PSC1_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1200_AES_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1200_CAMERA_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1200_TOY_INT, IRQ_TYPE_EDGE_RISING, 1, 0 },
+ { AU1200_TOY_MATCH0_INT, IRQ_TYPE_EDGE_RISING, 1, 0 },
+ { AU1200_TOY_MATCH1_INT, IRQ_TYPE_EDGE_RISING, 1, 0 },
+ { AU1200_TOY_MATCH2_INT, IRQ_TYPE_EDGE_RISING, 1, 0 },
+ { AU1200_RTC_INT, IRQ_TYPE_EDGE_RISING, 1, 0 },
+ { AU1200_RTC_MATCH0_INT, IRQ_TYPE_EDGE_RISING, 1, 0 },
+ { AU1200_RTC_MATCH1_INT, IRQ_TYPE_EDGE_RISING, 1, 0 },
+ { AU1200_RTC_MATCH2_INT, IRQ_TYPE_EDGE_RISING, 0, 0 },
+ { AU1200_NAND_INT, IRQ_TYPE_EDGE_RISING, 1, 0 },
+ { AU1200_USB_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1200_LCD_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1200_MAE_BOTH_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { -1, },
+};
+
+static struct alchemy_irqmap au1300_irqmap[] __initdata = {
+ /* multifunction: gpio pin or device */
+ { AU1300_UART1_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0, },
+ { AU1300_UART2_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0, },
+ { AU1300_UART3_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0, },
+ { AU1300_SD1_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0, },
+ { AU1300_SD2_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0, },
+ { AU1300_PSC0_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0, },
+ { AU1300_PSC1_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0, },
+ { AU1300_PSC2_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0, },
+ { AU1300_PSC3_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0, },
+ { AU1300_NAND_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0, },
+ /* au1300 internal */
+ { AU1300_DDMA_INT, IRQ_TYPE_LEVEL_HIGH, 1, 1, },
+ { AU1300_MMU_INT, IRQ_TYPE_LEVEL_HIGH, 1, 1, },
+ { AU1300_MPU_INT, IRQ_TYPE_LEVEL_HIGH, 1, 1, },
+ { AU1300_GPU_INT, IRQ_TYPE_LEVEL_HIGH, 1, 1, },
+ { AU1300_UDMA_INT, IRQ_TYPE_LEVEL_HIGH, 1, 1, },
+ { AU1300_TOY_INT, IRQ_TYPE_EDGE_RISING, 1, 1, },
+ { AU1300_TOY_MATCH0_INT, IRQ_TYPE_EDGE_RISING, 1, 1, },
+ { AU1300_TOY_MATCH1_INT, IRQ_TYPE_EDGE_RISING, 1, 1, },
+ { AU1300_TOY_MATCH2_INT, IRQ_TYPE_EDGE_RISING, 1, 1, },
+ { AU1300_RTC_INT, IRQ_TYPE_EDGE_RISING, 1, 1, },
+ { AU1300_RTC_MATCH0_INT, IRQ_TYPE_EDGE_RISING, 1, 1, },
+ { AU1300_RTC_MATCH1_INT, IRQ_TYPE_EDGE_RISING, 1, 1, },
+ { AU1300_RTC_MATCH2_INT, IRQ_TYPE_EDGE_RISING, 0, 1, },
+ { AU1300_UART0_INT, IRQ_TYPE_LEVEL_HIGH, 1, 1, },
+ { AU1300_SD0_INT, IRQ_TYPE_LEVEL_HIGH, 1, 1, },
+ { AU1300_USB_INT, IRQ_TYPE_LEVEL_HIGH, 1, 1, },
+ { AU1300_LCD_INT, IRQ_TYPE_LEVEL_HIGH, 1, 1, },
+ { AU1300_BSA_INT, IRQ_TYPE_LEVEL_HIGH, 1, 1, },
+ { AU1300_MPE_INT, IRQ_TYPE_EDGE_RISING, 1, 1, },
+ { AU1300_ITE_INT, IRQ_TYPE_LEVEL_HIGH, 1, 1, },
+ { AU1300_AES_INT, IRQ_TYPE_LEVEL_HIGH, 1, 1, },
+ { AU1300_CIM_INT, IRQ_TYPE_LEVEL_HIGH, 1, 1, },
+ { -1, }, /* terminator */
+};
+
+/******************************************************************************/
+
+static void au1x_ic0_unmask(struct irq_data *d)
+{
+ unsigned int bit = d->irq - AU1000_INTC0_INT_BASE;
+ void __iomem *base = (void __iomem *)KSEG1ADDR(AU1000_IC0_PHYS_ADDR);
+
+ __raw_writel(1 << bit, base + IC_MASKSET);
+ __raw_writel(1 << bit, base + IC_WAKESET);
+ wmb();
+}
+
+static void au1x_ic1_unmask(struct irq_data *d)
+{
+ unsigned int bit = d->irq - AU1000_INTC1_INT_BASE;
+ void __iomem *base = (void __iomem *)KSEG1ADDR(AU1000_IC1_PHYS_ADDR);
+
+ __raw_writel(1 << bit, base + IC_MASKSET);
+ __raw_writel(1 << bit, base + IC_WAKESET);
+ wmb();
+}
+
+static void au1x_ic0_mask(struct irq_data *d)
+{
+ unsigned int bit = d->irq - AU1000_INTC0_INT_BASE;
+ void __iomem *base = (void __iomem *)KSEG1ADDR(AU1000_IC0_PHYS_ADDR);
+
+ __raw_writel(1 << bit, base + IC_MASKCLR);
+ __raw_writel(1 << bit, base + IC_WAKECLR);
+ wmb();
+}
+
+static void au1x_ic1_mask(struct irq_data *d)
+{
+ unsigned int bit = d->irq - AU1000_INTC1_INT_BASE;
+ void __iomem *base = (void __iomem *)KSEG1ADDR(AU1000_IC1_PHYS_ADDR);
+
+ __raw_writel(1 << bit, base + IC_MASKCLR);
+ __raw_writel(1 << bit, base + IC_WAKECLR);
+ wmb();
+}
+
+static void au1x_ic0_ack(struct irq_data *d)
+{
+ unsigned int bit = d->irq - AU1000_INTC0_INT_BASE;
+ void __iomem *base = (void __iomem *)KSEG1ADDR(AU1000_IC0_PHYS_ADDR);
+
+ /*
+ * This may assume that we don't get interrupts from
+ * both edges at once, or if we do, that we don't care.
+ */
+ __raw_writel(1 << bit, base + IC_FALLINGCLR);
+ __raw_writel(1 << bit, base + IC_RISINGCLR);
+ wmb();
+}
+
+static void au1x_ic1_ack(struct irq_data *d)
+{
+ unsigned int bit = d->irq - AU1000_INTC1_INT_BASE;
+ void __iomem *base = (void __iomem *)KSEG1ADDR(AU1000_IC1_PHYS_ADDR);
+
+ /*
+ * This may assume that we don't get interrupts from
+ * both edges at once, or if we do, that we don't care.
+ */
+ __raw_writel(1 << bit, base + IC_FALLINGCLR);
+ __raw_writel(1 << bit, base + IC_RISINGCLR);
+ wmb();
+}
+
+static void au1x_ic0_maskack(struct irq_data *d)
+{
+ unsigned int bit = d->irq - AU1000_INTC0_INT_BASE;
+ void __iomem *base = (void __iomem *)KSEG1ADDR(AU1000_IC0_PHYS_ADDR);
+
+ __raw_writel(1 << bit, base + IC_WAKECLR);
+ __raw_writel(1 << bit, base + IC_MASKCLR);
+ __raw_writel(1 << bit, base + IC_RISINGCLR);
+ __raw_writel(1 << bit, base + IC_FALLINGCLR);
+ wmb();
+}
+
+static void au1x_ic1_maskack(struct irq_data *d)
+{
+ unsigned int bit = d->irq - AU1000_INTC1_INT_BASE;
+ void __iomem *base = (void __iomem *)KSEG1ADDR(AU1000_IC1_PHYS_ADDR);
+
+ __raw_writel(1 << bit, base + IC_WAKECLR);
+ __raw_writel(1 << bit, base + IC_MASKCLR);
+ __raw_writel(1 << bit, base + IC_RISINGCLR);
+ __raw_writel(1 << bit, base + IC_FALLINGCLR);
+ wmb();
+}
+
+static int au1x_ic1_setwake(struct irq_data *d, unsigned int on)
+{
+ int bit = d->irq - AU1000_INTC1_INT_BASE;
+ unsigned long wakemsk, flags;
+
+	/* Only GPIOs 0-7 can act as wakeup sources; fortunately these
+	 * are wired up identically on all supported variants.
+	 */
+ if ((bit < 0) || (bit > 7))
+ return -EINVAL;
+
+ local_irq_save(flags);
+ wakemsk = alchemy_rdsys(AU1000_SYS_WAKEMSK);
+ if (on)
+ wakemsk |= 1 << bit;
+ else
+ wakemsk &= ~(1 << bit);
+ alchemy_wrsys(wakemsk, AU1000_SYS_WAKEMSK);
+ local_irq_restore(flags);
+
+ return 0;
+}
+
+/*
+ * irq_chips for both ICs; this way the mask handlers can be
+ * as short as possible.
+ */
+static struct irq_chip au1x_ic0_chip = {
+ .name = "Alchemy-IC0",
+ .irq_ack = au1x_ic0_ack,
+ .irq_mask = au1x_ic0_mask,
+ .irq_mask_ack = au1x_ic0_maskack,
+ .irq_unmask = au1x_ic0_unmask,
+ .irq_set_type = au1x_ic_settype,
+};
+
+static struct irq_chip au1x_ic1_chip = {
+ .name = "Alchemy-IC1",
+ .irq_ack = au1x_ic1_ack,
+ .irq_mask = au1x_ic1_mask,
+ .irq_mask_ack = au1x_ic1_maskack,
+ .irq_unmask = au1x_ic1_unmask,
+ .irq_set_type = au1x_ic_settype,
+ .irq_set_wake = au1x_ic1_setwake,
+};
+
+static int au1x_ic_settype(struct irq_data *d, unsigned int flow_type)
+{
+ struct irq_chip *chip;
+ unsigned int bit, irq = d->irq;
+ irq_flow_handler_t handler = NULL;
+ unsigned char *name = NULL;
+ void __iomem *base;
+ int ret;
+
+ if (irq >= AU1000_INTC1_INT_BASE) {
+ bit = irq - AU1000_INTC1_INT_BASE;
+ chip = &au1x_ic1_chip;
+ base = (void __iomem *)KSEG1ADDR(AU1000_IC1_PHYS_ADDR);
+ } else {
+ bit = irq - AU1000_INTC0_INT_BASE;
+ chip = &au1x_ic0_chip;
+ base = (void __iomem *)KSEG1ADDR(AU1000_IC0_PHYS_ADDR);
+ }
+
+ if (bit > 31)
+ return -EINVAL;
+
+ ret = 0;
+
+ switch (flow_type) { /* cfgregs 2:1:0 */
+ case IRQ_TYPE_EDGE_RISING: /* 0:0:1 */
+ __raw_writel(1 << bit, base + IC_CFG2CLR);
+ __raw_writel(1 << bit, base + IC_CFG1CLR);
+ __raw_writel(1 << bit, base + IC_CFG0SET);
+ handler = handle_edge_irq;
+ name = "riseedge";
+ break;
+ case IRQ_TYPE_EDGE_FALLING: /* 0:1:0 */
+ __raw_writel(1 << bit, base + IC_CFG2CLR);
+ __raw_writel(1 << bit, base + IC_CFG1SET);
+ __raw_writel(1 << bit, base + IC_CFG0CLR);
+ handler = handle_edge_irq;
+ name = "falledge";
+ break;
+ case IRQ_TYPE_EDGE_BOTH: /* 0:1:1 */
+ __raw_writel(1 << bit, base + IC_CFG2CLR);
+ __raw_writel(1 << bit, base + IC_CFG1SET);
+ __raw_writel(1 << bit, base + IC_CFG0SET);
+ handler = handle_edge_irq;
+ name = "bothedge";
+ break;
+ case IRQ_TYPE_LEVEL_HIGH: /* 1:0:1 */
+ __raw_writel(1 << bit, base + IC_CFG2SET);
+ __raw_writel(1 << bit, base + IC_CFG1CLR);
+ __raw_writel(1 << bit, base + IC_CFG0SET);
+ handler = handle_level_irq;
+ name = "hilevel";
+ break;
+ case IRQ_TYPE_LEVEL_LOW: /* 1:1:0 */
+ __raw_writel(1 << bit, base + IC_CFG2SET);
+ __raw_writel(1 << bit, base + IC_CFG1SET);
+ __raw_writel(1 << bit, base + IC_CFG0CLR);
+ handler = handle_level_irq;
+ name = "lowlevel";
+ break;
+ case IRQ_TYPE_NONE: /* 0:0:0 */
+ __raw_writel(1 << bit, base + IC_CFG2CLR);
+ __raw_writel(1 << bit, base + IC_CFG1CLR);
+ __raw_writel(1 << bit, base + IC_CFG0CLR);
+ break;
+ default:
+ ret = -EINVAL;
+ }
+ irq_set_chip_handler_name_locked(d, chip, handler, name);
+
+ wmb();
+
+ return ret;
+}
+
+/******************************************************************************/
+
+/*
+ * au1300_gpic_chgcfg - change PIN configuration.
+ * @gpio: pin to change (0-based GPIO number from datasheet).
+ * @clr: clear all bits set in 'clr'.
+ * @set: set these bits.
+ *
+ * Modifies a pin's configuration register: bits set in @clr are cleared
+ * in the register, bits in @set are set.
+ */
+static inline void au1300_gpic_chgcfg(unsigned int gpio,
+ unsigned long clr,
+ unsigned long set)
+{
+ void __iomem *r = AU1300_GPIC_ADDR;
+ unsigned long l;
+
+ r += gpio * 4; /* offset into pin config array */
+ l = __raw_readl(r + AU1300_GPIC_PINCFG);
+ l &= ~clr;
+ l |= set;
+ __raw_writel(l, r + AU1300_GPIC_PINCFG);
+ wmb();
+}
+
+/*
+ * au1300_pinfunc_to_gpio - assign a pin as GPIO input (GPIO ctrl).
+ * @pin: pin (0-based GPIO number from datasheet).
+ *
+ * Assigns a GPIO pin to the GPIO controller, so its level can either
+ * be read or set through the generic GPIO functions.
+ * If you need a GPOUT, use au1300_gpio_set_value(pin, 0/1).
+ * REVISIT: is this function really necessary?
+ */
+void au1300_pinfunc_to_gpio(enum au1300_multifunc_pins gpio)
+{
+ au1300_gpio_direction_input(gpio + AU1300_GPIO_BASE);
+}
+EXPORT_SYMBOL_GPL(au1300_pinfunc_to_gpio);
+
+/*
+ * au1300_pinfunc_to_dev - assign a pin to the device function.
+ * @pin: pin (0-based GPIO number from datasheet).
+ *
+ * Assigns a GPIO pin to its associated device function; the pin will be
+ * driven by the device and not through GPIO functions.
+ */
+void au1300_pinfunc_to_dev(enum au1300_multifunc_pins gpio)
+{
+ void __iomem *r = AU1300_GPIC_ADDR;
+ unsigned long bit;
+
+ r += GPIC_GPIO_BANKOFF(gpio);
+ bit = GPIC_GPIO_TO_BIT(gpio);
+ __raw_writel(bit, r + AU1300_GPIC_DEVSEL);
+ wmb();
+}
+EXPORT_SYMBOL_GPL(au1300_pinfunc_to_dev);
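+
+/*
+ * Illustrative sketch (editor's addition, not part of the original file):
+ * board code flips a multifunction pin between its device role and plain
+ * GPIO with the two helpers above; AU1300_PIN_EXAMPLE is a placeholder for
+ * one of the enum au1300_multifunc_pins values from gpio-au1300.h:
+ *
+ *	au1300_pinfunc_to_dev(AU1300_PIN_EXAMPLE);
+ *	au1300_pinfunc_to_gpio(AU1300_PIN_EXAMPLE);
+ */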
+
+/*
+ * au1300_set_irq_priority - set internal priority of IRQ.
+ * @irq: irq to set priority (linux irq number).
+ * @p: priority (0 = highest, 3 = lowest).
+ */
+void au1300_set_irq_priority(unsigned int irq, int p)
+{
+ irq -= ALCHEMY_GPIC_INT_BASE;
+ au1300_gpic_chgcfg(irq, GPIC_CFG_IL_MASK, GPIC_CFG_IL_SET(p));
+}
+EXPORT_SYMBOL_GPL(au1300_set_irq_priority);
+
+/*
+ * au1300_set_dbdma_gpio - assign a gpio to one of the DBDMA triggers.
+ * @dchan: dbdma trigger select (0, 1).
+ * @gpio: pin to assign as trigger.
+ *
+ * The DBDMA controller has 2 external trigger sources; this function
+ * assigns a GPIO to the selected trigger.
+ */
+void au1300_set_dbdma_gpio(int dchan, unsigned int gpio)
+{
+ unsigned long r;
+
+ if ((dchan >= 0) && (dchan <= 1)) {
+ r = __raw_readl(AU1300_GPIC_ADDR + AU1300_GPIC_DMASEL);
+ r &= ~(0xff << (8 * dchan));
+ r |= (gpio & 0x7f) << (8 * dchan);
+ __raw_writel(r, AU1300_GPIC_ADDR + AU1300_GPIC_DMASEL);
+ wmb();
+ }
+}
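+
+/*
+ * Illustrative sketch (editor's addition, not part of the original file):
+ * routing a (made-up) pin number to the first external DBDMA trigger:
+ *
+ *	au1300_set_dbdma_gpio(0, 42);
+ */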
+
+static inline void gpic_pin_set_idlewake(unsigned int gpio, int allow)
+{
+ au1300_gpic_chgcfg(gpio, GPIC_CFG_IDLEWAKE,
+ allow ? GPIC_CFG_IDLEWAKE : 0);
+}
+
+static void au1300_gpic_mask(struct irq_data *d)
+{
+ void __iomem *r = AU1300_GPIC_ADDR;
+ unsigned long bit, irq = d->irq;
+
+ irq -= ALCHEMY_GPIC_INT_BASE;
+ r += GPIC_GPIO_BANKOFF(irq);
+ bit = GPIC_GPIO_TO_BIT(irq);
+ __raw_writel(bit, r + AU1300_GPIC_IDIS);
+ wmb();
+
+ gpic_pin_set_idlewake(irq, 0);
+}
+
+static void au1300_gpic_unmask(struct irq_data *d)
+{
+ void __iomem *r = AU1300_GPIC_ADDR;
+ unsigned long bit, irq = d->irq;
+
+ irq -= ALCHEMY_GPIC_INT_BASE;
+
+ gpic_pin_set_idlewake(irq, 1);
+
+ r += GPIC_GPIO_BANKOFF(irq);
+ bit = GPIC_GPIO_TO_BIT(irq);
+ __raw_writel(bit, r + AU1300_GPIC_IEN);
+ wmb();
+}
+
+static void au1300_gpic_maskack(struct irq_data *d)
+{
+ void __iomem *r = AU1300_GPIC_ADDR;
+ unsigned long bit, irq = d->irq;
+
+ irq -= ALCHEMY_GPIC_INT_BASE;
+ r += GPIC_GPIO_BANKOFF(irq);
+ bit = GPIC_GPIO_TO_BIT(irq);
+ __raw_writel(bit, r + AU1300_GPIC_IPEND); /* ack */
+ __raw_writel(bit, r + AU1300_GPIC_IDIS); /* mask */
+ wmb();
+
+ gpic_pin_set_idlewake(irq, 0);
+}
+
+static void au1300_gpic_ack(struct irq_data *d)
+{
+ void __iomem *r = AU1300_GPIC_ADDR;
+ unsigned long bit, irq = d->irq;
+
+ irq -= ALCHEMY_GPIC_INT_BASE;
+ r += GPIC_GPIO_BANKOFF(irq);
+ bit = GPIC_GPIO_TO_BIT(irq);
+ __raw_writel(bit, r + AU1300_GPIC_IPEND); /* ack */
+ wmb();
+}
+
+static struct irq_chip au1300_gpic = {
+ .name = "GPIOINT",
+ .irq_ack = au1300_gpic_ack,
+ .irq_mask = au1300_gpic_mask,
+ .irq_mask_ack = au1300_gpic_maskack,
+ .irq_unmask = au1300_gpic_unmask,
+ .irq_set_type = au1300_gpic_settype,
+};
+
+static int au1300_gpic_settype(struct irq_data *d, unsigned int type)
+{
+ unsigned long s;
+ unsigned char *name = NULL;
+ irq_flow_handler_t hdl = NULL;
+
+ switch (type) {
+ case IRQ_TYPE_LEVEL_HIGH:
+ s = GPIC_CFG_IC_LEVEL_HIGH;
+ name = "high";
+ hdl = handle_level_irq;
+ break;
+ case IRQ_TYPE_LEVEL_LOW:
+ s = GPIC_CFG_IC_LEVEL_LOW;
+ name = "low";
+ hdl = handle_level_irq;
+ break;
+ case IRQ_TYPE_EDGE_RISING:
+ s = GPIC_CFG_IC_EDGE_RISE;
+ name = "posedge";
+ hdl = handle_edge_irq;
+ break;
+ case IRQ_TYPE_EDGE_FALLING:
+ s = GPIC_CFG_IC_EDGE_FALL;
+ name = "negedge";
+ hdl = handle_edge_irq;
+ break;
+ case IRQ_TYPE_EDGE_BOTH:
+ s = GPIC_CFG_IC_EDGE_BOTH;
+ name = "bothedge";
+ hdl = handle_edge_irq;
+ break;
+ case IRQ_TYPE_NONE:
+ s = GPIC_CFG_IC_OFF;
+ name = "disabled";
+ hdl = handle_level_irq;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ irq_set_chip_handler_name_locked(d, &au1300_gpic, hdl, name);
+
+ au1300_gpic_chgcfg(d->irq - ALCHEMY_GPIC_INT_BASE, GPIC_CFG_IC_MASK, s);
+
+ return 0;
+}
+
+/******************************************************************************/
+
+static inline void ic_init(void __iomem *base)
+{
+ /* initialize interrupt controller to a safe state */
+ __raw_writel(0xffffffff, base + IC_CFG0CLR);
+ __raw_writel(0xffffffff, base + IC_CFG1CLR);
+ __raw_writel(0xffffffff, base + IC_CFG2CLR);
+ __raw_writel(0xffffffff, base + IC_MASKCLR);
+ __raw_writel(0xffffffff, base + IC_ASSIGNCLR);
+ __raw_writel(0xffffffff, base + IC_WAKECLR);
+ __raw_writel(0xffffffff, base + IC_SRCSET);
+ __raw_writel(0xffffffff, base + IC_FALLINGCLR);
+ __raw_writel(0xffffffff, base + IC_RISINGCLR);
+ __raw_writel(0x00000000, base + IC_TESTBIT);
+ wmb();
+}
+
+static unsigned long alchemy_gpic_pmdata[ALCHEMY_GPIC_INT_NUM + 6];
+
+static inline void alchemy_ic_suspend_one(void __iomem *base, unsigned long *d)
+{
+ d[0] = __raw_readl(base + IC_CFG0RD);
+ d[1] = __raw_readl(base + IC_CFG1RD);
+ d[2] = __raw_readl(base + IC_CFG2RD);
+ d[3] = __raw_readl(base + IC_SRCRD);
+ d[4] = __raw_readl(base + IC_ASSIGNRD);
+ d[5] = __raw_readl(base + IC_WAKERD);
+ d[6] = __raw_readl(base + IC_MASKRD);
+ ic_init(base); /* shut it up too while at it */
+}
+
+static inline void alchemy_ic_resume_one(void __iomem *base, unsigned long *d)
+{
+ ic_init(base);
+
+ __raw_writel(d[0], base + IC_CFG0SET);
+ __raw_writel(d[1], base + IC_CFG1SET);
+ __raw_writel(d[2], base + IC_CFG2SET);
+ __raw_writel(d[3], base + IC_SRCSET);
+ __raw_writel(d[4], base + IC_ASSIGNSET);
+ __raw_writel(d[5], base + IC_WAKESET);
+ wmb();
+
+ __raw_writel(d[6], base + IC_MASKSET);
+ wmb();
+}
+
+static int alchemy_ic_suspend(void)
+{
+ alchemy_ic_suspend_one((void __iomem *)KSEG1ADDR(AU1000_IC0_PHYS_ADDR),
+ alchemy_gpic_pmdata);
+ alchemy_ic_suspend_one((void __iomem *)KSEG1ADDR(AU1000_IC1_PHYS_ADDR),
+ &alchemy_gpic_pmdata[7]);
+ return 0;
+}
+
+static void alchemy_ic_resume(void)
+{
+ alchemy_ic_resume_one((void __iomem *)KSEG1ADDR(AU1000_IC1_PHYS_ADDR),
+ &alchemy_gpic_pmdata[7]);
+ alchemy_ic_resume_one((void __iomem *)KSEG1ADDR(AU1000_IC0_PHYS_ADDR),
+ alchemy_gpic_pmdata);
+}
+
+static int alchemy_gpic_suspend(void)
+{
+ void __iomem *base = (void __iomem *)KSEG1ADDR(AU1300_GPIC_PHYS_ADDR);
+ int i;
+
+ /* save 4 interrupt mask status registers */
+ alchemy_gpic_pmdata[0] = __raw_readl(base + AU1300_GPIC_IEN + 0x0);
+ alchemy_gpic_pmdata[1] = __raw_readl(base + AU1300_GPIC_IEN + 0x4);
+ alchemy_gpic_pmdata[2] = __raw_readl(base + AU1300_GPIC_IEN + 0x8);
+ alchemy_gpic_pmdata[3] = __raw_readl(base + AU1300_GPIC_IEN + 0xc);
+
+ /* save misc register(s) */
+ alchemy_gpic_pmdata[4] = __raw_readl(base + AU1300_GPIC_DMASEL);
+
+	/* silence everything */
+ __raw_writel(~0UL, base + AU1300_GPIC_IDIS + 0x0);
+ __raw_writel(~0UL, base + AU1300_GPIC_IDIS + 0x4);
+ __raw_writel(~0UL, base + AU1300_GPIC_IDIS + 0x8);
+ __raw_writel(~0UL, base + AU1300_GPIC_IDIS + 0xc);
+ wmb();
+
+ /* save pin/int-type configuration */
+ base += AU1300_GPIC_PINCFG;
+ for (i = 0; i < ALCHEMY_GPIC_INT_NUM; i++)
+ alchemy_gpic_pmdata[i + 5] = __raw_readl(base + (i << 2));
+
+ wmb();
+
+ return 0;
+}
+
+static void alchemy_gpic_resume(void)
+{
+ void __iomem *base = (void __iomem *)KSEG1ADDR(AU1300_GPIC_PHYS_ADDR);
+ int i;
+
+ /* disable all first */
+ __raw_writel(~0UL, base + AU1300_GPIC_IDIS + 0x0);
+ __raw_writel(~0UL, base + AU1300_GPIC_IDIS + 0x4);
+ __raw_writel(~0UL, base + AU1300_GPIC_IDIS + 0x8);
+ __raw_writel(~0UL, base + AU1300_GPIC_IDIS + 0xc);
+ wmb();
+
+ /* restore pin/int-type configurations */
+ base += AU1300_GPIC_PINCFG;
+ for (i = 0; i < ALCHEMY_GPIC_INT_NUM; i++)
+ __raw_writel(alchemy_gpic_pmdata[i + 5], base + (i << 2));
+ wmb();
+
+ /* restore misc register(s) */
+ base = (void __iomem *)KSEG1ADDR(AU1300_GPIC_PHYS_ADDR);
+ __raw_writel(alchemy_gpic_pmdata[4], base + AU1300_GPIC_DMASEL);
+ wmb();
+
+ /* finally restore masks */
+ __raw_writel(alchemy_gpic_pmdata[0], base + AU1300_GPIC_IEN + 0x0);
+ __raw_writel(alchemy_gpic_pmdata[1], base + AU1300_GPIC_IEN + 0x4);
+ __raw_writel(alchemy_gpic_pmdata[2], base + AU1300_GPIC_IEN + 0x8);
+ __raw_writel(alchemy_gpic_pmdata[3], base + AU1300_GPIC_IEN + 0xc);
+ wmb();
+}
+
+static struct syscore_ops alchemy_ic_pmops = {
+ .suspend = alchemy_ic_suspend,
+ .resume = alchemy_ic_resume,
+};
+
+static struct syscore_ops alchemy_gpic_pmops = {
+ .suspend = alchemy_gpic_suspend,
+ .resume = alchemy_gpic_resume,
+};
+
+/******************************************************************************/
+
+/* create chained handlers for the 4 IC requests to the MIPS IRQ ctrl */
+#define DISP(name, base, addr) \
+static void au1000_##name##_dispatch(struct irq_desc *d) \
+{ \
+ unsigned long r = __raw_readl((void __iomem *)KSEG1ADDR(addr)); \
+ if (likely(r)) \
+ generic_handle_irq(base + __ffs(r)); \
+ else \
+ spurious_interrupt(); \
+}
+
+DISP(ic0r0, AU1000_INTC0_INT_BASE, AU1000_IC0_PHYS_ADDR + IC_REQ0INT)
+DISP(ic0r1, AU1000_INTC0_INT_BASE, AU1000_IC0_PHYS_ADDR + IC_REQ1INT)
+DISP(ic1r0, AU1000_INTC1_INT_BASE, AU1000_IC1_PHYS_ADDR + IC_REQ0INT)
+DISP(ic1r1, AU1000_INTC1_INT_BASE, AU1000_IC1_PHYS_ADDR + IC_REQ1INT)
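+
+/*
+ * Editor's note: for reference, each DISP() line above expands to a chained
+ * handler of roughly this shape (shown for ic0r0; this is what the macro
+ * already generates, not additional code):
+ *
+ *	static void au1000_ic0r0_dispatch(struct irq_desc *d)
+ *	{
+ *		unsigned long r = __raw_readl((void __iomem *)
+ *			KSEG1ADDR(AU1000_IC0_PHYS_ADDR + IC_REQ0INT));
+ *		if (likely(r))
+ *			generic_handle_irq(AU1000_INTC0_INT_BASE + __ffs(r));
+ *		else
+ *			spurious_interrupt();
+ *	}
+ */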
+
+static void alchemy_gpic_dispatch(struct irq_desc *d)
+{
+ int i = __raw_readl(AU1300_GPIC_ADDR + AU1300_GPIC_PRIENC);
+ generic_handle_irq(ALCHEMY_GPIC_INT_BASE + i);
+}
+
+/******************************************************************************/
+
+static void __init au1000_init_irq(struct alchemy_irqmap *map)
+{
+ unsigned int bit, irq_nr;
+ void __iomem *base;
+
+ ic_init((void __iomem *)KSEG1ADDR(AU1000_IC0_PHYS_ADDR));
+ ic_init((void __iomem *)KSEG1ADDR(AU1000_IC1_PHYS_ADDR));
+ register_syscore_ops(&alchemy_ic_pmops);
+ mips_cpu_irq_init();
+
+ /* register all 64 possible IC0+IC1 irq sources as type "none".
+ * Use set_irq_type() to set edge/level behaviour at runtime.
+ */
+ for (irq_nr = AU1000_INTC0_INT_BASE;
+ (irq_nr < AU1000_INTC0_INT_BASE + 32); irq_nr++)
+ au1x_ic_settype(irq_get_irq_data(irq_nr), IRQ_TYPE_NONE);
+
+ for (irq_nr = AU1000_INTC1_INT_BASE;
+ (irq_nr < AU1000_INTC1_INT_BASE + 32); irq_nr++)
+ au1x_ic_settype(irq_get_irq_data(irq_nr), IRQ_TYPE_NONE);
+
+	/*
+	 * Initialize the per-processor fixed sources from the map
+	 * (covers interrupts on both IC0 and IC1).
+	 */
+ while (map->irq != -1) {
+ irq_nr = map->irq;
+
+ if (irq_nr >= AU1000_INTC1_INT_BASE) {
+ bit = irq_nr - AU1000_INTC1_INT_BASE;
+ base = (void __iomem *)KSEG1ADDR(AU1000_IC1_PHYS_ADDR);
+ } else {
+ bit = irq_nr - AU1000_INTC0_INT_BASE;
+ base = (void __iomem *)KSEG1ADDR(AU1000_IC0_PHYS_ADDR);
+ }
+ if (map->prio == 0)
+ __raw_writel(1 << bit, base + IC_ASSIGNSET);
+
+ au1x_ic_settype(irq_get_irq_data(irq_nr), map->type);
+ ++map;
+ }
+
+ irq_set_chained_handler(MIPS_CPU_IRQ_BASE + 2, au1000_ic0r0_dispatch);
+ irq_set_chained_handler(MIPS_CPU_IRQ_BASE + 3, au1000_ic0r1_dispatch);
+ irq_set_chained_handler(MIPS_CPU_IRQ_BASE + 4, au1000_ic1r0_dispatch);
+ irq_set_chained_handler(MIPS_CPU_IRQ_BASE + 5, au1000_ic1r1_dispatch);
+}
+
+static void __init alchemy_gpic_init_irq(const struct alchemy_irqmap *dints)
+{
+ int i;
+ void __iomem *bank_base;
+
+ register_syscore_ops(&alchemy_gpic_pmops);
+ mips_cpu_irq_init();
+
+ /* disable & ack all possible interrupt sources */
+ for (i = 0; i < 4; i++) {
+ bank_base = AU1300_GPIC_ADDR + (i * 4);
+ __raw_writel(~0UL, bank_base + AU1300_GPIC_IDIS);
+ wmb();
+ __raw_writel(~0UL, bank_base + AU1300_GPIC_IPEND);
+ wmb();
+ }
+
+ /* register an irq_chip for them, with 2nd highest priority */
+ for (i = ALCHEMY_GPIC_INT_BASE; i <= ALCHEMY_GPIC_INT_LAST; i++) {
+ au1300_set_irq_priority(i, 1);
+ au1300_gpic_settype(irq_get_irq_data(i), IRQ_TYPE_NONE);
+ }
+
+ /* setup known on-chip sources */
+ while ((i = dints->irq) != -1) {
+ au1300_gpic_settype(irq_get_irq_data(i), dints->type);
+ au1300_set_irq_priority(i, dints->prio);
+
+ if (dints->internal)
+ au1300_pinfunc_to_dev(i - ALCHEMY_GPIC_INT_BASE);
+
+ dints++;
+ }
+
+ irq_set_chained_handler(MIPS_CPU_IRQ_BASE + 2, alchemy_gpic_dispatch);
+ irq_set_chained_handler(MIPS_CPU_IRQ_BASE + 3, alchemy_gpic_dispatch);
+ irq_set_chained_handler(MIPS_CPU_IRQ_BASE + 4, alchemy_gpic_dispatch);
+ irq_set_chained_handler(MIPS_CPU_IRQ_BASE + 5, alchemy_gpic_dispatch);
+}
+
+/******************************************************************************/
+
+void __init arch_init_irq(void)
+{
+ switch (alchemy_get_cputype()) {
+ case ALCHEMY_CPU_AU1000:
+ au1000_init_irq(au1000_irqmap);
+ break;
+ case ALCHEMY_CPU_AU1500:
+ au1000_init_irq(au1500_irqmap);
+ break;
+ case ALCHEMY_CPU_AU1100:
+ au1000_init_irq(au1100_irqmap);
+ break;
+ case ALCHEMY_CPU_AU1550:
+ au1000_init_irq(au1550_irqmap);
+ break;
+ case ALCHEMY_CPU_AU1200:
+ au1000_init_irq(au1200_irqmap);
+ break;
+ case ALCHEMY_CPU_AU1300:
+ alchemy_gpic_init_irq(au1300_irqmap);
+ break;
+ default:
+ pr_err("unknown Alchemy IRQ core\n");
+ break;
+ }
+}
+
+asmlinkage void plat_irq_dispatch(void)
+{
+ unsigned long r = (read_c0_status() & read_c0_cause()) >> 8;
+ do_IRQ(MIPS_CPU_IRQ_BASE + __ffs(r & 0xff));
+}
diff --git a/arch/mips/alchemy/common/platform.c b/arch/mips/alchemy/common/platform.c
new file mode 100644
index 000000000..b8f3397c5
--- /dev/null
+++ b/arch/mips/alchemy/common/platform.c
@@ -0,0 +1,458 @@
+/*
+ * Platform device support for Au1x00 SoCs.
+ *
+ * Copyright 2004, Matt Porter <mporter@kernel.crashing.org>
+ *
+ * (C) Copyright Embedded Alley Solutions, Inc 2005
+ * Author: Pantelis Antoniou <pantelis@embeddedalley.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/clk.h>
+#include <linux/dma-mapping.h>
+#include <linux/etherdevice.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/serial_8250.h>
+#include <linux/slab.h>
+#include <linux/usb/ehci_pdriver.h>
+#include <linux/usb/ohci_pdriver.h>
+
+#include <asm/mach-au1x00/au1000.h>
+#include <asm/mach-au1x00/au1xxx_dbdma.h>
+#include <asm/mach-au1x00/au1100_mmc.h>
+#include <asm/mach-au1x00/au1xxx_eth.h>
+
+#include <prom.h>
+
+static void alchemy_8250_pm(struct uart_port *port, unsigned int state,
+ unsigned int old_state)
+{
+#ifdef CONFIG_SERIAL_8250
+ switch (state) {
+ case 0:
+ alchemy_uart_enable(CPHYSADDR(port->membase));
+ serial8250_do_pm(port, state, old_state);
+ break;
+ case 3: /* power off */
+ serial8250_do_pm(port, state, old_state);
+ alchemy_uart_disable(CPHYSADDR(port->membase));
+ break;
+ default:
+ serial8250_do_pm(port, state, old_state);
+ break;
+ }
+#endif
+}
+
+#define PORT(_base, _irq) \
+ { \
+ .mapbase = _base, \
+ .irq = _irq, \
+ .regshift = 2, \
+ .iotype = UPIO_AU, \
+ .flags = UPF_SKIP_TEST | UPF_IOREMAP | \
+ UPF_FIXED_TYPE, \
+ .type = PORT_16550A, \
+ .pm = alchemy_8250_pm, \
+ }
+
+static struct plat_serial8250_port au1x00_uart_data[][4] __initdata = {
+ [ALCHEMY_CPU_AU1000] = {
+ PORT(AU1000_UART0_PHYS_ADDR, AU1000_UART0_INT),
+ PORT(AU1000_UART1_PHYS_ADDR, AU1000_UART1_INT),
+ PORT(AU1000_UART2_PHYS_ADDR, AU1000_UART2_INT),
+ PORT(AU1000_UART3_PHYS_ADDR, AU1000_UART3_INT),
+ },
+ [ALCHEMY_CPU_AU1500] = {
+ PORT(AU1000_UART0_PHYS_ADDR, AU1500_UART0_INT),
+ PORT(AU1000_UART3_PHYS_ADDR, AU1500_UART3_INT),
+ },
+ [ALCHEMY_CPU_AU1100] = {
+ PORT(AU1000_UART0_PHYS_ADDR, AU1100_UART0_INT),
+ PORT(AU1000_UART1_PHYS_ADDR, AU1100_UART1_INT),
+ PORT(AU1000_UART3_PHYS_ADDR, AU1100_UART3_INT),
+ },
+ [ALCHEMY_CPU_AU1550] = {
+ PORT(AU1000_UART0_PHYS_ADDR, AU1550_UART0_INT),
+ PORT(AU1000_UART1_PHYS_ADDR, AU1550_UART1_INT),
+ PORT(AU1000_UART3_PHYS_ADDR, AU1550_UART3_INT),
+ },
+ [ALCHEMY_CPU_AU1200] = {
+ PORT(AU1000_UART0_PHYS_ADDR, AU1200_UART0_INT),
+ PORT(AU1000_UART1_PHYS_ADDR, AU1200_UART1_INT),
+ },
+ [ALCHEMY_CPU_AU1300] = {
+ PORT(AU1300_UART0_PHYS_ADDR, AU1300_UART0_INT),
+ PORT(AU1300_UART1_PHYS_ADDR, AU1300_UART1_INT),
+ PORT(AU1300_UART2_PHYS_ADDR, AU1300_UART2_INT),
+ PORT(AU1300_UART3_PHYS_ADDR, AU1300_UART3_INT),
+ },
+};
+
+static struct platform_device au1xx0_uart_device = {
+ .name = "serial8250",
+ .id = PLAT8250_DEV_AU1X00,
+};
+
+static void __init alchemy_setup_uarts(int ctype)
+{
+ long uartclk;
+ int s = sizeof(struct plat_serial8250_port);
+ int c = alchemy_get_uarts(ctype);
+ struct plat_serial8250_port *ports;
+ struct clk *clk = clk_get(NULL, ALCHEMY_PERIPH_CLK);
+
+ if (IS_ERR(clk))
+ return;
+ if (clk_prepare_enable(clk)) {
+ clk_put(clk);
+ return;
+ }
+ uartclk = clk_get_rate(clk);
+ clk_put(clk);
+
+ ports = kcalloc(s, (c + 1), GFP_KERNEL);
+ if (!ports) {
+ printk(KERN_INFO "Alchemy: no memory for UART data\n");
+ return;
+ }
+ memcpy(ports, au1x00_uart_data[ctype], s * c);
+ au1xx0_uart_device.dev.platform_data = ports;
+
+ /* Fill up uartclk. */
+ for (s = 0; s < c; s++)
+ ports[s].uartclk = uartclk;
+ if (platform_device_register(&au1xx0_uart_device))
+ printk(KERN_INFO "Alchemy: failed to register UARTs\n");
+}
+
+
+static u64 alchemy_all_dmamask = DMA_BIT_MASK(32);
+
+/* Power on callback for the ehci platform driver */
+static int alchemy_ehci_power_on(struct platform_device *pdev)
+{
+ return alchemy_usb_control(ALCHEMY_USB_EHCI0, 1);
+}
+
+/* Power off/suspend callback for the ehci platform driver */
+static void alchemy_ehci_power_off(struct platform_device *pdev)
+{
+ alchemy_usb_control(ALCHEMY_USB_EHCI0, 0);
+}
+
+static struct usb_ehci_pdata alchemy_ehci_pdata = {
+ .no_io_watchdog = 1,
+ .power_on = alchemy_ehci_power_on,
+ .power_off = alchemy_ehci_power_off,
+ .power_suspend = alchemy_ehci_power_off,
+};
+
+/* Power on callback for the ohci platform driver */
+static int alchemy_ohci_power_on(struct platform_device *pdev)
+{
+ int unit;
+
+ unit = (pdev->id == 1) ?
+ ALCHEMY_USB_OHCI1 : ALCHEMY_USB_OHCI0;
+
+ return alchemy_usb_control(unit, 1);
+}
+
+/* Power off/suspend callback for the ohci platform driver */
+static void alchemy_ohci_power_off(struct platform_device *pdev)
+{
+ int unit;
+
+ unit = (pdev->id == 1) ?
+ ALCHEMY_USB_OHCI1 : ALCHEMY_USB_OHCI0;
+
+ alchemy_usb_control(unit, 0);
+}
+
+static struct usb_ohci_pdata alchemy_ohci_pdata = {
+ .power_on = alchemy_ohci_power_on,
+ .power_off = alchemy_ohci_power_off,
+ .power_suspend = alchemy_ohci_power_off,
+};
+
+static unsigned long alchemy_ohci_data[][2] __initdata = {
+ [ALCHEMY_CPU_AU1000] = { AU1000_USB_OHCI_PHYS_ADDR, AU1000_USB_HOST_INT },
+ [ALCHEMY_CPU_AU1500] = { AU1000_USB_OHCI_PHYS_ADDR, AU1500_USB_HOST_INT },
+ [ALCHEMY_CPU_AU1100] = { AU1000_USB_OHCI_PHYS_ADDR, AU1100_USB_HOST_INT },
+ [ALCHEMY_CPU_AU1550] = { AU1550_USB_OHCI_PHYS_ADDR, AU1550_USB_HOST_INT },
+ [ALCHEMY_CPU_AU1200] = { AU1200_USB_OHCI_PHYS_ADDR, AU1200_USB_INT },
+ [ALCHEMY_CPU_AU1300] = { AU1300_USB_OHCI0_PHYS_ADDR, AU1300_USB_INT },
+};
+
+static unsigned long alchemy_ehci_data[][2] __initdata = {
+ [ALCHEMY_CPU_AU1200] = { AU1200_USB_EHCI_PHYS_ADDR, AU1200_USB_INT },
+ [ALCHEMY_CPU_AU1300] = { AU1300_USB_EHCI_PHYS_ADDR, AU1300_USB_INT },
+};
+
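+/* Allocate a bare platform_device plus a MEM/IRQ resource pair; the caller
+ * fills in name, id, addresses and platform data and then registers it.
+ */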
+static int __init _new_usbres(struct resource **r, struct platform_device **d)
+{
+ *r = kcalloc(2, sizeof(struct resource), GFP_KERNEL);
+ if (!*r)
+ return -ENOMEM;
+ *d = kzalloc(sizeof(struct platform_device), GFP_KERNEL);
+ if (!*d) {
+ kfree(*r);
+ return -ENOMEM;
+ }
+
+ (*d)->dev.coherent_dma_mask = DMA_BIT_MASK(32);
+ (*d)->num_resources = 2;
+ (*d)->resource = *r;
+
+ return 0;
+}
+
+static void __init alchemy_setup_usb(int ctype)
+{
+ struct resource *res;
+ struct platform_device *pdev;
+
+ /* setup OHCI0. Every variant has one */
+ if (_new_usbres(&res, &pdev))
+ return;
+
+ res[0].start = alchemy_ohci_data[ctype][0];
+ res[0].end = res[0].start + 0x100 - 1;
+ res[0].flags = IORESOURCE_MEM;
+ res[1].start = alchemy_ohci_data[ctype][1];
+ res[1].end = res[1].start;
+ res[1].flags = IORESOURCE_IRQ;
+ pdev->name = "ohci-platform";
+ pdev->id = 0;
+ pdev->dev.dma_mask = &alchemy_all_dmamask;
+ pdev->dev.platform_data = &alchemy_ohci_pdata;
+
+ if (platform_device_register(pdev))
+ printk(KERN_INFO "Alchemy USB: cannot add OHCI0\n");
+
+
+ /* setup EHCI0: Au1200/Au1300 */
+ if ((ctype == ALCHEMY_CPU_AU1200) || (ctype == ALCHEMY_CPU_AU1300)) {
+ if (_new_usbres(&res, &pdev))
+ return;
+
+ res[0].start = alchemy_ehci_data[ctype][0];
+ res[0].end = res[0].start + 0x100 - 1;
+ res[0].flags = IORESOURCE_MEM;
+ res[1].start = alchemy_ehci_data[ctype][1];
+ res[1].end = res[1].start;
+ res[1].flags = IORESOURCE_IRQ;
+ pdev->name = "ehci-platform";
+ pdev->id = 0;
+ pdev->dev.dma_mask = &alchemy_all_dmamask;
+ pdev->dev.platform_data = &alchemy_ehci_pdata;
+
+ if (platform_device_register(pdev))
+ printk(KERN_INFO "Alchemy USB: cannot add EHCI0\n");
+ }
+
+ /* Au1300: OHCI1 */
+ if (ctype == ALCHEMY_CPU_AU1300) {
+ if (_new_usbres(&res, &pdev))
+ return;
+
+ res[0].start = AU1300_USB_OHCI1_PHYS_ADDR;
+ res[0].end = res[0].start + 0x100 - 1;
+ res[0].flags = IORESOURCE_MEM;
+ res[1].start = AU1300_USB_INT;
+ res[1].end = res[1].start;
+ res[1].flags = IORESOURCE_IRQ;
+ pdev->name = "ohci-platform";
+ pdev->id = 1;
+ pdev->dev.dma_mask = &alchemy_all_dmamask;
+ pdev->dev.platform_data = &alchemy_ohci_pdata;
+
+ if (platform_device_register(pdev))
+ printk(KERN_INFO "Alchemy USB: cannot add OHCI1\n");
+ }
+}
+
+/* Macro to help defining the Ethernet MAC resources */
+#define MAC_RES_COUNT 4 /* MAC regs, MAC en, MAC INT, MACDMA regs */
+#define MAC_RES(_base, _enable, _irq, _macdma) \
+ { \
+ .start = _base, \
+ .end = _base + 0xffff, \
+ .flags = IORESOURCE_MEM, \
+ }, \
+ { \
+ .start = _enable, \
+ .end = _enable + 0x3, \
+ .flags = IORESOURCE_MEM, \
+ }, \
+ { \
+ .start = _irq, \
+ .end = _irq, \
+ .flags = IORESOURCE_IRQ \
+ }, \
+ { \
+ .start = _macdma, \
+ .end = _macdma + 0x1ff, \
+ .flags = IORESOURCE_MEM, \
+ }
+
+static struct resource au1xxx_eth0_resources[][MAC_RES_COUNT] __initdata = {
+ [ALCHEMY_CPU_AU1000] = {
+ MAC_RES(AU1000_MAC0_PHYS_ADDR,
+ AU1000_MACEN_PHYS_ADDR,
+ AU1000_MAC0_DMA_INT,
+ AU1000_MACDMA0_PHYS_ADDR)
+ },
+ [ALCHEMY_CPU_AU1500] = {
+ MAC_RES(AU1500_MAC0_PHYS_ADDR,
+ AU1500_MACEN_PHYS_ADDR,
+ AU1500_MAC0_DMA_INT,
+ AU1000_MACDMA0_PHYS_ADDR)
+ },
+ [ALCHEMY_CPU_AU1100] = {
+ MAC_RES(AU1000_MAC0_PHYS_ADDR,
+ AU1000_MACEN_PHYS_ADDR,
+ AU1100_MAC0_DMA_INT,
+ AU1000_MACDMA0_PHYS_ADDR)
+ },
+ [ALCHEMY_CPU_AU1550] = {
+ MAC_RES(AU1000_MAC0_PHYS_ADDR,
+ AU1000_MACEN_PHYS_ADDR,
+ AU1550_MAC0_DMA_INT,
+ AU1000_MACDMA0_PHYS_ADDR)
+ },
+};
+
+static struct au1000_eth_platform_data au1xxx_eth0_platform_data = {
+ .phy1_search_mac0 = 1,
+};
+
+static struct platform_device au1xxx_eth0_device = {
+ .name = "au1000-eth",
+ .id = 0,
+ .num_resources = MAC_RES_COUNT,
+ .dev = {
+ .dma_mask = &alchemy_all_dmamask,
+ .coherent_dma_mask = DMA_BIT_MASK(32),
+ .platform_data = &au1xxx_eth0_platform_data,
+ },
+};
+
+static struct resource au1xxx_eth1_resources[][MAC_RES_COUNT] __initdata = {
+ [ALCHEMY_CPU_AU1000] = {
+ MAC_RES(AU1000_MAC1_PHYS_ADDR,
+ AU1000_MACEN_PHYS_ADDR + 4,
+ AU1000_MAC1_DMA_INT,
+ AU1000_MACDMA1_PHYS_ADDR)
+ },
+ [ALCHEMY_CPU_AU1500] = {
+ MAC_RES(AU1500_MAC1_PHYS_ADDR,
+ AU1500_MACEN_PHYS_ADDR + 4,
+ AU1500_MAC1_DMA_INT,
+ AU1000_MACDMA1_PHYS_ADDR)
+ },
+ [ALCHEMY_CPU_AU1550] = {
+ MAC_RES(AU1000_MAC1_PHYS_ADDR,
+ AU1000_MACEN_PHYS_ADDR + 4,
+ AU1550_MAC1_DMA_INT,
+ AU1000_MACDMA1_PHYS_ADDR)
+ },
+};
+
+static struct au1000_eth_platform_data au1xxx_eth1_platform_data = {
+ .phy1_search_mac0 = 1,
+};
+
+static struct platform_device au1xxx_eth1_device = {
+ .name = "au1000-eth",
+ .id = 1,
+ .num_resources = MAC_RES_COUNT,
+ .dev = {
+ .dma_mask = &alchemy_all_dmamask,
+ .coherent_dma_mask = DMA_BIT_MASK(32),
+ .platform_data = &au1xxx_eth1_platform_data,
+ },
+};
+
+void __init au1xxx_override_eth_cfg(unsigned int port,
+ struct au1000_eth_platform_data *eth_data)
+{
+ if (!eth_data || port > 1)
+ return;
+
+ if (port == 0)
+ memcpy(&au1xxx_eth0_platform_data, eth_data,
+ sizeof(struct au1000_eth_platform_data));
+ else
+ memcpy(&au1xxx_eth1_platform_data, eth_data,
+ sizeof(struct au1000_eth_platform_data));
+}
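+
+/* Example (hypothetical board code, run before the arch_initcall below),
+ * overriding only the MAC address of the first interface:
+ *
+ *	static struct au1000_eth_platform_data my_eth0_pd = {
+ *		.mac = { 0x00, 0x50, 0xc2, 0x0c, 0x30, 0x00 },
+ *	};
+ *	au1xxx_override_eth_cfg(0, &my_eth0_pd);
+ *
+ * The structure/variable names here are made up for illustration.
+ */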
+
+static void __init alchemy_setup_macs(int ctype)
+{
+ int ret, i;
+ unsigned char ethaddr[6];
+ struct resource *macres;
+
+ /* Handle 1st MAC */
+ if (alchemy_get_macs(ctype) < 1)
+ return;
+
+ macres = kmemdup(au1xxx_eth0_resources[ctype],
+ sizeof(struct resource) * MAC_RES_COUNT, GFP_KERNEL);
+ if (!macres) {
+ printk(KERN_INFO "Alchemy: no memory for MAC0 resources\n");
+ return;
+ }
+ au1xxx_eth0_device.resource = macres;
+
+ i = prom_get_ethernet_addr(ethaddr);
+ if (!i && !is_valid_ether_addr(au1xxx_eth0_platform_data.mac))
+ memcpy(au1xxx_eth0_platform_data.mac, ethaddr, 6);
+
+ ret = platform_device_register(&au1xxx_eth0_device);
+ if (ret)
+ printk(KERN_INFO "Alchemy: failed to register MAC0\n");
+
+
+ /* Handle 2nd MAC */
+ if (alchemy_get_macs(ctype) < 2)
+ return;
+
+ macres = kmemdup(au1xxx_eth1_resources[ctype],
+ sizeof(struct resource) * MAC_RES_COUNT, GFP_KERNEL);
+ if (!macres) {
+ printk(KERN_INFO "Alchemy: no memory for MAC1 resources\n");
+ return;
+ }
+ au1xxx_eth1_device.resource = macres;
+
+ ethaddr[5] += 1; /* next addr for 2nd MAC */
+ if (!i && !is_valid_ether_addr(au1xxx_eth1_platform_data.mac))
+ memcpy(au1xxx_eth1_platform_data.mac, ethaddr, 6);
+
+ /* Register second MAC if enabled in pinfunc */
+ if (!(alchemy_rdsys(AU1000_SYS_PINFUNC) & SYS_PF_NI2)) {
+ ret = platform_device_register(&au1xxx_eth1_device);
+ if (ret)
+ printk(KERN_INFO "Alchemy: failed to register MAC1\n");
+ }
+}
+
+static int __init au1xxx_platform_init(void)
+{
+ int ctype = alchemy_get_cputype();
+
+ alchemy_setup_uarts(ctype);
+ alchemy_setup_macs(ctype);
+ alchemy_setup_usb(ctype);
+
+ return 0;
+}
+
+arch_initcall(au1xxx_platform_init);
diff --git a/arch/mips/alchemy/common/power.c b/arch/mips/alchemy/common/power.c
new file mode 100644
index 000000000..303257b69
--- /dev/null
+++ b/arch/mips/alchemy/common/power.c
@@ -0,0 +1,132 @@
+/*
+ * BRIEF MODULE DESCRIPTION
+ * Au1xx0 Power Management routines.
+ *
+ * Copyright 2001, 2008 MontaVista Software Inc.
+ * Author: MontaVista Software, Inc. <source@mvista.com>
+ *
+ * Some of the routines are right out of init/main.c, whose
+ * copyrights apply here.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
+ * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
+ * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/pm.h>
+#include <linux/sysctl.h>
+#include <linux/jiffies.h>
+
+#include <linux/uaccess.h>
+#include <asm/mach-au1x00/au1000.h>
+
+/*
+ * We need to save/restore a bunch of core registers that are
+ * either volatile or reset to some state across a processor sleep.
+ * If reading a register doesn't provide a proper result for a
+ * later restore, we have to provide a function for loading that
+ * register and save a copy.
+ *
+ * We only have to save/restore registers that aren't otherwise
+ * done as part of a driver pm_* function.
+ */
+static unsigned int sleep_sys_clocks[5];
+static unsigned int sleep_sys_pinfunc;
+static unsigned int sleep_static_memctlr[4][3];
+
+
+static void save_core_regs(void)
+{
+ /* Clocks and PLLs. */
+ sleep_sys_clocks[0] = alchemy_rdsys(AU1000_SYS_FREQCTRL0);
+ sleep_sys_clocks[1] = alchemy_rdsys(AU1000_SYS_FREQCTRL1);
+ sleep_sys_clocks[2] = alchemy_rdsys(AU1000_SYS_CLKSRC);
+ sleep_sys_clocks[3] = alchemy_rdsys(AU1000_SYS_CPUPLL);
+ sleep_sys_clocks[4] = alchemy_rdsys(AU1000_SYS_AUXPLL);
+
+ /* pin mux config */
+ sleep_sys_pinfunc = alchemy_rdsys(AU1000_SYS_PINFUNC);
+
+ /* Save the static memory controller configuration. */
+ sleep_static_memctlr[0][0] = alchemy_rdsmem(AU1000_MEM_STCFG0);
+ sleep_static_memctlr[0][1] = alchemy_rdsmem(AU1000_MEM_STTIME0);
+ sleep_static_memctlr[0][2] = alchemy_rdsmem(AU1000_MEM_STADDR0);
+ sleep_static_memctlr[1][0] = alchemy_rdsmem(AU1000_MEM_STCFG1);
+ sleep_static_memctlr[1][1] = alchemy_rdsmem(AU1000_MEM_STTIME1);
+ sleep_static_memctlr[1][2] = alchemy_rdsmem(AU1000_MEM_STADDR1);
+ sleep_static_memctlr[2][0] = alchemy_rdsmem(AU1000_MEM_STCFG2);
+ sleep_static_memctlr[2][1] = alchemy_rdsmem(AU1000_MEM_STTIME2);
+ sleep_static_memctlr[2][2] = alchemy_rdsmem(AU1000_MEM_STADDR2);
+ sleep_static_memctlr[3][0] = alchemy_rdsmem(AU1000_MEM_STCFG3);
+ sleep_static_memctlr[3][1] = alchemy_rdsmem(AU1000_MEM_STTIME3);
+ sleep_static_memctlr[3][2] = alchemy_rdsmem(AU1000_MEM_STADDR3);
+}
+
+static void restore_core_regs(void)
+{
+ /* restore clock configuration. Writing CPUPLL last will
+ * stall a bit and stabilize other clocks (unless this is
+ * one of those Au1000s with a write-only PLL, where we don't
+ * have a valid value)
+ */
+ alchemy_wrsys(sleep_sys_clocks[0], AU1000_SYS_FREQCTRL0);
+ alchemy_wrsys(sleep_sys_clocks[1], AU1000_SYS_FREQCTRL1);
+ alchemy_wrsys(sleep_sys_clocks[2], AU1000_SYS_CLKSRC);
+ alchemy_wrsys(sleep_sys_clocks[4], AU1000_SYS_AUXPLL);
+ if (!au1xxx_cpu_has_pll_wo())
+ alchemy_wrsys(sleep_sys_clocks[3], AU1000_SYS_CPUPLL);
+
+ alchemy_wrsys(sleep_sys_pinfunc, AU1000_SYS_PINFUNC);
+
+ /* Restore the static memory controller configuration. */
+ alchemy_wrsmem(sleep_static_memctlr[0][0], AU1000_MEM_STCFG0);
+ alchemy_wrsmem(sleep_static_memctlr[0][1], AU1000_MEM_STTIME0);
+ alchemy_wrsmem(sleep_static_memctlr[0][2], AU1000_MEM_STADDR0);
+ alchemy_wrsmem(sleep_static_memctlr[1][0], AU1000_MEM_STCFG1);
+ alchemy_wrsmem(sleep_static_memctlr[1][1], AU1000_MEM_STTIME1);
+ alchemy_wrsmem(sleep_static_memctlr[1][2], AU1000_MEM_STADDR1);
+ alchemy_wrsmem(sleep_static_memctlr[2][0], AU1000_MEM_STCFG2);
+ alchemy_wrsmem(sleep_static_memctlr[2][1], AU1000_MEM_STTIME2);
+ alchemy_wrsmem(sleep_static_memctlr[2][2], AU1000_MEM_STADDR2);
+ alchemy_wrsmem(sleep_static_memctlr[3][0], AU1000_MEM_STCFG3);
+ alchemy_wrsmem(sleep_static_memctlr[3][1], AU1000_MEM_STTIME3);
+ alchemy_wrsmem(sleep_static_memctlr[3][2], AU1000_MEM_STADDR3);
+}
+
+void au_sleep(void)
+{
+ save_core_regs();
+
+ switch (alchemy_get_cputype()) {
+ case ALCHEMY_CPU_AU1000:
+ case ALCHEMY_CPU_AU1500:
+ case ALCHEMY_CPU_AU1100:
+ alchemy_sleep_au1000();
+ break;
+ case ALCHEMY_CPU_AU1550:
+ case ALCHEMY_CPU_AU1200:
+ alchemy_sleep_au1550();
+ break;
+ case ALCHEMY_CPU_AU1300:
+ alchemy_sleep_au1300();
+ break;
+ }
+
+ restore_core_regs();
+}
diff --git a/arch/mips/alchemy/common/prom.c b/arch/mips/alchemy/common/prom.c
new file mode 100644
index 000000000..b13d8adf3
--- /dev/null
+++ b/arch/mips/alchemy/common/prom.c
@@ -0,0 +1,145 @@
+/*
+ *
+ * BRIEF MODULE DESCRIPTION
+ * PROM library initialisation code, supports YAMON and U-Boot.
+ *
+ * Copyright 2000-2001, 2006, 2008 MontaVista Software Inc.
+ * Author: MontaVista Software, Inc. <source@mvista.com>
+ *
+ * This file was derived from Carsten Langgaard's
+ * arch/mips/mips-boards/xx files.
+ *
+ * Carsten Langgaard, carstenl@mips.com
+ * Copyright (C) 1999,2000 MIPS Technologies, Inc. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
+ * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
+ * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/memblock.h>
+#include <linux/sizes.h>
+#include <linux/string.h>
+
+#include <asm/bootinfo.h>
+
+int prom_argc;
+char **prom_argv;
+char **prom_envp;
+
+void __init prom_init_cmdline(void)
+{
+ int i;
+
+ for (i = 1; i < prom_argc; i++) {
+ strlcat(arcs_cmdline, prom_argv[i], COMMAND_LINE_SIZE);
+ if (i < (prom_argc - 1))
+ strlcat(arcs_cmdline, " ", COMMAND_LINE_SIZE);
+ }
+}
+
+char *prom_getenv(char *envname)
+{
+ /*
+ * Return a pointer to the given environment variable.
+ * YAMON uses "name", "value" pairs, while U-Boot uses "name=value".
+ */
+
+ char **env = prom_envp;
+ int i = strlen(envname);
+ int yamon = (*env && strchr(*env, '=') == NULL);
+
+ while (*env) {
+ if (yamon) {
+ if (strcmp(envname, *env++) == 0)
+ return *env;
+ } else if (strncmp(envname, *env, i) == 0 && (*env)[i] == '=')
+ return *env + i + 1;
+ env++;
+ }
+
+ return NULL;
+}
+
+void __init prom_init(void)
+{
+ unsigned char *memsize_str;
+ unsigned long memsize;
+
+ prom_argc = (int)fw_arg0;
+ prom_argv = (char **)fw_arg1;
+ prom_envp = (char **)fw_arg2;
+
+ prom_init_cmdline();
+
+ memsize_str = prom_getenv("memsize");
+ if (!memsize_str || kstrtoul(memsize_str, 0, &memsize))
+ memsize = SZ_64M; /* minimum memsize is 64MB RAM */
+
+ memblock_add(0, memsize);
+}
+
+static inline unsigned char str2hexnum(unsigned char c)
+{
+ if (c >= '0' && c <= '9')
+ return c - '0';
+ if (c >= 'a' && c <= 'f')
+ return c - 'a' + 10;
+ if (c >= 'A' && c <= 'F')
+ return c - 'A' + 10;
+
+ return 0; /* not a hex digit */
+}
+
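+/* Parse a MAC address string such as "00:50:c2:0c:30:00" (hex bytes,
+ * optionally separated by ':' or '.') into a 6-byte binary address.
+ */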
+static inline void str2eaddr(unsigned char *ea, unsigned char *str)
+{
+ int i;
+
+ for (i = 0; i < 6; i++) {
+ unsigned char num;
+
+ if ((*str == '.') || (*str == ':'))
+ str++;
+ num = str2hexnum(*str++) << 4;
+ num |= str2hexnum(*str++);
+ ea[i] = num;
+ }
+}
+
+int __init prom_get_ethernet_addr(char *ethernet_addr)
+{
+ char *ethaddr_str;
+
+ /* Check the environment variables first */
+ ethaddr_str = prom_getenv("ethaddr");
+ if (!ethaddr_str) {
+ /* Check command line */
+ ethaddr_str = strstr(arcs_cmdline, "ethaddr=");
+ if (!ethaddr_str)
+ return -1;
+
+ ethaddr_str += strlen("ethaddr=");
+ }
+
+ str2eaddr(ethernet_addr, ethaddr_str);
+
+ return 0;
+}
diff --git a/arch/mips/alchemy/common/setup.c b/arch/mips/alchemy/common/setup.c
new file mode 100644
index 000000000..2388d6878
--- /dev/null
+++ b/arch/mips/alchemy/common/setup.c
@@ -0,0 +1,107 @@
+/*
+ * Copyright 2000, 2007-2008 MontaVista Software Inc.
+ * Author: MontaVista Software, Inc. <source@mvista.com>
+ *
+ * Updates to 2.6, Pete Popov, Embedded Alley Solutions, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
+ * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
+ * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/init.h>
+#include <linux/ioport.h>
+#include <linux/mm.h>
+#include <linux/dma-map-ops.h> /* for dma_default_coherent */
+
+#include <asm/mipsregs.h>
+
+#include <au1000.h>
+
+extern void __init board_setup(void);
+extern void __init alchemy_set_lpj(void);
+
+static bool alchemy_dma_coherent(void)
+{
+ switch (alchemy_get_cputype()) {
+ case ALCHEMY_CPU_AU1000:
+ case ALCHEMY_CPU_AU1500:
+ case ALCHEMY_CPU_AU1100:
+ return false;
+ case ALCHEMY_CPU_AU1200:
+ /* Au1200 AB USB does not support coherent memory */
+ if ((read_c0_prid() & PRID_REV_MASK) == 0)
+ return false;
+ return true;
+ default:
+ return true;
+ }
+}
+
+void __init plat_mem_setup(void)
+{
+ alchemy_set_lpj();
+
+ if (au1xxx_cpu_needs_config_od())
+ /* Various early Au1xx0 errata corrected by this */
+ set_c0_config(1 << 19); /* Set Config[OD] */
+ else
+ /* Clear to obtain best system bus performance */
+ clear_c0_config(1 << 19); /* Clear Config[OD] */
+
+ dma_default_coherent = alchemy_dma_coherent();
+
+ board_setup(); /* board specific setup */
+
+ /* IO/MEM resources. */
+ set_io_port_base(0);
+ ioport_resource.start = IOPORT_RESOURCE_START;
+ ioport_resource.end = IOPORT_RESOURCE_END;
+ iomem_resource.start = IOMEM_RESOURCE_START;
+ iomem_resource.end = IOMEM_RESOURCE_END;
+}
+
+#ifdef CONFIG_MIPS_FIXUP_BIGPHYS_ADDR
+/* This routine should be valid for all Au1x based boards */
+phys_addr_t fixup_bigphys_addr(phys_addr_t phys_addr, phys_addr_t size)
+{
+ unsigned long start = ALCHEMY_PCI_MEMWIN_START;
+ unsigned long end = ALCHEMY_PCI_MEMWIN_END;
+
+ /* Don't fixup 36-bit addresses */
+ if ((phys_addr >> 32) != 0)
+ return phys_addr;
+
+ /* Check for PCI memory window */
+ if (phys_addr >= start && (phys_addr + size - 1) <= end)
+ return (phys_addr_t)(AU1500_PCI_MEM_PHYS_ADDR + phys_addr);
+
+ /* default nop */
+ return phys_addr;
+}
+
+int io_remap_pfn_range(struct vm_area_struct *vma, unsigned long vaddr,
+ unsigned long pfn, unsigned long size, pgprot_t prot)
+{
+ phys_addr_t phys_addr = fixup_bigphys_addr(pfn << PAGE_SHIFT, size);
+
+ return remap_pfn_range(vma, vaddr, phys_addr >> PAGE_SHIFT, size, prot);
+}
+EXPORT_SYMBOL(io_remap_pfn_range);
+#endif /* CONFIG_MIPS_FIXUP_BIGPHYS_ADDR */
diff --git a/arch/mips/alchemy/common/sleeper.S b/arch/mips/alchemy/common/sleeper.S
new file mode 100644
index 000000000..13586d224
--- /dev/null
+++ b/arch/mips/alchemy/common/sleeper.S
@@ -0,0 +1,266 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright 2002 Embedded Edge, LLC
+ * Author: dan@embeddededge.com
+ *
+ * Sleep helper for Au1xxx sleep mode.
+ */
+
+#include <asm/asm.h>
+#include <asm/mipsregs.h>
+#include <asm/regdef.h>
+#include <asm/stackframe.h>
+
+ .extern __flush_cache_all
+
+ .text
+ .set noreorder
+ .set noat
+ .align 5
+
+
+/* preparatory stuff */
+.macro SETUP_SLEEP
+ subu sp, PT_SIZE
+ sw $1, PT_R1(sp)
+ sw $2, PT_R2(sp)
+ sw $3, PT_R3(sp)
+ sw $4, PT_R4(sp)
+ sw $5, PT_R5(sp)
+ sw $6, PT_R6(sp)
+ sw $7, PT_R7(sp)
+ sw $16, PT_R16(sp)
+ sw $17, PT_R17(sp)
+ sw $18, PT_R18(sp)
+ sw $19, PT_R19(sp)
+ sw $20, PT_R20(sp)
+ sw $21, PT_R21(sp)
+ sw $22, PT_R22(sp)
+ sw $23, PT_R23(sp)
+ sw $26, PT_R26(sp)
+ sw $27, PT_R27(sp)
+ sw $28, PT_R28(sp)
+ sw $30, PT_R30(sp)
+ sw $31, PT_R31(sp)
+ mfc0 k0, CP0_STATUS
+ sw k0, 0x20(sp)
+ mfc0 k0, CP0_CONTEXT
+ sw k0, 0x1c(sp)
+ mfc0 k0, CP0_PAGEMASK
+ sw k0, 0x18(sp)
+ mfc0 k0, CP0_CONFIG
+ sw k0, 0x14(sp)
+
+ /* flush caches to make sure context is in memory */
+ la t1, __flush_cache_all
+ lw t0, 0(t1)
+ jalr t0
+ nop
+
+ /* Now set up the scratch registers so the boot rom will
+ * return to this point upon wakeup.
+ * sys_scratch0 : SP
+ * sys_scratch1 : RA
+ */
+ lui t3, 0xb190 /* sys_xxx */
+ sw sp, 0x0018(t3)
+ la k0, alchemy_sleep_wakeup /* resume path */
+ sw k0, 0x001c(t3)
+.endm
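+
+/* The CP0 registers saved at sp+0x14..0x20 above are reloaded from the
+ * same stack offsets in alchemy_sleep_wakeup at the end of this file;
+ * keep the two in sync.
+ */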
+
+.macro DO_SLEEP
+ /* put power supply and processor to sleep */
+ sw zero, 0x0078(t3) /* sys_slppwr */
+ sync
+ sw zero, 0x007c(t3) /* sys_sleep */
+ sync
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+.endm
+
+/* sleep code for Au1000/Au1100/Au1500 memory controller type */
+LEAF(alchemy_sleep_au1000)
+
+ SETUP_SLEEP
+
+ /* cache following instructions, as memory gets put to sleep */
+ la t0, 1f
+ .set arch=r4000
+ cache 0x14, 0(t0)
+ cache 0x14, 32(t0)
+ cache 0x14, 64(t0)
+ cache 0x14, 96(t0)
+ .set mips0
+
+1: lui a0, 0xb400 /* mem_xxx */
+ sw zero, 0x001c(a0) /* Precharge */
+ sync
+ sw zero, 0x0020(a0) /* Auto Refresh */
+ sync
+ sw zero, 0x0030(a0) /* Sleep */
+ sync
+
+ DO_SLEEP
+
+END(alchemy_sleep_au1000)
+
+/* sleep code for Au1550/Au1200 memory controller type */
+LEAF(alchemy_sleep_au1550)
+
+ SETUP_SLEEP
+
+ /* cache following instructions, as memory gets put to sleep */
+ la t0, 1f
+ .set arch=r4000
+ cache 0x14, 0(t0)
+ cache 0x14, 32(t0)
+ cache 0x14, 64(t0)
+ cache 0x14, 96(t0)
+ .set mips0
+
+1: lui a0, 0xb400 /* mem_xxx */
+ sw zero, 0x08c0(a0) /* Precharge */
+ sync
+ sw zero, 0x08d0(a0) /* Self Refresh */
+ sync
+
+ /* wait for sdram to enter self-refresh mode */
+ lui t0, 0x0100
+2: lw t1, 0x0850(a0) /* mem_sdstat */
+ and t2, t1, t0
+ beq t2, zero, 2b
+ nop
+
+ /* disable SDRAM clocks */
+ lui t0, 0xcfff
+ ori t0, t0, 0xffff
+ lw t1, 0x0840(a0) /* mem_sdconfiga */
+ and t1, t0, t1 /* clear CE[1:0] */
+ sw t1, 0x0840(a0) /* mem_sdconfiga */
+ sync
+
+ DO_SLEEP
+
+END(alchemy_sleep_au1550)
+
+/* sleep code for Au1300 memory controller type */
+LEAF(alchemy_sleep_au1300)
+
+ SETUP_SLEEP
+
+ /* cache following instructions, as memory gets put to sleep */
+ la t0, 2f
+ la t1, 4f
+ subu t2, t1, t0
+
+ .set arch=r4000
+
+1: cache 0x14, 0(t0)
+ subu t2, t2, 32
+ bgez t2, 1b
+ addu t0, t0, 32
+
+ .set mips0
+
+2: lui a0, 0xb400 /* mem_xxx */
+
+ /* disable all ports in mem_sdportcfga */
+ sw zero, 0x868(a0) /* mem_sdportcfga */
+ sync
+
+ /* disable ODT */
+ li t0, 0x03010000
+ sw t0, 0x08d8(a0) /* mem_sdcmd0 */
+ sw t0, 0x08dc(a0) /* mem_sdcmd1 */
+ sync
+
+ /* precharge */
+ li t0, 0x23000400
+ sw t0, 0x08dc(a0) /* mem_sdcmd1 */
+ sw t0, 0x08d8(a0) /* mem_sdcmd0 */
+ sync
+
+ /* auto refresh */
+ sw zero, 0x08c8(a0) /* mem_sdautoref */
+ sync
+
+ /* block access to the DDR */
+ lw t0, 0x0848(a0) /* mem_sdconfigb */
+ li t1, (1 << 7 | 0x3F)
+ or t0, t0, t1
+ sw t0, 0x0848(a0) /* mem_sdconfigb */
+ sync
+
+ /* issue the Self Refresh command */
+ li t0, 0x10000000
+ sw t0, 0x08dc(a0) /* mem_sdcmd1 */
+ sw t0, 0x08d8(a0) /* mem_sdcmd0 */
+ sync
+
+ /* wait for sdram to enter self-refresh mode */
+ lui t0, 0x0300
+3: lw t1, 0x0850(a0) /* mem_sdstat */
+ and t2, t1, t0
+ bne t2, t0, 3b
+ nop
+
+ /* disable SDRAM clocks */
+ li t0, ~(3<<28)
+ lw t1, 0x0840(a0) /* mem_sdconfiga */
+ and t1, t1, t0 /* clear CE[1:0] */
+ sw t1, 0x0840(a0) /* mem_sdconfiga */
+ sync
+
+ DO_SLEEP
+4:
+
+END(alchemy_sleep_au1300)
+
+
+ /* This is where we return upon wakeup.
+ * Reload all of the registers and return.
+ */
+LEAF(alchemy_sleep_wakeup)
+ lw k0, 0x20(sp)
+ mtc0 k0, CP0_STATUS
+ lw k0, 0x1c(sp)
+ mtc0 k0, CP0_CONTEXT
+ lw k0, 0x18(sp)
+ mtc0 k0, CP0_PAGEMASK
+ lw k0, 0x14(sp)
+ mtc0 k0, CP0_CONFIG
+
+ /* We need to catch the early Alchemy SOCs with
+ * the write-only Config[OD] bit and set it back to one...
+ */
+ jal au1x00_fixup_config_od
+ nop
+ lw $1, PT_R1(sp)
+ lw $2, PT_R2(sp)
+ lw $3, PT_R3(sp)
+ lw $4, PT_R4(sp)
+ lw $5, PT_R5(sp)
+ lw $6, PT_R6(sp)
+ lw $7, PT_R7(sp)
+ lw $16, PT_R16(sp)
+ lw $17, PT_R17(sp)
+ lw $18, PT_R18(sp)
+ lw $19, PT_R19(sp)
+ lw $20, PT_R20(sp)
+ lw $21, PT_R21(sp)
+ lw $22, PT_R22(sp)
+ lw $23, PT_R23(sp)
+ lw $26, PT_R26(sp)
+ lw $27, PT_R27(sp)
+ lw $28, PT_R28(sp)
+ lw $30, PT_R30(sp)
+ lw $31, PT_R31(sp)
+ jr ra
+ addiu sp, PT_SIZE
+END(alchemy_sleep_wakeup)
diff --git a/arch/mips/alchemy/common/time.c b/arch/mips/alchemy/common/time.c
new file mode 100644
index 000000000..d794ffb67
--- /dev/null
+++ b/arch/mips/alchemy/common/time.c
@@ -0,0 +1,155 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2008-2009 Manuel Lauss <manuel.lauss@gmail.com>
+ *
+ * Previous incarnations were:
+ * Copyright (C) 2001, 2006, 2008 MontaVista Software, <source@mvista.com>
+ * Copied and modified Carsten Langgaard's time.c
+ *
+ * Carsten Langgaard, carstenl@mips.com
+ * Copyright (C) 1999,2000 MIPS Technologies, Inc. All rights reserved.
+ *
+ * Clocksource/event using the 32.768kHz-clocked Counter1 ('RTC' in the
+ * databooks). Firmware/Board init code must enable the counters in the
+ * counter control register, otherwise the CP0 counter clocksource/event
+ * will be installed instead (and use of the 'wait' instruction is prohibited).
+ */
+
+#include <linux/clockchips.h>
+#include <linux/clocksource.h>
+#include <linux/interrupt.h>
+#include <linux/spinlock.h>
+
+#include <asm/idle.h>
+#include <asm/processor.h>
+#include <asm/time.h>
+#include <asm/mach-au1x00/au1000.h>
+
+/* 32kHz clock enabled and detected */
+#define CNTR_OK (SYS_CNTRL_E0 | SYS_CNTRL_32S)
+
+static u64 au1x_counter1_read(struct clocksource *cs)
+{
+ return alchemy_rdsys(AU1000_SYS_RTCREAD);
+}
+
+static struct clocksource au1x_counter1_clocksource = {
+ .name = "alchemy-counter1",
+ .read = au1x_counter1_read,
+ .mask = CLOCKSOURCE_MASK(32),
+ .flags = CLOCK_SOURCE_IS_CONTINUOUS,
+ .rating = 1500,
+};
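+
+/* The rating is deliberately high so counter1 is preferred over the
+ * generic MIPS CP0 counter clocksource whenever the 32kHz oscillator
+ * works; otherwise the CP0 counter is used instead (see header comment).
+ */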
+
+static int au1x_rtcmatch2_set_next_event(unsigned long delta,
+ struct clock_event_device *cd)
+{
+ delta += alchemy_rdsys(AU1000_SYS_RTCREAD);
+ /* wait for register access */
+ while (alchemy_rdsys(AU1000_SYS_CNTRCTRL) & SYS_CNTRL_M21)
+ ;
+ alchemy_wrsys(delta, AU1000_SYS_RTCMATCH2);
+
+ return 0;
+}
+
+static irqreturn_t au1x_rtcmatch2_irq(int irq, void *dev_id)
+{
+ struct clock_event_device *cd = dev_id;
+ cd->event_handler(cd);
+ return IRQ_HANDLED;
+}
+
+static struct clock_event_device au1x_rtcmatch2_clockdev = {
+ .name = "rtcmatch2",
+ .features = CLOCK_EVT_FEAT_ONESHOT,
+ .rating = 1500,
+ .set_next_event = au1x_rtcmatch2_set_next_event,
+ .cpumask = cpu_possible_mask,
+};
+
+static int __init alchemy_time_init(unsigned int m2int)
+{
+ struct clock_event_device *cd = &au1x_rtcmatch2_clockdev;
+ unsigned long t;
+
+ au1x_rtcmatch2_clockdev.irq = m2int;
+
+ /* Check if firmware (YAMON, ...) has enabled the 32kHz oscillator
+ * and the clock has been detected. If so, register the counter1
+ * clocksource and the rtcmatch2 clockevent device; otherwise don't
+ * bother. Note that both bits being set is by no means a definite
+ * guarantee that the counters actually work (the 32S bit seems to
+ * get stuck at 1 once a single clock edge is detected, hence the
+ * timeouts).
+ */
+ if (CNTR_OK != (alchemy_rdsys(AU1000_SYS_CNTRCTRL) & CNTR_OK))
+ goto cntr_err;
+
+ /*
+ * setup counter 1 (RTC) to tick at full speed
+ */
+ t = 0xffffff;
+ while ((alchemy_rdsys(AU1000_SYS_CNTRCTRL) & SYS_CNTRL_T1S) && --t)
+ asm volatile ("nop");
+ if (!t)
+ goto cntr_err;
+
+ alchemy_wrsys(0, AU1000_SYS_RTCTRIM); /* 32.768 kHz */
+
+ t = 0xffffff;
+ while ((alchemy_rdsys(AU1000_SYS_CNTRCTRL) & SYS_CNTRL_C1S) && --t)
+ asm volatile ("nop");
+ if (!t)
+ goto cntr_err;
+ alchemy_wrsys(0, AU1000_SYS_RTCWRITE);
+
+ t = 0xffffff;
+ while ((alchemy_rdsys(AU1000_SYS_CNTRCTRL) & SYS_CNTRL_C1S) && --t)
+ asm volatile ("nop");
+ if (!t)
+ goto cntr_err;
+
+ /* register counter1 clocksource and event device */
+ clocksource_register_hz(&au1x_counter1_clocksource, 32768);
+
+ cd->shift = 32;
+ cd->mult = div_sc(32768, NSEC_PER_SEC, cd->shift);
+ cd->max_delta_ns = clockevent_delta2ns(0xffffffff, cd);
+ cd->max_delta_ticks = 0xffffffff;
+ cd->min_delta_ns = clockevent_delta2ns(9, cd);
+ cd->min_delta_ticks = 9; /* ~0.28ms */
+ clockevents_register_device(cd);
+ if (request_irq(m2int, au1x_rtcmatch2_irq, IRQF_TIMER, "timer",
+ &au1x_rtcmatch2_clockdev))
+ pr_err("Failed to register timer interrupt\n");
+
+ printk(KERN_INFO "Alchemy clocksource installed\n");
+
+ return 0;
+
+cntr_err:
+ return -1;
+}
+
+static int alchemy_m2inttab[] __initdata = {
+ AU1000_RTC_MATCH2_INT,
+ AU1500_RTC_MATCH2_INT,
+ AU1100_RTC_MATCH2_INT,
+ AU1550_RTC_MATCH2_INT,
+ AU1200_RTC_MATCH2_INT,
+ AU1300_RTC_MATCH2_INT,
+};
+
+void __init plat_time_init(void)
+{
+ int t;
+
+ t = alchemy_get_cputype();
+ if (t == ALCHEMY_CPU_UNKNOWN ||
+ alchemy_time_init(alchemy_m2inttab[t]))
+ cpu_wait = NULL; /* wait doesn't work with r4k timer */
+}
diff --git a/arch/mips/alchemy/common/usb.c b/arch/mips/alchemy/common/usb.c
new file mode 100644
index 000000000..5d618547e
--- /dev/null
+++ b/arch/mips/alchemy/common/usb.c
@@ -0,0 +1,627 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * USB block power/access management abstraction.
+ *
+ * Au1000+: The OHCI block control register is at the far end of the OHCI
+ *	    memory area. The Au1550 has its OHCI block at a different base
+ *	    address. No need to handle the UDC here.
+ * Au1200: one register to control access and clocks to O/EHCI, UDC and OTG
+ * as well as the PHY for EHCI and UDC.
+ *
+ */
+
+#include <linux/clk.h>
+#include <linux/export.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/spinlock.h>
+#include <linux/syscore_ops.h>
+#include <asm/cpu.h>
+#include <asm/mach-au1x00/au1000.h>
+
+/* control register offsets */
+#define AU1000_OHCICFG 0x7fffc
+#define AU1550_OHCICFG 0x07ffc
+#define AU1200_USBCFG 0x04
+
+/* Au1000 USB block config bits */
+#define USBHEN_RD (1 << 4) /* OHCI reset-done indicator */
+#define USBHEN_CE (1 << 3) /* OHCI block clock enable */
+#define USBHEN_E (1 << 2) /* OHCI block enable */
+#define USBHEN_C (1 << 1) /* OHCI block coherency bit */
+#define USBHEN_BE (1 << 0) /* OHCI Big-Endian */
+
+/* Au1200 USB config bits */
+#define USBCFG_PFEN (1 << 31) /* prefetch enable (undoc) */
+#define USBCFG_RDCOMB (1 << 30) /* read combining (undoc) */
+#define USBCFG_UNKNOWN (5 << 20) /* unknown, leave this way */
+#define USBCFG_SSD (1 << 23) /* serial short detect en */
+#define USBCFG_PPE (1 << 19) /* HS PHY PLL */
+#define USBCFG_UCE (1 << 18) /* UDC clock enable */
+#define USBCFG_ECE (1 << 17) /* EHCI clock enable */
+#define USBCFG_OCE (1 << 16) /* OHCI clock enable */
+#define USBCFG_FLA(x) (((x) & 0x3f) << 8)
+#define USBCFG_UCAM (1 << 7) /* coherent access (undoc) */
+#define USBCFG_GME (1 << 6) /* OTG mem access */
+#define USBCFG_DBE (1 << 5) /* UDC busmaster enable */
+#define USBCFG_DME (1 << 4) /* UDC mem enable */
+#define USBCFG_EBE (1 << 3) /* EHCI busmaster enable */
+#define USBCFG_EME (1 << 2) /* EHCI mem enable */
+#define USBCFG_OBE (1 << 1) /* OHCI busmaster enable */
+#define USBCFG_OME (1 << 0) /* OHCI mem enable */
+#define USBCFG_INIT_AU1200 (USBCFG_PFEN | USBCFG_RDCOMB | USBCFG_UNKNOWN |\
+ USBCFG_SSD | USBCFG_FLA(0x20) | USBCFG_UCAM | \
+ USBCFG_GME | USBCFG_DBE | USBCFG_DME | \
+ USBCFG_EBE | USBCFG_EME | USBCFG_OBE | \
+ USBCFG_OME)
+
+/* Au1300 USB config registers */
+#define USB_DWC_CTRL1 0x00
+#define USB_DWC_CTRL2 0x04
+#define USB_VBUS_TIMER 0x10
+#define USB_SBUS_CTRL 0x14
+#define USB_MSR_ERR 0x18
+#define USB_DWC_CTRL3 0x1C
+#define USB_DWC_CTRL4 0x20
+#define USB_OTG_STATUS 0x28
+#define USB_DWC_CTRL5 0x2C
+#define USB_DWC_CTRL6 0x30
+#define USB_DWC_CTRL7 0x34
+#define USB_PHY_STATUS 0xC0
+#define USB_INT_STATUS 0xC4
+#define USB_INT_ENABLE 0xC8
+
+#define USB_DWC_CTRL1_OTGD 0x04 /* set to DISable OTG */
+#define USB_DWC_CTRL1_HSTRS 0x02 /* set to ENable EHCI */
+#define USB_DWC_CTRL1_DCRS 0x01 /* set to ENable UDC */
+
+#define USB_DWC_CTRL2_PHY1RS 0x04 /* set to enable PHY1 */
+#define USB_DWC_CTRL2_PHY0RS 0x02 /* set to enable PHY0 */
+#define USB_DWC_CTRL2_PHYRS 0x01 /* set to enable PHY */
+
+#define USB_DWC_CTRL3_OHCI1_CKEN (1 << 19)
+#define USB_DWC_CTRL3_OHCI0_CKEN (1 << 18)
+#define USB_DWC_CTRL3_EHCI0_CKEN (1 << 17)
+#define USB_DWC_CTRL3_OTG0_CKEN (1 << 16)
+
+#define USB_SBUS_CTRL_SBCA 0x04 /* coherent access */
+
+#define USB_INTEN_FORCE 0x20
+#define USB_INTEN_PHY 0x10
+#define USB_INTEN_UDC 0x08
+#define USB_INTEN_EHCI 0x04
+#define USB_INTEN_OHCI1 0x02
+#define USB_INTEN_OHCI0 0x01
+
+static DEFINE_SPINLOCK(alchemy_usb_lock);
+
+static inline void __au1300_usb_phyctl(void __iomem *base, int enable)
+{
+ unsigned long r, s;
+
+ r = __raw_readl(base + USB_DWC_CTRL2);
+ s = __raw_readl(base + USB_DWC_CTRL3);
+
+ s &= USB_DWC_CTRL3_OHCI1_CKEN | USB_DWC_CTRL3_OHCI0_CKEN |
+ USB_DWC_CTRL3_EHCI0_CKEN | USB_DWC_CTRL3_OTG0_CKEN;
+
+ if (enable) {
+ /* simply enable all PHYs */
+ r |= USB_DWC_CTRL2_PHY1RS | USB_DWC_CTRL2_PHY0RS |
+ USB_DWC_CTRL2_PHYRS;
+ __raw_writel(r, base + USB_DWC_CTRL2);
+ wmb();
+ } else if (!s) {
+ /* no USB block active, so disable all PHYs */
+ r &= ~(USB_DWC_CTRL2_PHY1RS | USB_DWC_CTRL2_PHY0RS |
+ USB_DWC_CTRL2_PHYRS);
+ __raw_writel(r, base + USB_DWC_CTRL2);
+ wmb();
+ }
+}
+
+static inline void __au1300_ohci_control(void __iomem *base, int enable, int id)
+{
+ unsigned long r;
+
+ if (enable) {
+ __raw_writel(1, base + USB_DWC_CTRL7); /* start OHCI clock */
+ wmb();
+
+ r = __raw_readl(base + USB_DWC_CTRL3); /* enable OHCI block */
+ r |= (id == 0) ? USB_DWC_CTRL3_OHCI0_CKEN
+ : USB_DWC_CTRL3_OHCI1_CKEN;
+ __raw_writel(r, base + USB_DWC_CTRL3);
+ wmb();
+
+ __au1300_usb_phyctl(base, enable); /* power up the PHYs */
+
+ r = __raw_readl(base + USB_INT_ENABLE);
+ r |= (id == 0) ? USB_INTEN_OHCI0 : USB_INTEN_OHCI1;
+ __raw_writel(r, base + USB_INT_ENABLE);
+ wmb();
+
+ /* reset the OHCI start clock bit */
+ __raw_writel(0, base + USB_DWC_CTRL7);
+ wmb();
+ } else {
+ r = __raw_readl(base + USB_INT_ENABLE);
+ r &= ~((id == 0) ? USB_INTEN_OHCI0 : USB_INTEN_OHCI1);
+ __raw_writel(r, base + USB_INT_ENABLE);
+ wmb();
+
+ r = __raw_readl(base + USB_DWC_CTRL3);
+ r &= ~((id == 0) ? USB_DWC_CTRL3_OHCI0_CKEN
+ : USB_DWC_CTRL3_OHCI1_CKEN);
+ __raw_writel(r, base + USB_DWC_CTRL3);
+ wmb();
+
+ __au1300_usb_phyctl(base, enable);
+ }
+}
+
+static inline void __au1300_ehci_control(void __iomem *base, int enable)
+{
+ unsigned long r;
+
+ if (enable) {
+ r = __raw_readl(base + USB_DWC_CTRL3);
+ r |= USB_DWC_CTRL3_EHCI0_CKEN;
+ __raw_writel(r, base + USB_DWC_CTRL3);
+ wmb();
+
+ r = __raw_readl(base + USB_DWC_CTRL1);
+ r |= USB_DWC_CTRL1_HSTRS;
+ __raw_writel(r, base + USB_DWC_CTRL1);
+ wmb();
+
+ __au1300_usb_phyctl(base, enable);
+
+ r = __raw_readl(base + USB_INT_ENABLE);
+ r |= USB_INTEN_EHCI;
+ __raw_writel(r, base + USB_INT_ENABLE);
+ wmb();
+ } else {
+ r = __raw_readl(base + USB_INT_ENABLE);
+ r &= ~USB_INTEN_EHCI;
+ __raw_writel(r, base + USB_INT_ENABLE);
+ wmb();
+
+ r = __raw_readl(base + USB_DWC_CTRL1);
+ r &= ~USB_DWC_CTRL1_HSTRS;
+ __raw_writel(r, base + USB_DWC_CTRL1);
+ wmb();
+
+ r = __raw_readl(base + USB_DWC_CTRL3);
+ r &= ~USB_DWC_CTRL3_EHCI0_CKEN;
+ __raw_writel(r, base + USB_DWC_CTRL3);
+ wmb();
+
+ __au1300_usb_phyctl(base, enable);
+ }
+}
+
+static inline void __au1300_udc_control(void __iomem *base, int enable)
+{
+ unsigned long r;
+
+ if (enable) {
+ r = __raw_readl(base + USB_DWC_CTRL1);
+ r |= USB_DWC_CTRL1_DCRS;
+ __raw_writel(r, base + USB_DWC_CTRL1);
+ wmb();
+
+ __au1300_usb_phyctl(base, enable);
+
+ r = __raw_readl(base + USB_INT_ENABLE);
+ r |= USB_INTEN_UDC;
+ __raw_writel(r, base + USB_INT_ENABLE);
+ wmb();
+ } else {
+ r = __raw_readl(base + USB_INT_ENABLE);
+ r &= ~USB_INTEN_UDC;
+ __raw_writel(r, base + USB_INT_ENABLE);
+ wmb();
+
+ r = __raw_readl(base + USB_DWC_CTRL1);
+ r &= ~USB_DWC_CTRL1_DCRS;
+ __raw_writel(r, base + USB_DWC_CTRL1);
+ wmb();
+
+ __au1300_usb_phyctl(base, enable);
+ }
+}
+
+static inline void __au1300_otg_control(void __iomem *base, int enable)
+{
+ unsigned long r;
+ if (enable) {
+ r = __raw_readl(base + USB_DWC_CTRL3);
+ r |= USB_DWC_CTRL3_OTG0_CKEN;
+ __raw_writel(r, base + USB_DWC_CTRL3);
+ wmb();
+
+ r = __raw_readl(base + USB_DWC_CTRL1);
+ r &= ~USB_DWC_CTRL1_OTGD;
+ __raw_writel(r, base + USB_DWC_CTRL1);
+ wmb();
+
+ __au1300_usb_phyctl(base, enable);
+ } else {
+ r = __raw_readl(base + USB_DWC_CTRL1);
+ r |= USB_DWC_CTRL1_OTGD;
+ __raw_writel(r, base + USB_DWC_CTRL1);
+ wmb();
+
+ r = __raw_readl(base + USB_DWC_CTRL3);
+ r &= ~USB_DWC_CTRL3_OTG0_CKEN;
+ __raw_writel(r, base + USB_DWC_CTRL3);
+ wmb();
+
+ __au1300_usb_phyctl(base, enable);
+ }
+}
+
+static inline int au1300_usb_control(int block, int enable)
+{
+ void __iomem *base =
+ (void __iomem *)KSEG1ADDR(AU1300_USB_CTL_PHYS_ADDR);
+ int ret = 0;
+
+ switch (block) {
+ case ALCHEMY_USB_OHCI0:
+ __au1300_ohci_control(base, enable, 0);
+ break;
+ case ALCHEMY_USB_OHCI1:
+ __au1300_ohci_control(base, enable, 1);
+ break;
+ case ALCHEMY_USB_EHCI0:
+ __au1300_ehci_control(base, enable);
+ break;
+ case ALCHEMY_USB_UDC0:
+ __au1300_udc_control(base, enable);
+ break;
+ case ALCHEMY_USB_OTG0:
+ __au1300_otg_control(base, enable);
+ break;
+ default:
+ ret = -ENODEV;
+ }
+ return ret;
+}
+
+static inline void au1300_usb_init(void)
+{
+ void __iomem *base =
+ (void __iomem *)KSEG1ADDR(AU1300_USB_CTL_PHYS_ADDR);
+
+ /* set some sane defaults. Note: we don't fiddle with DWC_CTRL4
+ * here at all: Port 2 routing (EHCI or UDC) must be set either
+ * by boot firmware or platform init code; I can't autodetect
+ * a sane setting.
+ */
+ __raw_writel(0, base + USB_INT_ENABLE); /* disable all USB irqs */
+ wmb();
+ __raw_writel(0, base + USB_DWC_CTRL3); /* disable all clocks */
+ wmb();
+ __raw_writel(~0, base + USB_MSR_ERR); /* clear all errors */
+ wmb();
+ __raw_writel(~0, base + USB_INT_STATUS); /* clear int status */
+ wmb();
+ /* set coherent access bit */
+ __raw_writel(USB_SBUS_CTRL_SBCA, base + USB_SBUS_CTRL);
+ wmb();
+}
+
+static inline void __au1200_ohci_control(void __iomem *base, int enable)
+{
+ unsigned long r = __raw_readl(base + AU1200_USBCFG);
+ if (enable) {
+ __raw_writel(r | USBCFG_OCE, base + AU1200_USBCFG);
+ wmb();
+ udelay(2000);
+ } else {
+ __raw_writel(r & ~USBCFG_OCE, base + AU1200_USBCFG);
+ wmb();
+ udelay(1000);
+ }
+}
+
+static inline void __au1200_ehci_control(void __iomem *base, int enable)
+{
+ unsigned long r = __raw_readl(base + AU1200_USBCFG);
+ if (enable) {
+ __raw_writel(r | USBCFG_ECE | USBCFG_PPE, base + AU1200_USBCFG);
+ wmb();
+ udelay(1000);
+ } else {
+ if (!(r & USBCFG_UCE)) /* UDC also off? */
+ r &= ~USBCFG_PPE; /* yes: disable HS PHY PLL */
+ __raw_writel(r & ~USBCFG_ECE, base + AU1200_USBCFG);
+ wmb();
+ udelay(1000);
+ }
+}
+
+static inline void __au1200_udc_control(void __iomem *base, int enable)
+{
+ unsigned long r = __raw_readl(base + AU1200_USBCFG);
+ if (enable) {
+ __raw_writel(r | USBCFG_UCE | USBCFG_PPE, base + AU1200_USBCFG);
+ wmb();
+ } else {
+ if (!(r & USBCFG_ECE)) /* EHCI also off? */
+ r &= ~USBCFG_PPE; /* yes: disable HS PHY PLL */
+ __raw_writel(r & ~USBCFG_UCE, base + AU1200_USBCFG);
+ wmb();
+ }
+}
+
+static inline int au1200_usb_control(int block, int enable)
+{
+ void __iomem *base =
+ (void __iomem *)KSEG1ADDR(AU1200_USB_CTL_PHYS_ADDR);
+
+ switch (block) {
+ case ALCHEMY_USB_OHCI0:
+ __au1200_ohci_control(base, enable);
+ break;
+ case ALCHEMY_USB_UDC0:
+ __au1200_udc_control(base, enable);
+ break;
+ case ALCHEMY_USB_EHCI0:
+ __au1200_ehci_control(base, enable);
+ break;
+ default:
+ return -ENODEV;
+ }
+ return 0;
+}
+
+
+/* initialize USB block(s) to a known working state */
+static inline void au1200_usb_init(void)
+{
+ void __iomem *base =
+ (void __iomem *)KSEG1ADDR(AU1200_USB_CTL_PHYS_ADDR);
+ __raw_writel(USBCFG_INIT_AU1200, base + AU1200_USBCFG);
+ wmb();
+ udelay(1000);
+}
+
+static inline int au1000_usb_init(unsigned long rb, int reg)
+{
+ void __iomem *base = (void __iomem *)KSEG1ADDR(rb + reg);
+ unsigned long r = __raw_readl(base);
+ struct clk *c;
+
+ /* 48MHz check. Don't init if no one can provide it */
+ c = clk_get(NULL, "usbh_clk");
+ if (IS_ERR(c))
+ return -ENODEV;
+ if (clk_round_rate(c, 48000000) != 48000000) {
+ clk_put(c);
+ return -ENODEV;
+ }
+ if (clk_set_rate(c, 48000000)) {
+ clk_put(c);
+ return -ENODEV;
+ }
+ clk_put(c);
+
+#if defined(__BIG_ENDIAN)
+ r |= USBHEN_BE;
+#endif
+ r |= USBHEN_C;
+
+ __raw_writel(r, base);
+ wmb();
+ udelay(1000);
+
+ return 0;
+}
+
+
+static inline void __au1xx0_ohci_control(int enable, unsigned long rb, int creg)
+{
+ void __iomem *base = (void __iomem *)KSEG1ADDR(rb);
+ unsigned long r = __raw_readl(base + creg);
+ struct clk *c = clk_get(NULL, "usbh_clk");
+
+ if (IS_ERR(c))
+ return;
+
+ if (enable) {
+ if (clk_prepare_enable(c))
+ goto out;
+
+ __raw_writel(r | USBHEN_CE, base + creg);
+ wmb();
+ udelay(1000);
+ __raw_writel(r | USBHEN_CE | USBHEN_E, base + creg);
+ wmb();
+ udelay(1000);
+
+ /* wait for reset complete (read reg twice: au1500 erratum) */
+ while (__raw_readl(base + creg),
+ !(__raw_readl(base + creg) & USBHEN_RD))
+ udelay(1000);
+ } else {
+ __raw_writel(r & ~(USBHEN_CE | USBHEN_E), base + creg);
+ wmb();
+ clk_disable_unprepare(c);
+ }
+out:
+ clk_put(c);
+}
+
+static inline int au1000_usb_control(int block, int enable, unsigned long rb,
+ int creg)
+{
+ int ret = 0;
+
+ switch (block) {
+ case ALCHEMY_USB_OHCI0:
+ __au1xx0_ohci_control(enable, rb, creg);
+ break;
+ default:
+ ret = -ENODEV;
+ }
+ return ret;
+}
+
+/*
+ * alchemy_usb_control - control Alchemy on-chip USB blocks
+ * @block: USB block to target
+ * @enable: set 1 to enable a block, 0 to disable
+ */
+int alchemy_usb_control(int block, int enable)
+{
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&alchemy_usb_lock, flags);
+ switch (alchemy_get_cputype()) {
+ case ALCHEMY_CPU_AU1000:
+ case ALCHEMY_CPU_AU1500:
+ case ALCHEMY_CPU_AU1100:
+ ret = au1000_usb_control(block, enable,
+ AU1000_USB_OHCI_PHYS_ADDR, AU1000_OHCICFG);
+ break;
+ case ALCHEMY_CPU_AU1550:
+ ret = au1000_usb_control(block, enable,
+ AU1550_USB_OHCI_PHYS_ADDR, AU1550_OHCICFG);
+ break;
+ case ALCHEMY_CPU_AU1200:
+ ret = au1200_usb_control(block, enable);
+ break;
+ case ALCHEMY_CPU_AU1300:
+ ret = au1300_usb_control(block, enable);
+ break;
+ default:
+ ret = -ENODEV;
+ }
+ spin_unlock_irqrestore(&alchemy_usb_lock, flags);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(alchemy_usb_control);
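+
+/* Typical use, as in the OHCI/EHCI power_on/power_off callbacks in
+ * platform.c:
+ *
+ *	alchemy_usb_control(ALCHEMY_USB_OHCI0, 1);	enables the block
+ *	alchemy_usb_control(ALCHEMY_USB_OHCI0, 0);	disables it again
+ */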
+
+
+static unsigned long alchemy_usb_pmdata[2];
+
+static void au1000_usb_pm(unsigned long br, int creg, int susp)
+{
+ void __iomem *base = (void __iomem *)KSEG1ADDR(br);
+
+ if (susp) {
+ alchemy_usb_pmdata[0] = __raw_readl(base + creg);
+ /* There appears to be some undocumented reset register.... */
+ __raw_writel(0, base + 0x04);
+ wmb();
+ __raw_writel(0, base + creg);
+ wmb();
+ } else {
+ __raw_writel(alchemy_usb_pmdata[0], base + creg);
+ wmb();
+ }
+}
+
+static void au1200_usb_pm(int susp)
+{
+ void __iomem *base =
+ (void __iomem *)KSEG1ADDR(AU1200_USB_OTG_PHYS_ADDR);
+ if (susp) {
+ /* save OTG_CAP/MUX registers which indicate port routing */
+ /* FIXME: write an OTG driver to do that */
+ alchemy_usb_pmdata[0] = __raw_readl(base + 0x00);
+ alchemy_usb_pmdata[1] = __raw_readl(base + 0x04);
+ } else {
+ /* restore access to all MMIO areas */
+ au1200_usb_init();
+
+ /* restore OTG_CAP/MUX registers */
+ __raw_writel(alchemy_usb_pmdata[0], base + 0x00);
+ __raw_writel(alchemy_usb_pmdata[1], base + 0x04);
+ wmb();
+ }
+}
+
+static void au1300_usb_pm(int susp)
+{
+ void __iomem *base =
+ (void __iomem *)KSEG1ADDR(AU1300_USB_CTL_PHYS_ADDR);
+ /* remember Port2 routing */
+ if (susp) {
+ alchemy_usb_pmdata[0] = __raw_readl(base + USB_DWC_CTRL4);
+ } else {
+ au1300_usb_init();
+ __raw_writel(alchemy_usb_pmdata[0], base + USB_DWC_CTRL4);
+ wmb();
+ }
+}
+
+static void alchemy_usb_pm(int susp)
+{
+ switch (alchemy_get_cputype()) {
+ case ALCHEMY_CPU_AU1000:
+ case ALCHEMY_CPU_AU1500:
+ case ALCHEMY_CPU_AU1100:
+ au1000_usb_pm(AU1000_USB_OHCI_PHYS_ADDR, AU1000_OHCICFG, susp);
+ break;
+ case ALCHEMY_CPU_AU1550:
+ au1000_usb_pm(AU1550_USB_OHCI_PHYS_ADDR, AU1550_OHCICFG, susp);
+ break;
+ case ALCHEMY_CPU_AU1200:
+ au1200_usb_pm(susp);
+ break;
+ case ALCHEMY_CPU_AU1300:
+ au1300_usb_pm(susp);
+ break;
+ }
+}
+
+static int alchemy_usb_suspend(void)
+{
+ alchemy_usb_pm(1);
+ return 0;
+}
+
+static void alchemy_usb_resume(void)
+{
+ alchemy_usb_pm(0);
+}
+
+static struct syscore_ops alchemy_usb_pm_ops = {
+ .suspend = alchemy_usb_suspend,
+ .resume = alchemy_usb_resume,
+};
+
+static int __init alchemy_usb_init(void)
+{
+ int ret = 0;
+
+ switch (alchemy_get_cputype()) {
+ case ALCHEMY_CPU_AU1000:
+ case ALCHEMY_CPU_AU1500:
+ case ALCHEMY_CPU_AU1100:
+ ret = au1000_usb_init(AU1000_USB_OHCI_PHYS_ADDR,
+ AU1000_OHCICFG);
+ break;
+ case ALCHEMY_CPU_AU1550:
+ ret = au1000_usb_init(AU1550_USB_OHCI_PHYS_ADDR,
+ AU1550_OHCICFG);
+ break;
+ case ALCHEMY_CPU_AU1200:
+ au1200_usb_init();
+ break;
+ case ALCHEMY_CPU_AU1300:
+ au1300_usb_init();
+ break;
+ }
+
+ if (!ret)
+ register_syscore_ops(&alchemy_usb_pm_ops);
+
+ return ret;
+}
+arch_initcall(alchemy_usb_init);
diff --git a/arch/mips/alchemy/common/vss.c b/arch/mips/alchemy/common/vss.c
new file mode 100644
index 000000000..3d0d468d9
--- /dev/null
+++ b/arch/mips/alchemy/common/vss.c
@@ -0,0 +1,85 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Au1300 media block power gating (VSS)
+ *
+ * This is a stop-gap solution until I have the clock framework integration
+ * ready. This stuff here really must be handled transparently when clocks
+ * for various media blocks are enabled/disabled.
+ */
+
+#include <linux/export.h>
+#include <linux/spinlock.h>
+#include <asm/mach-au1x00/au1000.h>
+
+#define VSS_GATE 0x00 /* gate wait timers */
+#define VSS_CLKRST 0x04 /* clock/block control */
+#define VSS_FTR 0x08 /* footers */
+
+#define VSS_ADDR(blk) (KSEG1ADDR(AU1300_VSS_PHYS_ADDR) + (blk * 0x0c))
+
+static DEFINE_SPINLOCK(au1300_vss_lock);
+
+/* enable a block as outlined in the databook */
+static inline void __enable_block(int block)
+{
+ void __iomem *base = (void __iomem *)VSS_ADDR(block);
+
+ __raw_writel(3, base + VSS_CLKRST); /* enable clock, assert reset */
+ wmb();
+
+ __raw_writel(0x01fffffe, base + VSS_GATE); /* maximum setup time */
+ wmb();
+
+ /* enable footers in sequence */
+ __raw_writel(0x01, base + VSS_FTR);
+ wmb();
+ __raw_writel(0x03, base + VSS_FTR);
+ wmb();
+ __raw_writel(0x07, base + VSS_FTR);
+ wmb();
+ __raw_writel(0x0f, base + VSS_FTR);
+ wmb();
+
+ __raw_writel(0x01ffffff, base + VSS_GATE); /* start FSM too */
+ wmb();
+
+ __raw_writel(2, base + VSS_CLKRST); /* deassert reset */
+ wmb();
+
+ __raw_writel(0x1f, base + VSS_FTR); /* enable isolation cells */
+ wmb();
+}
+
+/* disable a block as outlined in the databook */
+static inline void __disable_block(int block)
+{
+ void __iomem *base = (void __iomem *)VSS_ADDR(block);
+
+ __raw_writel(0x0f, base + VSS_FTR); /* disable isolation cells */
+ wmb();
+ __raw_writel(0, base + VSS_GATE); /* disable FSM */
+ wmb();
+ __raw_writel(3, base + VSS_CLKRST); /* assert reset */
+ wmb();
+ __raw_writel(1, base + VSS_CLKRST); /* disable clock */
+ wmb();
+ __raw_writel(0, base + VSS_FTR); /* disable all footers */
+ wmb();
+}
+
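+/* Gate power/clocks of a single Au1300 media block. Block numbering
+ * follows the Au1300 databook; a block's VSS domain must be enabled
+ * before its registers are accessed and can be disabled again when the
+ * block is idle.
+ */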
+void au1300_vss_block_control(int block, int enable)
+{
+ unsigned long flags;
+
+ if (alchemy_get_cputype() != ALCHEMY_CPU_AU1300)
+ return;
+
+ /* only one block at a time */
+ spin_lock_irqsave(&au1300_vss_lock, flags);
+ if (enable)
+ __enable_block(block);
+ else
+ __disable_block(block);
+ spin_unlock_irqrestore(&au1300_vss_lock, flags);
+}
+EXPORT_SYMBOL_GPL(au1300_vss_block_control);