From 2c3c1048746a4622d8c89a29670120dc8fab93c4 Mon Sep 17 00:00:00 2001 From: Daniel Baumann Date: Sun, 7 Apr 2024 20:49:45 +0200 Subject: Adding upstream version 6.1.76. Signed-off-by: Daniel Baumann --- arch/powerpc/platforms/4xx/Makefile | 7 + arch/powerpc/platforms/4xx/cpm.c | 332 +++++ arch/powerpc/platforms/4xx/gpio.c | 195 +++ arch/powerpc/platforms/4xx/hsta_msi.c | 207 +++ arch/powerpc/platforms/4xx/machine_check.c | 23 + arch/powerpc/platforms/4xx/pci.c | 2186 ++++++++++++++++++++++++++++ arch/powerpc/platforms/4xx/pci.h | 505 +++++++ arch/powerpc/platforms/4xx/soc.c | 217 +++ arch/powerpc/platforms/4xx/uic.c | 330 +++++ 9 files changed, 4002 insertions(+) create mode 100644 arch/powerpc/platforms/4xx/Makefile create mode 100644 arch/powerpc/platforms/4xx/cpm.c create mode 100644 arch/powerpc/platforms/4xx/gpio.c create mode 100644 arch/powerpc/platforms/4xx/hsta_msi.c create mode 100644 arch/powerpc/platforms/4xx/machine_check.c create mode 100644 arch/powerpc/platforms/4xx/pci.c create mode 100644 arch/powerpc/platforms/4xx/pci.h create mode 100644 arch/powerpc/platforms/4xx/soc.c create mode 100644 arch/powerpc/platforms/4xx/uic.c diff --git a/arch/powerpc/platforms/4xx/Makefile b/arch/powerpc/platforms/4xx/Makefile new file mode 100644 index 000000000..2071a0abe --- /dev/null +++ b/arch/powerpc/platforms/4xx/Makefile @@ -0,0 +1,7 @@ +# SPDX-License-Identifier: GPL-2.0-only +obj-y += uic.o machine_check.o +obj-$(CONFIG_4xx_SOC) += soc.o +obj-$(CONFIG_PCI) += pci.o +obj-$(CONFIG_PPC4xx_HSTA_MSI) += hsta_msi.o +obj-$(CONFIG_PPC4xx_CPM) += cpm.o +obj-$(CONFIG_PPC4xx_GPIO) += gpio.o diff --git a/arch/powerpc/platforms/4xx/cpm.c b/arch/powerpc/platforms/4xx/cpm.c new file mode 100644 index 000000000..182e12855 --- /dev/null +++ b/arch/powerpc/platforms/4xx/cpm.c @@ -0,0 +1,332 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * PowerPC 4xx Clock and Power Management + * + * Copyright (C) 2010, Applied Micro Circuits Corporation + * Victor Gallardo (vgallardo@apm.com) + * + * Based on arch/powerpc/platforms/44x/idle.c: + * Jerone Young + * Copyright 2008 IBM Corp. + * + * Based on arch/powerpc/sysdev/fsl_pmc.c: + * Anton Vorontsov + * Copyright 2009 MontaVista Software, Inc. + * + * See file CREDITS for list of people who contributed to this + * project. + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#define CPM_ER 0 +#define CPM_FR 1 +#define CPM_SR 2 + +#define CPM_IDLE_WAIT 0 +#define CPM_IDLE_DOZE 1 + +struct cpm { + dcr_host_t dcr_host; + unsigned int dcr_offset[3]; + unsigned int powersave_off; + unsigned int unused; + unsigned int idle_doze; + unsigned int standby; + unsigned int suspend; +}; + +static struct cpm cpm; + +struct cpm_idle_mode { + unsigned int enabled; + const char *name; +}; + +static struct cpm_idle_mode idle_mode[] = { + [CPM_IDLE_WAIT] = { 1, "wait" }, /* default */ + [CPM_IDLE_DOZE] = { 0, "doze" }, +}; + +static unsigned int cpm_set(unsigned int cpm_reg, unsigned int mask) +{ + unsigned int value; + + /* The CPM controller supports 3 different types of sleep interface, + * known as class 1, 2 and 3. Class 1 units are unconditionally put + * to sleep when the corresponding CPM bit is set. For class 2 and 3 + * units this is not the case: they go to sleep only when they can. + * Here we do not verify; we just set the bits and expect the units + * to eventually go off when they can.
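+ *
+ * As an illustration (nothing beyond what this patch already does):
+ * cpm_init() below uses this same helper to gate off the units the
+ * device tree marks as unused,
+ *
+ *	cpm_set(CPM_ER, cpm.unused);
+ *	cpm_set(CPM_FR, cpm.unused);
+ *
+ * and then simply trusts class 2/3 units to stop once they go idle.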
+ */ + value = dcr_read(cpm.dcr_host, cpm.dcr_offset[cpm_reg]); + dcr_write(cpm.dcr_host, cpm.dcr_offset[cpm_reg], value | mask); + + /* return old state, to restore later if needed */ + return value; +} + +static void cpm_idle_wait(void) +{ + unsigned long msr_save; + + /* save off initial state */ + msr_save = mfmsr(); + /* sync required when CPM0_ER[CPU] is set */ + mb(); + /* set wait state MSR */ + mtmsr(msr_save|MSR_WE|MSR_EE|MSR_CE|MSR_DE); + isync(); + /* return to initial state */ + mtmsr(msr_save); + isync(); +} + +static void cpm_idle_sleep(unsigned int mask) +{ + unsigned int er_save; + + /* update CPM_ER state */ + er_save = cpm_set(CPM_ER, mask); + + /* go to wait state so that CPM0_ER[CPU] can take effect */ + cpm_idle_wait(); + + /* restore CPM_ER state */ + dcr_write(cpm.dcr_host, cpm.dcr_offset[CPM_ER], er_save); +} + +static void cpm_idle_doze(void) +{ + cpm_idle_sleep(cpm.idle_doze); +} + +static void cpm_idle_config(int mode) +{ + int i; + + if (idle_mode[mode].enabled) + return; + + for (i = 0; i < ARRAY_SIZE(idle_mode); i++) + idle_mode[i].enabled = 0; + + idle_mode[mode].enabled = 1; +} + +static ssize_t cpm_idle_show(struct kobject *kobj, + struct kobj_attribute *attr, char *buf) +{ + char *s = buf; + int i; + + for (i = 0; i < ARRAY_SIZE(idle_mode); i++) { + if (idle_mode[i].enabled) + s += sprintf(s, "[%s] ", idle_mode[i].name); + else + s += sprintf(s, "%s ", idle_mode[i].name); + } + + *(s-1) = '\n'; /* convert the last space to a newline */ + + return s - buf; +} + +static ssize_t cpm_idle_store(struct kobject *kobj, + struct kobj_attribute *attr, + const char *buf, size_t n) +{ + int i; + char *p; + int len; + + p = memchr(buf, '\n', n); + len = p ? p - buf : n; + + for (i = 0; i < ARRAY_SIZE(idle_mode); i++) { + if (strncmp(buf, idle_mode[i].name, len) == 0) { + cpm_idle_config(i); + return n; + } + } + + return -EINVAL; +} + +static struct kobj_attribute cpm_idle_attr = + __ATTR(idle, 0644, cpm_idle_show, cpm_idle_store); + +static void __init cpm_idle_config_sysfs(void) +{ + struct device *dev; + unsigned long ret; + + dev = get_cpu_device(0); + + ret = sysfs_create_file(&dev->kobj, + &cpm_idle_attr.attr); + if (ret) + printk(KERN_WARNING + "cpm: failed to create idle sysfs entry\n"); +} + +static void cpm_idle(void) +{ + if (idle_mode[CPM_IDLE_DOZE].enabled) + cpm_idle_doze(); + else + cpm_idle_wait(); +} + +static int cpm_suspend_valid(suspend_state_t state) +{ + switch (state) { + case PM_SUSPEND_STANDBY: + return !!cpm.standby; + case PM_SUSPEND_MEM: + return !!cpm.suspend; + default: + return 0; + } +} + +static void cpm_suspend_standby(unsigned int mask) +{ + unsigned long tcr_save; + + /* disable decrement interrupt */ + tcr_save = mfspr(SPRN_TCR); + mtspr(SPRN_TCR, tcr_save & ~TCR_DIE); + + /* go to sleep state */ + cpm_idle_sleep(mask); + + /* restore decrement interrupt */ + mtspr(SPRN_TCR, tcr_save); +} + +static int cpm_suspend_enter(suspend_state_t state) +{ + switch (state) { + case PM_SUSPEND_STANDBY: + cpm_suspend_standby(cpm.standby); + break; + case PM_SUSPEND_MEM: + cpm_suspend_standby(cpm.suspend); + break; + } + + return 0; +} + +static const struct platform_suspend_ops cpm_suspend_ops = { + .valid = cpm_suspend_valid, + .enter = cpm_suspend_enter, +}; + +static int __init cpm_get_uint_property(struct device_node *np, + const char *name) +{ + int len; + const unsigned int *prop = of_get_property(np, name, &len); + + if (prop == NULL || len < sizeof(u32)) + return 0; + + return *prop; +} + +static int __init cpm_init(void) +{ + struct 
device_node *np; + int dcr_base, dcr_len; + int ret = 0; + + if (!cpm.powersave_off) { + cpm_idle_config(CPM_IDLE_WAIT); + ppc_md.power_save = &cpm_idle; + } + + np = of_find_compatible_node(NULL, NULL, "ibm,cpm"); + if (!np) { + ret = -EINVAL; + goto out; + } + + dcr_base = dcr_resource_start(np, 0); + dcr_len = dcr_resource_len(np, 0); + + if (dcr_base == 0 || dcr_len == 0) { + printk(KERN_ERR "cpm: could not parse dcr property for %pOF\n", + np); + ret = -EINVAL; + goto node_put; + } + + cpm.dcr_host = dcr_map(np, dcr_base, dcr_len); + + if (!DCR_MAP_OK(cpm.dcr_host)) { + printk(KERN_ERR "cpm: failed to map dcr property for %pOF\n", + np); + ret = -EINVAL; + goto node_put; + } + + /* All 4xx SoCs with a CPM controller have one of two + * different order for the CPM registers. Some have the + * CPM registers in the following order (ER,FR,SR). The + * others have them in the following order (SR,ER,FR). + */ + + if (cpm_get_uint_property(np, "er-offset") == 0) { + cpm.dcr_offset[CPM_ER] = 0; + cpm.dcr_offset[CPM_FR] = 1; + cpm.dcr_offset[CPM_SR] = 2; + } else { + cpm.dcr_offset[CPM_ER] = 1; + cpm.dcr_offset[CPM_FR] = 2; + cpm.dcr_offset[CPM_SR] = 0; + } + + /* Now let's see what IPs to turn off for the following modes */ + + cpm.unused = cpm_get_uint_property(np, "unused-units"); + cpm.idle_doze = cpm_get_uint_property(np, "idle-doze"); + cpm.standby = cpm_get_uint_property(np, "standby"); + cpm.suspend = cpm_get_uint_property(np, "suspend"); + + /* If some IPs are unused let's turn them off now */ + + if (cpm.unused) { + cpm_set(CPM_ER, cpm.unused); + cpm_set(CPM_FR, cpm.unused); + } + + /* Now let's export interfaces */ + + if (!cpm.powersave_off && cpm.idle_doze) + cpm_idle_config_sysfs(); + + if (cpm.standby || cpm.suspend) + suspend_set_ops(&cpm_suspend_ops); +node_put: + of_node_put(np); +out: + return ret; +} + +late_initcall(cpm_init); + +static int __init cpm_powersave_off(char *arg) +{ + cpm.powersave_off = 1; + return 1; +} +__setup("powersave=off", cpm_powersave_off); diff --git a/arch/powerpc/platforms/4xx/gpio.c b/arch/powerpc/platforms/4xx/gpio.c new file mode 100644 index 000000000..49ee8d365 --- /dev/null +++ b/arch/powerpc/platforms/4xx/gpio.c @@ -0,0 +1,195 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * PPC4xx gpio driver + * + * Copyright (c) 2008 Harris Corporation + * Copyright (c) 2008 Sascha Hauer , Pengutronix + * Copyright (c) MontaVista Software, Inc. 2008. + * + * Author: Steve Falco + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define GPIO_MASK(gpio) (0x80000000 >> (gpio)) +#define GPIO_MASK2(gpio) (0xc0000000 >> ((gpio) * 2)) + +/* Physical GPIO register layout */ +struct ppc4xx_gpio { + __be32 or; + __be32 tcr; + __be32 osrl; + __be32 osrh; + __be32 tsrl; + __be32 tsrh; + __be32 odr; + __be32 ir; + __be32 rr1; + __be32 rr2; + __be32 rr3; + __be32 reserved1; + __be32 isr1l; + __be32 isr1h; + __be32 isr2l; + __be32 isr2h; + __be32 isr3l; + __be32 isr3h; +}; + +struct ppc4xx_gpio_chip { + struct of_mm_gpio_chip mm_gc; + spinlock_t lock; +}; + +/* + * GPIO LIB API implementation for GPIOs + * + * There are a maximum of 32 gpios in each gpio controller. 
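+ *
+ * A consumer-side sketch (hypothetical board code, not part of this
+ * driver; the GPIO number is made up) reaches these callbacks through
+ * the usual gpiolib entry points:
+ *
+ *	err = gpio_request(3, "led");
+ *	if (!err)
+ *		err = gpio_direction_output(3, 1);
+ *
+ * which lands in ppc4xx_gpio_dir_out() below with gpio == 3, val == 1.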
+ */ + +static int ppc4xx_gpio_get(struct gpio_chip *gc, unsigned int gpio) +{ + struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc); + struct ppc4xx_gpio __iomem *regs = mm_gc->regs; + + return !!(in_be32(®s->ir) & GPIO_MASK(gpio)); +} + +static inline void +__ppc4xx_gpio_set(struct gpio_chip *gc, unsigned int gpio, int val) +{ + struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc); + struct ppc4xx_gpio __iomem *regs = mm_gc->regs; + + if (val) + setbits32(®s->or, GPIO_MASK(gpio)); + else + clrbits32(®s->or, GPIO_MASK(gpio)); +} + +static void +ppc4xx_gpio_set(struct gpio_chip *gc, unsigned int gpio, int val) +{ + struct ppc4xx_gpio_chip *chip = gpiochip_get_data(gc); + unsigned long flags; + + spin_lock_irqsave(&chip->lock, flags); + + __ppc4xx_gpio_set(gc, gpio, val); + + spin_unlock_irqrestore(&chip->lock, flags); + + pr_debug("%s: gpio: %d val: %d\n", __func__, gpio, val); +} + +static int ppc4xx_gpio_dir_in(struct gpio_chip *gc, unsigned int gpio) +{ + struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc); + struct ppc4xx_gpio_chip *chip = gpiochip_get_data(gc); + struct ppc4xx_gpio __iomem *regs = mm_gc->regs; + unsigned long flags; + + spin_lock_irqsave(&chip->lock, flags); + + /* Disable open-drain function */ + clrbits32(®s->odr, GPIO_MASK(gpio)); + + /* Float the pin */ + clrbits32(®s->tcr, GPIO_MASK(gpio)); + + /* Bits 0-15 use TSRL/OSRL, bits 16-31 use TSRH/OSRH */ + if (gpio < 16) { + clrbits32(®s->osrl, GPIO_MASK2(gpio)); + clrbits32(®s->tsrl, GPIO_MASK2(gpio)); + } else { + clrbits32(®s->osrh, GPIO_MASK2(gpio)); + clrbits32(®s->tsrh, GPIO_MASK2(gpio)); + } + + spin_unlock_irqrestore(&chip->lock, flags); + + return 0; +} + +static int +ppc4xx_gpio_dir_out(struct gpio_chip *gc, unsigned int gpio, int val) +{ + struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc); + struct ppc4xx_gpio_chip *chip = gpiochip_get_data(gc); + struct ppc4xx_gpio __iomem *regs = mm_gc->regs; + unsigned long flags; + + spin_lock_irqsave(&chip->lock, flags); + + /* First set initial value */ + __ppc4xx_gpio_set(gc, gpio, val); + + /* Disable open-drain function */ + clrbits32(®s->odr, GPIO_MASK(gpio)); + + /* Drive the pin */ + setbits32(®s->tcr, GPIO_MASK(gpio)); + + /* Bits 0-15 use TSRL, bits 16-31 use TSRH */ + if (gpio < 16) { + clrbits32(®s->osrl, GPIO_MASK2(gpio)); + clrbits32(®s->tsrl, GPIO_MASK2(gpio)); + } else { + clrbits32(®s->osrh, GPIO_MASK2(gpio)); + clrbits32(®s->tsrh, GPIO_MASK2(gpio)); + } + + spin_unlock_irqrestore(&chip->lock, flags); + + pr_debug("%s: gpio: %d val: %d\n", __func__, gpio, val); + + return 0; +} + +static int __init ppc4xx_add_gpiochips(void) +{ + struct device_node *np; + + for_each_compatible_node(np, NULL, "ibm,ppc4xx-gpio") { + int ret; + struct ppc4xx_gpio_chip *ppc4xx_gc; + struct of_mm_gpio_chip *mm_gc; + struct gpio_chip *gc; + + ppc4xx_gc = kzalloc(sizeof(*ppc4xx_gc), GFP_KERNEL); + if (!ppc4xx_gc) { + ret = -ENOMEM; + goto err; + } + + spin_lock_init(&ppc4xx_gc->lock); + + mm_gc = &ppc4xx_gc->mm_gc; + gc = &mm_gc->gc; + + gc->ngpio = 32; + gc->direction_input = ppc4xx_gpio_dir_in; + gc->direction_output = ppc4xx_gpio_dir_out; + gc->get = ppc4xx_gpio_get; + gc->set = ppc4xx_gpio_set; + + ret = of_mm_gpiochip_add_data(np, mm_gc, ppc4xx_gc); + if (ret) + goto err; + continue; +err: + pr_err("%pOF: registration failed with status %d\n", np, ret); + kfree(ppc4xx_gc); + /* try others anyway */ + } + return 0; +} +arch_initcall(ppc4xx_add_gpiochips); diff --git a/arch/powerpc/platforms/4xx/hsta_msi.c b/arch/powerpc/platforms/4xx/hsta_msi.c new file mode 
100644 index 000000000..d4f7fff1f --- /dev/null +++ b/arch/powerpc/platforms/4xx/hsta_msi.c @@ -0,0 +1,207 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * MSI support for PPC4xx SoCs using High Speed Transfer Assist (HSTA) for + * generation of the interrupt. + * + * Copyright © 2013 Alistair Popple IBM Corporation + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +struct ppc4xx_hsta_msi { + struct device *dev; + + /* The ioremapped HSTA MSI IO space */ + u32 __iomem *data; + + /* Physical address of HSTA MSI IO space */ + u64 address; + struct msi_bitmap bmp; + + /* An array mapping offsets to hardware IRQs */ + int *irq_map; + + /* Number of hwirqs supported */ + int irq_count; +}; +static struct ppc4xx_hsta_msi ppc4xx_hsta_msi; + +static int hsta_setup_msi_irqs(struct pci_dev *dev, int nvec, int type) +{ + struct msi_msg msg; + struct msi_desc *entry; + int irq, hwirq; + u64 addr; + + /* We don't support MSI-X */ + if (type == PCI_CAP_ID_MSIX) { + pr_debug("%s: MSI-X not supported.\n", __func__); + return -EINVAL; + } + + msi_for_each_desc(entry, &dev->dev, MSI_DESC_NOTASSOCIATED) { + irq = msi_bitmap_alloc_hwirqs(&ppc4xx_hsta_msi.bmp, 1); + if (irq < 0) { + pr_debug("%s: Failed to allocate msi interrupt\n", + __func__); + return irq; + } + + hwirq = ppc4xx_hsta_msi.irq_map[irq]; + if (!hwirq) { + pr_err("%s: Failed mapping irq %d\n", __func__, irq); + return -EINVAL; + } + + /* + * HSTA generates interrupts on writes to 128-bit aligned + * addresses. + */ + addr = ppc4xx_hsta_msi.address + irq*0x10; + msg.address_hi = upper_32_bits(addr); + msg.address_lo = lower_32_bits(addr); + + /* Data is not used by the HSTA. */ + msg.data = 0; + + pr_debug("%s: Setup irq %d (0x%0llx)\n", __func__, hwirq, + (((u64) msg.address_hi) << 32) | msg.address_lo); + + if (irq_set_msi_desc(hwirq, entry)) { + pr_err( + "%s: Invalid hwirq %d specified in device tree\n", + __func__, hwirq); + msi_bitmap_free_hwirqs(&ppc4xx_hsta_msi.bmp, irq, 1); + return -EINVAL; + } + pci_write_msi_msg(hwirq, &msg); + } + + return 0; +} + +static int hsta_find_hwirq_offset(int hwirq) +{ + int irq; + + /* Find the offset given the hwirq */ + for (irq = 0; irq < ppc4xx_hsta_msi.irq_count; irq++) + if (ppc4xx_hsta_msi.irq_map[irq] == hwirq) + return irq; + + return -EINVAL; +} + +static void hsta_teardown_msi_irqs(struct pci_dev *dev) +{ + struct msi_desc *entry; + int irq; + + msi_for_each_desc(entry, &dev->dev, MSI_DESC_ASSOCIATED) { + irq = hsta_find_hwirq_offset(entry->irq); + + /* entry->irq should always be in irq_map */ + BUG_ON(irq < 0); + irq_set_msi_desc(entry->irq, NULL); + msi_bitmap_free_hwirqs(&ppc4xx_hsta_msi.bmp, irq, 1); + pr_debug("%s: Teardown IRQ %u (index %u)\n", __func__, + entry->irq, irq); + } +} + +static int hsta_msi_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct resource *mem; + int irq, ret, irq_count; + struct pci_controller *phb; + + mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!mem) { + dev_err(dev, "Unable to get mmio space\n"); + return -EINVAL; + } + + irq_count = of_irq_count(dev->of_node); + if (!irq_count) { + dev_err(dev, "Unable to find IRQ range\n"); + return -EINVAL; + } + + ppc4xx_hsta_msi.dev = dev; + ppc4xx_hsta_msi.address = mem->start; + ppc4xx_hsta_msi.data = ioremap(mem->start, resource_size(mem)); + ppc4xx_hsta_msi.irq_count = irq_count; + if (!ppc4xx_hsta_msi.data) { + dev_err(dev, "Unable to map memory\n"); + return -ENOMEM; + } + + ret = 
msi_bitmap_alloc(&ppc4xx_hsta_msi.bmp, irq_count, dev->of_node); + if (ret) + goto out; + + ppc4xx_hsta_msi.irq_map = kmalloc_array(irq_count, sizeof(int), + GFP_KERNEL); + if (!ppc4xx_hsta_msi.irq_map) { + ret = -ENOMEM; + goto out1; + } + + /* Setup a mapping from irq offsets to hardware irq numbers */ + for (irq = 0; irq < irq_count; irq++) { + ppc4xx_hsta_msi.irq_map[irq] = + irq_of_parse_and_map(dev->of_node, irq); + if (!ppc4xx_hsta_msi.irq_map[irq]) { + dev_err(dev, "Unable to map IRQ\n"); + ret = -EINVAL; + goto out2; + } + } + + list_for_each_entry(phb, &hose_list, list_node) { + phb->controller_ops.setup_msi_irqs = hsta_setup_msi_irqs; + phb->controller_ops.teardown_msi_irqs = hsta_teardown_msi_irqs; + } + return 0; + +out2: + kfree(ppc4xx_hsta_msi.irq_map); + +out1: + msi_bitmap_free(&ppc4xx_hsta_msi.bmp); + +out: + iounmap(ppc4xx_hsta_msi.data); + return ret; +} + +static const struct of_device_id hsta_msi_ids[] = { + { + .compatible = "ibm,hsta-msi", + }, + {} +}; + +static struct platform_driver hsta_msi_driver = { + .probe = hsta_msi_probe, + .driver = { + .name = "hsta-msi", + .of_match_table = hsta_msi_ids, + }, +}; + +static int hsta_msi_init(void) +{ + return platform_driver_register(&hsta_msi_driver); +} +subsys_initcall(hsta_msi_init); diff --git a/arch/powerpc/platforms/4xx/machine_check.c b/arch/powerpc/platforms/4xx/machine_check.c new file mode 100644 index 000000000..a905da1d6 --- /dev/null +++ b/arch/powerpc/platforms/4xx/machine_check.c @@ -0,0 +1,23 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + */ + +#include +#include +#include + +#include + +int machine_check_4xx(struct pt_regs *regs) +{ + unsigned long reason = regs->esr; + + if (reason & ESR_IMCP) { + printk("Instruction"); + mtspr(SPRN_ESR, reason & ~ESR_IMCP); + } else + printk("Data"); + printk(" machine check in kernel mode.\n"); + + return 0; +} diff --git a/arch/powerpc/platforms/4xx/pci.c b/arch/powerpc/platforms/4xx/pci.c new file mode 100644 index 000000000..ca5dd7a58 --- /dev/null +++ b/arch/powerpc/platforms/4xx/pci.c @@ -0,0 +1,2186 @@ +/* + * PCI / PCI-X / PCI-Express support for 4xx parts + * + * Copyright 2007 Ben. Herrenschmidt , IBM Corp. + * + * Most PCI Express code is coming from Stefan Roese implementation for + * arch/ppc in the Denx tree, slightly reworked by me. + * + * Copyright 2007 DENX Software Engineering, Stefan Roese + * + * Some of that comes itself from a previous implementation for 440SPE only + * by Roland Dreier: + * + * Copyright (c) 2005 Cisco Systems. All rights reserved. + * Roland Dreier + * + */ + +#undef DEBUG + +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +#include "pci.h" + +static int dma_offset_set; + +#define U64_TO_U32_LOW(val) ((u32)((val) & 0x00000000ffffffffULL)) +#define U64_TO_U32_HIGH(val) ((u32)((val) >> 32)) + +#define RES_TO_U32_LOW(val) \ + ((sizeof(resource_size_t) > sizeof(u32)) ? U64_TO_U32_LOW(val) : (val)) +#define RES_TO_U32_HIGH(val) \ + ((sizeof(resource_size_t) > sizeof(u32)) ? 
U64_TO_U32_HIGH(val) : (0)) + +static inline int ppc440spe_revA(void) +{ + /* Catch both 440SPe variants, with and without RAID6 support */ + if ((mfspr(SPRN_PVR) & 0xffefffff) == 0x53421890) + return 1; + else + return 0; +} + +static void fixup_ppc4xx_pci_bridge(struct pci_dev *dev) +{ + struct pci_controller *hose; + int i; + + if (dev->devfn != 0 || dev->bus->self != NULL) + return; + + hose = pci_bus_to_host(dev->bus); + if (hose == NULL) + return; + + if (!of_device_is_compatible(hose->dn, "ibm,plb-pciex") && + !of_device_is_compatible(hose->dn, "ibm,plb-pcix") && + !of_device_is_compatible(hose->dn, "ibm,plb-pci")) + return; + + if (of_device_is_compatible(hose->dn, "ibm,plb440epx-pci") || + of_device_is_compatible(hose->dn, "ibm,plb440grx-pci")) { + hose->indirect_type |= PPC_INDIRECT_TYPE_BROKEN_MRM; + } + + /* Hide the PCI host BARs from the kernel as their content doesn't + * fit well in the resource management + */ + for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) { + dev->resource[i].start = dev->resource[i].end = 0; + dev->resource[i].flags = 0; + } + + printk(KERN_INFO "PCI: Hiding 4xx host bridge resources %s\n", + pci_name(dev)); +} +DECLARE_PCI_FIXUP_HEADER(PCI_ANY_ID, PCI_ANY_ID, fixup_ppc4xx_pci_bridge); + +static int __init ppc4xx_parse_dma_ranges(struct pci_controller *hose, + void __iomem *reg, + struct resource *res) +{ + u64 size; + const u32 *ranges; + int rlen; + int pna = of_n_addr_cells(hose->dn); + int np = pna + 5; + + /* Default */ + res->start = 0; + size = 0x80000000; + res->end = size - 1; + res->flags = IORESOURCE_MEM | IORESOURCE_PREFETCH; + + /* Get dma-ranges property */ + ranges = of_get_property(hose->dn, "dma-ranges", &rlen); + if (ranges == NULL) + goto out; + + /* Walk it */ + while ((rlen -= np * 4) >= 0) { + u32 pci_space = ranges[0]; + u64 pci_addr = of_read_number(ranges + 1, 2); + u64 cpu_addr = of_translate_dma_address(hose->dn, ranges + 3); + size = of_read_number(ranges + pna + 3, 2); + ranges += np; + if (cpu_addr == OF_BAD_ADDR || size == 0) + continue; + + /* We only care about memory */ + if ((pci_space & 0x03000000) != 0x02000000) + continue; + + /* We currently only support memory at 0, and pci_addr + * within 32 bits space + */ + if (cpu_addr != 0 || pci_addr > 0xffffffff) { + printk(KERN_WARNING "%pOF: Ignored unsupported dma range" + " 0x%016llx...0x%016llx -> 0x%016llx\n", + hose->dn, + pci_addr, pci_addr + size - 1, cpu_addr); + continue; + } + + /* Check if not prefetchable */ + if (!(pci_space & 0x40000000)) + res->flags &= ~IORESOURCE_PREFETCH; + + + /* Use that */ + res->start = pci_addr; + /* Beware of 32 bits resources */ + if (sizeof(resource_size_t) == sizeof(u32) && + (pci_addr + size) > 0x100000000ull) + res->end = 0xffffffff; + else + res->end = res->start + size - 1; + break; + } + + /* We only support one global DMA offset */ + if (dma_offset_set && pci_dram_offset != res->start) { + printk(KERN_ERR "%pOF: dma-ranges(s) mismatch\n", hose->dn); + return -ENXIO; + } + + /* Check that we can fit all of memory as we don't support + * DMA bounce buffers + */ + if (size < total_memory) { + printk(KERN_ERR "%pOF: dma-ranges too small " + "(size=%llx total_memory=%llx)\n", + hose->dn, size, (u64)total_memory); + return -ENXIO; + } + + /* Check we are a power of 2 size and that base is a multiple of size*/ + if ((size & (size - 1)) != 0 || + (res->start & (size - 1)) != 0) { + printk(KERN_ERR "%pOF: dma-ranges unaligned\n", hose->dn); + return -ENXIO; + } + + /* Check that we are fully contained within 32 bits space if we are 
not + * running on a 460sx or 476fpe which have 64 bit bus addresses. + */ + if (res->end > 0xffffffff && + !(of_device_is_compatible(hose->dn, "ibm,plb-pciex-460sx") + || of_device_is_compatible(hose->dn, "ibm,plb-pciex-476fpe"))) { + printk(KERN_ERR "%pOF: dma-ranges outside of 32 bits space\n", + hose->dn); + return -ENXIO; + } + out: + dma_offset_set = 1; + pci_dram_offset = res->start; + hose->dma_window_base_cur = res->start; + hose->dma_window_size = resource_size(res); + + printk(KERN_INFO "4xx PCI DMA offset set to 0x%08lx\n", + pci_dram_offset); + printk(KERN_INFO "4xx PCI DMA window base to 0x%016llx\n", + (unsigned long long)hose->dma_window_base_cur); + printk(KERN_INFO "DMA window size 0x%016llx\n", + (unsigned long long)hose->dma_window_size); + return 0; +} + +/* + * 4xx PCI 2.x part + */ + +static int __init ppc4xx_setup_one_pci_PMM(struct pci_controller *hose, + void __iomem *reg, + u64 plb_addr, + u64 pci_addr, + u64 size, + unsigned int flags, + int index) +{ + u32 ma, pcila, pciha; + + /* Hack warning ! The "old" PCI 2.x cell only let us configure the low + * 32-bit of incoming PLB addresses. The top 4 bits of the 36-bit + * address are actually hard wired to a value that appears to depend + * on the specific SoC. For example, it's 0 on 440EP and 1 on 440EPx. + * + * The trick here is we just crop those top bits and ignore them when + * programming the chip. That means the device-tree has to be right + * for the specific part used (we don't print a warning if it's wrong + * but on the other hand, you'll crash quickly enough), but at least + * this code should work whatever the hard coded value is + */ + plb_addr &= 0xffffffffull; + + /* Note: Due to the above hack, the test below doesn't actually test + * if you address is above 4G, but it tests that address and + * (address + size) are both contained in the same 4G + */ + if ((plb_addr + size) > 0xffffffffull || !is_power_of_2(size) || + size < 0x1000 || (plb_addr & (size - 1)) != 0) { + printk(KERN_WARNING "%pOF: Resource out of range\n", hose->dn); + return -1; + } + ma = (0xffffffffu << ilog2(size)) | 1; + if (flags & IORESOURCE_PREFETCH) + ma |= 2; + + pciha = RES_TO_U32_HIGH(pci_addr); + pcila = RES_TO_U32_LOW(pci_addr); + + writel(plb_addr, reg + PCIL0_PMM0LA + (0x10 * index)); + writel(pcila, reg + PCIL0_PMM0PCILA + (0x10 * index)); + writel(pciha, reg + PCIL0_PMM0PCIHA + (0x10 * index)); + writel(ma, reg + PCIL0_PMM0MA + (0x10 * index)); + + return 0; +} + +static void __init ppc4xx_configure_pci_PMMs(struct pci_controller *hose, + void __iomem *reg) +{ + int i, j, found_isa_hole = 0; + + /* Setup outbound memory windows */ + for (i = j = 0; i < 3; i++) { + struct resource *res = &hose->mem_resources[i]; + resource_size_t offset = hose->mem_offset[i]; + + /* we only care about memory windows */ + if (!(res->flags & IORESOURCE_MEM)) + continue; + if (j > 2) { + printk(KERN_WARNING "%pOF: Too many ranges\n", hose->dn); + break; + } + + /* Configure the resource */ + if (ppc4xx_setup_one_pci_PMM(hose, reg, + res->start, + res->start - offset, + resource_size(res), + res->flags, + j) == 0) { + j++; + + /* If the resource PCI address is 0 then we have our + * ISA memory hole + */ + if (res->start == offset) + found_isa_hole = 1; + } + } + + /* Handle ISA memory hole if not already covered */ + if (j <= 2 && !found_isa_hole && hose->isa_mem_size) + if (ppc4xx_setup_one_pci_PMM(hose, reg, hose->isa_mem_phys, 0, + hose->isa_mem_size, 0, j) == 0) + printk(KERN_INFO "%pOF: Legacy ISA memory support enabled\n", + 
hose->dn); +} + +static void __init ppc4xx_configure_pci_PTMs(struct pci_controller *hose, + void __iomem *reg, + const struct resource *res) +{ + resource_size_t size = resource_size(res); + u32 sa; + + /* Calculate window size */ + sa = (0xffffffffu << ilog2(size)) | 1; + sa |= 0x1; + + /* RAM is always at 0 local for now */ + writel(0, reg + PCIL0_PTM1LA); + writel(sa, reg + PCIL0_PTM1MS); + + /* Map on PCI side */ + early_write_config_dword(hose, hose->first_busno, 0, + PCI_BASE_ADDRESS_1, res->start); + early_write_config_dword(hose, hose->first_busno, 0, + PCI_BASE_ADDRESS_2, 0x00000000); + early_write_config_word(hose, hose->first_busno, 0, + PCI_COMMAND, 0x0006); +} + +static void __init ppc4xx_probe_pci_bridge(struct device_node *np) +{ + /* NYI */ + struct resource rsrc_cfg; + struct resource rsrc_reg; + struct resource dma_window; + struct pci_controller *hose = NULL; + void __iomem *reg = NULL; + const int *bus_range; + int primary = 0; + + /* Check if device is enabled */ + if (!of_device_is_available(np)) { + printk(KERN_INFO "%pOF: Port disabled via device-tree\n", np); + return; + } + + /* Fetch config space registers address */ + if (of_address_to_resource(np, 0, &rsrc_cfg)) { + printk(KERN_ERR "%pOF: Can't get PCI config register base !", + np); + return; + } + /* Fetch host bridge internal registers address */ + if (of_address_to_resource(np, 3, &rsrc_reg)) { + printk(KERN_ERR "%pOF: Can't get PCI internal register base !", + np); + return; + } + + /* Check if primary bridge */ + if (of_get_property(np, "primary", NULL)) + primary = 1; + + /* Get bus range if any */ + bus_range = of_get_property(np, "bus-range", NULL); + + /* Map registers */ + reg = ioremap(rsrc_reg.start, resource_size(&rsrc_reg)); + if (reg == NULL) { + printk(KERN_ERR "%pOF: Can't map registers !", np); + goto fail; + } + + /* Allocate the host controller data structure */ + hose = pcibios_alloc_controller(np); + if (!hose) + goto fail; + + hose->first_busno = bus_range ? bus_range[0] : 0x0; + hose->last_busno = bus_range ? 
bus_range[1] : 0xff; + + /* Setup config space */ + setup_indirect_pci(hose, rsrc_cfg.start, rsrc_cfg.start + 0x4, 0); + + /* Disable all windows */ + writel(0, reg + PCIL0_PMM0MA); + writel(0, reg + PCIL0_PMM1MA); + writel(0, reg + PCIL0_PMM2MA); + writel(0, reg + PCIL0_PTM1MS); + writel(0, reg + PCIL0_PTM2MS); + + /* Parse outbound mapping resources */ + pci_process_bridge_OF_ranges(hose, np, primary); + + /* Parse inbound mapping resources */ + if (ppc4xx_parse_dma_ranges(hose, reg, &dma_window) != 0) + goto fail; + + /* Configure outbound ranges POMs */ + ppc4xx_configure_pci_PMMs(hose, reg); + + /* Configure inbound ranges PIMs */ + ppc4xx_configure_pci_PTMs(hose, reg, &dma_window); + + /* We don't need the registers anymore */ + iounmap(reg); + return; + + fail: + if (hose) + pcibios_free_controller(hose); + if (reg) + iounmap(reg); +} + +/* + * 4xx PCI-X part + */ + +static int __init ppc4xx_setup_one_pcix_POM(struct pci_controller *hose, + void __iomem *reg, + u64 plb_addr, + u64 pci_addr, + u64 size, + unsigned int flags, + int index) +{ + u32 lah, lal, pciah, pcial, sa; + + if (!is_power_of_2(size) || size < 0x1000 || + (plb_addr & (size - 1)) != 0) { + printk(KERN_WARNING "%pOF: Resource out of range\n", + hose->dn); + return -1; + } + + /* Calculate register values */ + lah = RES_TO_U32_HIGH(plb_addr); + lal = RES_TO_U32_LOW(plb_addr); + pciah = RES_TO_U32_HIGH(pci_addr); + pcial = RES_TO_U32_LOW(pci_addr); + sa = (0xffffffffu << ilog2(size)) | 0x1; + + /* Program register values */ + if (index == 0) { + writel(lah, reg + PCIX0_POM0LAH); + writel(lal, reg + PCIX0_POM0LAL); + writel(pciah, reg + PCIX0_POM0PCIAH); + writel(pcial, reg + PCIX0_POM0PCIAL); + writel(sa, reg + PCIX0_POM0SA); + } else { + writel(lah, reg + PCIX0_POM1LAH); + writel(lal, reg + PCIX0_POM1LAL); + writel(pciah, reg + PCIX0_POM1PCIAH); + writel(pcial, reg + PCIX0_POM1PCIAL); + writel(sa, reg + PCIX0_POM1SA); + } + + return 0; +} + +static void __init ppc4xx_configure_pcix_POMs(struct pci_controller *hose, + void __iomem *reg) +{ + int i, j, found_isa_hole = 0; + + /* Setup outbound memory windows */ + for (i = j = 0; i < 3; i++) { + struct resource *res = &hose->mem_resources[i]; + resource_size_t offset = hose->mem_offset[i]; + + /* we only care about memory windows */ + if (!(res->flags & IORESOURCE_MEM)) + continue; + if (j > 1) { + printk(KERN_WARNING "%pOF: Too many ranges\n", hose->dn); + break; + } + + /* Configure the resource */ + if (ppc4xx_setup_one_pcix_POM(hose, reg, + res->start, + res->start - offset, + resource_size(res), + res->flags, + j) == 0) { + j++; + + /* If the resource PCI address is 0 then we have our + * ISA memory hole + */ + if (res->start == offset) + found_isa_hole = 1; + } + } + + /* Handle ISA memory hole if not already covered */ + if (j <= 1 && !found_isa_hole && hose->isa_mem_size) + if (ppc4xx_setup_one_pcix_POM(hose, reg, hose->isa_mem_phys, 0, + hose->isa_mem_size, 0, j) == 0) + printk(KERN_INFO "%pOF: Legacy ISA memory support enabled\n", + hose->dn); +} + +static void __init ppc4xx_configure_pcix_PIMs(struct pci_controller *hose, + void __iomem *reg, + const struct resource *res, + int big_pim, + int enable_msi_hole) +{ + resource_size_t size = resource_size(res); + u32 sa; + + /* RAM is always at 0 */ + writel(0x00000000, reg + PCIX0_PIM0LAH); + writel(0x00000000, reg + PCIX0_PIM0LAL); + + /* Calculate window size */ + sa = (0xffffffffu << ilog2(size)) | 1; + sa |= 0x1; + if (res->flags & IORESOURCE_PREFETCH) + sa |= 0x2; + if (enable_msi_hole) + sa |= 0x4; + 
writel(sa, reg + PCIX0_PIM0SA); + if (big_pim) + writel(0xffffffff, reg + PCIX0_PIM0SAH); + + /* Map on PCI side */ + writel(0x00000000, reg + PCIX0_BAR0H); + writel(res->start, reg + PCIX0_BAR0L); + writew(0x0006, reg + PCIX0_COMMAND); +} + +static void __init ppc4xx_probe_pcix_bridge(struct device_node *np) +{ + struct resource rsrc_cfg; + struct resource rsrc_reg; + struct resource dma_window; + struct pci_controller *hose = NULL; + void __iomem *reg = NULL; + const int *bus_range; + int big_pim = 0, msi = 0, primary = 0; + + /* Fetch config space registers address */ + if (of_address_to_resource(np, 0, &rsrc_cfg)) { + printk(KERN_ERR "%pOF: Can't get PCI-X config register base !", + np); + return; + } + /* Fetch host bridge internal registers address */ + if (of_address_to_resource(np, 3, &rsrc_reg)) { + printk(KERN_ERR "%pOF: Can't get PCI-X internal register base !", + np); + return; + } + + /* Check if it supports large PIMs (440GX) */ + if (of_get_property(np, "large-inbound-windows", NULL)) + big_pim = 1; + + /* Check if we should enable MSIs inbound hole */ + if (of_get_property(np, "enable-msi-hole", NULL)) + msi = 1; + + /* Check if primary bridge */ + if (of_get_property(np, "primary", NULL)) + primary = 1; + + /* Get bus range if any */ + bus_range = of_get_property(np, "bus-range", NULL); + + /* Map registers */ + reg = ioremap(rsrc_reg.start, resource_size(&rsrc_reg)); + if (reg == NULL) { + printk(KERN_ERR "%pOF: Can't map registers !", np); + goto fail; + } + + /* Allocate the host controller data structure */ + hose = pcibios_alloc_controller(np); + if (!hose) + goto fail; + + hose->first_busno = bus_range ? bus_range[0] : 0x0; + hose->last_busno = bus_range ? bus_range[1] : 0xff; + + /* Setup config space */ + setup_indirect_pci(hose, rsrc_cfg.start, rsrc_cfg.start + 0x4, + PPC_INDIRECT_TYPE_SET_CFG_TYPE); + + /* Disable all windows */ + writel(0, reg + PCIX0_POM0SA); + writel(0, reg + PCIX0_POM1SA); + writel(0, reg + PCIX0_POM2SA); + writel(0, reg + PCIX0_PIM0SA); + writel(0, reg + PCIX0_PIM1SA); + writel(0, reg + PCIX0_PIM2SA); + if (big_pim) { + writel(0, reg + PCIX0_PIM0SAH); + writel(0, reg + PCIX0_PIM2SAH); + } + + /* Parse outbound mapping resources */ + pci_process_bridge_OF_ranges(hose, np, primary); + + /* Parse inbound mapping resources */ + if (ppc4xx_parse_dma_ranges(hose, reg, &dma_window) != 0) + goto fail; + + /* Configure outbound ranges POMs */ + ppc4xx_configure_pcix_POMs(hose, reg); + + /* Configure inbound ranges PIMs */ + ppc4xx_configure_pcix_PIMs(hose, reg, &dma_window, big_pim, msi); + + /* We don't need the registers anymore */ + iounmap(reg); + return; + + fail: + if (hose) + pcibios_free_controller(hose); + if (reg) + iounmap(reg); +} + +#ifdef CONFIG_PPC4xx_PCI_EXPRESS + +/* + * 4xx PCI-Express part + * + * We support 3 parts currently based on the compatible property: + * + * ibm,plb-pciex-440spe + * ibm,plb-pciex-405ex + * ibm,plb-pciex-460ex + * + * Anything else will be rejected for now as they are all subtly + * different unfortunately. 
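+ *
+ * (The list above is stale: ppc4xx_pciex_check_core_init() below also
+ * accepts ibm,plb-pciex-460sx, ibm,plb-pciex-apm821xx,
+ * ibm,plb-pciex-476fpe and ibm,plb-pciex-476gtr.)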
+ * + */ + +#define MAX_PCIE_BUS_MAPPED 0x40 + +struct ppc4xx_pciex_port +{ + struct pci_controller *hose; + struct device_node *node; + unsigned int index; + int endpoint; + int link; + int has_ibpre; + unsigned int sdr_base; + dcr_host_t dcrs; + struct resource cfg_space; + struct resource utl_regs; + void __iomem *utl_base; +}; + +static struct ppc4xx_pciex_port *ppc4xx_pciex_ports; +static unsigned int ppc4xx_pciex_port_count; + +struct ppc4xx_pciex_hwops +{ + bool want_sdr; + int (*core_init)(struct device_node *np); + int (*port_init_hw)(struct ppc4xx_pciex_port *port); + int (*setup_utl)(struct ppc4xx_pciex_port *port); + void (*check_link)(struct ppc4xx_pciex_port *port); +}; + +static struct ppc4xx_pciex_hwops *ppc4xx_pciex_hwops; + +static int __init ppc4xx_pciex_wait_on_sdr(struct ppc4xx_pciex_port *port, + unsigned int sdr_offset, + unsigned int mask, + unsigned int value, + int timeout_ms) +{ + u32 val; + + while(timeout_ms--) { + val = mfdcri(SDR0, port->sdr_base + sdr_offset); + if ((val & mask) == value) { + pr_debug("PCIE%d: Wait on SDR %x success with tm %d (%08x)\n", + port->index, sdr_offset, timeout_ms, val); + return 0; + } + msleep(1); + } + return -1; +} + +static int __init ppc4xx_pciex_port_reset_sdr(struct ppc4xx_pciex_port *port) +{ + /* Wait for reset to complete */ + if (ppc4xx_pciex_wait_on_sdr(port, PESDRn_RCSSTS, 1 << 20, 0, 10)) { + printk(KERN_WARNING "PCIE%d: PGRST failed\n", + port->index); + return -1; + } + return 0; +} + + +static void __init ppc4xx_pciex_check_link_sdr(struct ppc4xx_pciex_port *port) +{ + printk(KERN_INFO "PCIE%d: Checking link...\n", port->index); + + /* Check for card presence detect if supported, if not, just wait for + * link unconditionally. + * + * note that we don't fail if there is no link, we just filter out + * config space accesses. That way, it will be easier to implement + * hotplug later on. + */ + if (!port->has_ibpre || + !ppc4xx_pciex_wait_on_sdr(port, PESDRn_LOOP, + 1 << 28, 1 << 28, 100)) { + printk(KERN_INFO + "PCIE%d: Device detected, waiting for link...\n", + port->index); + if (ppc4xx_pciex_wait_on_sdr(port, PESDRn_LOOP, + 0x1000, 0x1000, 2000)) + printk(KERN_WARNING + "PCIE%d: Link up failed\n", port->index); + else { + printk(KERN_INFO + "PCIE%d: link is up !\n", port->index); + port->link = 1; + } + } else + printk(KERN_INFO "PCIE%d: No device detected.\n", port->index); +} + +#ifdef CONFIG_44x + +/* Check various reset bits of the 440SPe PCIe core */ +static int __init ppc440spe_pciex_check_reset(struct device_node *np) +{ + u32 valPE0, valPE1, valPE2; + int err = 0; + + /* SDR0_PEGPLLLCT1 reset */ + if (!(mfdcri(SDR0, PESDR0_PLLLCT1) & 0x01000000)) { + /* + * the PCIe core was probably already initialised + * by firmware - let's re-reset RCSSET regs + * + * -- Shouldn't we also re-reset the whole thing ? 
-- BenH + */ + pr_debug("PCIE: SDR0_PLLLCT1 already reset.\n"); + mtdcri(SDR0, PESDR0_440SPE_RCSSET, 0x01010000); + mtdcri(SDR0, PESDR1_440SPE_RCSSET, 0x01010000); + mtdcri(SDR0, PESDR2_440SPE_RCSSET, 0x01010000); + } + + valPE0 = mfdcri(SDR0, PESDR0_440SPE_RCSSET); + valPE1 = mfdcri(SDR0, PESDR1_440SPE_RCSSET); + valPE2 = mfdcri(SDR0, PESDR2_440SPE_RCSSET); + + /* SDR0_PExRCSSET rstgu */ + if (!(valPE0 & 0x01000000) || + !(valPE1 & 0x01000000) || + !(valPE2 & 0x01000000)) { + printk(KERN_INFO "PCIE: SDR0_PExRCSSET rstgu error\n"); + err = -1; + } + + /* SDR0_PExRCSSET rstdl */ + if (!(valPE0 & 0x00010000) || + !(valPE1 & 0x00010000) || + !(valPE2 & 0x00010000)) { + printk(KERN_INFO "PCIE: SDR0_PExRCSSET rstdl error\n"); + err = -1; + } + + /* SDR0_PExRCSSET rstpyn */ + if ((valPE0 & 0x00001000) || + (valPE1 & 0x00001000) || + (valPE2 & 0x00001000)) { + printk(KERN_INFO "PCIE: SDR0_PExRCSSET rstpyn error\n"); + err = -1; + } + + /* SDR0_PExRCSSET hldplb */ + if ((valPE0 & 0x10000000) || + (valPE1 & 0x10000000) || + (valPE2 & 0x10000000)) { + printk(KERN_INFO "PCIE: SDR0_PExRCSSET hldplb error\n"); + err = -1; + } + + /* SDR0_PExRCSSET rdy */ + if ((valPE0 & 0x00100000) || + (valPE1 & 0x00100000) || + (valPE2 & 0x00100000)) { + printk(KERN_INFO "PCIE: SDR0_PExRCSSET rdy error\n"); + err = -1; + } + + /* SDR0_PExRCSSET shutdown */ + if ((valPE0 & 0x00000100) || + (valPE1 & 0x00000100) || + (valPE2 & 0x00000100)) { + printk(KERN_INFO "PCIE: SDR0_PExRCSSET shutdown error\n"); + err = -1; + } + + return err; +} + +/* Global PCIe core initializations for 440SPe core */ +static int __init ppc440spe_pciex_core_init(struct device_node *np) +{ + int time_out = 20; + + /* Set PLL clock receiver to LVPECL */ + dcri_clrset(SDR0, PESDR0_PLLLCT1, 0, 1 << 28); + + /* Shouldn't we do all the calibration stuff etc... here ? 
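+ * (as it stands, the code below only checks the reset state and the
+ * PESDR0_PLLLCT2 resistance calibration result, then de-asserts the
+ * PLL reset in PESDR0_PLLLCT1 and polls PESDR0_PLLLCT3 for VCO lock)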
*/ + if (ppc440spe_pciex_check_reset(np)) + return -ENXIO; + + if (!(mfdcri(SDR0, PESDR0_PLLLCT2) & 0x10000)) { + printk(KERN_INFO "PCIE: PESDR_PLLCT2 resistance calibration " + "failed (0x%08x)\n", + mfdcri(SDR0, PESDR0_PLLLCT2)); + return -1; + } + + /* De-assert reset of PCIe PLL, wait for lock */ + dcri_clrset(SDR0, PESDR0_PLLLCT1, 1 << 24, 0); + udelay(3); + + while (time_out) { + if (!(mfdcri(SDR0, PESDR0_PLLLCT3) & 0x10000000)) { + time_out--; + udelay(1); + } else + break; + } + if (!time_out) { + printk(KERN_INFO "PCIE: VCO output not locked\n"); + return -1; + } + + pr_debug("PCIE initialization OK\n"); + + return 3; +} + +static int __init ppc440spe_pciex_init_port_hw(struct ppc4xx_pciex_port *port) +{ + u32 val = 1 << 24; + + if (port->endpoint) + val = PTYPE_LEGACY_ENDPOINT << 20; + else + val = PTYPE_ROOT_PORT << 20; + + if (port->index == 0) + val |= LNKW_X8 << 12; + else + val |= LNKW_X4 << 12; + + mtdcri(SDR0, port->sdr_base + PESDRn_DLPSET, val); + mtdcri(SDR0, port->sdr_base + PESDRn_UTLSET1, 0x20222222); + if (ppc440spe_revA()) + mtdcri(SDR0, port->sdr_base + PESDRn_UTLSET2, 0x11000000); + mtdcri(SDR0, port->sdr_base + PESDRn_440SPE_HSSL0SET1, 0x35000000); + mtdcri(SDR0, port->sdr_base + PESDRn_440SPE_HSSL1SET1, 0x35000000); + mtdcri(SDR0, port->sdr_base + PESDRn_440SPE_HSSL2SET1, 0x35000000); + mtdcri(SDR0, port->sdr_base + PESDRn_440SPE_HSSL3SET1, 0x35000000); + if (port->index == 0) { + mtdcri(SDR0, port->sdr_base + PESDRn_440SPE_HSSL4SET1, + 0x35000000); + mtdcri(SDR0, port->sdr_base + PESDRn_440SPE_HSSL5SET1, + 0x35000000); + mtdcri(SDR0, port->sdr_base + PESDRn_440SPE_HSSL6SET1, + 0x35000000); + mtdcri(SDR0, port->sdr_base + PESDRn_440SPE_HSSL7SET1, + 0x35000000); + } + dcri_clrset(SDR0, port->sdr_base + PESDRn_RCSSET, + (1 << 24) | (1 << 16), 1 << 12); + + return ppc4xx_pciex_port_reset_sdr(port); +} + +static int __init ppc440speA_pciex_init_port_hw(struct ppc4xx_pciex_port *port) +{ + return ppc440spe_pciex_init_port_hw(port); +} + +static int __init ppc440speB_pciex_init_port_hw(struct ppc4xx_pciex_port *port) +{ + int rc = ppc440spe_pciex_init_port_hw(port); + + port->has_ibpre = 1; + + return rc; +} + +static int ppc440speA_pciex_init_utl(struct ppc4xx_pciex_port *port) +{ + /* XXX Check what that value means... I hate magic */ + dcr_write(port->dcrs, DCRO_PEGPL_SPECIAL, 0x68782800); + + /* + * Set buffer allocations and then assert VRB and TXE. 
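+ * (the PEUTL_OUTTR through PEUTL_IPDBSZ stores size the various UTL
+ * buffers; the final PEUTL_PCTL write, 0x80800066, is the one that
+ * raises VRB and TXE)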
+ */ + out_be32(port->utl_base + PEUTL_OUTTR, 0x08000000); + out_be32(port->utl_base + PEUTL_INTR, 0x02000000); + out_be32(port->utl_base + PEUTL_OPDBSZ, 0x10000000); + out_be32(port->utl_base + PEUTL_PBBSZ, 0x53000000); + out_be32(port->utl_base + PEUTL_IPHBSZ, 0x08000000); + out_be32(port->utl_base + PEUTL_IPDBSZ, 0x10000000); + out_be32(port->utl_base + PEUTL_RCIRQEN, 0x00f00000); + out_be32(port->utl_base + PEUTL_PCTL, 0x80800066); + + return 0; +} + +static int ppc440speB_pciex_init_utl(struct ppc4xx_pciex_port *port) +{ + /* Report CRS to the operating system */ + out_be32(port->utl_base + PEUTL_PBCTL, 0x08000000); + + return 0; +} + +static struct ppc4xx_pciex_hwops ppc440speA_pcie_hwops __initdata = +{ + .want_sdr = true, + .core_init = ppc440spe_pciex_core_init, + .port_init_hw = ppc440speA_pciex_init_port_hw, + .setup_utl = ppc440speA_pciex_init_utl, + .check_link = ppc4xx_pciex_check_link_sdr, +}; + +static struct ppc4xx_pciex_hwops ppc440speB_pcie_hwops __initdata = +{ + .want_sdr = true, + .core_init = ppc440spe_pciex_core_init, + .port_init_hw = ppc440speB_pciex_init_port_hw, + .setup_utl = ppc440speB_pciex_init_utl, + .check_link = ppc4xx_pciex_check_link_sdr, +}; + +static int __init ppc460ex_pciex_core_init(struct device_node *np) +{ + /* Nothing to do, return 2 ports */ + return 2; +} + +static int __init ppc460ex_pciex_init_port_hw(struct ppc4xx_pciex_port *port) +{ + u32 val; + u32 utlset1; + + if (port->endpoint) + val = PTYPE_LEGACY_ENDPOINT << 20; + else + val = PTYPE_ROOT_PORT << 20; + + if (port->index == 0) { + val |= LNKW_X1 << 12; + utlset1 = 0x20000000; + } else { + val |= LNKW_X4 << 12; + utlset1 = 0x20101101; + } + + mtdcri(SDR0, port->sdr_base + PESDRn_DLPSET, val); + mtdcri(SDR0, port->sdr_base + PESDRn_UTLSET1, utlset1); + mtdcri(SDR0, port->sdr_base + PESDRn_UTLSET2, 0x01210000); + + switch (port->index) { + case 0: + mtdcri(SDR0, PESDR0_460EX_L0CDRCTL, 0x00003230); + mtdcri(SDR0, PESDR0_460EX_L0DRV, 0x00000130); + mtdcri(SDR0, PESDR0_460EX_L0CLK, 0x00000006); + + mtdcri(SDR0, PESDR0_460EX_PHY_CTL_RST,0x10000000); + break; + + case 1: + mtdcri(SDR0, PESDR1_460EX_L0CDRCTL, 0x00003230); + mtdcri(SDR0, PESDR1_460EX_L1CDRCTL, 0x00003230); + mtdcri(SDR0, PESDR1_460EX_L2CDRCTL, 0x00003230); + mtdcri(SDR0, PESDR1_460EX_L3CDRCTL, 0x00003230); + mtdcri(SDR0, PESDR1_460EX_L0DRV, 0x00000130); + mtdcri(SDR0, PESDR1_460EX_L1DRV, 0x00000130); + mtdcri(SDR0, PESDR1_460EX_L2DRV, 0x00000130); + mtdcri(SDR0, PESDR1_460EX_L3DRV, 0x00000130); + mtdcri(SDR0, PESDR1_460EX_L0CLK, 0x00000006); + mtdcri(SDR0, PESDR1_460EX_L1CLK, 0x00000006); + mtdcri(SDR0, PESDR1_460EX_L2CLK, 0x00000006); + mtdcri(SDR0, PESDR1_460EX_L3CLK, 0x00000006); + + mtdcri(SDR0, PESDR1_460EX_PHY_CTL_RST,0x10000000); + break; + } + + mtdcri(SDR0, port->sdr_base + PESDRn_RCSSET, + mfdcri(SDR0, port->sdr_base + PESDRn_RCSSET) | + (PESDRx_RCSSET_RSTGU | PESDRx_RCSSET_RSTPYN)); + + /* Poll for PHY reset */ + /* XXX FIXME add timeout */ + switch (port->index) { + case 0: + while (!(mfdcri(SDR0, PESDR0_460EX_RSTSTA) & 0x1)) + udelay(10); + break; + case 1: + while (!(mfdcri(SDR0, PESDR1_460EX_RSTSTA) & 0x1)) + udelay(10); + break; + } + + mtdcri(SDR0, port->sdr_base + PESDRn_RCSSET, + (mfdcri(SDR0, port->sdr_base + PESDRn_RCSSET) & + ~(PESDRx_RCSSET_RSTGU | PESDRx_RCSSET_RSTDL)) | + PESDRx_RCSSET_RSTPYN); + + port->has_ibpre = 1; + + return ppc4xx_pciex_port_reset_sdr(port); +} + +static int ppc460ex_pciex_init_utl(struct ppc4xx_pciex_port *port) +{ + dcr_write(port->dcrs, DCRO_PEGPL_SPECIAL, 0x0); + + /* + * Set 
buffer allocations and then assert VRB and TXE. + */ + out_be32(port->utl_base + PEUTL_PBCTL, 0x0800000c); + out_be32(port->utl_base + PEUTL_OUTTR, 0x08000000); + out_be32(port->utl_base + PEUTL_INTR, 0x02000000); + out_be32(port->utl_base + PEUTL_OPDBSZ, 0x04000000); + out_be32(port->utl_base + PEUTL_PBBSZ, 0x00000000); + out_be32(port->utl_base + PEUTL_IPHBSZ, 0x02000000); + out_be32(port->utl_base + PEUTL_IPDBSZ, 0x04000000); + out_be32(port->utl_base + PEUTL_RCIRQEN,0x00f00000); + out_be32(port->utl_base + PEUTL_PCTL, 0x80800066); + + return 0; +} + +static struct ppc4xx_pciex_hwops ppc460ex_pcie_hwops __initdata = +{ + .want_sdr = true, + .core_init = ppc460ex_pciex_core_init, + .port_init_hw = ppc460ex_pciex_init_port_hw, + .setup_utl = ppc460ex_pciex_init_utl, + .check_link = ppc4xx_pciex_check_link_sdr, +}; + +static int __init apm821xx_pciex_core_init(struct device_node *np) +{ + /* Return the number of pcie port */ + return 1; +} + +static int __init apm821xx_pciex_init_port_hw(struct ppc4xx_pciex_port *port) +{ + u32 val; + + /* + * Do a software reset on PCIe ports. + * This code is to fix the issue that pci drivers doesn't re-assign + * bus number for PCIE devices after Uboot + * scanned and configured all the buses (eg. PCIE NIC IntelPro/1000 + * PT quad port, SAS LSI 1064E) + */ + + mtdcri(SDR0, PESDR0_460EX_PHY_CTL_RST, 0x0); + mdelay(10); + + if (port->endpoint) + val = PTYPE_LEGACY_ENDPOINT << 20; + else + val = PTYPE_ROOT_PORT << 20; + + val |= LNKW_X1 << 12; + + mtdcri(SDR0, port->sdr_base + PESDRn_DLPSET, val); + mtdcri(SDR0, port->sdr_base + PESDRn_UTLSET1, 0x00000000); + mtdcri(SDR0, port->sdr_base + PESDRn_UTLSET2, 0x01010000); + + mtdcri(SDR0, PESDR0_460EX_L0CDRCTL, 0x00003230); + mtdcri(SDR0, PESDR0_460EX_L0DRV, 0x00000130); + mtdcri(SDR0, PESDR0_460EX_L0CLK, 0x00000006); + + mtdcri(SDR0, PESDR0_460EX_PHY_CTL_RST, 0x10000000); + mdelay(50); + mtdcri(SDR0, PESDR0_460EX_PHY_CTL_RST, 0x30000000); + + mtdcri(SDR0, port->sdr_base + PESDRn_RCSSET, + mfdcri(SDR0, port->sdr_base + PESDRn_RCSSET) | + (PESDRx_RCSSET_RSTGU | PESDRx_RCSSET_RSTPYN)); + + /* Poll for PHY reset */ + val = PESDR0_460EX_RSTSTA - port->sdr_base; + if (ppc4xx_pciex_wait_on_sdr(port, val, 0x1, 1, 100)) { + printk(KERN_WARNING "%s: PCIE: Can't reset PHY\n", __func__); + return -EBUSY; + } else { + mtdcri(SDR0, port->sdr_base + PESDRn_RCSSET, + (mfdcri(SDR0, port->sdr_base + PESDRn_RCSSET) & + ~(PESDRx_RCSSET_RSTGU | PESDRx_RCSSET_RSTDL)) | + PESDRx_RCSSET_RSTPYN); + + port->has_ibpre = 1; + return 0; + } +} + +static struct ppc4xx_pciex_hwops apm821xx_pcie_hwops __initdata = { + .want_sdr = true, + .core_init = apm821xx_pciex_core_init, + .port_init_hw = apm821xx_pciex_init_port_hw, + .setup_utl = ppc460ex_pciex_init_utl, + .check_link = ppc4xx_pciex_check_link_sdr, +}; + +static int __init ppc460sx_pciex_core_init(struct device_node *np) +{ + /* HSS drive amplitude */ + mtdcri(SDR0, PESDR0_460SX_HSSL0DAMP, 0xB9843211); + mtdcri(SDR0, PESDR0_460SX_HSSL1DAMP, 0xB9843211); + mtdcri(SDR0, PESDR0_460SX_HSSL2DAMP, 0xB9843211); + mtdcri(SDR0, PESDR0_460SX_HSSL3DAMP, 0xB9843211); + mtdcri(SDR0, PESDR0_460SX_HSSL4DAMP, 0xB9843211); + mtdcri(SDR0, PESDR0_460SX_HSSL5DAMP, 0xB9843211); + mtdcri(SDR0, PESDR0_460SX_HSSL6DAMP, 0xB9843211); + mtdcri(SDR0, PESDR0_460SX_HSSL7DAMP, 0xB9843211); + + mtdcri(SDR0, PESDR1_460SX_HSSL0DAMP, 0xB9843211); + mtdcri(SDR0, PESDR1_460SX_HSSL1DAMP, 0xB9843211); + mtdcri(SDR0, PESDR1_460SX_HSSL2DAMP, 0xB9843211); + mtdcri(SDR0, PESDR1_460SX_HSSL3DAMP, 0xB9843211); + + mtdcri(SDR0, 
PESDR2_460SX_HSSL0DAMP, 0xB9843211); + mtdcri(SDR0, PESDR2_460SX_HSSL1DAMP, 0xB9843211); + mtdcri(SDR0, PESDR2_460SX_HSSL2DAMP, 0xB9843211); + mtdcri(SDR0, PESDR2_460SX_HSSL3DAMP, 0xB9843211); + + /* HSS TX pre-emphasis */ + mtdcri(SDR0, PESDR0_460SX_HSSL0COEFA, 0xDCB98987); + mtdcri(SDR0, PESDR0_460SX_HSSL1COEFA, 0xDCB98987); + mtdcri(SDR0, PESDR0_460SX_HSSL2COEFA, 0xDCB98987); + mtdcri(SDR0, PESDR0_460SX_HSSL3COEFA, 0xDCB98987); + mtdcri(SDR0, PESDR0_460SX_HSSL4COEFA, 0xDCB98987); + mtdcri(SDR0, PESDR0_460SX_HSSL5COEFA, 0xDCB98987); + mtdcri(SDR0, PESDR0_460SX_HSSL6COEFA, 0xDCB98987); + mtdcri(SDR0, PESDR0_460SX_HSSL7COEFA, 0xDCB98987); + + mtdcri(SDR0, PESDR1_460SX_HSSL0COEFA, 0xDCB98987); + mtdcri(SDR0, PESDR1_460SX_HSSL1COEFA, 0xDCB98987); + mtdcri(SDR0, PESDR1_460SX_HSSL2COEFA, 0xDCB98987); + mtdcri(SDR0, PESDR1_460SX_HSSL3COEFA, 0xDCB98987); + + mtdcri(SDR0, PESDR2_460SX_HSSL0COEFA, 0xDCB98987); + mtdcri(SDR0, PESDR2_460SX_HSSL1COEFA, 0xDCB98987); + mtdcri(SDR0, PESDR2_460SX_HSSL2COEFA, 0xDCB98987); + mtdcri(SDR0, PESDR2_460SX_HSSL3COEFA, 0xDCB98987); + + /* HSS TX calibration control */ + mtdcri(SDR0, PESDR0_460SX_HSSL1CALDRV, 0x22222222); + mtdcri(SDR0, PESDR1_460SX_HSSL1CALDRV, 0x22220000); + mtdcri(SDR0, PESDR2_460SX_HSSL1CALDRV, 0x22220000); + + /* HSS TX slew control */ + mtdcri(SDR0, PESDR0_460SX_HSSSLEW, 0xFFFFFFFF); + mtdcri(SDR0, PESDR1_460SX_HSSSLEW, 0xFFFF0000); + mtdcri(SDR0, PESDR2_460SX_HSSSLEW, 0xFFFF0000); + + /* Set HSS PRBS enabled */ + mtdcri(SDR0, PESDR0_460SX_HSSCTLSET, 0x00001130); + mtdcri(SDR0, PESDR2_460SX_HSSCTLSET, 0x00001130); + + udelay(100); + + /* De-assert PLLRESET */ + dcri_clrset(SDR0, PESDR0_PLLLCT2, 0x00000100, 0); + + /* Reset DL, UTL, GPL before configuration */ + mtdcri(SDR0, PESDR0_460SX_RCSSET, + PESDRx_RCSSET_RSTDL | PESDRx_RCSSET_RSTGU); + mtdcri(SDR0, PESDR1_460SX_RCSSET, + PESDRx_RCSSET_RSTDL | PESDRx_RCSSET_RSTGU); + mtdcri(SDR0, PESDR2_460SX_RCSSET, + PESDRx_RCSSET_RSTDL | PESDRx_RCSSET_RSTGU); + + udelay(100); + + /* + * If bifurcation is not enabled, u-boot would have disabled the + * third PCIe port + */ + if (((mfdcri(SDR0, PESDR1_460SX_HSSCTLSET) & 0x00000001) == + 0x00000001)) { + printk(KERN_INFO "PCI: PCIE bifurcation setup successfully.\n"); + printk(KERN_INFO "PCI: Total 3 PCIE ports are present\n"); + return 3; + } + + printk(KERN_INFO "PCI: Total 2 PCIE ports are present\n"); + return 2; +} + +static int __init ppc460sx_pciex_init_port_hw(struct ppc4xx_pciex_port *port) +{ + + if (port->endpoint) + dcri_clrset(SDR0, port->sdr_base + PESDRn_UTLSET2, + 0x01000000, 0); + else + dcri_clrset(SDR0, port->sdr_base + PESDRn_UTLSET2, + 0, 0x01000000); + + dcri_clrset(SDR0, port->sdr_base + PESDRn_RCSSET, + (PESDRx_RCSSET_RSTGU | PESDRx_RCSSET_RSTDL), + PESDRx_RCSSET_RSTPYN); + + port->has_ibpre = 1; + + return ppc4xx_pciex_port_reset_sdr(port); +} + +static int ppc460sx_pciex_init_utl(struct ppc4xx_pciex_port *port) +{ + /* Max 128 Bytes */ + out_be32 (port->utl_base + PEUTL_PBBSZ, 0x00000000); + /* Assert VRB and TXE - per datasheet turn off addr validation */ + out_be32(port->utl_base + PEUTL_PCTL, 0x80800000); + return 0; +} + +static void __init ppc460sx_pciex_check_link(struct ppc4xx_pciex_port *port) +{ + void __iomem *mbase; + int attempt = 50; + + port->link = 0; + + mbase = ioremap(port->cfg_space.start + 0x10000000, 0x1000); + if (mbase == NULL) { + printk(KERN_ERR "%pOF: Can't map internal config space !", + port->node); + return; + } + + while (attempt && (0 == (in_le32(mbase + PECFG_460SX_DLLSTA) + & 
PECFG_460SX_DLLSTA_LINKUP))) { + attempt--; + mdelay(10); + } + if (attempt) + port->link = 1; + iounmap(mbase); +} + +static struct ppc4xx_pciex_hwops ppc460sx_pcie_hwops __initdata = { + .want_sdr = true, + .core_init = ppc460sx_pciex_core_init, + .port_init_hw = ppc460sx_pciex_init_port_hw, + .setup_utl = ppc460sx_pciex_init_utl, + .check_link = ppc460sx_pciex_check_link, +}; + +#endif /* CONFIG_44x */ + +#ifdef CONFIG_40x + +static int __init ppc405ex_pciex_core_init(struct device_node *np) +{ + /* Nothing to do, return 2 ports */ + return 2; +} + +static void __init ppc405ex_pcie_phy_reset(struct ppc4xx_pciex_port *port) +{ + /* Assert the PE0_PHY reset */ + mtdcri(SDR0, port->sdr_base + PESDRn_RCSSET, 0x01010000); + msleep(1); + + /* deassert the PE0_hotreset */ + if (port->endpoint) + mtdcri(SDR0, port->sdr_base + PESDRn_RCSSET, 0x01111000); + else + mtdcri(SDR0, port->sdr_base + PESDRn_RCSSET, 0x01101000); + + /* poll for phy !reset */ + /* XXX FIXME add timeout */ + while (!(mfdcri(SDR0, port->sdr_base + PESDRn_405EX_PHYSTA) & 0x00001000)) + ; + + /* deassert the PE0_gpl_utl_reset */ + mtdcri(SDR0, port->sdr_base + PESDRn_RCSSET, 0x00101000); +} + +static int __init ppc405ex_pciex_init_port_hw(struct ppc4xx_pciex_port *port) +{ + u32 val; + + if (port->endpoint) + val = PTYPE_LEGACY_ENDPOINT; + else + val = PTYPE_ROOT_PORT; + + mtdcri(SDR0, port->sdr_base + PESDRn_DLPSET, + 1 << 24 | val << 20 | LNKW_X1 << 12); + + mtdcri(SDR0, port->sdr_base + PESDRn_UTLSET1, 0x00000000); + mtdcri(SDR0, port->sdr_base + PESDRn_UTLSET2, 0x01010000); + mtdcri(SDR0, port->sdr_base + PESDRn_405EX_PHYSET1, 0x720F0000); + mtdcri(SDR0, port->sdr_base + PESDRn_405EX_PHYSET2, 0x70600003); + + /* + * Only reset the PHY when no link is currently established. + * This is for the Atheros PCIe board which has problems to establish + * the link (again) after this PHY reset. All other currently tested + * PCIe boards don't show this problem. + * This has to be re-tested and fixed in a later release! + */ + val = mfdcri(SDR0, port->sdr_base + PESDRn_LOOP); + if (!(val & 0x00001000)) + ppc405ex_pcie_phy_reset(port); + + dcr_write(port->dcrs, DCRO_PEGPL_CFG, 0x10000000); /* guarded on */ + + port->has_ibpre = 1; + + return ppc4xx_pciex_port_reset_sdr(port); +} + +static int ppc405ex_pciex_init_utl(struct ppc4xx_pciex_port *port) +{ + dcr_write(port->dcrs, DCRO_PEGPL_SPECIAL, 0x0); + + /* + * Set buffer allocations and then assert VRB and TXE. 
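+ * (the trailing PEUTL_PBCTL write below mirrors
+ * ppc440speB_pciex_init_utl() above, where 0x08000000 is documented
+ * as reporting CRS to the operating system)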
+ */ + out_be32(port->utl_base + PEUTL_OUTTR, 0x02000000); + out_be32(port->utl_base + PEUTL_INTR, 0x02000000); + out_be32(port->utl_base + PEUTL_OPDBSZ, 0x04000000); + out_be32(port->utl_base + PEUTL_PBBSZ, 0x21000000); + out_be32(port->utl_base + PEUTL_IPHBSZ, 0x02000000); + out_be32(port->utl_base + PEUTL_IPDBSZ, 0x04000000); + out_be32(port->utl_base + PEUTL_RCIRQEN, 0x00f00000); + out_be32(port->utl_base + PEUTL_PCTL, 0x80800066); + + out_be32(port->utl_base + PEUTL_PBCTL, 0x08000000); + + return 0; +} + +static struct ppc4xx_pciex_hwops ppc405ex_pcie_hwops __initdata = +{ + .want_sdr = true, + .core_init = ppc405ex_pciex_core_init, + .port_init_hw = ppc405ex_pciex_init_port_hw, + .setup_utl = ppc405ex_pciex_init_utl, + .check_link = ppc4xx_pciex_check_link_sdr, +}; + +#endif /* CONFIG_40x */ + +#ifdef CONFIG_476FPE +static int __init ppc_476fpe_pciex_core_init(struct device_node *np) +{ + return 4; +} + +static void __init ppc_476fpe_pciex_check_link(struct ppc4xx_pciex_port *port) +{ + u32 timeout_ms = 20; + u32 val = 0, mask = (PECFG_TLDLP_LNKUP|PECFG_TLDLP_PRESENT); + void __iomem *mbase = ioremap(port->cfg_space.start + 0x10000000, + 0x1000); + + printk(KERN_INFO "PCIE%d: Checking link...\n", port->index); + + if (mbase == NULL) { + printk(KERN_WARNING "PCIE%d: failed to get cfg space\n", + port->index); + return; + } + + while (timeout_ms--) { + val = in_le32(mbase + PECFG_TLDLP); + + if ((val & mask) == mask) + break; + msleep(10); + } + + if (val & PECFG_TLDLP_PRESENT) { + printk(KERN_INFO "PCIE%d: link is up !\n", port->index); + port->link = 1; + } else + printk(KERN_WARNING "PCIE%d: Link up failed\n", port->index); + + iounmap(mbase); +} + +static struct ppc4xx_pciex_hwops ppc_476fpe_pcie_hwops __initdata = +{ + .core_init = ppc_476fpe_pciex_core_init, + .check_link = ppc_476fpe_pciex_check_link, +}; +#endif /* CONFIG_476FPE */ + +/* Check that the core has been initied and if not, do it */ +static int __init ppc4xx_pciex_check_core_init(struct device_node *np) +{ + static int core_init; + int count = -ENODEV; + + if (core_init++) + return 0; + +#ifdef CONFIG_44x + if (of_device_is_compatible(np, "ibm,plb-pciex-440spe")) { + if (ppc440spe_revA()) + ppc4xx_pciex_hwops = &ppc440speA_pcie_hwops; + else + ppc4xx_pciex_hwops = &ppc440speB_pcie_hwops; + } + if (of_device_is_compatible(np, "ibm,plb-pciex-460ex")) + ppc4xx_pciex_hwops = &ppc460ex_pcie_hwops; + if (of_device_is_compatible(np, "ibm,plb-pciex-460sx")) + ppc4xx_pciex_hwops = &ppc460sx_pcie_hwops; + if (of_device_is_compatible(np, "ibm,plb-pciex-apm821xx")) + ppc4xx_pciex_hwops = &apm821xx_pcie_hwops; +#endif /* CONFIG_44x */ +#ifdef CONFIG_40x + if (of_device_is_compatible(np, "ibm,plb-pciex-405ex")) + ppc4xx_pciex_hwops = &ppc405ex_pcie_hwops; +#endif +#ifdef CONFIG_476FPE + if (of_device_is_compatible(np, "ibm,plb-pciex-476fpe") + || of_device_is_compatible(np, "ibm,plb-pciex-476gtr")) + ppc4xx_pciex_hwops = &ppc_476fpe_pcie_hwops; +#endif + if (ppc4xx_pciex_hwops == NULL) { + printk(KERN_WARNING "PCIE: unknown host type %pOF\n", np); + return -ENODEV; + } + + count = ppc4xx_pciex_hwops->core_init(np); + if (count > 0) { + ppc4xx_pciex_ports = + kcalloc(count, sizeof(struct ppc4xx_pciex_port), + GFP_KERNEL); + if (ppc4xx_pciex_ports) { + ppc4xx_pciex_port_count = count; + return 0; + } + printk(KERN_WARNING "PCIE: failed to allocate ports array\n"); + return -ENOMEM; + } + return -ENODEV; +} + +static void __init ppc4xx_pciex_port_init_mapping(struct ppc4xx_pciex_port *port) +{ + /* We map PCI Express configuration 
+	/* We map PCI Express configuration based on the reg property */
+	dcr_write(port->dcrs, DCRO_PEGPL_CFGBAH,
+		  RES_TO_U32_HIGH(port->cfg_space.start));
+	dcr_write(port->dcrs, DCRO_PEGPL_CFGBAL,
+		  RES_TO_U32_LOW(port->cfg_space.start));
+
+	/* XXX FIXME: Use size from reg property. For now, map 512M */
+	dcr_write(port->dcrs, DCRO_PEGPL_CFGMSK, 0xe0000001);
+
+	/* We map UTL registers based on the reg property */
+	dcr_write(port->dcrs, DCRO_PEGPL_REGBAH,
+		  RES_TO_U32_HIGH(port->utl_regs.start));
+	dcr_write(port->dcrs, DCRO_PEGPL_REGBAL,
+		  RES_TO_U32_LOW(port->utl_regs.start));
+
+	/* XXX FIXME: Use size from reg property */
+	dcr_write(port->dcrs, DCRO_PEGPL_REGMSK, 0x00007001);
+
+	/* Disable all other outbound windows */
+	dcr_write(port->dcrs, DCRO_PEGPL_OMR1MSKL, 0);
+	dcr_write(port->dcrs, DCRO_PEGPL_OMR2MSKL, 0);
+	dcr_write(port->dcrs, DCRO_PEGPL_OMR3MSKL, 0);
+	dcr_write(port->dcrs, DCRO_PEGPL_MSGMSK, 0);
+}
+
+static int __init ppc4xx_pciex_port_init(struct ppc4xx_pciex_port *port)
+{
+	int rc = 0;
+
+	/* Init HW */
+	if (ppc4xx_pciex_hwops->port_init_hw)
+		rc = ppc4xx_pciex_hwops->port_init_hw(port);
+	if (rc != 0)
+		return rc;
+
+	/*
+	 * Initialize mapping: disable all regions and configure
+	 * CFG and REG regions based on resources in the device tree
+	 */
+	ppc4xx_pciex_port_init_mapping(port);
+
+	if (ppc4xx_pciex_hwops->check_link)
+		ppc4xx_pciex_hwops->check_link(port);
+
+	/*
+	 * Map UTL
+	 */
+	port->utl_base = ioremap(port->utl_regs.start, 0x100);
+	BUG_ON(port->utl_base == NULL);
+
+	/*
+	 * Setup UTL registers --BenH.
+	 */
+	if (ppc4xx_pciex_hwops->setup_utl)
+		ppc4xx_pciex_hwops->setup_utl(port);
+
+	/*
+	 * Check for VC0 active or PLL Locked and assert RDY.
+	 */
+	if (port->sdr_base) {
+		if (of_device_is_compatible(port->node,
+					    "ibm,plb-pciex-460sx")) {
+			if (port->link && ppc4xx_pciex_wait_on_sdr(port,
+					PESDRn_RCSSTS,
+					1 << 12, 1 << 12, 5000)) {
+				printk(KERN_INFO "PCIE%d: PLL not locked\n",
+				       port->index);
+				port->link = 0;
+			}
+		} else if (port->link &&
+			   ppc4xx_pciex_wait_on_sdr(port, PESDRn_RCSSTS,
+						    1 << 16, 1 << 16, 5000)) {
+			printk(KERN_INFO "PCIE%d: VC0 not active\n",
+			       port->index);
+			port->link = 0;
+		}
+
+		dcri_clrset(SDR0, port->sdr_base + PESDRn_RCSSET, 0, 1 << 20);
+	}
+
+	msleep(100);
+
+	return 0;
+}
+
+static int ppc4xx_pciex_validate_bdf(struct ppc4xx_pciex_port *port,
+				     struct pci_bus *bus,
+				     unsigned int devfn)
+{
+	static int message;
+
+	/* Endpoint cannot generate upstream (remote) config cycles */
+	if (port->endpoint && bus->number != port->hose->first_busno)
+		return PCIBIOS_DEVICE_NOT_FOUND;
+
+	/* Check we are within the mapped range */
+	if (bus->number > port->hose->last_busno) {
+		if (!message) {
+			printk(KERN_WARNING "Warning! Probing bus %u"
+			       " out of range !\n", bus->number);
+			message++;
+		}
+		return PCIBIOS_DEVICE_NOT_FOUND;
+	}
+
+	/* The root complex has only one device / function */
+	if (bus->number == port->hose->first_busno && devfn != 0)
+		return PCIBIOS_DEVICE_NOT_FOUND;
+
+	/* The other side of the RC has only one device as well */
+	if (bus->number == (port->hose->first_busno + 1) &&
+	    PCI_SLOT(devfn) != 0)
+		return PCIBIOS_DEVICE_NOT_FOUND;
+
+	/* Check if we have a link */
+	if ((bus->number != port->hose->first_busno) && !port->link)
+		return PCIBIOS_DEVICE_NOT_FOUND;
+
+	return 0;
+}
+
+static void __iomem *ppc4xx_pciex_get_config_base(struct ppc4xx_pciex_port *port,
+						  struct pci_bus *bus,
+						  unsigned int devfn)
+{
+	int relbus;
+
+	/* Remove the casts when we finally remove the stupid volatile
+	 * in struct pci_controller
+	 */
+	if (bus->number == port->hose->first_busno)
+		return (void __iomem *)port->hose->cfg_addr;
+
+	relbus = bus->number - (port->hose->first_busno + 1);
+	return (void __iomem *)port->hose->cfg_data +
+		((relbus << 20) | (devfn << 12));
+}
+
+static int ppc4xx_pciex_read_config(struct pci_bus *bus, unsigned int devfn,
+				    int offset, int len, u32 *val)
+{
+	struct pci_controller *hose = pci_bus_to_host(bus);
+	struct ppc4xx_pciex_port *port =
+		&ppc4xx_pciex_ports[hose->indirect_type];
+	void __iomem *addr;
+	u32 gpl_cfg;
+
+	BUG_ON(hose != port->hose);
+
+	if (ppc4xx_pciex_validate_bdf(port, bus, devfn) != 0)
+		return PCIBIOS_DEVICE_NOT_FOUND;
+
+	addr = ppc4xx_pciex_get_config_base(port, bus, devfn);
+
+	/*
+	 * Reading from the configuration space of a non-existent device can
+	 * generate transaction errors. For the read duration we suppress
+	 * assertion of machine check exceptions to avoid those.
+	 */
+	gpl_cfg = dcr_read(port->dcrs, DCRO_PEGPL_CFG);
+	dcr_write(port->dcrs, DCRO_PEGPL_CFG, gpl_cfg | GPL_DMER_MASK_DISA);
+
+	/* Make sure no CRS is recorded */
+	out_be32(port->utl_base + PEUTL_RCSTA, 0x00040000);
+
+	switch (len) {
+	case 1:
+		*val = in_8((u8 *)(addr + offset));
+		break;
+	case 2:
+		*val = in_le16((u16 *)(addr + offset));
+		break;
+	default:
+		*val = in_le32((u32 *)(addr + offset));
+		break;
+	}
+
+	pr_debug("pcie-config-read: bus=%3d [%3d..%3d] devfn=0x%04x"
+		 " offset=0x%04x len=%d, addr=0x%p val=0x%08x\n",
+		 bus->number, hose->first_busno, hose->last_busno,
+		 devfn, offset, len, addr + offset, *val);
+
+	/* Check for CRS (440SPe rev B does that for us but heh ..) */
+	if (in_be32(port->utl_base + PEUTL_RCSTA) & 0x00040000) {
+		pr_debug("Got CRS !\n");
+		if (len != 4 || offset != 0)
+			return PCIBIOS_DEVICE_NOT_FOUND;
+		*val = 0xffff0001;
+	}
+
+	dcr_write(port->dcrs, DCRO_PEGPL_CFG, gpl_cfg);
+
+	return PCIBIOS_SUCCESSFUL;
+}
+
+static int ppc4xx_pciex_write_config(struct pci_bus *bus, unsigned int devfn,
+				     int offset, int len, u32 val)
+{
+	struct pci_controller *hose = pci_bus_to_host(bus);
+	struct ppc4xx_pciex_port *port =
+		&ppc4xx_pciex_ports[hose->indirect_type];
+	void __iomem *addr;
+	u32 gpl_cfg;
+
+	if (ppc4xx_pciex_validate_bdf(port, bus, devfn) != 0)
+		return PCIBIOS_DEVICE_NOT_FOUND;
+
+	addr = ppc4xx_pciex_get_config_base(port, bus, devfn);
+
+	/*
+	 * Writing to the configuration space of a non-existent device can
+	 * generate transaction errors. For the write duration we suppress
+	 * assertion of machine check exceptions to avoid those.
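+	 * (GPL_DMER_MASK_DISA lives in DCRO_PEGPL_CFG; see the "DMER mask"
+	 * definition in pci.h.)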
+	 */
+	gpl_cfg = dcr_read(port->dcrs, DCRO_PEGPL_CFG);
+	dcr_write(port->dcrs, DCRO_PEGPL_CFG, gpl_cfg | GPL_DMER_MASK_DISA);
+
+	pr_debug("pcie-config-write: bus=%3d [%3d..%3d] devfn=0x%04x"
+		 " offset=0x%04x len=%d, addr=0x%p val=0x%08x\n",
+		 bus->number, hose->first_busno, hose->last_busno,
+		 devfn, offset, len, addr + offset, val);
+
+	switch (len) {
+	case 1:
+		out_8((u8 *)(addr + offset), val);
+		break;
+	case 2:
+		out_le16((u16 *)(addr + offset), val);
+		break;
+	default:
+		out_le32((u32 *)(addr + offset), val);
+		break;
+	}
+
+	dcr_write(port->dcrs, DCRO_PEGPL_CFG, gpl_cfg);
+
+	return PCIBIOS_SUCCESSFUL;
+}
+
+static struct pci_ops ppc4xx_pciex_pci_ops =
+{
+	.read  = ppc4xx_pciex_read_config,
+	.write = ppc4xx_pciex_write_config,
+};
+
+static int __init ppc4xx_setup_one_pciex_POM(struct ppc4xx_pciex_port *port,
+					     struct pci_controller *hose,
+					     void __iomem *mbase,
+					     u64 plb_addr,
+					     u64 pci_addr,
+					     u64 size,
+					     unsigned int flags,
+					     int index)
+{
+	u32 lah, lal, pciah, pcial, sa;
+
+	if (!is_power_of_2(size) ||
+	    (index < 2 && size < 0x100000) ||
+	    (index == 2 && size < 0x100) ||
+	    (plb_addr & (size - 1)) != 0) {
+		printk(KERN_WARNING "%pOF: Resource out of range\n", hose->dn);
+		return -1;
+	}
+
+	/* Calculate register values */
+	lah = RES_TO_U32_HIGH(plb_addr);
+	lal = RES_TO_U32_LOW(plb_addr);
+	pciah = RES_TO_U32_HIGH(pci_addr);
+	pcial = RES_TO_U32_LOW(pci_addr);
+	sa = (0xffffffffu << ilog2(size)) | 0x1;
+
+	/* Program register values */
+	switch (index) {
+	case 0:
+		out_le32(mbase + PECFG_POM0LAH, pciah);
+		out_le32(mbase + PECFG_POM0LAL, pcial);
+		dcr_write(port->dcrs, DCRO_PEGPL_OMR1BAH, lah);
+		dcr_write(port->dcrs, DCRO_PEGPL_OMR1BAL, lal);
+		dcr_write(port->dcrs, DCRO_PEGPL_OMR1MSKH, 0x7fffffff);
+		/* Enabled and single region */
+		if (of_device_is_compatible(port->node, "ibm,plb-pciex-460sx"))
+			dcr_write(port->dcrs, DCRO_PEGPL_OMR1MSKL,
+				  sa | DCRO_PEGPL_460SX_OMR1MSKL_UOT
+				  | DCRO_PEGPL_OMRxMSKL_VAL);
+		else if (of_device_is_compatible(
+				port->node, "ibm,plb-pciex-476fpe") ||
+			 of_device_is_compatible(
+				port->node, "ibm,plb-pciex-476gtr"))
+			dcr_write(port->dcrs, DCRO_PEGPL_OMR1MSKL,
+				  sa | DCRO_PEGPL_476FPE_OMR1MSKL_UOT
+				  | DCRO_PEGPL_OMRxMSKL_VAL);
+		else
+			dcr_write(port->dcrs, DCRO_PEGPL_OMR1MSKL,
+				  sa | DCRO_PEGPL_OMR1MSKL_UOT
+				  | DCRO_PEGPL_OMRxMSKL_VAL);
+		break;
+	case 1:
+		out_le32(mbase + PECFG_POM1LAH, pciah);
+		out_le32(mbase + PECFG_POM1LAL, pcial);
+		dcr_write(port->dcrs, DCRO_PEGPL_OMR2BAH, lah);
+		dcr_write(port->dcrs, DCRO_PEGPL_OMR2BAL, lal);
+		dcr_write(port->dcrs, DCRO_PEGPL_OMR2MSKH, 0x7fffffff);
+		dcr_write(port->dcrs, DCRO_PEGPL_OMR2MSKL,
+			  sa | DCRO_PEGPL_OMRxMSKL_VAL);
+		break;
+	case 2:
+		out_le32(mbase + PECFG_POM2LAH, pciah);
+		out_le32(mbase + PECFG_POM2LAL, pcial);
+		dcr_write(port->dcrs, DCRO_PEGPL_OMR3BAH, lah);
+		dcr_write(port->dcrs, DCRO_PEGPL_OMR3BAL, lal);
+		dcr_write(port->dcrs, DCRO_PEGPL_OMR3MSKH, 0x7fffffff);
+		/* Note that 3 here means enabled | IO space !!! */
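+		/* (3 = DCRO_PEGPL_OMR3MSKL_IO | DCRO_PEGPL_OMRxMSKL_VAL,
+		 *  i.e. 0x2 | 0x1, per the PEGPL bit mappings in pci.h.) */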
+		dcr_write(port->dcrs, DCRO_PEGPL_OMR3MSKL,
+			  sa | DCRO_PEGPL_OMR3MSKL_IO
+			  | DCRO_PEGPL_OMRxMSKL_VAL);
+		break;
+	}
+
+	return 0;
+}
+
+static void __init ppc4xx_configure_pciex_POMs(struct ppc4xx_pciex_port *port,
+					       struct pci_controller *hose,
+					       void __iomem *mbase)
+{
+	int i, j, found_isa_hole = 0;
+
+	/* Setup outbound memory windows */
+	for (i = j = 0; i < 3; i++) {
+		struct resource *res = &hose->mem_resources[i];
+		resource_size_t offset = hose->mem_offset[i];
+
+		/* we only care about memory windows */
+		if (!(res->flags & IORESOURCE_MEM))
+			continue;
+		if (j > 1) {
+			printk(KERN_WARNING "%pOF: Too many ranges\n",
+			       port->node);
+			break;
+		}
+
+		/* Configure the resource */
+		if (ppc4xx_setup_one_pciex_POM(port, hose, mbase,
+					       res->start,
+					       res->start - offset,
+					       resource_size(res),
+					       res->flags,
+					       j) == 0) {
+			j++;
+
+			/* If the resource PCI address is 0 then we have our
+			 * ISA memory hole
+			 */
+			if (res->start == offset)
+				found_isa_hole = 1;
+		}
+	}
+
+	/* Handle ISA memory hole if not already covered */
+	if (j <= 1 && !found_isa_hole && hose->isa_mem_size)
+		if (ppc4xx_setup_one_pciex_POM(port, hose, mbase,
+					       hose->isa_mem_phys, 0,
+					       hose->isa_mem_size, 0, j) == 0)
+			printk(KERN_INFO "%pOF: Legacy ISA memory support enabled\n",
+			       hose->dn);
+
+	/* Configure IO, always 64K starting at 0. We hard wire it to 64K !
+	 * Note also that it -has- to be region index 2 on this HW
+	 */
+	if (hose->io_resource.flags & IORESOURCE_IO)
+		ppc4xx_setup_one_pciex_POM(port, hose, mbase,
+					   hose->io_base_phys, 0,
+					   0x10000, IORESOURCE_IO, 2);
+}
+
+static void __init ppc4xx_configure_pciex_PIMs(struct ppc4xx_pciex_port *port,
+					       struct pci_controller *hose,
+					       void __iomem *mbase,
+					       struct resource *res)
+{
+	resource_size_t size = resource_size(res);
+	u64 sa;
+
+	if (port->endpoint) {
+		resource_size_t ep_addr = 0;
+		resource_size_t ep_size = 32 << 20;
+
+		/* Currently we map a fixed 32MByte window to PLB address
+		 * 0 (SDRAM). This should probably be configurable via a dts
+		 * property.
+		 */
+
+		/* Calculate window size */
+		sa = (0xffffffffffffffffull << ilog2(ep_size));
+
+		/* Setup BAR0 */
+		out_le32(mbase + PECFG_BAR0HMPA, RES_TO_U32_HIGH(sa));
+		out_le32(mbase + PECFG_BAR0LMPA, RES_TO_U32_LOW(sa) |
+			 PCI_BASE_ADDRESS_MEM_TYPE_64);
+
+		/* Disable BAR1 & BAR2 */
+		out_le32(mbase + PECFG_BAR1MPA, 0);
+		out_le32(mbase + PECFG_BAR2HMPA, 0);
+		out_le32(mbase + PECFG_BAR2LMPA, 0);
+
+		out_le32(mbase + PECFG_PIM01SAH, RES_TO_U32_HIGH(sa));
+		out_le32(mbase + PECFG_PIM01SAL, RES_TO_U32_LOW(sa));
+
+		out_le32(mbase + PCI_BASE_ADDRESS_0, RES_TO_U32_LOW(ep_addr));
+		out_le32(mbase + PCI_BASE_ADDRESS_1, RES_TO_U32_HIGH(ep_addr));
+	} else {
+		/* Calculate window size */
+		sa = (0xffffffffffffffffull << ilog2(size));
+		if (res->flags & IORESOURCE_PREFETCH)
+			sa |= PCI_BASE_ADDRESS_MEM_PREFETCH;
+
+		if (of_device_is_compatible(port->node, "ibm,plb-pciex-460sx") ||
+		    of_device_is_compatible(
+			    port->node, "ibm,plb-pciex-476fpe") ||
+		    of_device_is_compatible(
+			    port->node, "ibm,plb-pciex-476gtr"))
+			sa |= PCI_BASE_ADDRESS_MEM_TYPE_64;
+
+		out_le32(mbase + PECFG_BAR0HMPA, RES_TO_U32_HIGH(sa));
+		out_le32(mbase + PECFG_BAR0LMPA, RES_TO_U32_LOW(sa));
+
+		/* The setup of the split looks weird to me ... let's see
+		 * if it works
+		 */
+		out_le32(mbase + PECFG_PIM0LAL, 0x00000000);
+		out_le32(mbase + PECFG_PIM0LAH, 0x00000000);
+		out_le32(mbase + PECFG_PIM1LAL, 0x00000000);
+		out_le32(mbase + PECFG_PIM1LAH, 0x00000000);
+		out_le32(mbase + PECFG_PIM01SAH, 0xffff0000);
+		out_le32(mbase + PECFG_PIM01SAL, 0x00000000);
+
+		out_le32(mbase + PCI_BASE_ADDRESS_0, RES_TO_U32_LOW(res->start));
+		out_le32(mbase + PCI_BASE_ADDRESS_1, RES_TO_U32_HIGH(res->start));
+	}
+
+	/* Enable inbound mapping */
+	out_le32(mbase + PECFG_PIMEN, 0x1);
+
+	/* Enable I/O, Mem, and Busmaster cycles */
+	out_le16(mbase + PCI_COMMAND,
+		 in_le16(mbase + PCI_COMMAND) |
+		 PCI_COMMAND_IO | PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER);
+}
+
+static void __init ppc4xx_pciex_port_setup_hose(struct ppc4xx_pciex_port *port)
+{
+	struct resource dma_window;
+	struct pci_controller *hose = NULL;
+	const int *bus_range;
+	int primary = 0, busses;
+	void __iomem *mbase = NULL, *cfg_data = NULL;
+	const u32 *pval;
+	u32 val;
+
+	/* Check if primary bridge */
+	if (of_get_property(port->node, "primary", NULL))
+		primary = 1;
+
+	/* Get bus range if any */
+	bus_range = of_get_property(port->node, "bus-range", NULL);
+
+	/* Allocate the host controller data structure */
+	hose = pcibios_alloc_controller(port->node);
+	if (!hose)
+		goto fail;
+
+	/* We stick the port number in "indirect_type" so the config space
+	 * ops can retrieve the port data structure easily
+	 */
+	hose->indirect_type = port->index;
+
+	/* Get bus range */
+	hose->first_busno = bus_range ? bus_range[0] : 0x0;
+	hose->last_busno = bus_range ? bus_range[1] : 0xff;
+
+	/* Because the config-space mapping is so big (1M per bus), we
+	 * limit how many busses we support. In the long run, we could replace
+	 * that with something akin to kmap_atomic instead. We set aside 1 bus
+	 * for the host itself too.
+	 */
+	busses = hose->last_busno - hose->first_busno; /* This is off by 1 */
+	if (busses > MAX_PCIE_BUS_MAPPED) {
+		busses = MAX_PCIE_BUS_MAPPED;
+		hose->last_busno = hose->first_busno + busses;
+	}
+
+	if (!port->endpoint) {
+		/* Only map the external config space in cfg_data for
+		 * PCIe root-complexes. External space is 1M per bus
+		 */
+		cfg_data = ioremap(port->cfg_space.start +
+				   (hose->first_busno + 1) * 0x100000,
+				   busses * 0x100000);
+		if (cfg_data == NULL) {
+			printk(KERN_ERR "%pOF: Can't map external config space !",
+			       port->node);
+			goto fail;
+		}
+		hose->cfg_data = cfg_data;
+	}
+
+	/* Always map the host config space in cfg_addr.
+	 * Internal space is 4K
+	 */
+	mbase = ioremap(port->cfg_space.start + 0x10000000, 0x1000);
+	if (mbase == NULL) {
+		printk(KERN_ERR "%pOF: Can't map internal config space !",
+		       port->node);
+		goto fail;
+	}
+	hose->cfg_addr = mbase;
+
+	pr_debug("PCIE %pOF, bus %d..%d\n", port->node,
+		 hose->first_busno, hose->last_busno);
+	pr_debug("     config space mapped at: root @0x%p, other @0x%p\n",
+		 hose->cfg_addr, hose->cfg_data);
+
+	/* Setup config space */
+	hose->ops = &ppc4xx_pciex_pci_ops;
+	port->hose = hose;
+	mbase = (void __iomem *)hose->cfg_addr;
+
+	if (!port->endpoint) {
+		/*
+		 * Set bus numbers on our root port
+		 */
+		out_8(mbase + PCI_PRIMARY_BUS, hose->first_busno);
+		out_8(mbase + PCI_SECONDARY_BUS, hose->first_busno + 1);
+		out_8(mbase + PCI_SUBORDINATE_BUS, hose->last_busno);
+	}
+
+	/*
+	 * OMRs are already reset, also disable PIMs
+	 */
+	out_le32(mbase + PECFG_PIMEN, 0);
+
+	/* Parse outbound mapping resources */
+	pci_process_bridge_OF_ranges(hose, port->node, primary);
+
+	/* Parse inbound mapping resources */
+	if (ppc4xx_parse_dma_ranges(hose, mbase, &dma_window) != 0)
+		goto fail;
+
+	/* Configure outbound ranges POMs */
+	ppc4xx_configure_pciex_POMs(port, hose, mbase);
+
+	/* Configure inbound ranges PIMs */
+	ppc4xx_configure_pciex_PIMs(port, hose, mbase, &dma_window);
+
+	/* The root complex doesn't show up if we don't set some vendor
+	 * and device IDs into it. The defaults below are the same bogus
+	 * ones that the initial code in arch/ppc had. This can be
+	 * overwritten by setting the "vendor-id/device-id" properties
+	 * in the pciex node.
+	 */
+
+	/* Get the (optional) vendor-/device-id from the device-tree */
+	pval = of_get_property(port->node, "vendor-id", NULL);
+	if (pval) {
+		val = *pval;
+	} else {
+		if (!port->endpoint)
+			val = 0xaaa0 + port->index;
+		else
+			val = 0xeee0 + port->index;
+	}
+	out_le16(mbase + 0x200, val);
+
+	pval = of_get_property(port->node, "device-id", NULL);
+	if (pval) {
+		val = *pval;
+	} else {
+		if (!port->endpoint)
+			val = 0xbed0 + port->index;
+		else
+			val = 0xfed0 + port->index;
+	}
+	out_le16(mbase + 0x202, val);
+
+	/* Enable Bus master, memory, and io space */
+	if (of_device_is_compatible(port->node, "ibm,plb-pciex-460sx"))
+		out_le16(mbase + 0x204, 0x7);
+
+	if (!port->endpoint) {
+		/* Set Class Code to PCI-PCI bridge and Revision Id to 1 */
+		out_le32(mbase + 0x208, 0x06040001);
+
+		printk(KERN_INFO "PCIE%d: successfully set as root-complex\n",
+		       port->index);
+	} else {
+		/* Set Class Code to Processor/PPC */
+		out_le32(mbase + 0x208, 0x0b200001);
+
+		printk(KERN_INFO "PCIE%d: successfully set as endpoint\n",
+		       port->index);
+	}
+
+	return;
+ fail:
+	if (hose)
+		pcibios_free_controller(hose);
+	if (cfg_data)
+		iounmap(cfg_data);
+	if (mbase)
+		iounmap(mbase);
+}
+
+static void __init ppc4xx_probe_pciex_bridge(struct device_node *np)
+{
+	struct ppc4xx_pciex_port *port;
+	const u32 *pval;
+	int portno;
+	unsigned int dcrs;
+
+	/* First, proceed to core initialization as we assume there's
+	 * only one PCIe core in the system
+	 */
+	if (ppc4xx_pciex_check_core_init(np))
+		return;
+
+	/* Get the port number from the device-tree */
+	pval = of_get_property(np, "port", NULL);
+	if (pval == NULL) {
+		printk(KERN_ERR "PCIE: Can't find port number for %pOF\n", np);
+		return;
+	}
+	portno = *pval;
+	if (portno >= ppc4xx_pciex_port_count) {
+		printk(KERN_ERR "PCIE: port number out of range for %pOF\n",
+		       np);
+		return;
+	}
+	port = &ppc4xx_pciex_ports[portno];
+	port->index = portno;
+
+	/*
+	 * Check if device is enabled
+	 */
+	if (!of_device_is_available(np)) {
+		printk(KERN_INFO "PCIE%d: Port disabled via device-tree\n",
+		       port->index);
+		return;
+	}
+
+	port->node = of_node_get(np);
+	if (ppc4xx_pciex_hwops->want_sdr) {
+		pval = of_get_property(np, "sdr-base", NULL);
+		if (pval == NULL) {
+			printk(KERN_ERR "PCIE: missing sdr-base for %pOF\n",
+			       np);
+			return;
+		}
+		port->sdr_base = *pval;
+	}
+
+	/* Check if the device_type property is set to "pci" or
+	 * "pci-endpoint". Depending on this, the PCIe port is configured
+	 * as a root complex or as an endpoint.
+	 */
+	if (of_node_is_type(port->node, "pci-endpoint")) {
+		port->endpoint = 1;
+	} else if (of_node_is_type(port->node, "pci")) {
+		port->endpoint = 0;
+	} else {
+		printk(KERN_ERR "PCIE: missing or incorrect device_type for %pOF\n",
+		       np);
+		return;
+	}
+
+	/* Fetch config space registers address */
+	if (of_address_to_resource(np, 0, &port->cfg_space)) {
+		printk(KERN_ERR "%pOF: Can't get PCI-E config space !", np);
+		return;
+	}
+	/* Fetch host bridge internal registers address */
+	if (of_address_to_resource(np, 1, &port->utl_regs)) {
+		printk(KERN_ERR "%pOF: Can't get UTL register base !", np);
+		return;
+	}
+
+	/* Map DCRs */
+	dcrs = dcr_resource_start(np, 0);
+	if (dcrs == 0) {
+		printk(KERN_ERR "%pOF: Can't get DCR register base !", np);
+		return;
+	}
+	port->dcrs = dcr_map(np, dcrs, dcr_resource_len(np, 0));
+
+	/* Initialize the port specific registers */
+	if (ppc4xx_pciex_port_init(port)) {
+		printk(KERN_WARNING "PCIE%d: Port init failed\n", port->index);
+		return;
+	}
+
+	/* Setup the linux hose data structure */
+	ppc4xx_pciex_port_setup_hose(port);
+}
+
+#endif /* CONFIG_PPC4xx_PCI_EXPRESS */
+
+static int __init ppc4xx_pci_find_bridges(void)
+{
+	struct device_node *np;
+
+	pci_add_flags(PCI_ENABLE_PROC_DOMAINS | PCI_COMPAT_DOMAIN_0);
+
+#ifdef CONFIG_PPC4xx_PCI_EXPRESS
+	for_each_compatible_node(np, NULL, "ibm,plb-pciex")
+		ppc4xx_probe_pciex_bridge(np);
+#endif
+	for_each_compatible_node(np, NULL, "ibm,plb-pcix")
+		ppc4xx_probe_pcix_bridge(np);
+	for_each_compatible_node(np, NULL, "ibm,plb-pci")
+		ppc4xx_probe_pci_bridge(np);
+
+	return 0;
+}
+arch_initcall(ppc4xx_pci_find_bridges);
+
diff --git a/arch/powerpc/platforms/4xx/pci.h b/arch/powerpc/platforms/4xx/pci.h
new file mode 100644
index 000000000..bb4821938
--- /dev/null
+++ b/arch/powerpc/platforms/4xx/pci.h
@@ -0,0 +1,505 @@
+/*
+ * PCI / PCI-X / PCI-Express support for 4xx parts
+ *
+ * Copyright 2007 Ben. Herrenschmidt , IBM Corp.
+ *
+ * Bits and pieces extracted from arch/ppc support by
+ *
+ * Matt Porter
+ *
+ * Copyright 2002-2005 MontaVista Software Inc.
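+ *
+ * The definitions below cover the 4xx PCI-X bridge registers (PCIX0_*),
+ * the PLB-PCI bridge registers (PCIL0_*) and the PCIe blocks (PEGPL DCRs,
+ * PESDR system DCRs, PEUTL and PECFG registers) used by pci.c.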
+ */
+#ifndef __PPC4XX_PCI_H__
+#define __PPC4XX_PCI_H__
+
+/*
+ * 4xx PCI-X bridge register definitions
+ */
+#define PCIX0_VENDID	0x000
+#define PCIX0_DEVID	0x002
+#define PCIX0_COMMAND	0x004
+#define PCIX0_STATUS	0x006
+#define PCIX0_REVID	0x008
+#define PCIX0_CLS	0x009
+#define PCIX0_CACHELS	0x00c
+#define PCIX0_LATTIM	0x00d
+#define PCIX0_HDTYPE	0x00e
+#define PCIX0_BIST	0x00f
+#define PCIX0_BAR0L	0x010
+#define PCIX0_BAR0H	0x014
+#define PCIX0_BAR1	0x018
+#define PCIX0_BAR2L	0x01c
+#define PCIX0_BAR2H	0x020
+#define PCIX0_BAR3	0x024
+#define PCIX0_CISPTR	0x028
+#define PCIX0_SBSYSVID	0x02c
+#define PCIX0_SBSYSID	0x02e
+#define PCIX0_EROMBA	0x030
+#define PCIX0_CAP	0x034
+#define PCIX0_RES0	0x035
+#define PCIX0_RES1	0x036
+#define PCIX0_RES2	0x038
+#define PCIX0_INTLN	0x03c
+#define PCIX0_INTPN	0x03d
+#define PCIX0_MINGNT	0x03e
+#define PCIX0_MAXLTNCY	0x03f
+#define PCIX0_BRDGOPT1	0x040
+#define PCIX0_BRDGOPT2	0x044
+#define PCIX0_ERREN	0x050
+#define PCIX0_ERRSTS	0x054
+#define PCIX0_PLBBESR	0x058
+#define PCIX0_PLBBEARL	0x05c
+#define PCIX0_PLBBEARH	0x060
+#define PCIX0_POM0LAL	0x068
+#define PCIX0_POM0LAH	0x06c
+#define PCIX0_POM0SA	0x070
+#define PCIX0_POM0PCIAL	0x074
+#define PCIX0_POM0PCIAH	0x078
+#define PCIX0_POM1LAL	0x07c
+#define PCIX0_POM1LAH	0x080
+#define PCIX0_POM1SA	0x084
+#define PCIX0_POM1PCIAL	0x088
+#define PCIX0_POM1PCIAH	0x08c
+#define PCIX0_POM2SA	0x090
+#define PCIX0_PIM0SAL	0x098
+#define PCIX0_PIM0SA	PCIX0_PIM0SAL
+#define PCIX0_PIM0LAL	0x09c
+#define PCIX0_PIM0LAH	0x0a0
+#define PCIX0_PIM1SA	0x0a4
+#define PCIX0_PIM1LAL	0x0a8
+#define PCIX0_PIM1LAH	0x0ac
+#define PCIX0_PIM2SAL	0x0b0
+#define PCIX0_PIM2SA	PCIX0_PIM2SAL
+#define PCIX0_PIM2LAL	0x0b4
+#define PCIX0_PIM2LAH	0x0b8
+#define PCIX0_OMCAPID	0x0c0
+#define PCIX0_OMNIPTR	0x0c1
+#define PCIX0_OMMC	0x0c2
+#define PCIX0_OMMA	0x0c4
+#define PCIX0_OMMUA	0x0c8
+#define PCIX0_OMMDATA	0x0cc
+#define PCIX0_OMMEOI	0x0ce
+#define PCIX0_PMCAPID	0x0d0
+#define PCIX0_PMNIPTR	0x0d1
+#define PCIX0_PMC	0x0d2
+#define PCIX0_PMCSR	0x0d4
+#define PCIX0_PMCSRBSE	0x0d6
+#define PCIX0_PMDATA	0x0d7
+#define PCIX0_PMSCRR	0x0d8
+#define PCIX0_CAPID	0x0dc
+#define PCIX0_NIPTR	0x0dd
+#define PCIX0_CMD	0x0de
+#define PCIX0_STS	0x0e0
+#define PCIX0_IDR	0x0e4
+#define PCIX0_CID	0x0e8
+#define PCIX0_RID	0x0ec
+#define PCIX0_PIM0SAH	0x0f8
+#define PCIX0_PIM2SAH	0x0fc
+#define PCIX0_MSGIL	0x100
+#define PCIX0_MSGIH	0x104
+#define PCIX0_MSGOL	0x108
+#define PCIX0_MSGOH	0x10c
+#define PCIX0_IM	0x1f8
+
+/*
+ * 4xx PCI bridge register definitions
+ */
+#define PCIL0_PMM0LA	0x00
+#define PCIL0_PMM0MA	0x04
+#define PCIL0_PMM0PCILA	0x08
+#define PCIL0_PMM0PCIHA	0x0c
+#define PCIL0_PMM1LA	0x10
+#define PCIL0_PMM1MA	0x14
+#define PCIL0_PMM1PCILA	0x18
+#define PCIL0_PMM1PCIHA	0x1c
+#define PCIL0_PMM2LA	0x20
+#define PCIL0_PMM2MA	0x24
+#define PCIL0_PMM2PCILA	0x28
+#define PCIL0_PMM2PCIHA	0x2c
+#define PCIL0_PTM1MS	0x30
+#define PCIL0_PTM1LA	0x34
+#define PCIL0_PTM2MS	0x38
+#define PCIL0_PTM2LA	0x3c
+
+/*
+ * 4xx PCIe bridge register definitions
+ */
+
+/* DCR offsets */
+#define DCRO_PEGPL_CFGBAH	0x00
+#define DCRO_PEGPL_CFGBAL	0x01
+#define DCRO_PEGPL_CFGMSK	0x02
+#define DCRO_PEGPL_MSGBAH	0x03
+#define DCRO_PEGPL_MSGBAL	0x04
+#define DCRO_PEGPL_MSGMSK	0x05
+#define DCRO_PEGPL_OMR1BAH	0x06
+#define DCRO_PEGPL_OMR1BAL	0x07
+#define DCRO_PEGPL_OMR1MSKH	0x08
+#define DCRO_PEGPL_OMR1MSKL	0x09
+#define DCRO_PEGPL_OMR2BAH	0x0a
+#define DCRO_PEGPL_OMR2BAL	0x0b
+#define DCRO_PEGPL_OMR2MSKH	0x0c
+#define DCRO_PEGPL_OMR2MSKL	0x0d
+#define DCRO_PEGPL_OMR3BAH	0x0e
+#define DCRO_PEGPL_OMR3BAL	0x0f
+#define DCRO_PEGPL_OMR3MSKH	0x10
+#define DCRO_PEGPL_OMR3MSKL	0x11
+#define DCRO_PEGPL_REGBAH	0x12
+#define DCRO_PEGPL_REGBAL	0x13
+#define DCRO_PEGPL_REGMSK	0x14
+#define DCRO_PEGPL_SPECIAL	0x15
+#define DCRO_PEGPL_CFG		0x16
+#define DCRO_PEGPL_ESR		0x17
+#define DCRO_PEGPL_EARH		0x18
+#define DCRO_PEGPL_EARL		0x19
+#define DCRO_PEGPL_EATR		0x1a
+
+/* DMER mask */
+#define GPL_DMER_MASK_DISA	0x02000000
+
+/*
+ * System DCRs (SDRs)
+ */
+#define PESDR0_PLLLCT1		0x03a0
+#define PESDR0_PLLLCT2		0x03a1
+#define PESDR0_PLLLCT3		0x03a2
+
+/*
+ * 440SPe additional DCRs
+ */
+#define PESDR0_440SPE_UTLSET1	0x0300
+#define PESDR0_440SPE_UTLSET2	0x0301
+#define PESDR0_440SPE_DLPSET	0x0302
+#define PESDR0_440SPE_LOOP	0x0303
+#define PESDR0_440SPE_RCSSET	0x0304
+#define PESDR0_440SPE_RCSSTS	0x0305
+#define PESDR0_440SPE_HSSL0SET1	0x0306
+#define PESDR0_440SPE_HSSL0SET2	0x0307
+#define PESDR0_440SPE_HSSL0STS	0x0308
+#define PESDR0_440SPE_HSSL1SET1	0x0309
+#define PESDR0_440SPE_HSSL1SET2	0x030a
+#define PESDR0_440SPE_HSSL1STS	0x030b
+#define PESDR0_440SPE_HSSL2SET1	0x030c
+#define PESDR0_440SPE_HSSL2SET2	0x030d
+#define PESDR0_440SPE_HSSL2STS	0x030e
+#define PESDR0_440SPE_HSSL3SET1	0x030f
+#define PESDR0_440SPE_HSSL3SET2	0x0310
+#define PESDR0_440SPE_HSSL3STS	0x0311
+#define PESDR0_440SPE_HSSL4SET1	0x0312
+#define PESDR0_440SPE_HSSL4SET2	0x0313
+#define PESDR0_440SPE_HSSL4STS	0x0314
+#define PESDR0_440SPE_HSSL5SET1	0x0315
+#define PESDR0_440SPE_HSSL5SET2	0x0316
+#define PESDR0_440SPE_HSSL5STS	0x0317
+#define PESDR0_440SPE_HSSL6SET1	0x0318
+#define PESDR0_440SPE_HSSL6SET2	0x0319
+#define PESDR0_440SPE_HSSL6STS	0x031a
+#define PESDR0_440SPE_HSSL7SET1	0x031b
+#define PESDR0_440SPE_HSSL7SET2	0x031c
+#define PESDR0_440SPE_HSSL7STS	0x031d
+#define PESDR0_440SPE_HSSCTLSET	0x031e
+#define PESDR0_440SPE_LANE_ABCD	0x031f
+#define PESDR0_440SPE_LANE_EFGH	0x0320
+
+#define PESDR1_440SPE_UTLSET1	0x0340
+#define PESDR1_440SPE_UTLSET2	0x0341
+#define PESDR1_440SPE_DLPSET	0x0342
+#define PESDR1_440SPE_LOOP	0x0343
+#define PESDR1_440SPE_RCSSET	0x0344
+#define PESDR1_440SPE_RCSSTS	0x0345
+#define PESDR1_440SPE_HSSL0SET1	0x0346
+#define PESDR1_440SPE_HSSL0SET2	0x0347
+#define PESDR1_440SPE_HSSL0STS	0x0348
+#define PESDR1_440SPE_HSSL1SET1	0x0349
+#define PESDR1_440SPE_HSSL1SET2	0x034a
+#define PESDR1_440SPE_HSSL1STS	0x034b
+#define PESDR1_440SPE_HSSL2SET1	0x034c
+#define PESDR1_440SPE_HSSL2SET2	0x034d
+#define PESDR1_440SPE_HSSL2STS	0x034e
+#define PESDR1_440SPE_HSSL3SET1	0x034f
+#define PESDR1_440SPE_HSSL3SET2	0x0350
+#define PESDR1_440SPE_HSSL3STS	0x0351
+#define PESDR1_440SPE_HSSCTLSET	0x0352
+#define PESDR1_440SPE_LANE_ABCD	0x0353
+
+#define PESDR2_440SPE_UTLSET1	0x0370
+#define PESDR2_440SPE_UTLSET2	0x0371
+#define PESDR2_440SPE_DLPSET	0x0372
+#define PESDR2_440SPE_LOOP	0x0373
+#define PESDR2_440SPE_RCSSET	0x0374
+#define PESDR2_440SPE_RCSSTS	0x0375
+#define PESDR2_440SPE_HSSL0SET1	0x0376
+#define PESDR2_440SPE_HSSL0SET2	0x0377
+#define PESDR2_440SPE_HSSL0STS	0x0378
+#define PESDR2_440SPE_HSSL1SET1	0x0379
+#define PESDR2_440SPE_HSSL1SET2	0x037a
+#define PESDR2_440SPE_HSSL1STS	0x037b
+#define PESDR2_440SPE_HSSL2SET1	0x037c
+#define PESDR2_440SPE_HSSL2SET2	0x037d
+#define PESDR2_440SPE_HSSL2STS	0x037e
+#define PESDR2_440SPE_HSSL3SET1	0x037f
+#define PESDR2_440SPE_HSSL3SET2	0x0380
+#define PESDR2_440SPE_HSSL3STS	0x0381
+#define PESDR2_440SPE_HSSCTLSET	0x0382
+#define PESDR2_440SPE_LANE_ABCD	0x0383
+
+/*
+ * 405EX additional DCRs
+ */
+#define PESDR0_405EX_UTLSET1	0x0400
+#define PESDR0_405EX_UTLSET2	0x0401
+#define PESDR0_405EX_DLPSET	0x0402
+#define PESDR0_405EX_LOOP	0x0403
+#define PESDR0_405EX_RCSSET	0x0404
+#define PESDR0_405EX_RCSSTS	0x0405
+#define PESDR0_405EX_PHYSET1	0x0406
+#define PESDR0_405EX_PHYSET2	0x0407
+#define PESDR0_405EX_BIST	0x0408
+#define PESDR0_405EX_LPB	0x040B
+#define PESDR0_405EX_PHYSTA	0x040C
+
+#define PESDR1_405EX_UTLSET1	0x0440
+#define PESDR1_405EX_UTLSET2	0x0441
+#define PESDR1_405EX_DLPSET	0x0442
+#define PESDR1_405EX_LOOP	0x0443
+#define PESDR1_405EX_RCSSET	0x0444
+#define PESDR1_405EX_RCSSTS	0x0445
+#define PESDR1_405EX_PHYSET1	0x0446
+#define PESDR1_405EX_PHYSET2	0x0447
+#define PESDR1_405EX_BIST	0x0448
+#define PESDR1_405EX_LPB	0x044B
+#define PESDR1_405EX_PHYSTA	0x044C
+
+/*
+ * 460EX additional DCRs
+ */
+#define PESDR0_460EX_L0BIST		0x0308
+#define PESDR0_460EX_L0BISTSTS		0x0309
+#define PESDR0_460EX_L0CDRCTL		0x030A
+#define PESDR0_460EX_L0DRV		0x030B
+#define PESDR0_460EX_L0REC		0x030C
+#define PESDR0_460EX_L0LPB		0x030D
+#define PESDR0_460EX_L0CLK		0x030E
+#define PESDR0_460EX_PHY_CTL_RST	0x030F
+#define PESDR0_460EX_RSTSTA		0x0310
+#define PESDR0_460EX_OBS		0x0311
+#define PESDR0_460EX_L0ERRC		0x0320
+
+#define PESDR1_460EX_L0BIST		0x0348
+#define PESDR1_460EX_L1BIST		0x0349
+#define PESDR1_460EX_L2BIST		0x034A
+#define PESDR1_460EX_L3BIST		0x034B
+#define PESDR1_460EX_L0BISTSTS		0x034C
+#define PESDR1_460EX_L1BISTSTS		0x034D
+#define PESDR1_460EX_L2BISTSTS		0x034E
+#define PESDR1_460EX_L3BISTSTS		0x034F
+#define PESDR1_460EX_L0CDRCTL		0x0350
+#define PESDR1_460EX_L1CDRCTL		0x0351
+#define PESDR1_460EX_L2CDRCTL		0x0352
+#define PESDR1_460EX_L3CDRCTL		0x0353
+#define PESDR1_460EX_L0DRV		0x0354
+#define PESDR1_460EX_L1DRV		0x0355
+#define PESDR1_460EX_L2DRV		0x0356
+#define PESDR1_460EX_L3DRV		0x0357
+#define PESDR1_460EX_L0REC		0x0358
+#define PESDR1_460EX_L1REC		0x0359
+#define PESDR1_460EX_L2REC		0x035A
+#define PESDR1_460EX_L3REC		0x035B
+#define PESDR1_460EX_L0LPB		0x035C
+#define PESDR1_460EX_L1LPB		0x035D
+#define PESDR1_460EX_L2LPB		0x035E
+#define PESDR1_460EX_L3LPB		0x035F
+#define PESDR1_460EX_L0CLK		0x0360
+#define PESDR1_460EX_L1CLK		0x0361
+#define PESDR1_460EX_L2CLK		0x0362
+#define PESDR1_460EX_L3CLK		0x0363
+#define PESDR1_460EX_PHY_CTL_RST	0x0364
+#define PESDR1_460EX_RSTSTA		0x0365
+#define PESDR1_460EX_OBS		0x0366
+#define PESDR1_460EX_L0ERRC		0x0368
+#define PESDR1_460EX_L1ERRC		0x0369
+#define PESDR1_460EX_L2ERRC		0x036A
+#define PESDR1_460EX_L3ERRC		0x036B
+#define PESDR0_460EX_IHS1		0x036C
+#define PESDR0_460EX_IHS2		0x036D
+
+/*
+ * 460SX additional DCRs
+ */
+#define PESDRn_460SX_RCEI		0x02
+
+#define PESDR0_460SX_HSSL0DAMP		0x320
+#define PESDR0_460SX_HSSL1DAMP		0x321
+#define PESDR0_460SX_HSSL2DAMP		0x322
+#define PESDR0_460SX_HSSL3DAMP		0x323
+#define PESDR0_460SX_HSSL4DAMP		0x324
+#define PESDR0_460SX_HSSL5DAMP		0x325
+#define PESDR0_460SX_HSSL6DAMP		0x326
+#define PESDR0_460SX_HSSL7DAMP		0x327
+
+#define PESDR1_460SX_HSSL0DAMP		0x354
+#define PESDR1_460SX_HSSL1DAMP		0x355
+#define PESDR1_460SX_HSSL2DAMP		0x356
+#define PESDR1_460SX_HSSL3DAMP		0x357
+
+#define PESDR2_460SX_HSSL0DAMP		0x384
+#define PESDR2_460SX_HSSL1DAMP		0x385
+#define PESDR2_460SX_HSSL2DAMP		0x386
+#define PESDR2_460SX_HSSL3DAMP		0x387
+
+#define PESDR0_460SX_HSSL0COEFA		0x328
+#define PESDR0_460SX_HSSL1COEFA		0x329
+#define PESDR0_460SX_HSSL2COEFA		0x32A
+#define PESDR0_460SX_HSSL3COEFA		0x32B
+#define PESDR0_460SX_HSSL4COEFA		0x32C
+#define PESDR0_460SX_HSSL5COEFA		0x32D
+#define PESDR0_460SX_HSSL6COEFA		0x32E
+#define PESDR0_460SX_HSSL7COEFA		0x32F
+
+#define PESDR1_460SX_HSSL0COEFA		0x358
+#define PESDR1_460SX_HSSL1COEFA		0x359
+#define PESDR1_460SX_HSSL2COEFA		0x35A
+#define PESDR1_460SX_HSSL3COEFA		0x35B
+
+#define PESDR2_460SX_HSSL0COEFA		0x388
+#define PESDR2_460SX_HSSL1COEFA		0x389
+#define PESDR2_460SX_HSSL2COEFA		0x38A
+#define PESDR2_460SX_HSSL3COEFA		0x38B
+
+#define PESDR0_460SX_HSSL1CALDRV	0x339
+#define PESDR1_460SX_HSSL1CALDRV	0x361
+#define PESDR2_460SX_HSSL1CALDRV	0x391
+
+#define PESDR0_460SX_HSSSLEW		0x338
+#define PESDR1_460SX_HSSSLEW		0x360
+#define PESDR2_460SX_HSSSLEW		0x390
+
+#define PESDR0_460SX_HSSCTLSET		0x31E
+#define PESDR1_460SX_HSSCTLSET		0x352
+#define PESDR2_460SX_HSSCTLSET		0x382
+
+#define PESDR0_460SX_RCSSET		0x304
+#define PESDR1_460SX_RCSSET		0x344
+#define PESDR2_460SX_RCSSET		0x374
+/*
+ * Of the above, some are common offsets from the base
+ */
+#define PESDRn_UTLSET1		0x00
+#define PESDRn_UTLSET2		0x01
+#define PESDRn_DLPSET		0x02
+#define PESDRn_LOOP		0x03
+#define PESDRn_RCSSET		0x04
+#define PESDRn_RCSSTS		0x05
+
+/* 440spe only */
+#define PESDRn_440SPE_HSSL0SET1	0x06
+#define PESDRn_440SPE_HSSL0SET2	0x07
+#define PESDRn_440SPE_HSSL0STS	0x08
+#define PESDRn_440SPE_HSSL1SET1	0x09
+#define PESDRn_440SPE_HSSL1SET2	0x0a
+#define PESDRn_440SPE_HSSL1STS	0x0b
+#define PESDRn_440SPE_HSSL2SET1	0x0c
+#define PESDRn_440SPE_HSSL2SET2	0x0d
+#define PESDRn_440SPE_HSSL2STS	0x0e
+#define PESDRn_440SPE_HSSL3SET1	0x0f
+#define PESDRn_440SPE_HSSL3SET2	0x10
+#define PESDRn_440SPE_HSSL3STS	0x11
+
+/* 440spe port 0 only */
+#define PESDRn_440SPE_HSSL4SET1	0x12
+#define PESDRn_440SPE_HSSL4SET2	0x13
+#define PESDRn_440SPE_HSSL4STS	0x14
+#define PESDRn_440SPE_HSSL5SET1	0x15
+#define PESDRn_440SPE_HSSL5SET2	0x16
+#define PESDRn_440SPE_HSSL5STS	0x17
+#define PESDRn_440SPE_HSSL6SET1	0x18
+#define PESDRn_440SPE_HSSL6SET2	0x19
+#define PESDRn_440SPE_HSSL6STS	0x1a
+#define PESDRn_440SPE_HSSL7SET1	0x1b
+#define PESDRn_440SPE_HSSL7SET2	0x1c
+#define PESDRn_440SPE_HSSL7STS	0x1d
+
+/* 405ex only */
+#define PESDRn_405EX_PHYSET1	0x06
+#define PESDRn_405EX_PHYSET2	0x07
+#define PESDRn_405EX_PHYSTA	0x0c
+
+/*
+ * UTL register offsets
+ */
+#define PEUTL_PBCTL		0x00
+#define PEUTL_PBBSZ		0x20
+#define PEUTL_OPDBSZ		0x68
+#define PEUTL_IPHBSZ		0x70
+#define PEUTL_IPDBSZ		0x78
+#define PEUTL_OUTTR		0x90
+#define PEUTL_INTR		0x98
+#define PEUTL_PCTL		0xa0
+#define PEUTL_RCSTA		0xB0
+#define PEUTL_RCIRQEN		0xb8
+
+/*
+ * Config space register offsets
+ */
+#define PECFG_ECRTCTL		0x074
+
+#define PECFG_BAR0LMPA		0x210
+#define PECFG_BAR0HMPA		0x214
+#define PECFG_BAR1MPA		0x218
+#define PECFG_BAR2LMPA		0x220
+#define PECFG_BAR2HMPA		0x224
+
+#define PECFG_PIMEN		0x33c
+#define PECFG_PIM0LAL		0x340
+#define PECFG_PIM0LAH		0x344
+#define PECFG_PIM1LAL		0x348
+#define PECFG_PIM1LAH		0x34c
+#define PECFG_PIM01SAL		0x350
+#define PECFG_PIM01SAH		0x354
+
+#define PECFG_POM0LAL		0x380
+#define PECFG_POM0LAH		0x384
+#define PECFG_POM1LAL		0x388
+#define PECFG_POM1LAH		0x38c
+#define PECFG_POM2LAL		0x390
+#define PECFG_POM2LAH		0x394
+
+/* 460sx only */
+#define PECFG_460SX_DLLSTA		0x3f8
+
+/* 460sx Bit Mappings */
+#define PECFG_460SX_DLLSTA_LINKUP	0x00000010
+#define DCRO_PEGPL_460SX_OMR1MSKL_UOT	0x00000004
+
+/* PEGPL Bit Mappings */
+#define DCRO_PEGPL_OMRxMSKL_VAL	0x00000001
+#define DCRO_PEGPL_OMR1MSKL_UOT	0x00000002
+#define DCRO_PEGPL_OMR3MSKL_IO	0x00000002
+
+/* 476FPE */
+#define PCCFG_LCPA		0x270
+#define PECFG_TLDLP		0x3F8
+#define PECFG_TLDLP_LNKUP	0x00000008
+#define PECFG_TLDLP_PRESENT	0x00000010
+#define DCRO_PEGPL_476FPE_OMR1MSKL_UOT	0x00000004
+
+/* SDR Bit Mappings */
+#define PESDRx_RCSSET_HLDPLB	0x10000000
+#define PESDRx_RCSSET_RSTGU	0x01000000
+#define PESDRx_RCSSET_RDY	0x00100000
+#define PESDRx_RCSSET_RSTDL	0x00010000
+#define PESDRx_RCSSET_RSTPYN	0x00001000
+
+enum
+{
+	PTYPE_ENDPOINT		= 0x0,
+	PTYPE_LEGACY_ENDPOINT	= 0x1,
+	PTYPE_ROOT_PORT		= 0x4,
+
+	LNKW_X1			= 0x1,
+	LNKW_X4			= 0x4,
+	LNKW_X8			= 0x8
+};
+
+
+#endif /* __PPC4XX_PCI_H__ */
diff --git a/arch/powerpc/platforms/4xx/soc.c b/arch/powerpc/platforms/4xx/soc.c
new file mode 100644
index 000000000..ac1cd8b17
--- /dev/null
+++ b/arch/powerpc/platforms/4xx/soc.c
@@ -0,0 +1,217 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * IBM/AMCC PPC4xx SoC setup code
+ *
+ * Copyright 2008 DENX Software Engineering, Stefan Roese
+ *
+ * L2 cache routines cloned from arch/ppc/syslib/ibm440gx_common.c which is:
+ *   Eugene Surovegin or
+ *   Copyright (c) 2003 - 2006 Zultys Technologies
+ */
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+#include
+#include
+#include
+
+static u32 dcrbase_l2c;
+
+/*
+ * L2-cache
+ */
+
+/* Issue L2C diagnostic command */
+static inline u32 l2c_diag(u32 addr)
+{
+	mtdcr(dcrbase_l2c + DCRN_L2C0_ADDR, addr);
+	mtdcr(dcrbase_l2c + DCRN_L2C0_CMD, L2C_CMD_DIAG);
+	while (!(mfdcr(dcrbase_l2c + DCRN_L2C0_SR) & L2C_SR_CC))
+		;
+
+	return mfdcr(dcrbase_l2c + DCRN_L2C0_DATA);
+}
+
+static irqreturn_t l2c_error_handler(int irq, void *dev)
+{
+	u32 sr = mfdcr(dcrbase_l2c + DCRN_L2C0_SR);
+
+	if (sr & L2C_SR_CPE) {
+		/* Read cache trapped address */
+		u32 addr = l2c_diag(0x42000000);
+		printk(KERN_EMERG "L2C: Cache Parity Error, addr[16:26] = 0x%08x\n",
+		       addr);
+	}
+	if (sr & L2C_SR_TPE) {
+		/* Read tag trapped address */
+		u32 addr = l2c_diag(0x82000000) >> 16;
+		printk(KERN_EMERG "L2C: Tag Parity Error, addr[16:26] = 0x%08x\n",
+		       addr);
+	}
+
+	/* Clear parity errors */
+	if (sr & (L2C_SR_CPE | L2C_SR_TPE)) {
+		mtdcr(dcrbase_l2c + DCRN_L2C0_ADDR, 0);
+		mtdcr(dcrbase_l2c + DCRN_L2C0_CMD, L2C_CMD_CCP | L2C_CMD_CTE);
+	} else {
+		printk(KERN_EMERG "L2C: LRU error\n");
+	}
+
+	return IRQ_HANDLED;
+}
+
+static int __init ppc4xx_l2c_probe(void)
+{
+	struct device_node *np;
+	u32 r;
+	unsigned long flags;
+	int irq;
+	const u32 *dcrreg;
+	u32 dcrbase_isram;
+	int len;
+	const u32 *prop;
+	u32 l2_size;
+
+	np = of_find_compatible_node(NULL, NULL, "ibm,l2-cache");
+	if (!np)
+		return 0;
+
+	/* Get l2 cache size */
+	prop = of_get_property(np, "cache-size", NULL);
+	if (prop == NULL) {
+		printk(KERN_ERR "%pOF: Can't get cache-size!\n", np);
+		of_node_put(np);
+		return -ENODEV;
+	}
+	l2_size = prop[0];
+
+	/* Map DCRs */
+	dcrreg = of_get_property(np, "dcr-reg", &len);
+	if (!dcrreg || (len != 4 * sizeof(u32))) {
+		printk(KERN_ERR "%pOF: Can't get DCR register base !", np);
+		of_node_put(np);
+		return -ENODEV;
+	}
+	dcrbase_isram = dcrreg[0];
+	dcrbase_l2c = dcrreg[2];
+
+	/* Get and map irq number from device tree */
+	irq = irq_of_parse_and_map(np, 0);
+	if (!irq) {
+		printk(KERN_ERR "irq_of_parse_and_map failed\n");
+		of_node_put(np);
+		return -ENODEV;
+	}
+
+	/* Install error handler */
+	if (request_irq(irq, l2c_error_handler, 0, "L2C", 0) < 0) {
+		printk(KERN_ERR "Cannot install L2C error handler"
+		       ", cache is not enabled\n");
+		of_node_put(np);
+		return -ENODEV;
+	}
+
+	local_irq_save(flags);
+	asm volatile ("sync" ::: "memory");
+
+	/* Disable SRAM */
+	mtdcr(dcrbase_isram + DCRN_SRAM0_DPC,
+	      mfdcr(dcrbase_isram + DCRN_SRAM0_DPC) & ~SRAM_DPC_ENABLE);
+	mtdcr(dcrbase_isram + DCRN_SRAM0_SB0CR,
+	      mfdcr(dcrbase_isram + DCRN_SRAM0_SB0CR) & ~SRAM_SBCR_BU_MASK);
+	mtdcr(dcrbase_isram + DCRN_SRAM0_SB1CR,
+	      mfdcr(dcrbase_isram + DCRN_SRAM0_SB1CR) & ~SRAM_SBCR_BU_MASK);
+	mtdcr(dcrbase_isram + DCRN_SRAM0_SB2CR,
+	      mfdcr(dcrbase_isram + DCRN_SRAM0_SB2CR) & ~SRAM_SBCR_BU_MASK);
+	mtdcr(dcrbase_isram + DCRN_SRAM0_SB3CR,
+	      mfdcr(dcrbase_isram + DCRN_SRAM0_SB3CR) & ~SRAM_SBCR_BU_MASK);
+
+	/* Enable L2_MODE without ICU/DCU */
+	r = mfdcr(dcrbase_l2c + DCRN_L2C0_CFG) &
+		~(L2C_CFG_ICU | L2C_CFG_DCU | L2C_CFG_SS_MASK);
+	r |= L2C_CFG_L2M | L2C_CFG_SS_256;
+	mtdcr(dcrbase_l2c + DCRN_L2C0_CFG, r);
+
+	mtdcr(dcrbase_l2c + DCRN_L2C0_ADDR, 0);
+
+	/* Hardware Clear Command */
+	mtdcr(dcrbase_l2c + DCRN_L2C0_CMD, L2C_CMD_HCC);
+	while (!(mfdcr(dcrbase_l2c + DCRN_L2C0_SR) & L2C_SR_CC))
+		;
+
+	/* Clear Cache Parity and Tag Errors */
+	mtdcr(dcrbase_l2c + DCRN_L2C0_CMD, L2C_CMD_CCP | L2C_CMD_CTE);
+
+	/* Enable 64G snoop region starting at 0 */
+	r = mfdcr(dcrbase_l2c + DCRN_L2C0_SNP0) &
+		~(L2C_SNP_BA_MASK | L2C_SNP_SSR_MASK);
+	r |= L2C_SNP_SSR_32G | L2C_SNP_ESR;
+	mtdcr(dcrbase_l2c + DCRN_L2C0_SNP0, r);
+
+	r = mfdcr(dcrbase_l2c + DCRN_L2C0_SNP1) &
+		~(L2C_SNP_BA_MASK | L2C_SNP_SSR_MASK);
+	r |= 0x80000000 | L2C_SNP_SSR_32G | L2C_SNP_ESR;
+	mtdcr(dcrbase_l2c + DCRN_L2C0_SNP1, r);
+
+	asm volatile ("sync" ::: "memory");
+
+	/* Enable ICU/DCU ports */
+	r = mfdcr(dcrbase_l2c + DCRN_L2C0_CFG);
+	r &= ~(L2C_CFG_DCW_MASK | L2C_CFG_PMUX_MASK | L2C_CFG_PMIM
+	       | L2C_CFG_TPEI | L2C_CFG_CPEI | L2C_CFG_NAM | L2C_CFG_NBRM);
+	r |= L2C_CFG_ICU | L2C_CFG_DCU | L2C_CFG_TPC | L2C_CFG_CPC | L2C_CFG_FRAN
+	     | L2C_CFG_CPIM | L2C_CFG_TPIM | L2C_CFG_LIM | L2C_CFG_SMCM;
+
+	/* Check for 460EX/GT special handling */
+	if (of_device_is_compatible(np, "ibm,l2-cache-460ex") ||
+	    of_device_is_compatible(np, "ibm,l2-cache-460gt"))
+		r |= L2C_CFG_RDBW;
+
+	mtdcr(dcrbase_l2c + DCRN_L2C0_CFG, r);
+
+	asm volatile ("sync; isync" ::: "memory");
+	local_irq_restore(flags);
+
+	printk(KERN_INFO "%dk L2-cache enabled\n", l2_size >> 10);
+
+	of_node_put(np);
+	return 0;
+}
+arch_initcall(ppc4xx_l2c_probe);
+
+/*
+ * Apply a system reset. Alternatively a board specific value may be
+ * provided via the "reset-type" property in the cpu node.
+ */
+void ppc4xx_reset_system(char *cmd)
+{
+	struct device_node *np;
+	u32 reset_type = DBCR0_RST_SYSTEM;
+	const u32 *prop;
+
+	np = of_get_cpu_node(0, NULL);
+	if (np) {
+		prop = of_get_property(np, "reset-type", NULL);
+
+		/*
+		 * Check if property exists and if it is in range:
+		 * 1 - PPC4xx core reset
+		 * 2 - PPC4xx chip reset
+		 * 3 - PPC4xx system reset (default)
+		 */
+		if ((prop) && ((prop[0] >= 1) && (prop[0] <= 3)))
+			reset_type = prop[0] << 28;
+	}
+
+	mtspr(SPRN_DBCR0, mfspr(SPRN_DBCR0) | reset_type);
+
+	while (1)
+		;	/* Just in case the reset doesn't work */
+}
diff --git a/arch/powerpc/platforms/4xx/uic.c b/arch/powerpc/platforms/4xx/uic.c
new file mode 100644
index 000000000..d667ad039
--- /dev/null
+++ b/arch/powerpc/platforms/4xx/uic.c
@@ -0,0 +1,330 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * arch/powerpc/sysdev/uic.c
+ *
+ * IBM PowerPC 4xx Universal Interrupt Controller
+ *
+ * Copyright 2007 David Gibson , IBM Corporation.
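+ *
+ * Each UIC handles up to 32 interrupt sources through a small set of DCRs.
+ * Secondary UICs are cascaded into the primary one; see uic_init_tree().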
+ */
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+#define NR_UIC_INTS	32
+
+#define UIC_SR		0x0
+#define UIC_ER		0x2
+#define UIC_CR		0x3
+#define UIC_PR		0x4
+#define UIC_TR		0x5
+#define UIC_MSR		0x6
+#define UIC_VR		0x7
+#define UIC_VCR		0x8
+
+struct uic *primary_uic;
+
+struct uic {
+	int index;
+	int dcrbase;
+
+	raw_spinlock_t lock;
+
+	/* The remapper for this UIC */
+	struct irq_domain *irqhost;
+};
+
+static void uic_unmask_irq(struct irq_data *d)
+{
+	struct uic *uic = irq_data_get_irq_chip_data(d);
+	unsigned int src = irqd_to_hwirq(d);
+	unsigned long flags;
+	u32 er, sr;
+
+	sr = 1 << (31-src);
+	raw_spin_lock_irqsave(&uic->lock, flags);
+	/* ack level-triggered interrupts here */
+	if (irqd_is_level_type(d))
+		mtdcr(uic->dcrbase + UIC_SR, sr);
+	er = mfdcr(uic->dcrbase + UIC_ER);
+	er |= sr;
+	mtdcr(uic->dcrbase + UIC_ER, er);
+	raw_spin_unlock_irqrestore(&uic->lock, flags);
+}
+
+static void uic_mask_irq(struct irq_data *d)
+{
+	struct uic *uic = irq_data_get_irq_chip_data(d);
+	unsigned int src = irqd_to_hwirq(d);
+	unsigned long flags;
+	u32 er;
+
+	raw_spin_lock_irqsave(&uic->lock, flags);
+	er = mfdcr(uic->dcrbase + UIC_ER);
+	er &= ~(1 << (31 - src));
+	mtdcr(uic->dcrbase + UIC_ER, er);
+	raw_spin_unlock_irqrestore(&uic->lock, flags);
+}
+
+static void uic_ack_irq(struct irq_data *d)
+{
+	struct uic *uic = irq_data_get_irq_chip_data(d);
+	unsigned int src = irqd_to_hwirq(d);
+	unsigned long flags;
+
+	raw_spin_lock_irqsave(&uic->lock, flags);
+	mtdcr(uic->dcrbase + UIC_SR, 1 << (31-src));
+	raw_spin_unlock_irqrestore(&uic->lock, flags);
+}
+
+static void uic_mask_ack_irq(struct irq_data *d)
+{
+	struct uic *uic = irq_data_get_irq_chip_data(d);
+	unsigned int src = irqd_to_hwirq(d);
+	unsigned long flags;
+	u32 er, sr;
+
+	sr = 1 << (31-src);
+	raw_spin_lock_irqsave(&uic->lock, flags);
+	er = mfdcr(uic->dcrbase + UIC_ER);
+	er &= ~sr;
+	mtdcr(uic->dcrbase + UIC_ER, er);
+	/* On the UIC, acking (i.e. clearing the SR bit)
+	 * a level irq will have no effect if the interrupt
+	 * is still asserted by the device, even if
+	 * the interrupt is already masked. Therefore
+	 * we only ack the edge interrupts here, while
+	 * level interrupts are ack'ed after the actual
+	 * ISR call, in uic_unmask_irq()
+	 */
+	if (!irqd_is_level_type(d))
+		mtdcr(uic->dcrbase + UIC_SR, sr);
+	raw_spin_unlock_irqrestore(&uic->lock, flags);
+}
+
+static int uic_set_irq_type(struct irq_data *d, unsigned int flow_type)
+{
+	struct uic *uic = irq_data_get_irq_chip_data(d);
+	unsigned int src = irqd_to_hwirq(d);
+	unsigned long flags;
+	int trigger, polarity;
+	u32 tr, pr, mask;
+
+	switch (flow_type & IRQ_TYPE_SENSE_MASK) {
+	case IRQ_TYPE_NONE:
+		uic_mask_irq(d);
+		return 0;
+
+	case IRQ_TYPE_EDGE_RISING:
+		trigger = 1; polarity = 1;
+		break;
+	case IRQ_TYPE_EDGE_FALLING:
+		trigger = 1; polarity = 0;
+		break;
+	case IRQ_TYPE_LEVEL_HIGH:
+		trigger = 0; polarity = 1;
+		break;
+	case IRQ_TYPE_LEVEL_LOW:
+		trigger = 0; polarity = 0;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	mask = ~(1 << (31 - src));
+
+	raw_spin_lock_irqsave(&uic->lock, flags);
+	tr = mfdcr(uic->dcrbase + UIC_TR);
+	pr = mfdcr(uic->dcrbase + UIC_PR);
+	tr = (tr & mask) | (trigger << (31-src));
+	pr = (pr & mask) | (polarity << (31-src));
+
+	mtdcr(uic->dcrbase + UIC_PR, pr);
+	mtdcr(uic->dcrbase + UIC_TR, tr);
+	mtdcr(uic->dcrbase + UIC_SR, ~mask);
+
+	raw_spin_unlock_irqrestore(&uic->lock, flags);
+
+	return 0;
+}
+
+static struct irq_chip uic_irq_chip = {
+	.name		= "UIC",
+	.irq_unmask	= uic_unmask_irq,
+	.irq_mask	= uic_mask_irq,
+	.irq_mask_ack	= uic_mask_ack_irq,
+	.irq_ack	= uic_ack_irq,
+	.irq_set_type	= uic_set_irq_type,
+};
+
+static int uic_host_map(struct irq_domain *h, unsigned int virq,
+			irq_hw_number_t hw)
+{
+	struct uic *uic = h->host_data;
+
+	irq_set_chip_data(virq, uic);
+	/* Despite the name, handle_level_irq() works for both level
+	 * and edge irqs on UIC.  FIXME: check this is correct */
+	irq_set_chip_and_handler(virq, &uic_irq_chip, handle_level_irq);
+
+	/* Set default irq type */
+	irq_set_irq_type(virq, IRQ_TYPE_NONE);
+
+	return 0;
+}
+
+static const struct irq_domain_ops uic_host_ops = {
+	.map	= uic_host_map,
+	.xlate	= irq_domain_xlate_twocell,
+};
+
+static void uic_irq_cascade(struct irq_desc *desc)
+{
+	struct irq_chip *chip = irq_desc_get_chip(desc);
+	struct irq_data *idata = irq_desc_get_irq_data(desc);
+	struct uic *uic = irq_desc_get_handler_data(desc);
+	u32 msr;
+	int src;
+
+	raw_spin_lock(&desc->lock);
+	if (irqd_is_level_type(idata))
+		chip->irq_mask(idata);
+	else
+		chip->irq_mask_ack(idata);
+	raw_spin_unlock(&desc->lock);
+
+	msr = mfdcr(uic->dcrbase + UIC_MSR);
+	if (!msr) /* spurious interrupt */
+		goto uic_irq_ret;
+
+	src = 32 - ffs(msr);
+
+	generic_handle_domain_irq(uic->irqhost, src);
+
+uic_irq_ret:
+	raw_spin_lock(&desc->lock);
+	if (irqd_is_level_type(idata))
+		chip->irq_ack(idata);
+	if (!irqd_irq_disabled(idata) && chip->irq_unmask)
+		chip->irq_unmask(idata);
+	raw_spin_unlock(&desc->lock);
+}
+
+static struct uic * __init uic_init_one(struct device_node *node)
+{
+	struct uic *uic;
+	const u32 *indexp, *dcrreg;
+	int len;
+
+	BUG_ON(! of_device_is_compatible(node, "ibm,uic"));
+
+	uic = kzalloc(sizeof(*uic), GFP_KERNEL);
+	if (! uic)
+		return NULL; /* FIXME: panic? */
+
+	raw_spin_lock_init(&uic->lock);
+	indexp = of_get_property(node, "cell-index", &len);
+	if (!indexp || (len != sizeof(u32))) {
+		printk(KERN_ERR "uic: Device node %pOF has missing or invalid "
+		       "cell-index property\n", node);
+		return NULL;
+	}
+	uic->index = *indexp;
+
+	dcrreg = of_get_property(node, "dcr-reg", &len);
+	if (!dcrreg || (len != 2*sizeof(u32))) {
+		printk(KERN_ERR "uic: Device node %pOF has missing or invalid "
+		       "dcr-reg property\n", node);
+		return NULL;
+	}
+	uic->dcrbase = *dcrreg;
+
+	uic->irqhost = irq_domain_add_linear(node, NR_UIC_INTS, &uic_host_ops,
+					     uic);
+	if (! uic->irqhost)
+		return NULL; /* FIXME: panic? */
+
+	/* Start with all interrupts disabled, level and non-critical */
+	mtdcr(uic->dcrbase + UIC_ER, 0);
+	mtdcr(uic->dcrbase + UIC_CR, 0);
+	mtdcr(uic->dcrbase + UIC_TR, 0);
+	/* Clear any pending interrupts, in case the firmware left some */
+	mtdcr(uic->dcrbase + UIC_SR, 0xffffffff);
+
+	printk ("UIC%d (%d IRQ sources) at DCR 0x%x\n", uic->index,
+		NR_UIC_INTS, uic->dcrbase);
+
+	return uic;
+}
+
+void __init uic_init_tree(void)
+{
+	struct device_node *np;
+	struct uic *uic;
+	const u32 *interrupts;
+
+	/* First locate and initialize the top-level UIC */
+	for_each_compatible_node(np, NULL, "ibm,uic") {
+		interrupts = of_get_property(np, "interrupts", NULL);
+		if (!interrupts)
+			break;
+	}
+
+	BUG_ON(!np); /* uic_init_tree() assumes there's a UIC as the
+		      * top-level interrupt controller */
+	primary_uic = uic_init_one(np);
+	if (!primary_uic)
+		panic("Unable to initialize primary UIC %pOF\n", np);
+
+	irq_set_default_host(primary_uic->irqhost);
+	of_node_put(np);
+
+	/* Then scan again for cascaded UICs */
+	for_each_compatible_node(np, NULL, "ibm,uic") {
+		interrupts = of_get_property(np, "interrupts", NULL);
+		if (interrupts) {
+			/* Secondary UIC */
+			int cascade_virq;
+
+			uic = uic_init_one(np);
+			if (! uic)
+				panic("Unable to initialize a secondary UIC %pOF\n",
+				      np);
+
+			cascade_virq = irq_of_parse_and_map(np, 0);
+
+			irq_set_handler_data(cascade_virq, uic);
+			irq_set_chained_handler(cascade_virq, uic_irq_cascade);
+
+			/* FIXME: setup critical cascade?? */
+		}
+	}
+}
+
+/* Return an interrupt vector or 0 if no interrupt is pending. */
+unsigned int uic_get_irq(void)
+{
+	u32 msr;
+	int src;
+
+	BUG_ON(! primary_uic);
+
+	msr = mfdcr(primary_uic->dcrbase + UIC_MSR);
+	src = 32 - ffs(msr);
+
+	return irq_linear_revmap(primary_uic->irqhost, src);
+}
--
cgit v1.2.3