From 2c3c1048746a4622d8c89a29670120dc8fab93c4 Mon Sep 17 00:00:00 2001 From: Daniel Baumann Date: Sun, 7 Apr 2024 20:49:45 +0200 Subject: Adding upstream version 6.1.76. Signed-off-by: Daniel Baumann --- arch/mips/pci/Makefile | 52 + arch/mips/pci/fixup-ath79.c | 18 + arch/mips/pci/fixup-bcm63xx.c | 21 + arch/mips/pci/fixup-cobalt.c | 207 ++++ arch/mips/pci/fixup-fuloong2e.c | 221 ++++ arch/mips/pci/fixup-ip32.c | 52 + arch/mips/pci/fixup-lantiq.c | 27 + arch/mips/pci/fixup-lemote2f.c | 156 +++ arch/mips/pci/fixup-malta.c | 159 +++ arch/mips/pci/fixup-rbtx4927.c | 73 ++ arch/mips/pci/fixup-rc32434.c | 69 ++ arch/mips/pci/fixup-sb1250.c | 91 ++ arch/mips/pci/fixup-sni.c | 169 +++ arch/mips/pci/msi-octeon.c | 414 ++++++++ arch/mips/pci/ops-bcm63xx.c | 528 ++++++++++ arch/mips/pci/ops-bonito64.c | 148 +++ arch/mips/pci/ops-gt64xxx_pci0.c | 140 +++ arch/mips/pci/ops-lantiq.c | 113 +++ arch/mips/pci/ops-loongson2.c | 213 ++++ arch/mips/pci/ops-mace.c | 100 ++ arch/mips/pci/ops-msc.c | 134 +++ arch/mips/pci/ops-rc32434.c | 206 ++++ arch/mips/pci/ops-sni.c | 164 +++ arch/mips/pci/ops-tx4927.c | 524 ++++++++++ arch/mips/pci/pci-alchemy.c | 536 ++++++++++ arch/mips/pci/pci-ar2315.c | 521 ++++++++++ arch/mips/pci/pci-ar71xx.c | 401 ++++++++ arch/mips/pci/pci-ar724x.c | 447 ++++++++ arch/mips/pci/pci-bcm1480.c | 255 +++++ arch/mips/pci/pci-bcm1480ht.c | 203 ++++ arch/mips/pci/pci-bcm47xx.c | 98 ++ arch/mips/pci/pci-bcm63xx.c | 351 +++++++ arch/mips/pci/pci-bcm63xx.h | 33 + arch/mips/pci/pci-generic.c | 64 ++ arch/mips/pci/pci-ip27.c | 42 + arch/mips/pci/pci-ip32.c | 147 +++ arch/mips/pci/pci-lantiq.c | 249 +++++ arch/mips/pci/pci-lantiq.h | 16 + arch/mips/pci/pci-legacy.c | 307 ++++++ arch/mips/pci/pci-malta.c | 242 +++++ arch/mips/pci/pci-mt7620.c | 421 ++++++++ arch/mips/pci/pci-octeon.c | 711 +++++++++++++ arch/mips/pci/pci-rc32434.c | 231 +++++ arch/mips/pci/pci-rt2880.c | 278 +++++ arch/mips/pci/pci-rt3883.c | 582 +++++++++++ arch/mips/pci/pci-sb1250.c | 279 +++++ arch/mips/pci/pci-tx4927.c | 91 ++ arch/mips/pci/pci-tx4938.c | 142 +++ arch/mips/pci/pci-xtalk-bridge.c | 760 ++++++++++++++ arch/mips/pci/pci.c | 55 + arch/mips/pci/pcie-octeon.c | 2088 ++++++++++++++++++++++++++++++++++++++ 51 files changed, 13549 insertions(+) create mode 100644 arch/mips/pci/Makefile create mode 100644 arch/mips/pci/fixup-ath79.c create mode 100644 arch/mips/pci/fixup-bcm63xx.c create mode 100644 arch/mips/pci/fixup-cobalt.c create mode 100644 arch/mips/pci/fixup-fuloong2e.c create mode 100644 arch/mips/pci/fixup-ip32.c create mode 100644 arch/mips/pci/fixup-lantiq.c create mode 100644 arch/mips/pci/fixup-lemote2f.c create mode 100644 arch/mips/pci/fixup-malta.c create mode 100644 arch/mips/pci/fixup-rbtx4927.c create mode 100644 arch/mips/pci/fixup-rc32434.c create mode 100644 arch/mips/pci/fixup-sb1250.c create mode 100644 arch/mips/pci/fixup-sni.c create mode 100644 arch/mips/pci/msi-octeon.c create mode 100644 arch/mips/pci/ops-bcm63xx.c create mode 100644 arch/mips/pci/ops-bonito64.c create mode 100644 arch/mips/pci/ops-gt64xxx_pci0.c create mode 100644 arch/mips/pci/ops-lantiq.c create mode 100644 arch/mips/pci/ops-loongson2.c create mode 100644 arch/mips/pci/ops-mace.c create mode 100644 arch/mips/pci/ops-msc.c create mode 100644 arch/mips/pci/ops-rc32434.c create mode 100644 arch/mips/pci/ops-sni.c create mode 100644 arch/mips/pci/ops-tx4927.c create mode 100644 arch/mips/pci/pci-alchemy.c create mode 100644 arch/mips/pci/pci-ar2315.c create mode 100644 arch/mips/pci/pci-ar71xx.c create mode 100644 
arch/mips/pci/pci-ar724x.c create mode 100644 arch/mips/pci/pci-bcm1480.c create mode 100644 arch/mips/pci/pci-bcm1480ht.c create mode 100644 arch/mips/pci/pci-bcm47xx.c create mode 100644 arch/mips/pci/pci-bcm63xx.c create mode 100644 arch/mips/pci/pci-bcm63xx.h create mode 100644 arch/mips/pci/pci-generic.c create mode 100644 arch/mips/pci/pci-ip27.c create mode 100644 arch/mips/pci/pci-ip32.c create mode 100644 arch/mips/pci/pci-lantiq.c create mode 100644 arch/mips/pci/pci-lantiq.h create mode 100644 arch/mips/pci/pci-legacy.c create mode 100644 arch/mips/pci/pci-malta.c create mode 100644 arch/mips/pci/pci-mt7620.c create mode 100644 arch/mips/pci/pci-octeon.c create mode 100644 arch/mips/pci/pci-rc32434.c create mode 100644 arch/mips/pci/pci-rt2880.c create mode 100644 arch/mips/pci/pci-rt3883.c create mode 100644 arch/mips/pci/pci-sb1250.c create mode 100644 arch/mips/pci/pci-tx4927.c create mode 100644 arch/mips/pci/pci-tx4938.c create mode 100644 arch/mips/pci/pci-xtalk-bridge.c create mode 100644 arch/mips/pci/pci.c create mode 100644 arch/mips/pci/pcie-octeon.c (limited to 'arch/mips/pci') diff --git a/arch/mips/pci/Makefile b/arch/mips/pci/Makefile new file mode 100644 index 000000000..a6e9785b5 --- /dev/null +++ b/arch/mips/pci/Makefile @@ -0,0 +1,52 @@ +# SPDX-License-Identifier: GPL-2.0 +# +# Makefile for the PCI specific kernel interface routines under Linux. +# + +obj-y += pci.o +obj-$(CONFIG_PCI_DRIVERS_LEGACY)+= pci-legacy.o +obj-$(CONFIG_PCI_DRIVERS_GENERIC)+= pci-generic.o + +# +# PCI bus host bridge specific code +# +obj-$(CONFIG_MIPS_BONITO64) += ops-bonito64.o +obj-$(CONFIG_PCI_GT64XXX_PCI0) += ops-gt64xxx_pci0.o +obj-$(CONFIG_MIPS_MSC) += ops-msc.o +obj-$(CONFIG_PCI_TX4927) += ops-tx4927.o +obj-$(CONFIG_BCM47XX) += pci-bcm47xx.o +obj-$(CONFIG_BCM63XX) += pci-bcm63xx.o fixup-bcm63xx.o \ + ops-bcm63xx.o +obj-$(CONFIG_MIPS_ALCHEMY) += pci-alchemy.o +obj-$(CONFIG_PCI_AR2315) += pci-ar2315.o +obj-$(CONFIG_SOC_AR71XX) += pci-ar71xx.o +obj-$(CONFIG_PCI_AR724X) += pci-ar724x.o +obj-$(CONFIG_PCI_XTALK_BRIDGE) += pci-xtalk-bridge.o +# +# These are still pretty much in the old state, watch, go blind. 
+# +obj-$(CONFIG_ATH79) += fixup-ath79.o +obj-$(CONFIG_MIPS_COBALT) += fixup-cobalt.o +obj-$(CONFIG_LEMOTE_FULOONG2E) += fixup-fuloong2e.o ops-loongson2.o +obj-$(CONFIG_LEMOTE_MACH2F) += fixup-lemote2f.o ops-loongson2.o +obj-$(CONFIG_MIPS_MALTA) += fixup-malta.o pci-malta.o +obj-$(CONFIG_SGI_IP27) += pci-ip27.o +obj-$(CONFIG_SGI_IP32) += fixup-ip32.o ops-mace.o pci-ip32.o +obj-$(CONFIG_SIBYTE_SB1250) += fixup-sb1250.o pci-sb1250.o +obj-$(CONFIG_SIBYTE_BCM112X) += fixup-sb1250.o pci-sb1250.o +obj-$(CONFIG_SIBYTE_BCM1x80) += pci-bcm1480.o pci-bcm1480ht.o +obj-$(CONFIG_SNI_RM) += fixup-sni.o ops-sni.o +obj-$(CONFIG_LANTIQ) += fixup-lantiq.o +obj-$(CONFIG_PCI_LANTIQ) += pci-lantiq.o ops-lantiq.o +obj-$(CONFIG_SOC_MT7620) += pci-mt7620.o +obj-$(CONFIG_SOC_RT288X) += pci-rt2880.o +obj-$(CONFIG_SOC_RT3883) += pci-rt3883.o +obj-$(CONFIG_SOC_TX4927) += pci-tx4927.o +obj-$(CONFIG_SOC_TX4938) += pci-tx4938.o +obj-$(CONFIG_TOSHIBA_RBTX4927) += fixup-rbtx4927.o +obj-$(CONFIG_MIKROTIK_RB532) += pci-rc32434.o ops-rc32434.o fixup-rc32434.o +obj-$(CONFIG_CAVIUM_OCTEON_SOC) += pci-octeon.o pcie-octeon.o + +ifdef CONFIG_PCI_MSI +obj-$(CONFIG_CAVIUM_OCTEON_SOC) += msi-octeon.o +endif diff --git a/arch/mips/pci/fixup-ath79.c b/arch/mips/pci/fixup-ath79.c new file mode 100644 index 000000000..09a4ce534 --- /dev/null +++ b/arch/mips/pci/fixup-ath79.c @@ -0,0 +1,18 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (C) 2018 John Crispin + */ + +#include +//#include +#include + +int pcibios_plat_dev_init(struct pci_dev *dev) +{ + return PCIBIOS_SUCCESSFUL; +} + +int pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) +{ + return of_irq_parse_and_map_pci(dev, slot, pin); +} diff --git a/arch/mips/pci/fixup-bcm63xx.c b/arch/mips/pci/fixup-bcm63xx.c new file mode 100644 index 000000000..340863009 --- /dev/null +++ b/arch/mips/pci/fixup-bcm63xx.c @@ -0,0 +1,21 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 2008 Maxime Bizon + */ + +#include +#include +#include + +int pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) +{ + return bcm63xx_get_irq_number(IRQ_PCI); +} + +int pcibios_plat_dev_init(struct pci_dev *dev) +{ + return 0; +} diff --git a/arch/mips/pci/fixup-cobalt.c b/arch/mips/pci/fixup-cobalt.c new file mode 100644 index 000000000..00206ff52 --- /dev/null +++ b/arch/mips/pci/fixup-cobalt.c @@ -0,0 +1,207 @@ +/* + * Cobalt Qube/Raq PCI support + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 1995, 1996, 1997, 2002, 2003 by Ralf Baechle + * Copyright (C) 2001, 2002, 2003 by Liam Davies (ldavies@agile.tv) + */ +#include +#include +#include +#include + +#include +#include + +#include +#include + +/* + * PCI slot numbers + */ +#define COBALT_PCICONF_CPU 0x06 +#define COBALT_PCICONF_ETH0 0x07 +#define COBALT_PCICONF_RAQSCSI 0x08 +#define COBALT_PCICONF_VIA 0x09 +#define COBALT_PCICONF_PCISLOT 0x0A +#define COBALT_PCICONF_ETH1 0x0C + +/* + * The Cobalt board ID information. The boards have an ID number wired + * into the VIA that is available in the high nibble of register 94. 
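+ * VIA_COBALT_BRD_REG_to_ID() below simply extracts that high nibble, so e.g. a register value of 0x30 identifies board ID 3.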
+ */ +#define VIA_COBALT_BRD_ID_REG 0x94 +#define VIA_COBALT_BRD_REG_to_ID(reg) ((unsigned char)(reg) >> 4) + +/* + * Default value of PCI Class Code on GT64111 is PCI_CLASS_MEMORY_OTHER (0x0580) + * instead of PCI_CLASS_BRIDGE_HOST (0x0600). Galileo explained this choice in + * document "GT-64111 System Controller for RC4640, RM523X and VR4300 CPUs", + * section "6.5.3 PCI Autoconfiguration at RESET": + * + * Some PCs refuse to configure host bridges if they are found plugged into + * a PCI slot (ask the BIOS vendors why...). The "Memory Controller" Class + * Code does not cause a problem for these non-compliant BIOSes, so we used + * this as the default in the GT-64111. + * + * So fix the incorrect default value of PCI Class Code. More details are on: + * https://lore.kernel.org/r/20211102154831.xtrlgrmrizl5eidl@pali/ + * https://lore.kernel.org/r/20211102150201.GA11675@alpha.franken.de/ + */ +static void qube_raq_galileo_early_fixup(struct pci_dev *dev) +{ + if (dev->devfn == PCI_DEVFN(0, 0) && + (dev->class >> 8) == PCI_CLASS_MEMORY_OTHER) { + + dev->class = (PCI_CLASS_BRIDGE_HOST << 8) | (dev->class & 0xff); + + printk(KERN_INFO "Galileo: fixed bridge class\n"); + } +} + +DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_MARVELL, PCI_DEVICE_ID_MARVELL_GT64111, + qube_raq_galileo_early_fixup); + +static void qube_raq_via_bmIDE_fixup(struct pci_dev *dev) +{ + unsigned short cfgword; + unsigned char lt; + + /* Enable Bus Mastering and fast back to back. */ + pci_read_config_word(dev, PCI_COMMAND, &cfgword); + cfgword |= (PCI_COMMAND_FAST_BACK | PCI_COMMAND_MASTER); + pci_write_config_word(dev, PCI_COMMAND, cfgword); + + /* Enable both ide interfaces. ROM only enables primary one. */ + pci_write_config_byte(dev, 0x40, 0xb); + + /* Set latency timer to reasonable value. */ + pci_read_config_byte(dev, PCI_LATENCY_TIMER, &lt); + if (lt < 64) + pci_write_config_byte(dev, PCI_LATENCY_TIMER, 64); + pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE, 8); +} + +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C586_1, + qube_raq_via_bmIDE_fixup); + +static void qube_raq_galileo_fixup(struct pci_dev *dev) +{ + if (dev->devfn != PCI_DEVFN(0, 0)) + return; + + /* Fix PCI latency-timer and cache-line-size values in Galileo + * host bridge. + */ + pci_write_config_byte(dev, PCI_LATENCY_TIMER, 64); + pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE, 8); + + /* + * The code described by the comment below has been removed + * as it causes bus mastering by the Ethernet controllers + * to break under any kind of network load. We always set + * the retry timeouts to their maximum. + * + * --x--x--x--x--x--x--x--x--x--x--x--x--x--x--x--x--x--x--x--x-- + * + * On all machines prior to Q2, we had the STOP line disconnected + * from Galileo to VIA on PCI. The new Galileo does not function + * correctly unless we have it connected. + * + * Therefore we must set the disconnect/retry cycle values to + * something sensible when using the new Galileo. + */ + + printk(KERN_INFO "Galileo: revision %u\n", dev->revision); + +#if 0 + if (dev->revision >= 0x10) { + /* New Galileo, assumes PCI stop line to VIA is connected. */ + GT_WRITE(GT_PCI0_TOR_OFS, 0x4020); + } else if (dev->revision == 0x1 || dev->revision == 0x2) +#endif + { + signed int timeo; + /* XXX WE MUST DO THIS ELSE GALILEO LOCKS UP! -DaveM */ + timeo = GT_READ(GT_PCI0_TOR_OFS); + /* Old Galileo, assumes PCI STOP line to VIA is disconnected.
*/ + GT_WRITE(GT_PCI0_TOR_OFS, + (0xff << 16) | /* retry count */ + (0xff << 8) | /* timeout 1 */ + 0xff); /* timeout 0 */ + + /* enable PCI retry exceeded interrupt */ + GT_WRITE(GT_INTRMASK_OFS, GT_INTR_RETRYCTR0_MSK | GT_READ(GT_INTRMASK_OFS)); + } +} + +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL, PCI_DEVICE_ID_MARVELL_GT64111, + qube_raq_galileo_fixup); + +int cobalt_board_id; + +static void qube_raq_via_board_id_fixup(struct pci_dev *dev) +{ + u8 id; + int retval; + + retval = pci_read_config_byte(dev, VIA_COBALT_BRD_ID_REG, &id); + if (retval) { + panic("Cannot read board ID"); + return; + } + + cobalt_board_id = VIA_COBALT_BRD_REG_to_ID(id); + + printk(KERN_INFO "Cobalt board ID: %d\n", cobalt_board_id); +} + +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C586_0, + qube_raq_via_board_id_fixup); + +static char irq_tab_qube1[] = { + [COBALT_PCICONF_CPU] = 0, + [COBALT_PCICONF_ETH0] = QUBE1_ETH0_IRQ, + [COBALT_PCICONF_RAQSCSI] = SCSI_IRQ, + [COBALT_PCICONF_VIA] = 0, + [COBALT_PCICONF_PCISLOT] = PCISLOT_IRQ, + [COBALT_PCICONF_ETH1] = 0 +}; + +static char irq_tab_cobalt[] = { + [COBALT_PCICONF_CPU] = 0, + [COBALT_PCICONF_ETH0] = ETH0_IRQ, + [COBALT_PCICONF_RAQSCSI] = SCSI_IRQ, + [COBALT_PCICONF_VIA] = 0, + [COBALT_PCICONF_PCISLOT] = PCISLOT_IRQ, + [COBALT_PCICONF_ETH1] = ETH1_IRQ +}; + +static char irq_tab_raq2[] = { + [COBALT_PCICONF_CPU] = 0, + [COBALT_PCICONF_ETH0] = ETH0_IRQ, + [COBALT_PCICONF_RAQSCSI] = RAQ2_SCSI_IRQ, + [COBALT_PCICONF_VIA] = 0, + [COBALT_PCICONF_PCISLOT] = PCISLOT_IRQ, + [COBALT_PCICONF_ETH1] = ETH1_IRQ +}; + +int pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) +{ + if (cobalt_board_id <= COBALT_BRD_ID_QUBE1) + return irq_tab_qube1[slot]; + + if (cobalt_board_id == COBALT_BRD_ID_RAQ2) + return irq_tab_raq2[slot]; + + return irq_tab_cobalt[slot]; +} + +/* Do platform specific device initialization at pci_enable_device() time */ +int pcibios_plat_dev_init(struct pci_dev *dev) +{ + return 0; +} diff --git a/arch/mips/pci/fixup-fuloong2e.c b/arch/mips/pci/fixup-fuloong2e.c new file mode 100644 index 000000000..91aa92323 --- /dev/null +++ b/arch/mips/pci/fixup-fuloong2e.c @@ -0,0 +1,221 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Copyright (C) 2004 ICT CAS + * Author: Li xiaoyu, ICT CAS + * lixy@ict.ac.cn + * + * Copyright (C) 2007 Lemote, Inc. & Institute of Computing Technology + * Author: Fuxin Zhang, zhangfx@lemote.com + */ +#include +#include + +#include + +/* South bridge slot number is set by the pci probe process */ +static u8 sb_slot = 5; + +int pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) +{ + int irq = 0; + + if (slot == sb_slot) { + switch (PCI_FUNC(dev->devfn)) { + case 2: + irq = 10; + break; + case 3: + irq = 11; + break; + case 5: + irq = 9; + break; + } + } else { + irq = LOONGSON_IRQ_BASE + 25 + pin; + } + return irq; + +} + +/* Do platform specific device initialization at pci_enable_device() time */ +int pcibios_plat_dev_init(struct pci_dev *dev) +{ + return 0; +} + +static void loongson2e_nec_fixup(struct pci_dev *pdev) +{ + unsigned int val; + + /* Configures port 1, 2, 3, 4 to be validate*/ + pci_read_config_dword(pdev, 0xe0, &val); + pci_write_config_dword(pdev, 0xe0, (val & ~7) | 0x4); + + /* System clock is 48-MHz Oscillator. 
*/ + pci_write_config_dword(pdev, 0xe4, 1 << 5); +} + +static void loongson2e_686b_func0_fixup(struct pci_dev *pdev) +{ + unsigned char c; + + sb_slot = PCI_SLOT(pdev->devfn); + + printk(KERN_INFO "via686b fix: ISA bridge\n"); + + /* Enable I/O Recovery time */ + pci_write_config_byte(pdev, 0x40, 0x08); + + /* Enable ISA refresh */ + pci_write_config_byte(pdev, 0x41, 0x01); + + /* disable ISA line buffer */ + pci_write_config_byte(pdev, 0x45, 0x00); + + /* Gate INTR, and flush line buffer */ + pci_write_config_byte(pdev, 0x46, 0xe0); + + /* Disable PCI Delay Transaction, Enable EISA ports 4D0/4D1. */ + /* pci_write_config_byte(pdev, 0x47, 0x20); */ + + /* + * enable PCI Delay Transaction, Enable EISA ports 4D0/4D1. + * enable time-out timer + */ + pci_write_config_byte(pdev, 0x47, 0xe6); + + /* + * enable level trigger on pci irqs: 9,10,11,13 + * important! without this PCI interrupts won't work + */ + outb(0x2e, 0x4d1); + + /* 512 K PCI Decode */ + pci_write_config_byte(pdev, 0x48, 0x01); + + /* Wait for PGNT before grant to ISA Master/DMA */ + pci_write_config_byte(pdev, 0x4a, 0x84); + + /* + * Plug'n'Play + * + * Parallel DRQ 3, Floppy DRQ 2 (default) + */ + pci_write_config_byte(pdev, 0x50, 0x0e); + + /* + * IRQ Routing for Floppy and Parallel port + * + * IRQ 6 for floppy, IRQ 7 for parallel port + */ + pci_write_config_byte(pdev, 0x51, 0x76); + + /* IRQ Routing for serial ports (take IRQ 3 and 4) */ + pci_write_config_byte(pdev, 0x52, 0x34); + + /* All IRQ's level triggered. */ + pci_write_config_byte(pdev, 0x54, 0x00); + + /* route PIRQA-D irq */ + pci_write_config_byte(pdev, 0x55, 0x90); /* bit 7-4, PIRQA */ + pci_write_config_byte(pdev, 0x56, 0xba); /* bit 7-4, PIRQC; */ + /* 3-0, PIRQB */ + pci_write_config_byte(pdev, 0x57, 0xd0); /* bit 7-4, PIRQD */ + + /* enable function 5/6, audio/modem */ + pci_read_config_byte(pdev, 0x85, &c); + c &= ~(0x3 << 2); + pci_write_config_byte(pdev, 0x85, c); + + printk(KERN_INFO"via686b fix: ISA bridge done\n"); +} + +static void loongson2e_686b_func1_fixup(struct pci_dev *pdev) +{ + printk(KERN_INFO"via686b fix: IDE\n"); + + /* Modify IDE controller setup */ + pci_write_config_byte(pdev, PCI_LATENCY_TIMER, 48); + pci_write_config_byte(pdev, PCI_COMMAND, + PCI_COMMAND_IO | PCI_COMMAND_MEMORY | + PCI_COMMAND_MASTER); + pci_write_config_byte(pdev, 0x40, 0x0b); + /* legacy mode */ + pci_write_config_byte(pdev, 0x42, 0x09); + +#if 1/* play safe, otherwise we may see notebook's usb keyboard lockup */ + /* disable read prefetch/write post buffers */ + pci_write_config_byte(pdev, 0x41, 0x02); + + /* use 3/4 as fifo thresh hold */ + pci_write_config_byte(pdev, 0x43, 0x0a); + pci_write_config_byte(pdev, 0x44, 0x00); + + pci_write_config_byte(pdev, 0x45, 0x00); +#else + pci_write_config_byte(pdev, 0x41, 0xc2); + pci_write_config_byte(pdev, 0x43, 0x35); + pci_write_config_byte(pdev, 0x44, 0x1c); + + pci_write_config_byte(pdev, 0x45, 0x10); +#endif + + printk(KERN_INFO"via686b fix: IDE done\n"); +} + +static void loongson2e_686b_func2_fixup(struct pci_dev *pdev) +{ + /* irq routing */ + pci_write_config_byte(pdev, PCI_INTERRUPT_LINE, 10); +} + +static void loongson2e_686b_func3_fixup(struct pci_dev *pdev) +{ + /* irq routing */ + pci_write_config_byte(pdev, PCI_INTERRUPT_LINE, 11); +} + +static void loongson2e_686b_func5_fixup(struct pci_dev *pdev) +{ + unsigned int val; + unsigned char c; + + /* enable IO */ + pci_write_config_byte(pdev, PCI_COMMAND, + PCI_COMMAND_IO | PCI_COMMAND_MEMORY | + PCI_COMMAND_MASTER); + pci_read_config_dword(pdev, 0x4, &val); + 
pci_write_config_dword(pdev, 0x4, val | 1); + + /* route ac97 IRQ */ + pci_write_config_byte(pdev, 0x3c, 9); + + pci_read_config_byte(pdev, 0x8, &c); + + /* link control: enable link & SGD PCM output */ + pci_write_config_byte(pdev, 0x41, 0xcc); + + /* disable game port, FM, midi, sb, enable write to reg2c-2f */ + pci_write_config_byte(pdev, 0x42, 0x20); + + /* we are using Avance logic codec */ + pci_write_config_word(pdev, 0x2c, 0x1005); + pci_write_config_word(pdev, 0x2e, 0x4710); + pci_read_config_dword(pdev, 0x2c, &val); + + pci_write_config_byte(pdev, 0x42, 0x0); +} + +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C686, + loongson2e_686b_func0_fixup); +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C586_1, + loongson2e_686b_func1_fixup); +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C586_2, + loongson2e_686b_func2_fixup); +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C586_3, + loongson2e_686b_func3_fixup); +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C686_5, + loongson2e_686b_func5_fixup); +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NEC, PCI_DEVICE_ID_NEC_USB, + loongson2e_nec_fixup); diff --git a/arch/mips/pci/fixup-ip32.c b/arch/mips/pci/fixup-ip32.c new file mode 100644 index 000000000..d091ffc53 --- /dev/null +++ b/arch/mips/pci/fixup-ip32.c @@ -0,0 +1,52 @@ +// SPDX-License-Identifier: GPL-2.0 +#include +#include +#include +#include +/* + * O2 has up to 5 PCI devices connected into the MACE bridge. The device + * map looks like this: + * + * 0 aic7xxx 0 + * 1 aic7xxx 1 + * 2 expansion slot + * 3 N/C + * 4 N/C + */ + +#define SCSI0 MACEPCI_SCSI0_IRQ +#define SCSI1 MACEPCI_SCSI1_IRQ +#define INTA0 MACEPCI_SLOT0_IRQ +#define INTA1 MACEPCI_SLOT1_IRQ +#define INTA2 MACEPCI_SLOT2_IRQ +#define INTB MACEPCI_SHARED0_IRQ +#define INTC MACEPCI_SHARED1_IRQ +#define INTD MACEPCI_SHARED2_IRQ +static char irq_tab_mace[][5] = { + /* Dummy INT#A INT#B INT#C INT#D */ + {0, 0, 0, 0, 0}, /* This is placeholder row - never used */ + {0, SCSI0, SCSI0, SCSI0, SCSI0}, + {0, SCSI1, SCSI1, SCSI1, SCSI1}, + {0, INTA0, INTB, INTC, INTD}, + {0, INTA1, INTC, INTD, INTB}, + {0, INTA2, INTD, INTB, INTC}, +}; + + +/* + * Given a PCI slot number (a la PCI_SLOT(...)) and the interrupt pin of + * the device (1-4 => A-D), tell what irq to use. Note that we don't + * in theory have slots 4 and 5, and we never normally use the shared + * irqs. I suppose a device without a pin A will thank us for doing it + * right if there exists such a broken piece of crap. 
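 + * The lookup is simply irq_tab_mace[slot][pin]; in the table above, column 0 is the dummy no-pin case and columns 1-4 correspond to INT#A through INT#D.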
+ */ +int pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) +{ + return irq_tab_mace[slot][pin]; +} + +/* Do platform specific device initialization at pci_enable_device() time */ +int pcibios_plat_dev_init(struct pci_dev *dev) +{ + return 0; +} diff --git a/arch/mips/pci/fixup-lantiq.c b/arch/mips/pci/fixup-lantiq.c new file mode 100644 index 000000000..105569c1b --- /dev/null +++ b/arch/mips/pci/fixup-lantiq.c @@ -0,0 +1,27 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * + * Copyright (C) 2012 John Crispin + */ + +#include +#include + +int (*ltq_pci_plat_arch_init)(struct pci_dev *dev) = NULL; +int (*ltq_pci_plat_dev_init)(struct pci_dev *dev) = NULL; + +int pcibios_plat_dev_init(struct pci_dev *dev) +{ + if (ltq_pci_plat_arch_init) + return ltq_pci_plat_arch_init(dev); + + if (ltq_pci_plat_dev_init) + return ltq_pci_plat_dev_init(dev); + + return 0; +} + +int pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) +{ + return of_irq_parse_and_map_pci(dev, slot, pin); +} diff --git a/arch/mips/pci/fixup-lemote2f.c b/arch/mips/pci/fixup-lemote2f.c new file mode 100644 index 000000000..afafda03e --- /dev/null +++ b/arch/mips/pci/fixup-lemote2f.c @@ -0,0 +1,156 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Copyright (C) 2008 Lemote Technology + * Copyright (C) 2004 ICT CAS + * Author: Li xiaoyu, lixy@ict.ac.cn + * + * Copyright (C) 2007 Lemote, Inc. + * Author: Fuxin Zhang, zhangfx@lemote.com + */ +#include +#include + +#include +#include +#include + +/* PCI interrupt pins + * + * These should not be changed, or you should consider loongson2f interrupt + * register and your pci card dispatch + */ + +#define PCIA 4 +#define PCIB 5 +#define PCIC 6 +#define PCID 7 + +/* all the pci device has the PCIA pin, check the datasheet. 
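 + * Only the physical PCI slot (the PCI-SLOT entry below) fans out all four pins PCIA-PCID; each onboard device uses a single pin.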
*/ +static char irq_tab[][5] = { + /* INTA INTB INTC INTD */ + {0, 0, 0, 0, 0}, /* 11: Unused */ + {0, 0, 0, 0, 0}, /* 12: Unused */ + {0, 0, 0, 0, 0}, /* 13: Unused */ + {0, 0, 0, 0, 0}, /* 14: Unused */ + {0, 0, 0, 0, 0}, /* 15: Unused */ + {0, 0, 0, 0, 0}, /* 16: Unused */ + {0, PCIA, 0, 0, 0}, /* 17: RTL8110-0 */ + {0, PCIB, 0, 0, 0}, /* 18: RTL8110-1 */ + {0, PCIC, 0, 0, 0}, /* 19: SiI3114 */ + {0, PCID, 0, 0, 0}, /* 20: 3-ports nec usb */ + {0, PCIA, PCIB, PCIC, PCID}, /* 21: PCI-SLOT */ + {0, 0, 0, 0, 0}, /* 22: Unused */ + {0, 0, 0, 0, 0}, /* 23: Unused */ + {0, 0, 0, 0, 0}, /* 24: Unused */ + {0, 0, 0, 0, 0}, /* 25: Unused */ + {0, 0, 0, 0, 0}, /* 26: Unused */ + {0, 0, 0, 0, 0}, /* 27: Unused */ +}; + +int pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) +{ + int virq; + + if ((PCI_SLOT(dev->devfn) != PCI_IDSEL_CS5536) + && (PCI_SLOT(dev->devfn) < 32)) { + virq = irq_tab[slot][pin]; + printk(KERN_INFO "slot: %d, pin: %d, irq: %d\n", slot, pin, + virq + LOONGSON_IRQ_BASE); + if (virq != 0) + return LOONGSON_IRQ_BASE + virq; + else + return 0; + } else if (PCI_SLOT(dev->devfn) == PCI_IDSEL_CS5536) { /* cs5536 */ + switch (PCI_FUNC(dev->devfn)) { + case 2: + pci_write_config_byte(dev, PCI_INTERRUPT_LINE, + CS5536_IDE_INTR); + return CS5536_IDE_INTR; /* for IDE */ + case 3: + pci_write_config_byte(dev, PCI_INTERRUPT_LINE, + CS5536_ACC_INTR); + return CS5536_ACC_INTR; /* for AUDIO */ + case 4: /* for OHCI */ + case 5: /* for EHCI */ + case 6: /* for UDC */ + case 7: /* for OTG */ + pci_write_config_byte(dev, PCI_INTERRUPT_LINE, + CS5536_USB_INTR); + return CS5536_USB_INTR; + } + return dev->irq; + } else { + printk(KERN_INFO "strange PCI slot number.\n"); + return 0; + } +} + +/* Do platform specific device initialization at pci_enable_device() time */ +int pcibios_plat_dev_init(struct pci_dev *dev) +{ + return 0; +} + +/* CS5536 SPEC. 
fixup */ +static void loongson_cs5536_isa_fixup(struct pci_dev *pdev) +{ + /* the uart1 and uart2 interrupt in PIC is enabled as default */ + pci_write_config_dword(pdev, PCI_UART1_INT_REG, 1); + pci_write_config_dword(pdev, PCI_UART2_INT_REG, 1); +} + +static void loongson_cs5536_ide_fixup(struct pci_dev *pdev) +{ + /* setting the mutex pin as IDE function */ + pci_write_config_dword(pdev, PCI_IDE_CFG_REG, + CS5536_IDE_FLASH_SIGNATURE); +} + +static void loongson_cs5536_acc_fixup(struct pci_dev *pdev) +{ + /* enable the AUDIO interrupt in PIC */ + pci_write_config_dword(pdev, PCI_ACC_INT_REG, 1); + + pci_write_config_byte(pdev, PCI_LATENCY_TIMER, 0xc0); +} + +static void loongson_cs5536_ohci_fixup(struct pci_dev *pdev) +{ + /* enable the OHCI interrupt in PIC */ + /* THE OHCI, EHCI, UDC, OTG are shared with interrupt in PIC */ + pci_write_config_dword(pdev, PCI_OHCI_INT_REG, 1); +} + +static void loongson_cs5536_ehci_fixup(struct pci_dev *pdev) +{ + u32 hi, lo; + + /* Serial short detect enable */ + _rdmsr(USB_MSR_REG(USB_CONFIG), &hi, &lo); + _wrmsr(USB_MSR_REG(USB_CONFIG), (1 << 1) | (1 << 3), lo); + + /* setting the USB2.0 micro frame length */ + pci_write_config_dword(pdev, PCI_EHCI_FLADJ_REG, 0x2000); +} + +static void loongson_nec_fixup(struct pci_dev *pdev) +{ + unsigned int val; + + pci_read_config_dword(pdev, 0xe0, &val); + /* Only 2 port be used */ + pci_write_config_dword(pdev, 0xe0, (val & ~3) | 0x2); +} + +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CS5536_ISA, + loongson_cs5536_isa_fixup); +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CS5536_OHC, + loongson_cs5536_ohci_fixup); +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CS5536_EHC, + loongson_cs5536_ehci_fixup); +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CS5536_AUDIO, + loongson_cs5536_acc_fixup); +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CS5536_IDE, + loongson_cs5536_ide_fixup); +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NEC, PCI_DEVICE_ID_NEC_USB, + loongson_nec_fixup); diff --git a/arch/mips/pci/fixup-malta.c b/arch/mips/pci/fixup-malta.c new file mode 100644 index 000000000..8131e0ffe --- /dev/null +++ b/arch/mips/pci/fixup-malta.c @@ -0,0 +1,159 @@ +// SPDX-License-Identifier: GPL-2.0 +#include +#include +#include + +/* PCI interrupt pins */ +#define PCIA 1 +#define PCIB 2 +#define PCIC 3 +#define PCID 4 + +/* This table is filled in by interrogating the PIIX4 chip */ +static char pci_irq[5] = { +}; + +static char irq_tab[][5] = { + /* INTA INTB INTC INTD */ + {0, 0, 0, 0, 0 }, /* 0: GT64120 PCI bridge */ + {0, 0, 0, 0, 0 }, /* 1: Unused */ + {0, 0, 0, 0, 0 }, /* 2: Unused */ + {0, 0, 0, 0, 0 }, /* 3: Unused */ + {0, 0, 0, 0, 0 }, /* 4: Unused */ + {0, 0, 0, 0, 0 }, /* 5: Unused */ + {0, 0, 0, 0, 0 }, /* 6: Unused */ + {0, 0, 0, 0, 0 }, /* 7: Unused */ + {0, 0, 0, 0, 0 }, /* 8: Unused */ + {0, 0, 0, 0, 0 }, /* 9: Unused */ + {0, 0, 0, 0, PCID }, /* 10: PIIX4 USB */ + {0, PCIB, 0, 0, 0 }, /* 11: AMD 79C973 Ethernet */ + {0, PCIC, 0, 0, 0 }, /* 12: Crystal 4281 Sound */ + {0, 0, 0, 0, 0 }, /* 13: Unused */ + {0, 0, 0, 0, 0 }, /* 14: Unused */ + {0, 0, 0, 0, 0 }, /* 15: Unused */ + {0, 0, 0, 0, 0 }, /* 16: Unused */ + {0, 0, 0, 0, 0 }, /* 17: Bonito/SOC-it PCI Bridge*/ + {0, PCIA, PCIB, PCIC, PCID }, /* 18: PCI Slot 1 */ + {0, PCIB, PCIC, PCID, PCIA }, /* 19: PCI Slot 2 */ + {0, PCIC, PCID, PCIA, PCIB }, /* 20: PCI Slot 3 */ + {0, PCID, PCIA, PCIB, PCIC } /* 21: PCI Slot 4 */ +}; + +int pcibios_map_irq(const struct pci_dev *dev, u8 
slot, u8 pin) +{ + int virq; + virq = irq_tab[slot][pin]; + return pci_irq[virq]; +} + +/* Do platform specific device initialization at pci_enable_device() time */ +int pcibios_plat_dev_init(struct pci_dev *dev) +{ + return 0; +} + +static void malta_piix_func3_base_fixup(struct pci_dev *dev) +{ + /* Set a sane PM I/O base address */ + pci_write_config_word(dev, PIIX4_FUNC3_PMBA, 0x1000); + + /* Enable access to the PM I/O region */ + pci_write_config_byte(dev, PIIX4_FUNC3_PMREGMISC, + PIIX4_FUNC3_PMREGMISC_EN); +} + +DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82371AB_3, + malta_piix_func3_base_fixup); + +static void malta_piix_func0_fixup(struct pci_dev *pdev) +{ + unsigned char reg_val; + u32 reg_val32; + u16 reg_val16; + /* PIIX PIRQC[A:D] irq mappings */ + static int piixirqmap[PIIX4_FUNC0_PIRQRC_IRQ_ROUTING_MAX] = { + 0, 0, 0, 3, + 4, 5, 6, 7, + 0, 9, 10, 11, + 12, 0, 14, 15 + }; + int i; + + /* Interrogate PIIX4 to get PCI IRQ mapping */ + for (i = 0; i <= 3; i++) { + pci_read_config_byte(pdev, PIIX4_FUNC0_PIRQRC+i, &reg_val); + if (reg_val & PIIX4_FUNC0_PIRQRC_IRQ_ROUTING_DISABLE) + pci_irq[PCIA+i] = 0; /* Disabled */ + else + pci_irq[PCIA+i] = piixirqmap[reg_val & + PIIX4_FUNC0_PIRQRC_IRQ_ROUTING_MASK]; + } + + /* Done by YAMON 2.00 onwards */ + if (PCI_SLOT(pdev->devfn) == 10) { + /* + * Set top of main memory accessible by ISA or DMA + * devices to 16 Mb. + */ + pci_read_config_byte(pdev, PIIX4_FUNC0_TOM, &reg_val); + pci_write_config_byte(pdev, PIIX4_FUNC0_TOM, reg_val | + PIIX4_FUNC0_TOM_TOP_OF_MEMORY_MASK); + } + + /* Mux SERIRQ to its pin */ + pci_read_config_dword(pdev, PIIX4_FUNC0_GENCFG, &reg_val32); + pci_write_config_dword(pdev, PIIX4_FUNC0_GENCFG, + reg_val32 | PIIX4_FUNC0_GENCFG_SERIRQ); + + /* Enable SERIRQ */ + pci_read_config_byte(pdev, PIIX4_FUNC0_SERIRQC, &reg_val); + reg_val |= PIIX4_FUNC0_SERIRQC_EN | PIIX4_FUNC0_SERIRQC_CONT; + pci_write_config_byte(pdev, PIIX4_FUNC0_SERIRQC, reg_val); + + /* Enable response to special cycles */ + pci_read_config_word(pdev, PCI_COMMAND, &reg_val16); + pci_write_config_word(pdev, PCI_COMMAND, + reg_val16 | PCI_COMMAND_SPECIAL); +} + +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82371AB_0, + malta_piix_func0_fixup); + +static void malta_piix_func1_fixup(struct pci_dev *pdev) +{ + unsigned char reg_val; + + /* Done by YAMON 2.02 onwards */ + if (PCI_SLOT(pdev->devfn) == 10) { + /* + * IDE Decode enable.
+ */ + pci_read_config_byte(pdev, PIIX4_FUNC1_IDETIM_PRIMARY_HI, + &reg_val); + pci_write_config_byte(pdev, PIIX4_FUNC1_IDETIM_PRIMARY_HI, + reg_val|PIIX4_FUNC1_IDETIM_PRIMARY_HI_IDE_DECODE_EN); + pci_read_config_byte(pdev, PIIX4_FUNC1_IDETIM_SECONDARY_HI, + &reg_val); + pci_write_config_byte(pdev, PIIX4_FUNC1_IDETIM_SECONDARY_HI, + reg_val|PIIX4_FUNC1_IDETIM_SECONDARY_HI_IDE_DECODE_EN); + } +} + +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82371AB, + malta_piix_func1_fixup); + +/* Enable PCI 2.1 compatibility in PIIX4 */ +static void quirk_dlcsetup(struct pci_dev *dev) +{ + u8 odlc, ndlc; + + (void) pci_read_config_byte(dev, PIIX4_FUNC0_DLC, &odlc); + /* Enable passive releases and delayed transaction */ + ndlc = odlc | PIIX4_FUNC0_DLC_USBPR_EN | + PIIX4_FUNC0_DLC_PASSIVE_RELEASE_EN | + PIIX4_FUNC0_DLC_DELAYED_TRANSACTION_EN; + (void) pci_write_config_byte(dev, PIIX4_FUNC0_DLC, ndlc); +} + +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82371AB_0, + quirk_dlcsetup); diff --git a/arch/mips/pci/fixup-rbtx4927.c b/arch/mips/pci/fixup-rbtx4927.c new file mode 100644 index 000000000..d6aaed1d6 --- /dev/null +++ b/arch/mips/pci/fixup-rbtx4927.c @@ -0,0 +1,73 @@ +/* + * + * BRIEF MODULE DESCRIPTION + * Board specific pci fixups for the Toshiba rbtx4927 + * + * Copyright 2001 MontaVista Software Inc. + * Author: MontaVista Software, Inc. + * ppopov@mvista.com or source@mvista.com + * + * Copyright (C) 2000-2001 Toshiba Corporation + * + * Copyright (C) 2004 MontaVista Software Inc. + * Author: Manish Lachwani (mlachwani@mvista.com) + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN + * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF + * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON + * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write to the Free Software Foundation, Inc., + * 675 Mass Ave, Cambridge, MA 02139, USA.
+ */ +#include +#include +#include + +int rbtx4927_pci_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) +{ + unsigned char irq = pin; + + /* IRQ rotation */ + irq--; /* 0-3 */ + if (slot == TX4927_PCIC_IDSEL_AD_TO_SLOT(23)) { + /* PCI CardSlot (IDSEL=A23) */ + /* PCIA => PCIA */ + irq = (irq + 0 + slot) % 4; + } else { + /* PCI Backplane */ + if (txx9_pci_option & TXX9_PCI_OPT_PICMG) + irq = (irq + 33 - slot) % 4; + else + irq = (irq + 3 + slot) % 4; + } + irq++; /* 1-4 */ + + switch (irq) { + case 1: + irq = RBTX4927_IRQ_IOC_PCIA; + break; + case 2: + irq = RBTX4927_IRQ_IOC_PCIB; + break; + case 3: + irq = RBTX4927_IRQ_IOC_PCIC; + break; + case 4: + irq = RBTX4927_IRQ_IOC_PCID; + break; + } + return irq; +} diff --git a/arch/mips/pci/fixup-rc32434.c b/arch/mips/pci/fixup-rc32434.c new file mode 100644 index 000000000..7fcafd5da --- /dev/null +++ b/arch/mips/pci/fixup-rc32434.c @@ -0,0 +1,69 @@ +/* + * Copyright 2001 MontaVista Software Inc. + * Author: MontaVista Software, Inc. + * stevel@mvista.com or source@mvista.com + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN + * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF + * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON + * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write to the Free Software Foundation, Inc., + * 675 Mass Ave, Cambridge, MA 02139, USA. 
+ */ + +#include +#include +#include + +#include +#include + +static int irq_map[2][12] = { + {0, 0, 2, 3, 2, 3, 0, 0, 0, 0, 0, 1}, + {0, 0, 1, 3, 0, 2, 1, 3, 0, 2, 1, 3} +}; + +int pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) +{ + int irq = 0; + + if (dev->bus->number < 2 && PCI_SLOT(dev->devfn) < 12) + irq = irq_map[dev->bus->number][PCI_SLOT(dev->devfn)]; + + return irq + GROUP4_IRQ_BASE + 4; +} + +static void rc32434_pci_early_fixup(struct pci_dev *dev) +{ + if (PCI_SLOT(dev->devfn) == 6 && dev->bus->number == 0) { + /* disable prefetched memory range */ + pci_write_config_word(dev, PCI_PREF_MEMORY_LIMIT, 0); + pci_write_config_word(dev, PCI_PREF_MEMORY_BASE, 0x10); + + pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE, 4); + } +} + +/* + * The fixup applies to both the IDT and VIA devices present on the board + */ +DECLARE_PCI_FIXUP_HEADER(PCI_ANY_ID, PCI_ANY_ID, rc32434_pci_early_fixup); + +/* Do platform specific device initialization at pci_enable_device() time */ +int pcibios_plat_dev_init(struct pci_dev *dev) +{ + return 0; +} diff --git a/arch/mips/pci/fixup-sb1250.c b/arch/mips/pci/fixup-sb1250.c new file mode 100644 index 000000000..3f914c33b --- /dev/null +++ b/arch/mips/pci/fixup-sb1250.c @@ -0,0 +1,91 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Copyright (C) 2004, 2006 MIPS Technologies, Inc. All rights reserved. + * Author: Maciej W. Rozycki + * Copyright (C) 2018 Maciej W. Rozycki + */ + +#include +#include + +/* + * Set the BCM1250, etc. PCI host bridge's TRDY timeout + * to the finite max. + */ +static void quirk_sb1250_pci(struct pci_dev *dev) +{ + pci_write_config_byte(dev, 0x40, 0xff); +} +DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SIBYTE, PCI_DEVICE_ID_BCM1250_PCI, + quirk_sb1250_pci); + +/* + * The BCM1250, etc. PCI host bridge does not support DAC on its 32-bit + * bus, so we set the bus's DMA limit accordingly. However the HT link + * down the artificial PCI-HT bridge supports 40-bit addressing and the + * SP1011 HT-PCI bridge downstream supports both DAC and a 64-bit bus + * width, so we record the PCI-HT bridge's secondary and subordinate bus + * numbers and do not set the limit for devices present in the inclusive + * range of those. + */ +struct sb1250_bus_dma_limit_exclude { + bool set; + unsigned char start; + unsigned char end; +}; + +static int sb1250_bus_dma_limit(struct pci_dev *dev, void *data) +{ + struct sb1250_bus_dma_limit_exclude *exclude = data; + bool exclude_this; + bool ht_bridge; + + exclude_this = exclude->set && (dev->bus->number >= exclude->start && + dev->bus->number <= exclude->end); + ht_bridge = !exclude->set && (dev->vendor == PCI_VENDOR_ID_SIBYTE && + dev->device == PCI_DEVICE_ID_BCM1250_HT); + + if (exclude_this) { + dev_dbg(&dev->dev, "not disabling DAC for device"); + } else if (ht_bridge) { + exclude->start = dev->subordinate->number; + exclude->end = pci_bus_max_busnr(dev->subordinate); + exclude->set = true; + dev_dbg(&dev->dev, "not disabling DAC for [bus %02x-%02x]", + exclude->start, exclude->end); + } else { + dev_dbg(&dev->dev, "disabling DAC for device"); + dev->dev.bus_dma_limit = DMA_BIT_MASK(32); + } + + return 0; +} + +static void quirk_sb1250_pci_dac(struct pci_dev *dev) +{ + struct sb1250_bus_dma_limit_exclude exclude = { .set = false }; + + pci_walk_bus(dev->bus, sb1250_bus_dma_limit, &exclude); +} +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_SIBYTE, PCI_DEVICE_ID_BCM1250_PCI, + quirk_sb1250_pci_dac); + +/* + * The BCM1250, etc. PCI/HT bridge reports as a host bridge. 
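 + * The quirk below rewrites its class code to PCI_CLASS_BRIDGE_PCI_NORMAL so the PCI core handles it as an ordinary PCI-to-PCI bridge.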
+ */ +static void quirk_sb1250_ht(struct pci_dev *dev) +{ + dev->class = PCI_CLASS_BRIDGE_PCI_NORMAL; +} +DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SIBYTE, PCI_DEVICE_ID_BCM1250_HT, + quirk_sb1250_ht); + +/* + * Set the SP1011 HT/PCI bridge's TRDY timeout to the finite max. + */ +static void quirk_sp1011(struct pci_dev *dev) +{ + pci_write_config_byte(dev, 0x64, 0xff); +} +DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SIPACKETS, PCI_DEVICE_ID_SP1011, + quirk_sp1011); diff --git a/arch/mips/pci/fixup-sni.c b/arch/mips/pci/fixup-sni.c new file mode 100644 index 000000000..de012f8bd --- /dev/null +++ b/arch/mips/pci/fixup-sni.c @@ -0,0 +1,169 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * SNI specific PCI support for RM200/RM300. + * + * Copyright (C) 1997 - 2000, 2003, 04 Ralf Baechle (ralf@linux-mips.org) + */ +#include +#include +#include + +#include +#include + +#include + +/* + * PCIMT Shortcuts ... + */ +#define SCSI PCIMT_IRQ_SCSI +#define ETH PCIMT_IRQ_ETHERNET +#define INTA PCIMT_IRQ_INTA +#define INTB PCIMT_IRQ_INTB +#define INTC PCIMT_IRQ_INTC +#define INTD PCIMT_IRQ_INTD + +/* + * Device 0: PCI EISA Bridge (directly routed) + * Device 1: NCR53c810 SCSI (directly routed) + * Device 2: PCnet32 Ethernet (directly routed) + * Device 3: VGA (routed to INTB) + * Device 4: Unused + * Device 5: Slot 2 + * Device 6: Slot 3 + * Device 7: Slot 4 + * + * Documentation says the VGA is device 5 and device 3 is unused but that + * seem to be a documentation error. At least on my RM200C the Cirrus + * Logic CL-GD5434 VGA is device 3. + */ +static char irq_tab_rm200[8][5] = { + /* INTA INTB INTC INTD */ + { 0, 0, 0, 0, 0 }, /* EISA bridge */ + { SCSI, SCSI, SCSI, SCSI, SCSI }, /* SCSI */ + { ETH, ETH, ETH, ETH, ETH }, /* Ethernet */ + { INTB, INTB, INTB, INTB, INTB }, /* VGA */ + { 0, 0, 0, 0, 0 }, /* Unused */ + { 0, INTB, INTC, INTD, INTA }, /* Slot 2 */ + { 0, INTC, INTD, INTA, INTB }, /* Slot 3 */ + { 0, INTD, INTA, INTB, INTC }, /* Slot 4 */ +}; + +/* + * In Revision D of the RM300 Device 2 has become a normal purpose Slot 1 + * + * The VGA card is optional for RM300 systems. + */ +static char irq_tab_rm300d[8][5] = { + /* INTA INTB INTC INTD */ + { 0, 0, 0, 0, 0 }, /* EISA bridge */ + { SCSI, SCSI, SCSI, SCSI, SCSI }, /* SCSI */ + { 0, INTC, INTD, INTA, INTB }, /* Slot 1 */ + { INTB, INTB, INTB, INTB, INTB }, /* VGA */ + { 0, 0, 0, 0, 0 }, /* Unused */ + { 0, INTB, INTC, INTD, INTA }, /* Slot 2 */ + { 0, INTC, INTD, INTA, INTB }, /* Slot 3 */ + { 0, INTD, INTA, INTB, INTC }, /* Slot 4 */ +}; + +static char irq_tab_rm300e[5][5] = { + /* INTA INTB INTC INTD */ + { 0, 0, 0, 0, 0 }, /* HOST bridge */ + { SCSI, SCSI, SCSI, SCSI, SCSI }, /* SCSI */ + { 0, INTC, INTD, INTA, INTB }, /* Bridge/i960 */ + { 0, INTD, INTA, INTB, INTC }, /* Slot 1 */ + { 0, INTA, INTB, INTC, INTD }, /* Slot 2 */ +}; +#undef SCSI +#undef ETH +#undef INTA +#undef INTB +#undef INTC +#undef INTD + + +/* + * PCIT Shortcuts ... 
+ */ +#define SCSI0 PCIT_IRQ_SCSI0 +#define SCSI1 PCIT_IRQ_SCSI1 +#define ETH PCIT_IRQ_ETHERNET +#define INTA PCIT_IRQ_INTA +#define INTB PCIT_IRQ_INTB +#define INTC PCIT_IRQ_INTC +#define INTD PCIT_IRQ_INTD + +static char irq_tab_pcit[13][5] = { + /* INTA INTB INTC INTD */ + { 0, 0, 0, 0, 0 }, /* HOST bridge */ + { SCSI0, SCSI0, SCSI0, SCSI0, SCSI0 }, /* SCSI */ + { SCSI1, SCSI1, SCSI1, SCSI1, SCSI1 }, /* SCSI */ + { ETH, ETH, ETH, ETH, ETH }, /* Ethernet */ + { 0, INTA, INTB, INTC, INTD }, /* PCI-PCI bridge */ + { 0, 0, 0, 0, 0 }, /* Unused */ + { 0, 0, 0, 0, 0 }, /* Unused */ + { 0, 0, 0, 0, 0 }, /* Unused */ + { 0, INTA, INTB, INTC, INTD }, /* Slot 1 */ + { 0, INTB, INTC, INTD, INTA }, /* Slot 2 */ + { 0, INTC, INTD, INTA, INTB }, /* Slot 3 */ + { 0, INTD, INTA, INTB, INTC }, /* Slot 4 */ + { 0, INTA, INTB, INTC, INTD }, /* Slot 5 */ +}; + +static char irq_tab_pcit_cplus[13][5] = { + /* INTA INTB INTC INTD */ + { 0, 0, 0, 0, 0 }, /* HOST bridge */ + { 0, INTB, INTC, INTD, INTA }, /* PCI Slot 9 */ + { 0, 0, 0, 0, 0 }, /* PCI-EISA */ + { 0, 0, 0, 0, 0 }, /* Unused */ + { 0, INTA, INTB, INTC, INTD }, /* PCI-PCI bridge */ + { 0, INTB, INTC, INTD, INTA }, /* fixup */ +}; + +static inline int is_rm300_revd(void) +{ + unsigned char csmsr = *(volatile unsigned char *)PCIMT_CSMSR; + + return (csmsr & 0xa0) == 0x20; +} + +int pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) +{ + switch (sni_brd_type) { + case SNI_BRD_PCI_TOWER_CPLUS: + if (slot == 4) { + /* + * SNI messed up interrupt wiring for onboard + * PCI bus 1; we need to fix this up here + */ + while (dev && dev->bus->number != 1) + dev = dev->bus->self; + if (dev && dev->devfn >= PCI_DEVFN(4, 0)) + slot = 5; + } + return irq_tab_pcit_cplus[slot][pin]; + case SNI_BRD_PCI_TOWER: + return irq_tab_pcit[slot][pin]; + + case SNI_BRD_PCI_MTOWER: + if (is_rm300_revd()) + return irq_tab_rm300d[slot][pin]; + fallthrough; + case SNI_BRD_PCI_DESKTOP: + return irq_tab_rm200[slot][pin]; + + case SNI_BRD_PCI_MTOWER_CPLUS: + return irq_tab_rm300e[slot][pin]; + } + + return 0; +} + +/* Do platform specific device initialization at pci_enable_device() time */ +int pcibios_plat_dev_init(struct pci_dev *dev) +{ + return 0; +} diff --git a/arch/mips/pci/msi-octeon.c b/arch/mips/pci/msi-octeon.c new file mode 100644 index 000000000..abc3b61bf --- /dev/null +++ b/arch/mips/pci/msi-octeon.c @@ -0,0 +1,414 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 2005-2009, 2010 Cavium Networks + */ +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include + +/* + * Each bit in msi_free_irq_bitmask represents a MSI interrupt that is + * in use. + */ +static u64 msi_free_irq_bitmask[4]; + +/* + * Each bit in msi_multiple_irq_bitmask tells that the device using + * this bit in msi_free_irq_bitmask is also using the next bit. This + * is used so we can disable all of the MSI interrupts when a device + * uses multiple. + */ +static u64 msi_multiple_irq_bitmask[4]; + +/* + * This lock controls updates to msi_free_irq_bitmask and + * msi_multiple_irq_bitmask. + */ +static DEFINE_SPINLOCK(msi_free_irq_bitmask_lock); + +/* + * Number of MSI IRQs used. This variable is set up in + * the module init time. 
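 + * (256 when the Octeon acts as a PCIe host, 64 in the legacy PCI host case; see octeon_msi_initialize() at the end of this file.)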
+ */ +static int msi_irq_size; + +/** + * arch_setup_msi_irq() - setup MSI IRQs for a device + * @dev: Device requesting MSI interrupts + * @desc: MSI descriptor + * + * Called when a driver requests MSI interrupts instead of the + * legacy INT A-D. This routine will allocate multiple interrupts + * for MSI devices that support them. A device can override this by + * programming the MSI control bits [6:4] before calling + * pci_enable_msi(). + * + * Return: %0 on success, non-%0 on error. + */ +int arch_setup_msi_irq(struct pci_dev *dev, struct msi_desc *desc) +{ + struct msi_msg msg; + u16 control; + int configured_private_bits; + int request_private_bits; + int irq = 0; + int irq_step; + u64 search_mask; + int index; + + if (desc->pci.msi_attrib.is_msix) + return -EINVAL; + + /* + * Read the MSI config to figure out how many IRQs this device + * wants. Most devices only want 1, which will give + * configured_private_bits and request_private_bits equal 0. + */ + pci_read_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, &control); + + /* + * If the number of private bits has been configured then use + * that value instead of the requested number. This gives the + * driver the chance to override the number of interrupts + * before calling pci_enable_msi(). + */ + configured_private_bits = (control & PCI_MSI_FLAGS_QSIZE) >> 4; + if (configured_private_bits == 0) { + /* Nothing is configured, so use the hardware requested size */ + request_private_bits = (control & PCI_MSI_FLAGS_QMASK) >> 1; + } else { + /* + * Use the number of configured bits, assuming the + * driver wanted to override the hardware request + * value. + */ + request_private_bits = configured_private_bits; + } + + /* + * The PCI 2.3 spec mandates that there are at most 32 + * interrupts. If this device asks for more, only give it one. + */ + if (request_private_bits > 5) + request_private_bits = 0; + +try_only_one: + /* + * The IRQs have to be aligned on a power of two based on the + * number being requested. + */ + irq_step = 1 << request_private_bits; + + /* Mask with one bit for each IRQ */ + search_mask = (1 << irq_step) - 1; + + /* + * We're going to search msi_free_irq_bitmask_lock for zero + * bits. This represents an MSI interrupt number that isn't in + * use. 
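 + * Because a block of 2^n MSIs must be aligned to its size, the scan below advances in steps of irq_step and tests irq_step consecutive bits (search_mask) at each step.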
+ */ + spin_lock(&msi_free_irq_bitmask_lock); + for (index = 0; index < msi_irq_size/64; index++) { + for (irq = 0; irq < 64; irq += irq_step) { + if ((msi_free_irq_bitmask[index] & (search_mask << irq)) == 0) { + msi_free_irq_bitmask[index] |= search_mask << irq; + msi_multiple_irq_bitmask[index] |= (search_mask >> 1) << irq; + goto msi_irq_allocated; + } + } + } +msi_irq_allocated: + spin_unlock(&msi_free_irq_bitmask_lock); + + /* Make sure the search for available interrupts didn't fail */ + if (irq >= 64) { + if (request_private_bits) { + pr_err("arch_setup_msi_irq: Unable to find %d free interrupts, trying just one", + 1 << request_private_bits); + request_private_bits = 0; + goto try_only_one; + } else + panic("arch_setup_msi_irq: Unable to find a free MSI interrupt"); + } + + /* MSI interrupts start at logical IRQ OCTEON_IRQ_MSI_BIT0 */ + irq += index*64; + irq += OCTEON_IRQ_MSI_BIT0; + + switch (octeon_dma_bar_type) { + case OCTEON_DMA_BAR_TYPE_SMALL: + /* When not using big bar, Bar 0 is based at 128MB */ + msg.address_lo = + ((128ul << 20) + CVMX_PCI_MSI_RCV) & 0xffffffff; + msg.address_hi = ((128ul << 20) + CVMX_PCI_MSI_RCV) >> 32; + break; + case OCTEON_DMA_BAR_TYPE_BIG: + /* When using big bar, Bar 0 is based at 0 */ + msg.address_lo = (0 + CVMX_PCI_MSI_RCV) & 0xffffffff; + msg.address_hi = (0 + CVMX_PCI_MSI_RCV) >> 32; + break; + case OCTEON_DMA_BAR_TYPE_PCIE: + /* When using PCIe, Bar 0 is based at 0 */ + /* FIXME CVMX_NPEI_MSI_RCV* other than 0? */ + msg.address_lo = (0 + CVMX_NPEI_PCIE_MSI_RCV) & 0xffffffff; + msg.address_hi = (0 + CVMX_NPEI_PCIE_MSI_RCV) >> 32; + break; + case OCTEON_DMA_BAR_TYPE_PCIE2: + /* When using PCIe2, Bar 0 is based at 0 */ + msg.address_lo = (0 + CVMX_SLI_PCIE_MSI_RCV) & 0xffffffff; + msg.address_hi = (0 + CVMX_SLI_PCIE_MSI_RCV) >> 32; + break; + default: + panic("arch_setup_msi_irq: Invalid octeon_dma_bar_type"); + } + msg.data = irq - OCTEON_IRQ_MSI_BIT0; + + /* Update the number of IRQs the device has available to it */ + control &= ~PCI_MSI_FLAGS_QSIZE; + control |= request_private_bits << 4; + pci_write_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, control); + + irq_set_msi_desc(irq, desc); + pci_write_msi_msg(irq, &msg); + return 0; +} + +/** + * arch_teardown_msi_irq() - release MSI IRQs for a device + * @irq: The devices first irq number. There may be multiple in sequence. + * + * Called when a device no longer needs its MSI interrupts. All + * MSI interrupts for the device are freed. + */ +void arch_teardown_msi_irq(unsigned int irq) +{ + int number_irqs; + u64 bitmask; + int index = 0; + int irq0; + + if ((irq < OCTEON_IRQ_MSI_BIT0) + || (irq > msi_irq_size + OCTEON_IRQ_MSI_BIT0)) + panic("arch_teardown_msi_irq: Attempted to teardown illegal " + "MSI interrupt (%d)", irq); + + irq -= OCTEON_IRQ_MSI_BIT0; + index = irq / 64; + irq0 = irq % 64; + + /* + * Count the number of IRQs we need to free by looking at the + * msi_multiple_irq_bitmask. Each bit set means that the next + * IRQ is also owned by this device. 
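 + * For example, a device that was handed 4 MSIs has 3 consecutive bits set starting at its base IRQ, so the loop counts 3 and the final increment brings the total to 4.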
+ */ + number_irqs = 0; + while ((irq0 + number_irqs < 64) && + (msi_multiple_irq_bitmask[index] + & (1ull << (irq0 + number_irqs)))) + number_irqs++; + number_irqs++; + /* Mask with one bit for each IRQ */ + bitmask = (1 << number_irqs) - 1; + /* Shift the mask to the correct bit location */ + bitmask <<= irq0; + if ((msi_free_irq_bitmask[index] & bitmask) != bitmask) + panic("arch_teardown_msi_irq: Attempted to teardown MSI " + "interrupt (%d) not in use", irq); + + /* Checks are done, update the in use bitmask */ + spin_lock(&msi_free_irq_bitmask_lock); + msi_free_irq_bitmask[index] &= ~bitmask; + msi_multiple_irq_bitmask[index] &= ~bitmask; + spin_unlock(&msi_free_irq_bitmask_lock); +} + +static DEFINE_RAW_SPINLOCK(octeon_irq_msi_lock); + +static u64 msi_rcv_reg[4]; +static u64 mis_ena_reg[4]; + +static void octeon_irq_msi_enable_pcie(struct irq_data *data) +{ + u64 en; + unsigned long flags; + int msi_number = data->irq - OCTEON_IRQ_MSI_BIT0; + int irq_index = msi_number >> 6; + int irq_bit = msi_number & 0x3f; + + raw_spin_lock_irqsave(&octeon_irq_msi_lock, flags); + en = cvmx_read_csr(mis_ena_reg[irq_index]); + en |= 1ull << irq_bit; + cvmx_write_csr(mis_ena_reg[irq_index], en); + cvmx_read_csr(mis_ena_reg[irq_index]); + raw_spin_unlock_irqrestore(&octeon_irq_msi_lock, flags); +} + +static void octeon_irq_msi_disable_pcie(struct irq_data *data) +{ + u64 en; + unsigned long flags; + int msi_number = data->irq - OCTEON_IRQ_MSI_BIT0; + int irq_index = msi_number >> 6; + int irq_bit = msi_number & 0x3f; + + raw_spin_lock_irqsave(&octeon_irq_msi_lock, flags); + en = cvmx_read_csr(mis_ena_reg[irq_index]); + en &= ~(1ull << irq_bit); + cvmx_write_csr(mis_ena_reg[irq_index], en); + cvmx_read_csr(mis_ena_reg[irq_index]); + raw_spin_unlock_irqrestore(&octeon_irq_msi_lock, flags); +} + +static struct irq_chip octeon_irq_chip_msi_pcie = { + .name = "MSI", + .irq_enable = octeon_irq_msi_enable_pcie, + .irq_disable = octeon_irq_msi_disable_pcie, +}; + +static void octeon_irq_msi_enable_pci(struct irq_data *data) +{ + /* + * Octeon PCI doesn't have the ability to mask/unmask MSI + * interrupts individually. Instead of masking/unmasking them + * in groups of 16, we simple assume MSI devices are well + * behaved. MSI interrupts are always enable and the ACK is + * assumed to be enough + */ +} + +static void octeon_irq_msi_disable_pci(struct irq_data *data) +{ + /* See comment in enable */ +} + +static struct irq_chip octeon_irq_chip_msi_pci = { + .name = "MSI", + .irq_enable = octeon_irq_msi_enable_pci, + .irq_disable = octeon_irq_msi_disable_pci, +}; + +/* + * Called by the interrupt handling code when an MSI interrupt + * occurs. + */ +static irqreturn_t __octeon_msi_do_interrupt(int index, u64 msi_bits) +{ + int irq; + int bit; + + bit = fls64(msi_bits); + if (bit) { + bit--; + /* Acknowledge it first. 
*/ + cvmx_write_csr(msi_rcv_reg[index], 1ull << bit); + + irq = bit + OCTEON_IRQ_MSI_BIT0 + 64 * index; + do_IRQ(irq); + return IRQ_HANDLED; + } + return IRQ_NONE; +} + +#define OCTEON_MSI_INT_HANDLER_X(x) \ +static irqreturn_t octeon_msi_interrupt##x(int cpl, void *dev_id) \ +{ \ + u64 msi_bits = cvmx_read_csr(msi_rcv_reg[(x)]); \ + return __octeon_msi_do_interrupt((x), msi_bits); \ +} + +/* + * Create octeon_msi_interrupt{0-3} function body + */ +OCTEON_MSI_INT_HANDLER_X(0); +OCTEON_MSI_INT_HANDLER_X(1); +OCTEON_MSI_INT_HANDLER_X(2); +OCTEON_MSI_INT_HANDLER_X(3); + +/* + * Initializes the MSI interrupt handling code + */ +int __init octeon_msi_initialize(void) +{ + int irq; + struct irq_chip *msi; + + if (octeon_dma_bar_type == OCTEON_DMA_BAR_TYPE_INVALID) { + return 0; + } else if (octeon_dma_bar_type == OCTEON_DMA_BAR_TYPE_PCIE) { + msi_rcv_reg[0] = CVMX_PEXP_NPEI_MSI_RCV0; + msi_rcv_reg[1] = CVMX_PEXP_NPEI_MSI_RCV1; + msi_rcv_reg[2] = CVMX_PEXP_NPEI_MSI_RCV2; + msi_rcv_reg[3] = CVMX_PEXP_NPEI_MSI_RCV3; + mis_ena_reg[0] = CVMX_PEXP_NPEI_MSI_ENB0; + mis_ena_reg[1] = CVMX_PEXP_NPEI_MSI_ENB1; + mis_ena_reg[2] = CVMX_PEXP_NPEI_MSI_ENB2; + mis_ena_reg[3] = CVMX_PEXP_NPEI_MSI_ENB3; + msi = &octeon_irq_chip_msi_pcie; + } else { + msi_rcv_reg[0] = CVMX_NPI_NPI_MSI_RCV; +#define INVALID_GENERATE_ADE 0x8700000000000000ULL; + msi_rcv_reg[1] = INVALID_GENERATE_ADE; + msi_rcv_reg[2] = INVALID_GENERATE_ADE; + msi_rcv_reg[3] = INVALID_GENERATE_ADE; + mis_ena_reg[0] = INVALID_GENERATE_ADE; + mis_ena_reg[1] = INVALID_GENERATE_ADE; + mis_ena_reg[2] = INVALID_GENERATE_ADE; + mis_ena_reg[3] = INVALID_GENERATE_ADE; + msi = &octeon_irq_chip_msi_pci; + } + + for (irq = OCTEON_IRQ_MSI_BIT0; irq <= OCTEON_IRQ_MSI_LAST; irq++) + irq_set_chip_and_handler(irq, msi, handle_simple_irq); + + if (octeon_has_feature(OCTEON_FEATURE_PCIE)) { + if (request_irq(OCTEON_IRQ_PCI_MSI0, octeon_msi_interrupt0, + 0, "MSI[0:63]", octeon_msi_interrupt0)) + panic("request_irq(OCTEON_IRQ_PCI_MSI0) failed"); + + if (request_irq(OCTEON_IRQ_PCI_MSI1, octeon_msi_interrupt1, + 0, "MSI[64:127]", octeon_msi_interrupt1)) + panic("request_irq(OCTEON_IRQ_PCI_MSI1) failed"); + + if (request_irq(OCTEON_IRQ_PCI_MSI2, octeon_msi_interrupt2, + 0, "MSI[127:191]", octeon_msi_interrupt2)) + panic("request_irq(OCTEON_IRQ_PCI_MSI2) failed"); + + if (request_irq(OCTEON_IRQ_PCI_MSI3, octeon_msi_interrupt3, + 0, "MSI[192:255]", octeon_msi_interrupt3)) + panic("request_irq(OCTEON_IRQ_PCI_MSI3) failed"); + + msi_irq_size = 256; + } else if (octeon_is_pci_host()) { + if (request_irq(OCTEON_IRQ_PCI_MSI0, octeon_msi_interrupt0, + 0, "MSI[0:15]", octeon_msi_interrupt0)) + panic("request_irq(OCTEON_IRQ_PCI_MSI0) failed"); + + if (request_irq(OCTEON_IRQ_PCI_MSI1, octeon_msi_interrupt0, + 0, "MSI[16:31]", octeon_msi_interrupt0)) + panic("request_irq(OCTEON_IRQ_PCI_MSI1) failed"); + + if (request_irq(OCTEON_IRQ_PCI_MSI2, octeon_msi_interrupt0, + 0, "MSI[32:47]", octeon_msi_interrupt0)) + panic("request_irq(OCTEON_IRQ_PCI_MSI2) failed"); + + if (request_irq(OCTEON_IRQ_PCI_MSI3, octeon_msi_interrupt0, + 0, "MSI[48:63]", octeon_msi_interrupt0)) + panic("request_irq(OCTEON_IRQ_PCI_MSI3) failed"); + msi_irq_size = 64; + } + return 0; +} +subsys_initcall(octeon_msi_initialize); diff --git a/arch/mips/pci/ops-bcm63xx.c b/arch/mips/pci/ops-bcm63xx.c new file mode 100644 index 000000000..dc6dc2741 --- /dev/null +++ b/arch/mips/pci/ops-bcm63xx.c @@ -0,0 +1,528 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. 
See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 2008 Maxime Bizon + */ + +#include +#include +#include +#include +#include + +#include "pci-bcm63xx.h" + +/* + * swizzle 32bits data to return only the needed part + */ +static int postprocess_read(u32 data, int where, unsigned int size) +{ + u32 ret; + + ret = 0; + switch (size) { + case 1: + ret = (data >> ((where & 3) << 3)) & 0xff; + break; + case 2: + ret = (data >> ((where & 3) << 3)) & 0xffff; + break; + case 4: + ret = data; + break; + } + return ret; +} + +static int preprocess_write(u32 orig_data, u32 val, int where, + unsigned int size) +{ + u32 ret; + + ret = 0; + switch (size) { + case 1: + ret = (orig_data & ~(0xff << ((where & 3) << 3))) | + (val << ((where & 3) << 3)); + break; + case 2: + ret = (orig_data & ~(0xffff << ((where & 3) << 3))) | + (val << ((where & 3) << 3)); + break; + case 4: + ret = val; + break; + } + return ret; +} + +/* + * setup hardware for a configuration cycle with given parameters + */ +static int bcm63xx_setup_cfg_access(int type, unsigned int busn, + unsigned int devfn, int where) +{ + unsigned int slot, func, reg; + u32 val; + + slot = PCI_SLOT(devfn); + func = PCI_FUNC(devfn); + reg = where >> 2; + + /* sanity check */ + if (slot > (MPI_L2PCFG_DEVNUM_MASK >> MPI_L2PCFG_DEVNUM_SHIFT)) + return 1; + + if (func > (MPI_L2PCFG_FUNC_MASK >> MPI_L2PCFG_FUNC_SHIFT)) + return 1; + + if (reg > (MPI_L2PCFG_REG_MASK >> MPI_L2PCFG_REG_SHIFT)) + return 1; + + /* ok, setup config access */ + val = (reg << MPI_L2PCFG_REG_SHIFT); + val |= (func << MPI_L2PCFG_FUNC_SHIFT); + val |= (slot << MPI_L2PCFG_DEVNUM_SHIFT); + val |= MPI_L2PCFG_CFG_USEREG_MASK; + val |= MPI_L2PCFG_CFG_SEL_MASK; + /* type 0 cycle for local bus, type 1 cycle for anything else */ + if (type != 0) { + /* FIXME: how to specify bus ??? */ + val |= (1 << MPI_L2PCFG_CFG_TYPE_SHIFT); + } + bcm_mpi_writel(val, MPI_L2PCFG_REG); + + return 0; +} + +static int bcm63xx_do_cfg_read(int type, unsigned int busn, + unsigned int devfn, int where, int size, + u32 *val) +{ + u32 data; + + /* two phase cycle, first we write address, then read data at + * another location, caller already has a spinlock so no need + * to add one here */ + if (bcm63xx_setup_cfg_access(type, busn, devfn, where)) + return PCIBIOS_DEVICE_NOT_FOUND; + iob(); + data = le32_to_cpu(__raw_readl(pci_iospace_start)); + /* restore IO space normal behaviour */ + bcm_mpi_writel(0, MPI_L2PCFG_REG); + + *val = postprocess_read(data, where, size); + + return PCIBIOS_SUCCESSFUL; +} + +static int bcm63xx_do_cfg_write(int type, unsigned int busn, + unsigned int devfn, int where, int size, + u32 val) +{ + u32 data; + + /* two phase cycle, first we write address, then write data to + * another location, caller already has a spinlock so no need + * to add one here */ + if (bcm63xx_setup_cfg_access(type, busn, devfn, where)) + return PCIBIOS_DEVICE_NOT_FOUND; + iob(); + + data = le32_to_cpu(__raw_readl(pci_iospace_start)); + data = preprocess_write(data, val, where, size); + + __raw_writel(cpu_to_le32(data), pci_iospace_start); + wmb(); + /* no way to know the access is done, we have to wait */ + udelay(500); + /* restore IO space normal behaviour */ + bcm_mpi_writel(0, MPI_L2PCFG_REG); + + return PCIBIOS_SUCCESSFUL; +} + +static int bcm63xx_pci_read(struct pci_bus *bus, unsigned int devfn, + int where, int size, u32 *val) +{ + int type; + + type = bus->parent ? 
1 : 0; + + if (type == 0 && PCI_SLOT(devfn) == CARDBUS_PCI_IDSEL) + return PCIBIOS_DEVICE_NOT_FOUND; + + return bcm63xx_do_cfg_read(type, bus->number, devfn, + where, size, val); +} + +static int bcm63xx_pci_write(struct pci_bus *bus, unsigned int devfn, + int where, int size, u32 val) +{ + int type; + + type = bus->parent ? 1 : 0; + + if (type == 0 && PCI_SLOT(devfn) == CARDBUS_PCI_IDSEL) + return PCIBIOS_DEVICE_NOT_FOUND; + + return bcm63xx_do_cfg_write(type, bus->number, devfn, + where, size, val); +} + +struct pci_ops bcm63xx_pci_ops = { + .read = bcm63xx_pci_read, + .write = bcm63xx_pci_write +}; + +#ifdef CONFIG_CARDBUS +/* + * emulate configuration read access on a cardbus bridge + */ +#define FAKE_CB_BRIDGE_SLOT 0x1e + +static int fake_cb_bridge_bus_number = -1; + +static struct { + u16 pci_command; + u8 cb_latency; + u8 subordinate_busn; + u8 cardbus_busn; + u8 pci_busn; + int bus_assigned; + u16 bridge_control; + + u32 mem_base0; + u32 mem_limit0; + u32 mem_base1; + u32 mem_limit1; + + u32 io_base0; + u32 io_limit0; + u32 io_base1; + u32 io_limit1; +} fake_cb_bridge_regs; + +static int fake_cb_bridge_read(int where, int size, u32 *val) +{ + unsigned int reg; + u32 data; + + data = 0; + reg = where >> 2; + switch (reg) { + case (PCI_VENDOR_ID >> 2): + case (PCI_CB_SUBSYSTEM_VENDOR_ID >> 2): + /* create dummy vendor/device id from our cpu id */ + data = (bcm63xx_get_cpu_id() << 16) | PCI_VENDOR_ID_BROADCOM; + break; + + case (PCI_COMMAND >> 2): + data = (PCI_STATUS_DEVSEL_SLOW << 16); + data |= fake_cb_bridge_regs.pci_command; + break; + + case (PCI_CLASS_REVISION >> 2): + data = (PCI_CLASS_BRIDGE_CARDBUS << 16); + break; + + case (PCI_CACHE_LINE_SIZE >> 2): + data = (PCI_HEADER_TYPE_CARDBUS << 16); + break; + + case (PCI_INTERRUPT_LINE >> 2): + /* bridge control */ + data = (fake_cb_bridge_regs.bridge_control << 16); + /* pin:intA line:0xff */ + data |= (0x1 << 8) | 0xff; + break; + + case (PCI_CB_PRIMARY_BUS >> 2): + data = (fake_cb_bridge_regs.cb_latency << 24); + data |= (fake_cb_bridge_regs.subordinate_busn << 16); + data |= (fake_cb_bridge_regs.cardbus_busn << 8); + data |= fake_cb_bridge_regs.pci_busn; + break; + + case (PCI_CB_MEMORY_BASE_0 >> 2): + data = fake_cb_bridge_regs.mem_base0; + break; + + case (PCI_CB_MEMORY_LIMIT_0 >> 2): + data = fake_cb_bridge_regs.mem_limit0; + break; + + case (PCI_CB_MEMORY_BASE_1 >> 2): + data = fake_cb_bridge_regs.mem_base1; + break; + + case (PCI_CB_MEMORY_LIMIT_1 >> 2): + data = fake_cb_bridge_regs.mem_limit1; + break; + + case (PCI_CB_IO_BASE_0 >> 2): + /* | 1 for 32bits io support */ + data = fake_cb_bridge_regs.io_base0 | 0x1; + break; + + case (PCI_CB_IO_LIMIT_0 >> 2): + data = fake_cb_bridge_regs.io_limit0; + break; + + case (PCI_CB_IO_BASE_1 >> 2): + /* | 1 for 32bits io support */ + data = fake_cb_bridge_regs.io_base1 | 0x1; + break; + + case (PCI_CB_IO_LIMIT_1 >> 2): + data = fake_cb_bridge_regs.io_limit1; + break; + } + + *val = postprocess_read(data, where, size); + return PCIBIOS_SUCCESSFUL; +} + +/* + * emulate configuration write access on a cardbus bridge + */ +static int fake_cb_bridge_write(int where, int size, u32 val) +{ + unsigned int reg; + u32 data, tmp; + int ret; + + ret = fake_cb_bridge_read((where & ~0x3), 4, &data); + if (ret != PCIBIOS_SUCCESSFUL) + return ret; + + data = preprocess_write(data, val, where, size); + + reg = where >> 2; + switch (reg) { + case (PCI_COMMAND >> 2): + fake_cb_bridge_regs.pci_command = (data & 0xffff); + break; + + case (PCI_CB_PRIMARY_BUS >> 2): + fake_cb_bridge_regs.cb_latency = 
(data >> 24) & 0xff; + fake_cb_bridge_regs.subordinate_busn = (data >> 16) & 0xff; + fake_cb_bridge_regs.cardbus_busn = (data >> 8) & 0xff; + fake_cb_bridge_regs.pci_busn = data & 0xff; + if (fake_cb_bridge_regs.cardbus_busn) + fake_cb_bridge_regs.bus_assigned = 1; + break; + + case (PCI_INTERRUPT_LINE >> 2): + tmp = (data >> 16) & 0xffff; + /* disable memory prefetch support */ + tmp &= ~PCI_CB_BRIDGE_CTL_PREFETCH_MEM0; + tmp &= ~PCI_CB_BRIDGE_CTL_PREFETCH_MEM1; + fake_cb_bridge_regs.bridge_control = tmp; + break; + + case (PCI_CB_MEMORY_BASE_0 >> 2): + fake_cb_bridge_regs.mem_base0 = data; + break; + + case (PCI_CB_MEMORY_LIMIT_0 >> 2): + fake_cb_bridge_regs.mem_limit0 = data; + break; + + case (PCI_CB_MEMORY_BASE_1 >> 2): + fake_cb_bridge_regs.mem_base1 = data; + break; + + case (PCI_CB_MEMORY_LIMIT_1 >> 2): + fake_cb_bridge_regs.mem_limit1 = data; + break; + + case (PCI_CB_IO_BASE_0 >> 2): + fake_cb_bridge_regs.io_base0 = data; + break; + + case (PCI_CB_IO_LIMIT_0 >> 2): + fake_cb_bridge_regs.io_limit0 = data; + break; + + case (PCI_CB_IO_BASE_1 >> 2): + fake_cb_bridge_regs.io_base1 = data; + break; + + case (PCI_CB_IO_LIMIT_1 >> 2): + fake_cb_bridge_regs.io_limit1 = data; + break; + } + + return PCIBIOS_SUCCESSFUL; +} + +static int bcm63xx_cb_read(struct pci_bus *bus, unsigned int devfn, + int where, int size, u32 *val) +{ + /* snoop access to slot 0x1e on root bus, we fake a cardbus + * bridge at this location */ + if (!bus->parent && PCI_SLOT(devfn) == FAKE_CB_BRIDGE_SLOT) { + fake_cb_bridge_bus_number = bus->number; + return fake_cb_bridge_read(where, size, val); + } + + /* a configuration cycle for the device behind the cardbus + * bridge is actually done as a type 0 cycle on the primary + * bus. This means that only one device can be on the cardbus + * bus */ + if (fake_cb_bridge_regs.bus_assigned && + bus->number == fake_cb_bridge_regs.cardbus_busn && + PCI_SLOT(devfn) == 0) + return bcm63xx_do_cfg_read(0, 0, + PCI_DEVFN(CARDBUS_PCI_IDSEL, 0), + where, size, val); + + return PCIBIOS_DEVICE_NOT_FOUND; +} + +static int bcm63xx_cb_write(struct pci_bus *bus, unsigned int devfn, + int where, int size, u32 val) +{ + if (!bus->parent && PCI_SLOT(devfn) == FAKE_CB_BRIDGE_SLOT) { + fake_cb_bridge_bus_number = bus->number; + return fake_cb_bridge_write(where, size, val); + } + + if (fake_cb_bridge_regs.bus_assigned && + bus->number == fake_cb_bridge_regs.cardbus_busn && + PCI_SLOT(devfn) == 0) + return bcm63xx_do_cfg_write(0, 0, + PCI_DEVFN(CARDBUS_PCI_IDSEL, 0), + where, size, val); + + return PCIBIOS_DEVICE_NOT_FOUND; +} + +struct pci_ops bcm63xx_cb_ops = { + .read = bcm63xx_cb_read, + .write = bcm63xx_cb_write, +}; + +/* + * only one IO window, so it cannot be shared by PCI and cardbus, use + * fixup to choose and detect unhandled configuration + */ +static void bcm63xx_fixup(struct pci_dev *dev) +{ + static int io_window = -1; + int i, found, new_io_window; + u32 val; + + /* look for any io resource */ + found = 0; + for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) { + if (pci_resource_flags(dev, i) & IORESOURCE_IO) { + found = 1; + break; + } + } + + if (!found) + return; + + /* skip our fake bus with only cardbus bridge on it */ + if (dev->bus->number == fake_cb_bridge_bus_number) + return; + + /* find on which bus the device is */ + if (fake_cb_bridge_regs.bus_assigned && + dev->bus->number == fake_cb_bridge_regs.cardbus_busn && + PCI_SLOT(dev->devfn) == 0) + new_io_window = 1; + else + new_io_window = 0; + + if (new_io_window == io_window) + return; + + if (io_window != -1) { + 
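/* the IO window was already committed to the other bus type by an earlier fixup; refuse the switch */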
printk(KERN_ERR "bcm63xx: both PCI and cardbus devices " + "need IO, which hardware cannot do\n"); + return; + } + + printk(KERN_INFO "bcm63xx: PCI IO window assigned to %s\n", + (new_io_window == 0) ? "PCI" : "cardbus"); + + val = bcm_mpi_readl(MPI_L2PIOREMAP_REG); + if (io_window) + val |= MPI_L2PREMAP_IS_CARDBUS_MASK; + else + val &= ~MPI_L2PREMAP_IS_CARDBUS_MASK; + bcm_mpi_writel(val, MPI_L2PIOREMAP_REG); + + io_window = new_io_window; +} + +DECLARE_PCI_FIXUP_ENABLE(PCI_ANY_ID, PCI_ANY_ID, bcm63xx_fixup); +#endif + +static int bcm63xx_pcie_can_access(struct pci_bus *bus, int devfn) +{ + switch (bus->number) { + case PCIE_BUS_BRIDGE: + return PCI_SLOT(devfn) == 0; + case PCIE_BUS_DEVICE: + if (PCI_SLOT(devfn) == 0) + return bcm_pcie_readl(PCIE_DLSTATUS_REG) + & DLSTATUS_PHYLINKUP; + fallthrough; + default: + return false; + } +} + +static int bcm63xx_pcie_read(struct pci_bus *bus, unsigned int devfn, + int where, int size, u32 *val) +{ + u32 data; + u32 reg = where & ~3; + + if (!bcm63xx_pcie_can_access(bus, devfn)) + return PCIBIOS_DEVICE_NOT_FOUND; + + if (bus->number == PCIE_BUS_DEVICE) + reg += PCIE_DEVICE_OFFSET; + + data = bcm_pcie_readl(reg); + + *val = postprocess_read(data, where, size); + + return PCIBIOS_SUCCESSFUL; + +} + +static int bcm63xx_pcie_write(struct pci_bus *bus, unsigned int devfn, + int where, int size, u32 val) +{ + u32 data; + u32 reg = where & ~3; + + if (!bcm63xx_pcie_can_access(bus, devfn)) + return PCIBIOS_DEVICE_NOT_FOUND; + + if (bus->number == PCIE_BUS_DEVICE) + reg += PCIE_DEVICE_OFFSET; + + + data = bcm_pcie_readl(reg); + + data = preprocess_write(data, val, where, size); + bcm_pcie_writel(data, reg); + + return PCIBIOS_SUCCESSFUL; +} + + +struct pci_ops bcm63xx_pcie_ops = { + .read = bcm63xx_pcie_read, + .write = bcm63xx_pcie_write +}; diff --git a/arch/mips/pci/ops-bonito64.c b/arch/mips/pci/ops-bonito64.c new file mode 100644 index 000000000..4d5fe614f --- /dev/null +++ b/arch/mips/pci/ops-bonito64.c @@ -0,0 +1,148 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (C) 1999, 2000, 2004 MIPS Technologies, Inc. + * All rights reserved. + * Authors: Carsten Langgaard + * Maciej W. Rozycki + * + * MIPS boards specific PCI support. 
+ */ +#include +#include +#include + +#include + +#define PCI_ACCESS_READ 0 +#define PCI_ACCESS_WRITE 1 + +#define CFG_SPACE_REG(offset) (void *)CKSEG1ADDR(_pcictrl_bonito_pcicfg + (offset)) +#define ID_SEL_BEGIN 10 +#define MAX_DEV_NUM (31 - ID_SEL_BEGIN) + + +static int bonito64_pcibios_config_access(unsigned char access_type, + struct pci_bus *bus, + unsigned int devfn, int where, + u32 * data) +{ + u32 busnum = bus->number; + u32 addr, type; + u32 dummy; + void *addrp; + int device = PCI_SLOT(devfn); + int function = PCI_FUNC(devfn); + int reg = where & ~3; + + if (busnum == 0) { + /* Type 0 configuration for onboard PCI bus */ + if (device > MAX_DEV_NUM) + return -1; + + addr = (1 << (device + ID_SEL_BEGIN)) | (function << 8) | reg; + type = 0; + } else { + /* Type 1 configuration for offboard PCI bus */ + addr = (busnum << 16) | (device << 11) | (function << 8) | reg; + type = 0x10000; + } + + /* Clear aborts */ + BONITO_PCICMD |= BONITO_PCICMD_MABORT_CLR | BONITO_PCICMD_MTABORT_CLR; + + BONITO_PCIMAP_CFG = (addr >> 16) | type; + + /* Flush Bonito register block */ + dummy = BONITO_PCIMAP_CFG; + mmiowb(); + + addrp = CFG_SPACE_REG(addr & 0xffff); + if (access_type == PCI_ACCESS_WRITE) { + writel(cpu_to_le32(*data), addrp); + /* Wait till done */ + while (BONITO_PCIMSTAT & 0xF); + } else { + *data = le32_to_cpu(readl(addrp)); + } + + /* Detect Master/Target abort */ + if (BONITO_PCICMD & (BONITO_PCICMD_MABORT_CLR | + BONITO_PCICMD_MTABORT_CLR)) { + /* Error occurred */ + + /* Clear bits */ + BONITO_PCICMD |= (BONITO_PCICMD_MABORT_CLR | + BONITO_PCICMD_MTABORT_CLR); + + return -1; + } + + return 0; + +} + + +/* + * We can't address 8 and 16 bit words directly. Instead we have to + * read/write a 32bit word and mask/modify the data we actually want. + */ +static int bonito64_pcibios_read(struct pci_bus *bus, unsigned int devfn, + int where, int size, u32 * val) +{ + u32 data = 0; + + if ((size == 2) && (where & 1)) + return PCIBIOS_BAD_REGISTER_NUMBER; + else if ((size == 4) && (where & 3)) + return PCIBIOS_BAD_REGISTER_NUMBER; + + if (bonito64_pcibios_config_access(PCI_ACCESS_READ, bus, devfn, where, + &data)) + return -1; + + if (size == 1) + *val = (data >> ((where & 3) << 3)) & 0xff; + else if (size == 2) + *val = (data >> ((where & 3) << 3)) & 0xffff; + else + *val = data; + + return PCIBIOS_SUCCESSFUL; +} + +static int bonito64_pcibios_write(struct pci_bus *bus, unsigned int devfn, + int where, int size, u32 val) +{ + u32 data = 0; + + if ((size == 2) && (where & 1)) + return PCIBIOS_BAD_REGISTER_NUMBER; + else if ((size == 4) && (where & 3)) + return PCIBIOS_BAD_REGISTER_NUMBER; + + if (size == 4) + data = val; + else { + if (bonito64_pcibios_config_access(PCI_ACCESS_READ, bus, devfn, + where, &data)) + return -1; + + if (size == 1) + data = (data & ~(0xff << ((where & 3) << 3))) | + (val << ((where & 3) << 3)); + else if (size == 2) + data = (data & ~(0xffff << ((where & 3) << 3))) | + (val << ((where & 3) << 3)); + } + + if (bonito64_pcibios_config_access(PCI_ACCESS_WRITE, bus, devfn, where, + &data)) + return -1; + + return PCIBIOS_SUCCESSFUL; +} + +struct pci_ops bonito64_pci_ops = { + .read = bonito64_pcibios_read, + .write = bonito64_pcibios_write +}; diff --git a/arch/mips/pci/ops-gt64xxx_pci0.c b/arch/mips/pci/ops-gt64xxx_pci0.c new file mode 100644 index 000000000..501dcdf5a --- /dev/null +++ b/arch/mips/pci/ops-gt64xxx_pci0.c @@ -0,0 +1,140 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (C) 1999, 2000, 2004 MIPS Technologies, Inc. 
+ * All rights reserved. + * Authors: Carsten Langgaard + * Maciej W. Rozycki + */ +#include +#include +#include + +#include + +#define PCI_ACCESS_READ 0 +#define PCI_ACCESS_WRITE 1 + +/* + * PCI configuration cycle AD bus definition + */ +/* Type 0 */ +#define PCI_CFG_TYPE0_REG_SHF 0 +#define PCI_CFG_TYPE0_FUNC_SHF 8 + +/* Type 1 */ +#define PCI_CFG_TYPE1_REG_SHF 0 +#define PCI_CFG_TYPE1_FUNC_SHF 8 +#define PCI_CFG_TYPE1_DEV_SHF 11 +#define PCI_CFG_TYPE1_BUS_SHF 16 + +static int gt64xxx_pci0_pcibios_config_access(unsigned char access_type, + struct pci_bus *bus, unsigned int devfn, int where, u32 * data) +{ + unsigned char busnum = bus->number; + u32 intr; + + if ((busnum == 0) && (devfn >= PCI_DEVFN(31, 0))) + return -1; /* Because of a bug in the galileo (for slot 31). */ + + /* Clear cause register bits */ + GT_WRITE(GT_INTRCAUSE_OFS, ~(GT_INTRCAUSE_MASABORT0_BIT | + GT_INTRCAUSE_TARABORT0_BIT)); + + /* Setup address */ + GT_WRITE(GT_PCI0_CFGADDR_OFS, + (busnum << GT_PCI0_CFGADDR_BUSNUM_SHF) | + (devfn << GT_PCI0_CFGADDR_FUNCTNUM_SHF) | + ((where / 4) << GT_PCI0_CFGADDR_REGNUM_SHF) | + GT_PCI0_CFGADDR_CONFIGEN_BIT); + + if (access_type == PCI_ACCESS_WRITE) { + if (busnum == 0 && PCI_SLOT(devfn) == 0) { + /* + * The Galileo system controller is acting + * differently than other devices. + */ + GT_WRITE(GT_PCI0_CFGDATA_OFS, *data); + } else + __GT_WRITE(GT_PCI0_CFGDATA_OFS, *data); + } else { + if (busnum == 0 && PCI_SLOT(devfn) == 0) { + /* + * The Galileo system controller is acting + * differently than other devices. + */ + *data = GT_READ(GT_PCI0_CFGDATA_OFS); + } else + *data = __GT_READ(GT_PCI0_CFGDATA_OFS); + } + + /* Check for master or target abort */ + intr = GT_READ(GT_INTRCAUSE_OFS); + + if (intr & (GT_INTRCAUSE_MASABORT0_BIT | GT_INTRCAUSE_TARABORT0_BIT)) { + /* Error occurred */ + + /* Clear bits */ + GT_WRITE(GT_INTRCAUSE_OFS, ~(GT_INTRCAUSE_MASABORT0_BIT | + GT_INTRCAUSE_TARABORT0_BIT)); + + return -1; + } + + return 0; +} + + +/* + * We can't address 8 and 16 bit words directly. Instead we have to + * read/write a 32bit word and mask/modify the data we actually want. 
+ */ +static int gt64xxx_pci0_pcibios_read(struct pci_bus *bus, unsigned int devfn, + int where, int size, u32 * val) +{ + u32 data = 0; + + if (gt64xxx_pci0_pcibios_config_access(PCI_ACCESS_READ, bus, devfn, + where, &data)) + return PCIBIOS_DEVICE_NOT_FOUND; + + if (size == 1) + *val = (data >> ((where & 3) << 3)) & 0xff; + else if (size == 2) + *val = (data >> ((where & 3) << 3)) & 0xffff; + else + *val = data; + + return PCIBIOS_SUCCESSFUL; +} + +static int gt64xxx_pci0_pcibios_write(struct pci_bus *bus, unsigned int devfn, + int where, int size, u32 val) +{ + u32 data = 0; + + if (size == 4) + data = val; + else { + if (gt64xxx_pci0_pcibios_config_access(PCI_ACCESS_READ, bus, + devfn, where, &data)) + return PCIBIOS_DEVICE_NOT_FOUND; + + if (size == 1) + data = (data & ~(0xff << ((where & 3) << 3))) | + (val << ((where & 3) << 3)); + else if (size == 2) + data = (data & ~(0xffff << ((where & 3) << 3))) | + (val << ((where & 3) << 3)); + } + + if (gt64xxx_pci0_pcibios_config_access(PCI_ACCESS_WRITE, bus, devfn, + where, &data)) + return PCIBIOS_DEVICE_NOT_FOUND; + + return PCIBIOS_SUCCESSFUL; +} + +struct pci_ops gt64xxx_pci0_ops = { + .read = gt64xxx_pci0_pcibios_read, + .write = gt64xxx_pci0_pcibios_write +}; diff --git a/arch/mips/pci/ops-lantiq.c b/arch/mips/pci/ops-lantiq.c new file mode 100644 index 000000000..7d7135539 --- /dev/null +++ b/arch/mips/pci/ops-lantiq.c @@ -0,0 +1,113 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * + * Copyright (C) 2010 John Crispin + */ + +#include +#include +#include +#include +#include +#include +#include + +#include + +#include "pci-lantiq.h" + +#define LTQ_PCI_CFG_BUSNUM_SHF 16 +#define LTQ_PCI_CFG_DEVNUM_SHF 11 +#define LTQ_PCI_CFG_FUNNUM_SHF 8 + +#define PCI_ACCESS_READ 0 +#define PCI_ACCESS_WRITE 1 + +static int ltq_pci_config_access(unsigned char access_type, struct pci_bus *bus, + unsigned int devfn, unsigned int where, u32 *data) +{ + unsigned long cfg_base; + unsigned long flags; + u32 temp; + + /* we support slot from 0 to 15 dev_fn & 0x68 (AD29) is the + SoC itself */ + if ((bus->number != 0) || ((devfn & 0xf8) > 0x78) + || ((devfn & 0xf8) == 0) || ((devfn & 0xf8) == 0x68)) + return 1; + + spin_lock_irqsave(&ebu_lock, flags); + + cfg_base = (unsigned long) ltq_pci_mapped_cfg; + cfg_base |= (bus->number << LTQ_PCI_CFG_BUSNUM_SHF) | (devfn << + LTQ_PCI_CFG_FUNNUM_SHF) | (where & ~0x3); + + /* Perform access */ + if (access_type == PCI_ACCESS_WRITE) { + ltq_w32(swab32(*data), ((u32 *)cfg_base)); + } else { + *data = ltq_r32(((u32 *)(cfg_base))); + *data = swab32(*data); + } + wmb(); + + /* clean possible Master abort */ + cfg_base = (unsigned long) ltq_pci_mapped_cfg; + cfg_base |= (0x0 << LTQ_PCI_CFG_FUNNUM_SHF) + 4; + temp = ltq_r32(((u32 *)(cfg_base))); + temp = swab32(temp); + cfg_base = (unsigned long) ltq_pci_mapped_cfg; + cfg_base |= (0x68 << LTQ_PCI_CFG_FUNNUM_SHF) + 4; + ltq_w32(temp, ((u32 *)cfg_base)); + + spin_unlock_irqrestore(&ebu_lock, flags); + + if (((*data) == 0xffffffff) && (access_type == PCI_ACCESS_READ)) + return 1; + + return 0; +} + +int ltq_pci_read_config_dword(struct pci_bus *bus, unsigned int devfn, + int where, int size, u32 *val) +{ + u32 data = 0; + + if (ltq_pci_config_access(PCI_ACCESS_READ, bus, devfn, where, &data)) + return PCIBIOS_DEVICE_NOT_FOUND; + + if (size == 1) + *val = (data >> ((where & 3) << 3)) & 0xff; + else if (size == 2) + *val = (data >> ((where & 3) << 3)) & 0xffff; + else + *val = data; + + return PCIBIOS_SUCCESSFUL; +} + +int ltq_pci_write_config_dword(struct pci_bus *bus, 
unsigned int devfn, + int where, int size, u32 val) +{ + u32 data = 0; + + if (size == 4) { + data = val; + } else { + if (ltq_pci_config_access(PCI_ACCESS_READ, bus, + devfn, where, &data)) + return PCIBIOS_DEVICE_NOT_FOUND; + + if (size == 1) + data = (data & ~(0xff << ((where & 3) << 3))) | + (val << ((where & 3) << 3)); + else if (size == 2) + data = (data & ~(0xffff << ((where & 3) << 3))) | + (val << ((where & 3) << 3)); + } + + if (ltq_pci_config_access(PCI_ACCESS_WRITE, bus, devfn, where, &data)) + return PCIBIOS_DEVICE_NOT_FOUND; + + return PCIBIOS_SUCCESSFUL; +} diff --git a/arch/mips/pci/ops-loongson2.c b/arch/mips/pci/ops-loongson2.c new file mode 100644 index 000000000..0d1b36ba1 --- /dev/null +++ b/arch/mips/pci/ops-loongson2.c @@ -0,0 +1,213 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (C) 1999, 2000, 2004 MIPS Technologies, Inc. + * All rights reserved. + * Authors: Carsten Langgaard + * Maciej W. Rozycki + * + * Copyright (C) 2009 Lemote Inc. + * Author: Wu Zhangjin + */ +#include +#include +#include +#include + +#include + +#ifdef CONFIG_CS5536 +#include +#include +#endif + +#define PCI_ACCESS_READ 0 +#define PCI_ACCESS_WRITE 1 + +#define CFG_SPACE_REG(offset) \ + (void *)CKSEG1ADDR(LOONGSON_PCICFG_BASE | (offset)) +#define ID_SEL_BEGIN 11 +#define MAX_DEV_NUM (31 - ID_SEL_BEGIN) + + +static int loongson_pcibios_config_access(unsigned char access_type, + struct pci_bus *bus, + unsigned int devfn, int where, + u32 *data) +{ + u32 busnum = bus->number; + u32 addr, type; + u32 dummy; + void *addrp; + int device = PCI_SLOT(devfn); + int function = PCI_FUNC(devfn); + int reg = where & ~3; + + if (busnum == 0) { + /* board-specific part,currently,only fuloong2f,yeeloong2f + * use CS5536, fuloong2e use via686b, gdium has no + * south bridge + */ +#ifdef CONFIG_CS5536 + /* cs5536_pci_conf_read4/write4() will call _rdmsr/_wrmsr() to + * access the regsters PCI_MSR_ADDR, PCI_MSR_DATA_LO, + * PCI_MSR_DATA_HI, which is bigger than PCI_MSR_CTRL, so, it + * will not go this branch, but the others. so, no calling dead + * loop here. + */ + if ((PCI_IDSEL_CS5536 == device) && (reg < PCI_MSR_CTRL)) { + switch (access_type) { + case PCI_ACCESS_READ: + *data = cs5536_pci_conf_read4(function, reg); + break; + case PCI_ACCESS_WRITE: + cs5536_pci_conf_write4(function, reg, *data); + break; + } + return 0; + } +#endif + /* Type 0 configuration for onboard PCI bus */ + if (device > MAX_DEV_NUM) + return -1; + + addr = (1 << (device + ID_SEL_BEGIN)) | (function << 8) | reg; + type = 0; + } else { + /* Type 1 configuration for offboard PCI bus */ + addr = (busnum << 16) | (device << 11) | (function << 8) | reg; + type = 0x10000; + } + + /* Clear aborts */ + LOONGSON_PCICMD |= LOONGSON_PCICMD_MABORT_CLR | \ + LOONGSON_PCICMD_MTABORT_CLR; + + LOONGSON_PCIMAP_CFG = (addr >> 16) | type; + + /* Flush Bonito register block */ + dummy = LOONGSON_PCIMAP_CFG; + mmiowb(); + + addrp = CFG_SPACE_REG(addr & 0xffff); + if (access_type == PCI_ACCESS_WRITE) + writel(cpu_to_le32(*data), addrp); + else + *data = le32_to_cpu(readl(addrp)); + + /* Detect Master/Target abort */ + if (LOONGSON_PCICMD & (LOONGSON_PCICMD_MABORT_CLR | + LOONGSON_PCICMD_MTABORT_CLR)) { + /* Error occurred */ + + /* Clear bits */ + LOONGSON_PCICMD |= (LOONGSON_PCICMD_MABORT_CLR | + LOONGSON_PCICMD_MTABORT_CLR); + + return -1; + } + + return 0; + +} + + +/* + * We can't address 8 and 16 bit words directly. Instead we have to + * read/write a 32bit word and mask/modify the data we actually want. 
+ */ +static int loongson_pcibios_read(struct pci_bus *bus, unsigned int devfn, + int where, int size, u32 *val) +{ + u32 data = 0; + + if ((size == 2) && (where & 1)) + return PCIBIOS_BAD_REGISTER_NUMBER; + else if ((size == 4) && (where & 3)) + return PCIBIOS_BAD_REGISTER_NUMBER; + + if (loongson_pcibios_config_access(PCI_ACCESS_READ, bus, devfn, where, + &data)) + return -1; + + if (size == 1) + *val = (data >> ((where & 3) << 3)) & 0xff; + else if (size == 2) + *val = (data >> ((where & 3) << 3)) & 0xffff; + else + *val = data; + + return PCIBIOS_SUCCESSFUL; +} + +static int loongson_pcibios_write(struct pci_bus *bus, unsigned int devfn, + int where, int size, u32 val) +{ + u32 data = 0; + + if ((size == 2) && (where & 1)) + return PCIBIOS_BAD_REGISTER_NUMBER; + else if ((size == 4) && (where & 3)) + return PCIBIOS_BAD_REGISTER_NUMBER; + + if (size == 4) + data = val; + else { + if (loongson_pcibios_config_access(PCI_ACCESS_READ, bus, devfn, + where, &data)) + return -1; + + if (size == 1) + data = (data & ~(0xff << ((where & 3) << 3))) | + (val << ((where & 3) << 3)); + else if (size == 2) + data = (data & ~(0xffff << ((where & 3) << 3))) | + (val << ((where & 3) << 3)); + } + + if (loongson_pcibios_config_access(PCI_ACCESS_WRITE, bus, devfn, where, + &data)) + return -1; + + return PCIBIOS_SUCCESSFUL; +} + +struct pci_ops loongson_pci_ops = { + .read = loongson_pcibios_read, + .write = loongson_pcibios_write +}; + +#ifdef CONFIG_CS5536 +DEFINE_RAW_SPINLOCK(msr_lock); + +void _rdmsr(u32 msr, u32 *hi, u32 *lo) +{ + struct pci_bus bus = { + .number = PCI_BUS_CS5536 + }; + u32 devfn = PCI_DEVFN(PCI_IDSEL_CS5536, 0); + unsigned long flags; + + raw_spin_lock_irqsave(&msr_lock, flags); + loongson_pcibios_write(&bus, devfn, PCI_MSR_ADDR, 4, msr); + loongson_pcibios_read(&bus, devfn, PCI_MSR_DATA_LO, 4, lo); + loongson_pcibios_read(&bus, devfn, PCI_MSR_DATA_HI, 4, hi); + raw_spin_unlock_irqrestore(&msr_lock, flags); +} +EXPORT_SYMBOL(_rdmsr); + +void _wrmsr(u32 msr, u32 hi, u32 lo) +{ + struct pci_bus bus = { + .number = PCI_BUS_CS5536 + }; + u32 devfn = PCI_DEVFN(PCI_IDSEL_CS5536, 0); + unsigned long flags; + + raw_spin_lock_irqsave(&msr_lock, flags); + loongson_pcibios_write(&bus, devfn, PCI_MSR_ADDR, 4, msr); + loongson_pcibios_write(&bus, devfn, PCI_MSR_DATA_LO, 4, lo); + loongson_pcibios_write(&bus, devfn, PCI_MSR_DATA_HI, 4, hi); + raw_spin_unlock_irqrestore(&msr_lock, flags); +} +EXPORT_SYMBOL(_wrmsr); +#endif diff --git a/arch/mips/pci/ops-mace.c b/arch/mips/pci/ops-mace.c new file mode 100644 index 000000000..951d8070f --- /dev/null +++ b/arch/mips/pci/ops-mace.c @@ -0,0 +1,100 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 2000, 2001 Keith M Wesolowski + */ +#include +#include +#include +#include + +#if 0 +# define DPRINTK(args...) printk(args); +#else +# define DPRINTK(args...) +#endif + +/* + * O2 has up to 5 PCI devices connected into the MACE bridge. 
The device + * map looks like this: + * + * 0 aic7xxx 0 + * 1 aic7xxx 1 + * 2 expansion slot + * 3 N/C + * 4 N/C + */ + +static inline int mkaddr(struct pci_bus *bus, unsigned int devfn, + unsigned int reg) +{ + return ((bus->number & 0xff) << 16) | + ((devfn & 0xff) << 8) | + (reg & 0xfc); +} + + +static int +mace_pci_read_config(struct pci_bus *bus, unsigned int devfn, + int reg, int size, u32 *val) +{ + u32 control = mace->pci.control; + + /* disable master aborts interrupts during config read */ + mace->pci.control = control & ~MACEPCI_CONTROL_MAR_INT; + mace->pci.config_addr = mkaddr(bus, devfn, reg); + switch (size) { + case 1: + *val = mace->pci.config_data.b[(reg & 3) ^ 3]; + break; + case 2: + *val = mace->pci.config_data.w[((reg >> 1) & 1) ^ 1]; + break; + case 4: + *val = mace->pci.config_data.l; + break; + } + /* ack possible master abort */ + mace->pci.error &= ~MACEPCI_ERROR_MASTER_ABORT; + mace->pci.control = control; + /* + * someone forgot to set the ultra bit for the onboard + * scsi chips; we fake it here + */ + if (bus->number == 0 && reg == 0x40 && size == 4 && + (devfn == (1 << 3) || devfn == (2 << 3))) + *val |= 0x1000; + + DPRINTK("read%d: reg=%08x,val=%02x\n", size * 8, reg, *val); + + return PCIBIOS_SUCCESSFUL; +} + +static int +mace_pci_write_config(struct pci_bus *bus, unsigned int devfn, + int reg, int size, u32 val) +{ + mace->pci.config_addr = mkaddr(bus, devfn, reg); + switch (size) { + case 1: + mace->pci.config_data.b[(reg & 3) ^ 3] = val; + break; + case 2: + mace->pci.config_data.w[((reg >> 1) & 1) ^ 1] = val; + break; + case 4: + mace->pci.config_data.l = val; + break; + } + + DPRINTK("write%d: reg=%08x,val=%02x\n", size * 8, reg, val); + + return PCIBIOS_SUCCESSFUL; +} + +struct pci_ops mace_pci_ops = { + .read = mace_pci_read_config, + .write = mace_pci_write_config, +}; diff --git a/arch/mips/pci/ops-msc.c b/arch/mips/pci/ops-msc.c new file mode 100644 index 000000000..1f438baaf --- /dev/null +++ b/arch/mips/pci/ops-msc.c @@ -0,0 +1,134 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (C) 1999, 2000, 2004, 2005 MIPS Technologies, Inc. + * All rights reserved. + * Authors: Carsten Langgaard + * Maciej W. Rozycki + * Copyright (C) 2005 Ralf Baechle (ralf@linux-mips.org) + * + * MIPS boards specific PCI support. + */ +#include +#include +#include + +#include + +#define PCI_ACCESS_READ 0 +#define PCI_ACCESS_WRITE 1 + +/* + * PCI configuration cycle AD bus definition + */ +/* Type 0 */ +#define PCI_CFG_TYPE0_REG_SHF 0 +#define PCI_CFG_TYPE0_FUNC_SHF 8 + +/* Type 1 */ +#define PCI_CFG_TYPE1_REG_SHF 0 +#define PCI_CFG_TYPE1_FUNC_SHF 8 +#define PCI_CFG_TYPE1_DEV_SHF 11 +#define PCI_CFG_TYPE1_BUS_SHF 16 + +static int msc_pcibios_config_access(unsigned char access_type, + struct pci_bus *bus, unsigned int devfn, int where, u32 * data) +{ + unsigned char busnum = bus->number; + u32 intr; + + /* Clear status register bits. 
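Stale master/target abort indications are cleared up front so the abort check made after the access reflects only the current cycle.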
*/ + MSC_WRITE(MSC01_PCI_INTSTAT, + (MSC01_PCI_INTCFG_MA_BIT | MSC01_PCI_INTCFG_TA_BIT)); + + MSC_WRITE(MSC01_PCI_CFGADDR, + ((busnum << MSC01_PCI_CFGADDR_BNUM_SHF) | + (PCI_SLOT(devfn) << MSC01_PCI_CFGADDR_DNUM_SHF) | + (PCI_FUNC(devfn) << MSC01_PCI_CFGADDR_FNUM_SHF) | + ((where / 4) << MSC01_PCI_CFGADDR_RNUM_SHF))); + + /* Perform access */ + if (access_type == PCI_ACCESS_WRITE) + MSC_WRITE(MSC01_PCI_CFGDATA, *data); + else + MSC_READ(MSC01_PCI_CFGDATA, *data); + + /* Detect Master/Target abort */ + MSC_READ(MSC01_PCI_INTSTAT, intr); + if (intr & (MSC01_PCI_INTCFG_MA_BIT | MSC01_PCI_INTCFG_TA_BIT)) { + /* Error occurred */ + + /* Clear bits */ + MSC_WRITE(MSC01_PCI_INTSTAT, + (MSC01_PCI_INTCFG_MA_BIT | MSC01_PCI_INTCFG_TA_BIT)); + + return -1; + } + + return 0; +} + + +/* + * We can't address 8 and 16 bit words directly. Instead we have to + * read/write a 32bit word and mask/modify the data we actually want. + */ +static int msc_pcibios_read(struct pci_bus *bus, unsigned int devfn, + int where, int size, u32 * val) +{ + u32 data = 0; + + if ((size == 2) && (where & 1)) + return PCIBIOS_BAD_REGISTER_NUMBER; + else if ((size == 4) && (where & 3)) + return PCIBIOS_BAD_REGISTER_NUMBER; + + if (msc_pcibios_config_access(PCI_ACCESS_READ, bus, devfn, where, + &data)) + return -1; + + if (size == 1) + *val = (data >> ((where & 3) << 3)) & 0xff; + else if (size == 2) + *val = (data >> ((where & 3) << 3)) & 0xffff; + else + *val = data; + + return PCIBIOS_SUCCESSFUL; +} + +static int msc_pcibios_write(struct pci_bus *bus, unsigned int devfn, + int where, int size, u32 val) +{ + u32 data = 0; + + if ((size == 2) && (where & 1)) + return PCIBIOS_BAD_REGISTER_NUMBER; + else if ((size == 4) && (where & 3)) + return PCIBIOS_BAD_REGISTER_NUMBER; + + if (size == 4) + data = val; + else { + if (msc_pcibios_config_access(PCI_ACCESS_READ, bus, devfn, + where, &data)) + return -1; + + if (size == 1) + data = (data & ~(0xff << ((where & 3) << 3))) | + (val << ((where & 3) << 3)); + else if (size == 2) + data = (data & ~(0xffff << ((where & 3) << 3))) | + (val << ((where & 3) << 3)); + } + + if (msc_pcibios_config_access(PCI_ACCESS_WRITE, bus, devfn, where, + &data)) + return -1; + + return PCIBIOS_SUCCESSFUL; +} + +struct pci_ops msc_pci_ops = { + .read = msc_pcibios_read, + .write = msc_pcibios_write +}; diff --git a/arch/mips/pci/ops-rc32434.c b/arch/mips/pci/ops-rc32434.c new file mode 100644 index 000000000..874ed6df9 --- /dev/null +++ b/arch/mips/pci/ops-rc32434.c @@ -0,0 +1,206 @@ +/* + * BRIEF MODULE DESCRIPTION + * pci_ops for IDT EB434 board + * + * Copyright 2004 IDT Inc. (rischelp@idt.com) + * Copyright 2006 Felix Fietkau + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN + * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF + * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON + * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write to the Free Software Foundation, Inc., + * 675 Mass Ave, Cambridge, MA 02139, USA. + */ +#include +#include +#include +#include + +#include +#include +#include + +#define PCI_ACCESS_READ 0 +#define PCI_ACCESS_WRITE 1 + + +#define PCI_CFG_SET(bus, slot, func, off) \ + (rc32434_pci->pcicfga = (0x80000000 | \ + ((bus) << 16) | ((slot)<<11) | \ + ((func)<<8) | (off))) + +static inline int config_access(unsigned char access_type, + struct pci_bus *bus, unsigned int devfn, + unsigned char where, u32 *data) +{ + unsigned int slot = PCI_SLOT(devfn); + u8 func = PCI_FUNC(devfn); + + /* Setup address */ + PCI_CFG_SET(bus->number, slot, func, where); + rc32434_sync(); + + if (access_type == PCI_ACCESS_WRITE) + rc32434_pci->pcicfgd = *data; + else + *data = rc32434_pci->pcicfgd; + + rc32434_sync(); + + return 0; +} + + +/* + * We can't address 8 and 16 bit words directly. Instead we have to + * read/write a 32bit word and mask/modify the data we actually want. + */ +static int read_config_byte(struct pci_bus *bus, unsigned int devfn, + int where, u8 *val) +{ + u32 data; + int ret; + + ret = config_access(PCI_ACCESS_READ, bus, devfn, where, &data); + *val = (data >> ((where & 3) << 3)) & 0xff; + return ret; +} + +static int read_config_word(struct pci_bus *bus, unsigned int devfn, + int where, u16 *val) +{ + u32 data; + int ret; + + ret = config_access(PCI_ACCESS_READ, bus, devfn, where, &data); + *val = (data >> ((where & 3) << 3)) & 0xffff; + return ret; +} + +static int read_config_dword(struct pci_bus *bus, unsigned int devfn, + int where, u32 *val) +{ + int ret; + int delay = 1; + + /* + * Don't scan too far, else there will be errors with plugged in + * daughterboard (rb564). 
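Reads of bus 0 slots above 21 therefore return early without performing the access.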
+ */ + if (bus->number == 0 && PCI_SLOT(devfn) > 21) + return 0; + +retry: + ret = config_access(PCI_ACCESS_READ, bus, devfn, where, val); + + /* + * Certain devices react delayed at device scan time, this + * gives them time to settle + */ + if (where == PCI_VENDOR_ID) { + if (ret == 0xffffffff || ret == 0x00000000 || + ret == 0x0000ffff || ret == 0xffff0000) { + if (delay > 4) + return 0; + delay *= 2; + msleep(delay); + goto retry; + } + } + + return ret; +} + +static int +write_config_byte(struct pci_bus *bus, unsigned int devfn, int where, + u8 val) +{ + u32 data = 0; + + if (config_access(PCI_ACCESS_READ, bus, devfn, where, &data)) + return -1; + + data = (data & ~(0xff << ((where & 3) << 3))) | + (val << ((where & 3) << 3)); + + if (config_access(PCI_ACCESS_WRITE, bus, devfn, where, &data)) + return -1; + + return PCIBIOS_SUCCESSFUL; +} + + +static int +write_config_word(struct pci_bus *bus, unsigned int devfn, int where, + u16 val) +{ + u32 data = 0; + + if (config_access(PCI_ACCESS_READ, bus, devfn, where, &data)) + return -1; + + data = (data & ~(0xffff << ((where & 3) << 3))) | + (val << ((where & 3) << 3)); + + if (config_access(PCI_ACCESS_WRITE, bus, devfn, where, &data)) + return -1; + + + return PCIBIOS_SUCCESSFUL; +} + + +static int +write_config_dword(struct pci_bus *bus, unsigned int devfn, int where, + u32 val) +{ + if (config_access(PCI_ACCESS_WRITE, bus, devfn, where, &val)) + return -1; + + return PCIBIOS_SUCCESSFUL; +} + +static int pci_config_read(struct pci_bus *bus, unsigned int devfn, + int where, int size, u32 *val) +{ + switch (size) { + case 1: + return read_config_byte(bus, devfn, where, (u8 *) val); + case 2: + return read_config_word(bus, devfn, where, (u16 *) val); + default: + return read_config_dword(bus, devfn, where, val); + } +} + +static int pci_config_write(struct pci_bus *bus, unsigned int devfn, + int where, int size, u32 val) +{ + switch (size) { + case 1: + return write_config_byte(bus, devfn, where, (u8) val); + case 2: + return write_config_word(bus, devfn, where, (u16) val); + default: + return write_config_dword(bus, devfn, where, val); + } +} + +struct pci_ops rc32434_pci_ops = { + .read = pci_config_read, + .write = pci_config_write, +}; diff --git a/arch/mips/pci/ops-sni.c b/arch/mips/pci/ops-sni.c new file mode 100644 index 000000000..35daa7fe6 --- /dev/null +++ b/arch/mips/pci/ops-sni.c @@ -0,0 +1,164 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * SNI specific PCI support for RM200/RM300. + * + * Copyright (C) 1997 - 2000, 2003 Ralf Baechle + */ +#include +#include +#include +#include + +/* + * It seems that on the RM200 only lower 3 bits of the 5 bit PCI device + * address are decoded. We therefore manually have to reject attempts at + * reading outside this range. Being on the paranoid side we only do this + * test for bus 0 and hope forwarding and decoding work properly for any + * subordinated busses. + * + * ASIC PCI only supports type 1 config cycles. 
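The address register is therefore always loaded with the full bus/devfn/register triplet, even for devices on bus 0.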
+ */ +static int set_config_address(unsigned int busno, unsigned int devfn, int reg) +{ + if ((devfn > 255) || (reg > 255)) + return PCIBIOS_BAD_REGISTER_NUMBER; + + if (busno == 0 && devfn >= PCI_DEVFN(8, 0)) + return PCIBIOS_DEVICE_NOT_FOUND; + + *(volatile u32 *)PCIMT_CONFIG_ADDRESS = + ((busno & 0xff) << 16) | + ((devfn & 0xff) << 8) | + (reg & 0xfc); + + return PCIBIOS_SUCCESSFUL; +} + +static int pcimt_read(struct pci_bus *bus, unsigned int devfn, int reg, + int size, u32 * val) +{ + int res; + + if ((res = set_config_address(bus->number, devfn, reg))) + return res; + + switch (size) { + case 1: + *val = inb(PCIMT_CONFIG_DATA + (reg & 3)); + break; + case 2: + *val = inw(PCIMT_CONFIG_DATA + (reg & 2)); + break; + case 4: + *val = inl(PCIMT_CONFIG_DATA); + break; + } + + return 0; +} + +static int pcimt_write(struct pci_bus *bus, unsigned int devfn, int reg, + int size, u32 val) +{ + int res; + + if ((res = set_config_address(bus->number, devfn, reg))) + return res; + + switch (size) { + case 1: + outb(val, PCIMT_CONFIG_DATA + (reg & 3)); + break; + case 2: + outw(val, PCIMT_CONFIG_DATA + (reg & 2)); + break; + case 4: + outl(val, PCIMT_CONFIG_DATA); + break; + } + + return 0; +} + +struct pci_ops sni_pcimt_ops = { + .read = pcimt_read, + .write = pcimt_write, +}; + +static int pcit_set_config_address(unsigned int busno, unsigned int devfn, int reg) +{ + if ((devfn > 255) || (reg > 255) || (busno > 255)) + return PCIBIOS_BAD_REGISTER_NUMBER; + + outl((1 << 31) | ((busno & 0xff) << 16) | ((devfn & 0xff) << 8) | (reg & 0xfc), 0xcf8); + return PCIBIOS_SUCCESSFUL; +} + +static int pcit_read(struct pci_bus *bus, unsigned int devfn, int reg, + int size, u32 * val) +{ + int res; + + /* + * on bus 0 we need to check, whether there is a device answering + * for the devfn by doing a config write and checking the result. If + * we don't do it, we will get a data bus error + */ + if (bus->number == 0) { + pcit_set_config_address(0, 0, 0x68); + outl(inl(0xcfc) | 0xc0000000, 0xcfc); + if ((res = pcit_set_config_address(0, devfn, 0))) + return res; + outl(0xffffffff, 0xcfc); + pcit_set_config_address(0, 0, 0x68); + if (inl(0xcfc) & 0x100000) + return PCIBIOS_DEVICE_NOT_FOUND; + } + if ((res = pcit_set_config_address(bus->number, devfn, reg))) + return res; + + switch (size) { + case 1: + *val = inb(PCIMT_CONFIG_DATA + (reg & 3)); + break; + case 2: + *val = inw(PCIMT_CONFIG_DATA + (reg & 2)); + break; + case 4: + *val = inl(PCIMT_CONFIG_DATA); + break; + } + return 0; +} + +static int pcit_write(struct pci_bus *bus, unsigned int devfn, int reg, + int size, u32 val) +{ + int res; + + if ((res = pcit_set_config_address(bus->number, devfn, reg))) + return res; + + switch (size) { + case 1: + outb(val, PCIMT_CONFIG_DATA + (reg & 3)); + break; + case 2: + outw(val, PCIMT_CONFIG_DATA + (reg & 2)); + break; + case 4: + outl(val, PCIMT_CONFIG_DATA); + break; + } + + return 0; +} + + +struct pci_ops sni_pcit_ops = { + .read = pcit_read, + .write = pcit_write, +}; diff --git a/arch/mips/pci/ops-tx4927.c b/arch/mips/pci/ops-tx4927.c new file mode 100644 index 000000000..f7802f100 --- /dev/null +++ b/arch/mips/pci/ops-tx4927.c @@ -0,0 +1,524 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Define the pci_ops for the PCIC on Toshiba TX4927, TX4938, etc. + * + * Based on linux/arch/mips/pci/ops-tx4938.c, + * linux/arch/mips/pci/fixup-rbtx4938.c, + * linux/arch/mips/txx9/rbtx4938/setup.c, + * and RBTX49xx patch from CELF patch archive. + * + * 2003-2005 (c) MontaVista Software, Inc. 
+ * Copyright (C) 2004 by Ralf Baechle (ralf@linux-mips.org) + * (C) Copyright TOSHIBA CORPORATION 2000-2001, 2004-2007 + */ +#include +#include +#include +#include +#include + +static struct { + struct pci_controller *channel; + struct tx4927_pcic_reg __iomem *pcicptr; +} pcicptrs[2]; /* TX4938 has 2 pcic */ + +static void __init set_tx4927_pcicptr(struct pci_controller *channel, + struct tx4927_pcic_reg __iomem *pcicptr) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(pcicptrs); i++) { + if (pcicptrs[i].channel == channel) { + pcicptrs[i].pcicptr = pcicptr; + return; + } + } + for (i = 0; i < ARRAY_SIZE(pcicptrs); i++) { + if (!pcicptrs[i].channel) { + pcicptrs[i].channel = channel; + pcicptrs[i].pcicptr = pcicptr; + return; + } + } + BUG(); +} + +struct tx4927_pcic_reg __iomem *get_tx4927_pcicptr( + struct pci_controller *channel) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(pcicptrs); i++) { + if (pcicptrs[i].channel == channel) + return pcicptrs[i].pcicptr; + } + return NULL; +} + +static int mkaddr(struct pci_bus *bus, unsigned int devfn, int where, + struct tx4927_pcic_reg __iomem *pcicptr) +{ + if (bus->parent == NULL && + devfn >= PCI_DEVFN(TX4927_PCIC_MAX_DEVNU, 0)) + return -1; + __raw_writel(((bus->number & 0xff) << 0x10) + | ((devfn & 0xff) << 0x08) | (where & 0xfc) + | (bus->parent ? 1 : 0), + &pcicptr->g2pcfgadrs); + /* clear M_ABORT and Disable M_ABORT Int. */ + __raw_writel((__raw_readl(&pcicptr->pcistatus) & 0x0000ffff) + | (PCI_STATUS_REC_MASTER_ABORT << 16), + &pcicptr->pcistatus); + return 0; +} + +static int check_abort(struct tx4927_pcic_reg __iomem *pcicptr) +{ + int code = PCIBIOS_SUCCESSFUL; + + /* wait write cycle completion before checking error status */ + while (__raw_readl(&pcicptr->pcicstatus) & TX4927_PCIC_PCICSTATUS_IWB) + ; + if (__raw_readl(&pcicptr->pcistatus) + & (PCI_STATUS_REC_MASTER_ABORT << 16)) { + __raw_writel((__raw_readl(&pcicptr->pcistatus) & 0x0000ffff) + | (PCI_STATUS_REC_MASTER_ABORT << 16), + &pcicptr->pcistatus); + /* flush write buffer */ + iob(); + code = PCIBIOS_DEVICE_NOT_FOUND; + } + return code; +} + +static u8 icd_readb(int offset, struct tx4927_pcic_reg __iomem *pcicptr) +{ +#ifdef __BIG_ENDIAN + offset ^= 3; +#endif + return __raw_readb((void __iomem *)&pcicptr->g2pcfgdata + offset); +} +static u16 icd_readw(int offset, struct tx4927_pcic_reg __iomem *pcicptr) +{ +#ifdef __BIG_ENDIAN + offset ^= 2; +#endif + return __raw_readw((void __iomem *)&pcicptr->g2pcfgdata + offset); +} +static u32 icd_readl(struct tx4927_pcic_reg __iomem *pcicptr) +{ + return __raw_readl(&pcicptr->g2pcfgdata); +} +static void icd_writeb(u8 val, int offset, + struct tx4927_pcic_reg __iomem *pcicptr) +{ +#ifdef __BIG_ENDIAN + offset ^= 3; +#endif + __raw_writeb(val, (void __iomem *)&pcicptr->g2pcfgdata + offset); +} +static void icd_writew(u16 val, int offset, + struct tx4927_pcic_reg __iomem *pcicptr) +{ +#ifdef __BIG_ENDIAN + offset ^= 2; +#endif + __raw_writew(val, (void __iomem *)&pcicptr->g2pcfgdata + offset); +} +static void icd_writel(u32 val, struct tx4927_pcic_reg __iomem *pcicptr) +{ + __raw_writel(val, &pcicptr->g2pcfgdata); +} + +static struct tx4927_pcic_reg __iomem *pci_bus_to_pcicptr(struct pci_bus *bus) +{ + struct pci_controller *channel = bus->sysdata; + return get_tx4927_pcicptr(channel); +} + +static int tx4927_pci_config_read(struct pci_bus *bus, unsigned int devfn, + int where, int size, u32 *val) +{ + struct tx4927_pcic_reg __iomem *pcicptr = pci_bus_to_pcicptr(bus); + + if (mkaddr(bus, devfn, where, pcicptr)) { + *val = 0xffffffff; + return 
-1; + } + switch (size) { + case 1: + *val = icd_readb(where & 3, pcicptr); + break; + case 2: + *val = icd_readw(where & 3, pcicptr); + break; + default: + *val = icd_readl(pcicptr); + } + return check_abort(pcicptr); +} + +static int tx4927_pci_config_write(struct pci_bus *bus, unsigned int devfn, + int where, int size, u32 val) +{ + struct tx4927_pcic_reg __iomem *pcicptr = pci_bus_to_pcicptr(bus); + + if (mkaddr(bus, devfn, where, pcicptr)) + return -1; + switch (size) { + case 1: + icd_writeb(val, where & 3, pcicptr); + break; + case 2: + icd_writew(val, where & 3, pcicptr); + break; + default: + icd_writel(val, pcicptr); + } + return check_abort(pcicptr); +} + +static struct pci_ops tx4927_pci_ops = { + .read = tx4927_pci_config_read, + .write = tx4927_pci_config_write, +}; + +static struct { + u8 trdyto; + u8 retryto; + u16 gbwc; +} tx4927_pci_opts = { + .trdyto = 0, + .retryto = 0, + .gbwc = 0xfe0, /* 4064 GBUSCLK for CCFG.GTOT=0b11 */ +}; + +char *tx4927_pcibios_setup(char *str) +{ + if (!strncmp(str, "trdyto=", 7)) { + u8 val = 0; + if (kstrtou8(str + 7, 0, &val) == 0) + tx4927_pci_opts.trdyto = val; + return NULL; + } + if (!strncmp(str, "retryto=", 8)) { + u8 val = 0; + if (kstrtou8(str + 8, 0, &val) == 0) + tx4927_pci_opts.retryto = val; + return NULL; + } + if (!strncmp(str, "gbwc=", 5)) { + u16 val; + if (kstrtou16(str + 5, 0, &val) == 0) + tx4927_pci_opts.gbwc = val; + return NULL; + } + return str; +} + +void __init tx4927_pcic_setup(struct tx4927_pcic_reg __iomem *pcicptr, + struct pci_controller *channel, int extarb) +{ + int i; + unsigned long flags; + + set_tx4927_pcicptr(channel, pcicptr); + + if (!channel->pci_ops) + printk(KERN_INFO + "PCIC -- DID:%04x VID:%04x RID:%02x Arbiter:%s\n", + __raw_readl(&pcicptr->pciid) >> 16, + __raw_readl(&pcicptr->pciid) & 0xffff, + __raw_readl(&pcicptr->pciccrev) & 0xff, + extarb ? 
"External" : "Internal"); + channel->pci_ops = &tx4927_pci_ops; + + local_irq_save(flags); + + /* Disable All Initiator Space */ + __raw_writel(__raw_readl(&pcicptr->pciccfg) + & ~(TX4927_PCIC_PCICCFG_G2PMEN(0) + | TX4927_PCIC_PCICCFG_G2PMEN(1) + | TX4927_PCIC_PCICCFG_G2PMEN(2) + | TX4927_PCIC_PCICCFG_G2PIOEN), + &pcicptr->pciccfg); + + /* GB->PCI mappings */ + __raw_writel((channel->io_resource->end - channel->io_resource->start) + >> 4, + &pcicptr->g2piomask); + ____raw_writeq((channel->io_resource->start + + channel->io_map_base - IO_BASE) | +#ifdef __BIG_ENDIAN + TX4927_PCIC_G2PIOGBASE_ECHG +#else + TX4927_PCIC_G2PIOGBASE_BSDIS +#endif + , &pcicptr->g2piogbase); + ____raw_writeq(channel->io_resource->start - channel->io_offset, + &pcicptr->g2piopbase); + for (i = 0; i < 3; i++) { + __raw_writel(0, &pcicptr->g2pmmask[i]); + ____raw_writeq(0, &pcicptr->g2pmgbase[i]); + ____raw_writeq(0, &pcicptr->g2pmpbase[i]); + } + if (channel->mem_resource->end) { + __raw_writel((channel->mem_resource->end + - channel->mem_resource->start) >> 4, + &pcicptr->g2pmmask[0]); + ____raw_writeq(channel->mem_resource->start | +#ifdef __BIG_ENDIAN + TX4927_PCIC_G2PMnGBASE_ECHG +#else + TX4927_PCIC_G2PMnGBASE_BSDIS +#endif + , &pcicptr->g2pmgbase[0]); + ____raw_writeq(channel->mem_resource->start - + channel->mem_offset, + &pcicptr->g2pmpbase[0]); + } + /* PCI->GB mappings (I/O 256B) */ + __raw_writel(0, &pcicptr->p2giopbase); /* 256B */ + ____raw_writeq(0, &pcicptr->p2giogbase); + /* PCI->GB mappings (MEM 512MB (64MB on R1.x)) */ + __raw_writel(0, &pcicptr->p2gm0plbase); + __raw_writel(0, &pcicptr->p2gm0pubase); + ____raw_writeq(TX4927_PCIC_P2GMnGBASE_TMEMEN | +#ifdef __BIG_ENDIAN + TX4927_PCIC_P2GMnGBASE_TECHG +#else + TX4927_PCIC_P2GMnGBASE_TBSDIS +#endif + , &pcicptr->p2gmgbase[0]); + /* PCI->GB mappings (MEM 16MB) */ + __raw_writel(0xffffffff, &pcicptr->p2gm1plbase); + __raw_writel(0xffffffff, &pcicptr->p2gm1pubase); + ____raw_writeq(0, &pcicptr->p2gmgbase[1]); + /* PCI->GB mappings (MEM 1MB) */ + __raw_writel(0xffffffff, &pcicptr->p2gm2pbase); /* 1MB */ + ____raw_writeq(0, &pcicptr->p2gmgbase[2]); + + /* Clear all (including IRBER) except for GBWC */ + __raw_writel((tx4927_pci_opts.gbwc << 16) + & TX4927_PCIC_PCICCFG_GBWC_MASK, + &pcicptr->pciccfg); + /* Enable Initiator Memory Space */ + if (channel->mem_resource->end) + __raw_writel(__raw_readl(&pcicptr->pciccfg) + | TX4927_PCIC_PCICCFG_G2PMEN(0), + &pcicptr->pciccfg); + /* Enable Initiator I/O Space */ + if (channel->io_resource->end) + __raw_writel(__raw_readl(&pcicptr->pciccfg) + | TX4927_PCIC_PCICCFG_G2PIOEN, + &pcicptr->pciccfg); + /* Enable Initiator Config */ + __raw_writel(__raw_readl(&pcicptr->pciccfg) + | TX4927_PCIC_PCICCFG_ICAEN | TX4927_PCIC_PCICCFG_TCAR, + &pcicptr->pciccfg); + + /* Do not use MEMMUL, MEMINF: YMFPCI card causes M_ABORT. 
*/ + __raw_writel(0, &pcicptr->pcicfg1); + + __raw_writel((__raw_readl(&pcicptr->g2ptocnt) & ~0xffff) + | (tx4927_pci_opts.trdyto & 0xff) + | ((tx4927_pci_opts.retryto & 0xff) << 8), + &pcicptr->g2ptocnt); + + /* Clear All Local Bus Status */ + __raw_writel(TX4927_PCIC_PCICSTATUS_ALL, &pcicptr->pcicstatus); + /* Enable All Local Bus Interrupts */ + __raw_writel(TX4927_PCIC_PCICSTATUS_ALL, &pcicptr->pcicmask); + /* Clear All Initiator Status */ + __raw_writel(TX4927_PCIC_G2PSTATUS_ALL, &pcicptr->g2pstatus); + /* Enable All Initiator Interrupts */ + __raw_writel(TX4927_PCIC_G2PSTATUS_ALL, &pcicptr->g2pmask); + /* Clear All PCI Status Error */ + __raw_writel((__raw_readl(&pcicptr->pcistatus) & 0x0000ffff) + | (TX4927_PCIC_PCISTATUS_ALL << 16), + &pcicptr->pcistatus); + /* Enable All PCI Status Error Interrupts */ + __raw_writel(TX4927_PCIC_PCISTATUS_ALL, &pcicptr->pcimask); + + if (!extarb) { + /* Reset Bus Arbiter */ + __raw_writel(TX4927_PCIC_PBACFG_RPBA, &pcicptr->pbacfg); + __raw_writel(0, &pcicptr->pbabm); + /* Enable Bus Arbiter */ + __raw_writel(TX4927_PCIC_PBACFG_PBAEN, &pcicptr->pbacfg); + } + + __raw_writel(PCI_COMMAND_MASTER | PCI_COMMAND_MEMORY + | PCI_COMMAND_PARITY | PCI_COMMAND_SERR, + &pcicptr->pcistatus); + local_irq_restore(flags); + + printk(KERN_DEBUG + "PCI: COMMAND=%04x,PCIMASK=%04x," + "TRDYTO=%02x,RETRYTO=%02x,GBWC=%03x\n", + __raw_readl(&pcicptr->pcistatus) & 0xffff, + __raw_readl(&pcicptr->pcimask) & 0xffff, + __raw_readl(&pcicptr->g2ptocnt) & 0xff, + (__raw_readl(&pcicptr->g2ptocnt) & 0xff00) >> 8, + (__raw_readl(&pcicptr->pciccfg) >> 16) & 0xfff); +} + +static void tx4927_report_pcic_status1(struct tx4927_pcic_reg __iomem *pcicptr) +{ + __u16 pcistatus = (__u16)(__raw_readl(&pcicptr->pcistatus) >> 16); + __u32 g2pstatus = __raw_readl(&pcicptr->g2pstatus); + __u32 pcicstatus = __raw_readl(&pcicptr->pcicstatus); + static struct { + __u32 flag; + const char *str; + } pcistat_tbl[] = { + { PCI_STATUS_DETECTED_PARITY, "DetectedParityError" }, + { PCI_STATUS_SIG_SYSTEM_ERROR, "SignaledSystemError" }, + { PCI_STATUS_REC_MASTER_ABORT, "ReceivedMasterAbort" }, + { PCI_STATUS_REC_TARGET_ABORT, "ReceivedTargetAbort" }, + { PCI_STATUS_SIG_TARGET_ABORT, "SignaledTargetAbort" }, + { PCI_STATUS_PARITY, "MasterParityError" }, + }, g2pstat_tbl[] = { + { TX4927_PCIC_G2PSTATUS_TTOE, "TIOE" }, + { TX4927_PCIC_G2PSTATUS_RTOE, "RTOE" }, + }, pcicstat_tbl[] = { + { TX4927_PCIC_PCICSTATUS_PME, "PME" }, + { TX4927_PCIC_PCICSTATUS_TLB, "TLB" }, + { TX4927_PCIC_PCICSTATUS_NIB, "NIB" }, + { TX4927_PCIC_PCICSTATUS_ZIB, "ZIB" }, + { TX4927_PCIC_PCICSTATUS_PERR, "PERR" }, + { TX4927_PCIC_PCICSTATUS_SERR, "SERR" }, + { TX4927_PCIC_PCICSTATUS_GBE, "GBE" }, + { TX4927_PCIC_PCICSTATUS_IWB, "IWB" }, + }; + int i, cont; + + printk(KERN_ERR ""); + if (pcistatus & TX4927_PCIC_PCISTATUS_ALL) { + printk(KERN_CONT "pcistat:%04x(", pcistatus); + for (i = 0, cont = 0; i < ARRAY_SIZE(pcistat_tbl); i++) + if (pcistatus & pcistat_tbl[i].flag) + printk(KERN_CONT "%s%s", + cont++ ? " " : "", pcistat_tbl[i].str); + printk(KERN_CONT ") "); + } + if (g2pstatus & TX4927_PCIC_G2PSTATUS_ALL) { + printk(KERN_CONT "g2pstatus:%08x(", g2pstatus); + for (i = 0, cont = 0; i < ARRAY_SIZE(g2pstat_tbl); i++) + if (g2pstatus & g2pstat_tbl[i].flag) + printk(KERN_CONT "%s%s", + cont++ ? 
" " : "", g2pstat_tbl[i].str); + printk(KERN_CONT ") "); + } + if (pcicstatus & TX4927_PCIC_PCICSTATUS_ALL) { + printk(KERN_CONT "pcicstatus:%08x(", pcicstatus); + for (i = 0, cont = 0; i < ARRAY_SIZE(pcicstat_tbl); i++) + if (pcicstatus & pcicstat_tbl[i].flag) + printk(KERN_CONT "%s%s", + cont++ ? " " : "", pcicstat_tbl[i].str); + printk(KERN_CONT ")"); + } + printk(KERN_CONT "\n"); +} + +void tx4927_report_pcic_status(void) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(pcicptrs); i++) { + if (pcicptrs[i].pcicptr) + tx4927_report_pcic_status1(pcicptrs[i].pcicptr); + } +} + +static void tx4927_dump_pcic_settings1(struct tx4927_pcic_reg __iomem *pcicptr) +{ + int i; + __u32 __iomem *preg = (__u32 __iomem *)pcicptr; + + printk(KERN_INFO "tx4927 pcic (0x%p) settings:", pcicptr); + for (i = 0; i < sizeof(struct tx4927_pcic_reg); i += 4, preg++) { + if (i % 32 == 0) { + printk(KERN_CONT "\n"); + printk(KERN_INFO "%04x:", i); + } + /* skip registers with side-effects */ + if (i == offsetof(struct tx4927_pcic_reg, g2pintack) + || i == offsetof(struct tx4927_pcic_reg, g2pspc) + || i == offsetof(struct tx4927_pcic_reg, g2pcfgadrs) + || i == offsetof(struct tx4927_pcic_reg, g2pcfgdata)) { + printk(KERN_CONT " XXXXXXXX"); + continue; + } + printk(KERN_CONT " %08x", __raw_readl(preg)); + } + printk(KERN_CONT "\n"); +} + +void tx4927_dump_pcic_settings(void) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(pcicptrs); i++) { + if (pcicptrs[i].pcicptr) + tx4927_dump_pcic_settings1(pcicptrs[i].pcicptr); + } +} + +irqreturn_t tx4927_pcierr_interrupt(int irq, void *dev_id) +{ + struct pt_regs *regs = get_irq_regs(); + struct tx4927_pcic_reg __iomem *pcicptr = + (struct tx4927_pcic_reg __iomem *)(unsigned long)dev_id; + + if (txx9_pci_err_action != TXX9_PCI_ERR_IGNORE) { + printk(KERN_WARNING "PCIERR interrupt at 0x%0*lx\n", + (int)(2 * sizeof(unsigned long)), regs->cp0_epc); + tx4927_report_pcic_status1(pcicptr); + } + if (txx9_pci_err_action != TXX9_PCI_ERR_PANIC) { + /* clear all pci errors */ + __raw_writel((__raw_readl(&pcicptr->pcistatus) & 0x0000ffff) + | (TX4927_PCIC_PCISTATUS_ALL << 16), + &pcicptr->pcistatus); + __raw_writel(TX4927_PCIC_G2PSTATUS_ALL, &pcicptr->g2pstatus); + __raw_writel(TX4927_PCIC_PBASTATUS_ALL, &pcicptr->pbastatus); + __raw_writel(TX4927_PCIC_PCICSTATUS_ALL, &pcicptr->pcicstatus); + return IRQ_HANDLED; + } + console_verbose(); + tx4927_dump_pcic_settings1(pcicptr); + panic("PCI error."); +} + +#ifdef CONFIG_TOSHIBA_FPCIB0 +static void tx4927_quirk_slc90e66_bridge(struct pci_dev *dev) +{ + struct tx4927_pcic_reg __iomem *pcicptr = pci_bus_to_pcicptr(dev->bus); + + if (!pcicptr) + return; + if (__raw_readl(&pcicptr->pbacfg) & TX4927_PCIC_PBACFG_PBAEN) { + /* Reset Bus Arbiter */ + __raw_writel(TX4927_PCIC_PBACFG_RPBA, &pcicptr->pbacfg); + /* + * swap reqBP and reqXP (raise priority of SLC90E66). + * SLC90E66(PCI-ISA bridge) is connected to REQ2 on + * PCI Backplane board. 
+ */ + __raw_writel(0x72543610, &pcicptr->pbareqport); + __raw_writel(0, &pcicptr->pbabm); + /* Use Fixed ParkMaster (required by SLC90E66) */ + __raw_writel(TX4927_PCIC_PBACFG_FIXPA, &pcicptr->pbacfg); + /* Enable Bus Arbiter */ + __raw_writel(TX4927_PCIC_PBACFG_FIXPA | + TX4927_PCIC_PBACFG_PBAEN, + &pcicptr->pbacfg); + printk(KERN_INFO "PCI: Use Fixed Park Master (REQPORT %08x)\n", + __raw_readl(&pcicptr->pbareqport)); + } +} +#define PCI_DEVICE_ID_EFAR_SLC90E66_0 0x9460 +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_EFAR, PCI_DEVICE_ID_EFAR_SLC90E66_0, + tx4927_quirk_slc90e66_bridge); +#endif diff --git a/arch/mips/pci/pci-alchemy.c b/arch/mips/pci/pci-alchemy.c new file mode 100644 index 000000000..1c722dd0c --- /dev/null +++ b/arch/mips/pci/pci-alchemy.c @@ -0,0 +1,536 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Alchemy PCI host mode support. + * + * Copyright 2001-2003, 2007-2008 MontaVista Software Inc. + * Author: MontaVista Software, Inc. + * + * Support for all devices (greater than 16) added by David Gathright. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include /* for dma_default_coherent */ + +#include +#include + +#ifdef CONFIG_PCI_DEBUG +#define DBG(x...) printk(KERN_DEBUG x) +#else +#define DBG(x...) do {} while (0) +#endif + +#define PCI_ACCESS_READ 0 +#define PCI_ACCESS_WRITE 1 + +struct alchemy_pci_context { + struct pci_controller alchemy_pci_ctrl; /* leave as first member! */ + void __iomem *regs; /* ctrl base */ + /* tools for wired entry for config space access */ + unsigned long last_elo0; + unsigned long last_elo1; + int wired_entry; + struct vm_struct *pci_cfg_vm; + + unsigned long pm[12]; + + int (*board_map_irq)(const struct pci_dev *d, u8 slot, u8 pin); + int (*board_pci_idsel)(unsigned int devsel, int assert); +}; + +/* for syscore_ops. There's only one PCI controller on Alchemy chips, so this + * should suffice for now. + */ +static struct alchemy_pci_context *__alchemy_pci_ctx; + + +/* IO/MEM resources for PCI. 
Keep the memres in sync with fixup_bigphys_addr + * in arch/mips/alchemy/common/setup.c + */ +static struct resource alchemy_pci_def_memres = { + .start = ALCHEMY_PCI_MEMWIN_START, + .end = ALCHEMY_PCI_MEMWIN_END, + .name = "PCI memory space", + .flags = IORESOURCE_MEM +}; + +static struct resource alchemy_pci_def_iores = { + .start = ALCHEMY_PCI_IOWIN_START, + .end = ALCHEMY_PCI_IOWIN_END, + .name = "PCI IO space", + .flags = IORESOURCE_IO +}; + +static void mod_wired_entry(int entry, unsigned long entrylo0, + unsigned long entrylo1, unsigned long entryhi, + unsigned long pagemask) +{ + unsigned long old_pagemask; + unsigned long old_ctx; + + /* Save old context and create impossible VPN2 value */ + old_ctx = read_c0_entryhi() & MIPS_ENTRYHI_ASID; + old_pagemask = read_c0_pagemask(); + write_c0_index(entry); + write_c0_pagemask(pagemask); + write_c0_entryhi(entryhi); + write_c0_entrylo0(entrylo0); + write_c0_entrylo1(entrylo1); + tlb_write_indexed(); + write_c0_entryhi(old_ctx); + write_c0_pagemask(old_pagemask); +} + +static void alchemy_pci_wired_entry(struct alchemy_pci_context *ctx) +{ + ctx->wired_entry = read_c0_wired(); + add_wired_entry(0, 0, (unsigned long)ctx->pci_cfg_vm->addr, PM_4K); + ctx->last_elo0 = ctx->last_elo1 = ~0; +} + +static int config_access(unsigned char access_type, struct pci_bus *bus, + unsigned int dev_fn, unsigned char where, u32 *data) +{ + struct alchemy_pci_context *ctx = bus->sysdata; + unsigned int device = PCI_SLOT(dev_fn); + unsigned int function = PCI_FUNC(dev_fn); + unsigned long offset, status, cfg_base, flags, entryLo0, entryLo1, r; + int error = PCIBIOS_SUCCESSFUL; + + if (device > 19) { + *data = 0xffffffff; + return -1; + } + + local_irq_save(flags); + r = __raw_readl(ctx->regs + PCI_REG_STATCMD) & 0x0000ffff; + r |= PCI_STATCMD_STATUS(0x2000); + __raw_writel(r, ctx->regs + PCI_REG_STATCMD); + wmb(); + + /* Allow board vendors to implement their own off-chip IDSEL. + * If it doesn't succeed, may as well bail out at this point. + */ + if (ctx->board_pci_idsel(device, 1) == 0) { + *data = 0xffffffff; + local_irq_restore(flags); + return -1; + } + + /* Setup the config window */ + if (bus->number == 0) + cfg_base = (1 << device) << 11; + else + cfg_base = 0x80000000 | (bus->number << 16) | (device << 11); + + /* Setup the lower bits of the 36-bit address */ + offset = (function << 8) | (where & ~0x3); + /* Pick up any address that falls below the page mask */ + offset |= cfg_base & ~PAGE_MASK; + + /* Page boundary */ + cfg_base = cfg_base & PAGE_MASK; + + /* To improve performance, if the current device is the same as + * the last device accessed, we don't touch the TLB. 
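The address arithmetic in config_access() above splits each configuration cycle into a page-aligned base (what the wired TLB entry must map) and an in-page offset (what is added to the mapped window). A small user-space sketch of that split, assuming 4 KiB pages and purely illustrative bus/device numbers:

/* Standalone sketch of the config-address split done in config_access():
 * page-aligned base for the TLB window, low bits as the in-page offset.
 * Assumes 4 KiB pages; values are illustrative only. */
#include <stdio.h>

#define PAGE_SIZE	4096UL
#define PAGE_MASK	(~(PAGE_SIZE - 1))

static void split_cfg_addr(unsigned int busno, unsigned int device,
			   unsigned int function, unsigned int where)
{
	unsigned long cfg_base, offset;

	if (busno == 0)			/* root bus: one IDSEL bit per device */
		cfg_base = (1UL << device) << 11;
	else				/* behind a bridge: bus/device encoded */
		cfg_base = 0x80000000UL | (busno << 16) | (device << 11);

	offset = (function << 8) | (where & ~0x3U);
	offset |= cfg_base & ~PAGE_MASK;	/* bits below the page boundary */
	cfg_base &= PAGE_MASK;			/* page the TLB entry must map */

	printf("bus %u dev %u fn %u reg 0x%02x -> base 0x%08lx offset 0x%04lx\n",
	       busno, device, function, where, cfg_base, offset);
}

int main(void)
{
	split_cfg_addr(0, 3, 0, 0x10);	/* BAR0 of device 3 on the root bus */
	split_cfg_addr(1, 2, 1, 0x04);	/* COMMAND of a device behind a bridge */
	return 0;
}

In the driver the page-aligned base feeds the EntryLo values of the wired TLB entry, while the offset is added to pci_cfg_vm->addr for the actual load or store.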
+ */ + entryLo0 = (6 << 26) | (cfg_base >> 6) | (2 << 3) | 7; + entryLo1 = (6 << 26) | (cfg_base >> 6) | (0x1000 >> 6) | (2 << 3) | 7; + if ((entryLo0 != ctx->last_elo0) || (entryLo1 != ctx->last_elo1)) { + mod_wired_entry(ctx->wired_entry, entryLo0, entryLo1, + (unsigned long)ctx->pci_cfg_vm->addr, PM_4K); + ctx->last_elo0 = entryLo0; + ctx->last_elo1 = entryLo1; + } + + if (access_type == PCI_ACCESS_WRITE) + __raw_writel(*data, ctx->pci_cfg_vm->addr + offset); + else + *data = __raw_readl(ctx->pci_cfg_vm->addr + offset); + wmb(); + + DBG("alchemy-pci: cfg access %d bus %u dev %u at %x dat %x conf %lx\n", + access_type, bus->number, device, where, *data, offset); + + /* check for errors, master abort */ + status = __raw_readl(ctx->regs + PCI_REG_STATCMD); + if (status & (1 << 29)) { + *data = 0xffffffff; + error = -1; + DBG("alchemy-pci: master abort on cfg access %d bus %d dev %d\n", + access_type, bus->number, device); + } else if ((status >> 28) & 0xf) { + DBG("alchemy-pci: PCI ERR detected: dev %d, status %lx\n", + device, (status >> 28) & 0xf); + + /* clear errors */ + __raw_writel(status & 0xf000ffff, ctx->regs + PCI_REG_STATCMD); + + *data = 0xffffffff; + error = -1; + } + + /* Take away the IDSEL. */ + (void)ctx->board_pci_idsel(device, 0); + + local_irq_restore(flags); + return error; +} + +static int read_config_byte(struct pci_bus *bus, unsigned int devfn, + int where, u8 *val) +{ + u32 data; + int ret = config_access(PCI_ACCESS_READ, bus, devfn, where, &data); + + if (where & 1) + data >>= 8; + if (where & 2) + data >>= 16; + *val = data & 0xff; + return ret; +} + +static int read_config_word(struct pci_bus *bus, unsigned int devfn, + int where, u16 *val) +{ + u32 data; + int ret = config_access(PCI_ACCESS_READ, bus, devfn, where, &data); + + if (where & 2) + data >>= 16; + *val = data & 0xffff; + return ret; +} + +static int read_config_dword(struct pci_bus *bus, unsigned int devfn, + int where, u32 *val) +{ + return config_access(PCI_ACCESS_READ, bus, devfn, where, val); +} + +static int write_config_byte(struct pci_bus *bus, unsigned int devfn, + int where, u8 val) +{ + u32 data = 0; + + if (config_access(PCI_ACCESS_READ, bus, devfn, where, &data)) + return -1; + + data = (data & ~(0xff << ((where & 3) << 3))) | + (val << ((where & 3) << 3)); + + if (config_access(PCI_ACCESS_WRITE, bus, devfn, where, &data)) + return -1; + + return PCIBIOS_SUCCESSFUL; +} + +static int write_config_word(struct pci_bus *bus, unsigned int devfn, + int where, u16 val) +{ + u32 data = 0; + + if (config_access(PCI_ACCESS_READ, bus, devfn, where, &data)) + return -1; + + data = (data & ~(0xffff << ((where & 3) << 3))) | + (val << ((where & 3) << 3)); + + if (config_access(PCI_ACCESS_WRITE, bus, devfn, where, &data)) + return -1; + + return PCIBIOS_SUCCESSFUL; +} + +static int write_config_dword(struct pci_bus *bus, unsigned int devfn, + int where, u32 val) +{ + return config_access(PCI_ACCESS_WRITE, bus, devfn, where, &val); +} + +static int alchemy_pci_read(struct pci_bus *bus, unsigned int devfn, + int where, int size, u32 *val) +{ + switch (size) { + case 1: { + u8 _val; + int rc = read_config_byte(bus, devfn, where, &_val); + + *val = _val; + return rc; + } + case 2: { + u16 _val; + int rc = read_config_word(bus, devfn, where, &_val); + + *val = _val; + return rc; + } + default: + return read_config_dword(bus, devfn, where, val); + } +} + +static int alchemy_pci_write(struct pci_bus *bus, unsigned int devfn, + int where, int size, u32 val) +{ + switch (size) { + case 1: + return 
write_config_byte(bus, devfn, where, (u8) val); + case 2: + return write_config_word(bus, devfn, where, (u16) val); + default: + return write_config_dword(bus, devfn, where, val); + } +} + +static struct pci_ops alchemy_pci_ops = { + .read = alchemy_pci_read, + .write = alchemy_pci_write, +}; + +static int alchemy_pci_def_idsel(unsigned int devsel, int assert) +{ + return 1; /* success */ +} + +/* save PCI controller register contents. */ +static int alchemy_pci_suspend(void) +{ + struct alchemy_pci_context *ctx = __alchemy_pci_ctx; + if (!ctx) + return 0; + + ctx->pm[0] = __raw_readl(ctx->regs + PCI_REG_CMEM); + ctx->pm[1] = __raw_readl(ctx->regs + PCI_REG_CONFIG) & 0x0009ffff; + ctx->pm[2] = __raw_readl(ctx->regs + PCI_REG_B2BMASK_CCH); + ctx->pm[3] = __raw_readl(ctx->regs + PCI_REG_B2BBASE0_VID); + ctx->pm[4] = __raw_readl(ctx->regs + PCI_REG_B2BBASE1_SID); + ctx->pm[5] = __raw_readl(ctx->regs + PCI_REG_MWMASK_DEV); + ctx->pm[6] = __raw_readl(ctx->regs + PCI_REG_MWBASE_REV_CCL); + ctx->pm[7] = __raw_readl(ctx->regs + PCI_REG_ID); + ctx->pm[8] = __raw_readl(ctx->regs + PCI_REG_CLASSREV); + ctx->pm[9] = __raw_readl(ctx->regs + PCI_REG_PARAM); + ctx->pm[10] = __raw_readl(ctx->regs + PCI_REG_MBAR); + ctx->pm[11] = __raw_readl(ctx->regs + PCI_REG_TIMEOUT); + + return 0; +} + +static void alchemy_pci_resume(void) +{ + struct alchemy_pci_context *ctx = __alchemy_pci_ctx; + if (!ctx) + return; + + __raw_writel(ctx->pm[0], ctx->regs + PCI_REG_CMEM); + __raw_writel(ctx->pm[2], ctx->regs + PCI_REG_B2BMASK_CCH); + __raw_writel(ctx->pm[3], ctx->regs + PCI_REG_B2BBASE0_VID); + __raw_writel(ctx->pm[4], ctx->regs + PCI_REG_B2BBASE1_SID); + __raw_writel(ctx->pm[5], ctx->regs + PCI_REG_MWMASK_DEV); + __raw_writel(ctx->pm[6], ctx->regs + PCI_REG_MWBASE_REV_CCL); + __raw_writel(ctx->pm[7], ctx->regs + PCI_REG_ID); + __raw_writel(ctx->pm[8], ctx->regs + PCI_REG_CLASSREV); + __raw_writel(ctx->pm[9], ctx->regs + PCI_REG_PARAM); + __raw_writel(ctx->pm[10], ctx->regs + PCI_REG_MBAR); + __raw_writel(ctx->pm[11], ctx->regs + PCI_REG_TIMEOUT); + wmb(); + __raw_writel(ctx->pm[1], ctx->regs + PCI_REG_CONFIG); + wmb(); + + /* YAMON on all db1xxx boards wipes the TLB and writes zero to C0_wired + * on resume, making it necessary to recreate it as soon as possible. 
+ */ + ctx->wired_entry = 8191; /* impossibly high value */ + alchemy_pci_wired_entry(ctx); /* install it */ +} + +static struct syscore_ops alchemy_pci_pmops = { + .suspend = alchemy_pci_suspend, + .resume = alchemy_pci_resume, +}; + +static int alchemy_pci_probe(struct platform_device *pdev) +{ + struct alchemy_pci_platdata *pd = pdev->dev.platform_data; + struct alchemy_pci_context *ctx; + void __iomem *virt_io; + unsigned long val; + struct resource *r; + struct clk *c; + int ret; + + /* need at least PCI IRQ mapping table */ + if (!pd) { + dev_err(&pdev->dev, "need platform data for PCI setup\n"); + ret = -ENODEV; + goto out; + } + + ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); + if (!ctx) { + dev_err(&pdev->dev, "no memory for pcictl context\n"); + ret = -ENOMEM; + goto out; + } + + r = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!r) { + dev_err(&pdev->dev, "no pcictl ctrl regs resource\n"); + ret = -ENODEV; + goto out1; + } + + if (!request_mem_region(r->start, resource_size(r), pdev->name)) { + dev_err(&pdev->dev, "cannot claim pci regs\n"); + ret = -ENODEV; + goto out1; + } + + c = clk_get(&pdev->dev, "pci_clko"); + if (IS_ERR(c)) { + dev_err(&pdev->dev, "unable to find PCI clock\n"); + ret = PTR_ERR(c); + goto out2; + } + + ret = clk_prepare_enable(c); + if (ret) { + dev_err(&pdev->dev, "cannot enable PCI clock\n"); + goto out6; + } + + ctx->regs = ioremap(r->start, resource_size(r)); + if (!ctx->regs) { + dev_err(&pdev->dev, "cannot map pci regs\n"); + ret = -ENODEV; + goto out5; + } + + /* map parts of the PCI IO area */ + /* REVISIT: if this changes with a newer variant (doubt it) make this + * a platform resource. + */ + virt_io = ioremap(AU1500_PCI_IO_PHYS_ADDR, 0x00100000); + if (!virt_io) { + dev_err(&pdev->dev, "cannot remap pci io space\n"); + ret = -ENODEV; + goto out3; + } + ctx->alchemy_pci_ctrl.io_map_base = (unsigned long)virt_io; + + /* Au1500 revisions older than AD have borked coherent PCI */ + if (alchemy_get_cputype() == ALCHEMY_CPU_AU1500 && + read_c0_prid() < 0x01030202 && !dma_default_coherent) { + val = __raw_readl(ctx->regs + PCI_REG_CONFIG); + val |= PCI_CONFIG_NC; + __raw_writel(val, ctx->regs + PCI_REG_CONFIG); + wmb(); + dev_info(&pdev->dev, "non-coherent PCI on Au1500 AA/AB/AC\n"); + } + + if (pd->board_map_irq) + ctx->board_map_irq = pd->board_map_irq; + + if (pd->board_pci_idsel) + ctx->board_pci_idsel = pd->board_pci_idsel; + else + ctx->board_pci_idsel = alchemy_pci_def_idsel; + + /* fill in relevant pci_controller members */ + ctx->alchemy_pci_ctrl.pci_ops = &alchemy_pci_ops; + ctx->alchemy_pci_ctrl.mem_resource = &alchemy_pci_def_memres; + ctx->alchemy_pci_ctrl.io_resource = &alchemy_pci_def_iores; + + /* we can't ioremap the entire pci config space because it's too large, + * nor can we dynamically ioremap it because some drivers use the + * PCI config routines from within atomic contex and that becomes a + * problem in get_vm_area(). Instead we use one wired TLB entry to + * handle all config accesses for all busses. 
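The comment above explains why one wired TLB entry is used as a movable window onto configuration space; the config accessor keeps the last EntryLo pair so the entry is only rewritten when a different device (page) is targeted. A user-space model of just that caching decision — the EntryLo encoding and the tlb_write_indexed() call are MIPS-specific and stubbed out here:

/* User-space model of the "retarget the wired entry only when the target
 * changes" optimisation; the EntryLo encoding and TLB write are stubbed out. */
#include <stdio.h>

struct cfg_window {
	unsigned long last_elo0;
	unsigned long last_elo1;
	unsigned int tlb_rewrites;	/* how often the window really moved */
};

static void tlb_retarget(struct cfg_window *w, unsigned long elo0,
			 unsigned long elo1)
{
	if (elo0 == w->last_elo0 && elo1 == w->last_elo1)
		return;			/* same device/page: leave the TLB alone */
	/* a real driver would rewrite the wired TLB entry here */
	w->last_elo0 = elo0;
	w->last_elo1 = elo1;
	w->tlb_rewrites++;
}

int main(void)
{
	struct cfg_window w = { ~0UL, ~0UL, 0 };

	tlb_retarget(&w, 0x100, 0x140);	/* first access: rewrite */
	tlb_retarget(&w, 0x100, 0x140);	/* same target: cached   */
	tlb_retarget(&w, 0x200, 0x240);	/* new device: rewrite   */
	printf("TLB rewrites: %u (expected 2)\n", w.tlb_rewrites);
	return 0;
}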
+ */ + ctx->pci_cfg_vm = get_vm_area(0x2000, VM_IOREMAP); + if (!ctx->pci_cfg_vm) { + dev_err(&pdev->dev, "unable to get vm area\n"); + ret = -ENOMEM; + goto out4; + } + ctx->wired_entry = 8191; /* impossibly high value */ + alchemy_pci_wired_entry(ctx); /* install it */ + + set_io_port_base((unsigned long)ctx->alchemy_pci_ctrl.io_map_base); + + /* board may want to modify bits in the config register, do it now */ + val = __raw_readl(ctx->regs + PCI_REG_CONFIG); + val &= ~pd->pci_cfg_clr; + val |= pd->pci_cfg_set; + val &= ~PCI_CONFIG_PD; /* clear disable bit */ + __raw_writel(val, ctx->regs + PCI_REG_CONFIG); + wmb(); + + __alchemy_pci_ctx = ctx; + platform_set_drvdata(pdev, ctx); + register_syscore_ops(&alchemy_pci_pmops); + register_pci_controller(&ctx->alchemy_pci_ctrl); + + dev_info(&pdev->dev, "PCI controller at %ld MHz\n", + clk_get_rate(c) / 1000000); + + return 0; + +out4: + iounmap(virt_io); +out3: + iounmap(ctx->regs); +out5: + clk_disable_unprepare(c); +out6: + clk_put(c); +out2: + release_mem_region(r->start, resource_size(r)); +out1: + kfree(ctx); +out: + return ret; +} + +static struct platform_driver alchemy_pcictl_driver = { + .probe = alchemy_pci_probe, + .driver = { + .name = "alchemy-pci", + }, +}; + +static int __init alchemy_pci_init(void) +{ + /* Au1500/Au1550 have PCI */ + switch (alchemy_get_cputype()) { + case ALCHEMY_CPU_AU1500: + case ALCHEMY_CPU_AU1550: + return platform_driver_register(&alchemy_pcictl_driver); + } + return 0; +} +arch_initcall(alchemy_pci_init); + + +int pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) +{ + struct alchemy_pci_context *ctx = dev->sysdata; + if (ctx && ctx->board_map_irq) + return ctx->board_map_irq(dev, slot, pin); + return -1; +} + +int pcibios_plat_dev_init(struct pci_dev *dev) +{ + return 0; +} diff --git a/arch/mips/pci/pci-ar2315.c b/arch/mips/pci/pci-ar2315.c new file mode 100644 index 000000000..e17d862cf --- /dev/null +++ b/arch/mips/pci/pci-ar2315.c @@ -0,0 +1,521 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + */ + +/* + * Both AR2315 and AR2316 chips have PCI interface unit, which supports DMA + * and interrupt. PCI interface supports MMIO access method, but does not + * seem to support I/O ports. + * + * Read/write operation in the region 0x80000000-0xBFFFFFFF causes + * a memory read/write command on the PCI bus. 30 LSBs of address on + * the bus are taken from memory read/write request and 2 MSBs are + * determined by PCI unit configuration. + * + * To work with the configuration space instead of memory is necessary set + * the CFG_SEL bit in the PCI_MISC_CONFIG register. + * + * Devices on the bus can perform DMA requests via chip BAR1. PCI host + * controller BARs are programmend as if an external device is programmed. + * Which means that during configuration, IDSEL pin of the chip should be + * asserted. + * + * We know (and support) only one board that uses the PCI interface - + * Fonera 2.0g (FON2202). It has a USB EHCI controller connected to the + * AR2315 PCI bus. IDSEL pin of USB controller is connected to AD[13] line + * and IDSEL pin of AR2315 is connected to AD[16] line. 
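The wiring described above maps directly onto the configuration address the driver builds later: selecting slot n asserts AD[13 + n], so slot 0 reaches the external device on AD[13] and slot 3 (AD[16]) addresses the AR2315 host bridge itself. A quick standalone check of that formula, with illustrative register offsets:

/* Standalone check of the AR2315 type 0 config address: slot n asserts
 * AD[13 + n], so slot 0 is the device on AD[13] and slot 3 (AD[16]) is
 * the host bridge itself. */
#include <stdio.h>

static unsigned int ar2315_cfg_addr(unsigned int dev, unsigned int func,
				    unsigned int where)
{
	return (1U << (13 + dev)) | (func << 8) | (where & ~3U);
}

int main(void)
{
	/* vendor/device ID of the external device (slot 0, AD[13]) */
	printf("slot 0 reg 0x00 -> 0x%08x\n", ar2315_cfg_addr(0, 0, 0x00));
	/* BAR1 of the host bridge (slot 3, AD[16]), programmed for DMA */
	printf("slot 3 reg 0x14 -> 0x%08x\n", ar2315_cfg_addr(3, 0, 0x14));
	return 0;
}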
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* + * PCI Bus Interface Registers + */ +#define AR2315_PCI_1MS_REG 0x0008 + +#define AR2315_PCI_1MS_MASK 0x3FFFF /* # of AHB clk cycles in 1ms */ + +#define AR2315_PCI_MISC_CONFIG 0x000c + +#define AR2315_PCIMISC_TXD_EN 0x00000001 /* Enable TXD for fragments */ +#define AR2315_PCIMISC_CFG_SEL 0x00000002 /* Mem or Config cycles */ +#define AR2315_PCIMISC_GIG_MASK 0x0000000C /* bits 31-30 for pci req */ +#define AR2315_PCIMISC_RST_MODE 0x00000030 +#define AR2315_PCIRST_INPUT 0x00000000 /* 4:5=0 rst is input */ +#define AR2315_PCIRST_LOW 0x00000010 /* 4:5=1 rst to GND */ +#define AR2315_PCIRST_HIGH 0x00000020 /* 4:5=2 rst to VDD */ +#define AR2315_PCIGRANT_EN 0x00000000 /* 6:7=0 early grant en */ +#define AR2315_PCIGRANT_FRAME 0x00000040 /* 6:7=1 grant waits 4 frame */ +#define AR2315_PCIGRANT_IDLE 0x00000080 /* 6:7=2 grant waits 4 idle */ +#define AR2315_PCIGRANT_GAP 0x00000000 /* 6:7=2 grant waits 4 idle */ +#define AR2315_PCICACHE_DIS 0x00001000 /* PCI external access cache + * disable */ + +#define AR2315_PCI_OUT_TSTAMP 0x0010 + +#define AR2315_PCI_UNCACHE_CFG 0x0014 + +#define AR2315_PCI_IN_EN 0x0100 + +#define AR2315_PCI_IN_EN0 0x01 /* Enable chain 0 */ +#define AR2315_PCI_IN_EN1 0x02 /* Enable chain 1 */ +#define AR2315_PCI_IN_EN2 0x04 /* Enable chain 2 */ +#define AR2315_PCI_IN_EN3 0x08 /* Enable chain 3 */ + +#define AR2315_PCI_IN_DIS 0x0104 + +#define AR2315_PCI_IN_DIS0 0x01 /* Disable chain 0 */ +#define AR2315_PCI_IN_DIS1 0x02 /* Disable chain 1 */ +#define AR2315_PCI_IN_DIS2 0x04 /* Disable chain 2 */ +#define AR2315_PCI_IN_DIS3 0x08 /* Disable chain 3 */ + +#define AR2315_PCI_IN_PTR 0x0200 + +#define AR2315_PCI_OUT_EN 0x0400 + +#define AR2315_PCI_OUT_EN0 0x01 /* Enable chain 0 */ + +#define AR2315_PCI_OUT_DIS 0x0404 + +#define AR2315_PCI_OUT_DIS0 0x01 /* Disable chain 0 */ + +#define AR2315_PCI_OUT_PTR 0x0408 + +/* PCI interrupt status (write one to clear) */ +#define AR2315_PCI_ISR 0x0500 + +#define AR2315_PCI_INT_TX 0x00000001 /* Desc In Completed */ +#define AR2315_PCI_INT_TXOK 0x00000002 /* Desc In OK */ +#define AR2315_PCI_INT_TXERR 0x00000004 /* Desc In ERR */ +#define AR2315_PCI_INT_TXEOL 0x00000008 /* Desc In End-of-List */ +#define AR2315_PCI_INT_RX 0x00000010 /* Desc Out Completed */ +#define AR2315_PCI_INT_RXOK 0x00000020 /* Desc Out OK */ +#define AR2315_PCI_INT_RXERR 0x00000040 /* Desc Out ERR */ +#define AR2315_PCI_INT_RXEOL 0x00000080 /* Desc Out EOL */ +#define AR2315_PCI_INT_TXOOD 0x00000200 /* Desc In Out-of-Desc */ +#define AR2315_PCI_INT_DESCMASK 0x0000FFFF /* Desc Mask */ +#define AR2315_PCI_INT_EXT 0x02000000 /* Extern PCI INTA */ +#define AR2315_PCI_INT_ABORT 0x04000000 /* PCI bus abort event */ + +/* PCI interrupt mask */ +#define AR2315_PCI_IMR 0x0504 + +/* Global PCI interrupt enable */ +#define AR2315_PCI_IER 0x0508 + +#define AR2315_PCI_IER_DISABLE 0x00 /* disable pci interrupts */ +#define AR2315_PCI_IER_ENABLE 0x01 /* enable pci interrupts */ + +#define AR2315_PCI_HOST_IN_EN 0x0800 +#define AR2315_PCI_HOST_IN_DIS 0x0804 +#define AR2315_PCI_HOST_IN_PTR 0x0810 +#define AR2315_PCI_HOST_OUT_EN 0x0900 +#define AR2315_PCI_HOST_OUT_DIS 0x0904 +#define AR2315_PCI_HOST_OUT_PTR 0x0908 + +/* + * PCI interrupts, which share IP5 + * Keep ordered according to AR2315_PCI_INT_XXX bits + */ +#define AR2315_PCI_IRQ_EXT 25 +#define AR2315_PCI_IRQ_ABORT 26 +#define AR2315_PCI_IRQ_COUNT 27 + +/* Arbitrary size of memory region to 
access the configuration space */ +#define AR2315_PCI_CFG_SIZE 0x00100000 + +#define AR2315_PCI_HOST_SLOT 3 +#define AR2315_PCI_HOST_DEVID ((0xff18 << 16) | PCI_VENDOR_ID_ATHEROS) + +/* + * We need some arbitrary non-zero value to be programmed to the BAR1 register + * of PCI host controller to enable DMA. The same value should be used as the + * offset to calculate the physical address of DMA buffer for PCI devices. + */ +#define AR2315_PCI_HOST_SDRAM_BASEADDR 0x20000000 + +/* ??? access BAR */ +#define AR2315_PCI_HOST_MBAR0 0x10000000 +/* RAM access BAR */ +#define AR2315_PCI_HOST_MBAR1 AR2315_PCI_HOST_SDRAM_BASEADDR +/* ??? access BAR */ +#define AR2315_PCI_HOST_MBAR2 0x30000000 + +struct ar2315_pci_ctrl { + void __iomem *cfg_mem; + void __iomem *mmr_mem; + unsigned irq; + unsigned irq_ext; + struct irq_domain *domain; + struct pci_controller pci_ctrl; + struct resource mem_res; + struct resource io_res; +}; + +static inline dma_addr_t ar2315_dev_offset(struct device *dev) +{ + if (dev && dev_is_pci(dev)) + return AR2315_PCI_HOST_SDRAM_BASEADDR; + return 0; +} + +dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr) +{ + return paddr + ar2315_dev_offset(dev); +} + +phys_addr_t dma_to_phys(struct device *dev, dma_addr_t dma_addr) +{ + return dma_addr - ar2315_dev_offset(dev); +} + +static inline struct ar2315_pci_ctrl *ar2315_pci_bus_to_apc(struct pci_bus *bus) +{ + struct pci_controller *hose = bus->sysdata; + + return container_of(hose, struct ar2315_pci_ctrl, pci_ctrl); +} + +static inline u32 ar2315_pci_reg_read(struct ar2315_pci_ctrl *apc, u32 reg) +{ + return __raw_readl(apc->mmr_mem + reg); +} + +static inline void ar2315_pci_reg_write(struct ar2315_pci_ctrl *apc, u32 reg, + u32 val) +{ + __raw_writel(val, apc->mmr_mem + reg); +} + +static inline void ar2315_pci_reg_mask(struct ar2315_pci_ctrl *apc, u32 reg, + u32 mask, u32 val) +{ + u32 ret = ar2315_pci_reg_read(apc, reg); + + ret &= ~mask; + ret |= val; + ar2315_pci_reg_write(apc, reg, ret); +} + +static int ar2315_pci_cfg_access(struct ar2315_pci_ctrl *apc, unsigned devfn, + int where, int size, u32 *ptr, bool write) +{ + int func = PCI_FUNC(devfn); + int dev = PCI_SLOT(devfn); + u32 addr = (1 << (13 + dev)) | (func << 8) | (where & ~3); + u32 mask = 0xffffffff >> 8 * (4 - size); + u32 sh = (where & 3) * 8; + u32 value, isr; + + /* Prevent access past the remapped area */ + if (addr >= AR2315_PCI_CFG_SIZE || dev > 18) + return PCIBIOS_DEVICE_NOT_FOUND; + + /* Clear pending errors */ + ar2315_pci_reg_write(apc, AR2315_PCI_ISR, AR2315_PCI_INT_ABORT); + /* Select Configuration access */ + ar2315_pci_reg_mask(apc, AR2315_PCI_MISC_CONFIG, 0, + AR2315_PCIMISC_CFG_SEL); + + mb(); /* PCI must see space change before we begin */ + + value = __raw_readl(apc->cfg_mem + addr); + + isr = ar2315_pci_reg_read(apc, AR2315_PCI_ISR); + + if (isr & AR2315_PCI_INT_ABORT) + goto exit_err; + + if (write) { + value = (value & ~(mask << sh)) | *ptr << sh; + __raw_writel(value, apc->cfg_mem + addr); + isr = ar2315_pci_reg_read(apc, AR2315_PCI_ISR); + if (isr & AR2315_PCI_INT_ABORT) + goto exit_err; + } else { + *ptr = (value >> sh) & mask; + } + + goto exit; + +exit_err: + ar2315_pci_reg_write(apc, AR2315_PCI_ISR, AR2315_PCI_INT_ABORT); + if (!write) + *ptr = 0xffffffff; + +exit: + /* Select Memory access */ + ar2315_pci_reg_mask(apc, AR2315_PCI_MISC_CONFIG, AR2315_PCIMISC_CFG_SEL, + 0); + + return isr & AR2315_PCI_INT_ABORT ? 
PCIBIOS_DEVICE_NOT_FOUND : + PCIBIOS_SUCCESSFUL; +} + +static inline int ar2315_pci_local_cfg_rd(struct ar2315_pci_ctrl *apc, + unsigned devfn, int where, u32 *val) +{ + return ar2315_pci_cfg_access(apc, devfn, where, sizeof(u32), val, + false); +} + +static inline int ar2315_pci_local_cfg_wr(struct ar2315_pci_ctrl *apc, + unsigned devfn, int where, u32 val) +{ + return ar2315_pci_cfg_access(apc, devfn, where, sizeof(u32), &val, + true); +} + +static int ar2315_pci_cfg_read(struct pci_bus *bus, unsigned devfn, int where, + int size, u32 *value) +{ + struct ar2315_pci_ctrl *apc = ar2315_pci_bus_to_apc(bus); + + if (PCI_SLOT(devfn) == AR2315_PCI_HOST_SLOT) + return PCIBIOS_DEVICE_NOT_FOUND; + + return ar2315_pci_cfg_access(apc, devfn, where, size, value, false); +} + +static int ar2315_pci_cfg_write(struct pci_bus *bus, unsigned devfn, int where, + int size, u32 value) +{ + struct ar2315_pci_ctrl *apc = ar2315_pci_bus_to_apc(bus); + + if (PCI_SLOT(devfn) == AR2315_PCI_HOST_SLOT) + return PCIBIOS_DEVICE_NOT_FOUND; + + return ar2315_pci_cfg_access(apc, devfn, where, size, &value, true); +} + +static struct pci_ops ar2315_pci_ops = { + .read = ar2315_pci_cfg_read, + .write = ar2315_pci_cfg_write, +}; + +static int ar2315_pci_host_setup(struct ar2315_pci_ctrl *apc) +{ + unsigned devfn = PCI_DEVFN(AR2315_PCI_HOST_SLOT, 0); + int res; + u32 id; + + res = ar2315_pci_local_cfg_rd(apc, devfn, PCI_VENDOR_ID, &id); + if (res != PCIBIOS_SUCCESSFUL || id != AR2315_PCI_HOST_DEVID) + return -ENODEV; + + /* Program MBARs */ + ar2315_pci_local_cfg_wr(apc, devfn, PCI_BASE_ADDRESS_0, + AR2315_PCI_HOST_MBAR0); + ar2315_pci_local_cfg_wr(apc, devfn, PCI_BASE_ADDRESS_1, + AR2315_PCI_HOST_MBAR1); + ar2315_pci_local_cfg_wr(apc, devfn, PCI_BASE_ADDRESS_2, + AR2315_PCI_HOST_MBAR2); + + /* Run */ + ar2315_pci_local_cfg_wr(apc, devfn, PCI_COMMAND, PCI_COMMAND_MEMORY | + PCI_COMMAND_MASTER | PCI_COMMAND_SPECIAL | + PCI_COMMAND_INVALIDATE | PCI_COMMAND_PARITY | + PCI_COMMAND_SERR | PCI_COMMAND_FAST_BACK); + + return 0; +} + +static void ar2315_pci_irq_handler(struct irq_desc *desc) +{ + struct ar2315_pci_ctrl *apc = irq_desc_get_handler_data(desc); + u32 pending = ar2315_pci_reg_read(apc, AR2315_PCI_ISR) & + ar2315_pci_reg_read(apc, AR2315_PCI_IMR); + int ret = 0; + + if (pending) + ret = generic_handle_domain_irq(apc->domain, __ffs(pending)); + + if (!pending || ret) + spurious_interrupt(); +} + +static void ar2315_pci_irq_mask(struct irq_data *d) +{ + struct ar2315_pci_ctrl *apc = irq_data_get_irq_chip_data(d); + + ar2315_pci_reg_mask(apc, AR2315_PCI_IMR, BIT(d->hwirq), 0); +} + +static void ar2315_pci_irq_mask_ack(struct irq_data *d) +{ + struct ar2315_pci_ctrl *apc = irq_data_get_irq_chip_data(d); + u32 m = BIT(d->hwirq); + + ar2315_pci_reg_mask(apc, AR2315_PCI_IMR, m, 0); + ar2315_pci_reg_write(apc, AR2315_PCI_ISR, m); +} + +static void ar2315_pci_irq_unmask(struct irq_data *d) +{ + struct ar2315_pci_ctrl *apc = irq_data_get_irq_chip_data(d); + + ar2315_pci_reg_mask(apc, AR2315_PCI_IMR, 0, BIT(d->hwirq)); +} + +static struct irq_chip ar2315_pci_irq_chip = { + .name = "AR2315-PCI", + .irq_mask = ar2315_pci_irq_mask, + .irq_mask_ack = ar2315_pci_irq_mask_ack, + .irq_unmask = ar2315_pci_irq_unmask, +}; + +static int ar2315_pci_irq_map(struct irq_domain *d, unsigned irq, + irq_hw_number_t hw) +{ + irq_set_chip_and_handler(irq, &ar2315_pci_irq_chip, handle_level_irq); + irq_set_chip_data(irq, d->host_data); + return 0; +} + +static const struct irq_domain_ops ar2315_pci_irq_domain_ops = { + .map = ar2315_pci_irq_map, +}; 
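All PCI interrupt sources are funnelled through one parent line: the chained handler masks the raw status (ISR) with the enabled set (IMR) and hands the lowest pending bit to the irq_domain as a hwirq. That bit selection is easy to model in isolation; the sketch below approximates the kernel's __ffs() with the GCC __builtin_ctz() builtin and reuses the hwirq numbering above (external INTA = 25, abort = 26):

/* Model of the dispatch decision in ar2315_pci_irq_handler(): only bits that
 * are both pending (ISR) and enabled (IMR) are considered, and the lowest one
 * selects the hwirq passed to the irq_domain. */
#include <stdio.h>

#define PCI_INT_EXT	0x02000000	/* external PCI INTA, hwirq 25 */
#define PCI_INT_ABORT	0x04000000	/* bus abort event,   hwirq 26 */

static int pick_hwirq(unsigned int isr, unsigned int imr)
{
	unsigned int pending = isr & imr;

	if (!pending)
		return -1;		/* spurious: nothing we enabled fired */
	return __builtin_ctz(pending);	/* same role as __ffs() in the driver */
}

int main(void)
{
	/* abort is pending but masked, INTA is pending and enabled -> hwirq 25 */
	printf("hwirq = %d\n", pick_hwirq(PCI_INT_EXT | PCI_INT_ABORT, PCI_INT_EXT));
	return 0;
}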
+ +static void ar2315_pci_irq_init(struct ar2315_pci_ctrl *apc) +{ + ar2315_pci_reg_mask(apc, AR2315_PCI_IER, AR2315_PCI_IER_ENABLE, 0); + ar2315_pci_reg_mask(apc, AR2315_PCI_IMR, (AR2315_PCI_INT_ABORT | + AR2315_PCI_INT_EXT), 0); + + apc->irq_ext = irq_create_mapping(apc->domain, AR2315_PCI_IRQ_EXT); + + irq_set_chained_handler_and_data(apc->irq, ar2315_pci_irq_handler, + apc); + + /* Clear any pending Abort or external Interrupts + * and enable interrupt processing */ + ar2315_pci_reg_write(apc, AR2315_PCI_ISR, AR2315_PCI_INT_ABORT | + AR2315_PCI_INT_EXT); + ar2315_pci_reg_mask(apc, AR2315_PCI_IER, 0, AR2315_PCI_IER_ENABLE); +} + +static int ar2315_pci_probe(struct platform_device *pdev) +{ + struct ar2315_pci_ctrl *apc; + struct device *dev = &pdev->dev; + struct resource *res; + int irq, err; + + apc = devm_kzalloc(dev, sizeof(*apc), GFP_KERNEL); + if (!apc) + return -ENOMEM; + + irq = platform_get_irq(pdev, 0); + if (irq < 0) + return -EINVAL; + apc->irq = irq; + + apc->mmr_mem = devm_platform_ioremap_resource_byname(pdev, + "ar2315-pci-ctrl"); + if (IS_ERR(apc->mmr_mem)) + return PTR_ERR(apc->mmr_mem); + + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, + "ar2315-pci-ext"); + if (!res) + return -EINVAL; + + apc->mem_res.name = "AR2315 PCI mem space"; + apc->mem_res.parent = res; + apc->mem_res.start = res->start; + apc->mem_res.end = res->end; + apc->mem_res.flags = IORESOURCE_MEM; + + /* Remap PCI config space */ + apc->cfg_mem = devm_ioremap(dev, res->start, + AR2315_PCI_CFG_SIZE); + if (!apc->cfg_mem) { + dev_err(dev, "failed to remap PCI config space\n"); + return -ENOMEM; + } + + /* Reset the PCI bus by setting bits 5-4 in PCI_MCFG */ + ar2315_pci_reg_mask(apc, AR2315_PCI_MISC_CONFIG, + AR2315_PCIMISC_RST_MODE, + AR2315_PCIRST_LOW); + msleep(100); + + /* Bring the PCI out of reset */ + ar2315_pci_reg_mask(apc, AR2315_PCI_MISC_CONFIG, + AR2315_PCIMISC_RST_MODE, + AR2315_PCIRST_HIGH | AR2315_PCICACHE_DIS | 0x8); + + ar2315_pci_reg_write(apc, AR2315_PCI_UNCACHE_CFG, + 0x1E | /* 1GB uncached */ + (1 << 5) | /* Enable uncached */ + (0x2 << 30) /* Base: 0x80000000 */); + ar2315_pci_reg_read(apc, AR2315_PCI_UNCACHE_CFG); + + msleep(500); + + err = ar2315_pci_host_setup(apc); + if (err) + return err; + + apc->domain = irq_domain_add_linear(NULL, AR2315_PCI_IRQ_COUNT, + &ar2315_pci_irq_domain_ops, apc); + if (!apc->domain) { + dev_err(dev, "failed to add IRQ domain\n"); + return -ENOMEM; + } + + ar2315_pci_irq_init(apc); + + /* PCI controller does not support I/O ports */ + apc->io_res.name = "AR2315 IO space"; + apc->io_res.start = 0; + apc->io_res.end = 0; + apc->io_res.flags = IORESOURCE_IO; + + apc->pci_ctrl.pci_ops = &ar2315_pci_ops; + apc->pci_ctrl.mem_resource = &apc->mem_res; + apc->pci_ctrl.io_resource = &apc->io_res; + + register_pci_controller(&apc->pci_ctrl); + + dev_info(dev, "register PCI controller\n"); + + return 0; +} + +static struct platform_driver ar2315_pci_driver = { + .probe = ar2315_pci_probe, + .driver = { + .name = "ar2315-pci", + }, +}; + +static int __init ar2315_pci_init(void) +{ + return platform_driver_register(&ar2315_pci_driver); +} +arch_initcall(ar2315_pci_init); + +int pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) +{ + struct ar2315_pci_ctrl *apc = ar2315_pci_bus_to_apc(dev->bus); + + return slot ? 
0 : apc->irq_ext; +} + +int pcibios_plat_dev_init(struct pci_dev *dev) +{ + return 0; +} diff --git a/arch/mips/pci/pci-ar71xx.c b/arch/mips/pci/pci-ar71xx.c new file mode 100644 index 000000000..118760b3f --- /dev/null +++ b/arch/mips/pci/pci-ar71xx.c @@ -0,0 +1,401 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Atheros AR71xx PCI host controller driver + * + * Copyright (C) 2008-2011 Gabor Juhos + * Copyright (C) 2008 Imre Kaloz + * + * Parts of this file are based on Atheros' 2.6.15 BSP + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#define AR71XX_PCI_REG_CRP_AD_CBE 0x00 +#define AR71XX_PCI_REG_CRP_WRDATA 0x04 +#define AR71XX_PCI_REG_CRP_RDDATA 0x08 +#define AR71XX_PCI_REG_CFG_AD 0x0c +#define AR71XX_PCI_REG_CFG_CBE 0x10 +#define AR71XX_PCI_REG_CFG_WRDATA 0x14 +#define AR71XX_PCI_REG_CFG_RDDATA 0x18 +#define AR71XX_PCI_REG_PCI_ERR 0x1c +#define AR71XX_PCI_REG_PCI_ERR_ADDR 0x20 +#define AR71XX_PCI_REG_AHB_ERR 0x24 +#define AR71XX_PCI_REG_AHB_ERR_ADDR 0x28 + +#define AR71XX_PCI_CRP_CMD_WRITE 0x00010000 +#define AR71XX_PCI_CRP_CMD_READ 0x00000000 +#define AR71XX_PCI_CFG_CMD_READ 0x0000000a +#define AR71XX_PCI_CFG_CMD_WRITE 0x0000000b + +#define AR71XX_PCI_INT_CORE BIT(4) +#define AR71XX_PCI_INT_DEV2 BIT(2) +#define AR71XX_PCI_INT_DEV1 BIT(1) +#define AR71XX_PCI_INT_DEV0 BIT(0) + +#define AR71XX_PCI_IRQ_COUNT 5 + +struct ar71xx_pci_controller { + void __iomem *cfg_base; + int irq; + int irq_base; + struct pci_controller pci_ctrl; + struct resource io_res; + struct resource mem_res; +}; + +/* Byte lane enable bits */ +static const u8 ar71xx_pci_ble_table[4][4] = { + {0x0, 0xf, 0xf, 0xf}, + {0xe, 0xd, 0xb, 0x7}, + {0xc, 0xf, 0x3, 0xf}, + {0xf, 0xf, 0xf, 0xf}, +}; + +static const u32 ar71xx_pci_read_mask[8] = { + 0, 0xff, 0xffff, 0, 0xffffffff, 0, 0, 0 +}; + +static inline u32 ar71xx_pci_get_ble(int where, int size, int local) +{ + u32 t; + + t = ar71xx_pci_ble_table[size & 3][where & 3]; + BUG_ON(t == 0xf); + t <<= (local) ? 
20 : 4; + + return t; +} + +static inline u32 ar71xx_pci_bus_addr(struct pci_bus *bus, unsigned int devfn, + int where) +{ + u32 ret; + + if (!bus->number) { + /* type 0 */ + ret = (1 << PCI_SLOT(devfn)) | (PCI_FUNC(devfn) << 8) | + (where & ~3); + } else { + /* type 1 */ + ret = (bus->number << 16) | (PCI_SLOT(devfn) << 11) | + (PCI_FUNC(devfn) << 8) | (where & ~3) | 1; + } + + return ret; +} + +static inline struct ar71xx_pci_controller * +pci_bus_to_ar71xx_controller(struct pci_bus *bus) +{ + struct pci_controller *hose; + + hose = (struct pci_controller *) bus->sysdata; + return container_of(hose, struct ar71xx_pci_controller, pci_ctrl); +} + +static int ar71xx_pci_check_error(struct ar71xx_pci_controller *apc, int quiet) +{ + void __iomem *base = apc->cfg_base; + u32 pci_err; + u32 ahb_err; + + pci_err = __raw_readl(base + AR71XX_PCI_REG_PCI_ERR) & 3; + if (pci_err) { + if (!quiet) { + u32 addr; + + addr = __raw_readl(base + AR71XX_PCI_REG_PCI_ERR_ADDR); + pr_crit("ar71xx: %s bus error %d at addr 0x%x\n", + "PCI", pci_err, addr); + } + + /* clear PCI error status */ + __raw_writel(pci_err, base + AR71XX_PCI_REG_PCI_ERR); + } + + ahb_err = __raw_readl(base + AR71XX_PCI_REG_AHB_ERR) & 1; + if (ahb_err) { + if (!quiet) { + u32 addr; + + addr = __raw_readl(base + AR71XX_PCI_REG_AHB_ERR_ADDR); + pr_crit("ar71xx: %s bus error %d at addr 0x%x\n", + "AHB", ahb_err, addr); + } + + /* clear AHB error status */ + __raw_writel(ahb_err, base + AR71XX_PCI_REG_AHB_ERR); + } + + return !!(ahb_err | pci_err); +} + +static inline void ar71xx_pci_local_write(struct ar71xx_pci_controller *apc, + int where, int size, u32 value) +{ + void __iomem *base = apc->cfg_base; + u32 ad_cbe; + + value = value << (8 * (where & 3)); + + ad_cbe = AR71XX_PCI_CRP_CMD_WRITE | (where & ~3); + ad_cbe |= ar71xx_pci_get_ble(where, size, 1); + + __raw_writel(ad_cbe, base + AR71XX_PCI_REG_CRP_AD_CBE); + __raw_writel(value, base + AR71XX_PCI_REG_CRP_WRDATA); +} + +static inline int ar71xx_pci_set_cfgaddr(struct pci_bus *bus, + unsigned int devfn, + int where, int size, u32 cmd) +{ + struct ar71xx_pci_controller *apc = pci_bus_to_ar71xx_controller(bus); + void __iomem *base = apc->cfg_base; + u32 addr; + + addr = ar71xx_pci_bus_addr(bus, devfn, where); + + __raw_writel(addr, base + AR71XX_PCI_REG_CFG_AD); + __raw_writel(cmd | ar71xx_pci_get_ble(where, size, 0), + base + AR71XX_PCI_REG_CFG_CBE); + + return ar71xx_pci_check_error(apc, 1); +} + +static int ar71xx_pci_read_config(struct pci_bus *bus, unsigned int devfn, + int where, int size, u32 *value) +{ + struct ar71xx_pci_controller *apc = pci_bus_to_ar71xx_controller(bus); + void __iomem *base = apc->cfg_base; + u32 data; + int err; + int ret; + + ret = PCIBIOS_SUCCESSFUL; + data = ~0; + + err = ar71xx_pci_set_cfgaddr(bus, devfn, where, size, + AR71XX_PCI_CFG_CMD_READ); + if (err) + ret = PCIBIOS_DEVICE_NOT_FOUND; + else + data = __raw_readl(base + AR71XX_PCI_REG_CFG_RDDATA); + + *value = (data >> (8 * (where & 3))) & ar71xx_pci_read_mask[size & 7]; + + return ret; +} + +static int ar71xx_pci_write_config(struct pci_bus *bus, unsigned int devfn, + int where, int size, u32 value) +{ + struct ar71xx_pci_controller *apc = pci_bus_to_ar71xx_controller(bus); + void __iomem *base = apc->cfg_base; + int err; + int ret; + + value = value << (8 * (where & 3)); + ret = PCIBIOS_SUCCESSFUL; + + err = ar71xx_pci_set_cfgaddr(bus, devfn, where, size, + AR71XX_PCI_CFG_CMD_WRITE); + if (err) + ret = PCIBIOS_DEVICE_NOT_FOUND; + else + __raw_writel(value, base + AR71XX_PCI_REG_CFG_WRDATA); + + 
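The byte-lane-enable lookup and the config-address builders above are plain bit arithmetic and can be exercised on their own. A standalone sketch with the same table values (the 0xf entries mark size/offset combinations the bus cannot express, which the driver guards with BUG_ON):

/* Standalone sketch of ar71xx_pci_get_ble()/ar71xx_pci_bus_addr(): byte lane
 * enables from the table above, and type 0 vs type 1 config addresses. */
#include <stdio.h>
#include <assert.h>

static const unsigned char ble_table[4][4] = {
	{0x0, 0xf, 0xf, 0xf},
	{0xe, 0xd, 0xb, 0x7},
	{0xc, 0xf, 0x3, 0xf},
	{0xf, 0xf, 0xf, 0xf},
};

static unsigned int get_ble(int where, int size, int local)
{
	unsigned int t = ble_table[size & 3][where & 3];

	assert(t != 0xf);		/* invalid size/offset combination */
	return t << (local ? 20 : 4);
}

static unsigned int bus_addr(unsigned int bus, unsigned int slot,
			     unsigned int func, int where)
{
	if (bus == 0)			/* type 0: IDSEL decoded from the slot */
		return (1U << slot) | (func << 8) | (where & ~3);
	/* type 1: bus/slot/function encoded, bit 0 set */
	return (bus << 16) | (slot << 11) | (func << 8) | (where & ~3) | 1;
}

int main(void)
{
	printf("ble 16-bit @2 (cfg)   = 0x%05x\n", get_ble(2, 2, 0));
	printf("ble  8-bit @1 (local) = 0x%08x\n", get_ble(1, 1, 1));
	printf("type0 addr slot 17 fn 0 reg 0x06 -> 0x%08x\n",
	       bus_addr(0, 17, 0, 0x06));
	return 0;
}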
return ret; +} + +static struct pci_ops ar71xx_pci_ops = { + .read = ar71xx_pci_read_config, + .write = ar71xx_pci_write_config, +}; + +static void ar71xx_pci_irq_handler(struct irq_desc *desc) +{ + struct ar71xx_pci_controller *apc; + void __iomem *base = ath79_reset_base; + u32 pending; + + apc = irq_desc_get_handler_data(desc); + + pending = __raw_readl(base + AR71XX_RESET_REG_PCI_INT_STATUS) & + __raw_readl(base + AR71XX_RESET_REG_PCI_INT_ENABLE); + + if (pending & AR71XX_PCI_INT_DEV0) + generic_handle_irq(apc->irq_base + 0); + + else if (pending & AR71XX_PCI_INT_DEV1) + generic_handle_irq(apc->irq_base + 1); + + else if (pending & AR71XX_PCI_INT_DEV2) + generic_handle_irq(apc->irq_base + 2); + + else if (pending & AR71XX_PCI_INT_CORE) + generic_handle_irq(apc->irq_base + 4); + + else + spurious_interrupt(); +} + +static void ar71xx_pci_irq_unmask(struct irq_data *d) +{ + struct ar71xx_pci_controller *apc; + unsigned int irq; + void __iomem *base = ath79_reset_base; + u32 t; + + apc = irq_data_get_irq_chip_data(d); + irq = d->irq - apc->irq_base; + + t = __raw_readl(base + AR71XX_RESET_REG_PCI_INT_ENABLE); + __raw_writel(t | (1 << irq), base + AR71XX_RESET_REG_PCI_INT_ENABLE); + + /* flush write */ + __raw_readl(base + AR71XX_RESET_REG_PCI_INT_ENABLE); +} + +static void ar71xx_pci_irq_mask(struct irq_data *d) +{ + struct ar71xx_pci_controller *apc; + unsigned int irq; + void __iomem *base = ath79_reset_base; + u32 t; + + apc = irq_data_get_irq_chip_data(d); + irq = d->irq - apc->irq_base; + + t = __raw_readl(base + AR71XX_RESET_REG_PCI_INT_ENABLE); + __raw_writel(t & ~(1 << irq), base + AR71XX_RESET_REG_PCI_INT_ENABLE); + + /* flush write */ + __raw_readl(base + AR71XX_RESET_REG_PCI_INT_ENABLE); +} + +static struct irq_chip ar71xx_pci_irq_chip = { + .name = "AR71XX PCI", + .irq_mask = ar71xx_pci_irq_mask, + .irq_unmask = ar71xx_pci_irq_unmask, + .irq_mask_ack = ar71xx_pci_irq_mask, +}; + +static void ar71xx_pci_irq_init(struct ar71xx_pci_controller *apc) +{ + void __iomem *base = ath79_reset_base; + int i; + + __raw_writel(0, base + AR71XX_RESET_REG_PCI_INT_ENABLE); + __raw_writel(0, base + AR71XX_RESET_REG_PCI_INT_STATUS); + + BUILD_BUG_ON(ATH79_PCI_IRQ_COUNT < AR71XX_PCI_IRQ_COUNT); + + apc->irq_base = ATH79_PCI_IRQ_BASE; + for (i = apc->irq_base; + i < apc->irq_base + AR71XX_PCI_IRQ_COUNT; i++) { + irq_set_chip_and_handler(i, &ar71xx_pci_irq_chip, + handle_level_irq); + irq_set_chip_data(i, apc); + } + + irq_set_chained_handler_and_data(apc->irq, ar71xx_pci_irq_handler, + apc); +} + +static void ar71xx_pci_reset(void) +{ + ath79_device_reset_set(AR71XX_RESET_PCI_BUS | AR71XX_RESET_PCI_CORE); + mdelay(100); + + ath79_device_reset_clear(AR71XX_RESET_PCI_BUS | AR71XX_RESET_PCI_CORE); + mdelay(100); + + ath79_ddr_set_pci_windows(); + mdelay(100); +} + +static int ar71xx_pci_probe(struct platform_device *pdev) +{ + struct ar71xx_pci_controller *apc; + struct resource *res; + u32 t; + + apc = devm_kzalloc(&pdev->dev, sizeof(struct ar71xx_pci_controller), + GFP_KERNEL); + if (!apc) + return -ENOMEM; + + apc->cfg_base = devm_platform_ioremap_resource_byname(pdev, + "cfg_base"); + if (IS_ERR(apc->cfg_base)) + return PTR_ERR(apc->cfg_base); + + apc->irq = platform_get_irq(pdev, 0); + if (apc->irq < 0) + return -EINVAL; + + res = platform_get_resource_byname(pdev, IORESOURCE_IO, "io_base"); + if (!res) + return -EINVAL; + + apc->io_res.parent = res; + apc->io_res.name = "PCI IO space"; + apc->io_res.start = res->start; + apc->io_res.end = res->end; + apc->io_res.flags = IORESOURCE_IO; + + res 
= platform_get_resource_byname(pdev, IORESOURCE_MEM, "mem_base"); + if (!res) + return -EINVAL; + + apc->mem_res.parent = res; + apc->mem_res.name = "PCI memory space"; + apc->mem_res.start = res->start; + apc->mem_res.end = res->end; + apc->mem_res.flags = IORESOURCE_MEM; + + ar71xx_pci_reset(); + + /* setup COMMAND register */ + t = PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER | PCI_COMMAND_INVALIDATE + | PCI_COMMAND_PARITY | PCI_COMMAND_SERR | PCI_COMMAND_FAST_BACK; + ar71xx_pci_local_write(apc, PCI_COMMAND, 4, t); + + /* clear bus errors */ + ar71xx_pci_check_error(apc, 1); + + ar71xx_pci_irq_init(apc); + + apc->pci_ctrl.pci_ops = &ar71xx_pci_ops; + apc->pci_ctrl.mem_resource = &apc->mem_res; + apc->pci_ctrl.io_resource = &apc->io_res; + + register_pci_controller(&apc->pci_ctrl); + + return 0; +} + +static struct platform_driver ar71xx_pci_driver = { + .probe = ar71xx_pci_probe, + .driver = { + .name = "ar71xx-pci", + }, +}; + +static int __init ar71xx_pci_init(void) +{ + return platform_driver_register(&ar71xx_pci_driver); +} + +postcore_initcall(ar71xx_pci_init); diff --git a/arch/mips/pci/pci-ar724x.c b/arch/mips/pci/pci-ar724x.c new file mode 100644 index 000000000..807558b25 --- /dev/null +++ b/arch/mips/pci/pci-ar724x.c @@ -0,0 +1,447 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Atheros AR724X PCI host controller driver + * + * Copyright (C) 2011 René Bolldorf + * Copyright (C) 2009-2011 Gabor Juhos + */ + +#include +#include +#include +#include +#include +#include +#include + +#define AR724X_PCI_REG_APP 0x00 +#define AR724X_PCI_REG_RESET 0x18 +#define AR724X_PCI_REG_INT_STATUS 0x4c +#define AR724X_PCI_REG_INT_MASK 0x50 + +#define AR724X_PCI_APP_LTSSM_ENABLE BIT(0) + +#define AR724X_PCI_RESET_LINK_UP BIT(0) + +#define AR724X_PCI_INT_DEV0 BIT(14) + +#define AR724X_PCI_IRQ_COUNT 1 + +#define AR7240_BAR0_WAR_VALUE 0xffff + +#define AR724X_PCI_CMD_INIT (PCI_COMMAND_MEMORY | \ + PCI_COMMAND_MASTER | \ + PCI_COMMAND_INVALIDATE | \ + PCI_COMMAND_PARITY | \ + PCI_COMMAND_SERR | \ + PCI_COMMAND_FAST_BACK) + +struct ar724x_pci_controller { + void __iomem *devcfg_base; + void __iomem *ctrl_base; + void __iomem *crp_base; + + int irq; + int irq_base; + + bool link_up; + bool bar0_is_cached; + u32 bar0_value; + + struct pci_controller pci_controller; + struct resource io_res; + struct resource mem_res; +}; + +static inline bool ar724x_pci_check_link(struct ar724x_pci_controller *apc) +{ + u32 reset; + + reset = __raw_readl(apc->ctrl_base + AR724X_PCI_REG_RESET); + return reset & AR724X_PCI_RESET_LINK_UP; +} + +static inline struct ar724x_pci_controller * +pci_bus_to_ar724x_controller(struct pci_bus *bus) +{ + struct pci_controller *hose; + + hose = (struct pci_controller *) bus->sysdata; + return container_of(hose, struct ar724x_pci_controller, pci_controller); +} + +static int ar724x_pci_local_write(struct ar724x_pci_controller *apc, + int where, int size, u32 value) +{ + void __iomem *base; + u32 data; + int s; + + WARN_ON(where & (size - 1)); + + if (!apc->link_up) + return PCIBIOS_DEVICE_NOT_FOUND; + + base = apc->crp_base; + data = __raw_readl(base + (where & ~3)); + + switch (size) { + case 1: + s = ((where & 3) * 8); + data &= ~(0xff << s); + data |= ((value & 0xff) << s); + break; + case 2: + s = ((where & 2) * 8); + data &= ~(0xffff << s); + data |= ((value & 0xffff) << s); + break; + case 4: + data = value; + break; + default: + return PCIBIOS_BAD_REGISTER_NUMBER; + } + + __raw_writel(data, base + (where & ~3)); + /* flush write */ + __raw_readl(base + (where & ~3)); + + return 
PCIBIOS_SUCCESSFUL; +} + +static int ar724x_pci_read(struct pci_bus *bus, unsigned int devfn, int where, + int size, uint32_t *value) +{ + struct ar724x_pci_controller *apc; + void __iomem *base; + u32 data; + + apc = pci_bus_to_ar724x_controller(bus); + if (!apc->link_up) + return PCIBIOS_DEVICE_NOT_FOUND; + + if (devfn) + return PCIBIOS_DEVICE_NOT_FOUND; + + base = apc->devcfg_base; + data = __raw_readl(base + (where & ~3)); + + switch (size) { + case 1: + if (where & 1) + data >>= 8; + if (where & 2) + data >>= 16; + data &= 0xff; + break; + case 2: + if (where & 2) + data >>= 16; + data &= 0xffff; + break; + case 4: + break; + default: + return PCIBIOS_BAD_REGISTER_NUMBER; + } + + if (where == PCI_BASE_ADDRESS_0 && size == 4 && + apc->bar0_is_cached) { + /* use the cached value */ + *value = apc->bar0_value; + } else { + *value = data; + } + + return PCIBIOS_SUCCESSFUL; +} + +static int ar724x_pci_write(struct pci_bus *bus, unsigned int devfn, int where, + int size, uint32_t value) +{ + struct ar724x_pci_controller *apc; + void __iomem *base; + u32 data; + int s; + + apc = pci_bus_to_ar724x_controller(bus); + if (!apc->link_up) + return PCIBIOS_DEVICE_NOT_FOUND; + + if (devfn) + return PCIBIOS_DEVICE_NOT_FOUND; + + if (soc_is_ar7240() && where == PCI_BASE_ADDRESS_0 && size == 4) { + if (value != 0xffffffff) { + /* + * WAR for a hw issue. If the BAR0 register of the + * device is set to the proper base address, the + * memory space of the device is not accessible. + * + * Cache the intended value so it can be read back, + * and write a SoC specific constant value to the + * BAR0 register in order to make the device memory + * accessible. + */ + apc->bar0_is_cached = true; + apc->bar0_value = value; + + value = AR7240_BAR0_WAR_VALUE; + } else { + apc->bar0_is_cached = false; + } + } + + base = apc->devcfg_base; + data = __raw_readl(base + (where & ~3)); + + switch (size) { + case 1: + s = ((where & 3) * 8); + data &= ~(0xff << s); + data |= ((value & 0xff) << s); + break; + case 2: + s = ((where & 2) * 8); + data &= ~(0xffff << s); + data |= ((value & 0xffff) << s); + break; + case 4: + data = value; + break; + default: + return PCIBIOS_BAD_REGISTER_NUMBER; + } + + __raw_writel(data, base + (where & ~3)); + /* flush write */ + __raw_readl(base + (where & ~3)); + + return PCIBIOS_SUCCESSFUL; +} + +static struct pci_ops ar724x_pci_ops = { + .read = ar724x_pci_read, + .write = ar724x_pci_write, +}; + +static void ar724x_pci_irq_handler(struct irq_desc *desc) +{ + struct ar724x_pci_controller *apc; + void __iomem *base; + u32 pending; + + apc = irq_desc_get_handler_data(desc); + base = apc->ctrl_base; + + pending = __raw_readl(base + AR724X_PCI_REG_INT_STATUS) & + __raw_readl(base + AR724X_PCI_REG_INT_MASK); + + if (pending & AR724X_PCI_INT_DEV0) + generic_handle_irq(apc->irq_base + 0); + + else + spurious_interrupt(); +} + +static void ar724x_pci_irq_unmask(struct irq_data *d) +{ + struct ar724x_pci_controller *apc; + void __iomem *base; + int offset; + u32 t; + + apc = irq_data_get_irq_chip_data(d); + base = apc->ctrl_base; + offset = apc->irq_base - d->irq; + + switch (offset) { + case 0: + t = __raw_readl(base + AR724X_PCI_REG_INT_MASK); + __raw_writel(t | AR724X_PCI_INT_DEV0, + base + AR724X_PCI_REG_INT_MASK); + /* flush write */ + __raw_readl(base + AR724X_PCI_REG_INT_MASK); + } +} + +static void ar724x_pci_irq_mask(struct irq_data *d) +{ + struct ar724x_pci_controller *apc; + void __iomem *base; + int offset; + u32 t; + + apc = irq_data_get_irq_chip_data(d); + base = apc->ctrl_base; + 
offset = apc->irq_base - d->irq; + + switch (offset) { + case 0: + t = __raw_readl(base + AR724X_PCI_REG_INT_MASK); + __raw_writel(t & ~AR724X_PCI_INT_DEV0, + base + AR724X_PCI_REG_INT_MASK); + + /* flush write */ + __raw_readl(base + AR724X_PCI_REG_INT_MASK); + + t = __raw_readl(base + AR724X_PCI_REG_INT_STATUS); + __raw_writel(t | AR724X_PCI_INT_DEV0, + base + AR724X_PCI_REG_INT_STATUS); + + /* flush write */ + __raw_readl(base + AR724X_PCI_REG_INT_STATUS); + } +} + +static struct irq_chip ar724x_pci_irq_chip = { + .name = "AR724X PCI ", + .irq_mask = ar724x_pci_irq_mask, + .irq_unmask = ar724x_pci_irq_unmask, + .irq_mask_ack = ar724x_pci_irq_mask, +}; + +static void ar724x_pci_irq_init(struct ar724x_pci_controller *apc, + int id) +{ + void __iomem *base; + int i; + + base = apc->ctrl_base; + + __raw_writel(0, base + AR724X_PCI_REG_INT_MASK); + __raw_writel(0, base + AR724X_PCI_REG_INT_STATUS); + + apc->irq_base = ATH79_PCI_IRQ_BASE + (id * AR724X_PCI_IRQ_COUNT); + + for (i = apc->irq_base; + i < apc->irq_base + AR724X_PCI_IRQ_COUNT; i++) { + irq_set_chip_and_handler(i, &ar724x_pci_irq_chip, + handle_level_irq); + irq_set_chip_data(i, apc); + } + + irq_set_chained_handler_and_data(apc->irq, ar724x_pci_irq_handler, + apc); +} + +static void ar724x_pci_hw_init(struct ar724x_pci_controller *apc) +{ + u32 ppl, app; + int wait = 0; + + /* deassert PCIe host controller and PCIe PHY reset */ + ath79_device_reset_clear(AR724X_RESET_PCIE); + ath79_device_reset_clear(AR724X_RESET_PCIE_PHY); + + /* remove the reset of the PCIE PLL */ + ppl = ath79_pll_rr(AR724X_PLL_REG_PCIE_CONFIG); + ppl &= ~AR724X_PLL_REG_PCIE_CONFIG_PPL_RESET; + ath79_pll_wr(AR724X_PLL_REG_PCIE_CONFIG, ppl); + + /* deassert bypass for the PCIE PLL */ + ppl = ath79_pll_rr(AR724X_PLL_REG_PCIE_CONFIG); + ppl &= ~AR724X_PLL_REG_PCIE_CONFIG_PPL_BYPASS; + ath79_pll_wr(AR724X_PLL_REG_PCIE_CONFIG, ppl); + + /* set PCIE Application Control to ready */ + app = __raw_readl(apc->ctrl_base + AR724X_PCI_REG_APP); + app |= AR724X_PCI_APP_LTSSM_ENABLE; + __raw_writel(app, apc->ctrl_base + AR724X_PCI_REG_APP); + + /* wait up to 100ms for PHY link up */ + do { + mdelay(10); + wait++; + } while (wait < 10 && !ar724x_pci_check_link(apc)); +} + +static int ar724x_pci_probe(struct platform_device *pdev) +{ + struct ar724x_pci_controller *apc; + struct resource *res; + int id; + + id = pdev->id; + if (id == -1) + id = 0; + + apc = devm_kzalloc(&pdev->dev, sizeof(struct ar724x_pci_controller), + GFP_KERNEL); + if (!apc) + return -ENOMEM; + + apc->ctrl_base = devm_platform_ioremap_resource_byname(pdev, "ctrl_base"); + if (IS_ERR(apc->ctrl_base)) + return PTR_ERR(apc->ctrl_base); + + apc->devcfg_base = devm_platform_ioremap_resource_byname(pdev, "cfg_base"); + if (IS_ERR(apc->devcfg_base)) + return PTR_ERR(apc->devcfg_base); + + apc->crp_base = devm_platform_ioremap_resource_byname(pdev, "crp_base"); + if (IS_ERR(apc->crp_base)) + return PTR_ERR(apc->crp_base); + + apc->irq = platform_get_irq(pdev, 0); + if (apc->irq < 0) + return -EINVAL; + + res = platform_get_resource_byname(pdev, IORESOURCE_IO, "io_base"); + if (!res) + return -EINVAL; + + apc->io_res.parent = res; + apc->io_res.name = "PCI IO space"; + apc->io_res.start = res->start; + apc->io_res.end = res->end; + apc->io_res.flags = IORESOURCE_IO; + + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "mem_base"); + if (!res) + return -EINVAL; + + apc->mem_res.parent = res; + apc->mem_res.name = "PCI memory space"; + apc->mem_res.start = res->start; + apc->mem_res.end = res->end; + 
apc->mem_res.flags = IORESOURCE_MEM; + + apc->pci_controller.pci_ops = &ar724x_pci_ops; + apc->pci_controller.io_resource = &apc->io_res; + apc->pci_controller.mem_resource = &apc->mem_res; + + /* + * Do the full PCIE Root Complex Initialization Sequence if the PCIe + * host controller is in reset. + */ + if (ath79_reset_rr(AR724X_RESET_REG_RESET_MODULE) & AR724X_RESET_PCIE) + ar724x_pci_hw_init(apc); + + apc->link_up = ar724x_pci_check_link(apc); + if (!apc->link_up) + dev_warn(&pdev->dev, "PCIe link is down\n"); + + ar724x_pci_irq_init(apc, id); + + ar724x_pci_local_write(apc, PCI_COMMAND, 4, AR724X_PCI_CMD_INIT); + + register_pci_controller(&apc->pci_controller); + + return 0; +} + +static struct platform_driver ar724x_pci_driver = { + .probe = ar724x_pci_probe, + .driver = { + .name = "ar724x-pci", + }, +}; + +static int __init ar724x_pci_init(void) +{ + return platform_driver_register(&ar724x_pci_driver); +} + +postcore_initcall(ar724x_pci_init); diff --git a/arch/mips/pci/pci-bcm1480.c b/arch/mips/pci/pci-bcm1480.c new file mode 100644 index 000000000..db0d4d22d --- /dev/null +++ b/arch/mips/pci/pci-bcm1480.c @@ -0,0 +1,255 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Copyright (C) 2001,2002,2005 Broadcom Corporation + * Copyright (C) 2004 by Ralf Baechle (ralf@linux-mips.org) + */ + +/* + * BCM1x80/1x55-specific PCI support + * + * This module provides the glue between Linux's PCI subsystem + * and the hardware. We basically provide glue for accessing + * configuration space, and set up the translation for I/O + * space accesses. + * + * To access configuration space, we use ioremap. In the 32-bit + * kernel, this consumes either 4 or 8 page table pages, and 16MB of + * kernel mapped memory. Hopefully neither of these should be a huge + * problem. + * + * XXX: AT THIS TIME, ONLY the NATIVE PCI-X INTERFACE IS SUPPORTED. + */ +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +/* + * Macros for calculating offsets into config space given a device + * structure or dev/fun/reg + */ +#define CFGOFFSET(bus, devfn, where) (((bus)<<16)+((devfn)<<8)+(where)) +#define CFGADDR(bus, devfn, where) CFGOFFSET((bus)->number, (devfn), where) + +static void *cfg_space; + +#define PCI_BUS_ENABLED 1 +#define PCI_DEVICE_MODE 2 + +static int bcm1480_bus_status; + +#define PCI_BRIDGE_DEVICE 0 + +/* + * Read/write 32-bit values in config space. + */ +static inline u32 READCFG32(u32 addr) +{ + return *(u32 *)(cfg_space + (addr&~3)); +} + +static inline void WRITECFG32(u32 addr, u32 data) +{ + *(u32 *)(cfg_space + (addr & ~3)) = data; +} + +int pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) +{ + if (pin == 0) + return -1; + + return K_BCM1480_INT_PCI_INTA - 1 + pin; +} + +/* Do platform specific device initialization at pci_enable_device() time */ +int pcibios_plat_dev_init(struct pci_dev *dev) +{ + return 0; +} + +/* + * Some checks before doing config cycles: + * In PCI Device Mode, hide everything on bus 0 except the LDT host + * bridge. Otherwise, access is controlled by bridge MasterEn bits. + */ +static int bcm1480_pci_can_access(struct pci_bus *bus, int devfn) +{ + u32 devno; + + if (!(bcm1480_bus_status & (PCI_BUS_ENABLED | PCI_DEVICE_MODE))) + return 0; + + if (bus->number == 0) { + devno = PCI_SLOT(devfn); + if (bcm1480_bus_status & PCI_DEVICE_MODE) + return 0; + else + return 1; + } else + return 1; +} + +/* + * Read/write access functions for various sizes of values + * in config space. 
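CFGOFFSET() simply packs bus, devfn and register offset into a flat byte offset into the 16 MB ioremap()ed match-bits window, so a config read becomes an ordinary 32-bit load from cfg_space + offset. A quick worked check of the packing, with an illustrative device/function:

/* Worked check of the CFGOFFSET() packing used by the BCM1480 code:
 * offset = (bus << 16) + (devfn << 8) + where, within a 16 MB window. */
#include <stdio.h>

#define PCI_DEVFN(slot, func)	((((slot) & 0x1f) << 3) | ((func) & 0x07))
#define CFGOFFSET(bus, devfn, where) (((bus) << 16) + ((devfn) << 8) + (where))

int main(void)
{
	/* COMMAND register (0x04) of device 5, function 1 on bus 0 */
	unsigned int off = CFGOFFSET(0, PCI_DEVFN(5, 1), 0x04);

	printf("offset = 0x%06x (fits in the 16 MB window: %s)\n",
	       off, off < (16 << 20) ? "yes" : "no");
	return 0;
}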
Return all 1's for disallowed accesses + * for a kludgy but adequate simulation of master aborts. + */ + +static int bcm1480_pcibios_read(struct pci_bus *bus, unsigned int devfn, + int where, int size, u32 * val) +{ + u32 data = 0; + + if ((size == 2) && (where & 1)) + return PCIBIOS_BAD_REGISTER_NUMBER; + else if ((size == 4) && (where & 3)) + return PCIBIOS_BAD_REGISTER_NUMBER; + + if (bcm1480_pci_can_access(bus, devfn)) + data = READCFG32(CFGADDR(bus, devfn, where)); + else + data = 0xFFFFFFFF; + + if (size == 1) + *val = (data >> ((where & 3) << 3)) & 0xff; + else if (size == 2) + *val = (data >> ((where & 3) << 3)) & 0xffff; + else + *val = data; + + return PCIBIOS_SUCCESSFUL; +} + +static int bcm1480_pcibios_write(struct pci_bus *bus, unsigned int devfn, + int where, int size, u32 val) +{ + u32 cfgaddr = CFGADDR(bus, devfn, where); + u32 data = 0; + + if ((size == 2) && (where & 1)) + return PCIBIOS_BAD_REGISTER_NUMBER; + else if ((size == 4) && (where & 3)) + return PCIBIOS_BAD_REGISTER_NUMBER; + + if (!bcm1480_pci_can_access(bus, devfn)) + return PCIBIOS_BAD_REGISTER_NUMBER; + + data = READCFG32(cfgaddr); + + if (size == 1) + data = (data & ~(0xff << ((where & 3) << 3))) | + (val << ((where & 3) << 3)); + else if (size == 2) + data = (data & ~(0xffff << ((where & 3) << 3))) | + (val << ((where & 3) << 3)); + else + data = val; + + WRITECFG32(cfgaddr, data); + + return PCIBIOS_SUCCESSFUL; +} + +struct pci_ops bcm1480_pci_ops = { + .read = bcm1480_pcibios_read, + .write = bcm1480_pcibios_write, +}; + +static struct resource bcm1480_mem_resource = { + .name = "BCM1480 PCI MEM", + .start = A_BCM1480_PHYS_PCI_MEM_MATCH_BYTES, + .end = A_BCM1480_PHYS_PCI_MEM_MATCH_BYTES + 0xfffffffUL, + .flags = IORESOURCE_MEM, +}; + +static struct resource bcm1480_io_resource = { + .name = "BCM1480 PCI I/O", + .start = A_BCM1480_PHYS_PCI_IO_MATCH_BYTES, + .end = A_BCM1480_PHYS_PCI_IO_MATCH_BYTES + 0x1ffffffUL, + .flags = IORESOURCE_IO, +}; + +struct pci_controller bcm1480_controller = { + .pci_ops = &bcm1480_pci_ops, + .mem_resource = &bcm1480_mem_resource, + .io_resource = &bcm1480_io_resource, + .io_offset = A_BCM1480_PHYS_PCI_IO_MATCH_BYTES, +}; + + +static int __init bcm1480_pcibios_init(void) +{ + uint32_t cmdreg; + uint64_t reg; + + /* CFE will assign PCI resources */ + pci_set_flags(PCI_PROBE_ONLY); + + /* Avoid ISA compat ranges. */ + PCIBIOS_MIN_IO = 0x00008000UL; + PCIBIOS_MIN_MEM = 0x01000000UL; + + /* Set I/O resource limits. - unlimited for now to accommodate HT */ + ioport_resource.end = 0xffffffffUL; + iomem_resource.end = 0xffffffffUL; + + cfg_space = ioremap(A_BCM1480_PHYS_PCI_CFG_MATCH_BITS, 16*1024*1024); + + /* + * See if the PCI bus has been configured by the firmware. + */ + reg = __raw_readq(IOADDR(A_SCD_SYSTEM_CFG)); + if (!(reg & M_BCM1480_SYS_PCI_HOST)) { + bcm1480_bus_status |= PCI_DEVICE_MODE; + } else { + cmdreg = READCFG32(CFGOFFSET(0, PCI_DEVFN(PCI_BRIDGE_DEVICE, 0), + PCI_COMMAND)); + if (!(cmdreg & PCI_COMMAND_MASTER)) { + printk + ("PCI: Skipping PCI probe. Bus is not initialized.\n"); + iounmap(cfg_space); + return 1; /* XXX */ + } + bcm1480_bus_status |= PCI_BUS_ENABLED; + } + + /* turn on ExpMemEn */ + cmdreg = READCFG32(CFGOFFSET(0, PCI_DEVFN(PCI_BRIDGE_DEVICE, 0), 0x40)); + WRITECFG32(CFGOFFSET(0, PCI_DEVFN(PCI_BRIDGE_DEVICE, 0), 0x40), + cmdreg | 0x10); + cmdreg = READCFG32(CFGOFFSET(0, PCI_DEVFN(PCI_BRIDGE_DEVICE, 0), 0x40)); + + /* + * Establish mappings in KSEG2 (kernel virtual) to PCI I/O + * space. 
Use "match bytes" policy to make everything look + * little-endian. So, you need to also set + * CONFIG_SWAP_IO_SPACE, but this is the combination that + * works correctly with most of Linux's drivers. + * XXX ehs: Should this happen in PCI Device mode? + */ + + bcm1480_controller.io_map_base = (unsigned long) + ioremap(A_BCM1480_PHYS_PCI_IO_MATCH_BYTES, 65536); + bcm1480_controller.io_map_base -= bcm1480_controller.io_offset; + set_io_port_base(bcm1480_controller.io_map_base); + + register_pci_controller(&bcm1480_controller); + +#ifdef CONFIG_VGA_CONSOLE + console_lock(); + do_take_over_console(&vga_con, 0, MAX_NR_CONSOLES-1, 1); + console_unlock(); +#endif + return 0; +} + +arch_initcall(bcm1480_pcibios_init); diff --git a/arch/mips/pci/pci-bcm1480ht.c b/arch/mips/pci/pci-bcm1480ht.c new file mode 100644 index 000000000..3d996acd2 --- /dev/null +++ b/arch/mips/pci/pci-bcm1480ht.c @@ -0,0 +1,203 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Copyright (C) 2001,2002,2005 Broadcom Corporation + * Copyright (C) 2004 by Ralf Baechle (ralf@linux-mips.org) + */ + +/* + * BCM1480/1455-specific HT support (looking like PCI) + * + * This module provides the glue between Linux's PCI subsystem + * and the hardware. We basically provide glue for accessing + * configuration space, and set up the translation for I/O + * space accesses. + * + * To access configuration space, we use ioremap. In the 32-bit + * kernel, this consumes either 4 or 8 page table pages, and 16MB of + * kernel mapped memory. Hopefully neither of these should be a huge + * problem. + * + */ +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +/* + * Macros for calculating offsets into config space given a device + * structure or dev/fun/reg + */ +#define CFGOFFSET(bus, devfn, where) (((bus)<<16)+((devfn)<<8)+(where)) +#define CFGADDR(bus, devfn, where) CFGOFFSET((bus)->number, (devfn), where) + +static void *ht_cfg_space; + +#define PCI_BUS_ENABLED 1 +#define PCI_DEVICE_MODE 2 + +static int bcm1480ht_bus_status; + +#define PCI_BRIDGE_DEVICE 0 +#define HT_BRIDGE_DEVICE 1 + +/* + * HT's level-sensitive interrupts require EOI, which is generated + * through a 4MB memory-mapped region + */ +unsigned long ht_eoi_space; + +/* + * Read/write 32-bit values in config space. + */ +static inline u32 READCFG32(u32 addr) +{ + return *(u32 *)(ht_cfg_space + (addr&~3)); +} + +static inline void WRITECFG32(u32 addr, u32 data) +{ + *(u32 *)(ht_cfg_space + (addr & ~3)) = data; +} + +/* + * Some checks before doing config cycles: + * In PCI Device Mode, hide everything on bus 0 except the LDT host + * bridge. Otherwise, access is controlled by bridge MasterEn bits. + */ +static int bcm1480ht_can_access(struct pci_bus *bus, int devfn) +{ + u32 devno; + + if (!(bcm1480ht_bus_status & (PCI_BUS_ENABLED | PCI_DEVICE_MODE))) + return 0; + + if (bus->number == 0) { + devno = PCI_SLOT(devfn); + if (bcm1480ht_bus_status & PCI_DEVICE_MODE) + return 0; + } + return 1; +} + +/* + * Read/write access functions for various sizes of values + * in config space. Return all 1's for disallowed accesses + * for a kludgy but adequate simulation of master aborts. 
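+ * Sub-word accesses are emulated with a full 32-bit read followed by
+ * a shift and mask (and a read-modify-write for stores); e.g. a
+ * one-byte read at offset 0x0e returns (data >> 16) & 0xff of the
+ * containing word.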
+ */ + +static int bcm1480ht_pcibios_read(struct pci_bus *bus, unsigned int devfn, + int where, int size, u32 * val) +{ + u32 data = 0; + + if ((size == 2) && (where & 1)) + return PCIBIOS_BAD_REGISTER_NUMBER; + else if ((size == 4) && (where & 3)) + return PCIBIOS_BAD_REGISTER_NUMBER; + + if (bcm1480ht_can_access(bus, devfn)) + data = READCFG32(CFGADDR(bus, devfn, where)); + else + data = 0xFFFFFFFF; + + if (size == 1) + *val = (data >> ((where & 3) << 3)) & 0xff; + else if (size == 2) + *val = (data >> ((where & 3) << 3)) & 0xffff; + else + *val = data; + + return PCIBIOS_SUCCESSFUL; +} + +static int bcm1480ht_pcibios_write(struct pci_bus *bus, unsigned int devfn, + int where, int size, u32 val) +{ + u32 cfgaddr = CFGADDR(bus, devfn, where); + u32 data = 0; + + if ((size == 2) && (where & 1)) + return PCIBIOS_BAD_REGISTER_NUMBER; + else if ((size == 4) && (where & 3)) + return PCIBIOS_BAD_REGISTER_NUMBER; + + if (!bcm1480ht_can_access(bus, devfn)) + return PCIBIOS_BAD_REGISTER_NUMBER; + + data = READCFG32(cfgaddr); + + if (size == 1) + data = (data & ~(0xff << ((where & 3) << 3))) | + (val << ((where & 3) << 3)); + else if (size == 2) + data = (data & ~(0xffff << ((where & 3) << 3))) | + (val << ((where & 3) << 3)); + else + data = val; + + WRITECFG32(cfgaddr, data); + + return PCIBIOS_SUCCESSFUL; +} + +static int bcm1480ht_pcibios_get_busno(void) +{ + return 0; +} + +struct pci_ops bcm1480ht_pci_ops = { + .read = bcm1480ht_pcibios_read, + .write = bcm1480ht_pcibios_write, +}; + +static struct resource bcm1480ht_mem_resource = { + .name = "BCM1480 HT MEM", + .start = A_BCM1480_PHYS_HT_MEM_MATCH_BYTES, + .end = A_BCM1480_PHYS_HT_MEM_MATCH_BYTES + 0x1fffffffUL, + .flags = IORESOURCE_MEM, +}; + +static struct resource bcm1480ht_io_resource = { + .name = "BCM1480 HT I/O", + .start = A_BCM1480_PHYS_HT_IO_MATCH_BYTES, + .end = A_BCM1480_PHYS_HT_IO_MATCH_BYTES + 0x01ffffffUL, + .flags = IORESOURCE_IO, +}; + +struct pci_controller bcm1480ht_controller = { + .pci_ops = &bcm1480ht_pci_ops, + .mem_resource = &bcm1480ht_mem_resource, + .io_resource = &bcm1480ht_io_resource, + .index = 1, + .get_busno = bcm1480ht_pcibios_get_busno, + .io_offset = A_BCM1480_PHYS_HT_IO_MATCH_BYTES, +}; + +static int __init bcm1480ht_pcibios_init(void) +{ + ht_cfg_space = ioremap(A_BCM1480_PHYS_HT_CFG_MATCH_BITS, 16*1024*1024); + + /* CFE doesn't always init all HT paths, so we always scan */ + bcm1480ht_bus_status |= PCI_BUS_ENABLED; + + ht_eoi_space = (unsigned long) + ioremap(A_BCM1480_PHYS_HT_SPECIAL_MATCH_BYTES, + 4 * 1024 * 1024); + bcm1480ht_controller.io_map_base = (unsigned long) + ioremap(A_BCM1480_PHYS_HT_IO_MATCH_BYTES, 65536); + bcm1480ht_controller.io_map_base -= bcm1480ht_controller.io_offset; + + register_pci_controller(&bcm1480ht_controller); + + return 0; +} + +arch_initcall(bcm1480ht_pcibios_init); diff --git a/arch/mips/pci/pci-bcm47xx.c b/arch/mips/pci/pci-bcm47xx.c new file mode 100644 index 000000000..ffac06a5c --- /dev/null +++ b/arch/mips/pci/pci-bcm47xx.c @@ -0,0 +1,98 @@ +/* + * Copyright (C) 2008 Aurelien Jarno + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN + * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF + * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON + * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write to the Free Software Foundation, Inc., + * 675 Mass Ave, Cambridge, MA 02139, USA. + */ + +#include +#include +#include +#include +#include + +int pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) +{ + return 0; +} + +#ifdef CONFIG_BCM47XX_SSB +static int bcm47xx_pcibios_plat_dev_init_ssb(struct pci_dev *dev) +{ + int res; + u8 slot, pin; + + res = ssb_pcibios_plat_dev_init(dev); + if (res < 0) { + pci_alert(dev, "PCI: Failed to init device\n"); + return res; + } + + pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &pin); + slot = PCI_SLOT(dev->devfn); + res = ssb_pcibios_map_irq(dev, slot, pin); + + /* IRQ-0 and IRQ-1 are software interrupts. */ + if (res < 2) { + pci_alert(dev, "PCI: Failed to map IRQ of device\n"); + return res; + } + + dev->irq = res; + return 0; +} +#endif + +#ifdef CONFIG_BCM47XX_BCMA +static int bcm47xx_pcibios_plat_dev_init_bcma(struct pci_dev *dev) +{ + int res; + + res = bcma_core_pci_plat_dev_init(dev); + if (res < 0) { + pci_alert(dev, "PCI: Failed to init device\n"); + return res; + } + + res = bcma_core_pci_pcibios_map_irq(dev); + + /* IRQ-0 and IRQ-1 are software interrupts. */ + if (res < 2) { + pci_alert(dev, "PCI: Failed to map IRQ of device\n"); + return res; + } + + dev->irq = res; + return 0; +} +#endif + +int pcibios_plat_dev_init(struct pci_dev *dev) +{ +#ifdef CONFIG_BCM47XX_SSB + if (bcm47xx_bus_type == BCM47XX_BUS_TYPE_SSB) + return bcm47xx_pcibios_plat_dev_init_ssb(dev); +#endif +#ifdef CONFIG_BCM47XX_BCMA + if (bcm47xx_bus_type == BCM47XX_BUS_TYPE_BCMA) + return bcm47xx_pcibios_plat_dev_init_bcma(dev); +#endif + return 0; +} diff --git a/arch/mips/pci/pci-bcm63xx.c b/arch/mips/pci/pci-bcm63xx.c new file mode 100644 index 000000000..ac8324377 --- /dev/null +++ b/arch/mips/pci/pci-bcm63xx.c @@ -0,0 +1,351 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. 
+ * + * Copyright (C) 2008 Maxime Bizon + */ + +#include +#include +#include +#include +#include +#include +#include + +#include + +#include "pci-bcm63xx.h" + +/* + * Allow PCI to be disabled at runtime depending on board nvram + * configuration + */ +int bcm63xx_pci_enabled; + +static struct resource bcm_pci_mem_resource = { + .name = "bcm63xx PCI memory space", + .start = BCM_PCI_MEM_BASE_PA, + .end = BCM_PCI_MEM_END_PA, + .flags = IORESOURCE_MEM +}; + +static struct resource bcm_pci_io_resource = { + .name = "bcm63xx PCI IO space", + .start = BCM_PCI_IO_BASE_PA, +#ifdef CONFIG_CARDBUS + .end = BCM_PCI_IO_HALF_PA, +#else + .end = BCM_PCI_IO_END_PA, +#endif + .flags = IORESOURCE_IO +}; + +struct pci_controller bcm63xx_controller = { + .pci_ops = &bcm63xx_pci_ops, + .io_resource = &bcm_pci_io_resource, + .mem_resource = &bcm_pci_mem_resource, +}; + +/* + * We handle cardbus via a fake Cardbus bridge, memory and io spaces + * have to be clearly separated from PCI one since we have different + * memory decoder. + */ +#ifdef CONFIG_CARDBUS +static struct resource bcm_cb_mem_resource = { + .name = "bcm63xx Cardbus memory space", + .start = BCM_CB_MEM_BASE_PA, + .end = BCM_CB_MEM_END_PA, + .flags = IORESOURCE_MEM +}; + +static struct resource bcm_cb_io_resource = { + .name = "bcm63xx Cardbus IO space", + .start = BCM_PCI_IO_HALF_PA + 1, + .end = BCM_PCI_IO_END_PA, + .flags = IORESOURCE_IO +}; + +struct pci_controller bcm63xx_cb_controller = { + .pci_ops = &bcm63xx_cb_ops, + .io_resource = &bcm_cb_io_resource, + .mem_resource = &bcm_cb_mem_resource, +}; +#endif + +static struct resource bcm_pcie_mem_resource = { + .name = "bcm63xx PCIe memory space", + .start = BCM_PCIE_MEM_BASE_PA, + .end = BCM_PCIE_MEM_END_PA, + .flags = IORESOURCE_MEM, +}; + +static struct resource bcm_pcie_io_resource = { + .name = "bcm63xx PCIe IO space", + .start = 0, + .end = 0, + .flags = 0, +}; + +struct pci_controller bcm63xx_pcie_controller = { + .pci_ops = &bcm63xx_pcie_ops, + .io_resource = &bcm_pcie_io_resource, + .mem_resource = &bcm_pcie_mem_resource, +}; + +static u32 bcm63xx_int_cfg_readl(u32 reg) +{ + u32 tmp; + + tmp = reg & MPI_PCICFGCTL_CFGADDR_MASK; + tmp |= MPI_PCICFGCTL_WRITEEN_MASK; + bcm_mpi_writel(tmp, MPI_PCICFGCTL_REG); + iob(); + return bcm_mpi_readl(MPI_PCICFGDATA_REG); +} + +static void bcm63xx_int_cfg_writel(u32 val, u32 reg) +{ + u32 tmp; + + tmp = reg & MPI_PCICFGCTL_CFGADDR_MASK; + tmp |= MPI_PCICFGCTL_WRITEEN_MASK; + bcm_mpi_writel(tmp, MPI_PCICFGCTL_REG); + bcm_mpi_writel(val, MPI_PCICFGDATA_REG); +} + +void __iomem *pci_iospace_start; + +static void __init bcm63xx_reset_pcie(void) +{ + u32 val; + u32 reg; + + /* enable SERDES */ + if (BCMCPU_IS_6328()) + reg = MISC_SERDES_CTRL_6328_REG; + else + reg = MISC_SERDES_CTRL_6362_REG; + + val = bcm_misc_readl(reg); + val |= SERDES_PCIE_EN | SERDES_PCIE_EXD_EN; + bcm_misc_writel(val, reg); + + /* reset the PCIe core */ + bcm63xx_core_set_reset(BCM63XX_RESET_PCIE, 1); + bcm63xx_core_set_reset(BCM63XX_RESET_PCIE_EXT, 1); + mdelay(10); + + bcm63xx_core_set_reset(BCM63XX_RESET_PCIE, 0); + mdelay(10); + + bcm63xx_core_set_reset(BCM63XX_RESET_PCIE_EXT, 0); + mdelay(200); +} + +static struct clk *pcie_clk; + +static int __init bcm63xx_register_pcie(void) +{ + u32 val; + + /* enable clock */ + pcie_clk = clk_get(NULL, "pcie"); + if (IS_ERR_OR_NULL(pcie_clk)) + return -ENODEV; + + clk_prepare_enable(pcie_clk); + + bcm63xx_reset_pcie(); + + /* configure the PCIe bridge */ + val = bcm_pcie_readl(PCIE_BRIDGE_OPT1_REG); + val |= OPT1_RD_BE_OPT_EN; + val |= 
OPT1_RD_REPLY_BE_FIX_EN; + val |= OPT1_PCIE_BRIDGE_HOLE_DET_EN; + val |= OPT1_L1_INT_STATUS_MASK_POL; + bcm_pcie_writel(val, PCIE_BRIDGE_OPT1_REG); + + /* setup the interrupts */ + val = bcm_pcie_readl(PCIE_BRIDGE_RC_INT_MASK_REG); + val |= PCIE_RC_INT_A | PCIE_RC_INT_B | PCIE_RC_INT_C | PCIE_RC_INT_D; + bcm_pcie_writel(val, PCIE_BRIDGE_RC_INT_MASK_REG); + + val = bcm_pcie_readl(PCIE_BRIDGE_OPT2_REG); + /* enable credit checking and error checking */ + val |= OPT2_TX_CREDIT_CHK_EN; + val |= OPT2_UBUS_UR_DECODE_DIS; + + /* set device bus/func for the pcie device */ + val |= (PCIE_BUS_DEVICE << OPT2_CFG_TYPE1_BUS_NO_SHIFT); + val |= OPT2_CFG_TYPE1_BD_SEL; + bcm_pcie_writel(val, PCIE_BRIDGE_OPT2_REG); + + /* setup class code as bridge */ + val = bcm_pcie_readl(PCIE_IDVAL3_REG); + val &= ~IDVAL3_CLASS_CODE_MASK; + val |= PCI_CLASS_BRIDGE_PCI_NORMAL; + bcm_pcie_writel(val, PCIE_IDVAL3_REG); + + /* disable bar1 size */ + val = bcm_pcie_readl(PCIE_CONFIG2_REG); + val &= ~CONFIG2_BAR1_SIZE_MASK; + bcm_pcie_writel(val, PCIE_CONFIG2_REG); + + /* set bar0 to little endian */ + val = (BCM_PCIE_MEM_BASE_PA >> 20) << BASEMASK_BASE_SHIFT; + val |= (BCM_PCIE_MEM_BASE_PA >> 20) << BASEMASK_MASK_SHIFT; + val |= BASEMASK_REMAP_EN; + bcm_pcie_writel(val, PCIE_BRIDGE_BAR0_BASEMASK_REG); + + val = (BCM_PCIE_MEM_BASE_PA >> 20) << REBASE_ADDR_BASE_SHIFT; + bcm_pcie_writel(val, PCIE_BRIDGE_BAR0_REBASE_ADDR_REG); + + register_pci_controller(&bcm63xx_pcie_controller); + + return 0; +} + +static int __init bcm63xx_register_pci(void) +{ + unsigned int mem_size; + u32 val; + /* + * configuration access are done through IO space, remap 4 + * first bytes to access it from CPU. + * + * this means that no io access from CPU should happen while + * we do a configuration cycle, but there's no way we can add + * a spinlock for each io access, so this is currently kind of + * broken on SMP. 
+ */ + pci_iospace_start = ioremap(BCM_PCI_IO_BASE_PA, 4); + if (!pci_iospace_start) + return -ENOMEM; + + /* setup local bus to PCI access (PCI memory) */ + val = BCM_PCI_MEM_BASE_PA & MPI_L2P_BASE_MASK; + bcm_mpi_writel(val, MPI_L2PMEMBASE1_REG); + bcm_mpi_writel(~(BCM_PCI_MEM_SIZE - 1), MPI_L2PMEMRANGE1_REG); + bcm_mpi_writel(val | MPI_L2PREMAP_ENABLED_MASK, MPI_L2PMEMREMAP1_REG); + + /* set Cardbus IDSEL (type 0 cfg access on primary bus for + * this IDSEL will be done on Cardbus instead) */ + val = bcm_pcmcia_readl(PCMCIA_C1_REG); + val &= ~PCMCIA_C1_CBIDSEL_MASK; + val |= (CARDBUS_PCI_IDSEL << PCMCIA_C1_CBIDSEL_SHIFT); + bcm_pcmcia_writel(val, PCMCIA_C1_REG); + +#ifdef CONFIG_CARDBUS + /* setup local bus to PCI access (Cardbus memory) */ + val = BCM_CB_MEM_BASE_PA & MPI_L2P_BASE_MASK; + bcm_mpi_writel(val, MPI_L2PMEMBASE2_REG); + bcm_mpi_writel(~(BCM_CB_MEM_SIZE - 1), MPI_L2PMEMRANGE2_REG); + val |= MPI_L2PREMAP_ENABLED_MASK | MPI_L2PREMAP_IS_CARDBUS_MASK; + bcm_mpi_writel(val, MPI_L2PMEMREMAP2_REG); +#else + /* disable second access windows */ + bcm_mpi_writel(0, MPI_L2PMEMREMAP2_REG); +#endif + + /* setup local bus to PCI access (IO memory), we have only 1 + * IO window for both PCI and cardbus, but it cannot handle + * both at the same time, assume standard PCI for now, if + * cardbus card has IO zone, PCI fixup will change window to + * cardbus */ + val = BCM_PCI_IO_BASE_PA & MPI_L2P_BASE_MASK; + bcm_mpi_writel(val, MPI_L2PIOBASE_REG); + bcm_mpi_writel(~(BCM_PCI_IO_SIZE - 1), MPI_L2PIORANGE_REG); + bcm_mpi_writel(val | MPI_L2PREMAP_ENABLED_MASK, MPI_L2PIOREMAP_REG); + + /* enable PCI related GPIO pins */ + bcm_mpi_writel(MPI_LOCBUSCTL_EN_PCI_GPIO_MASK, MPI_LOCBUSCTL_REG); + + /* setup PCI to local bus access, used by PCI device to target + * local RAM while bus mastering */ + bcm63xx_int_cfg_writel(0, PCI_BASE_ADDRESS_3); + if (BCMCPU_IS_3368() || BCMCPU_IS_6358() || BCMCPU_IS_6368()) + val = MPI_SP0_REMAP_ENABLE_MASK; + else + val = 0; + bcm_mpi_writel(val, MPI_SP0_REMAP_REG); + + bcm63xx_int_cfg_writel(0x0, PCI_BASE_ADDRESS_4); + bcm_mpi_writel(0, MPI_SP1_REMAP_REG); + + mem_size = bcm63xx_get_memory_size(); + + /* 6348 before rev b0 exposes only 16 MB of RAM memory through + * PCI, throw a warning if we have more memory */ + if (BCMCPU_IS_6348() && (bcm63xx_get_cpu_rev() & 0xf0) == 0xa0) { + if (mem_size > (16 * 1024 * 1024)) + printk(KERN_WARNING "bcm63xx: this CPU " + "revision cannot handle more than 16MB " + "of RAM for PCI bus mastering\n"); + } else { + /* setup sp0 range to local RAM size */ + bcm_mpi_writel(~(mem_size - 1), MPI_SP0_RANGE_REG); + bcm_mpi_writel(0, MPI_SP1_RANGE_REG); + } + + /* change host bridge retry counter to infinite number of + * retry, needed for some broadcom wifi cards with Silicon + * Backplane bus where access to srom seems very slow */ + val = bcm63xx_int_cfg_readl(BCMPCI_REG_TIMERS); + val &= ~REG_TIMER_RETRY_MASK; + bcm63xx_int_cfg_writel(val, BCMPCI_REG_TIMERS); + + /* enable memory decoder and bus mastering */ + val = bcm63xx_int_cfg_readl(PCI_COMMAND); + val |= (PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER); + bcm63xx_int_cfg_writel(val, PCI_COMMAND); + + /* enable read prefetching & disable byte swapping for bus + * mastering transfers */ + val = bcm_mpi_readl(MPI_PCIMODESEL_REG); + val &= ~MPI_PCIMODESEL_BAR1_NOSWAP_MASK; + val &= ~MPI_PCIMODESEL_BAR2_NOSWAP_MASK; + val &= ~MPI_PCIMODESEL_PREFETCH_MASK; + val |= (8 << MPI_PCIMODESEL_PREFETCH_SHIFT); + bcm_mpi_writel(val, MPI_PCIMODESEL_REG); + + /* enable pci interrupt */ + val = 
bcm_mpi_readl(MPI_LOCINT_REG); + val |= MPI_LOCINT_MASK(MPI_LOCINT_EXT_PCI_INT); + bcm_mpi_writel(val, MPI_LOCINT_REG); + + register_pci_controller(&bcm63xx_controller); + +#ifdef CONFIG_CARDBUS + register_pci_controller(&bcm63xx_cb_controller); +#endif + + /* mark memory space used for IO mapping as reserved */ + request_mem_region(BCM_PCI_IO_BASE_PA, BCM_PCI_IO_SIZE, + "bcm63xx PCI IO space"); + return 0; +} + + +static int __init bcm63xx_pci_init(void) +{ + if (!bcm63xx_pci_enabled) + return -ENODEV; + + switch (bcm63xx_get_cpu_id()) { + case BCM6328_CPU_ID: + case BCM6362_CPU_ID: + return bcm63xx_register_pcie(); + case BCM3368_CPU_ID: + case BCM6348_CPU_ID: + case BCM6358_CPU_ID: + case BCM6368_CPU_ID: + return bcm63xx_register_pci(); + default: + return -ENODEV; + } +} + +arch_initcall(bcm63xx_pci_init); diff --git a/arch/mips/pci/pci-bcm63xx.h b/arch/mips/pci/pci-bcm63xx.h new file mode 100644 index 000000000..214def1e4 --- /dev/null +++ b/arch/mips/pci/pci-bcm63xx.h @@ -0,0 +1,33 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef PCI_BCM63XX_H_ +#define PCI_BCM63XX_H_ + +#include +#include +#include +#include + +/* + * Cardbus shares the PCI bus, but has no IDSEL, so a special id is + * reserved for it. If you have a standard PCI device at this id, you + * need to change the following definition. + */ +#define CARDBUS_PCI_IDSEL 0x8 + + +#define PCIE_BUS_BRIDGE 0 +#define PCIE_BUS_DEVICE 1 + +/* + * defined in ops-bcm63xx.c + */ +extern struct pci_ops bcm63xx_pci_ops; +extern struct pci_ops bcm63xx_cb_ops; +extern struct pci_ops bcm63xx_pcie_ops; + +/* + * defined in pci-bcm63xx.c + */ +extern void __iomem *pci_iospace_start; + +#endif /* ! PCI_BCM63XX_H_ */ diff --git a/arch/mips/pci/pci-generic.c b/arch/mips/pci/pci-generic.c new file mode 100644 index 000000000..d2d68bac3 --- /dev/null +++ b/arch/mips/pci/pci-generic.c @@ -0,0 +1,64 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Copyright (C) 2016 Imagination Technologies + * Author: Paul Burton + * + * pcibios_align_resource taken from arch/arm/kernel/bios32.c. + */ + +#include + +/* + * We need to avoid collisions with `mirrored' VGA ports + * and other strange ISA hardware, so we always want the + * addresses to be allocated in the 0x000-0x0ff region + * modulo 0x400. + * + * Why? Because some silly external IO cards only decode + * the low 10 bits of the IO address. The 0x00-0xff region + * is reserved for motherboard devices that decode all 16 + * bits, so it's ok to allocate at, say, 0x2800-0x28ff, + * but we want to try to avoid allocating at 0x2900-0x2bff + * which might have be mirrored at 0x0100-0x03ff.. 
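+ * For example, an I/O window that would start at 0x2a00 has bit 9 of
+ * the address set, so it is first rounded up to the next 0x400
+ * boundary (0x2c00) before the regular power-of-two alignment below
+ * is applied.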
+ */ +resource_size_t pcibios_align_resource(void *data, const struct resource *res, + resource_size_t size, resource_size_t align) +{ + struct pci_dev *dev = data; + resource_size_t start = res->start; + struct pci_host_bridge *host_bridge; + + if (res->flags & IORESOURCE_IO && start & 0x300) + start = (start + 0x3ff) & ~0x3ff; + + start = (start + align - 1) & ~(align - 1); + + host_bridge = pci_find_host_bridge(dev->bus); + + if (host_bridge->align_resource) + return host_bridge->align_resource(dev, res, + start, size, align); + + return start; +} + +void pcibios_fixup_bus(struct pci_bus *bus) +{ + pci_read_bridge_bases(bus); +} + +#ifdef pci_remap_iospace +int pci_remap_iospace(const struct resource *res, phys_addr_t phys_addr) +{ + unsigned long vaddr; + + if (res->start != 0) { + WARN_ONCE(1, "resource start address is not zero\n"); + return -ENODEV; + } + + vaddr = (unsigned long)ioremap(phys_addr, resource_size(res)); + set_io_port_base(vaddr); + return 0; +} +#endif diff --git a/arch/mips/pci/pci-ip27.c b/arch/mips/pci/pci-ip27.c new file mode 100644 index 000000000..d85cbf84e --- /dev/null +++ b/arch/mips/pci/pci-ip27.c @@ -0,0 +1,42 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 2003 Christoph Hellwig (hch@lst.de) + * Copyright (C) 1999, 2000, 04 Ralf Baechle (ralf@linux-mips.org) + * Copyright (C) 1999, 2000 Silicon Graphics, Inc. + */ +#include +#include +#include +#include +#include +#include + +#ifdef CONFIG_NUMA +int pcibus_to_node(struct pci_bus *bus) +{ + struct bridge_controller *bc = BRIDGE_CONTROLLER(bus); + + return bc->nasid; +} +EXPORT_SYMBOL(pcibus_to_node); +#endif /* CONFIG_NUMA */ + +static void ip29_fixup_phy(struct pci_dev *dev) +{ + int nasid = pcibus_to_node(dev->bus); + u32 sid; + + if (nasid != 1) + return; /* only needed on second module */ + + /* enable ethernet PHY on IP29 systemboard */ + pci_read_config_dword(dev, PCI_SUBSYSTEM_VENDOR_ID, &sid); + if (sid == (PCI_VENDOR_ID_SGI | (IOC3_SUBSYS_IP29_SYSBOARD) << 16)) + REMOTE_HUB_S(nasid, MD_LED0, 0x09); +} + +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_SGI, PCI_DEVICE_ID_SGI_IOC3, + ip29_fixup_phy); diff --git a/arch/mips/pci/pci-ip32.c b/arch/mips/pci/pci-ip32.c new file mode 100644 index 000000000..7ae89d0c7 --- /dev/null +++ b/arch/mips/pci/pci-ip32.c @@ -0,0 +1,147 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 2000, 2001 Keith M Wesolowski + * Copyright (C) 2004 by Ralf Baechle (ralf@linux-mips.org) + */ +#include +#include +#include +#include +#include +#include +#include + +#undef DEBUG_MACE_PCI + +/* + * Handle errors from the bridge. This includes master and target aborts, + * various command and address errors, and the interrupt test. This gets + * registered on the bridge error irq. It's conceivable that some of these + * conditions warrant a panic. Anybody care to say which ones? 
+ */ +static irqreturn_t macepci_error(int irq, void *dev) +{ + char s; + unsigned int flags = mace->pci.error; + unsigned int addr = mace->pci.error_addr; + + if (flags & MACEPCI_ERROR_MEMORY_ADDR) + s = 'M'; + else if (flags & MACEPCI_ERROR_CONFIG_ADDR) + s = 'C'; + else + s = 'X'; + + if (flags & MACEPCI_ERROR_MASTER_ABORT) { + printk("MACEPCI: Master abort at 0x%08x (%c)\n", addr, s); + flags &= ~MACEPCI_ERROR_MASTER_ABORT; + } + if (flags & MACEPCI_ERROR_TARGET_ABORT) { + printk("MACEPCI: Target abort at 0x%08x (%c)\n", addr, s); + flags &= ~MACEPCI_ERROR_TARGET_ABORT; + } + if (flags & MACEPCI_ERROR_DATA_PARITY_ERR) { + printk("MACEPCI: Data parity error at 0x%08x (%c)\n", addr, s); + flags &= ~MACEPCI_ERROR_DATA_PARITY_ERR; + } + if (flags & MACEPCI_ERROR_RETRY_ERR) { + printk("MACEPCI: Retry error at 0x%08x (%c)\n", addr, s); + flags &= ~MACEPCI_ERROR_RETRY_ERR; + } + if (flags & MACEPCI_ERROR_ILLEGAL_CMD) { + printk("MACEPCI: Illegal command at 0x%08x (%c)\n", addr, s); + flags &= ~MACEPCI_ERROR_ILLEGAL_CMD; + } + if (flags & MACEPCI_ERROR_SYSTEM_ERR) { + printk("MACEPCI: System error at 0x%08x (%c)\n", addr, s); + flags &= ~MACEPCI_ERROR_SYSTEM_ERR; + } + if (flags & MACEPCI_ERROR_PARITY_ERR) { + printk("MACEPCI: Parity error at 0x%08x (%c)\n", addr, s); + flags &= ~MACEPCI_ERROR_PARITY_ERR; + } + if (flags & MACEPCI_ERROR_OVERRUN) { + printk("MACEPCI: Overrun error at 0x%08x (%c)\n", addr, s); + flags &= ~MACEPCI_ERROR_OVERRUN; + } + if (flags & MACEPCI_ERROR_SIG_TABORT) { + printk("MACEPCI: Signaled target abort (clearing)\n"); + flags &= ~MACEPCI_ERROR_SIG_TABORT; + } + if (flags & MACEPCI_ERROR_INTERRUPT_TEST) { + printk("MACEPCI: Interrupt test triggered (clearing)\n"); + flags &= ~MACEPCI_ERROR_INTERRUPT_TEST; + } + + mace->pci.error = flags; + + return IRQ_HANDLED; +} + + +extern struct pci_ops mace_pci_ops; +#ifdef CONFIG_64BIT +static struct resource mace_pci_mem_resource = { + .name = "SGI O2 PCI MEM", + .start = MACEPCI_HI_MEMORY, + .end = 0x2FFFFFFFFUL, + .flags = IORESOURCE_MEM, +}; +static struct resource mace_pci_io_resource = { + .name = "SGI O2 PCI IO", + .start = 0x00000000UL, + .end = 0xffffffffUL, + .flags = IORESOURCE_IO, +}; +#define MACE_PCI_MEM_OFFSET 0x200000000 +#else +static struct resource mace_pci_mem_resource = { + .name = "SGI O2 PCI MEM", + .start = MACEPCI_LOW_MEMORY, + .end = MACEPCI_LOW_MEMORY + 0x2000000 - 1, + .flags = IORESOURCE_MEM, +}; +static struct resource mace_pci_io_resource = { + .name = "SGI O2 PCI IO", + .start = 0x00000000, + .end = 0xFFFFFFFF, + .flags = IORESOURCE_IO, +}; +#define MACE_PCI_MEM_OFFSET (MACEPCI_LOW_MEMORY - 0x80000000) +#endif +static struct pci_controller mace_pci_controller = { + .pci_ops = &mace_pci_ops, + .mem_resource = &mace_pci_mem_resource, + .io_resource = &mace_pci_io_resource, + .mem_offset = MACE_PCI_MEM_OFFSET, + .io_offset = 0, + .io_map_base = CKSEG1ADDR(MACEPCI_LOW_IO), +}; + +static int __init mace_init(void) +{ + PCIBIOS_MIN_IO = 0x1000; + + /* Clear any outstanding errors and enable interrupts */ + mace->pci.error_addr = 0; + mace->pci.error = 0; + mace->pci.control = 0xff008500; + + printk("MACE PCI rev %d\n", mace->pci.rev); + + BUG_ON(request_irq(MACE_PCI_BRIDGE_IRQ, macepci_error, 0, + "MACE PCI error", NULL)); + + /* extend memory resources */ + iomem_resource.end = mace_pci_mem_resource.end; + ioport_resource = mace_pci_io_resource; + + register_pci_controller(&mace_pci_controller); + + return 0; +} + +arch_initcall(mace_init); diff --git a/arch/mips/pci/pci-lantiq.c 
b/arch/mips/pci/pci-lantiq.c new file mode 100644 index 000000000..8d16cd021 --- /dev/null +++ b/arch/mips/pci/pci-lantiq.c @@ -0,0 +1,249 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * + * Copyright (C) 2010 John Crispin + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include +#include + +#include "pci-lantiq.h" + +#define PCI_CR_FCI_ADDR_MAP0 0x00C0 +#define PCI_CR_FCI_ADDR_MAP1 0x00C4 +#define PCI_CR_FCI_ADDR_MAP2 0x00C8 +#define PCI_CR_FCI_ADDR_MAP3 0x00CC +#define PCI_CR_FCI_ADDR_MAP4 0x00D0 +#define PCI_CR_FCI_ADDR_MAP5 0x00D4 +#define PCI_CR_FCI_ADDR_MAP6 0x00D8 +#define PCI_CR_FCI_ADDR_MAP7 0x00DC +#define PCI_CR_CLK_CTRL 0x0000 +#define PCI_CR_PCI_MOD 0x0030 +#define PCI_CR_PC_ARB 0x0080 +#define PCI_CR_FCI_ADDR_MAP11hg 0x00E4 +#define PCI_CR_BAR11MASK 0x0044 +#define PCI_CR_BAR12MASK 0x0048 +#define PCI_CR_BAR13MASK 0x004C +#define PCI_CS_BASE_ADDR1 0x0010 +#define PCI_CR_PCI_ADDR_MAP11 0x0064 +#define PCI_CR_FCI_BURST_LENGTH 0x00E8 +#define PCI_CR_PCI_EOI 0x002C +#define PCI_CS_STS_CMD 0x0004 + +#define PCI_MASTER0_REQ_MASK_2BITS 8 +#define PCI_MASTER1_REQ_MASK_2BITS 10 +#define PCI_MASTER2_REQ_MASK_2BITS 12 +#define INTERNAL_ARB_ENABLE_BIT 0 + +#define LTQ_CGU_IFCCR 0x0018 +#define LTQ_CGU_PCICR 0x0034 + +#define ltq_pci_w32(x, y) ltq_w32((x), ltq_pci_membase + (y)) +#define ltq_pci_r32(x) ltq_r32(ltq_pci_membase + (x)) + +#define ltq_pci_cfg_w32(x, y) ltq_w32((x), ltq_pci_mapped_cfg + (y)) +#define ltq_pci_cfg_r32(x) ltq_r32(ltq_pci_mapped_cfg + (x)) + +__iomem void *ltq_pci_mapped_cfg; +static __iomem void *ltq_pci_membase; + +static struct gpio_desc *reset_gpio; +static struct clk *clk_pci, *clk_external; +static struct resource pci_io_resource; +static struct resource pci_mem_resource; +static struct pci_ops pci_ops = { + .read = ltq_pci_read_config_dword, + .write = ltq_pci_write_config_dword +}; + +static struct pci_controller pci_controller = { + .pci_ops = &pci_ops, + .mem_resource = &pci_mem_resource, + .mem_offset = 0x00000000UL, + .io_resource = &pci_io_resource, + .io_offset = 0x00000000UL, +}; + +static inline u32 ltq_calc_bar11mask(void) +{ + u32 mem, bar11mask; + + /* BAR11MASK value depends on available memory on system. 
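+ * E.g. with 64 MB of RAM, fls(mem) - 1 is 26, so the low 26 bits of
+ * 0x0ffffff0 are cleared, giving 0x0c000000 | 8 = 0x0c000008.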
*/ + mem = get_num_physpages() * PAGE_SIZE; + bar11mask = (0x0ffffff0 & ~((1 << (fls(mem) - 1)) - 1)) | 8; + + return bar11mask; +} + +static int ltq_pci_startup(struct platform_device *pdev) +{ + struct device_node *node = pdev->dev.of_node; + const __be32 *req_mask, *bus_clk; + u32 temp_buffer; + int error; + + /* get our clocks */ + clk_pci = clk_get(&pdev->dev, NULL); + if (IS_ERR(clk_pci)) { + dev_err(&pdev->dev, "failed to get pci clock\n"); + return PTR_ERR(clk_pci); + } + + clk_external = clk_get(&pdev->dev, "external"); + if (IS_ERR(clk_external)) { + clk_put(clk_pci); + dev_err(&pdev->dev, "failed to get external pci clock\n"); + return PTR_ERR(clk_external); + } + + /* read the bus speed that we want */ + bus_clk = of_get_property(node, "lantiq,bus-clock", NULL); + if (bus_clk) + clk_set_rate(clk_pci, *bus_clk); + + /* and enable the clocks */ + clk_enable(clk_pci); + if (of_find_property(node, "lantiq,external-clock", NULL)) + clk_enable(clk_external); + else + clk_disable(clk_external); + + /* setup reset gpio used by pci */ + reset_gpio = devm_gpiod_get_optional(&pdev->dev, "reset", + GPIOD_OUT_LOW); + error = PTR_ERR_OR_ZERO(reset_gpio); + if (error) { + dev_err(&pdev->dev, "failed to request gpio: %d\n", error); + return error; + } + gpiod_set_consumer_name(reset_gpio, "pci_reset"); + + /* enable auto-switching between PCI and EBU */ + ltq_pci_w32(0xa, PCI_CR_CLK_CTRL); + + /* busy, i.e. configuration is not done, PCI access has to be retried */ + ltq_pci_w32(ltq_pci_r32(PCI_CR_PCI_MOD) & ~(1 << 24), PCI_CR_PCI_MOD); + wmb(); + /* BUS Master/IO/MEM access */ + ltq_pci_cfg_w32(ltq_pci_cfg_r32(PCI_CS_STS_CMD) | 7, PCI_CS_STS_CMD); + + /* enable external 2 PCI masters */ + temp_buffer = ltq_pci_r32(PCI_CR_PC_ARB); + /* setup the request mask */ + req_mask = of_get_property(node, "req-mask", NULL); + if (req_mask) + temp_buffer &= ~((*req_mask & 0xf) << 16); + else + temp_buffer &= ~0xf0000; + /* enable internal arbiter */ + temp_buffer |= (1 << INTERNAL_ARB_ENABLE_BIT); + /* enable internal PCI master reqest */ + temp_buffer &= (~(3 << PCI_MASTER0_REQ_MASK_2BITS)); + + /* enable EBU request */ + temp_buffer &= (~(3 << PCI_MASTER1_REQ_MASK_2BITS)); + + /* enable all external masters request */ + temp_buffer &= (~(3 << PCI_MASTER2_REQ_MASK_2BITS)); + ltq_pci_w32(temp_buffer, PCI_CR_PC_ARB); + wmb(); + + /* setup BAR memory regions */ + ltq_pci_w32(0x18000000, PCI_CR_FCI_ADDR_MAP0); + ltq_pci_w32(0x18400000, PCI_CR_FCI_ADDR_MAP1); + ltq_pci_w32(0x18800000, PCI_CR_FCI_ADDR_MAP2); + ltq_pci_w32(0x18c00000, PCI_CR_FCI_ADDR_MAP3); + ltq_pci_w32(0x19000000, PCI_CR_FCI_ADDR_MAP4); + ltq_pci_w32(0x19400000, PCI_CR_FCI_ADDR_MAP5); + ltq_pci_w32(0x19800000, PCI_CR_FCI_ADDR_MAP6); + ltq_pci_w32(0x19c00000, PCI_CR_FCI_ADDR_MAP7); + ltq_pci_w32(0x1ae00000, PCI_CR_FCI_ADDR_MAP11hg); + ltq_pci_w32(ltq_calc_bar11mask(), PCI_CR_BAR11MASK); + ltq_pci_w32(0, PCI_CR_PCI_ADDR_MAP11); + ltq_pci_w32(0, PCI_CS_BASE_ADDR1); + /* both TX and RX endian swap are enabled */ + ltq_pci_w32(ltq_pci_r32(PCI_CR_PCI_EOI) | 3, PCI_CR_PCI_EOI); + wmb(); + ltq_pci_w32(ltq_pci_r32(PCI_CR_BAR12MASK) | 0x80000000, + PCI_CR_BAR12MASK); + ltq_pci_w32(ltq_pci_r32(PCI_CR_BAR13MASK) | 0x80000000, + PCI_CR_BAR13MASK); + /*use 8 dw burst length */ + ltq_pci_w32(0x303, PCI_CR_FCI_BURST_LENGTH); + ltq_pci_w32(ltq_pci_r32(PCI_CR_PCI_MOD) | (1 << 24), PCI_CR_PCI_MOD); + wmb(); + + /* setup irq line */ + ltq_ebu_w32(ltq_ebu_r32(LTQ_EBU_PCC_CON) | 0xc, LTQ_EBU_PCC_CON); + ltq_ebu_w32(ltq_ebu_r32(LTQ_EBU_PCC_IEN) | 0x10, 
LTQ_EBU_PCC_IEN); + + /* toggle reset pin */ + if (reset_gpio) { + gpiod_set_value_cansleep(reset_gpio, 1); + wmb(); + mdelay(1); + gpiod_set_value_cansleep(reset_gpio, 0); + } + return 0; +} + +static int ltq_pci_probe(struct platform_device *pdev) +{ + struct resource *res_cfg, *res_bridge; + + pci_clear_flags(PCI_PROBE_ONLY); + + res_bridge = platform_get_resource(pdev, IORESOURCE_MEM, 1); + ltq_pci_membase = devm_ioremap_resource(&pdev->dev, res_bridge); + if (IS_ERR(ltq_pci_membase)) + return PTR_ERR(ltq_pci_membase); + + res_cfg = platform_get_resource(pdev, IORESOURCE_MEM, 0); + ltq_pci_mapped_cfg = devm_ioremap_resource(&pdev->dev, res_cfg); + if (IS_ERR(ltq_pci_mapped_cfg)) + return PTR_ERR(ltq_pci_mapped_cfg); + + ltq_pci_startup(pdev); + + pci_load_of_ranges(&pci_controller, pdev->dev.of_node); + register_pci_controller(&pci_controller); + return 0; +} + +static const struct of_device_id ltq_pci_match[] = { + { .compatible = "lantiq,pci-xway" }, + {}, +}; + +static struct platform_driver ltq_pci_driver = { + .probe = ltq_pci_probe, + .driver = { + .name = "pci-xway", + .of_match_table = ltq_pci_match, + }, +}; + +int __init pcibios_init(void) +{ + int ret = platform_driver_register(<q_pci_driver); + if (ret) + pr_info("pci-xway: Error registering platform driver!"); + return ret; +} + +arch_initcall(pcibios_init); diff --git a/arch/mips/pci/pci-lantiq.h b/arch/mips/pci/pci-lantiq.h new file mode 100644 index 000000000..fdbb0e89b --- /dev/null +++ b/arch/mips/pci/pci-lantiq.h @@ -0,0 +1,16 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * + * Copyright (C) 2010 John Crispin + */ + +#ifndef _LTQ_PCI_H__ +#define _LTQ_PCI_H__ + +extern __iomem void *ltq_pci_mapped_cfg; +extern int ltq_pci_read_config_dword(struct pci_bus *bus, + unsigned int devfn, int where, int size, u32 *val); +extern int ltq_pci_write_config_dword(struct pci_bus *bus, + unsigned int devfn, int where, int size, u32 val); + +#endif diff --git a/arch/mips/pci/pci-legacy.c b/arch/mips/pci/pci-legacy.c new file mode 100644 index 000000000..468722c8a --- /dev/null +++ b/arch/mips/pci/pci-legacy.c @@ -0,0 +1,307 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * + * Copyright (C) 2003, 04, 11 Ralf Baechle (ralf@linux-mips.org) + * Copyright (C) 2011 Wind River Systems, + * written by Ralf Baechle (ralf@linux-mips.org) + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +/* + * If PCI_PROBE_ONLY in pci_flags is set, we don't change any PCI resource + * assignments. + */ + +/* + * The PCI controller list. + */ +static LIST_HEAD(controllers); + +static int pci_initialized; + +/* + * We need to avoid collisions with `mirrored' VGA ports + * and other strange ISA hardware, so we always want the + * addresses to be allocated in the 0x000-0x0ff region + * modulo 0x400. + * + * Why? Because some silly external IO cards only decode + * the low 10 bits of the IO address. The 0x00-0xff region + * is reserved for motherboard devices that decode all 16 + * bits, so it's ok to allocate at, say, 0x2800-0x28ff, + * but we want to try to avoid allocating at 0x2900-0x2bff + * which might have be mirrored at 0x0100-0x03ff.. 
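+ * In addition to this modulo-0x400 rule, the helper below first
+ * clamps the start address to PCIBIOS_MIN_IO (or PCIBIOS_MIN_MEM)
+ * relative to the hose's own resource.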
+ */ +resource_size_t +pcibios_align_resource(void *data, const struct resource *res, + resource_size_t size, resource_size_t align) +{ + struct pci_dev *dev = data; + struct pci_controller *hose = dev->sysdata; + resource_size_t start = res->start; + + if (res->flags & IORESOURCE_IO) { + /* Make sure we start at our min on all hoses */ + if (start < PCIBIOS_MIN_IO + hose->io_resource->start) + start = PCIBIOS_MIN_IO + hose->io_resource->start; + + /* + * Put everything into 0x00-0xff region modulo 0x400 + */ + if (start & 0x300) + start = (start + 0x3ff) & ~0x3ff; + } else if (res->flags & IORESOURCE_MEM) { + /* Make sure we start at our min on all hoses */ + if (start < PCIBIOS_MIN_MEM + hose->mem_resource->start) + start = PCIBIOS_MIN_MEM + hose->mem_resource->start; + } + + return start; +} + +static void pcibios_scanbus(struct pci_controller *hose) +{ + static int next_busno; + static int need_domain_info; + LIST_HEAD(resources); + struct pci_bus *bus; + struct pci_host_bridge *bridge; + int ret; + + bridge = pci_alloc_host_bridge(0); + if (!bridge) + return; + + if (hose->get_busno && pci_has_flag(PCI_PROBE_ONLY)) + next_busno = (*hose->get_busno)(); + + pci_add_resource_offset(&resources, + hose->mem_resource, hose->mem_offset); + pci_add_resource_offset(&resources, + hose->io_resource, hose->io_offset); + list_splice_init(&resources, &bridge->windows); + bridge->dev.parent = NULL; + bridge->sysdata = hose; + bridge->busnr = next_busno; + bridge->ops = hose->pci_ops; + bridge->swizzle_irq = pci_common_swizzle; + bridge->map_irq = pcibios_map_irq; + ret = pci_scan_root_bus_bridge(bridge); + if (ret) { + pci_free_host_bridge(bridge); + return; + } + + hose->bus = bus = bridge->bus; + + need_domain_info = need_domain_info || pci_domain_nr(bus); + set_pci_need_domain_info(hose, need_domain_info); + + next_busno = bus->busn_res.end + 1; + /* Don't allow 8-bit bus number overflow inside the hose - + reserve some space for bridges. */ + if (next_busno > 224) { + next_busno = 0; + need_domain_info = 1; + } + + /* + * We insert PCI resources into the iomem_resource and + * ioport_resource trees in either pci_bus_claim_resources() + * or pci_bus_assign_resources(). 
+ */ + if (pci_has_flag(PCI_PROBE_ONLY)) { + pci_bus_claim_resources(bus); + } else { + struct pci_bus *child; + + pci_bus_size_bridges(bus); + pci_bus_assign_resources(bus); + list_for_each_entry(child, &bus->children, node) + pcie_bus_configure_settings(child); + } + pci_bus_add_devices(bus); +} + +#ifdef CONFIG_OF +void pci_load_of_ranges(struct pci_controller *hose, struct device_node *node) +{ + struct of_pci_range range; + struct of_pci_range_parser parser; + + hose->of_node = node; + + if (of_pci_range_parser_init(&parser, node)) + return; + + for_each_of_pci_range(&parser, &range) { + struct resource *res = NULL; + + switch (range.flags & IORESOURCE_TYPE_BITS) { + case IORESOURCE_IO: + hose->io_map_base = + (unsigned long)ioremap(range.cpu_addr, + range.size); + res = hose->io_resource; + break; + case IORESOURCE_MEM: + res = hose->mem_resource; + break; + } + if (res != NULL) { + res->name = node->full_name; + res->flags = range.flags; + res->start = range.cpu_addr; + res->end = range.cpu_addr + range.size - 1; + res->parent = res->child = res->sibling = NULL; + } + } +} + +struct device_node *pcibios_get_phb_of_node(struct pci_bus *bus) +{ + struct pci_controller *hose = bus->sysdata; + + return of_node_get(hose->of_node); +} +#endif + +static DEFINE_MUTEX(pci_scan_mutex); + +void register_pci_controller(struct pci_controller *hose) +{ + struct resource *parent; + + parent = hose->mem_resource->parent; + if (!parent) + parent = &iomem_resource; + + if (request_resource(parent, hose->mem_resource) < 0) + goto out; + + parent = hose->io_resource->parent; + if (!parent) + parent = &ioport_resource; + + if (request_resource(parent, hose->io_resource) < 0) { + release_resource(hose->mem_resource); + goto out; + } + + INIT_LIST_HEAD(&hose->list); + list_add_tail(&hose->list, &controllers); + + /* + * Do not panic here but later - this might happen before console init. + */ + if (!hose->io_map_base) { + printk(KERN_WARNING + "registering PCI controller with io_map_base unset\n"); + } + + /* + * Scan the bus if it is register after the PCI subsystem + * initialization. + */ + if (pci_initialized) { + mutex_lock(&pci_scan_mutex); + pcibios_scanbus(hose); + mutex_unlock(&pci_scan_mutex); + } + + return; + +out: + printk(KERN_WARNING + "Skipping PCI bus scan due to resource conflict\n"); +} + +static int __init pcibios_init(void) +{ + struct pci_controller *hose; + + /* Scan all of the recorded PCI controllers. 
*/ + list_for_each_entry(hose, &controllers, list) + pcibios_scanbus(hose); + + pci_initialized = 1; + + return 0; +} + +subsys_initcall(pcibios_init); + +static int pcibios_enable_resources(struct pci_dev *dev, int mask) +{ + u16 cmd, old_cmd; + int idx; + struct resource *r; + + pci_read_config_word(dev, PCI_COMMAND, &cmd); + old_cmd = cmd; + for (idx = 0; idx < PCI_NUM_RESOURCES; idx++) { + /* Only set up the requested stuff */ + if (!(mask & (1<resource[idx]; + if (!(r->flags & (IORESOURCE_IO | IORESOURCE_MEM))) + continue; + if ((idx == PCI_ROM_RESOURCE) && + (!(r->flags & IORESOURCE_ROM_ENABLE))) + continue; + if (!r->start && r->end) { + pci_err(dev, + "can't enable device: resource collisions\n"); + return -EINVAL; + } + if (r->flags & IORESOURCE_IO) + cmd |= PCI_COMMAND_IO; + if (r->flags & IORESOURCE_MEM) + cmd |= PCI_COMMAND_MEMORY; + } + if (cmd != old_cmd) { + pci_info(dev, "enabling device (%04x -> %04x)\n", old_cmd, cmd); + pci_write_config_word(dev, PCI_COMMAND, cmd); + } + return 0; +} + +int pcibios_enable_device(struct pci_dev *dev, int mask) +{ + int err = pcibios_enable_resources(dev, mask); + + if (err < 0) + return err; + + return pcibios_plat_dev_init(dev); +} + +void pcibios_fixup_bus(struct pci_bus *bus) +{ + struct pci_dev *dev = bus->self; + + if (pci_has_flag(PCI_PROBE_ONLY) && dev && + (dev->class >> 8) == PCI_CLASS_BRIDGE_PCI) { + pci_read_bridge_bases(bus); + } +} + +char * (*pcibios_plat_setup)(char *str) __initdata; + +char *__init pcibios_setup(char *str) +{ + if (pcibios_plat_setup) + return pcibios_plat_setup(str); + return str; +} diff --git a/arch/mips/pci/pci-malta.c b/arch/mips/pci/pci-malta.c new file mode 100644 index 000000000..6aefdf20c --- /dev/null +++ b/arch/mips/pci/pci-malta.c @@ -0,0 +1,242 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (C) 1999, 2000, 2004, 2005 MIPS Technologies, Inc. + * All rights reserved. + * Authors: Carsten Langgaard + * Maciej W. Rozycki + * + * Copyright (C) 2004 by Ralf Baechle (ralf@linux-mips.org) + * + * MIPS boards specific PCI support. 
+ */ +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +static struct resource bonito64_mem_resource = { + .name = "Bonito PCI MEM", + .flags = IORESOURCE_MEM, +}; + +static struct resource bonito64_io_resource = { + .name = "Bonito PCI I/O", + .start = 0x00000000UL, + .end = 0x000fffffUL, + .flags = IORESOURCE_IO, +}; + +static struct resource gt64120_mem_resource = { + .name = "GT-64120 PCI MEM", + .flags = IORESOURCE_MEM, +}; + +static struct resource gt64120_io_resource = { + .name = "GT-64120 PCI I/O", + .flags = IORESOURCE_IO, +}; + +static struct resource msc_mem_resource = { + .name = "MSC PCI MEM", + .flags = IORESOURCE_MEM, +}; + +static struct resource msc_io_resource = { + .name = "MSC PCI I/O", + .flags = IORESOURCE_IO, +}; + +extern struct pci_ops bonito64_pci_ops; +extern struct pci_ops gt64xxx_pci0_ops; +extern struct pci_ops msc_pci_ops; + +static struct pci_controller bonito64_controller = { + .pci_ops = &bonito64_pci_ops, + .io_resource = &bonito64_io_resource, + .mem_resource = &bonito64_mem_resource, + .io_offset = 0x00000000UL, +}; + +static struct pci_controller gt64120_controller = { + .pci_ops = >64xxx_pci0_ops, + .io_resource = >64120_io_resource, + .mem_resource = >64120_mem_resource, +}; + +static struct pci_controller msc_controller = { + .pci_ops = &msc_pci_ops, + .io_resource = &msc_io_resource, + .mem_resource = &msc_mem_resource, +}; + +void __init mips_pcibios_init(void) +{ + struct pci_controller *controller; + resource_size_t start, end, map, start1, end1, map1, map2, map3, mask; + + switch (mips_revision_sconid) { + case MIPS_REVISION_SCON_GT64120: + /* + * Due to a bug in the Galileo system controller, we need + * to setup the PCI BAR for the Galileo internal registers. + * This should be done in the bios/bootprom and will be + * fixed in a later revision of YAMON (the MIPS boards + * boot prom). + */ + GT_WRITE(GT_PCI0_CFGADDR_OFS, + (0 << GT_PCI0_CFGADDR_BUSNUM_SHF) | /* Local bus */ + (0 << GT_PCI0_CFGADDR_DEVNUM_SHF) | /* GT64120 dev */ + (0 << GT_PCI0_CFGADDR_FUNCTNUM_SHF) | /* Function 0*/ + ((0x20/4) << GT_PCI0_CFGADDR_REGNUM_SHF) | /* BAR 4*/ + GT_PCI0_CFGADDR_CONFIGEN_BIT); + + /* Perform the write */ + GT_WRITE(GT_PCI0_CFGDATA_OFS, CPHYSADDR(MIPS_GT_BASE)); + + /* Set up resource ranges from the controller's registers. */ + start = GT_READ(GT_PCI0M0LD_OFS); + end = GT_READ(GT_PCI0M0HD_OFS); + map = GT_READ(GT_PCI0M0REMAP_OFS); + end = (end & GT_PCI_HD_MSK) | (start & ~GT_PCI_HD_MSK); + start1 = GT_READ(GT_PCI0M1LD_OFS); + end1 = GT_READ(GT_PCI0M1HD_OFS); + map1 = GT_READ(GT_PCI0M1REMAP_OFS); + end1 = (end1 & GT_PCI_HD_MSK) | (start1 & ~GT_PCI_HD_MSK); + /* Cannot support multiple windows, use the wider. */ + if (end1 - start1 > end - start) { + start = start1; + end = end1; + map = map1; + } + mask = ~(start ^ end); + /* We don't support remapping with a discontiguous mask. */ + BUG_ON((start & GT_PCI_HD_MSK) != (map & GT_PCI_HD_MSK) && + mask != ~((mask & -mask) - 1)); + gt64120_mem_resource.start = start; + gt64120_mem_resource.end = end; + gt64120_controller.mem_offset = (start & mask) - (map & mask); + /* Addresses are 36-bit, so do shifts in the destinations. 
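+ * The 32-bit window register values are therefore shifted left by
+ * GT_PCI_DCRM_SHF below, and the low bits of the resource end are
+ * filled with ones so the resource covers the full decoded range.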
*/ + gt64120_mem_resource.start <<= GT_PCI_DCRM_SHF; + gt64120_mem_resource.end <<= GT_PCI_DCRM_SHF; + gt64120_mem_resource.end |= (1 << GT_PCI_DCRM_SHF) - 1; + gt64120_controller.mem_offset <<= GT_PCI_DCRM_SHF; + + start = GT_READ(GT_PCI0IOLD_OFS); + end = GT_READ(GT_PCI0IOHD_OFS); + map = GT_READ(GT_PCI0IOREMAP_OFS); + end = (end & GT_PCI_HD_MSK) | (start & ~GT_PCI_HD_MSK); + mask = ~(start ^ end); + /* We don't support remapping with a discontiguous mask. */ + BUG_ON((start & GT_PCI_HD_MSK) != (map & GT_PCI_HD_MSK) && + mask != ~((mask & -mask) - 1)); + gt64120_io_resource.start = map & mask; + gt64120_io_resource.end = (map & mask) | ~mask; + gt64120_controller.io_offset = 0; + /* Addresses are 36-bit, so do shifts in the destinations. */ + gt64120_io_resource.start <<= GT_PCI_DCRM_SHF; + gt64120_io_resource.end <<= GT_PCI_DCRM_SHF; + gt64120_io_resource.end |= (1 << GT_PCI_DCRM_SHF) - 1; + + controller = >64120_controller; + break; + + case MIPS_REVISION_SCON_BONITO: + /* Set up resource ranges from the controller's registers. */ + map = BONITO_PCIMAP; + map1 = (BONITO_PCIMAP & BONITO_PCIMAP_PCIMAP_LO0) >> + BONITO_PCIMAP_PCIMAP_LO0_SHIFT; + map2 = (BONITO_PCIMAP & BONITO_PCIMAP_PCIMAP_LO1) >> + BONITO_PCIMAP_PCIMAP_LO1_SHIFT; + map3 = (BONITO_PCIMAP & BONITO_PCIMAP_PCIMAP_LO2) >> + BONITO_PCIMAP_PCIMAP_LO2_SHIFT; + /* Combine as many adjacent windows as possible. */ + map = map1; + start = BONITO_PCILO0_BASE; + end = 1; + if (map3 == map2 + 1) { + map = map2; + start = BONITO_PCILO1_BASE; + end++; + } + if (map2 == map1 + 1) { + map = map1; + start = BONITO_PCILO0_BASE; + end++; + } + bonito64_mem_resource.start = start; + bonito64_mem_resource.end = start + + BONITO_PCIMAP_WINBASE(end) - 1; + bonito64_controller.mem_offset = start - + BONITO_PCIMAP_WINBASE(map); + + controller = &bonito64_controller; + break; + + case MIPS_REVISION_SCON_SOCIT: + case MIPS_REVISION_SCON_ROCIT: + case MIPS_REVISION_SCON_SOCITSC: + case MIPS_REVISION_SCON_SOCITSCP: + /* Set up resource ranges from the controller's registers. */ + MSC_READ(MSC01_PCI_SC2PMBASL, start); + MSC_READ(MSC01_PCI_SC2PMMSKL, mask); + MSC_READ(MSC01_PCI_SC2PMMAPL, map); + msc_mem_resource.start = start & mask; + msc_mem_resource.end = (start & mask) | ~mask; + msc_controller.mem_offset = (start & mask) - (map & mask); + if (mips_cps_numiocu(0)) { + write_gcr_reg0_base(start); + write_gcr_reg0_mask(mask | + CM_GCR_REGn_MASK_CMTGT_IOCU0); + } + MSC_READ(MSC01_PCI_SC2PIOBASL, start); + MSC_READ(MSC01_PCI_SC2PIOMSKL, mask); + MSC_READ(MSC01_PCI_SC2PIOMAPL, map); + msc_io_resource.start = map & mask; + msc_io_resource.end = (map & mask) | ~mask; + msc_controller.io_offset = 0; + ioport_resource.end = ~mask; + if (mips_cps_numiocu(0)) { + write_gcr_reg1_base(start); + write_gcr_reg1_mask(mask | + CM_GCR_REGn_MASK_CMTGT_IOCU0); + } + /* If ranges overlap I/O takes precedence. */ + start = start & mask; + end = start | ~mask; + if ((start >= msc_mem_resource.start && + start <= msc_mem_resource.end) || + (end >= msc_mem_resource.start && + end <= msc_mem_resource.end)) { + /* Use the larger space. 
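+ * That is, trim the memory window on whichever side of the overlap
+ * leaves the larger remaining memory range.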
*/ + start = max(start, msc_mem_resource.start); + end = min(end, msc_mem_resource.end); + if (start - msc_mem_resource.start >= + msc_mem_resource.end - end) + msc_mem_resource.end = start - 1; + else + msc_mem_resource.start = end + 1; + } + + controller = &msc_controller; + break; + default: + return; + } + + /* PIIX4 ACPI starts at 0x1000 */ + if (controller->io_resource->start < 0x00001000UL) + controller->io_resource->start = 0x00001000UL; + + iomem_resource.end &= 0xfffffffffULL; /* 64 GB */ + ioport_resource.end = controller->io_resource->end; + + controller->io_map_base = mips_io_port_base; + + register_pci_controller(controller); +} diff --git a/arch/mips/pci/pci-mt7620.c b/arch/mips/pci/pci-mt7620.c new file mode 100644 index 000000000..e03293234 --- /dev/null +++ b/arch/mips/pci/pci-mt7620.c @@ -0,0 +1,421 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Ralink MT7620A SoC PCI support + * + * Copyright (C) 2007-2013 Bruce Chang (Mediatek) + * Copyright (C) 2013-2016 John Crispin + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#define RALINK_PCI_IO_MAP_BASE 0x10160000 +#define RALINK_PCI_MEMORY_BASE 0x0 + +#define RALINK_INT_PCIE0 4 + +#define RALINK_CLKCFG1 0x30 +#define RALINK_GPIOMODE 0x60 + +#define PPLL_CFG1 0x9c +#define PPLL_LD BIT(23) + +#define PPLL_DRV 0xa0 +#define PDRV_SW_SET BIT(31) +#define LC_CKDRVPD BIT(19) +#define LC_CKDRVOHZ BIT(18) +#define LC_CKDRVHZ BIT(17) +#define LC_CKTEST BIT(16) + +/* PCI Bridge registers */ +#define RALINK_PCI_PCICFG_ADDR 0x00 +#define PCIRST BIT(1) + +#define RALINK_PCI_PCIENA 0x0C +#define PCIINT2 BIT(20) + +#define RALINK_PCI_CONFIG_ADDR 0x20 +#define RALINK_PCI_CONFIG_DATA_VIRT_REG 0x24 +#define RALINK_PCI_MEMBASE 0x28 +#define RALINK_PCI_IOBASE 0x2C + +/* PCI RC registers */ +#define RALINK_PCI0_BAR0SETUP_ADDR 0x10 +#define RALINK_PCI0_IMBASEBAR0_ADDR 0x18 +#define RALINK_PCI0_ID 0x30 +#define RALINK_PCI0_CLASS 0x34 +#define RALINK_PCI0_SUBID 0x38 +#define RALINK_PCI0_STATUS 0x50 +#define PCIE_LINK_UP_ST BIT(0) + +#define PCIEPHY0_CFG 0x90 + +#define RALINK_PCIEPHY_P0_CTL_OFFSET 0x7498 +#define RALINK_PCIE0_CLK_EN BIT(26) + +#define BUSY 0x80000000 +#define WAITRETRY_MAX 10 +#define WRITE_MODE (1UL << 23) +#define DATA_SHIFT 0 +#define ADDR_SHIFT 8 + + +static void __iomem *bridge_base; +static void __iomem *pcie_base; + +static struct reset_control *rstpcie0; + +static inline void bridge_w32(u32 val, unsigned reg) +{ + iowrite32(val, bridge_base + reg); +} + +static inline u32 bridge_r32(unsigned reg) +{ + return ioread32(bridge_base + reg); +} + +static inline void pcie_w32(u32 val, unsigned reg) +{ + iowrite32(val, pcie_base + reg); +} + +static inline u32 pcie_r32(unsigned reg) +{ + return ioread32(pcie_base + reg); +} + +static inline void pcie_m32(u32 clr, u32 set, unsigned reg) +{ + u32 val = pcie_r32(reg); + + val &= ~clr; + val |= set; + pcie_w32(val, reg); +} + +static int wait_pciephy_busy(void) +{ + unsigned long reg_value = 0x0, retry = 0; + + while (1) { + reg_value = pcie_r32(PCIEPHY0_CFG); + + if (reg_value & BUSY) + mdelay(100); + else + break; + if (retry++ > WAITRETRY_MAX) { + pr_warn("PCIE-PHY retry failed.\n"); + return -1; + } + } + return 0; +} + +static void pcie_phy(unsigned long addr, unsigned long val) +{ + wait_pciephy_busy(); + pcie_w32(WRITE_MODE | (val << DATA_SHIFT) | (addr << ADDR_SHIFT), + PCIEPHY0_CFG); + mdelay(1); + wait_pciephy_busy(); +} + +static int pci_config_read(struct pci_bus *bus, 
unsigned int devfn, int where, + int size, u32 *val) +{ + unsigned int slot = PCI_SLOT(devfn); + u8 func = PCI_FUNC(devfn); + u32 address; + u32 data; + u32 num = 0; + + if (bus) + num = bus->number; + + address = (((where & 0xF00) >> 8) << 24) | (num << 16) | (slot << 11) | + (func << 8) | (where & 0xfc) | 0x80000000; + bridge_w32(address, RALINK_PCI_CONFIG_ADDR); + data = bridge_r32(RALINK_PCI_CONFIG_DATA_VIRT_REG); + + switch (size) { + case 1: + *val = (data >> ((where & 3) << 3)) & 0xff; + break; + case 2: + *val = (data >> ((where & 3) << 3)) & 0xffff; + break; + case 4: + *val = data; + break; + } + + return PCIBIOS_SUCCESSFUL; +} + +static int pci_config_write(struct pci_bus *bus, unsigned int devfn, int where, + int size, u32 val) +{ + unsigned int slot = PCI_SLOT(devfn); + u8 func = PCI_FUNC(devfn); + u32 address; + u32 data; + u32 num = 0; + + if (bus) + num = bus->number; + + address = (((where & 0xF00) >> 8) << 24) | (num << 16) | (slot << 11) | + (func << 8) | (where & 0xfc) | 0x80000000; + bridge_w32(address, RALINK_PCI_CONFIG_ADDR); + data = bridge_r32(RALINK_PCI_CONFIG_DATA_VIRT_REG); + + switch (size) { + case 1: + data = (data & ~(0xff << ((where & 3) << 3))) | + (val << ((where & 3) << 3)); + break; + case 2: + data = (data & ~(0xffff << ((where & 3) << 3))) | + (val << ((where & 3) << 3)); + break; + case 4: + data = val; + break; + } + + bridge_w32(data, RALINK_PCI_CONFIG_DATA_VIRT_REG); + + return PCIBIOS_SUCCESSFUL; +} + +struct pci_ops mt7620_pci_ops = { + .read = pci_config_read, + .write = pci_config_write, +}; + +static struct resource mt7620_res_pci_mem1; +static struct resource mt7620_res_pci_io1; +struct pci_controller mt7620_controller = { + .pci_ops = &mt7620_pci_ops, + .mem_resource = &mt7620_res_pci_mem1, + .mem_offset = 0x00000000UL, + .io_resource = &mt7620_res_pci_io1, + .io_offset = 0x00000000UL, + .io_map_base = 0xa0000000, +}; + +static int mt7620_pci_hw_init(struct platform_device *pdev) +{ + /* bypass PCIe DLL */ + pcie_phy(0x0, 0x80); + pcie_phy(0x1, 0x04); + + /* Elastic buffer control */ + pcie_phy(0x68, 0xB4); + + /* put core into reset */ + pcie_m32(0, PCIRST, RALINK_PCI_PCICFG_ADDR); + reset_control_assert(rstpcie0); + + /* disable power and all clocks */ + rt_sysc_m32(RALINK_PCIE0_CLK_EN, 0, RALINK_CLKCFG1); + rt_sysc_m32(LC_CKDRVPD, PDRV_SW_SET, PPLL_DRV); + + /* bring core out of reset */ + reset_control_deassert(rstpcie0); + rt_sysc_m32(0, RALINK_PCIE0_CLK_EN, RALINK_CLKCFG1); + mdelay(100); + + if (!(rt_sysc_r32(PPLL_CFG1) & PPLL_LD)) { + dev_err(&pdev->dev, "pcie PLL not locked, aborting init\n"); + reset_control_assert(rstpcie0); + rt_sysc_m32(RALINK_PCIE0_CLK_EN, 0, RALINK_CLKCFG1); + return -1; + } + + /* power up the bus */ + rt_sysc_m32(LC_CKDRVHZ | LC_CKDRVOHZ, LC_CKDRVPD | PDRV_SW_SET, + PPLL_DRV); + + return 0; +} + +static int mt7628_pci_hw_init(struct platform_device *pdev) +{ + u32 val = 0; + + /* bring the core out of reset */ + rt_sysc_m32(BIT(16), 0, RALINK_GPIOMODE); + reset_control_deassert(rstpcie0); + + /* enable the pci clk */ + rt_sysc_m32(0, RALINK_PCIE0_CLK_EN, RALINK_CLKCFG1); + mdelay(100); + + /* voodoo from the SDK driver */ + pcie_m32(~0xff, 0x5, RALINK_PCIEPHY_P0_CTL_OFFSET); + + pci_config_read(NULL, 0, 0x70c, 4, &val); + val &= ~(0xff) << 8; + val |= 0x50 << 8; + pci_config_write(NULL, 0, 0x70c, 4, val); + + pci_config_read(NULL, 0, 0x70c, 4, &val); + dev_err(&pdev->dev, "Port 0 N_FTS = %x\n", (unsigned int) val); + + return 0; +} + +static int mt7620_pci_probe(struct platform_device *pdev) +{ + struct 
resource *bridge_res = platform_get_resource(pdev, + IORESOURCE_MEM, 0); + struct resource *pcie_res = platform_get_resource(pdev, + IORESOURCE_MEM, 1); + u32 val = 0; + + rstpcie0 = devm_reset_control_get_exclusive(&pdev->dev, "pcie0"); + if (IS_ERR(rstpcie0)) + return PTR_ERR(rstpcie0); + + bridge_base = devm_ioremap_resource(&pdev->dev, bridge_res); + if (IS_ERR(bridge_base)) + return PTR_ERR(bridge_base); + + pcie_base = devm_ioremap_resource(&pdev->dev, pcie_res); + if (IS_ERR(pcie_base)) + return PTR_ERR(pcie_base); + + iomem_resource.start = 0; + iomem_resource.end = ~0; + ioport_resource.start = 0; + ioport_resource.end = ~0; + + /* bring up the pci core */ + switch (ralink_soc) { + case MT762X_SOC_MT7620A: + if (mt7620_pci_hw_init(pdev)) + return -1; + break; + + case MT762X_SOC_MT7628AN: + case MT762X_SOC_MT7688: + if (mt7628_pci_hw_init(pdev)) + return -1; + break; + + default: + dev_err(&pdev->dev, "pcie is not supported on this hardware\n"); + return -1; + } + mdelay(50); + + /* enable write access */ + pcie_m32(PCIRST, 0, RALINK_PCI_PCICFG_ADDR); + mdelay(100); + + /* check if there is a card present */ + if ((pcie_r32(RALINK_PCI0_STATUS) & PCIE_LINK_UP_ST) == 0) { + reset_control_assert(rstpcie0); + rt_sysc_m32(RALINK_PCIE0_CLK_EN, 0, RALINK_CLKCFG1); + if (ralink_soc == MT762X_SOC_MT7620A) + rt_sysc_m32(LC_CKDRVPD, PDRV_SW_SET, PPLL_DRV); + dev_err(&pdev->dev, "PCIE0 no card, disable it(RST&CLK)\n"); + return -1; + } + + /* setup ranges */ + bridge_w32(0xffffffff, RALINK_PCI_MEMBASE); + bridge_w32(RALINK_PCI_IO_MAP_BASE, RALINK_PCI_IOBASE); + + pcie_w32(0x7FFF0001, RALINK_PCI0_BAR0SETUP_ADDR); + pcie_w32(RALINK_PCI_MEMORY_BASE, RALINK_PCI0_IMBASEBAR0_ADDR); + pcie_w32(0x06040001, RALINK_PCI0_CLASS); + + /* enable interrupts */ + pcie_m32(0, PCIINT2, RALINK_PCI_PCIENA); + + /* voodoo from the SDK driver */ + pci_config_read(NULL, 0, 4, 4, &val); + pci_config_write(NULL, 0, 4, 4, val | 0x7); + + pci_load_of_ranges(&mt7620_controller, pdev->dev.of_node); + register_pci_controller(&mt7620_controller); + + return 0; +} + +int pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) +{ + u16 cmd; + u32 val; + int irq = 0; + + if ((dev->bus->number == 0) && (slot == 0)) { + pcie_w32(0x7FFF0001, RALINK_PCI0_BAR0SETUP_ADDR); + pci_config_write(dev->bus, 0, PCI_BASE_ADDRESS_0, 4, + RALINK_PCI_MEMORY_BASE); + pci_config_read(dev->bus, 0, PCI_BASE_ADDRESS_0, 4, &val); + } else if ((dev->bus->number == 1) && (slot == 0x0)) { + irq = RALINK_INT_PCIE0; + } else { + dev_err(&dev->dev, "no irq found - bus=0x%x, slot = 0x%x\n", + dev->bus->number, slot); + return 0; + } + dev_err(&dev->dev, "card - bus=0x%x, slot = 0x%x irq=%d\n", + dev->bus->number, slot, irq); + + /* configure the cache line size to 0x14 */ + pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE, 0x14); + + /* configure latency timer to 0xff */ + pci_write_config_byte(dev, PCI_LATENCY_TIMER, 0xff); + pci_read_config_word(dev, PCI_COMMAND, &cmd); + + /* setup the slot */ + cmd = cmd | PCI_COMMAND_MASTER | PCI_COMMAND_IO | PCI_COMMAND_MEMORY; + pci_write_config_word(dev, PCI_COMMAND, cmd); + pci_write_config_byte(dev, PCI_INTERRUPT_LINE, dev->irq); + + return irq; +} + +int pcibios_plat_dev_init(struct pci_dev *dev) +{ + return 0; +} + +static const struct of_device_id mt7620_pci_ids[] = { + { .compatible = "mediatek,mt7620-pci" }, + {}, +}; + +static struct platform_driver mt7620_pci_driver = { + .probe = mt7620_pci_probe, + .driver = { + .name = "mt7620-pci", + .of_match_table = of_match_ptr(mt7620_pci_ids), + }, +}; + +static 
int __init mt7620_pci_init(void) +{ + return platform_driver_register(&mt7620_pci_driver); +} + +arch_initcall(mt7620_pci_init); diff --git a/arch/mips/pci/pci-octeon.c b/arch/mips/pci/pci-octeon.c new file mode 100644 index 000000000..e457a18cb --- /dev/null +++ b/arch/mips/pci/pci-octeon.c @@ -0,0 +1,711 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 2005-2009 Cavium Networks + */ +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include +#include +#include +#include + +#define USE_OCTEON_INTERNAL_ARBITER + +/* + * Octeon's PCI controller uses did=3, subdid=2 for PCI IO + * addresses. Use PCI endian swapping 1 so no address swapping is + * necessary. The Linux io routines will endian swap the data. + */ +#define OCTEON_PCI_IOSPACE_BASE 0x80011a0400000000ull +#define OCTEON_PCI_IOSPACE_SIZE (1ull<<32) + +/* Octeon't PCI controller uses did=3, subdid=3 for PCI memory. */ +#define OCTEON_PCI_MEMSPACE_OFFSET (0x00011b0000000000ull) + +u64 octeon_bar1_pci_phys; + +/** + * This is the bit decoding used for the Octeon PCI controller addresses + */ +union octeon_pci_address { + uint64_t u64; + struct { + uint64_t upper:2; + uint64_t reserved:13; + uint64_t io:1; + uint64_t did:5; + uint64_t subdid:3; + uint64_t reserved2:4; + uint64_t endian_swap:2; + uint64_t reserved3:10; + uint64_t bus:8; + uint64_t dev:5; + uint64_t func:3; + uint64_t reg:8; + } s; +}; + +int (*octeon_pcibios_map_irq)(const struct pci_dev *dev, u8 slot, u8 pin); +enum octeon_dma_bar_type octeon_dma_bar_type = OCTEON_DMA_BAR_TYPE_INVALID; + +/** + * Map a PCI device to the appropriate interrupt line + * + * @dev: The Linux PCI device structure for the device to map + * @slot: The slot number for this device on __BUS 0__. Linux + * enumerates through all the bridges and figures out the + * slot on Bus 0 where this device eventually hooks to. + * @pin: The PCI interrupt pin read from the device, then swizzled + * as it goes through each bridge. + * Returns Interrupt number for the device + */ +int pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) +{ + if (octeon_pcibios_map_irq) + return octeon_pcibios_map_irq(dev, slot, pin); + else + panic("octeon_pcibios_map_irq not set."); +} + + +/* + * Called to perform platform specific PCI setup + */ +int pcibios_plat_dev_init(struct pci_dev *dev) +{ + uint16_t config; + uint32_t dconfig; + int pos; + /* + * Force the Cache line setting to 64 bytes. The standard + * Linux bus scan doesn't seem to set it. Octeon really has + * 128 byte lines, but Intel bridges get really upset if you + * try and set values above 64 bytes. Value is specified in + * 32bit words. 
+ */ + pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE, 64 / 4); + /* Set latency timers for all devices */ + pci_write_config_byte(dev, PCI_LATENCY_TIMER, 64); + + /* Enable reporting System errors and parity errors on all devices */ + /* Enable parity checking and error reporting */ + pci_read_config_word(dev, PCI_COMMAND, &config); + config |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR; + pci_write_config_word(dev, PCI_COMMAND, config); + + if (dev->subordinate) { + /* Set latency timers on sub bridges */ + pci_write_config_byte(dev, PCI_SEC_LATENCY_TIMER, 64); + /* More bridge error detection */ + pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &config); + config |= PCI_BRIDGE_CTL_PARITY | PCI_BRIDGE_CTL_SERR; + pci_write_config_word(dev, PCI_BRIDGE_CONTROL, config); + } + + /* Enable the PCIe normal error reporting */ + config = PCI_EXP_DEVCTL_CERE; /* Correctable Error Reporting */ + config |= PCI_EXP_DEVCTL_NFERE; /* Non-Fatal Error Reporting */ + config |= PCI_EXP_DEVCTL_FERE; /* Fatal Error Reporting */ + config |= PCI_EXP_DEVCTL_URRE; /* Unsupported Request */ + pcie_capability_set_word(dev, PCI_EXP_DEVCTL, config); + + /* Find the Advanced Error Reporting capability */ + pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR); + if (pos) { + /* Clear Uncorrectable Error Status */ + pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, + &dconfig); + pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, + dconfig); + /* Enable reporting of all uncorrectable errors */ + /* Uncorrectable Error Mask - turned on bits disable errors */ + pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_MASK, 0); + /* + * Leave severity at HW default. This only controls if + * errors are reported as uncorrectable or + * correctable, not if the error is reported. + */ + /* PCI_ERR_UNCOR_SEVER - Uncorrectable Error Severity */ + /* Clear Correctable Error Status */ + pci_read_config_dword(dev, pos + PCI_ERR_COR_STATUS, &dconfig); + pci_write_config_dword(dev, pos + PCI_ERR_COR_STATUS, dconfig); + /* Enable reporting of all correctable errors */ + /* Correctable Error Mask - turned on bits disable errors */ + pci_write_config_dword(dev, pos + PCI_ERR_COR_MASK, 0); + /* Advanced Error Capabilities */ + pci_read_config_dword(dev, pos + PCI_ERR_CAP, &dconfig); + /* ECRC Generation Enable */ + if (config & PCI_ERR_CAP_ECRC_GENC) + config |= PCI_ERR_CAP_ECRC_GENE; + /* ECRC Check Enable */ + if (config & PCI_ERR_CAP_ECRC_CHKC) + config |= PCI_ERR_CAP_ECRC_CHKE; + pci_write_config_dword(dev, pos + PCI_ERR_CAP, dconfig); + /* PCI_ERR_HEADER_LOG - Header Log Register (16 bytes) */ + /* Report all errors to the root complex */ + pci_write_config_dword(dev, pos + PCI_ERR_ROOT_COMMAND, + PCI_ERR_ROOT_CMD_COR_EN | + PCI_ERR_ROOT_CMD_NONFATAL_EN | + PCI_ERR_ROOT_CMD_FATAL_EN); + /* Clear the Root status register */ + pci_read_config_dword(dev, pos + PCI_ERR_ROOT_STATUS, &dconfig); + pci_write_config_dword(dev, pos + PCI_ERR_ROOT_STATUS, dconfig); + } + + return 0; +} + +/** + * Return the mapping of PCI device number to IRQ line. Each + * character in the return string represents the interrupt + * line for the device at that position. Device 1 maps to the + * first character, etc. The characters A-D are used for PCI + * interrupts. + * + * Returns PCI interrupt mapping + */ +const char *octeon_get_pci_interrupts(void) +{ + /* + * Returning an empty string causes the interrupts to be + * routed based on the PCI specification. From the PCI spec: + * + * INTA# of Device Number 0 is connected to IRQW on the system + * board. 
(Device Number has no significance regarding being + * located on the system board or in a connector.) INTA# of + * Device Number 1 is connected to IRQX on the system + * board. INTA# of Device Number 2 is connected to IRQY on the + * system board. INTA# of Device Number 3 is connected to IRQZ + * on the system board. The table below describes how each + * agent's INTx# lines are connected to the system board + * interrupt lines. The following equation can be used to + * determine to which INTx# signal on the system board a given + * device's INTx# line(s) is connected. + * + * MB = (D + I) MOD 4 MB = System board Interrupt (IRQW = 0, + * IRQX = 1, IRQY = 2, and IRQZ = 3) D = Device Number I = + * Interrupt Number (INTA# = 0, INTB# = 1, INTC# = 2, and + * INTD# = 3) + */ + if (of_machine_is_compatible("dlink,dsr-500n")) + return "CCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCC"; + switch (octeon_bootinfo->board_type) { + case CVMX_BOARD_TYPE_NAO38: + /* This is really the NAC38 */ + return "AAAAADABAAAAAAAAAAAAAAAAAAAAAAAA"; + case CVMX_BOARD_TYPE_EBH3100: + case CVMX_BOARD_TYPE_CN3010_EVB_HS5: + case CVMX_BOARD_TYPE_CN3005_EVB_HS5: + return "AAABAAAAAAAAAAAAAAAAAAAAAAAAAAAA"; + case CVMX_BOARD_TYPE_BBGW_REF: + return "AABCD"; + case CVMX_BOARD_TYPE_CUST_DSR1000N: + return "CCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCC"; + case CVMX_BOARD_TYPE_THUNDER: + case CVMX_BOARD_TYPE_EBH3000: + default: + return ""; + } +} + +/** + * Map a PCI device to the appropriate interrupt line + * + * @dev: The Linux PCI device structure for the device to map + * @slot: The slot number for this device on __BUS 0__. Linux + * enumerates through all the bridges and figures out the + * slot on Bus 0 where this device eventually hooks to. + * @pin: The PCI interrupt pin read from the device, then swizzled + * as it goes through each bridge. 
+ * Returns Interrupt number for the device + */ +int __init octeon_pci_pcibios_map_irq(const struct pci_dev *dev, + u8 slot, u8 pin) +{ + int irq_num; + const char *interrupts; + int dev_num; + + /* Get the board specific interrupt mapping */ + interrupts = octeon_get_pci_interrupts(); + + dev_num = dev->devfn >> 3; + if (dev_num < strlen(interrupts)) + irq_num = ((interrupts[dev_num] - 'A' + pin - 1) & 3) + + OCTEON_IRQ_PCI_INT0; + else + irq_num = ((slot + pin - 3) & 3) + OCTEON_IRQ_PCI_INT0; + return irq_num; +} + + +/* + * Read a value from configuration space + */ +static int octeon_read_config(struct pci_bus *bus, unsigned int devfn, + int reg, int size, u32 *val) +{ + union octeon_pci_address pci_addr; + + pci_addr.u64 = 0; + pci_addr.s.upper = 2; + pci_addr.s.io = 1; + pci_addr.s.did = 3; + pci_addr.s.subdid = 1; + pci_addr.s.endian_swap = 1; + pci_addr.s.bus = bus->number; + pci_addr.s.dev = devfn >> 3; + pci_addr.s.func = devfn & 0x7; + pci_addr.s.reg = reg; + + switch (size) { + case 4: + *val = le32_to_cpu(cvmx_read64_uint32(pci_addr.u64)); + return PCIBIOS_SUCCESSFUL; + case 2: + *val = le16_to_cpu(cvmx_read64_uint16(pci_addr.u64)); + return PCIBIOS_SUCCESSFUL; + case 1: + *val = cvmx_read64_uint8(pci_addr.u64); + return PCIBIOS_SUCCESSFUL; + } + return PCIBIOS_FUNC_NOT_SUPPORTED; +} + + +/* + * Write a value to PCI configuration space + */ +static int octeon_write_config(struct pci_bus *bus, unsigned int devfn, + int reg, int size, u32 val) +{ + union octeon_pci_address pci_addr; + + pci_addr.u64 = 0; + pci_addr.s.upper = 2; + pci_addr.s.io = 1; + pci_addr.s.did = 3; + pci_addr.s.subdid = 1; + pci_addr.s.endian_swap = 1; + pci_addr.s.bus = bus->number; + pci_addr.s.dev = devfn >> 3; + pci_addr.s.func = devfn & 0x7; + pci_addr.s.reg = reg; + + switch (size) { + case 4: + cvmx_write64_uint32(pci_addr.u64, cpu_to_le32(val)); + return PCIBIOS_SUCCESSFUL; + case 2: + cvmx_write64_uint16(pci_addr.u64, cpu_to_le16(val)); + return PCIBIOS_SUCCESSFUL; + case 1: + cvmx_write64_uint8(pci_addr.u64, val); + return PCIBIOS_SUCCESSFUL; + } + return PCIBIOS_FUNC_NOT_SUPPORTED; +} + + +static struct pci_ops octeon_pci_ops = { + .read = octeon_read_config, + .write = octeon_write_config, +}; + +static struct resource octeon_pci_mem_resource = { + .start = 0, + .end = 0, + .name = "Octeon PCI MEM", + .flags = IORESOURCE_MEM, +}; + +/* + * PCI ports must be above 16KB so the ISA bus filtering in the PCI-X to PCI + * bridge + */ +static struct resource octeon_pci_io_resource = { + .start = 0x4000, + .end = OCTEON_PCI_IOSPACE_SIZE - 1, + .name = "Octeon PCI IO", + .flags = IORESOURCE_IO, +}; + +static struct pci_controller octeon_pci_controller = { + .pci_ops = &octeon_pci_ops, + .mem_resource = &octeon_pci_mem_resource, + .mem_offset = OCTEON_PCI_MEMSPACE_OFFSET, + .io_resource = &octeon_pci_io_resource, + .io_offset = 0, + .io_map_base = OCTEON_PCI_IOSPACE_BASE, +}; + + +/* + * Low level initialize the Octeon PCI controller + */ +static void octeon_pci_initialize(void) +{ + union cvmx_pci_cfg01 cfg01; + union cvmx_npi_ctl_status ctl_status; + union cvmx_pci_ctl_status_2 ctl_status_2; + union cvmx_pci_cfg19 cfg19; + union cvmx_pci_cfg16 cfg16; + union cvmx_pci_cfg22 cfg22; + union cvmx_pci_cfg56 cfg56; + + /* Reset the PCI Bus */ + cvmx_write_csr(CVMX_CIU_SOFT_PRST, 0x1); + cvmx_read_csr(CVMX_CIU_SOFT_PRST); + + udelay(2000); /* Hold PCI reset for 2 ms */ + + ctl_status.u64 = 0; /* cvmx_read_csr(CVMX_NPI_CTL_STATUS); */ + ctl_status.s.max_word = 1; + ctl_status.s.timer = 1; + 
cvmx_write_csr(CVMX_NPI_CTL_STATUS, ctl_status.u64); + + /* Deassert PCI reset and advertize PCX Host Mode Device Capability + (64b) */ + cvmx_write_csr(CVMX_CIU_SOFT_PRST, 0x4); + cvmx_read_csr(CVMX_CIU_SOFT_PRST); + + udelay(2000); /* Wait 2 ms after deasserting PCI reset */ + + ctl_status_2.u32 = 0; + ctl_status_2.s.tsr_hwm = 1; /* Initializes to 0. Must be set + before any PCI reads. */ + ctl_status_2.s.bar2pres = 1; /* Enable BAR2 */ + ctl_status_2.s.bar2_enb = 1; + ctl_status_2.s.bar2_cax = 1; /* Don't use L2 */ + ctl_status_2.s.bar2_esx = 1; + ctl_status_2.s.pmo_amod = 1; /* Round robin priority */ + if (octeon_dma_bar_type == OCTEON_DMA_BAR_TYPE_BIG) { + /* BAR1 hole */ + ctl_status_2.s.bb1_hole = OCTEON_PCI_BAR1_HOLE_BITS; + ctl_status_2.s.bb1_siz = 1; /* BAR1 is 2GB */ + ctl_status_2.s.bb_ca = 1; /* Don't use L2 with big bars */ + ctl_status_2.s.bb_es = 1; /* Big bar in byte swap mode */ + ctl_status_2.s.bb1 = 1; /* BAR1 is big */ + ctl_status_2.s.bb0 = 1; /* BAR0 is big */ + } + + octeon_npi_write32(CVMX_NPI_PCI_CTL_STATUS_2, ctl_status_2.u32); + udelay(2000); /* Wait 2 ms before doing PCI reads */ + + ctl_status_2.u32 = octeon_npi_read32(CVMX_NPI_PCI_CTL_STATUS_2); + pr_notice("PCI Status: %s %s-bit\n", + ctl_status_2.s.ap_pcix ? "PCI-X" : "PCI", + ctl_status_2.s.ap_64ad ? "64" : "32"); + + if (OCTEON_IS_MODEL(OCTEON_CN58XX) || OCTEON_IS_MODEL(OCTEON_CN50XX)) { + union cvmx_pci_cnt_reg cnt_reg_start; + union cvmx_pci_cnt_reg cnt_reg_end; + unsigned long cycles, pci_clock; + + cnt_reg_start.u64 = cvmx_read_csr(CVMX_NPI_PCI_CNT_REG); + cycles = read_c0_cvmcount(); + udelay(1000); + cnt_reg_end.u64 = cvmx_read_csr(CVMX_NPI_PCI_CNT_REG); + cycles = read_c0_cvmcount() - cycles; + pci_clock = (cnt_reg_end.s.pcicnt - cnt_reg_start.s.pcicnt) / + (cycles / (mips_hpt_frequency / 1000000)); + pr_notice("PCI Clock: %lu MHz\n", pci_clock); + } + + /* + * TDOMC must be set to one in PCI mode. TDOMC should be set to 4 + * in PCI-X mode to allow four outstanding splits. Otherwise, + * should not change from its reset value. Don't write PCI_CFG19 + * in PCI mode (0x82000001 reset value), write it to 0x82000004 + * after PCI-X mode is known. MRBCI,MDWE,MDRE -> must be zero. + * MRBCM -> must be one. + */ + if (ctl_status_2.s.ap_pcix) { + cfg19.u32 = 0; + /* + * Target Delayed/Split request outstanding maximum + * count. [1..31] and 0=32. NOTE: If the user + * programs these bits beyond the Designed Maximum + * outstanding count, then the designed maximum table + * depth will be used instead. No additional + * Deferred/Split transactions will be accepted if + * this outstanding maximum count is + * reached. Furthermore, no additional deferred/split + * transactions will be accepted if the I/O delay/ I/O + * Split Request outstanding maximum is reached. + */ + cfg19.s.tdomc = 4; + /* + * Master Deferred Read Request Outstanding Max Count + * (PCI only). CR4C[26:24] Max SAC cycles MAX DAC + * cycles 000 8 4 001 1 0 010 2 1 011 3 1 100 4 2 101 + * 5 2 110 6 3 111 7 3 For example, if these bits are + * programmed to 100, the core can support 2 DAC + * cycles, 4 SAC cycles or a combination of 1 DAC and + * 2 SAC cycles. NOTE: For the PCI-X maximum + * outstanding split transactions, refer to + * CRE0[22:20]. + */ + cfg19.s.mdrrmc = 2; + /* + * Master Request (Memory Read) Byte Count/Byte Enable + * select. 0 = Byte Enables valid. In PCI mode, a + * burst transaction cannot be performed using Memory + * Read command=4?h6. 1 = DWORD Byte Count valid + * (default). 
In PCI Mode, the memory read byte + * enables are automatically generated by the + * core. Note: N3 Master Request transaction sizes are + * always determined through the + * am_attr[<35:32>|<7:0>] field. + */ + cfg19.s.mrbcm = 1; + octeon_npi_write32(CVMX_NPI_PCI_CFG19, cfg19.u32); + } + + + cfg01.u32 = 0; + cfg01.s.msae = 1; /* Memory Space Access Enable */ + cfg01.s.me = 1; /* Master Enable */ + cfg01.s.pee = 1; /* PERR# Enable */ + cfg01.s.see = 1; /* System Error Enable */ + cfg01.s.fbbe = 1; /* Fast Back to Back Transaction Enable */ + + octeon_npi_write32(CVMX_NPI_PCI_CFG01, cfg01.u32); + +#ifdef USE_OCTEON_INTERNAL_ARBITER + /* + * When OCTEON is a PCI host, most systems will use OCTEON's + * internal arbiter, so must enable it before any PCI/PCI-X + * traffic can occur. + */ + { + union cvmx_npi_pci_int_arb_cfg pci_int_arb_cfg; + + pci_int_arb_cfg.u64 = 0; + pci_int_arb_cfg.s.en = 1; /* Internal arbiter enable */ + cvmx_write_csr(CVMX_NPI_PCI_INT_ARB_CFG, pci_int_arb_cfg.u64); + } +#endif /* USE_OCTEON_INTERNAL_ARBITER */ + + /* + * Preferably written to 1 to set MLTD. [RDSATI,TRTAE, + * TWTAE,TMAE,DPPMR -> must be zero. TILT -> must not be set to + * 1..7. + */ + cfg16.u32 = 0; + cfg16.s.mltd = 1; /* Master Latency Timer Disable */ + octeon_npi_write32(CVMX_NPI_PCI_CFG16, cfg16.u32); + + /* + * Should be written to 0x4ff00. MTTV -> must be zero. + * FLUSH -> must be 1. MRV -> should be 0xFF. + */ + cfg22.u32 = 0; + /* Master Retry Value [1..255] and 0=infinite */ + cfg22.s.mrv = 0xff; + /* + * AM_DO_FLUSH_I control NOTE: This bit MUST BE ONE for proper + * N3K operation. + */ + cfg22.s.flush = 1; + octeon_npi_write32(CVMX_NPI_PCI_CFG22, cfg22.u32); + + /* + * MOST Indicates the maximum number of outstanding splits (in -1 + * notation) when OCTEON is in PCI-X mode. PCI-X performance is + * affected by the MOST selection. Should generally be written + * with one of 0x3be807, 0x2be807, 0x1be807, or 0x0be807, + * depending on the desired MOST of 3, 2, 1, or 0, respectively. + */ + cfg56.u32 = 0; + cfg56.s.pxcid = 7; /* RO - PCI-X Capability ID */ + cfg56.s.ncp = 0xe8; /* RO - Next Capability Pointer */ + cfg56.s.dpere = 1; /* Data Parity Error Recovery Enable */ + cfg56.s.roe = 1; /* Relaxed Ordering Enable */ + cfg56.s.mmbc = 1; /* Maximum Memory Byte Count + [0=512B,1=1024B,2=2048B,3=4096B] */ + cfg56.s.most = 3; /* Maximum outstanding Split transactions [0=1 + .. 7=32] */ + + octeon_npi_write32(CVMX_NPI_PCI_CFG56, cfg56.u32); + + /* + * Affects PCI performance when OCTEON services reads to its + * BAR1/BAR2. Refer to Section 10.6.1. The recommended values are + * 0x22, 0x33, and 0x33 for PCI_READ_CMD_6, PCI_READ_CMD_C, and + * PCI_READ_CMD_E, respectively. Unfortunately due to errata DDR-700, + * these values need to be changed so they won't possibly prefetch off + * of the end of memory if PCI is DMAing a buffer at the end of + * memory. Note that these values differ from their reset values. 
+ */ + octeon_npi_write32(CVMX_NPI_PCI_READ_CMD_6, 0x21); + octeon_npi_write32(CVMX_NPI_PCI_READ_CMD_C, 0x31); + octeon_npi_write32(CVMX_NPI_PCI_READ_CMD_E, 0x31); +} + + +/* + * Initialize the Octeon PCI controller + */ +static int __init octeon_pci_setup(void) +{ + union cvmx_npi_mem_access_subidx mem_access; + int index; + + /* Only these chips have PCI */ + if (octeon_has_feature(OCTEON_FEATURE_PCIE)) + return 0; + + if (!octeon_is_pci_host()) { + pr_notice("Not in host mode, PCI Controller not initialized\n"); + return 0; + } + + /* Point pcibios_map_irq() to the PCI version of it */ + octeon_pcibios_map_irq = octeon_pci_pcibios_map_irq; + + /* Only use the big bars on chips that support it */ + if (OCTEON_IS_MODEL(OCTEON_CN31XX) || + OCTEON_IS_MODEL(OCTEON_CN38XX_PASS2) || + OCTEON_IS_MODEL(OCTEON_CN38XX_PASS1)) + octeon_dma_bar_type = OCTEON_DMA_BAR_TYPE_SMALL; + else + octeon_dma_bar_type = OCTEON_DMA_BAR_TYPE_BIG; + + /* PCI I/O and PCI MEM values */ + set_io_port_base(OCTEON_PCI_IOSPACE_BASE); + ioport_resource.start = 0; + ioport_resource.end = OCTEON_PCI_IOSPACE_SIZE - 1; + + pr_notice("%s Octeon big bar support\n", + (octeon_dma_bar_type == + OCTEON_DMA_BAR_TYPE_BIG) ? "Enabling" : "Disabling"); + + octeon_pci_initialize(); + + mem_access.u64 = 0; + mem_access.s.esr = 1; /* Endian-Swap on read. */ + mem_access.s.esw = 1; /* Endian-Swap on write. */ + mem_access.s.nsr = 0; /* No-Snoop on read. */ + mem_access.s.nsw = 0; /* No-Snoop on write. */ + mem_access.s.ror = 0; /* Relax Read on read. */ + mem_access.s.row = 0; /* Relax Order on write. */ + mem_access.s.ba = 0; /* PCI Address bits [63:36]. */ + cvmx_write_csr(CVMX_NPI_MEM_ACCESS_SUBID3, mem_access.u64); + + /* + * Remap the Octeon BAR 2 above all 32 bit devices + * (0x8000000000ul). This is done here so it is remapped + * before the readl()'s below. We don't want BAR2 overlapping + * with BAR0/BAR1 during these reads. + */ + octeon_npi_write32(CVMX_NPI_PCI_CFG08, + (u32)(OCTEON_BAR2_PCI_ADDRESS & 0xffffffffull)); + octeon_npi_write32(CVMX_NPI_PCI_CFG09, + (u32)(OCTEON_BAR2_PCI_ADDRESS >> 32)); + + if (octeon_dma_bar_type == OCTEON_DMA_BAR_TYPE_BIG) { + /* Remap the Octeon BAR 0 to 0-2GB */ + octeon_npi_write32(CVMX_NPI_PCI_CFG04, 0); + octeon_npi_write32(CVMX_NPI_PCI_CFG05, 0); + + /* + * Remap the Octeon BAR 1 to map 2GB-4GB (minus the + * BAR 1 hole). + */ + octeon_npi_write32(CVMX_NPI_PCI_CFG06, 2ul << 30); + octeon_npi_write32(CVMX_NPI_PCI_CFG07, 0); + + /* BAR1 movable mappings set for identity mapping */ + octeon_bar1_pci_phys = 0x80000000ull; + for (index = 0; index < 32; index++) { + union cvmx_pci_bar1_indexx bar1_index; + + bar1_index.u32 = 0; + /* Address bits[35:22] sent to L2C */ + bar1_index.s.addr_idx = + (octeon_bar1_pci_phys >> 22) + index; + /* Don't put PCI accesses in L2. */ + bar1_index.s.ca = 1; + /* Endian Swap Mode */ + bar1_index.s.end_swp = 1; + /* Set '1' when the selected address range is valid. 
*/ + bar1_index.s.addr_v = 1; + octeon_npi_write32(CVMX_NPI_PCI_BAR1_INDEXX(index), + bar1_index.u32); + } + + /* Devices go after BAR1 */ + octeon_pci_mem_resource.start = + OCTEON_PCI_MEMSPACE_OFFSET + (4ul << 30) - + (OCTEON_PCI_BAR1_HOLE_SIZE << 20); + octeon_pci_mem_resource.end = + octeon_pci_mem_resource.start + (1ul << 30); + } else { + /* Remap the Octeon BAR 0 to map 128MB-(128MB+4KB) */ + octeon_npi_write32(CVMX_NPI_PCI_CFG04, 128ul << 20); + octeon_npi_write32(CVMX_NPI_PCI_CFG05, 0); + + /* Remap the Octeon BAR 1 to map 0-128MB */ + octeon_npi_write32(CVMX_NPI_PCI_CFG06, 0); + octeon_npi_write32(CVMX_NPI_PCI_CFG07, 0); + + /* BAR1 movable regions contiguous to cover the swiotlb */ + octeon_bar1_pci_phys = + io_tlb_default_mem.start & ~((1ull << 22) - 1); + + for (index = 0; index < 32; index++) { + union cvmx_pci_bar1_indexx bar1_index; + + bar1_index.u32 = 0; + /* Address bits[35:22] sent to L2C */ + bar1_index.s.addr_idx = + (octeon_bar1_pci_phys >> 22) + index; + /* Don't put PCI accesses in L2. */ + bar1_index.s.ca = 1; + /* Endian Swap Mode */ + bar1_index.s.end_swp = 1; + /* Set '1' when the selected address range is valid. */ + bar1_index.s.addr_v = 1; + octeon_npi_write32(CVMX_NPI_PCI_BAR1_INDEXX(index), + bar1_index.u32); + } + + /* Devices go after BAR0 */ + octeon_pci_mem_resource.start = + OCTEON_PCI_MEMSPACE_OFFSET + (128ul << 20) + + (4ul << 10); + octeon_pci_mem_resource.end = + octeon_pci_mem_resource.start + (1ul << 30); + } + + register_pci_controller(&octeon_pci_controller); + + /* + * Clear any errors that might be pending from before the bus + * was setup properly. + */ + cvmx_write_csr(CVMX_NPI_PCI_INT_SUM2, -1); + + if (IS_ERR(platform_device_register_simple("octeon_pci_edac", + -1, NULL, 0))) + pr_err("Registration of co_pci_edac failed!\n"); + + octeon_pci_dma_init(); + + return 0; +} + +arch_initcall(octeon_pci_setup); diff --git a/arch/mips/pci/pci-rc32434.c b/arch/mips/pci/pci-rc32434.c new file mode 100644 index 000000000..7f6ce6d73 --- /dev/null +++ b/arch/mips/pci/pci-rc32434.c @@ -0,0 +1,231 @@ +/* + * BRIEF MODULE DESCRIPTION + * PCI initialization for IDT EB434 board + * + * Copyright 2004 IDT Inc. (rischelp@idt.com) + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN + * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF + * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON + * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write to the Free Software Foundation, Inc., + * 675 Mass Ave, Cambridge, MA 02139, USA. 
+ */ + +#include +#include +#include +#include + +#include +#include + +#define PCI_ACCESS_READ 0 +#define PCI_ACCESS_WRITE 1 + +/* define an unsigned array for the PCI registers */ +static unsigned int korina_cnfg_regs[25] = { + KORINA_CNFG1, KORINA_CNFG2, KORINA_CNFG3, KORINA_CNFG4, + KORINA_CNFG5, KORINA_CNFG6, KORINA_CNFG7, KORINA_CNFG8, + KORINA_CNFG9, KORINA_CNFG10, KORINA_CNFG11, KORINA_CNFG12, + KORINA_CNFG13, KORINA_CNFG14, KORINA_CNFG15, KORINA_CNFG16, + KORINA_CNFG17, KORINA_CNFG18, KORINA_CNFG19, KORINA_CNFG20, + KORINA_CNFG21, KORINA_CNFG22, KORINA_CNFG23, KORINA_CNFG24 +}; +static struct resource rc32434_res_pci_mem1; +static struct resource rc32434_res_pci_mem2; + +static struct resource rc32434_res_pci_mem1 = { + .name = "PCI MEM1", + .start = 0x50000000, + .end = 0x5FFFFFFF, + .flags = IORESOURCE_MEM, + .sibling = NULL, + .child = &rc32434_res_pci_mem2 +}; + +static struct resource rc32434_res_pci_mem2 = { + .name = "PCI Mem2", + .start = 0x60000000, + .end = 0x6FFFFFFF, + .flags = IORESOURCE_MEM, + .parent = &rc32434_res_pci_mem1, + .sibling = NULL, + .child = NULL +}; + +static struct resource rc32434_res_pci_io1 = { + .name = "PCI I/O1", + .start = 0x18800000, + .end = 0x188FFFFF, + .flags = IORESOURCE_IO, +}; + +extern struct pci_ops rc32434_pci_ops; + +#define PCI_MEM1_START PCI_ADDR_START +#define PCI_MEM1_END (PCI_ADDR_START + CPUTOPCI_MEM_WIN - 1) +#define PCI_MEM2_START (PCI_ADDR_START + CPUTOPCI_MEM_WIN) +#define PCI_MEM2_END (PCI_ADDR_START + (2 * CPUTOPCI_MEM_WIN) - 1) +#define PCI_IO1_START (PCI_ADDR_START + (2 * CPUTOPCI_MEM_WIN)) +#define PCI_IO1_END \ + (PCI_ADDR_START + (2 * CPUTOPCI_MEM_WIN) + CPUTOPCI_IO_WIN - 1) +#define PCI_IO2_START \ + (PCI_ADDR_START + (2 * CPUTOPCI_MEM_WIN) + CPUTOPCI_IO_WIN) +#define PCI_IO2_END \ + (PCI_ADDR_START + (2 * CPUTOPCI_MEM_WIN) + (2 * CPUTOPCI_IO_WIN) - 1) + +struct pci_controller rc32434_controller2; + +struct pci_controller rc32434_controller = { + .pci_ops = &rc32434_pci_ops, + .mem_resource = &rc32434_res_pci_mem1, + .io_resource = &rc32434_res_pci_io1, + .mem_offset = 0, + .io_offset = 0, + +}; + +#ifdef __MIPSEB__ +#define PCI_ENDIAN_FLAG PCILBAC_sb_m +#else +#define PCI_ENDIAN_FLAG 0 +#endif + +static int __init rc32434_pcibridge_init(void) +{ + unsigned int pcicvalue, pcicdata = 0; + unsigned int dummyread, pcicntlval; + int loopCount; + unsigned int pci_config_addr; + + pcicvalue = rc32434_pci->pcic; + pcicvalue = (pcicvalue >> PCIM_SHFT) & PCIM_BIT_LEN; + if (!((pcicvalue == PCIM_H_EA) || + (pcicvalue == PCIM_H_IA_FIX) || + (pcicvalue == PCIM_H_IA_RR))) { + pr_err("PCI init error!!!\n"); + /* Not in Host Mode, return ERROR */ + return -1; + } + /* Enables the Idle Grant mode, Arbiter Parking */ + pcicdata |= (PCI_CTL_IGM | PCI_CTL_EAP | PCI_CTL_EN); + rc32434_pci->pcic = pcicdata; /* Enable the PCI bus Interface */ + /* Zero out the PCI status & PCI Status Mask */ + for (;;) { + pcicdata = rc32434_pci->pcis; + if (!(pcicdata & PCI_STAT_RIP)) + break; + } + + rc32434_pci->pcis = 0; + rc32434_pci->pcism = 0xFFFFFFFF; + /* Zero out the PCI decoupled registers */ + rc32434_pci->pcidac = 0; /* + * disable PCI decoupled accesses at + * initialization + */ + rc32434_pci->pcidas = 0; /* clear the status */ + rc32434_pci->pcidasm = 0x0000007F; /* Mask all the interrupts */ + /* Mask PCI Messaging Interrupts */ + rc32434_pci_msg->pciiic = 0; + rc32434_pci_msg->pciiim = 0xFFFFFFFF; + rc32434_pci_msg->pciioic = 0; + rc32434_pci_msg->pciioim = 0; + + + /* Setup PCILB0 as Memory Window */ + rc32434_pci->pcilba[0].address = 
(unsigned int) (PCI_ADDR_START); + + /* setup the PCI map address as same as the local address */ + + rc32434_pci->pcilba[0].mapping = (unsigned int) (PCI_ADDR_START); + + + /* Setup PCILBA1 as MEM */ + rc32434_pci->pcilba[0].control = + (((SIZE_256MB & 0x1f) << PCI_LBAC_SIZE_BIT) | PCI_ENDIAN_FLAG); + dummyread = rc32434_pci->pcilba[0].control; /* flush the CPU write Buffers */ + rc32434_pci->pcilba[1].address = 0x60000000; + rc32434_pci->pcilba[1].mapping = 0x60000000; + + /* setup PCILBA2 as IO Window */ + rc32434_pci->pcilba[1].control = + (((SIZE_256MB & 0x1f) << PCI_LBAC_SIZE_BIT) | PCI_ENDIAN_FLAG); + dummyread = rc32434_pci->pcilba[1].control; /* flush the CPU write Buffers */ + rc32434_pci->pcilba[2].address = 0x18C00000; + rc32434_pci->pcilba[2].mapping = 0x18FFFFFF; + + /* setup PCILBA2 as IO Window */ + rc32434_pci->pcilba[2].control = + (((SIZE_4MB & 0x1f) << PCI_LBAC_SIZE_BIT) | PCI_ENDIAN_FLAG); + dummyread = rc32434_pci->pcilba[2].control; /* flush the CPU write Buffers */ + + /* Setup PCILBA3 as IO Window */ + rc32434_pci->pcilba[3].address = 0x18800000; + rc32434_pci->pcilba[3].mapping = 0x18800000; + rc32434_pci->pcilba[3].control = + ((((SIZE_1MB & 0x1ff) << PCI_LBAC_SIZE_BIT) | PCI_LBAC_MSI) | + PCI_ENDIAN_FLAG); + dummyread = rc32434_pci->pcilba[3].control; /* flush the CPU write Buffers */ + + pci_config_addr = (unsigned int) (0x80000004); + for (loopCount = 0; loopCount < 24; loopCount++) { + rc32434_pci->pcicfga = pci_config_addr; + dummyread = rc32434_pci->pcicfga; + rc32434_pci->pcicfgd = korina_cnfg_regs[loopCount]; + dummyread = rc32434_pci->pcicfgd; + pci_config_addr += 4; + } + rc32434_pci->pcitc = + (unsigned int) ((PCITC_RTIMER_VAL & 0xff) << PCI_TC_RTIMER_BIT) | + ((PCITC_DTIMER_VAL & 0xff) << PCI_TC_DTIMER_BIT); + + pcicntlval = rc32434_pci->pcic; + pcicntlval &= ~PCI_CTL_TNR; + rc32434_pci->pcic = pcicntlval; + pcicntlval = rc32434_pci->pcic; + + return 0; +} + +static int __init rc32434_pci_init(void) +{ + void __iomem *io_map_base; + + pr_info("PCI: Initializing PCI\n"); + + ioport_resource.start = rc32434_res_pci_io1.start; + ioport_resource.end = rc32434_res_pci_io1.end; + + rc32434_pcibridge_init(); + + io_map_base = ioremap(rc32434_res_pci_io1.start, + resource_size(&rc32434_res_pci_io1)); + + if (!io_map_base) + return -ENOMEM; + + rc32434_controller.io_map_base = + (unsigned long)io_map_base - rc32434_res_pci_io1.start; + + register_pci_controller(&rc32434_controller); + rc32434_sync(); + + return 0; +} + +arch_initcall(rc32434_pci_init); diff --git a/arch/mips/pci/pci-rt2880.c b/arch/mips/pci/pci-rt2880.c new file mode 100644 index 000000000..e9dd01431 --- /dev/null +++ b/arch/mips/pci/pci-rt2880.c @@ -0,0 +1,278 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Ralink RT288x SoC PCI register definitions + * + * Copyright (C) 2009 John Crispin + * Copyright (C) 2009 Gabor Juhos + * + * Parts of this file are based on Ralink's 2.6.21 BSP + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#define RT2880_PCI_BASE 0x00440000 +#define RT288X_CPU_IRQ_PCI 4 + +#define RT2880_PCI_MEM_BASE 0x20000000 +#define RT2880_PCI_MEM_SIZE 0x10000000 +#define RT2880_PCI_IO_BASE 0x00460000 +#define RT2880_PCI_IO_SIZE 0x00010000 + +#define RT2880_PCI_REG_PCICFG_ADDR 0x00 +#define RT2880_PCI_REG_PCIMSK_ADDR 0x0c +#define RT2880_PCI_REG_BAR0SETUP_ADDR 0x10 +#define RT2880_PCI_REG_IMBASEBAR0_ADDR 0x18 +#define RT2880_PCI_REG_CONFIG_ADDR 0x20 +#define RT2880_PCI_REG_CONFIG_DATA 0x24 +#define RT2880_PCI_REG_MEMBASE 
0x28 +#define RT2880_PCI_REG_IOBASE 0x2c +#define RT2880_PCI_REG_ID 0x30 +#define RT2880_PCI_REG_CLASS 0x34 +#define RT2880_PCI_REG_SUBID 0x38 +#define RT2880_PCI_REG_ARBCTL 0x80 + +static void __iomem *rt2880_pci_base; + +static u32 rt2880_pci_reg_read(u32 reg) +{ + return readl(rt2880_pci_base + reg); +} + +static void rt2880_pci_reg_write(u32 val, u32 reg) +{ + writel(val, rt2880_pci_base + reg); +} + +static inline u32 rt2880_pci_get_cfgaddr(unsigned int bus, unsigned int slot, + unsigned int func, unsigned int where) +{ + return ((bus << 16) | (slot << 11) | (func << 8) | (where & 0xfc) | + 0x80000000); +} + +static int rt2880_pci_config_read(struct pci_bus *bus, unsigned int devfn, + int where, int size, u32 *val) +{ + u32 address; + u32 data; + + address = rt2880_pci_get_cfgaddr(bus->number, PCI_SLOT(devfn), + PCI_FUNC(devfn), where); + + rt2880_pci_reg_write(address, RT2880_PCI_REG_CONFIG_ADDR); + data = rt2880_pci_reg_read(RT2880_PCI_REG_CONFIG_DATA); + + switch (size) { + case 1: + *val = (data >> ((where & 3) << 3)) & 0xff; + break; + case 2: + *val = (data >> ((where & 3) << 3)) & 0xffff; + break; + case 4: + *val = data; + break; + } + + return PCIBIOS_SUCCESSFUL; +} + +static int rt2880_pci_config_write(struct pci_bus *bus, unsigned int devfn, + int where, int size, u32 val) +{ + u32 address; + u32 data; + + address = rt2880_pci_get_cfgaddr(bus->number, PCI_SLOT(devfn), + PCI_FUNC(devfn), where); + + rt2880_pci_reg_write(address, RT2880_PCI_REG_CONFIG_ADDR); + data = rt2880_pci_reg_read(RT2880_PCI_REG_CONFIG_DATA); + + switch (size) { + case 1: + data = (data & ~(0xff << ((where & 3) << 3))) | + (val << ((where & 3) << 3)); + break; + case 2: + data = (data & ~(0xffff << ((where & 3) << 3))) | + (val << ((where & 3) << 3)); + break; + case 4: + data = val; + break; + } + + rt2880_pci_reg_write(data, RT2880_PCI_REG_CONFIG_DATA); + + return PCIBIOS_SUCCESSFUL; +} + +static struct pci_ops rt2880_pci_ops = { + .read = rt2880_pci_config_read, + .write = rt2880_pci_config_write, +}; + +static struct resource rt2880_pci_mem_resource = { + .name = "PCI MEM space", + .start = RT2880_PCI_MEM_BASE, + .end = RT2880_PCI_MEM_BASE + RT2880_PCI_MEM_SIZE - 1, + .flags = IORESOURCE_MEM, +}; + +static struct resource rt2880_pci_io_resource = { + .name = "PCI IO space", + .start = RT2880_PCI_IO_BASE, + .end = RT2880_PCI_IO_BASE + RT2880_PCI_IO_SIZE - 1, + .flags = IORESOURCE_IO, +}; + +static struct pci_controller rt2880_pci_controller = { + .pci_ops = &rt2880_pci_ops, + .mem_resource = &rt2880_pci_mem_resource, + .io_resource = &rt2880_pci_io_resource, +}; + +static inline u32 rt2880_pci_read_u32(unsigned long reg) +{ + u32 address; + u32 ret; + + address = rt2880_pci_get_cfgaddr(0, 0, 0, reg); + + rt2880_pci_reg_write(address, RT2880_PCI_REG_CONFIG_ADDR); + ret = rt2880_pci_reg_read(RT2880_PCI_REG_CONFIG_DATA); + + return ret; +} + +static inline void rt2880_pci_write_u32(unsigned long reg, u32 val) +{ + u32 address; + + address = rt2880_pci_get_cfgaddr(0, 0, 0, reg); + + rt2880_pci_reg_write(address, RT2880_PCI_REG_CONFIG_ADDR); + rt2880_pci_reg_write(val, RT2880_PCI_REG_CONFIG_DATA); +} + +int pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) +{ + int irq = -1; + + if (dev->bus->number != 0) + return irq; + + switch (PCI_SLOT(dev->devfn)) { + case 0x00: + break; + case 0x11: + irq = RT288X_CPU_IRQ_PCI; + break; + default: + pr_err("%s:%s[%d] trying to alloc unknown pci irq\n", + __FILE__, __func__, __LINE__); + BUG(); + break; + } + + return irq; +} + +static int 
rt288x_pci_probe(struct platform_device *pdev) +{ + void __iomem *io_map_base; + + rt2880_pci_base = ioremap(RT2880_PCI_BASE, PAGE_SIZE); + + io_map_base = ioremap(RT2880_PCI_IO_BASE, RT2880_PCI_IO_SIZE); + rt2880_pci_controller.io_map_base = (unsigned long) io_map_base; + set_io_port_base((unsigned long) io_map_base); + + ioport_resource.start = RT2880_PCI_IO_BASE; + ioport_resource.end = RT2880_PCI_IO_BASE + RT2880_PCI_IO_SIZE - 1; + + rt2880_pci_reg_write(0, RT2880_PCI_REG_PCICFG_ADDR); + udelay(1); + + rt2880_pci_reg_write(0x79, RT2880_PCI_REG_ARBCTL); + rt2880_pci_reg_write(0x07FF0001, RT2880_PCI_REG_BAR0SETUP_ADDR); + rt2880_pci_reg_write(RT2880_PCI_MEM_BASE, RT2880_PCI_REG_MEMBASE); + rt2880_pci_reg_write(RT2880_PCI_IO_BASE, RT2880_PCI_REG_IOBASE); + rt2880_pci_reg_write(0x08000000, RT2880_PCI_REG_IMBASEBAR0_ADDR); + rt2880_pci_reg_write(0x08021814, RT2880_PCI_REG_ID); + rt2880_pci_reg_write(0x00800001, RT2880_PCI_REG_CLASS); + rt2880_pci_reg_write(0x28801814, RT2880_PCI_REG_SUBID); + rt2880_pci_reg_write(0x000c0000, RT2880_PCI_REG_PCIMSK_ADDR); + + rt2880_pci_write_u32(PCI_BASE_ADDRESS_0, 0x08000000); + (void) rt2880_pci_read_u32(PCI_BASE_ADDRESS_0); + + rt2880_pci_controller.of_node = pdev->dev.of_node; + + register_pci_controller(&rt2880_pci_controller); + return 0; +} + +int pcibios_plat_dev_init(struct pci_dev *dev) +{ + static bool slot0_init; + + /* + * Nobody seems to initialize slot 0, but this platform requires it, so + * do it once when some other slot is being enabled. The PCI subsystem + * should configure other slots properly, so no need to do anything + * special for those. + */ + if (!slot0_init && dev->bus->number == 0) { + u16 cmd; + u32 bar0; + + slot0_init = true; + + pci_bus_write_config_dword(dev->bus, 0, PCI_BASE_ADDRESS_0, + 0x08000000); + pci_bus_read_config_dword(dev->bus, 0, PCI_BASE_ADDRESS_0, + &bar0); + + pci_bus_read_config_word(dev->bus, 0, PCI_COMMAND, &cmd); + cmd |= PCI_COMMAND_MASTER | PCI_COMMAND_IO | PCI_COMMAND_MEMORY; + pci_bus_write_config_word(dev->bus, 0, PCI_COMMAND, cmd); + } + + return 0; +} + +static const struct of_device_id rt288x_pci_match[] = { + { .compatible = "ralink,rt288x-pci" }, + {}, +}; + +static struct platform_driver rt288x_pci_driver = { + .probe = rt288x_pci_probe, + .driver = { + .name = "rt288x-pci", + .of_match_table = rt288x_pci_match, + }, +}; + +int __init pcibios_init(void) +{ + int ret = platform_driver_register(&rt288x_pci_driver); + + if (ret) + pr_info("rt288x-pci: Error registering platform driver!"); + + return ret; +} + +arch_initcall(pcibios_init); diff --git a/arch/mips/pci/pci-rt3883.c b/arch/mips/pci/pci-rt3883.c new file mode 100644 index 000000000..e07ae098b --- /dev/null +++ b/arch/mips/pci/pci-rt3883.c @@ -0,0 +1,582 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Ralink RT3662/RT3883 SoC PCI support + * + * Copyright (C) 2011-2013 Gabor Juhos + * + * Parts of this file are based on Ralink's 2.6.21 BSP + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#define RT3883_MEMORY_BASE 0x00000000 +#define RT3883_MEMORY_SIZE 0x02000000 + +#define RT3883_PCI_REG_PCICFG 0x00 +#define RT3883_PCICFG_P2P_BR_DEVNUM_M 0xf +#define RT3883_PCICFG_P2P_BR_DEVNUM_S 16 +#define RT3883_PCICFG_PCIRST BIT(1) +#define RT3883_PCI_REG_PCIRAW 0x04 +#define RT3883_PCI_REG_PCIINT 0x08 +#define RT3883_PCI_REG_PCIENA 0x0c + +#define RT3883_PCI_REG_CFGADDR 0x20 +#define RT3883_PCI_REG_CFGDATA 0x24 +#define RT3883_PCI_REG_MEMBASE 0x28 
+#define RT3883_PCI_REG_IOBASE 0x2c +#define RT3883_PCI_REG_ARBCTL 0x80 + +#define RT3883_PCI_REG_BASE(_x) (0x1000 + (_x) * 0x1000) +#define RT3883_PCI_REG_BAR0SETUP(_x) (RT3883_PCI_REG_BASE((_x)) + 0x10) +#define RT3883_PCI_REG_IMBASEBAR0(_x) (RT3883_PCI_REG_BASE((_x)) + 0x18) +#define RT3883_PCI_REG_ID(_x) (RT3883_PCI_REG_BASE((_x)) + 0x30) +#define RT3883_PCI_REG_CLASS(_x) (RT3883_PCI_REG_BASE((_x)) + 0x34) +#define RT3883_PCI_REG_SUBID(_x) (RT3883_PCI_REG_BASE((_x)) + 0x38) +#define RT3883_PCI_REG_STATUS(_x) (RT3883_PCI_REG_BASE((_x)) + 0x50) + +#define RT3883_PCI_MODE_NONE 0 +#define RT3883_PCI_MODE_PCI BIT(0) +#define RT3883_PCI_MODE_PCIE BIT(1) +#define RT3883_PCI_MODE_BOTH (RT3883_PCI_MODE_PCI | RT3883_PCI_MODE_PCIE) + +#define RT3883_PCI_IRQ_COUNT 32 + +#define RT3883_P2P_BR_DEVNUM 1 + +struct rt3883_pci_controller { + void __iomem *base; + + struct device_node *intc_of_node; + struct irq_domain *irq_domain; + + struct pci_controller pci_controller; + struct resource io_res; + struct resource mem_res; + + bool pcie_ready; +}; + +static inline struct rt3883_pci_controller * +pci_bus_to_rt3883_controller(struct pci_bus *bus) +{ + struct pci_controller *hose; + + hose = (struct pci_controller *) bus->sysdata; + return container_of(hose, struct rt3883_pci_controller, pci_controller); +} + +static inline u32 rt3883_pci_r32(struct rt3883_pci_controller *rpc, + unsigned reg) +{ + return ioread32(rpc->base + reg); +} + +static inline void rt3883_pci_w32(struct rt3883_pci_controller *rpc, + u32 val, unsigned reg) +{ + iowrite32(val, rpc->base + reg); +} + +static inline u32 rt3883_pci_get_cfgaddr(unsigned int bus, unsigned int slot, + unsigned int func, unsigned int where) +{ + return (bus << 16) | (slot << 11) | (func << 8) | (where & 0xfc) | + 0x80000000; +} + +static u32 rt3883_pci_read_cfg32(struct rt3883_pci_controller *rpc, + unsigned bus, unsigned slot, + unsigned func, unsigned reg) +{ + u32 address; + + address = rt3883_pci_get_cfgaddr(bus, slot, func, reg); + + rt3883_pci_w32(rpc, address, RT3883_PCI_REG_CFGADDR); + + return rt3883_pci_r32(rpc, RT3883_PCI_REG_CFGDATA); +} + +static void rt3883_pci_write_cfg32(struct rt3883_pci_controller *rpc, + unsigned bus, unsigned slot, + unsigned func, unsigned reg, u32 val) +{ + u32 address; + + address = rt3883_pci_get_cfgaddr(bus, slot, func, reg); + + rt3883_pci_w32(rpc, address, RT3883_PCI_REG_CFGADDR); + rt3883_pci_w32(rpc, val, RT3883_PCI_REG_CFGDATA); +} + +static void rt3883_pci_irq_handler(struct irq_desc *desc) +{ + struct rt3883_pci_controller *rpc; + u32 pending; + + rpc = irq_desc_get_handler_data(desc); + + pending = rt3883_pci_r32(rpc, RT3883_PCI_REG_PCIINT) & + rt3883_pci_r32(rpc, RT3883_PCI_REG_PCIENA); + + if (!pending) { + spurious_interrupt(); + return; + } + + while (pending) { + unsigned bit = __ffs(pending); + + generic_handle_domain_irq(rpc->irq_domain, bit); + + pending &= ~BIT(bit); + } +} + +static void rt3883_pci_irq_unmask(struct irq_data *d) +{ + struct rt3883_pci_controller *rpc; + u32 t; + + rpc = irq_data_get_irq_chip_data(d); + + t = rt3883_pci_r32(rpc, RT3883_PCI_REG_PCIENA); + rt3883_pci_w32(rpc, t | BIT(d->hwirq), RT3883_PCI_REG_PCIENA); + /* flush write */ + rt3883_pci_r32(rpc, RT3883_PCI_REG_PCIENA); +} + +static void rt3883_pci_irq_mask(struct irq_data *d) +{ + struct rt3883_pci_controller *rpc; + u32 t; + + rpc = irq_data_get_irq_chip_data(d); + + t = rt3883_pci_r32(rpc, RT3883_PCI_REG_PCIENA); + rt3883_pci_w32(rpc, t & ~BIT(d->hwirq), RT3883_PCI_REG_PCIENA); + /* flush write */ + rt3883_pci_r32(rpc, 
RT3883_PCI_REG_PCIENA); +} + +static struct irq_chip rt3883_pci_irq_chip = { + .name = "RT3883 PCI", + .irq_mask = rt3883_pci_irq_mask, + .irq_unmask = rt3883_pci_irq_unmask, + .irq_mask_ack = rt3883_pci_irq_mask, +}; + +static int rt3883_pci_irq_map(struct irq_domain *d, unsigned int irq, + irq_hw_number_t hw) +{ + irq_set_chip_and_handler(irq, &rt3883_pci_irq_chip, handle_level_irq); + irq_set_chip_data(irq, d->host_data); + + return 0; +} + +static const struct irq_domain_ops rt3883_pci_irq_domain_ops = { + .map = rt3883_pci_irq_map, + .xlate = irq_domain_xlate_onecell, +}; + +static int rt3883_pci_irq_init(struct device *dev, + struct rt3883_pci_controller *rpc) +{ + int irq; + + irq = irq_of_parse_and_map(rpc->intc_of_node, 0); + if (irq == 0) { + dev_err(dev, "%pOF has no IRQ", rpc->intc_of_node); + return -EINVAL; + } + + /* disable all interrupts */ + rt3883_pci_w32(rpc, 0, RT3883_PCI_REG_PCIENA); + + rpc->irq_domain = + irq_domain_add_linear(rpc->intc_of_node, RT3883_PCI_IRQ_COUNT, + &rt3883_pci_irq_domain_ops, + rpc); + if (!rpc->irq_domain) { + dev_err(dev, "unable to add IRQ domain\n"); + return -ENODEV; + } + + irq_set_chained_handler_and_data(irq, rt3883_pci_irq_handler, rpc); + + return 0; +} + +static int rt3883_pci_config_read(struct pci_bus *bus, unsigned int devfn, + int where, int size, u32 *val) +{ + struct rt3883_pci_controller *rpc; + u32 address; + u32 data; + + rpc = pci_bus_to_rt3883_controller(bus); + + if (!rpc->pcie_ready && bus->number == 1) + return PCIBIOS_DEVICE_NOT_FOUND; + + address = rt3883_pci_get_cfgaddr(bus->number, PCI_SLOT(devfn), + PCI_FUNC(devfn), where); + + rt3883_pci_w32(rpc, address, RT3883_PCI_REG_CFGADDR); + data = rt3883_pci_r32(rpc, RT3883_PCI_REG_CFGDATA); + + switch (size) { + case 1: + *val = (data >> ((where & 3) << 3)) & 0xff; + break; + case 2: + *val = (data >> ((where & 3) << 3)) & 0xffff; + break; + case 4: + *val = data; + break; + } + + return PCIBIOS_SUCCESSFUL; +} + +static int rt3883_pci_config_write(struct pci_bus *bus, unsigned int devfn, + int where, int size, u32 val) +{ + struct rt3883_pci_controller *rpc; + u32 address; + u32 data; + + rpc = pci_bus_to_rt3883_controller(bus); + + if (!rpc->pcie_ready && bus->number == 1) + return PCIBIOS_DEVICE_NOT_FOUND; + + address = rt3883_pci_get_cfgaddr(bus->number, PCI_SLOT(devfn), + PCI_FUNC(devfn), where); + + rt3883_pci_w32(rpc, address, RT3883_PCI_REG_CFGADDR); + data = rt3883_pci_r32(rpc, RT3883_PCI_REG_CFGDATA); + + switch (size) { + case 1: + data = (data & ~(0xff << ((where & 3) << 3))) | + (val << ((where & 3) << 3)); + break; + case 2: + data = (data & ~(0xffff << ((where & 3) << 3))) | + (val << ((where & 3) << 3)); + break; + case 4: + data = val; + break; + } + + rt3883_pci_w32(rpc, data, RT3883_PCI_REG_CFGDATA); + + return PCIBIOS_SUCCESSFUL; +} + +static struct pci_ops rt3883_pci_ops = { + .read = rt3883_pci_config_read, + .write = rt3883_pci_config_write, +}; + +static void rt3883_pci_preinit(struct rt3883_pci_controller *rpc, unsigned mode) +{ + u32 syscfg1; + u32 rstctrl; + u32 clkcfg1; + u32 t; + + rstctrl = rt_sysc_r32(RT3883_SYSC_REG_RSTCTRL); + syscfg1 = rt_sysc_r32(RT3883_SYSC_REG_SYSCFG1); + clkcfg1 = rt_sysc_r32(RT3883_SYSC_REG_CLKCFG1); + + if (mode & RT3883_PCI_MODE_PCIE) { + rstctrl |= RT3883_RSTCTRL_PCIE; + rt_sysc_w32(rstctrl, RT3883_SYSC_REG_RSTCTRL); + + /* setup PCI PAD drive mode */ + syscfg1 &= ~(0x30); + syscfg1 |= (2 << 4); + rt_sysc_w32(syscfg1, RT3883_SYSC_REG_SYSCFG1); + + t = rt_sysc_r32(RT3883_SYSC_REG_PCIE_CLK_GEN0); + t &= ~BIT(31); + 
rt_sysc_w32(t, RT3883_SYSC_REG_PCIE_CLK_GEN0); + + t = rt_sysc_r32(RT3883_SYSC_REG_PCIE_CLK_GEN1); + t &= 0x80ffffff; + rt_sysc_w32(t, RT3883_SYSC_REG_PCIE_CLK_GEN1); + + t = rt_sysc_r32(RT3883_SYSC_REG_PCIE_CLK_GEN1); + t |= 0xa << 24; + rt_sysc_w32(t, RT3883_SYSC_REG_PCIE_CLK_GEN1); + + t = rt_sysc_r32(RT3883_SYSC_REG_PCIE_CLK_GEN0); + t |= BIT(31); + rt_sysc_w32(t, RT3883_SYSC_REG_PCIE_CLK_GEN0); + + msleep(50); + + rstctrl &= ~RT3883_RSTCTRL_PCIE; + rt_sysc_w32(rstctrl, RT3883_SYSC_REG_RSTCTRL); + } + + syscfg1 |= (RT3883_SYSCFG1_PCIE_RC_MODE | RT3883_SYSCFG1_PCI_HOST_MODE); + + clkcfg1 &= ~(RT3883_CLKCFG1_PCI_CLK_EN | RT3883_CLKCFG1_PCIE_CLK_EN); + + if (mode & RT3883_PCI_MODE_PCI) { + clkcfg1 |= RT3883_CLKCFG1_PCI_CLK_EN; + rstctrl &= ~RT3883_RSTCTRL_PCI; + } + + if (mode & RT3883_PCI_MODE_PCIE) { + clkcfg1 |= RT3883_CLKCFG1_PCIE_CLK_EN; + rstctrl &= ~RT3883_RSTCTRL_PCIE; + } + + rt_sysc_w32(syscfg1, RT3883_SYSC_REG_SYSCFG1); + rt_sysc_w32(rstctrl, RT3883_SYSC_REG_RSTCTRL); + rt_sysc_w32(clkcfg1, RT3883_SYSC_REG_CLKCFG1); + + msleep(500); + + /* + * setup the device number of the P2P bridge + * and de-assert the reset line + */ + t = (RT3883_P2P_BR_DEVNUM << RT3883_PCICFG_P2P_BR_DEVNUM_S); + rt3883_pci_w32(rpc, t, RT3883_PCI_REG_PCICFG); + + /* flush write */ + rt3883_pci_r32(rpc, RT3883_PCI_REG_PCICFG); + msleep(500); + + if (mode & RT3883_PCI_MODE_PCIE) { + msleep(500); + + t = rt3883_pci_r32(rpc, RT3883_PCI_REG_STATUS(1)); + + rpc->pcie_ready = t & BIT(0); + + if (!rpc->pcie_ready) { + /* reset the PCIe block */ + t = rt_sysc_r32(RT3883_SYSC_REG_RSTCTRL); + t |= RT3883_RSTCTRL_PCIE; + rt_sysc_w32(t, RT3883_SYSC_REG_RSTCTRL); + t &= ~RT3883_RSTCTRL_PCIE; + rt_sysc_w32(t, RT3883_SYSC_REG_RSTCTRL); + + /* turn off PCIe clock */ + t = rt_sysc_r32(RT3883_SYSC_REG_CLKCFG1); + t &= ~RT3883_CLKCFG1_PCIE_CLK_EN; + rt_sysc_w32(t, RT3883_SYSC_REG_CLKCFG1); + + t = rt_sysc_r32(RT3883_SYSC_REG_PCIE_CLK_GEN0); + t &= ~0xf000c080; + rt_sysc_w32(t, RT3883_SYSC_REG_PCIE_CLK_GEN0); + } + } + + /* enable PCI arbiter */ + rt3883_pci_w32(rpc, 0x79, RT3883_PCI_REG_ARBCTL); +} + +static int rt3883_pci_probe(struct platform_device *pdev) +{ + struct rt3883_pci_controller *rpc; + struct device *dev = &pdev->dev; + struct device_node *np = dev->of_node; + struct resource *res; + struct device_node *child; + u32 val; + int err; + int mode; + + rpc = devm_kzalloc(dev, sizeof(*rpc), GFP_KERNEL); + if (!rpc) + return -ENOMEM; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + rpc->base = devm_ioremap_resource(dev, res); + if (IS_ERR(rpc->base)) + return PTR_ERR(rpc->base); + + /* find the interrupt controller child node */ + for_each_child_of_node(np, child) { + if (of_get_property(child, "interrupt-controller", NULL)) { + rpc->intc_of_node = child; + break; + } + } + + if (!rpc->intc_of_node) { + dev_err(dev, "%pOF has no %s child node", + np, "interrupt controller"); + return -EINVAL; + } + + /* find the PCI host bridge child node */ + for_each_child_of_node(np, child) { + if (of_node_is_type(child, "pci")) { + rpc->pci_controller.of_node = child; + break; + } + } + + if (!rpc->pci_controller.of_node) { + dev_err(dev, "%pOF has no %s child node", + np, "PCI host bridge"); + err = -EINVAL; + goto err_put_intc_node; + } + + mode = RT3883_PCI_MODE_NONE; + for_each_available_child_of_node(rpc->pci_controller.of_node, child) { + int devfn; + + if (!of_node_is_type(child, "pci")) + continue; + + devfn = of_pci_get_devfn(child); + if (devfn < 0) + continue; + + switch (PCI_SLOT(devfn)) { + case 1: + mode 
|= RT3883_PCI_MODE_PCIE; + break; + + case 17: + case 18: + mode |= RT3883_PCI_MODE_PCI; + break; + } + } + + if (mode == RT3883_PCI_MODE_NONE) { + dev_err(dev, "unable to determine PCI mode\n"); + err = -EINVAL; + goto err_put_hb_node; + } + + dev_info(dev, "mode:%s%s\n", + (mode & RT3883_PCI_MODE_PCI) ? " PCI" : "", + (mode & RT3883_PCI_MODE_PCIE) ? " PCIe" : ""); + + rt3883_pci_preinit(rpc, mode); + + rpc->pci_controller.pci_ops = &rt3883_pci_ops; + rpc->pci_controller.io_resource = &rpc->io_res; + rpc->pci_controller.mem_resource = &rpc->mem_res; + + /* Load PCI I/O and memory resources from DT */ + pci_load_of_ranges(&rpc->pci_controller, + rpc->pci_controller.of_node); + + rt3883_pci_w32(rpc, rpc->mem_res.start, RT3883_PCI_REG_MEMBASE); + rt3883_pci_w32(rpc, rpc->io_res.start, RT3883_PCI_REG_IOBASE); + + ioport_resource.start = rpc->io_res.start; + ioport_resource.end = rpc->io_res.end; + + /* PCI */ + rt3883_pci_w32(rpc, 0x03ff0000, RT3883_PCI_REG_BAR0SETUP(0)); + rt3883_pci_w32(rpc, RT3883_MEMORY_BASE, RT3883_PCI_REG_IMBASEBAR0(0)); + rt3883_pci_w32(rpc, 0x08021814, RT3883_PCI_REG_ID(0)); + rt3883_pci_w32(rpc, 0x00800001, RT3883_PCI_REG_CLASS(0)); + rt3883_pci_w32(rpc, 0x28801814, RT3883_PCI_REG_SUBID(0)); + + /* PCIe */ + rt3883_pci_w32(rpc, 0x03ff0000, RT3883_PCI_REG_BAR0SETUP(1)); + rt3883_pci_w32(rpc, RT3883_MEMORY_BASE, RT3883_PCI_REG_IMBASEBAR0(1)); + rt3883_pci_w32(rpc, 0x08021814, RT3883_PCI_REG_ID(1)); + rt3883_pci_w32(rpc, 0x06040001, RT3883_PCI_REG_CLASS(1)); + rt3883_pci_w32(rpc, 0x28801814, RT3883_PCI_REG_SUBID(1)); + + err = rt3883_pci_irq_init(dev, rpc); + if (err) + goto err_put_hb_node; + + /* PCIe */ + val = rt3883_pci_read_cfg32(rpc, 0, 0x01, 0, PCI_COMMAND); + val |= PCI_COMMAND_IO | PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER; + rt3883_pci_write_cfg32(rpc, 0, 0x01, 0, PCI_COMMAND, val); + + /* PCI */ + val = rt3883_pci_read_cfg32(rpc, 0, 0x00, 0, PCI_COMMAND); + val |= PCI_COMMAND_IO | PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER; + rt3883_pci_write_cfg32(rpc, 0, 0x00, 0, PCI_COMMAND, val); + + if (mode == RT3883_PCI_MODE_PCIE) { + rt3883_pci_w32(rpc, 0x03ff0001, RT3883_PCI_REG_BAR0SETUP(0)); + rt3883_pci_w32(rpc, 0x03ff0001, RT3883_PCI_REG_BAR0SETUP(1)); + + rt3883_pci_write_cfg32(rpc, 0, RT3883_P2P_BR_DEVNUM, 0, + PCI_BASE_ADDRESS_0, + RT3883_MEMORY_BASE); + /* flush write */ + rt3883_pci_read_cfg32(rpc, 0, RT3883_P2P_BR_DEVNUM, 0, + PCI_BASE_ADDRESS_0); + } else { + rt3883_pci_write_cfg32(rpc, 0, RT3883_P2P_BR_DEVNUM, 0, + PCI_IO_BASE, 0x00000101); + } + + register_pci_controller(&rpc->pci_controller); + + return 0; + +err_put_hb_node: + of_node_put(rpc->pci_controller.of_node); +err_put_intc_node: + of_node_put(rpc->intc_of_node); + return err; +} + +int pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) +{ + return of_irq_parse_and_map_pci(dev, slot, pin); +} + +int pcibios_plat_dev_init(struct pci_dev *dev) +{ + return 0; +} + +static const struct of_device_id rt3883_pci_ids[] = { + { .compatible = "ralink,rt3883-pci" }, + {}, +}; + +static struct platform_driver rt3883_pci_driver = { + .probe = rt3883_pci_probe, + .driver = { + .name = "rt3883-pci", + .of_match_table = of_match_ptr(rt3883_pci_ids), + }, +}; + +static int __init rt3883_pci_init(void) +{ + return platform_driver_register(&rt3883_pci_driver); +} + +postcore_initcall(rt3883_pci_init); diff --git a/arch/mips/pci/pci-sb1250.c b/arch/mips/pci/pci-sb1250.c new file mode 100644 index 000000000..c3f82b280 --- /dev/null +++ b/arch/mips/pci/pci-sb1250.c @@ -0,0 +1,279 @@ +// SPDX-License-Identifier: 
GPL-2.0-or-later +/* + * Copyright (C) 2001,2002,2003 Broadcom Corporation + * Copyright (C) 2004 by Ralf Baechle (ralf@linux-mips.org) + */ + +/* + * BCM1250-specific PCI support + * + * This module provides the glue between Linux's PCI subsystem + * and the hardware. We basically provide glue for accessing + * configuration space, and set up the translation for I/O + * space accesses. + * + * To access configuration space, we use ioremap. In the 32-bit + * kernel, this consumes either 4 or 8 page table pages, and 16MB of + * kernel mapped memory. Hopefully neither of these should be a huge + * problem. + */ +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include +#include +#include +#include + +/* + * Macros for calculating offsets into config space given a device + * structure or dev/fun/reg + */ +#define CFGOFFSET(bus, devfn, where) (((bus)<<16) + ((devfn)<<8) + (where)) +#define CFGADDR(bus, devfn, where) CFGOFFSET((bus)->number, (devfn), where) + +static void *cfg_space; + +#define PCI_BUS_ENABLED 1 +#define LDT_BUS_ENABLED 2 +#define PCI_DEVICE_MODE 4 + +static int sb1250_bus_status; + +#define PCI_BRIDGE_DEVICE 0 +#define LDT_BRIDGE_DEVICE 1 + +#ifdef CONFIG_SIBYTE_HAS_LDT +/* + * HT's level-sensitive interrupts require EOI, which is generated + * through a 4MB memory-mapped region + */ +unsigned long ldt_eoi_space; +#endif + +/* + * Read/write 32-bit values in config space. + */ +static inline u32 READCFG32(u32 addr) +{ + return *(u32 *) (cfg_space + (addr & ~3)); +} + +static inline void WRITECFG32(u32 addr, u32 data) +{ + *(u32 *) (cfg_space + (addr & ~3)) = data; +} + +int pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) +{ + return dev->irq; +} + +/* Do platform specific device initialization at pci_enable_device() time */ +int pcibios_plat_dev_init(struct pci_dev *dev) +{ + return 0; +} + +/* + * Some checks before doing config cycles: + * In PCI Device Mode, hide everything on bus 0 except the LDT host + * bridge. Otherwise, access is controlled by bridge MasterEn bits. + */ +static int sb1250_pci_can_access(struct pci_bus *bus, int devfn) +{ + u32 devno; + + if (!(sb1250_bus_status & (PCI_BUS_ENABLED | PCI_DEVICE_MODE))) + return 0; + + if (bus->number == 0) { + devno = PCI_SLOT(devfn); + if (devno == LDT_BRIDGE_DEVICE) + return (sb1250_bus_status & LDT_BUS_ENABLED) != 0; + else if (sb1250_bus_status & PCI_DEVICE_MODE) + return 0; + else + return 1; + } else + return 1; +} + +/* + * Read/write access functions for various sizes of values + * in config space. Return all 1's for disallowed accesses + * for a kludgy but adequate simulation of master aborts. 
+ */ + +static int sb1250_pcibios_read(struct pci_bus *bus, unsigned int devfn, + int where, int size, u32 * val) +{ + u32 data = 0; + + if ((size == 2) && (where & 1)) + return PCIBIOS_BAD_REGISTER_NUMBER; + else if ((size == 4) && (where & 3)) + return PCIBIOS_BAD_REGISTER_NUMBER; + + if (sb1250_pci_can_access(bus, devfn)) + data = READCFG32(CFGADDR(bus, devfn, where)); + else + data = 0xFFFFFFFF; + + if (size == 1) + *val = (data >> ((where & 3) << 3)) & 0xff; + else if (size == 2) + *val = (data >> ((where & 3) << 3)) & 0xffff; + else + *val = data; + + return PCIBIOS_SUCCESSFUL; +} + +static int sb1250_pcibios_write(struct pci_bus *bus, unsigned int devfn, + int where, int size, u32 val) +{ + u32 cfgaddr = CFGADDR(bus, devfn, where); + u32 data = 0; + + if ((size == 2) && (where & 1)) + return PCIBIOS_BAD_REGISTER_NUMBER; + else if ((size == 4) && (where & 3)) + return PCIBIOS_BAD_REGISTER_NUMBER; + + if (!sb1250_pci_can_access(bus, devfn)) + return PCIBIOS_BAD_REGISTER_NUMBER; + + data = READCFG32(cfgaddr); + + if (size == 1) + data = (data & ~(0xff << ((where & 3) << 3))) | + (val << ((where & 3) << 3)); + else if (size == 2) + data = (data & ~(0xffff << ((where & 3) << 3))) | + (val << ((where & 3) << 3)); + else + data = val; + + WRITECFG32(cfgaddr, data); + + return PCIBIOS_SUCCESSFUL; +} + +struct pci_ops sb1250_pci_ops = { + .read = sb1250_pcibios_read, + .write = sb1250_pcibios_write, +}; + +static struct resource sb1250_mem_resource = { + .name = "SB1250 PCI MEM", + .start = 0x40000000UL, + .end = 0x5fffffffUL, + .flags = IORESOURCE_MEM, +}; + +static struct resource sb1250_io_resource = { + .name = "SB1250 PCI I/O", + .start = 0x00000000UL, + .end = 0x01ffffffUL, + .flags = IORESOURCE_IO, +}; + +struct pci_controller sb1250_controller = { + .pci_ops = &sb1250_pci_ops, + .mem_resource = &sb1250_mem_resource, + .io_resource = &sb1250_io_resource, +}; + +static int __init sb1250_pcibios_init(void) +{ + void __iomem *io_map_base; + uint32_t cmdreg; + uint64_t reg; + + /* CFE will assign PCI resources */ + pci_set_flags(PCI_PROBE_ONLY); + + /* Avoid ISA compat ranges. */ + PCIBIOS_MIN_IO = 0x00008000UL; + PCIBIOS_MIN_MEM = 0x01000000UL; + + /* Set I/O resource limits. */ + ioport_resource.end = 0x01ffffffUL; /* 32MB accessible by sb1250 */ + iomem_resource.end = 0xffffffffUL; /* no HT support yet */ + + cfg_space = + ioremap(A_PHYS_LDTPCI_CFG_MATCH_BITS, 16 * 1024 * 1024); + + /* + * See if the PCI bus has been configured by the firmware. + */ + reg = __raw_readq(IOADDR(A_SCD_SYSTEM_CFG)); + if (!(reg & M_SYS_PCI_HOST)) { + sb1250_bus_status |= PCI_DEVICE_MODE; + } else { + cmdreg = + READCFG32(CFGOFFSET + (0, PCI_DEVFN(PCI_BRIDGE_DEVICE, 0), + PCI_COMMAND)); + if (!(cmdreg & PCI_COMMAND_MASTER)) { + printk + ("PCI: Skipping PCI probe. Bus is not initialized.\n"); + iounmap(cfg_space); + return 0; + } + sb1250_bus_status |= PCI_BUS_ENABLED; + } + + /* + * Establish mappings in KSEG2 (kernel virtual) to PCI I/O + * space. Use "match bytes" policy to make everything look + * little-endian. So, you need to also set + * CONFIG_SWAP_IO_SPACE, but this is the combination that + * works correctly with most of Linux's drivers. + * XXX ehs: Should this happen in PCI Device mode? + */ + io_map_base = ioremap(A_PHYS_LDTPCI_IO_MATCH_BYTES, 1024 * 1024); + sb1250_controller.io_map_base = (unsigned long)io_map_base; + set_io_port_base((unsigned long)io_map_base); + +#ifdef CONFIG_SIBYTE_HAS_LDT + /* + * Also check the LDT bridge's enable, just in case we didn't + * initialize that one. 
+ */ + + cmdreg = READCFG32(CFGOFFSET(0, PCI_DEVFN(LDT_BRIDGE_DEVICE, 0), + PCI_COMMAND)); + if (cmdreg & PCI_COMMAND_MASTER) { + sb1250_bus_status |= LDT_BUS_ENABLED; + + /* + * Need bits 23:16 to convey vector number. Note that + * this consumes 4MB of kernel-mapped memory + * (Kseg2/Kseg3) for 32-bit kernel. + */ + ldt_eoi_space = (unsigned long) + ioremap(A_PHYS_LDT_SPECIAL_MATCH_BYTES, + 4 * 1024 * 1024); + } +#endif + + register_pci_controller(&sb1250_controller); + +#ifdef CONFIG_VGA_CONSOLE + console_lock(); + do_take_over_console(&vga_con, 0, MAX_NR_CONSOLES - 1, 1); + console_unlock(); +#endif + return 0; +} +arch_initcall(sb1250_pcibios_init); diff --git a/arch/mips/pci/pci-tx4927.c b/arch/mips/pci/pci-tx4927.c new file mode 100644 index 000000000..9b3301d19 --- /dev/null +++ b/arch/mips/pci/pci-tx4927.c @@ -0,0 +1,91 @@ +/* + * Based on linux/arch/mips/txx9/rbtx4938/setup.c, + * and RBTX49xx patch from CELF patch archive. + * + * Copyright 2001, 2003-2005 MontaVista Software Inc. + * Copyright (C) 2004 by Ralf Baechle (ralf@linux-mips.org) + * (C) Copyright TOSHIBA CORPORATION 2000-2001, 2004-2007 + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + */ +#include +#include +#include +#include +#include +#include + +int __init tx4927_report_pciclk(void) +{ + int pciclk = 0; + + pr_info("PCIC --%s PCICLK:", + (__raw_readq(&tx4927_ccfgptr->ccfg) & TX4927_CCFG_PCI66) ? + " PCI66" : ""); + if (__raw_readq(&tx4927_ccfgptr->pcfg) & TX4927_PCFG_PCICLKEN_ALL) { + u64 ccfg = __raw_readq(&tx4927_ccfgptr->ccfg); + switch ((unsigned long)ccfg & + TX4927_CCFG_PCIDIVMODE_MASK) { + case TX4927_CCFG_PCIDIVMODE_2_5: + pciclk = txx9_cpu_clock * 2 / 5; break; + case TX4927_CCFG_PCIDIVMODE_3: + pciclk = txx9_cpu_clock / 3; break; + case TX4927_CCFG_PCIDIVMODE_5: + pciclk = txx9_cpu_clock / 5; break; + case TX4927_CCFG_PCIDIVMODE_6: + pciclk = txx9_cpu_clock / 6; break; + } + pr_cont("Internal(%u.%uMHz)", + (pciclk + 50000) / 1000000, + ((pciclk + 50000) / 100000) % 10); + } else { + pr_cont("External"); + pciclk = -1; + } + pr_cont("\n"); + return pciclk; +} + +int __init tx4927_pciclk66_setup(void) +{ + int pciclk; + + /* Assert M66EN */ + tx4927_ccfg_set(TX4927_CCFG_PCI66); + /* Double PCICLK (if possible) */ + if (__raw_readq(&tx4927_ccfgptr->pcfg) & TX4927_PCFG_PCICLKEN_ALL) { + unsigned int pcidivmode = 0; + u64 ccfg = __raw_readq(&tx4927_ccfgptr->ccfg); + pcidivmode = (unsigned long)ccfg & + TX4927_CCFG_PCIDIVMODE_MASK; + switch (pcidivmode) { + case TX4927_CCFG_PCIDIVMODE_5: + case TX4927_CCFG_PCIDIVMODE_2_5: + pcidivmode = TX4927_CCFG_PCIDIVMODE_2_5; + pciclk = txx9_cpu_clock * 2 / 5; + break; + case TX4927_CCFG_PCIDIVMODE_6: + case TX4927_CCFG_PCIDIVMODE_3: + default: + pcidivmode = TX4927_CCFG_PCIDIVMODE_3; + pciclk = txx9_cpu_clock / 3; + } + tx4927_ccfg_change(TX4927_CCFG_PCIDIVMODE_MASK, + pcidivmode); + pr_debug("PCICLK: ccfg:%08lx\n", + (unsigned long)__raw_readq(&tx4927_ccfgptr->ccfg)); + } else + pciclk = -1; + return pciclk; +} + +void __init tx4927_setup_pcierr_irq(void) +{ + if (request_irq(TXX9_IRQ_BASE + TX4927_IR_PCIERR, + tx4927_pcierr_interrupt, + 0, "PCI error", + (void *)TX4927_PCIC_REG)) + pr_warn("Failed to request irq for PCIERR\n"); +} diff --git a/arch/mips/pci/pci-tx4938.c b/arch/mips/pci/pci-tx4938.c new file mode 100644 index 000000000..a6418460e --- /dev/null +++ b/arch/mips/pci/pci-tx4938.c @@ -0,0 +1,142 @@ +/* + * Based on 
linux/arch/mips/txx9/rbtx4938/setup.c, + * and RBTX49xx patch from CELF patch archive. + * + * Copyright 2001, 2003-2005 MontaVista Software Inc. + * Copyright (C) 2004 by Ralf Baechle (ralf@linux-mips.org) + * (C) Copyright TOSHIBA CORPORATION 2000-2001, 2004-2007 + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + */ +#include +#include +#include +#include +#include +#include + +int __init tx4938_report_pciclk(void) +{ + int pciclk = 0; + + pr_info("PCIC --%s PCICLK:", + (__raw_readq(&tx4938_ccfgptr->ccfg) & TX4938_CCFG_PCI66) ? + " PCI66" : ""); + if (__raw_readq(&tx4938_ccfgptr->pcfg) & TX4938_PCFG_PCICLKEN_ALL) { + u64 ccfg = __raw_readq(&tx4938_ccfgptr->ccfg); + switch ((unsigned long)ccfg & + TX4938_CCFG_PCIDIVMODE_MASK) { + case TX4938_CCFG_PCIDIVMODE_4: + pciclk = txx9_cpu_clock / 4; break; + case TX4938_CCFG_PCIDIVMODE_4_5: + pciclk = txx9_cpu_clock * 2 / 9; break; + case TX4938_CCFG_PCIDIVMODE_5: + pciclk = txx9_cpu_clock / 5; break; + case TX4938_CCFG_PCIDIVMODE_5_5: + pciclk = txx9_cpu_clock * 2 / 11; break; + case TX4938_CCFG_PCIDIVMODE_8: + pciclk = txx9_cpu_clock / 8; break; + case TX4938_CCFG_PCIDIVMODE_9: + pciclk = txx9_cpu_clock / 9; break; + case TX4938_CCFG_PCIDIVMODE_10: + pciclk = txx9_cpu_clock / 10; break; + case TX4938_CCFG_PCIDIVMODE_11: + pciclk = txx9_cpu_clock / 11; break; + } + pr_cont("Internal(%u.%uMHz)", + (pciclk + 50000) / 1000000, + ((pciclk + 50000) / 100000) % 10); + } else { + pr_cont("External"); + pciclk = -1; + } + pr_cont("\n"); + return pciclk; +} + +void __init tx4938_report_pci1clk(void) +{ + __u64 ccfg = __raw_readq(&tx4938_ccfgptr->ccfg); + unsigned int pciclk = + txx9_gbus_clock / ((ccfg & TX4938_CCFG_PCI1DMD) ? 4 : 2); + + pr_info("PCIC1 -- %sPCICLK:%u.%uMHz\n", + (ccfg & TX4938_CCFG_PCI1_66) ? 
"PCI66 " : "", + (pciclk + 50000) / 1000000, + ((pciclk + 50000) / 100000) % 10); +} + +int __init tx4938_pciclk66_setup(void) +{ + int pciclk; + + /* Assert M66EN */ + tx4938_ccfg_set(TX4938_CCFG_PCI66); + /* Double PCICLK (if possible) */ + if (__raw_readq(&tx4938_ccfgptr->pcfg) & TX4938_PCFG_PCICLKEN_ALL) { + unsigned int pcidivmode = 0; + u64 ccfg = __raw_readq(&tx4938_ccfgptr->ccfg); + pcidivmode = (unsigned long)ccfg & + TX4938_CCFG_PCIDIVMODE_MASK; + switch (pcidivmode) { + case TX4938_CCFG_PCIDIVMODE_8: + case TX4938_CCFG_PCIDIVMODE_4: + pcidivmode = TX4938_CCFG_PCIDIVMODE_4; + pciclk = txx9_cpu_clock / 4; + break; + case TX4938_CCFG_PCIDIVMODE_9: + case TX4938_CCFG_PCIDIVMODE_4_5: + pcidivmode = TX4938_CCFG_PCIDIVMODE_4_5; + pciclk = txx9_cpu_clock * 2 / 9; + break; + case TX4938_CCFG_PCIDIVMODE_10: + case TX4938_CCFG_PCIDIVMODE_5: + pcidivmode = TX4938_CCFG_PCIDIVMODE_5; + pciclk = txx9_cpu_clock / 5; + break; + case TX4938_CCFG_PCIDIVMODE_11: + case TX4938_CCFG_PCIDIVMODE_5_5: + default: + pcidivmode = TX4938_CCFG_PCIDIVMODE_5_5; + pciclk = txx9_cpu_clock * 2 / 11; + break; + } + tx4938_ccfg_change(TX4938_CCFG_PCIDIVMODE_MASK, + pcidivmode); + pr_debug("PCICLK: ccfg:%08lx\n", + (unsigned long)__raw_readq(&tx4938_ccfgptr->ccfg)); + } else + pciclk = -1; + return pciclk; +} + +int tx4938_pcic1_map_irq(const struct pci_dev *dev, u8 slot) +{ + if (get_tx4927_pcicptr(dev->bus->sysdata) == tx4938_pcic1ptr) { + switch (slot) { + case TX4927_PCIC_IDSEL_AD_TO_SLOT(31): + if (__raw_readq(&tx4938_ccfgptr->pcfg) & + TX4938_PCFG_ETH0_SEL) + return TXX9_IRQ_BASE + TX4938_IR_ETH0; + break; + case TX4927_PCIC_IDSEL_AD_TO_SLOT(30): + if (__raw_readq(&tx4938_ccfgptr->pcfg) & + TX4938_PCFG_ETH1_SEL) + return TXX9_IRQ_BASE + TX4938_IR_ETH1; + break; + } + return 0; + } + return -1; +} + +void __init tx4938_setup_pcierr_irq(void) +{ + if (request_irq(TXX9_IRQ_BASE + TX4938_IR_PCIERR, + tx4927_pcierr_interrupt, + 0, "PCI error", + (void *)TX4927_PCIC_REG)) + pr_warn("Failed to request irq for PCIERR\n"); +} diff --git a/arch/mips/pci/pci-xtalk-bridge.c b/arch/mips/pci/pci-xtalk-bridge.c new file mode 100644 index 000000000..ab9bedb82 --- /dev/null +++ b/arch/mips/pci/pci-xtalk-bridge.c @@ -0,0 +1,760 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2003 Christoph Hellwig (hch@lst.de) + * Copyright (C) 1999, 2000, 04 Ralf Baechle (ralf@linux-mips.org) + * Copyright (C) 1999, 2000 Silicon Graphics, Inc. + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#define CRC16_INIT 0 +#define CRC16_VALID 0xb001 + +/* + * Common phys<->dma mapping for platforms using pci xtalk bridge + */ +dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr) +{ + struct pci_dev *pdev = to_pci_dev(dev); + struct bridge_controller *bc = BRIDGE_CONTROLLER(pdev->bus); + + return bc->baddr + paddr; +} + +phys_addr_t dma_to_phys(struct device *dev, dma_addr_t dma_addr) +{ + return dma_addr & ~(0xffUL << 56); +} + +/* + * Most of the IOC3 PCI config register aren't present + * we emulate what is needed for a normal PCI enumeration + */ +static int ioc3_cfg_rd(void *addr, int where, int size, u32 *value, u32 sid) +{ + u32 cf, shift, mask; + + switch (where & ~3) { + case 0x00 ... 0x10: + case 0x40 ... 
0x44: + if (get_dbe(cf, (u32 *)addr)) + return PCIBIOS_DEVICE_NOT_FOUND; + break; + case 0x2c: + cf = sid; + break; + case 0x3c: + /* emulate sane interrupt pin value */ + cf = 0x00000100; + break; + default: + cf = 0; + break; + } + shift = (where & 3) << 3; + mask = 0xffffffffU >> ((4 - size) << 3); + *value = (cf >> shift) & mask; + + return PCIBIOS_SUCCESSFUL; +} + +static int ioc3_cfg_wr(void *addr, int where, int size, u32 value) +{ + u32 cf, shift, mask, smask; + + if ((where >= 0x14 && where < 0x40) || (where >= 0x48)) + return PCIBIOS_SUCCESSFUL; + + if (get_dbe(cf, (u32 *)addr)) + return PCIBIOS_DEVICE_NOT_FOUND; + + shift = ((where & 3) << 3); + mask = (0xffffffffU >> ((4 - size) << 3)); + smask = mask << shift; + + cf = (cf & ~smask) | ((value & mask) << shift); + if (put_dbe(cf, (u32 *)addr)) + return PCIBIOS_DEVICE_NOT_FOUND; + + return PCIBIOS_SUCCESSFUL; +} + +static void bridge_disable_swapping(struct pci_dev *dev) +{ + struct bridge_controller *bc = BRIDGE_CONTROLLER(dev->bus); + int slot = PCI_SLOT(dev->devfn); + + /* Turn off byte swapping */ + bridge_clr(bc, b_device[slot].reg, BRIDGE_DEV_SWAP_DIR); + bridge_read(bc, b_widget.w_tflush); /* Flush */ +} + +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SGI, PCI_DEVICE_ID_SGI_IOC3, + bridge_disable_swapping); + + +/* + * The Bridge ASIC supports both type 0 and type 1 access. Type 1 is + * not really documented, so right now I can't write code which uses it. + * Therefore we use type 0 accesses for now even though they won't work + * correctly for PCI-to-PCI bridges. + * + * The function is complicated by the ultimate brokenness of the IOC3 chip + * which is used in SGI systems. The IOC3 can only handle 32-bit PCI + * accesses and does only decode parts of it's address space. + */ +static int pci_conf0_read_config(struct pci_bus *bus, unsigned int devfn, + int where, int size, u32 *value) +{ + struct bridge_controller *bc = BRIDGE_CONTROLLER(bus); + struct bridge_regs *bridge = bc->base; + int slot = PCI_SLOT(devfn); + int fn = PCI_FUNC(devfn); + void *addr; + u32 cf; + int res; + + addr = &bridge->b_type0_cfg_dev[slot].f[fn].c[PCI_VENDOR_ID]; + if (get_dbe(cf, (u32 *)addr)) + return PCIBIOS_DEVICE_NOT_FOUND; + + /* + * IOC3 is broken beyond belief ... Don't even give the + * generic PCI code a chance to look at it for real ... + */ + if (cf == (PCI_VENDOR_ID_SGI | (PCI_DEVICE_ID_SGI_IOC3 << 16))) { + addr = &bridge->b_type0_cfg_dev[slot].f[fn].l[where >> 2]; + return ioc3_cfg_rd(addr, where, size, value, + bc->ioc3_sid[slot]); + } + + addr = &bridge->b_type0_cfg_dev[slot].f[fn].c[where ^ (4 - size)]; + + if (size == 1) + res = get_dbe(*value, (u8 *)addr); + else if (size == 2) + res = get_dbe(*value, (u16 *)addr); + else + res = get_dbe(*value, (u32 *)addr); + + return res ? PCIBIOS_DEVICE_NOT_FOUND : PCIBIOS_SUCCESSFUL; +} + +static int pci_conf1_read_config(struct pci_bus *bus, unsigned int devfn, + int where, int size, u32 *value) +{ + struct bridge_controller *bc = BRIDGE_CONTROLLER(bus); + struct bridge_regs *bridge = bc->base; + int busno = bus->number; + int slot = PCI_SLOT(devfn); + int fn = PCI_FUNC(devfn); + void *addr; + u32 cf; + int res; + + bridge_write(bc, b_pci_cfg, (busno << 16) | (slot << 11)); + addr = &bridge->b_type1_cfg.c[(fn << 8) | PCI_VENDOR_ID]; + if (get_dbe(cf, (u32 *)addr)) + return PCIBIOS_DEVICE_NOT_FOUND; + + /* + * IOC3 is broken beyond belief ... Don't even give the + * generic PCI code a chance to look at it for real ... 
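+	 * Config accesses to it are emulated by ioc3_cfg_rd()/ioc3_cfg_wr(),
+	 * which only touch the 32-bit registers the chip actually decodes.
+	 * For all other devices, the (where ^ (4 - size)) offset below picks
+	 * the byte lane matching the access size.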
+ */ + if (cf == (PCI_VENDOR_ID_SGI | (PCI_DEVICE_ID_SGI_IOC3 << 16))) { + addr = &bridge->b_type1_cfg.c[(fn << 8) | (where & ~3)]; + return ioc3_cfg_rd(addr, where, size, value, + bc->ioc3_sid[slot]); + } + + addr = &bridge->b_type1_cfg.c[(fn << 8) | (where ^ (4 - size))]; + + if (size == 1) + res = get_dbe(*value, (u8 *)addr); + else if (size == 2) + res = get_dbe(*value, (u16 *)addr); + else + res = get_dbe(*value, (u32 *)addr); + + return res ? PCIBIOS_DEVICE_NOT_FOUND : PCIBIOS_SUCCESSFUL; +} + +static int pci_read_config(struct pci_bus *bus, unsigned int devfn, + int where, int size, u32 *value) +{ + if (!pci_is_root_bus(bus)) + return pci_conf1_read_config(bus, devfn, where, size, value); + + return pci_conf0_read_config(bus, devfn, where, size, value); +} + +static int pci_conf0_write_config(struct pci_bus *bus, unsigned int devfn, + int where, int size, u32 value) +{ + struct bridge_controller *bc = BRIDGE_CONTROLLER(bus); + struct bridge_regs *bridge = bc->base; + int slot = PCI_SLOT(devfn); + int fn = PCI_FUNC(devfn); + void *addr; + u32 cf; + int res; + + addr = &bridge->b_type0_cfg_dev[slot].f[fn].c[PCI_VENDOR_ID]; + if (get_dbe(cf, (u32 *)addr)) + return PCIBIOS_DEVICE_NOT_FOUND; + + /* + * IOC3 is broken beyond belief ... Don't even give the + * generic PCI code a chance to look at it for real ... + */ + if (cf == (PCI_VENDOR_ID_SGI | (PCI_DEVICE_ID_SGI_IOC3 << 16))) { + addr = &bridge->b_type0_cfg_dev[slot].f[fn].l[where >> 2]; + return ioc3_cfg_wr(addr, where, size, value); + } + + addr = &bridge->b_type0_cfg_dev[slot].f[fn].c[where ^ (4 - size)]; + + if (size == 1) + res = put_dbe(value, (u8 *)addr); + else if (size == 2) + res = put_dbe(value, (u16 *)addr); + else + res = put_dbe(value, (u32 *)addr); + + if (res) + return PCIBIOS_DEVICE_NOT_FOUND; + + return PCIBIOS_SUCCESSFUL; +} + +static int pci_conf1_write_config(struct pci_bus *bus, unsigned int devfn, + int where, int size, u32 value) +{ + struct bridge_controller *bc = BRIDGE_CONTROLLER(bus); + struct bridge_regs *bridge = bc->base; + int slot = PCI_SLOT(devfn); + int fn = PCI_FUNC(devfn); + int busno = bus->number; + void *addr; + u32 cf; + int res; + + bridge_write(bc, b_pci_cfg, (busno << 16) | (slot << 11)); + addr = &bridge->b_type1_cfg.c[(fn << 8) | PCI_VENDOR_ID]; + if (get_dbe(cf, (u32 *)addr)) + return PCIBIOS_DEVICE_NOT_FOUND; + + /* + * IOC3 is broken beyond belief ... Don't even give the + * generic PCI code a chance to look at it for real ... 
+ */ + if (cf == (PCI_VENDOR_ID_SGI | (PCI_DEVICE_ID_SGI_IOC3 << 16))) { + addr = &bridge->b_type0_cfg_dev[slot].f[fn].l[where >> 2]; + return ioc3_cfg_wr(addr, where, size, value); + } + + addr = &bridge->b_type1_cfg.c[(fn << 8) | (where ^ (4 - size))]; + + if (size == 1) + res = put_dbe(value, (u8 *)addr); + else if (size == 2) + res = put_dbe(value, (u16 *)addr); + else + res = put_dbe(value, (u32 *)addr); + + if (res) + return PCIBIOS_DEVICE_NOT_FOUND; + + return PCIBIOS_SUCCESSFUL; +} + +static int pci_write_config(struct pci_bus *bus, unsigned int devfn, + int where, int size, u32 value) +{ + if (!pci_is_root_bus(bus)) + return pci_conf1_write_config(bus, devfn, where, size, value); + + return pci_conf0_write_config(bus, devfn, where, size, value); +} + +static struct pci_ops bridge_pci_ops = { + .read = pci_read_config, + .write = pci_write_config, +}; + +struct bridge_irq_chip_data { + struct bridge_controller *bc; + nasid_t nasid; +}; + +static int bridge_set_affinity(struct irq_data *d, const struct cpumask *mask, + bool force) +{ +#ifdef CONFIG_NUMA + struct bridge_irq_chip_data *data = d->chip_data; + int bit = d->parent_data->hwirq; + int pin = d->hwirq; + int ret, cpu; + + ret = irq_chip_set_affinity_parent(d, mask, force); + if (ret >= 0) { + cpu = cpumask_first_and(mask, cpu_online_mask); + data->nasid = cpu_to_node(cpu); + bridge_write(data->bc, b_int_addr[pin].addr, + (((data->bc->intr_addr >> 30) & 0x30000) | + bit | (data->nasid << 8))); + bridge_read(data->bc, b_wid_tflush); + } + return ret; +#else + return irq_chip_set_affinity_parent(d, mask, force); +#endif +} + +struct irq_chip bridge_irq_chip = { + .name = "BRIDGE", + .irq_mask = irq_chip_mask_parent, + .irq_unmask = irq_chip_unmask_parent, + .irq_set_affinity = bridge_set_affinity +}; + +static int bridge_domain_alloc(struct irq_domain *domain, unsigned int virq, + unsigned int nr_irqs, void *arg) +{ + struct bridge_irq_chip_data *data; + struct irq_alloc_info *info = arg; + int ret; + + if (nr_irqs > 1 || !info) + return -EINVAL; + + data = kzalloc(sizeof(*data), GFP_KERNEL); + if (!data) + return -ENOMEM; + + ret = irq_domain_alloc_irqs_parent(domain, virq, nr_irqs, arg); + if (ret >= 0) { + data->bc = info->ctrl; + data->nasid = info->nasid; + irq_domain_set_info(domain, virq, info->pin, &bridge_irq_chip, + data, handle_level_irq, NULL, NULL); + } else { + kfree(data); + } + + return ret; +} + +static void bridge_domain_free(struct irq_domain *domain, unsigned int virq, + unsigned int nr_irqs) +{ + struct irq_data *irqd = irq_domain_get_irq_data(domain, virq); + + if (nr_irqs) + return; + + kfree(irqd->chip_data); + irq_domain_free_irqs_top(domain, virq, nr_irqs); +} + +static int bridge_domain_activate(struct irq_domain *domain, + struct irq_data *irqd, bool reserve) +{ + struct bridge_irq_chip_data *data = irqd->chip_data; + struct bridge_controller *bc = data->bc; + int bit = irqd->parent_data->hwirq; + int pin = irqd->hwirq; + u32 device; + + bridge_write(bc, b_int_addr[pin].addr, + (((bc->intr_addr >> 30) & 0x30000) | + bit | (data->nasid << 8))); + bridge_set(bc, b_int_enable, (1 << pin)); + bridge_set(bc, b_int_enable, 0x7ffffe00); /* more stuff in int_enable */ + + /* + * Enable sending of an interrupt clear packet to the hub on a high to + * low transition of the interrupt pin. + * + * IRIX sets additional bits in the address which are documented as + * reserved in the bridge docs. 
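+	 *
+	 * The value written to b_int_addr[pin].addr packs the hub interrupt
+	 * bit in the low byte and the target nasid at bits 15:8;
+	 * bridge_set_affinity() rewrites the same encoding when an IRQ is
+	 * retargeted.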
+ */ + bridge_set(bc, b_int_mode, (1UL << pin)); + + /* + * We assume the bridge to have a 1:1 mapping between devices + * (slots) and intr pins. + */ + device = bridge_read(bc, b_int_device); + device &= ~(7 << (pin*3)); + device |= (pin << (pin*3)); + bridge_write(bc, b_int_device, device); + + bridge_read(bc, b_wid_tflush); + return 0; +} + +static void bridge_domain_deactivate(struct irq_domain *domain, + struct irq_data *irqd) +{ + struct bridge_irq_chip_data *data = irqd->chip_data; + + bridge_clr(data->bc, b_int_enable, (1 << irqd->hwirq)); + bridge_read(data->bc, b_wid_tflush); +} + +static const struct irq_domain_ops bridge_domain_ops = { + .alloc = bridge_domain_alloc, + .free = bridge_domain_free, + .activate = bridge_domain_activate, + .deactivate = bridge_domain_deactivate +}; + +/* + * All observed requests have pin == 1. We could have a global here, that + * gets incremented and returned every time - unfortunately, pci_map_irq + * may be called on the same device over and over, and need to return the + * same value. On O2000, pin can be 0 or 1, and PCI slots can be [0..7]. + * + * A given PCI device, in general, should be able to intr any of the cpus + * on any one of the hubs connected to its xbow. + */ +static int bridge_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) +{ + struct bridge_controller *bc = BRIDGE_CONTROLLER(dev->bus); + struct irq_alloc_info info; + int irq; + + switch (pin) { + case PCI_INTERRUPT_UNKNOWN: + case PCI_INTERRUPT_INTA: + case PCI_INTERRUPT_INTC: + pin = 0; + break; + case PCI_INTERRUPT_INTB: + case PCI_INTERRUPT_INTD: + pin = 1; + } + + irq = bc->pci_int[slot][pin]; + if (irq == -1) { + info.ctrl = bc; + info.nasid = bc->nasid; + info.pin = bc->int_mapping[slot][pin]; + + irq = irq_domain_alloc_irqs(bc->domain, 1, bc->nasid, &info); + if (irq < 0) + return irq; + + bc->pci_int[slot][pin] = irq; + } + return irq; +} + +#define IOC3_SID(sid) (PCI_VENDOR_ID_SGI | ((sid) << 16)) + +static void bridge_setup_ip27_baseio6g(struct bridge_controller *bc) +{ + bc->ioc3_sid[2] = IOC3_SID(IOC3_SUBSYS_IP27_BASEIO6G); + bc->ioc3_sid[6] = IOC3_SID(IOC3_SUBSYS_IP27_MIO); + bc->int_mapping[2][1] = 4; + bc->int_mapping[6][1] = 6; +} + +static void bridge_setup_ip27_baseio(struct bridge_controller *bc) +{ + bc->ioc3_sid[2] = IOC3_SID(IOC3_SUBSYS_IP27_BASEIO); + bc->int_mapping[2][1] = 4; +} + +static void bridge_setup_ip29_baseio(struct bridge_controller *bc) +{ + bc->ioc3_sid[2] = IOC3_SID(IOC3_SUBSYS_IP29_SYSBOARD); + bc->int_mapping[2][1] = 3; +} + +static void bridge_setup_ip30_sysboard(struct bridge_controller *bc) +{ + bc->ioc3_sid[2] = IOC3_SID(IOC3_SUBSYS_IP30_SYSBOARD); + bc->int_mapping[2][1] = 4; +} + +static void bridge_setup_menet(struct bridge_controller *bc) +{ + bc->ioc3_sid[0] = IOC3_SID(IOC3_SUBSYS_MENET); + bc->ioc3_sid[1] = IOC3_SID(IOC3_SUBSYS_MENET); + bc->ioc3_sid[2] = IOC3_SID(IOC3_SUBSYS_MENET); + bc->ioc3_sid[3] = IOC3_SID(IOC3_SUBSYS_MENET4); +} + +static void bridge_setup_io7(struct bridge_controller *bc) +{ + bc->ioc3_sid[4] = IOC3_SID(IOC3_SUBSYS_IO7); +} + +static void bridge_setup_io8(struct bridge_controller *bc) +{ + bc->ioc3_sid[4] = IOC3_SID(IOC3_SUBSYS_IO8); +} + +static void bridge_setup_io9(struct bridge_controller *bc) +{ + bc->ioc3_sid[1] = IOC3_SID(IOC3_SUBSYS_IO9); +} + +static void bridge_setup_ip34_fuel_sysboard(struct bridge_controller *bc) +{ + bc->ioc3_sid[4] = IOC3_SID(IOC3_SUBSYS_IP34_SYSBOARD); +} + +#define BRIDGE_BOARD_SETUP(_partno, _setup) \ + { .match = _partno, .setup = _setup } + +static const struct { 
+ char *match; + void (*setup)(struct bridge_controller *bc); +} bridge_ioc3_devid[] = { + BRIDGE_BOARD_SETUP("030-0734-", bridge_setup_ip27_baseio6g), + BRIDGE_BOARD_SETUP("030-0880-", bridge_setup_ip27_baseio6g), + BRIDGE_BOARD_SETUP("030-1023-", bridge_setup_ip27_baseio), + BRIDGE_BOARD_SETUP("030-1124-", bridge_setup_ip27_baseio), + BRIDGE_BOARD_SETUP("030-1025-", bridge_setup_ip29_baseio), + BRIDGE_BOARD_SETUP("030-1244-", bridge_setup_ip29_baseio), + BRIDGE_BOARD_SETUP("030-1389-", bridge_setup_ip29_baseio), + BRIDGE_BOARD_SETUP("030-0887-", bridge_setup_ip30_sysboard), + BRIDGE_BOARD_SETUP("030-1467-", bridge_setup_ip30_sysboard), + BRIDGE_BOARD_SETUP("030-0873-", bridge_setup_menet), + BRIDGE_BOARD_SETUP("030-1557-", bridge_setup_io7), + BRIDGE_BOARD_SETUP("030-1673-", bridge_setup_io8), + BRIDGE_BOARD_SETUP("030-1771-", bridge_setup_io9), + BRIDGE_BOARD_SETUP("030-1707-", bridge_setup_ip34_fuel_sysboard), +}; + +static void bridge_setup_board(struct bridge_controller *bc, char *partnum) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(bridge_ioc3_devid); i++) + if (!strncmp(partnum, bridge_ioc3_devid[i].match, + strlen(bridge_ioc3_devid[i].match))) { + bridge_ioc3_devid[i].setup(bc); + } +} + +static int bridge_nvmem_match(struct device *dev, const void *data) +{ + const char *name = dev_name(dev); + const char *prefix = data; + + if (strlen(name) < strlen(prefix)) + return 0; + + return memcmp(prefix, dev_name(dev), strlen(prefix)) == 0; +} + +static int bridge_get_partnum(u64 baddr, char *partnum) +{ + struct nvmem_device *nvmem; + char prefix[24]; + u8 prom[64]; + int i, j; + int ret; + + snprintf(prefix, sizeof(prefix), "bridge-%012llx-0b-", baddr); + + nvmem = nvmem_device_find(prefix, bridge_nvmem_match); + if (IS_ERR(nvmem)) + return PTR_ERR(nvmem); + + ret = nvmem_device_read(nvmem, 0, 64, prom); + nvmem_device_put(nvmem); + + if (ret != 64) + return ret; + + if (crc16(CRC16_INIT, prom, 32) != CRC16_VALID || + crc16(CRC16_INIT, prom + 32, 32) != CRC16_VALID) + return -EINVAL; + + /* Assemble part number */ + j = 0; + for (i = 0; i < 19; i++) + if (prom[i + 11] != ' ') + partnum[j++] = prom[i + 11]; + + for (i = 0; i < 6; i++) + if (prom[i + 32] != ' ') + partnum[j++] = prom[i + 32]; + + partnum[j] = 0; + + return 0; +} + +static int bridge_probe(struct platform_device *pdev) +{ + struct xtalk_bridge_platform_data *bd = dev_get_platdata(&pdev->dev); + struct device *dev = &pdev->dev; + struct bridge_controller *bc; + struct pci_host_bridge *host; + struct irq_domain *domain, *parent; + struct fwnode_handle *fn; + char partnum[26]; + int slot; + int err; + + /* get part number from one wire prom */ + if (bridge_get_partnum(virt_to_phys((void *)bd->bridge_addr), partnum)) + return -EPROBE_DEFER; /* not available yet */ + + parent = irq_get_default_host(); + if (!parent) + return -ENODEV; + fn = irq_domain_alloc_named_fwnode("BRIDGE"); + if (!fn) + return -ENOMEM; + domain = irq_domain_create_hierarchy(parent, 0, 8, fn, + &bridge_domain_ops, NULL); + if (!domain) { + irq_domain_free_fwnode(fn); + return -ENOMEM; + } + + pci_set_flags(PCI_PROBE_ONLY); + + host = devm_pci_alloc_host_bridge(dev, sizeof(*bc)); + if (!host) { + err = -ENOMEM; + goto err_remove_domain; + } + + bc = pci_host_bridge_priv(host); + + bc->busn.name = "Bridge PCI busn"; + bc->busn.start = 0; + bc->busn.end = 0xff; + bc->busn.flags = IORESOURCE_BUS; + + bc->domain = domain; + + pci_add_resource_offset(&host->windows, &bd->mem, bd->mem_offset); + pci_add_resource_offset(&host->windows, &bd->io, bd->io_offset); + 
pci_add_resource(&host->windows, &bc->busn); + + err = devm_request_pci_bus_resources(dev, &host->windows); + if (err < 0) + goto err_free_resource; + + bc->nasid = bd->nasid; + + bc->baddr = (u64)bd->masterwid << 60 | PCI64_ATTR_BAR; + bc->base = (struct bridge_regs *)bd->bridge_addr; + bc->intr_addr = bd->intr_addr; + + /* + * Clear all pending interrupts. + */ + bridge_write(bc, b_int_rst_stat, BRIDGE_IRR_ALL_CLR); + + /* + * Until otherwise set up, assume all interrupts are from slot 0 + */ + bridge_write(bc, b_int_device, 0x0); + + /* + * disable swapping for big windows + */ + bridge_clr(bc, b_wid_control, + BRIDGE_CTRL_IO_SWAP | BRIDGE_CTRL_MEM_SWAP); +#ifdef CONFIG_PAGE_SIZE_4KB + bridge_clr(bc, b_wid_control, BRIDGE_CTRL_PAGE_SIZE); +#else /* 16kB or larger */ + bridge_set(bc, b_wid_control, BRIDGE_CTRL_PAGE_SIZE); +#endif + + /* + * Hmm... IRIX sets additional bits in the address which + * are documented as reserved in the bridge docs. + */ + bridge_write(bc, b_wid_int_upper, + ((bc->intr_addr >> 32) & 0xffff) | (bd->masterwid << 16)); + bridge_write(bc, b_wid_int_lower, bc->intr_addr & 0xffffffff); + bridge_write(bc, b_dir_map, (bd->masterwid << 20)); /* DMA */ + bridge_write(bc, b_int_enable, 0); + + for (slot = 0; slot < 8; slot++) { + bridge_set(bc, b_device[slot].reg, BRIDGE_DEV_SWAP_DIR); + bc->pci_int[slot][0] = -1; + bc->pci_int[slot][1] = -1; + /* default interrupt pin mapping */ + bc->int_mapping[slot][0] = slot; + bc->int_mapping[slot][1] = slot ^ 4; + } + bridge_read(bc, b_wid_tflush); /* wait until Bridge PIO complete */ + + bridge_setup_board(bc, partnum); + + host->dev.parent = dev; + host->sysdata = bc; + host->busnr = 0; + host->ops = &bridge_pci_ops; + host->map_irq = bridge_map_irq; + host->swizzle_irq = pci_common_swizzle; + + err = pci_scan_root_bus_bridge(host); + if (err < 0) + goto err_free_resource; + + pci_bus_claim_resources(host->bus); + pci_bus_add_devices(host->bus); + + platform_set_drvdata(pdev, host->bus); + + return 0; + +err_free_resource: + pci_free_resource_list(&host->windows); +err_remove_domain: + irq_domain_remove(domain); + irq_domain_free_fwnode(fn); + return err; +} + +static int bridge_remove(struct platform_device *pdev) +{ + struct pci_bus *bus = platform_get_drvdata(pdev); + struct bridge_controller *bc = BRIDGE_CONTROLLER(bus); + struct fwnode_handle *fn = bc->domain->fwnode; + + irq_domain_remove(bc->domain); + irq_domain_free_fwnode(fn); + pci_lock_rescan_remove(); + pci_stop_root_bus(bus); + pci_remove_root_bus(bus); + pci_unlock_rescan_remove(); + + return 0; +} + +static struct platform_driver bridge_driver = { + .probe = bridge_probe, + .remove = bridge_remove, + .driver = { + .name = "xtalk-bridge", + } +}; + +builtin_platform_driver(bridge_driver); diff --git a/arch/mips/pci/pci.c b/arch/mips/pci/pci.c new file mode 100644 index 000000000..feebc6eee --- /dev/null +++ b/arch/mips/pci/pci.c @@ -0,0 +1,55 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * + * Copyright (C) 2003, 04, 11 Ralf Baechle (ralf@linux-mips.org) + * Copyright (C) 2011 Wind River Systems, + * written by Ralf Baechle (ralf@linux-mips.org) + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +unsigned long PCIBIOS_MIN_IO; +EXPORT_SYMBOL(PCIBIOS_MIN_IO); + +unsigned long PCIBIOS_MIN_MEM; +EXPORT_SYMBOL(PCIBIOS_MIN_MEM); + +static int __init pcibios_set_cache_line_size(void) +{ + unsigned int lsize; + + /* + * Set PCI cacheline size to that of the highest level in the + * cache hierarchy. 
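+	 * pci_dfl_cache_line_size is expressed in 32-bit dwords, as the
+	 * PCI Cache Line Size register is, hence the ">> 2" conversion
+	 * from bytes below.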
+ */ + lsize = cpu_dcache_line_size(); + lsize = cpu_scache_line_size() ? : lsize; + lsize = cpu_tcache_line_size() ? : lsize; + + BUG_ON(!lsize); + + pci_dfl_cache_line_size = lsize >> 2; + + pr_debug("PCI: pci_cache_line_size set to %d bytes\n", lsize); + return 0; +} +arch_initcall(pcibios_set_cache_line_size); + +void pci_resource_to_user(const struct pci_dev *dev, int bar, + const struct resource *rsrc, resource_size_t *start, + resource_size_t *end) +{ + phys_addr_t size = resource_size(rsrc); + + *start = fixup_bigphys_addr(rsrc->start, size); + *end = rsrc->start + size - 1; +} diff --git a/arch/mips/pci/pcie-octeon.c b/arch/mips/pci/pcie-octeon.c new file mode 100644 index 000000000..c9edd3fb3 --- /dev/null +++ b/arch/mips/pci/pcie-octeon.c @@ -0,0 +1,2088 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 2007, 2008, 2009, 2010, 2011 Cavium Networks + */ +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define MRRS_CN5XXX 0 /* 128 byte Max Read Request Size */ +#define MPS_CN5XXX 0 /* 128 byte Max Packet Size (Limit of most PCs) */ +#define MRRS_CN6XXX 3 /* 1024 byte Max Read Request Size */ +#define MPS_CN6XXX 0 /* 128 byte Max Packet Size (Limit of most PCs) */ + +/* Module parameter to disable PCI probing */ +static int pcie_disable; +module_param(pcie_disable, int, S_IRUGO); + +static int enable_pcie_14459_war; +static int enable_pcie_bus_num_war[2]; + +union cvmx_pcie_address { + uint64_t u64; + struct { + uint64_t upper:2; /* Normally 2 for XKPHYS */ + uint64_t reserved_49_61:13; /* Must be zero */ + uint64_t io:1; /* 1 for IO space access */ + uint64_t did:5; /* PCIe DID = 3 */ + uint64_t subdid:3; /* PCIe SubDID = 1 */ + uint64_t reserved_36_39:4; /* Must be zero */ + uint64_t es:2; /* Endian swap = 1 */ + uint64_t port:2; /* PCIe port 0,1 */ + uint64_t reserved_29_31:3; /* Must be zero */ + /* + * Selects the type of the configuration request (0 = type 0, + * 1 = type 1). + */ + uint64_t ty:1; + /* Target bus number sent in the ID in the request. */ + uint64_t bus:8; + /* + * Target device number sent in the ID in the + * request. Note that Dev must be zero for type 0 + * configuration requests. + */ + uint64_t dev:5; + /* Target function number sent in the ID in the request. */ + uint64_t func:3; + /* + * Selects a register in the configuration space of + * the target. + */ + uint64_t reg:12; + } config; + struct { + uint64_t upper:2; /* Normally 2 for XKPHYS */ + uint64_t reserved_49_61:13; /* Must be zero */ + uint64_t io:1; /* 1 for IO space access */ + uint64_t did:5; /* PCIe DID = 3 */ + uint64_t subdid:3; /* PCIe SubDID = 2 */ + uint64_t reserved_36_39:4; /* Must be zero */ + uint64_t es:2; /* Endian swap = 1 */ + uint64_t port:2; /* PCIe port 0,1 */ + uint64_t address:32; /* PCIe IO address */ + } io; + struct { + uint64_t upper:2; /* Normally 2 for XKPHYS */ + uint64_t reserved_49_61:13; /* Must be zero */ + uint64_t io:1; /* 1 for IO space access */ + uint64_t did:5; /* PCIe DID = 3 */ + uint64_t subdid:3; /* PCIe SubDID = 3-6 */ + uint64_t reserved_36_39:4; /* Must be zero */ + uint64_t address:36; /* PCIe Mem address */ + } mem; +}; + +static int cvmx_pcie_rc_initialize(int pcie_port); + +/** + * Return the Core virtual base address for PCIe IO access. 
IOs are + * read/written as an offset from this address. + * + * @pcie_port: PCIe port the IO is for + * + * Returns 64bit Octeon IO base address for read/write + */ +static inline uint64_t cvmx_pcie_get_io_base_address(int pcie_port) +{ + union cvmx_pcie_address pcie_addr; + pcie_addr.u64 = 0; + pcie_addr.io.upper = 0; + pcie_addr.io.io = 1; + pcie_addr.io.did = 3; + pcie_addr.io.subdid = 2; + pcie_addr.io.es = 1; + pcie_addr.io.port = pcie_port; + return pcie_addr.u64; +} + +/** + * Size of the IO address region returned at address + * cvmx_pcie_get_io_base_address() + * + * @pcie_port: PCIe port the IO is for + * + * Returns Size of the IO window + */ +static inline uint64_t cvmx_pcie_get_io_size(int pcie_port) +{ + return 1ull << 32; +} + +/** + * Return the Core virtual base address for PCIe MEM access. Memory is + * read/written as an offset from this address. + * + * @pcie_port: PCIe port the IO is for + * + * Returns 64bit Octeon IO base address for read/write + */ +static inline uint64_t cvmx_pcie_get_mem_base_address(int pcie_port) +{ + union cvmx_pcie_address pcie_addr; + pcie_addr.u64 = 0; + pcie_addr.mem.upper = 0; + pcie_addr.mem.io = 1; + pcie_addr.mem.did = 3; + pcie_addr.mem.subdid = 3 + pcie_port; + return pcie_addr.u64; +} + +/** + * Size of the Mem address region returned at address + * cvmx_pcie_get_mem_base_address() + * + * @pcie_port: PCIe port the IO is for + * + * Returns Size of the Mem window + */ +static inline uint64_t cvmx_pcie_get_mem_size(int pcie_port) +{ + return 1ull << 36; +} + +/** + * Read a PCIe config space register indirectly. This is used for + * registers of the form PCIEEP_CFG??? and PCIERC?_CFG???. + * + * @pcie_port: PCIe port to read from + * @cfg_offset: Address to read + * + * Returns Value read + */ +static uint32_t cvmx_pcie_cfgx_read(int pcie_port, uint32_t cfg_offset) +{ + if (octeon_has_feature(OCTEON_FEATURE_NPEI)) { + union cvmx_pescx_cfg_rd pescx_cfg_rd; + pescx_cfg_rd.u64 = 0; + pescx_cfg_rd.s.addr = cfg_offset; + cvmx_write_csr(CVMX_PESCX_CFG_RD(pcie_port), pescx_cfg_rd.u64); + pescx_cfg_rd.u64 = cvmx_read_csr(CVMX_PESCX_CFG_RD(pcie_port)); + return pescx_cfg_rd.s.data; + } else { + union cvmx_pemx_cfg_rd pemx_cfg_rd; + pemx_cfg_rd.u64 = 0; + pemx_cfg_rd.s.addr = cfg_offset; + cvmx_write_csr(CVMX_PEMX_CFG_RD(pcie_port), pemx_cfg_rd.u64); + pemx_cfg_rd.u64 = cvmx_read_csr(CVMX_PEMX_CFG_RD(pcie_port)); + return pemx_cfg_rd.s.data; + } +} + +/** + * Write a PCIe config space register indirectly. This is used for + * registers of the form PCIEEP_CFG??? and PCIERC?_CFG???. 
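+ *
+ * The write is issued through PESCX_CFG_WR on NPEI-based chips and
+ * PEMX_CFG_WR otherwise, mirroring cvmx_pcie_cfgx_read() above.
+ * Typical use, as elsewhere in this file:
+ *
+ *   cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG030(pcie_port),
+ *			  pciercx_cfg030.u32);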
+ * + * @pcie_port: PCIe port to write to + * @cfg_offset: Address to write + * @val: Value to write + */ +static void cvmx_pcie_cfgx_write(int pcie_port, uint32_t cfg_offset, + uint32_t val) +{ + if (octeon_has_feature(OCTEON_FEATURE_NPEI)) { + union cvmx_pescx_cfg_wr pescx_cfg_wr; + pescx_cfg_wr.u64 = 0; + pescx_cfg_wr.s.addr = cfg_offset; + pescx_cfg_wr.s.data = val; + cvmx_write_csr(CVMX_PESCX_CFG_WR(pcie_port), pescx_cfg_wr.u64); + } else { + union cvmx_pemx_cfg_wr pemx_cfg_wr; + pemx_cfg_wr.u64 = 0; + pemx_cfg_wr.s.addr = cfg_offset; + pemx_cfg_wr.s.data = val; + cvmx_write_csr(CVMX_PEMX_CFG_WR(pcie_port), pemx_cfg_wr.u64); + } +} + +/** + * Build a PCIe config space request address for a device + * + * @pcie_port: PCIe port to access + * @bus: Sub bus + * @dev: Device ID + * @fn: Device sub function + * @reg: Register to access + * + * Returns 64bit Octeon IO address + */ +static inline uint64_t __cvmx_pcie_build_config_addr(int pcie_port, int bus, + int dev, int fn, int reg) +{ + union cvmx_pcie_address pcie_addr; + union cvmx_pciercx_cfg006 pciercx_cfg006; + + pciercx_cfg006.u32 = + cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG006(pcie_port)); + if ((bus <= pciercx_cfg006.s.pbnum) && (dev != 0)) + return 0; + + pcie_addr.u64 = 0; + pcie_addr.config.upper = 2; + pcie_addr.config.io = 1; + pcie_addr.config.did = 3; + pcie_addr.config.subdid = 1; + pcie_addr.config.es = 1; + pcie_addr.config.port = pcie_port; + pcie_addr.config.ty = (bus > pciercx_cfg006.s.pbnum); + pcie_addr.config.bus = bus; + pcie_addr.config.dev = dev; + pcie_addr.config.func = fn; + pcie_addr.config.reg = reg; + return pcie_addr.u64; +} + +/** + * Read 8bits from a Device's config space + * + * @pcie_port: PCIe port the device is on + * @bus: Sub bus + * @dev: Device ID + * @fn: Device sub function + * @reg: Register to access + * + * Returns Result of the read + */ +static uint8_t cvmx_pcie_config_read8(int pcie_port, int bus, int dev, + int fn, int reg) +{ + uint64_t address = + __cvmx_pcie_build_config_addr(pcie_port, bus, dev, fn, reg); + if (address) + return cvmx_read64_uint8(address); + else + return 0xff; +} + +/** + * Read 16bits from a Device's config space + * + * @pcie_port: PCIe port the device is on + * @bus: Sub bus + * @dev: Device ID + * @fn: Device sub function + * @reg: Register to access + * + * Returns Result of the read + */ +static uint16_t cvmx_pcie_config_read16(int pcie_port, int bus, int dev, + int fn, int reg) +{ + uint64_t address = + __cvmx_pcie_build_config_addr(pcie_port, bus, dev, fn, reg); + if (address) + return le16_to_cpu(cvmx_read64_uint16(address)); + else + return 0xffff; +} + +/** + * Read 32bits from a Device's config space + * + * @pcie_port: PCIe port the device is on + * @bus: Sub bus + * @dev: Device ID + * @fn: Device sub function + * @reg: Register to access + * + * Returns Result of the read + */ +static uint32_t cvmx_pcie_config_read32(int pcie_port, int bus, int dev, + int fn, int reg) +{ + uint64_t address = + __cvmx_pcie_build_config_addr(pcie_port, bus, dev, fn, reg); + if (address) + return le32_to_cpu(cvmx_read64_uint32(address)); + else + return 0xffffffff; +} + +/** + * Write 8bits to a Device's config space + * + * @pcie_port: PCIe port the device is on + * @bus: Sub bus + * @dev: Device ID + * @fn: Device sub function + * @reg: Register to access + * @val: Value to write + */ +static void cvmx_pcie_config_write8(int pcie_port, int bus, int dev, int fn, + int reg, uint8_t val) +{ + uint64_t address = + __cvmx_pcie_build_config_addr(pcie_port, bus, dev, 
fn, reg); + if (address) + cvmx_write64_uint8(address, val); +} + +/** + * Write 16bits to a Device's config space + * + * @pcie_port: PCIe port the device is on + * @bus: Sub bus + * @dev: Device ID + * @fn: Device sub function + * @reg: Register to access + * @val: Value to write + */ +static void cvmx_pcie_config_write16(int pcie_port, int bus, int dev, int fn, + int reg, uint16_t val) +{ + uint64_t address = + __cvmx_pcie_build_config_addr(pcie_port, bus, dev, fn, reg); + if (address) + cvmx_write64_uint16(address, cpu_to_le16(val)); +} + +/** + * Write 32bits to a Device's config space + * + * @pcie_port: PCIe port the device is on + * @bus: Sub bus + * @dev: Device ID + * @fn: Device sub function + * @reg: Register to access + * @val: Value to write + */ +static void cvmx_pcie_config_write32(int pcie_port, int bus, int dev, int fn, + int reg, uint32_t val) +{ + uint64_t address = + __cvmx_pcie_build_config_addr(pcie_port, bus, dev, fn, reg); + if (address) + cvmx_write64_uint32(address, cpu_to_le32(val)); +} + +/** + * Initialize the RC config space CSRs + * + * @pcie_port: PCIe port to initialize + */ +static void __cvmx_pcie_rc_initialize_config_space(int pcie_port) +{ + union cvmx_pciercx_cfg030 pciercx_cfg030; + union cvmx_pciercx_cfg070 pciercx_cfg070; + union cvmx_pciercx_cfg001 pciercx_cfg001; + union cvmx_pciercx_cfg032 pciercx_cfg032; + union cvmx_pciercx_cfg006 pciercx_cfg006; + union cvmx_pciercx_cfg008 pciercx_cfg008; + union cvmx_pciercx_cfg009 pciercx_cfg009; + union cvmx_pciercx_cfg010 pciercx_cfg010; + union cvmx_pciercx_cfg011 pciercx_cfg011; + union cvmx_pciercx_cfg035 pciercx_cfg035; + union cvmx_pciercx_cfg075 pciercx_cfg075; + union cvmx_pciercx_cfg034 pciercx_cfg034; + + /* Max Payload Size (PCIE*_CFG030[MPS]) */ + /* Max Read Request Size (PCIE*_CFG030[MRRS]) */ + /* Relaxed-order, no-snoop enables (PCIE*_CFG030[RO_EN,NS_EN] */ + /* Error Message Enables (PCIE*_CFG030[CE_EN,NFE_EN,FE_EN,UR_EN]) */ + + pciercx_cfg030.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG030(pcie_port)); + if (OCTEON_IS_MODEL(OCTEON_CN5XXX)) { + pciercx_cfg030.s.mps = MPS_CN5XXX; + pciercx_cfg030.s.mrrs = MRRS_CN5XXX; + } else { + pciercx_cfg030.s.mps = MPS_CN6XXX; + pciercx_cfg030.s.mrrs = MRRS_CN6XXX; + } + /* + * Enable relaxed order processing. This will allow devices to + * affect read response ordering. + */ + pciercx_cfg030.s.ro_en = 1; + /* Enable no snoop processing. Not used by Octeon */ + pciercx_cfg030.s.ns_en = 1; + /* Correctable error reporting enable. */ + pciercx_cfg030.s.ce_en = 1; + /* Non-fatal error reporting enable. */ + pciercx_cfg030.s.nfe_en = 1; + /* Fatal error reporting enable. */ + pciercx_cfg030.s.fe_en = 1; + /* Unsupported request reporting enable. */ + pciercx_cfg030.s.ur_en = 1; + cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG030(pcie_port), pciercx_cfg030.u32); + + + if (octeon_has_feature(OCTEON_FEATURE_NPEI)) { + union cvmx_npei_ctl_status2 npei_ctl_status2; + /* + * Max Payload Size (NPEI_CTL_STATUS2[MPS]) must match + * PCIE*_CFG030[MPS]. 
Max Read Request Size + * (NPEI_CTL_STATUS2[MRRS]) must not exceed + * PCIE*_CFG030[MRRS] + */ + npei_ctl_status2.u64 = cvmx_read_csr(CVMX_PEXP_NPEI_CTL_STATUS2); + /* Max payload size = 128 bytes for best Octeon DMA performance */ + npei_ctl_status2.s.mps = MPS_CN5XXX; + /* Max read request size = 128 bytes for best Octeon DMA performance */ + npei_ctl_status2.s.mrrs = MRRS_CN5XXX; + if (pcie_port) + npei_ctl_status2.s.c1_b1_s = 3; /* Port1 BAR1 Size 256MB */ + else + npei_ctl_status2.s.c0_b1_s = 3; /* Port0 BAR1 Size 256MB */ + + cvmx_write_csr(CVMX_PEXP_NPEI_CTL_STATUS2, npei_ctl_status2.u64); + } else { + /* + * Max Payload Size (DPI_SLI_PRTX_CFG[MPS]) must match + * PCIE*_CFG030[MPS]. Max Read Request Size + * (DPI_SLI_PRTX_CFG[MRRS]) must not exceed + * PCIE*_CFG030[MRRS]. + */ + union cvmx_dpi_sli_prtx_cfg prt_cfg; + union cvmx_sli_s2m_portx_ctl sli_s2m_portx_ctl; + prt_cfg.u64 = cvmx_read_csr(CVMX_DPI_SLI_PRTX_CFG(pcie_port)); + prt_cfg.s.mps = MPS_CN6XXX; + prt_cfg.s.mrrs = MRRS_CN6XXX; + /* Max outstanding load request. */ + prt_cfg.s.molr = 32; + cvmx_write_csr(CVMX_DPI_SLI_PRTX_CFG(pcie_port), prt_cfg.u64); + + sli_s2m_portx_ctl.u64 = cvmx_read_csr(CVMX_PEXP_SLI_S2M_PORTX_CTL(pcie_port)); + sli_s2m_portx_ctl.s.mrrs = MRRS_CN6XXX; + cvmx_write_csr(CVMX_PEXP_SLI_S2M_PORTX_CTL(pcie_port), sli_s2m_portx_ctl.u64); + } + + /* ECRC Generation (PCIE*_CFG070[GE,CE]) */ + pciercx_cfg070.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG070(pcie_port)); + pciercx_cfg070.s.ge = 1; /* ECRC generation enable. */ + pciercx_cfg070.s.ce = 1; /* ECRC check enable. */ + cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG070(pcie_port), pciercx_cfg070.u32); + + /* + * Access Enables (PCIE*_CFG001[MSAE,ME]) + * ME and MSAE should always be set. + * Interrupt Disable (PCIE*_CFG001[I_DIS]) + * System Error Message Enable (PCIE*_CFG001[SEE]) + */ + pciercx_cfg001.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG001(pcie_port)); + pciercx_cfg001.s.msae = 1; /* Memory space enable. */ + pciercx_cfg001.s.me = 1; /* Bus master enable. */ + pciercx_cfg001.s.i_dis = 1; /* INTx assertion disable. */ + pciercx_cfg001.s.see = 1; /* SERR# enable */ + cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG001(pcie_port), pciercx_cfg001.u32); + + /* Advanced Error Recovery Message Enables */ + /* (PCIE*_CFG066,PCIE*_CFG067,PCIE*_CFG069) */ + cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG066(pcie_port), 0); + /* Use CVMX_PCIERCX_CFG067 hardware default */ + cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG069(pcie_port), 0); + + + /* Active State Power Management (PCIE*_CFG032[ASLPC]) */ + pciercx_cfg032.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG032(pcie_port)); + pciercx_cfg032.s.aslpc = 0; /* Active state Link PM control. */ + cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG032(pcie_port), pciercx_cfg032.u32); + + /* + * Link Width Mode (PCIERCn_CFG452[LME]) - Set during + * cvmx_pcie_rc_initialize_link() + * + * Primary Bus Number (PCIERCn_CFG006[PBNUM]) + * + * We set the primary bus number to 1 so IDT bridges are + * happy. They don't like zero. 
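+	 *
+	 * With PBNUM = 1, __cvmx_pcie_build_config_addr() issues type 0
+	 * config cycles on buses 0 and 1 (device 0 only) and type 1
+	 * cycles for everything behind them.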
+ */ + pciercx_cfg006.u32 = 0; + pciercx_cfg006.s.pbnum = 1; + pciercx_cfg006.s.sbnum = 1; + pciercx_cfg006.s.subbnum = 1; + cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG006(pcie_port), pciercx_cfg006.u32); + + + /* + * Memory-mapped I/O BAR (PCIERCn_CFG008) + * Most applications should disable the memory-mapped I/O BAR by + * setting PCIERCn_CFG008[ML_ADDR] < PCIERCn_CFG008[MB_ADDR] + */ + pciercx_cfg008.u32 = 0; + pciercx_cfg008.s.mb_addr = 0x100; + pciercx_cfg008.s.ml_addr = 0; + cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG008(pcie_port), pciercx_cfg008.u32); + + + /* + * Prefetchable BAR (PCIERCn_CFG009,PCIERCn_CFG010,PCIERCn_CFG011) + * Most applications should disable the prefetchable BAR by setting + * PCIERCn_CFG011[UMEM_LIMIT],PCIERCn_CFG009[LMEM_LIMIT] < + * PCIERCn_CFG010[UMEM_BASE],PCIERCn_CFG009[LMEM_BASE] + */ + pciercx_cfg009.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG009(pcie_port)); + pciercx_cfg010.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG010(pcie_port)); + pciercx_cfg011.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG011(pcie_port)); + pciercx_cfg009.s.lmem_base = 0x100; + pciercx_cfg009.s.lmem_limit = 0; + pciercx_cfg010.s.umem_base = 0x100; + pciercx_cfg011.s.umem_limit = 0; + cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG009(pcie_port), pciercx_cfg009.u32); + cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG010(pcie_port), pciercx_cfg010.u32); + cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG011(pcie_port), pciercx_cfg011.u32); + + /* + * System Error Interrupt Enables (PCIERCn_CFG035[SECEE,SEFEE,SENFEE]) + * PME Interrupt Enables (PCIERCn_CFG035[PMEIE]) + */ + pciercx_cfg035.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG035(pcie_port)); + pciercx_cfg035.s.secee = 1; /* System error on correctable error enable. */ + pciercx_cfg035.s.sefee = 1; /* System error on fatal error enable. */ + pciercx_cfg035.s.senfee = 1; /* System error on non-fatal error enable. */ + pciercx_cfg035.s.pmeie = 1; /* PME interrupt enable. */ + cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG035(pcie_port), pciercx_cfg035.u32); + + /* + * Advanced Error Recovery Interrupt Enables + * (PCIERCn_CFG075[CERE,NFERE,FERE]) + */ + pciercx_cfg075.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG075(pcie_port)); + pciercx_cfg075.s.cere = 1; /* Correctable error reporting enable. */ + pciercx_cfg075.s.nfere = 1; /* Non-fatal error reporting enable. */ + pciercx_cfg075.s.fere = 1; /* Fatal error reporting enable. */ + cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG075(pcie_port), pciercx_cfg075.u32); + + /* + * HP Interrupt Enables (PCIERCn_CFG034[HPINT_EN], + * PCIERCn_CFG034[DLLS_EN,CCINT_EN]) + */ + pciercx_cfg034.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG034(pcie_port)); + pciercx_cfg034.s.hpint_en = 1; /* Hot-plug interrupt enable. */ + pciercx_cfg034.s.dlls_en = 1; /* Data Link Layer state changed enable */ + pciercx_cfg034.s.ccint_en = 1; /* Command completed interrupt enable. */ + cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG034(pcie_port), pciercx_cfg034.u32); +} + +/** + * Initialize a host mode PCIe gen 1 link. This function takes a PCIe + * port from reset to a link up state. Software can then begin + * configuring the rest of the link. 
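+ *
+ * Roughly: program the lane-width field, enable the link, poll
+ * PCIE*_CFG032[DLLA] for up to two seconds, then widen the replay
+ * time limit as a workaround for slow devices.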
+ * + * @pcie_port: PCIe port to initialize + * + * Returns Zero on success + */ +static int __cvmx_pcie_rc_initialize_link_gen1(int pcie_port) +{ + uint64_t start_cycle; + union cvmx_pescx_ctl_status pescx_ctl_status; + union cvmx_pciercx_cfg452 pciercx_cfg452; + union cvmx_pciercx_cfg032 pciercx_cfg032; + union cvmx_pciercx_cfg448 pciercx_cfg448; + + /* Set the lane width */ + pciercx_cfg452.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG452(pcie_port)); + pescx_ctl_status.u64 = cvmx_read_csr(CVMX_PESCX_CTL_STATUS(pcie_port)); + if (pescx_ctl_status.s.qlm_cfg == 0) + /* We're in 8 lane (56XX) or 4 lane (54XX) mode */ + pciercx_cfg452.s.lme = 0xf; + else + /* We're in 4 lane (56XX) or 2 lane (52XX) mode */ + pciercx_cfg452.s.lme = 0x7; + cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG452(pcie_port), pciercx_cfg452.u32); + + /* + * CN52XX pass 1.x has an errata where length mismatches on UR + * responses can cause bus errors on 64bit memory + * reads. Turning off length error checking fixes this. + */ + if (OCTEON_IS_MODEL(OCTEON_CN52XX_PASS1_X)) { + union cvmx_pciercx_cfg455 pciercx_cfg455; + pciercx_cfg455.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG455(pcie_port)); + pciercx_cfg455.s.m_cpl_len_err = 1; + cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG455(pcie_port), pciercx_cfg455.u32); + } + + /* Lane swap needs to be manually enabled for CN52XX */ + if (OCTEON_IS_MODEL(OCTEON_CN52XX) && (pcie_port == 1)) { + pescx_ctl_status.s.lane_swp = 1; + cvmx_write_csr(CVMX_PESCX_CTL_STATUS(pcie_port), pescx_ctl_status.u64); + } + + /* Bring up the link */ + pescx_ctl_status.u64 = cvmx_read_csr(CVMX_PESCX_CTL_STATUS(pcie_port)); + pescx_ctl_status.s.lnk_enb = 1; + cvmx_write_csr(CVMX_PESCX_CTL_STATUS(pcie_port), pescx_ctl_status.u64); + + /* + * CN52XX pass 1.0: Due to a bug in 2nd order CDR, it needs to + * be disabled. + */ + if (OCTEON_IS_MODEL(OCTEON_CN52XX_PASS1_0)) + __cvmx_helper_errata_qlm_disable_2nd_order_cdr(0); + + /* Wait for the link to come up */ + start_cycle = cvmx_get_cycle(); + do { + if (cvmx_get_cycle() - start_cycle > 2 * octeon_get_clock_rate()) { + cvmx_dprintf("PCIe: Port %d link timeout\n", pcie_port); + return -1; + } + __delay(10000); + pciercx_cfg032.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG032(pcie_port)); + } while (pciercx_cfg032.s.dlla == 0); + + /* Clear all pending errors */ + cvmx_write_csr(CVMX_PEXP_NPEI_INT_SUM, cvmx_read_csr(CVMX_PEXP_NPEI_INT_SUM)); + + /* + * Update the Replay Time Limit. Empirically, some PCIe + * devices take a little longer to respond than expected under + * load. As a workaround for this we configure the Replay Time + * Limit to the value expected for a 512 byte MPS instead of + * our actual 256 byte MPS. The numbers below are directly + * from the PCIe spec table 3-4. + */ + pciercx_cfg448.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG448(pcie_port)); + switch (pciercx_cfg032.s.nlw) { + case 1: /* 1 lane */ + pciercx_cfg448.s.rtl = 1677; + break; + case 2: /* 2 lanes */ + pciercx_cfg448.s.rtl = 867; + break; + case 4: /* 4 lanes */ + pciercx_cfg448.s.rtl = 462; + break; + case 8: /* 8 lanes */ + pciercx_cfg448.s.rtl = 258; + break; + } + cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG448(pcie_port), pciercx_cfg448.u32); + + return 0; +} + +static void __cvmx_increment_ba(union cvmx_sli_mem_access_subidx *pmas) +{ + if (OCTEON_IS_MODEL(OCTEON_CN68XX)) + pmas->cn68xx.ba++; + else + pmas->s.ba++; +} + +/** + * Initialize a PCIe gen 1 port for use in host(RC) mode. It doesn't + * enumerate the bus. 
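+ *
+ * In outline: bring the port out of reset, sanity-check BIST and that
+ * the port is clocked, program the RC config space, train the link,
+ * and set up the BAR and SubDID windows used for memory and I/O
+ * access.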
+ * + * @pcie_port: PCIe port to initialize + * + * Returns Zero on success + */ +static int __cvmx_pcie_rc_initialize_gen1(int pcie_port) +{ + int i; + int base; + u64 addr_swizzle; + union cvmx_ciu_soft_prst ciu_soft_prst; + union cvmx_pescx_bist_status pescx_bist_status; + union cvmx_pescx_bist_status2 pescx_bist_status2; + union cvmx_npei_ctl_status npei_ctl_status; + union cvmx_npei_mem_access_ctl npei_mem_access_ctl; + union cvmx_npei_mem_access_subidx mem_access_subid; + union cvmx_npei_dbg_data npei_dbg_data; + union cvmx_pescx_ctl_status2 pescx_ctl_status2; + union cvmx_pciercx_cfg032 pciercx_cfg032; + union cvmx_npei_bar1_indexx bar1_index; + +retry: + /* + * Make sure we aren't trying to setup a target mode interface + * in host mode. + */ + npei_ctl_status.u64 = cvmx_read_csr(CVMX_PEXP_NPEI_CTL_STATUS); + if ((pcie_port == 0) && !npei_ctl_status.s.host_mode) { + cvmx_dprintf("PCIe: Port %d in endpoint mode\n", pcie_port); + return -1; + } + + /* + * Make sure a CN52XX isn't trying to bring up port 1 when it + * is disabled. + */ + if (OCTEON_IS_MODEL(OCTEON_CN52XX)) { + npei_dbg_data.u64 = cvmx_read_csr(CVMX_PEXP_NPEI_DBG_DATA); + if ((pcie_port == 1) && npei_dbg_data.cn52xx.qlm0_link_width) { + cvmx_dprintf("PCIe: ERROR: cvmx_pcie_rc_initialize() called on port1, but port1 is disabled\n"); + return -1; + } + } + + /* + * PCIe switch arbitration mode. '0' == fixed priority NPEI, + * PCIe0, then PCIe1. '1' == round robin. + */ + npei_ctl_status.s.arb = 1; + /* Allow up to 0x20 config retries */ + npei_ctl_status.s.cfg_rtry = 0x20; + /* + * CN52XX pass1.x has an errata where P0_NTAGS and P1_NTAGS + * don't reset. + */ + if (OCTEON_IS_MODEL(OCTEON_CN52XX_PASS1_X)) { + npei_ctl_status.s.p0_ntags = 0x20; + npei_ctl_status.s.p1_ntags = 0x20; + } + cvmx_write_csr(CVMX_PEXP_NPEI_CTL_STATUS, npei_ctl_status.u64); + + /* Bring the PCIe out of reset */ + if (cvmx_sysinfo_get()->board_type == CVMX_BOARD_TYPE_EBH5200) { + /* + * The EBH5200 board swapped the PCIe reset lines on + * the board. As a workaround for this bug, we bring + * both PCIe ports out of reset at the same time + * instead of on separate calls. So for port 0, we + * bring both out of reset and do nothing on port 1 + */ + if (pcie_port == 0) { + ciu_soft_prst.u64 = cvmx_read_csr(CVMX_CIU_SOFT_PRST); + /* + * After a chip reset the PCIe will also be in + * reset. If it isn't, most likely someone is + * trying to init it again without a proper + * PCIe reset. + */ + if (ciu_soft_prst.s.soft_prst == 0) { + /* Reset the ports */ + ciu_soft_prst.s.soft_prst = 1; + cvmx_write_csr(CVMX_CIU_SOFT_PRST, ciu_soft_prst.u64); + ciu_soft_prst.u64 = cvmx_read_csr(CVMX_CIU_SOFT_PRST1); + ciu_soft_prst.s.soft_prst = 1; + cvmx_write_csr(CVMX_CIU_SOFT_PRST1, ciu_soft_prst.u64); + /* Wait until pcie resets the ports. */ + udelay(2000); + } + ciu_soft_prst.u64 = cvmx_read_csr(CVMX_CIU_SOFT_PRST1); + ciu_soft_prst.s.soft_prst = 0; + cvmx_write_csr(CVMX_CIU_SOFT_PRST1, ciu_soft_prst.u64); + ciu_soft_prst.u64 = cvmx_read_csr(CVMX_CIU_SOFT_PRST); + ciu_soft_prst.s.soft_prst = 0; + cvmx_write_csr(CVMX_CIU_SOFT_PRST, ciu_soft_prst.u64); + } + } else { + /* + * The normal case: The PCIe ports are completely + * separate and can be brought out of reset + * independently. + */ + if (pcie_port) + ciu_soft_prst.u64 = cvmx_read_csr(CVMX_CIU_SOFT_PRST1); + else + ciu_soft_prst.u64 = cvmx_read_csr(CVMX_CIU_SOFT_PRST); + /* + * After a chip reset the PCIe will also be in + * reset. 
If it isn't, most likely someone is trying + * to init it again without a proper PCIe reset. + */ + if (ciu_soft_prst.s.soft_prst == 0) { + /* Reset the port */ + ciu_soft_prst.s.soft_prst = 1; + if (pcie_port) + cvmx_write_csr(CVMX_CIU_SOFT_PRST1, ciu_soft_prst.u64); + else + cvmx_write_csr(CVMX_CIU_SOFT_PRST, ciu_soft_prst.u64); + /* Wait until pcie resets the ports. */ + udelay(2000); + } + if (pcie_port) { + ciu_soft_prst.u64 = cvmx_read_csr(CVMX_CIU_SOFT_PRST1); + ciu_soft_prst.s.soft_prst = 0; + cvmx_write_csr(CVMX_CIU_SOFT_PRST1, ciu_soft_prst.u64); + } else { + ciu_soft_prst.u64 = cvmx_read_csr(CVMX_CIU_SOFT_PRST); + ciu_soft_prst.s.soft_prst = 0; + cvmx_write_csr(CVMX_CIU_SOFT_PRST, ciu_soft_prst.u64); + } + } + + /* + * Wait for PCIe reset to complete. Due to errata PCIE-700, we + * don't poll PESCX_CTL_STATUS2[PCIERST], but simply wait a + * fixed number of cycles. + */ + __delay(400000); + + /* + * PESCX_BIST_STATUS2[PCLK_RUN] was missing on pass 1 of + * CN56XX and CN52XX, so we only probe it on newer chips + */ + if (!OCTEON_IS_MODEL(OCTEON_CN56XX_PASS1_X) && !OCTEON_IS_MODEL(OCTEON_CN52XX_PASS1_X)) { + /* Clear PCLK_RUN so we can check if the clock is running */ + pescx_ctl_status2.u64 = cvmx_read_csr(CVMX_PESCX_CTL_STATUS2(pcie_port)); + pescx_ctl_status2.s.pclk_run = 1; + cvmx_write_csr(CVMX_PESCX_CTL_STATUS2(pcie_port), pescx_ctl_status2.u64); + /* Now that we cleared PCLK_RUN, wait for it to be set + * again telling us the clock is running + */ + if (CVMX_WAIT_FOR_FIELD64(CVMX_PESCX_CTL_STATUS2(pcie_port), + union cvmx_pescx_ctl_status2, pclk_run, ==, 1, 10000)) { + cvmx_dprintf("PCIe: Port %d isn't clocked, skipping.\n", pcie_port); + return -1; + } + } + + /* + * Check and make sure PCIe came out of reset. If it doesn't + * the board probably hasn't wired the clocks up and the + * interface should be skipped. + */ + pescx_ctl_status2.u64 = cvmx_read_csr(CVMX_PESCX_CTL_STATUS2(pcie_port)); + if (pescx_ctl_status2.s.pcierst) { + cvmx_dprintf("PCIe: Port %d stuck in reset, skipping.\n", pcie_port); + return -1; + } + + /* + * Check BIST2 status. If any bits are set skip this + * interface. This is an attempt to catch PCIE-813 on pass 1 + * parts. + */ + pescx_bist_status2.u64 = cvmx_read_csr(CVMX_PESCX_BIST_STATUS2(pcie_port)); + if (pescx_bist_status2.u64) { + cvmx_dprintf("PCIe: Port %d BIST2 failed. Most likely this port isn't hooked up, skipping.\n", + pcie_port); + return -1; + } + + /* Check BIST status */ + pescx_bist_status.u64 = cvmx_read_csr(CVMX_PESCX_BIST_STATUS(pcie_port)); + if (pescx_bist_status.u64) + cvmx_dprintf("PCIe: BIST FAILED for port %d (0x%016llx)\n", + pcie_port, CAST64(pescx_bist_status.u64)); + + /* Initialize the config space CSRs */ + __cvmx_pcie_rc_initialize_config_space(pcie_port); + + /* Bring the link up */ + if (__cvmx_pcie_rc_initialize_link_gen1(pcie_port)) { + cvmx_dprintf("PCIe: Failed to initialize port %d, probably the slot is empty\n", + pcie_port); + return -1; + } + + /* Store merge control (NPEI_MEM_ACCESS_CTL[TIMER,MAX_WORD]) */ + npei_mem_access_ctl.u64 = cvmx_read_csr(CVMX_PEXP_NPEI_MEM_ACCESS_CTL); + npei_mem_access_ctl.s.max_word = 0; /* Allow 16 words to combine */ + npei_mem_access_ctl.s.timer = 127; /* Wait up to 127 cycles for more data */ + cvmx_write_csr(CVMX_PEXP_NPEI_MEM_ACCESS_CTL, npei_mem_access_ctl.u64); + + /* Setup Mem access SubDIDs */ + mem_access_subid.u64 = 0; + mem_access_subid.s.port = pcie_port; /* Port the request is sent to. 
*/ + mem_access_subid.s.nmerge = 1; /* Due to an errata on pass 1 chips, no merging is allowed. */ + mem_access_subid.s.esr = 1; /* Endian-swap for Reads. */ + mem_access_subid.s.esw = 1; /* Endian-swap for Writes. */ + mem_access_subid.s.nsr = 0; /* Enable Snooping for Reads. Octeon doesn't care, but devices might want this more conservative setting */ + mem_access_subid.s.nsw = 0; /* Enable Snoop for Writes. */ + mem_access_subid.s.ror = 0; /* Disable Relaxed Ordering for Reads. */ + mem_access_subid.s.row = 0; /* Disable Relaxed Ordering for Writes. */ + mem_access_subid.s.ba = 0; /* PCIe Address Bits <63:34>. */ + + /* + * Setup mem access 12-15 for port 0, 16-19 for port 1, + * supplying 36 bits of address space. + */ + for (i = 12 + pcie_port * 4; i < 16 + pcie_port * 4; i++) { + cvmx_write_csr(CVMX_PEXP_NPEI_MEM_ACCESS_SUBIDX(i), mem_access_subid.u64); + mem_access_subid.s.ba += 1; /* Set each SUBID to extend the addressable range */ + } + + /* + * Disable the peer to peer forwarding register. This must be + * setup by the OS after it enumerates the bus and assigns + * addresses to the PCIe busses. + */ + for (i = 0; i < 4; i++) { + cvmx_write_csr(CVMX_PESCX_P2P_BARX_START(i, pcie_port), -1); + cvmx_write_csr(CVMX_PESCX_P2P_BARX_END(i, pcie_port), -1); + } + + /* Set Octeon's BAR0 to decode 0-16KB. It overlaps with Bar2 */ + cvmx_write_csr(CVMX_PESCX_P2N_BAR0_START(pcie_port), 0); + + /* BAR1 follows BAR2 with a gap so it has the same address as for gen2. */ + cvmx_write_csr(CVMX_PESCX_P2N_BAR1_START(pcie_port), CVMX_PCIE_BAR1_RC_BASE); + + bar1_index.u32 = 0; + bar1_index.s.addr_idx = (CVMX_PCIE_BAR1_PHYS_BASE >> 22); + bar1_index.s.ca = 1; /* Not Cached */ + bar1_index.s.end_swp = 1; /* Endian Swap mode */ + bar1_index.s.addr_v = 1; /* Valid entry */ + + base = pcie_port ? 16 : 0; + + /* Big endian swizzle for 32-bit PEXP_NCB register. */ +#ifdef __MIPSEB__ + addr_swizzle = 4; +#else + addr_swizzle = 0; +#endif + for (i = 0; i < 16; i++) { + cvmx_write64_uint32((CVMX_PEXP_NPEI_BAR1_INDEXX(base) ^ addr_swizzle), + bar1_index.u32); + base++; + /* 256MB / 16 >> 22 == 4 */ + bar1_index.s.addr_idx += (((1ull << 28) / 16ull) >> 22); + } + + /* + * Set Octeon's BAR2 to decode 0-2^39. Bar0 and Bar1 take + * precedence where they overlap. It also overlaps with the + * device addresses, so make sure the peer to peer forwarding + * is set right. + */ + cvmx_write_csr(CVMX_PESCX_P2N_BAR2_START(pcie_port), 0); + + /* + * Setup BAR2 attributes + * + * Relaxed Ordering (NPEI_CTL_PORTn[PTLP_RO,CTLP_RO, WAIT_COM]) + * - PTLP_RO,CTLP_RO should normally be set (except for debug). + * - WAIT_COM=0 will likely work for all applications. + * + * Load completion relaxed ordering (NPEI_CTL_PORTn[WAITL_COM]). 
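/*
 * Illustrative sketch, not part of the patch: the BAR1 index programming
 * above carves a 256MB window into 16 equal entries whose addr_idx field is
 * expressed in 4MB (1 << 22) units, so each entry advances by
 * ((1ull << 28) / 16) >> 22 == 4.  The standalone program below just repeats
 * that arithmetic; the physical base is a made-up example, not
 * CVMX_PCIE_BAR1_PHYS_BASE.
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t phys_base = 0x10000000ull;		/* hypothetical 256MB-aligned base */
	uint64_t addr_idx = phys_base >> 22;		/* index counted in 4MB units */
	uint64_t step = ((1ull << 28) / 16ull) >> 22;	/* == 4 */
	int i;

	for (i = 0; i < 16; i++) {
		printf("entry %2d: addr_idx=%llu (maps %#llx)\n", i,
		       (unsigned long long)addr_idx,
		       (unsigned long long)(addr_idx << 22));
		addr_idx += step;
	}
	return 0;
}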
+ */ + if (pcie_port) { + union cvmx_npei_ctl_port1 npei_ctl_port; + npei_ctl_port.u64 = cvmx_read_csr(CVMX_PEXP_NPEI_CTL_PORT1); + npei_ctl_port.s.bar2_enb = 1; + npei_ctl_port.s.bar2_esx = 1; + npei_ctl_port.s.bar2_cax = 0; + npei_ctl_port.s.ptlp_ro = 1; + npei_ctl_port.s.ctlp_ro = 1; + npei_ctl_port.s.wait_com = 0; + npei_ctl_port.s.waitl_com = 0; + cvmx_write_csr(CVMX_PEXP_NPEI_CTL_PORT1, npei_ctl_port.u64); + } else { + union cvmx_npei_ctl_port0 npei_ctl_port; + npei_ctl_port.u64 = cvmx_read_csr(CVMX_PEXP_NPEI_CTL_PORT0); + npei_ctl_port.s.bar2_enb = 1; + npei_ctl_port.s.bar2_esx = 1; + npei_ctl_port.s.bar2_cax = 0; + npei_ctl_port.s.ptlp_ro = 1; + npei_ctl_port.s.ctlp_ro = 1; + npei_ctl_port.s.wait_com = 0; + npei_ctl_port.s.waitl_com = 0; + cvmx_write_csr(CVMX_PEXP_NPEI_CTL_PORT0, npei_ctl_port.u64); + } + + /* + * Both pass 1 and pass 2 of CN52XX and CN56XX have an errata + * that causes TLP ordering to not be preserved after multiple + * PCIe port resets. This code detects this fault and corrects + * it by aligning the TLP counters properly. Another link + * reset is then performed. See PCIE-13340 + */ + if (OCTEON_IS_MODEL(OCTEON_CN56XX_PASS2_X) || + OCTEON_IS_MODEL(OCTEON_CN52XX_PASS2_X) || + OCTEON_IS_MODEL(OCTEON_CN56XX_PASS1_X) || + OCTEON_IS_MODEL(OCTEON_CN52XX_PASS1_X)) { + union cvmx_npei_dbg_data dbg_data; + int old_in_fif_p_count; + int in_fif_p_count; + int out_p_count; + int in_p_offset = (OCTEON_IS_MODEL(OCTEON_CN52XX_PASS1_X) || OCTEON_IS_MODEL(OCTEON_CN56XX_PASS1_X)) ? 4 : 1; + int i; + + /* + * Choose a write address of 1MB. It should be + * harmless as all bars haven't been setup. + */ + uint64_t write_address = (cvmx_pcie_get_mem_base_address(pcie_port) + 0x100000) | (1ull<<63); + + /* + * Make sure at least in_p_offset have been executed before we try and + * read in_fif_p_count + */ + i = in_p_offset; + while (i--) { + cvmx_write64_uint32(write_address, 0); + __delay(10000); + } + + /* + * Read the IN_FIF_P_COUNT from the debug + * select. IN_FIF_P_COUNT can be unstable sometimes so + * read it twice with a write between the reads. This + * way we can tell the value is good as it will + * increment by one due to the write + */ + cvmx_write_csr(CVMX_PEXP_NPEI_DBG_SELECT, (pcie_port) ? 0xd7fc : 0xcffc); + cvmx_read_csr(CVMX_PEXP_NPEI_DBG_SELECT); + do { + dbg_data.u64 = cvmx_read_csr(CVMX_PEXP_NPEI_DBG_DATA); + old_in_fif_p_count = dbg_data.s.data & 0xff; + cvmx_write64_uint32(write_address, 0); + __delay(10000); + dbg_data.u64 = cvmx_read_csr(CVMX_PEXP_NPEI_DBG_DATA); + in_fif_p_count = dbg_data.s.data & 0xff; + } while (in_fif_p_count != ((old_in_fif_p_count+1) & 0xff)); + + /* Update in_fif_p_count for it's offset with respect to out_p_count */ + in_fif_p_count = (in_fif_p_count + in_p_offset) & 0xff; + + /* Read the OUT_P_COUNT from the debug select */ + cvmx_write_csr(CVMX_PEXP_NPEI_DBG_SELECT, (pcie_port) ? 0xd00f : 0xc80f); + cvmx_read_csr(CVMX_PEXP_NPEI_DBG_SELECT); + dbg_data.u64 = cvmx_read_csr(CVMX_PEXP_NPEI_DBG_DATA); + out_p_count = (dbg_data.s.data>>1) & 0xff; + + /* Check that the two counters are aligned */ + if (out_p_count != in_fif_p_count) { + cvmx_dprintf("PCIe: Port %d aligning TLP counters as workaround to maintain ordering\n", pcie_port); + while (in_fif_p_count != 0) { + cvmx_write64_uint32(write_address, 0); + __delay(10000); + in_fif_p_count = (in_fif_p_count + 1) & 0xff; + } + /* + * The EBH5200 board swapped the PCIe reset + * lines on the board. 
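/*
 * Illustrative sketch, not part of the patch: the PCIE-13340 workaround above
 * validates the unstable IN_FIF_P_COUNT reading by sampling it, issuing one
 * write, then sampling again, and only trusting the pair when the second
 * value is the first plus one modulo 256.  The simulation below shows just
 * that accept/reject rule; the counter model is an invented stand-in for the
 * real debug-select register.
 */
#include <stdio.h>
#include <stdint.h>

static uint8_t sim_counter;	/* stand-in for the hardware packet counter */

static uint8_t read_counter(void) { return sim_counter; }
static void issue_write(void)     { sim_counter++; }	/* each write bumps it by one */

int main(void)
{
	uint8_t before, after;

	do {
		before = read_counter();
		issue_write();
		after = read_counter();
	} while (after != (uint8_t)((before + 1) & 0xff));

	printf("stable sample: %u\n", after);
	return 0;
}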
This means we must + * bring both links down and up, which will + * cause the PCIe0 to need alignment + * again. Lots of messages will be displayed, + * but everything should work + */ + if ((cvmx_sysinfo_get()->board_type == CVMX_BOARD_TYPE_EBH5200) && + (pcie_port == 1)) + cvmx_pcie_rc_initialize(0); + /* Rety bringing this port up */ + goto retry; + } + } + + /* Display the link status */ + pciercx_cfg032.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG032(pcie_port)); + cvmx_dprintf("PCIe: Port %d link active, %d lanes\n", pcie_port, pciercx_cfg032.s.nlw); + + return 0; +} + +/** + * Initialize a host mode PCIe gen 2 link. This function takes a PCIe + * port from reset to a link up state. Software can then begin + * configuring the rest of the link. + * + * @pcie_port: PCIe port to initialize + * + * Return Zero on success. + */ +static int __cvmx_pcie_rc_initialize_link_gen2(int pcie_port) +{ + uint64_t start_cycle; + union cvmx_pemx_ctl_status pem_ctl_status; + union cvmx_pciercx_cfg032 pciercx_cfg032; + union cvmx_pciercx_cfg448 pciercx_cfg448; + + /* Bring up the link */ + pem_ctl_status.u64 = cvmx_read_csr(CVMX_PEMX_CTL_STATUS(pcie_port)); + pem_ctl_status.s.lnk_enb = 1; + cvmx_write_csr(CVMX_PEMX_CTL_STATUS(pcie_port), pem_ctl_status.u64); + + /* Wait for the link to come up */ + start_cycle = cvmx_get_cycle(); + do { + if (cvmx_get_cycle() - start_cycle > octeon_get_clock_rate()) + return -1; + __delay(10000); + pciercx_cfg032.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG032(pcie_port)); + } while ((pciercx_cfg032.s.dlla == 0) || (pciercx_cfg032.s.lt == 1)); + + /* + * Update the Replay Time Limit. Empirically, some PCIe + * devices take a little longer to respond than expected under + * load. As a workaround for this we configure the Replay Time + * Limit to the value expected for a 512 byte MPS instead of + * our actual 256 byte MPS. The numbers below are directly + * from the PCIe spec table 3-4 + */ + pciercx_cfg448.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG448(pcie_port)); + switch (pciercx_cfg032.s.nlw) { + case 1: /* 1 lane */ + pciercx_cfg448.s.rtl = 1677; + break; + case 2: /* 2 lanes */ + pciercx_cfg448.s.rtl = 867; + break; + case 4: /* 4 lanes */ + pciercx_cfg448.s.rtl = 462; + break; + case 8: /* 8 lanes */ + pciercx_cfg448.s.rtl = 258; + break; + } + cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG448(pcie_port), pciercx_cfg448.u32); + + return 0; +} + + +/** + * Initialize a PCIe gen 2 port for use in host(RC) mode. It doesn't enumerate + * the bus. + * + * @pcie_port: PCIe port to initialize + * + * Returns Zero on success. + */ +static int __cvmx_pcie_rc_initialize_gen2(int pcie_port) +{ + int i; + union cvmx_ciu_soft_prst ciu_soft_prst; + union cvmx_mio_rst_ctlx mio_rst_ctl; + union cvmx_pemx_bar_ctl pemx_bar_ctl; + union cvmx_pemx_ctl_status pemx_ctl_status; + union cvmx_pemx_bist_status pemx_bist_status; + union cvmx_pemx_bist_status2 pemx_bist_status2; + union cvmx_pciercx_cfg032 pciercx_cfg032; + union cvmx_pciercx_cfg515 pciercx_cfg515; + union cvmx_sli_ctl_portx sli_ctl_portx; + union cvmx_sli_mem_access_ctl sli_mem_access_ctl; + union cvmx_sli_mem_access_subidx mem_access_subid; + union cvmx_sriox_status_reg sriox_status_reg; + union cvmx_pemx_bar1_indexx bar1_index; + + if (octeon_has_feature(OCTEON_FEATURE_SRIO)) { + /* Make sure this interface isn't SRIO */ + if (OCTEON_IS_MODEL(OCTEON_CN66XX)) { + /* + * The CN66XX requires reading the + * MIO_QLMX_CFG register to figure out the + * port type. 
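/*
 * Illustrative sketch, not part of the patch: the gen2 link bring-up above
 * picks a Replay Time Limit from the negotiated lane count, using the
 * 512-byte-MPS values its comment cites from the PCIe specification.  The
 * lookup below only restates that table; returning -1 for an unlisted width
 * (meaning "leave the register unchanged") is an assumption of the sketch.
 */
#include <stdio.h>

static int replay_time_limit_for_width(int nlw)
{
	switch (nlw) {
	case 1: return 1677;	/* x1 */
	case 2: return 867;	/* x2 */
	case 4: return 462;	/* x4 */
	case 8: return 258;	/* x8 */
	default: return -1;	/* unlisted width: caller keeps the old value */
	}
}

int main(void)
{
	int widths[] = { 1, 2, 4, 8 };
	unsigned int i;

	for (i = 0; i < sizeof(widths) / sizeof(widths[0]); i++)
		printf("x%d -> rtl %d\n", widths[i],
		       replay_time_limit_for_width(widths[i]));
	return 0;
}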
+ */ + union cvmx_mio_qlmx_cfg qlmx_cfg; + qlmx_cfg.u64 = cvmx_read_csr(CVMX_MIO_QLMX_CFG(pcie_port)); + + if (qlmx_cfg.s.qlm_spd == 15) { + pr_notice("PCIe: Port %d is disabled, skipping.\n", pcie_port); + return -1; + } + + switch (qlmx_cfg.s.qlm_spd) { + case 0x1: /* SRIO 1x4 short */ + case 0x3: /* SRIO 1x4 long */ + case 0x4: /* SRIO 2x2 short */ + case 0x6: /* SRIO 2x2 long */ + pr_notice("PCIe: Port %d is SRIO, skipping.\n", pcie_port); + return -1; + case 0x9: /* SGMII */ + pr_notice("PCIe: Port %d is SGMII, skipping.\n", pcie_port); + return -1; + case 0xb: /* XAUI */ + pr_notice("PCIe: Port %d is XAUI, skipping.\n", pcie_port); + return -1; + case 0x0: /* PCIE gen2 */ + case 0x8: /* PCIE gen2 (alias) */ + case 0x2: /* PCIE gen1 */ + case 0xa: /* PCIE gen1 (alias) */ + break; + default: + pr_notice("PCIe: Port %d is unknown, skipping.\n", pcie_port); + return -1; + } + } else { + sriox_status_reg.u64 = cvmx_read_csr(CVMX_SRIOX_STATUS_REG(pcie_port)); + if (sriox_status_reg.s.srio) { + pr_notice("PCIe: Port %d is SRIO, skipping.\n", pcie_port); + return -1; + } + } + } + +#if 0 + /* This code is so that the PCIe analyzer is able to see 63XX traffic */ + pr_notice("PCIE : init for pcie analyzer.\n"); + cvmx_helper_qlm_jtag_init(); + cvmx_helper_qlm_jtag_shift_zeros(pcie_port, 85); + cvmx_helper_qlm_jtag_shift(pcie_port, 1, 1); + cvmx_helper_qlm_jtag_shift_zeros(pcie_port, 300-86); + cvmx_helper_qlm_jtag_shift_zeros(pcie_port, 85); + cvmx_helper_qlm_jtag_shift(pcie_port, 1, 1); + cvmx_helper_qlm_jtag_shift_zeros(pcie_port, 300-86); + cvmx_helper_qlm_jtag_shift_zeros(pcie_port, 85); + cvmx_helper_qlm_jtag_shift(pcie_port, 1, 1); + cvmx_helper_qlm_jtag_shift_zeros(pcie_port, 300-86); + cvmx_helper_qlm_jtag_shift_zeros(pcie_port, 85); + cvmx_helper_qlm_jtag_shift(pcie_port, 1, 1); + cvmx_helper_qlm_jtag_shift_zeros(pcie_port, 300-86); + cvmx_helper_qlm_jtag_update(pcie_port); +#endif + + /* Make sure we aren't trying to setup a target mode interface in host mode */ + mio_rst_ctl.u64 = cvmx_read_csr(CVMX_MIO_RST_CTLX(pcie_port)); + if (!mio_rst_ctl.s.host_mode) { + pr_notice("PCIe: Port %d in endpoint mode.\n", pcie_port); + return -1; + } + + /* CN63XX Pass 1.0 errata G-14395 requires the QLM De-emphasis be programmed */ + if (OCTEON_IS_MODEL(OCTEON_CN63XX_PASS1_0)) { + if (pcie_port) { + union cvmx_ciu_qlm ciu_qlm; + ciu_qlm.u64 = cvmx_read_csr(CVMX_CIU_QLM1); + ciu_qlm.s.txbypass = 1; + ciu_qlm.s.txdeemph = 5; + ciu_qlm.s.txmargin = 0x17; + cvmx_write_csr(CVMX_CIU_QLM1, ciu_qlm.u64); + } else { + union cvmx_ciu_qlm ciu_qlm; + ciu_qlm.u64 = cvmx_read_csr(CVMX_CIU_QLM0); + ciu_qlm.s.txbypass = 1; + ciu_qlm.s.txdeemph = 5; + ciu_qlm.s.txmargin = 0x17; + cvmx_write_csr(CVMX_CIU_QLM0, ciu_qlm.u64); + } + } + /* Bring the PCIe out of reset */ + if (pcie_port) + ciu_soft_prst.u64 = cvmx_read_csr(CVMX_CIU_SOFT_PRST1); + else + ciu_soft_prst.u64 = cvmx_read_csr(CVMX_CIU_SOFT_PRST); + /* + * After a chip reset the PCIe will also be in reset. If it + * isn't, most likely someone is trying to init it again + * without a proper PCIe reset + */ + if (ciu_soft_prst.s.soft_prst == 0) { + /* Reset the port */ + ciu_soft_prst.s.soft_prst = 1; + if (pcie_port) + cvmx_write_csr(CVMX_CIU_SOFT_PRST1, ciu_soft_prst.u64); + else + cvmx_write_csr(CVMX_CIU_SOFT_PRST, ciu_soft_prst.u64); + /* Wait until pcie resets the ports. 
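/*
 * Illustrative sketch, not part of the patch: the CN66XX branch above
 * classifies a QLM by its qlm_spd field before deciding whether the lanes can
 * be used as PCIe.  The helper below restates that mapping as a plain lookup
 * and only reproduces the speed codes that appear in the switch statement
 * above; anything else is reported as unknown.
 */
#include <stdio.h>

static const char *qlm_spd_to_string(unsigned int qlm_spd)
{
	switch (qlm_spd) {
	case 0x1: case 0x3: case 0x4: case 0x6:
		return "SRIO";		/* 1x4 / 2x2, short / long */
	case 0x9:
		return "SGMII";
	case 0xb:
		return "XAUI";
	case 0x0: case 0x2: case 0x8: case 0xa:
		return "PCIe";		/* gen1 / gen2 and aliases */
	case 15:
		return "disabled";
	default:
		return "unknown";
	}
}

int main(void)
{
	unsigned int spd;

	for (spd = 0; spd <= 15; spd++)
		printf("qlm_spd %2u -> %s\n", spd, qlm_spd_to_string(spd));
	return 0;
}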
*/ + udelay(2000); + } + if (pcie_port) { + ciu_soft_prst.u64 = cvmx_read_csr(CVMX_CIU_SOFT_PRST1); + ciu_soft_prst.s.soft_prst = 0; + cvmx_write_csr(CVMX_CIU_SOFT_PRST1, ciu_soft_prst.u64); + } else { + ciu_soft_prst.u64 = cvmx_read_csr(CVMX_CIU_SOFT_PRST); + ciu_soft_prst.s.soft_prst = 0; + cvmx_write_csr(CVMX_CIU_SOFT_PRST, ciu_soft_prst.u64); + } + + /* Wait for PCIe reset to complete */ + udelay(1000); + + /* + * Check and make sure PCIe came out of reset. If it doesn't + * the board probably hasn't wired the clocks up and the + * interface should be skipped. + */ + if (CVMX_WAIT_FOR_FIELD64(CVMX_MIO_RST_CTLX(pcie_port), union cvmx_mio_rst_ctlx, rst_done, ==, 1, 10000)) { + pr_notice("PCIe: Port %d stuck in reset, skipping.\n", pcie_port); + return -1; + } + + /* Check BIST status */ + pemx_bist_status.u64 = cvmx_read_csr(CVMX_PEMX_BIST_STATUS(pcie_port)); + if (pemx_bist_status.u64) + pr_notice("PCIe: BIST FAILED for port %d (0x%016llx)\n", pcie_port, CAST64(pemx_bist_status.u64)); + pemx_bist_status2.u64 = cvmx_read_csr(CVMX_PEMX_BIST_STATUS2(pcie_port)); + /* Errata PCIE-14766 may cause the lower 6 bits to be randomly set on CN63XXp1 */ + if (OCTEON_IS_MODEL(OCTEON_CN63XX_PASS1_X)) + pemx_bist_status2.u64 &= ~0x3full; + if (pemx_bist_status2.u64) + pr_notice("PCIe: BIST2 FAILED for port %d (0x%016llx)\n", pcie_port, CAST64(pemx_bist_status2.u64)); + + /* Initialize the config space CSRs */ + __cvmx_pcie_rc_initialize_config_space(pcie_port); + + /* Enable gen2 speed selection */ + pciercx_cfg515.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG515(pcie_port)); + pciercx_cfg515.s.dsc = 1; + cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG515(pcie_port), pciercx_cfg515.u32); + + /* Bring the link up */ + if (__cvmx_pcie_rc_initialize_link_gen2(pcie_port)) { + /* + * Some gen1 devices don't handle the gen 2 training + * correctly. Disable gen2 and try again with only + * gen1 + */ + union cvmx_pciercx_cfg031 pciercx_cfg031; + pciercx_cfg031.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG031(pcie_port)); + pciercx_cfg031.s.mls = 1; + cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG031(pcie_port), pciercx_cfg031.u32); + if (__cvmx_pcie_rc_initialize_link_gen2(pcie_port)) { + pr_notice("PCIe: Link timeout on port %d, probably the slot is empty\n", pcie_port); + return -1; + } + } + + /* Store merge control (SLI_MEM_ACCESS_CTL[TIMER,MAX_WORD]) */ + sli_mem_access_ctl.u64 = cvmx_read_csr(CVMX_PEXP_SLI_MEM_ACCESS_CTL); + sli_mem_access_ctl.s.max_word = 0; /* Allow 16 words to combine */ + sli_mem_access_ctl.s.timer = 127; /* Wait up to 127 cycles for more data */ + cvmx_write_csr(CVMX_PEXP_SLI_MEM_ACCESS_CTL, sli_mem_access_ctl.u64); + + /* Setup Mem access SubDIDs */ + mem_access_subid.u64 = 0; + mem_access_subid.s.port = pcie_port; /* Port the request is sent to. */ + mem_access_subid.s.nmerge = 0; /* Allow merging as it works on CN6XXX. */ + mem_access_subid.s.esr = 1; /* Endian-swap for Reads. */ + mem_access_subid.s.esw = 1; /* Endian-swap for Writes. */ + mem_access_subid.s.wtype = 0; /* "No snoop" and "Relaxed ordering" are not set */ + mem_access_subid.s.rtype = 0; /* "No snoop" and "Relaxed ordering" are not set */ + /* PCIe Address Bits <63:34>. */ + if (OCTEON_IS_MODEL(OCTEON_CN68XX)) + mem_access_subid.cn68xx.ba = 0; + else + mem_access_subid.s.ba = 0; + + /* + * Setup mem access 12-15 for port 0, 16-19 for port 1, + * supplying 36 bits of address space. 
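/*
 * Illustrative sketch, not part of the patch: the link bring-up above first
 * tries gen2 training and, if that times out, caps the maximum link speed to
 * gen1 and tries exactly once more before giving up.  The skeleton below
 * captures only that fallback shape; train_link() and its behaviour are
 * invented stand-ins, not the cvmx functions.
 */
#include <stdio.h>

static int train_link(int max_gen)
{
	/* stand-in: pretend gen2 training fails and gen1 succeeds */
	return max_gen >= 2 ? -1 : 0;
}

static int bring_up_link(void)
{
	if (train_link(2) == 0)
		return 0;	/* link came up at gen2 */
	/* Some gen1 devices mishandle gen2 training: cap the speed and retry. */
	if (train_link(1) == 0)
		return 0;	/* link came up at gen1 */
	return -1;		/* probably an empty slot */
}

int main(void)
{
	printf("link %s\n", bring_up_link() == 0 ? "up" : "down");
	return 0;
}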
+ */ + for (i = 12 + pcie_port * 4; i < 16 + pcie_port * 4; i++) { + cvmx_write_csr(CVMX_PEXP_SLI_MEM_ACCESS_SUBIDX(i), mem_access_subid.u64); + /* Set each SUBID to extend the addressable range */ + __cvmx_increment_ba(&mem_access_subid); + } + + /* + * Disable the peer to peer forwarding register. This must be + * setup by the OS after it enumerates the bus and assigns + * addresses to the PCIe busses. + */ + for (i = 0; i < 4; i++) { + cvmx_write_csr(CVMX_PEMX_P2P_BARX_START(i, pcie_port), -1); + cvmx_write_csr(CVMX_PEMX_P2P_BARX_END(i, pcie_port), -1); + } + + /* Set Octeon's BAR0 to decode 0-16KB. It overlaps with Bar2 */ + cvmx_write_csr(CVMX_PEMX_P2N_BAR0_START(pcie_port), 0); + + /* + * Set Octeon's BAR2 to decode 0-2^41. Bar0 and Bar1 take + * precedence where they overlap. It also overlaps with the + * device addresses, so make sure the peer to peer forwarding + * is set right. + */ + cvmx_write_csr(CVMX_PEMX_P2N_BAR2_START(pcie_port), 0); + + /* + * Setup BAR2 attributes + * Relaxed Ordering (NPEI_CTL_PORTn[PTLP_RO,CTLP_RO, WAIT_COM]) + * - PTLP_RO,CTLP_RO should normally be set (except for debug). + * - WAIT_COM=0 will likely work for all applications. + * Load completion relaxed ordering (NPEI_CTL_PORTn[WAITL_COM]) + */ + pemx_bar_ctl.u64 = cvmx_read_csr(CVMX_PEMX_BAR_CTL(pcie_port)); + pemx_bar_ctl.s.bar1_siz = 3; /* 256MB BAR1*/ + pemx_bar_ctl.s.bar2_enb = 1; + pemx_bar_ctl.s.bar2_esx = 1; + pemx_bar_ctl.s.bar2_cax = 0; + cvmx_write_csr(CVMX_PEMX_BAR_CTL(pcie_port), pemx_bar_ctl.u64); + sli_ctl_portx.u64 = cvmx_read_csr(CVMX_PEXP_SLI_CTL_PORTX(pcie_port)); + sli_ctl_portx.s.ptlp_ro = 1; + sli_ctl_portx.s.ctlp_ro = 1; + sli_ctl_portx.s.wait_com = 0; + sli_ctl_portx.s.waitl_com = 0; + cvmx_write_csr(CVMX_PEXP_SLI_CTL_PORTX(pcie_port), sli_ctl_portx.u64); + + /* BAR1 follows BAR2 */ + cvmx_write_csr(CVMX_PEMX_P2N_BAR1_START(pcie_port), CVMX_PCIE_BAR1_RC_BASE); + + bar1_index.u64 = 0; + bar1_index.s.addr_idx = (CVMX_PCIE_BAR1_PHYS_BASE >> 22); + bar1_index.s.ca = 1; /* Not Cached */ + bar1_index.s.end_swp = 1; /* Endian Swap mode */ + bar1_index.s.addr_v = 1; /* Valid entry */ + + for (i = 0; i < 16; i++) { + cvmx_write_csr(CVMX_PEMX_BAR1_INDEXX(i, pcie_port), bar1_index.u64); + /* 256MB / 16 >> 22 == 4 */ + bar1_index.s.addr_idx += (((1ull << 28) / 16ull) >> 22); + } + + /* + * Allow config retries for 250ms. Count is based off the 5Ghz + * SERDES clock. + */ + pemx_ctl_status.u64 = cvmx_read_csr(CVMX_PEMX_CTL_STATUS(pcie_port)); + pemx_ctl_status.s.cfg_rtry = 250 * 5000000 / 0x10000; + cvmx_write_csr(CVMX_PEMX_CTL_STATUS(pcie_port), pemx_ctl_status.u64); + + /* Display the link status */ + pciercx_cfg032.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG032(pcie_port)); + pr_notice("PCIe: Port %d link active, %d lanes, speed gen%d\n", pcie_port, pciercx_cfg032.s.nlw, pciercx_cfg032.s.ls); + + return 0; +} + +/** + * Initialize a PCIe port for use in host(RC) mode. It doesn't enumerate the bus. + * + * @pcie_port: PCIe port to initialize + * + * Returns Zero on success + */ +static int cvmx_pcie_rc_initialize(int pcie_port) +{ + int result; + if (octeon_has_feature(OCTEON_FEATURE_NPEI)) + result = __cvmx_pcie_rc_initialize_gen1(pcie_port); + else + result = __cvmx_pcie_rc_initialize_gen2(pcie_port); + return result; +} + +/* Above was cvmx-pcie.c, below original pcie.c */ + +/** + * Map a PCI device to the appropriate interrupt line + * + * @dev: The Linux PCI device structure for the device to map + * @slot: The slot number for this device on __BUS 0__. 
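/*
 * Illustrative sketch, not part of the patch: the cfg_rtry value programmed
 * above encodes 250 ms worth of 5 GHz SERDES cycles, and the division by
 * 0x10000 suggests the field counts in 64K-cycle units.  The one-liner below
 * just performs that arithmetic so the magic expression is easy to check:
 * 250 * 5,000,000 / 65536 truncates to 19073.
 */
#include <stdio.h>

int main(void)
{
	unsigned long long cycles_per_ms = 5000000ULL;	/* 5 GHz clock */
	unsigned long long cfg_rtry = 250ULL * cycles_per_ms / 0x10000ULL;

	printf("cfg_rtry = %llu\n", cfg_rtry);	/* prints 19073 */
	return 0;
}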
Linux + * enumerates through all the bridges and figures out the + * slot on Bus 0 where this device eventually hooks to. + * @pin: The PCI interrupt pin read from the device, then swizzled + * as it goes through each bridge. + * Returns Interrupt number for the device + */ +int octeon_pcie_pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) +{ + /* + * The EBH5600 board with the PCI to PCIe bridge mistakenly + * wires the first slot for both device id 2 and interrupt + * A. According to the PCI spec, device id 2 should be C. The + * following kludge attempts to fix this. + */ + if (strstr(octeon_board_type_string(), "EBH5600") && + dev->bus && dev->bus->parent) { + /* + * Iterate all the way up the device chain and find + * the root bus. + */ + while (dev->bus && dev->bus->parent) + dev = to_pci_dev(dev->bus->bridge); + /* + * If the root bus is number 0 and the PEX 8114 is the + * root, assume we are behind the miswired bus. We + * need to correct the swizzle level by two. Yuck. + */ + if ((dev->bus->number == 1) && + (dev->vendor == 0x10b5) && (dev->device == 0x8114)) { + /* + * The pin field is one based, not zero. We + * need to swizzle it by minus two. + */ + pin = ((pin - 3) & 3) + 1; + } + } + /* + * The -1 is because pin starts with one, not zero. It might + * be that this equation needs to include the slot number, but + * I don't have hardware to check that against. + */ + return pin - 1 + OCTEON_IRQ_PCI_INT0; +} + +static void set_cfg_read_retry(u32 retry_cnt) +{ + union cvmx_pemx_ctl_status pemx_ctl; + pemx_ctl.u64 = cvmx_read_csr(CVMX_PEMX_CTL_STATUS(1)); + pemx_ctl.s.cfg_rtry = retry_cnt; + cvmx_write_csr(CVMX_PEMX_CTL_STATUS(1), pemx_ctl.u64); +} + + +static u32 disable_cfg_read_retry(void) +{ + u32 retry_cnt; + + union cvmx_pemx_ctl_status pemx_ctl; + pemx_ctl.u64 = cvmx_read_csr(CVMX_PEMX_CTL_STATUS(1)); + retry_cnt = pemx_ctl.s.cfg_rtry; + pemx_ctl.s.cfg_rtry = 0; + cvmx_write_csr(CVMX_PEMX_CTL_STATUS(1), pemx_ctl.u64); + return retry_cnt; +} + +static int is_cfg_retry(void) +{ + union cvmx_pemx_int_sum pemx_int_sum; + pemx_int_sum.u64 = cvmx_read_csr(CVMX_PEMX_INT_SUM(1)); + if (pemx_int_sum.s.crs_dr) + return 1; + return 0; +} + +/* + * Read a value from configuration space + * + */ +static int octeon_pcie_read_config(unsigned int pcie_port, struct pci_bus *bus, + unsigned int devfn, int reg, int size, + u32 *val) +{ + union octeon_cvmemctl cvmmemctl; + union octeon_cvmemctl cvmmemctl_save; + int bus_number = bus->number; + int cfg_retry = 0; + int retry_cnt = 0; + int max_retry_cnt = 10; + u32 cfg_retry_cnt = 0; + + cvmmemctl_save.u64 = 0; + BUG_ON(pcie_port >= ARRAY_SIZE(enable_pcie_bus_num_war)); + /* + * For the top level bus make sure our hardware bus number + * matches the software one + */ + if (bus->parent == NULL) { + if (enable_pcie_bus_num_war[pcie_port]) + bus_number = 0; + else { + union cvmx_pciercx_cfg006 pciercx_cfg006; + pciercx_cfg006.u32 = cvmx_pcie_cfgx_read(pcie_port, + CVMX_PCIERCX_CFG006(pcie_port)); + if (pciercx_cfg006.s.pbnum != bus_number) { + pciercx_cfg006.s.pbnum = bus_number; + pciercx_cfg006.s.sbnum = bus_number; + pciercx_cfg006.s.subbnum = bus_number; + cvmx_pcie_cfgx_write(pcie_port, + CVMX_PCIERCX_CFG006(pcie_port), + pciercx_cfg006.u32); + } + } + } + + /* + * PCIe only has a single device connected to Octeon. It is + * always device ID 0. Don't bother doing reads for other + * device IDs on the first segment. 
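/*
 * Illustrative sketch, not part of the patch: the EBH5600 kludge above
 * rotates the already-swizzled interrupt pin by two positions with
 * ((pin - 3) & 3) + 1, and the final IRQ is then pin - 1 plus the base
 * interrupt number.  The table printed below makes the rotation explicit
 * (INTA..INTD are pins 1..4); the base value 0 used here is a placeholder,
 * not OCTEON_IRQ_PCI_INT0.
 */
#include <stdio.h>

int main(void)
{
	int base_irq = 0;	/* placeholder for OCTEON_IRQ_PCI_INT0 */
	unsigned int pin;

	for (pin = 1; pin <= 4; pin++) {
		unsigned int corrected = ((pin - 3) & 3) + 1;

		printf("INT%c -> INT%c -> irq %d\n",
		       'A' + pin - 1, 'A' + corrected - 1,
		       (int)(corrected - 1) + base_irq);
	}
	return 0;
}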
+ */ + if ((bus->parent == NULL) && (devfn >> 3 != 0)) + return PCIBIOS_FUNC_NOT_SUPPORTED; + + /* + * The following is a workaround for the CN57XX, CN56XX, + * CN55XX, and CN54XX errata with PCIe config reads from non + * existent devices. These chips will hang the PCIe link if a + * config read is performed that causes a UR response. + */ + if (OCTEON_IS_MODEL(OCTEON_CN56XX_PASS1) || + OCTEON_IS_MODEL(OCTEON_CN56XX_PASS1_1)) { + /* + * For our EBH5600 board, port 0 has a bridge with two + * PCI-X slots. We need a new special checks to make + * sure we only probe valid stuff. The PCIe->PCI-X + * bridge only respondes to device ID 0, function + * 0-1 + */ + if ((bus->parent == NULL) && (devfn >= 2)) + return PCIBIOS_FUNC_NOT_SUPPORTED; + /* + * The PCI-X slots are device ID 2,3. Choose one of + * the below "if" blocks based on what is plugged into + * the board. + */ +#if 1 + /* Use this option if you aren't using either slot */ + if (bus_number == 2) + return PCIBIOS_FUNC_NOT_SUPPORTED; +#elif 0 + /* + * Use this option if you are using the first slot but + * not the second. + */ + if ((bus_number == 2) && (devfn >> 3 != 2)) + return PCIBIOS_FUNC_NOT_SUPPORTED; +#elif 0 + /* + * Use this option if you are using the second slot + * but not the first. + */ + if ((bus_number == 2) && (devfn >> 3 != 3)) + return PCIBIOS_FUNC_NOT_SUPPORTED; +#elif 0 + /* Use this opion if you are using both slots */ + if ((bus_number == 2) && + !((devfn == (2 << 3)) || (devfn == (3 << 3)))) + return PCIBIOS_FUNC_NOT_SUPPORTED; +#endif + + /* The following #if gives a more complicated example. This is + the required checks for running a Nitrox CN16XX-NHBX in the + slot of the EBH5600. This card has a PLX PCIe bridge with + four Nitrox PLX parts behind it */ +#if 0 + /* PLX bridge with 4 ports */ + if ((bus_number == 4) && + !((devfn >> 3 >= 1) && (devfn >> 3 <= 4))) + return PCIBIOS_FUNC_NOT_SUPPORTED; + /* Nitrox behind PLX 1 */ + if ((bus_number == 5) && (devfn >> 3 != 0)) + return PCIBIOS_FUNC_NOT_SUPPORTED; + /* Nitrox behind PLX 2 */ + if ((bus_number == 6) && (devfn >> 3 != 0)) + return PCIBIOS_FUNC_NOT_SUPPORTED; + /* Nitrox behind PLX 3 */ + if ((bus_number == 7) && (devfn >> 3 != 0)) + return PCIBIOS_FUNC_NOT_SUPPORTED; + /* Nitrox behind PLX 4 */ + if ((bus_number == 8) && (devfn >> 3 != 0)) + return PCIBIOS_FUNC_NOT_SUPPORTED; +#endif + + /* + * Shorten the DID timeout so bus errors for PCIe + * config reads from non existent devices happen + * faster. This allows us to continue booting even if + * the above "if" checks are wrong. Once one of these + * errors happens, the PCIe port is dead. 
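/*
 * Illustrative sketch, not part of the patch: the probe filters above
 * repeatedly pull the device and function numbers out of the packed devfn
 * value, which carries the 5-bit device number in the upper bits and the
 * 3-bit function number in the low three bits.  The tiny helpers below
 * restate that encoding with an example value chosen for illustration.
 */
#include <stdio.h>

static unsigned int devfn_dev(unsigned int devfn) { return devfn >> 3; }
static unsigned int devfn_fn(unsigned int devfn)  { return devfn & 0x7; }
static unsigned int make_devfn(unsigned int dev, unsigned int fn)
{
	return (dev << 3) | (fn & 0x7);
}

int main(void)
{
	unsigned int devfn = make_devfn(2, 1);

	printf("devfn=%#x dev=%u fn=%u\n",
	       devfn, devfn_dev(devfn), devfn_fn(devfn));
	return 0;
}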
+ */ + cvmmemctl_save.u64 = __read_64bit_c0_register($11, 7); + cvmmemctl.u64 = cvmmemctl_save.u64; + cvmmemctl.s.didtto = 2; + __write_64bit_c0_register($11, 7, cvmmemctl.u64); + } + + if ((OCTEON_IS_MODEL(OCTEON_CN63XX)) && (enable_pcie_14459_war)) + cfg_retry_cnt = disable_cfg_read_retry(); + + pr_debug("pcie_cfg_rd port=%d b=%d devfn=0x%03x reg=0x%03x" + " size=%d ", pcie_port, bus_number, devfn, reg, size); + do { + switch (size) { + case 4: + *val = cvmx_pcie_config_read32(pcie_port, bus_number, + devfn >> 3, devfn & 0x7, reg); + break; + case 2: + *val = cvmx_pcie_config_read16(pcie_port, bus_number, + devfn >> 3, devfn & 0x7, reg); + break; + case 1: + *val = cvmx_pcie_config_read8(pcie_port, bus_number, + devfn >> 3, devfn & 0x7, reg); + break; + default: + if (OCTEON_IS_MODEL(OCTEON_CN63XX)) + set_cfg_read_retry(cfg_retry_cnt); + return PCIBIOS_FUNC_NOT_SUPPORTED; + } + if ((OCTEON_IS_MODEL(OCTEON_CN63XX)) && + (enable_pcie_14459_war)) { + cfg_retry = is_cfg_retry(); + retry_cnt++; + if (retry_cnt > max_retry_cnt) { + pr_err(" pcie cfg_read retries failed. retry_cnt=%d\n", + retry_cnt); + cfg_retry = 0; + } + } + } while (cfg_retry); + + if ((OCTEON_IS_MODEL(OCTEON_CN63XX)) && (enable_pcie_14459_war)) + set_cfg_read_retry(cfg_retry_cnt); + pr_debug("val=%08x : tries=%02d\n", *val, retry_cnt); + if (OCTEON_IS_MODEL(OCTEON_CN56XX_PASS1) || + OCTEON_IS_MODEL(OCTEON_CN56XX_PASS1_1)) + write_c0_cvmmemctl(cvmmemctl_save.u64); + return PCIBIOS_SUCCESSFUL; +} + +static int octeon_pcie0_read_config(struct pci_bus *bus, unsigned int devfn, + int reg, int size, u32 *val) +{ + return octeon_pcie_read_config(0, bus, devfn, reg, size, val); +} + +static int octeon_pcie1_read_config(struct pci_bus *bus, unsigned int devfn, + int reg, int size, u32 *val) +{ + return octeon_pcie_read_config(1, bus, devfn, reg, size, val); +} + +static int octeon_dummy_read_config(struct pci_bus *bus, unsigned int devfn, + int reg, int size, u32 *val) +{ + return PCIBIOS_FUNC_NOT_SUPPORTED; +} + +/* + * Write a value to PCI configuration space + */ +static int octeon_pcie_write_config(unsigned int pcie_port, struct pci_bus *bus, + unsigned int devfn, int reg, + int size, u32 val) +{ + int bus_number = bus->number; + + BUG_ON(pcie_port >= ARRAY_SIZE(enable_pcie_bus_num_war)); + + if ((bus->parent == NULL) && (enable_pcie_bus_num_war[pcie_port])) + bus_number = 0; + + pr_debug("pcie_cfg_wr port=%d b=%d devfn=0x%03x" + " reg=0x%03x size=%d val=%08x\n", pcie_port, bus_number, devfn, + reg, size, val); + + + switch (size) { + case 4: + cvmx_pcie_config_write32(pcie_port, bus_number, devfn >> 3, + devfn & 0x7, reg, val); + break; + case 2: + cvmx_pcie_config_write16(pcie_port, bus_number, devfn >> 3, + devfn & 0x7, reg, val); + break; + case 1: + cvmx_pcie_config_write8(pcie_port, bus_number, devfn >> 3, + devfn & 0x7, reg, val); + break; + default: + return PCIBIOS_FUNC_NOT_SUPPORTED; + } + return PCIBIOS_SUCCESSFUL; +} + +static int octeon_pcie0_write_config(struct pci_bus *bus, unsigned int devfn, + int reg, int size, u32 val) +{ + return octeon_pcie_write_config(0, bus, devfn, reg, size, val); +} + +static int octeon_pcie1_write_config(struct pci_bus *bus, unsigned int devfn, + int reg, int size, u32 val) +{ + return octeon_pcie_write_config(1, bus, devfn, reg, size, val); +} + +static int octeon_dummy_write_config(struct pci_bus *bus, unsigned int devfn, + int reg, int size, u32 val) +{ + return PCIBIOS_FUNC_NOT_SUPPORTED; +} + +static struct pci_ops octeon_pcie0_ops = { + .read = octeon_pcie0_read_config, + .write 
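/*
 * Illustrative sketch, not part of the patch: the config-read path above
 * retries the access while the hardware reports a configuration-retry status,
 * but gives up after a fixed attempt count so a misbehaving device cannot
 * wedge the boot.  The skeleton below shows only that bounded do/while shape;
 * read_once() and saw_retry_status() are invented stand-ins.
 */
#include <stdio.h>

static unsigned int attempts;

static unsigned int read_once(void) { return 0x12345678u; }
static int saw_retry_status(void)   { return attempts < 3; }	/* pretend 3 retries */

int main(void)
{
	const unsigned int max_retry = 10;
	unsigned int val;
	int retry;

	do {
		val = read_once();
		retry = saw_retry_status();
		attempts++;
		if (attempts > max_retry) {
			printf("giving up after %u attempts\n", attempts);
			retry = 0;
		}
	} while (retry);

	printf("val=%#x attempts=%u\n", val, attempts);
	return 0;
}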
= octeon_pcie0_write_config, +}; + +static struct resource octeon_pcie0_mem_resource = { + .name = "Octeon PCIe0 MEM", + .flags = IORESOURCE_MEM, +}; + +static struct resource octeon_pcie0_io_resource = { + .name = "Octeon PCIe0 IO", + .flags = IORESOURCE_IO, +}; + +static struct pci_controller octeon_pcie0_controller = { + .pci_ops = &octeon_pcie0_ops, + .mem_resource = &octeon_pcie0_mem_resource, + .io_resource = &octeon_pcie0_io_resource, +}; + +static struct pci_ops octeon_pcie1_ops = { + .read = octeon_pcie1_read_config, + .write = octeon_pcie1_write_config, +}; + +static struct resource octeon_pcie1_mem_resource = { + .name = "Octeon PCIe1 MEM", + .flags = IORESOURCE_MEM, +}; + +static struct resource octeon_pcie1_io_resource = { + .name = "Octeon PCIe1 IO", + .flags = IORESOURCE_IO, +}; + +static struct pci_controller octeon_pcie1_controller = { + .pci_ops = &octeon_pcie1_ops, + .mem_resource = &octeon_pcie1_mem_resource, + .io_resource = &octeon_pcie1_io_resource, +}; + +static struct pci_ops octeon_dummy_ops = { + .read = octeon_dummy_read_config, + .write = octeon_dummy_write_config, +}; + +static struct resource octeon_dummy_mem_resource = { + .name = "Virtual PCIe MEM", + .flags = IORESOURCE_MEM, +}; + +static struct resource octeon_dummy_io_resource = { + .name = "Virtual PCIe IO", + .flags = IORESOURCE_IO, +}; + +static struct pci_controller octeon_dummy_controller = { + .pci_ops = &octeon_dummy_ops, + .mem_resource = &octeon_dummy_mem_resource, + .io_resource = &octeon_dummy_io_resource, +}; + +static int device_needs_bus_num_war(uint32_t deviceid) +{ +#define IDT_VENDOR_ID 0x111d + + if ((deviceid & 0xffff) == IDT_VENDOR_ID) + return 1; + return 0; +} + +/** + * Initialize the Octeon PCIe controllers + * + * Returns + */ +static int __init octeon_pcie_setup(void) +{ + int result; + int host_mode; + int srio_war15205 = 0, port; + union cvmx_sli_ctl_portx sli_ctl_portx; + union cvmx_sriox_status_reg sriox_status_reg; + + /* These chips don't have PCIe */ + if (!octeon_has_feature(OCTEON_FEATURE_PCIE)) + return 0; + + /* No PCIe simulation */ + if (octeon_is_simulation()) + return 0; + + /* Disable PCI if instructed on the command line */ + if (pcie_disable) + return 0; + + /* Point pcibios_map_irq() to the PCIe version of it */ + octeon_pcibios_map_irq = octeon_pcie_pcibios_map_irq; + + /* + * PCIe I/O range. It is based on port 0 but includes up until + * port 1's end. + */ + set_io_port_base(CVMX_ADD_IO_SEG(cvmx_pcie_get_io_base_address(0))); + ioport_resource.start = 0; + ioport_resource.end = + cvmx_pcie_get_io_base_address(1) - + cvmx_pcie_get_io_base_address(0) + cvmx_pcie_get_io_size(1) - 1; + + /* + * Create a dummy PCIe controller to swallow up bus 0. IDT bridges + * don't work if the primary bus number is zero. Here we add a fake + * PCIe controller that the kernel will give bus 0. 
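/*
 * Illustrative sketch, not part of the patch: device_needs_bus_num_war()
 * above inspects the 32-bit dword read from config offset 0, whose low 16
 * bits are the vendor ID and whose high 16 bits are the device ID, and flags
 * IDT (vendor 0x111d) bridges.  The helper below restates that split; the
 * device ID in the example is made up for illustration.
 */
#include <stdio.h>
#include <stdint.h>

#define IDT_VENDOR_ID 0x111d

static int needs_bus_num_war(uint32_t id_dword)
{
	return (id_dword & 0xffff) == IDT_VENDOR_ID;
}

int main(void)
{
	uint32_t id = (0x8018u << 16) | IDT_VENDOR_ID;	/* hypothetical IDT bridge */

	printf("vendor=%#x device=%#x war=%d\n",
	       (unsigned int)(id & 0xffff), (unsigned int)(id >> 16),
	       needs_bus_num_war(id));
	return 0;
}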
This allows + * us to not change the normal kernel bus enumeration + */ + octeon_dummy_controller.io_map_base = -1; + octeon_dummy_controller.mem_resource->start = (1ull<<48); + octeon_dummy_controller.mem_resource->end = (1ull<<48); + register_pci_controller(&octeon_dummy_controller); + + if (octeon_has_feature(OCTEON_FEATURE_NPEI)) { + union cvmx_npei_ctl_status npei_ctl_status; + npei_ctl_status.u64 = cvmx_read_csr(CVMX_PEXP_NPEI_CTL_STATUS); + host_mode = npei_ctl_status.s.host_mode; + octeon_dma_bar_type = OCTEON_DMA_BAR_TYPE_PCIE; + } else { + union cvmx_mio_rst_ctlx mio_rst_ctl; + mio_rst_ctl.u64 = cvmx_read_csr(CVMX_MIO_RST_CTLX(0)); + host_mode = mio_rst_ctl.s.host_mode; + octeon_dma_bar_type = OCTEON_DMA_BAR_TYPE_PCIE2; + } + + if (host_mode) { + pr_notice("PCIe: Initializing port 0\n"); + /* CN63XX pass 1_x/2.0 errata PCIe-15205 */ + if (OCTEON_IS_MODEL(OCTEON_CN63XX_PASS1_X) || + OCTEON_IS_MODEL(OCTEON_CN63XX_PASS2_0)) { + sriox_status_reg.u64 = cvmx_read_csr(CVMX_SRIOX_STATUS_REG(0)); + if (sriox_status_reg.s.srio) { + srio_war15205 += 1; /* Port is SRIO */ + port = 0; + } + } + result = cvmx_pcie_rc_initialize(0); + if (result == 0) { + uint32_t device0; + /* Memory offsets are physical addresses */ + octeon_pcie0_controller.mem_offset = + cvmx_pcie_get_mem_base_address(0); + /* IO offsets are Mips virtual addresses */ + octeon_pcie0_controller.io_map_base = + CVMX_ADD_IO_SEG(cvmx_pcie_get_io_base_address + (0)); + octeon_pcie0_controller.io_offset = 0; + /* + * To keep things similar to PCI, we start + * device addresses at the same place as PCI + * uisng big bar support. This normally + * translates to 4GB-256MB, which is the same + * as most x86 PCs. + */ + octeon_pcie0_controller.mem_resource->start = + cvmx_pcie_get_mem_base_address(0) + + (4ul << 30) - (OCTEON_PCI_BAR1_HOLE_SIZE << 20); + octeon_pcie0_controller.mem_resource->end = + cvmx_pcie_get_mem_base_address(0) + + cvmx_pcie_get_mem_size(0) - 1; + /* + * Ports must be above 16KB for the ISA bus + * filtering in the PCI-X to PCI bridge. 
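/*
 * Illustrative sketch, not part of the patch: the port-0 resource setup above
 * starts device memory at "4GB minus the BAR1 hole" inside the port's mapped
 * window and ends it at the window size minus one.  The computation below
 * repeats that arithmetic with made-up base, size and hole values so the
 * offsets are easy to see; the real values come from the cvmx helpers and
 * OCTEON_PCI_BAR1_HOLE_SIZE at runtime.
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t mem_base = 0x1100000000ull;	/* hypothetical port window base */
	uint64_t mem_size = 0x200000000ull;	/* hypothetical window size (8GB) */
	uint64_t hole_mb  = 256;		/* hypothetical BAR1 hole in MB */

	uint64_t start = mem_base + (4ull << 30) - (hole_mb << 20);
	uint64_t end   = mem_base + mem_size - 1;

	printf("mem resource: %#llx - %#llx\n",
	       (unsigned long long)start, (unsigned long long)end);
	return 0;
}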
+ */ + octeon_pcie0_controller.io_resource->start = 4 << 10; + octeon_pcie0_controller.io_resource->end = + cvmx_pcie_get_io_size(0) - 1; + msleep(100); /* Some devices need extra time */ + register_pci_controller(&octeon_pcie0_controller); + device0 = cvmx_pcie_config_read32(0, 0, 0, 0, 0); + enable_pcie_bus_num_war[0] = + device_needs_bus_num_war(device0); + } + } else { + pr_notice("PCIe: Port 0 in endpoint mode, skipping.\n"); + /* CN63XX pass 1_x/2.0 errata PCIe-15205 */ + if (OCTEON_IS_MODEL(OCTEON_CN63XX_PASS1_X) || + OCTEON_IS_MODEL(OCTEON_CN63XX_PASS2_0)) { + srio_war15205 += 1; + port = 0; + } + } + + if (octeon_has_feature(OCTEON_FEATURE_NPEI)) { + host_mode = 1; + /* Skip the 2nd port on CN52XX if port 0 is in 4 lane mode */ + if (OCTEON_IS_MODEL(OCTEON_CN52XX)) { + union cvmx_npei_dbg_data dbg_data; + dbg_data.u64 = cvmx_read_csr(CVMX_PEXP_NPEI_DBG_DATA); + if (dbg_data.cn52xx.qlm0_link_width) + host_mode = 0; + } + } else { + union cvmx_mio_rst_ctlx mio_rst_ctl; + mio_rst_ctl.u64 = cvmx_read_csr(CVMX_MIO_RST_CTLX(1)); + host_mode = mio_rst_ctl.s.host_mode; + } + + if (host_mode) { + pr_notice("PCIe: Initializing port 1\n"); + /* CN63XX pass 1_x/2.0 errata PCIe-15205 */ + if (OCTEON_IS_MODEL(OCTEON_CN63XX_PASS1_X) || + OCTEON_IS_MODEL(OCTEON_CN63XX_PASS2_0)) { + sriox_status_reg.u64 = cvmx_read_csr(CVMX_SRIOX_STATUS_REG(1)); + if (sriox_status_reg.s.srio) { + srio_war15205 += 1; /* Port is SRIO */ + port = 1; + } + } + result = cvmx_pcie_rc_initialize(1); + if (result == 0) { + uint32_t device0; + /* Memory offsets are physical addresses */ + octeon_pcie1_controller.mem_offset = + cvmx_pcie_get_mem_base_address(1); + /* + * To calculate the address for accessing the 2nd PCIe device, + * either 'io_map_base' (pci_iomap()), or 'mips_io_port_base' + * (ioport_map()) value is added to + * pci_resource_start(dev,bar)). The 'mips_io_port_base' is set + * only once based on first PCIe. Also changing 'io_map_base' + * based on first slot's value so that both the routines will + * work properly. + */ + octeon_pcie1_controller.io_map_base = + CVMX_ADD_IO_SEG(cvmx_pcie_get_io_base_address(0)); + /* IO offsets are Mips virtual addresses */ + octeon_pcie1_controller.io_offset = + cvmx_pcie_get_io_base_address(1) - + cvmx_pcie_get_io_base_address(0); + /* + * To keep things similar to PCI, we start device + * addresses at the same place as PCI uisng big bar + * support. This normally translates to 4GB-256MB, + * which is the same as most x86 PCs. + */ + octeon_pcie1_controller.mem_resource->start = + cvmx_pcie_get_mem_base_address(1) + (4ul << 30) - + (OCTEON_PCI_BAR1_HOLE_SIZE << 20); + octeon_pcie1_controller.mem_resource->end = + cvmx_pcie_get_mem_base_address(1) + + cvmx_pcie_get_mem_size(1) - 1; + /* + * Ports must be above 16KB for the ISA bus filtering + * in the PCI-X to PCI bridge. 
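/*
 * Illustrative sketch, not part of the patch: the port-1 I/O setup above
 * points io_map_base at port 0's window and makes port 1's resource start
 * equal to the distance between the two windows, so io_map_base plus the
 * resource start still lands on port 1's real base.  The check below
 * demonstrates that identity with invented addresses.
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t io_base0 = 0x1000000000ull;	/* hypothetical port 0 I/O base */
	uint64_t io_base1 = 0x1000080000ull;	/* hypothetical port 1 I/O base */

	uint64_t io_map_base = io_base0;		/* shared base, as in the setup code */
	uint64_t res_start   = io_base1 - io_base0;	/* port 1 resource start */

	printf("port1 access base = %#llx (expected %#llx)\n",
	       (unsigned long long)(io_map_base + res_start),
	       (unsigned long long)io_base1);
	return 0;
}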
+ */ + octeon_pcie1_controller.io_resource->start = + cvmx_pcie_get_io_base_address(1) - + cvmx_pcie_get_io_base_address(0); + octeon_pcie1_controller.io_resource->end = + octeon_pcie1_controller.io_resource->start + + cvmx_pcie_get_io_size(1) - 1; + msleep(100); /* Some devices need extra time */ + register_pci_controller(&octeon_pcie1_controller); + device0 = cvmx_pcie_config_read32(1, 0, 0, 0, 0); + enable_pcie_bus_num_war[1] = + device_needs_bus_num_war(device0); + } + } else { + pr_notice("PCIe: Port 1 not in root complex mode, skipping.\n"); + /* CN63XX pass 1_x/2.0 errata PCIe-15205 */ + if (OCTEON_IS_MODEL(OCTEON_CN63XX_PASS1_X) || + OCTEON_IS_MODEL(OCTEON_CN63XX_PASS2_0)) { + srio_war15205 += 1; + port = 1; + } + } + + /* + * CN63XX pass 1_x/2.0 errata PCIe-15205 requires setting all + * of SRIO MACs SLI_CTL_PORT*[INT*_MAP] to similar value and + * all of PCIe Macs SLI_CTL_PORT*[INT*_MAP] to different value + * from the previous set values + */ + if (OCTEON_IS_MODEL(OCTEON_CN63XX_PASS1_X) || + OCTEON_IS_MODEL(OCTEON_CN63XX_PASS2_0)) { + if (srio_war15205 == 1) { + sli_ctl_portx.u64 = cvmx_read_csr(CVMX_PEXP_SLI_CTL_PORTX(port)); + sli_ctl_portx.s.inta_map = 1; + sli_ctl_portx.s.intb_map = 1; + sli_ctl_portx.s.intc_map = 1; + sli_ctl_portx.s.intd_map = 1; + cvmx_write_csr(CVMX_PEXP_SLI_CTL_PORTX(port), sli_ctl_portx.u64); + + sli_ctl_portx.u64 = cvmx_read_csr(CVMX_PEXP_SLI_CTL_PORTX(!port)); + sli_ctl_portx.s.inta_map = 0; + sli_ctl_portx.s.intb_map = 0; + sli_ctl_portx.s.intc_map = 0; + sli_ctl_portx.s.intd_map = 0; + cvmx_write_csr(CVMX_PEXP_SLI_CTL_PORTX(!port), sli_ctl_portx.u64); + } + } + + octeon_pci_dma_init(); + + return 0; +} +arch_initcall(octeon_pcie_setup); -- cgit v1.2.3