author     Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-27 10:05:51 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-27 10:05:51 +0000
commit     5d1646d90e1f2cceb9f0828f4b28318cd0ec7744 (patch)
tree       a94efe259b9009378be6d90eb30d2b019d95c194 /arch/sh/drivers
parent     Initial commit. (diff)
Adding upstream version 5.10.209.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'arch/sh/drivers')
39 files changed, 6020 insertions, 0 deletions
diff --git a/arch/sh/drivers/Kconfig b/arch/sh/drivers/Kconfig new file mode 100644 index 000000000..80a45ad28 --- /dev/null +++ b/arch/sh/drivers/Kconfig @@ -0,0 +1,20 @@ +# SPDX-License-Identifier: GPL-2.0 +source "arch/sh/drivers/dma/Kconfig" +source "arch/sh/cchips/Kconfig" + +menu "Additional SuperH Device Drivers" + +config HEARTBEAT + bool "Heartbeat LED" + help + Use the power-on LED on your machine as a load meter. The exact + behavior is platform-dependent, but normally the flash frequency is + a hyperbolic function of the 5-minute load average. + +config PUSH_SWITCH + tristate "Push switch support" + help + This enables support for the push switch framework, a simple + framework that allows for sysfs driven switch status reporting. + +endmenu diff --git a/arch/sh/drivers/Makefile b/arch/sh/drivers/Makefile new file mode 100644 index 000000000..56b0acace --- /dev/null +++ b/arch/sh/drivers/Makefile @@ -0,0 +1,11 @@ +# SPDX-License-Identifier: GPL-2.0 +# +# Makefile for the Linux SuperH-specific device drivers. +# + +obj-y += dma/ platform_early.o + +obj-$(CONFIG_PCI) += pci/ +obj-$(CONFIG_SUPERHYWAY) += superhyway/ +obj-$(CONFIG_PUSH_SWITCH) += push-switch.o +obj-$(CONFIG_HEARTBEAT) += heartbeat.o diff --git a/arch/sh/drivers/dma/Kconfig b/arch/sh/drivers/dma/Kconfig new file mode 100644 index 000000000..7d54f284c --- /dev/null +++ b/arch/sh/drivers/dma/Kconfig @@ -0,0 +1,74 @@ +# SPDX-License-Identifier: GPL-2.0 +menu "DMA support" + + +config SH_DMA + bool "SuperH on-chip DMA controller (DMAC) support" + depends on CPU_SH3 || CPU_SH4 + default n + +config SH_DMA_IRQ_MULTI + bool + depends on SH_DMA + default y if CPU_SUBTYPE_SH7750 || CPU_SUBTYPE_SH7751 || \ + CPU_SUBTYPE_SH7750S || CPU_SUBTYPE_SH7750R || \ + CPU_SUBTYPE_SH7751R || CPU_SUBTYPE_SH7091 || \ + CPU_SUBTYPE_SH7763 || CPU_SUBTYPE_SH7780 || \ + CPU_SUBTYPE_SH7785 || CPU_SUBTYPE_SH7760 + +config SH_DMA_API + depends on SH_DMA + bool "SuperH DMA API support" + default n + help + SH_DMA_API always enabled DMA API of used SuperH. + If you want to use DMA ENGINE, you must not enable this. + Please enable DMA_ENGINE and SH_DMAE. + +config NR_ONCHIP_DMA_CHANNELS + int + depends on SH_DMA + default "4" if CPU_SUBTYPE_SH7750 || CPU_SUBTYPE_SH7751 || \ + CPU_SUBTYPE_SH7750S || CPU_SUBTYPE_SH7091 + default "8" if CPU_SUBTYPE_SH7750R || CPU_SUBTYPE_SH7751R || \ + CPU_SUBTYPE_SH7760 + default "12" if CPU_SUBTYPE_SH7723 || CPU_SUBTYPE_SH7780 || \ + CPU_SUBTYPE_SH7785 || CPU_SUBTYPE_SH7724 + default "6" + help + This allows you to specify the number of channels that the on-chip + DMAC supports. This will be 4 for SH7750/SH7751/Sh7750S/SH7091 and 8 for the + SH7750R/SH7751R/SH7760, 12 for the SH7723/SH7780/SH7785/SH7724, default is 6. + +config SH_DMABRG + bool "SH7760 DMABRG support" + depends on CPU_SUBTYPE_SH7760 + help + The DMABRG does data transfers from main memory to Audio/USB units + of the SH7760. + Say Y if you want to use Audio/USB DMA on your SH7760 board. + +config PVR2_DMA + tristate "PowerVR 2 DMAC support" + depends on SH_DREAMCAST && SH_DMA + help + Selecting this will enable support for the PVR2 DMA controller. + As this chains off of the on-chip DMAC, that must also be + enabled by default. + + This is primarily used by the pvr2fb framebuffer driver for + certain optimizations, but is not necessary for functionality. + + If in doubt, say N. + +config G2_DMA + tristate "G2 Bus DMA support" + depends on SH_DREAMCAST && SH_DMA_API + help + This enables support for the DMA controller for the Dreamcast's + G2 bus. 
Drivers that want this will generally enable this on + their own. + + If in doubt, say N. + +endmenu diff --git a/arch/sh/drivers/dma/Makefile b/arch/sh/drivers/dma/Makefile new file mode 100644 index 000000000..d2fdd5620 --- /dev/null +++ b/arch/sh/drivers/dma/Makefile @@ -0,0 +1,9 @@ +# SPDX-License-Identifier: GPL-2.0 +# +# Makefile for the SuperH DMA specific kernel interface routines under Linux. +# + +obj-$(CONFIG_SH_DMA_API) += dma-sh.o dma-api.o dma-sysfs.o +obj-$(CONFIG_PVR2_DMA) += dma-pvr2.o +obj-$(CONFIG_G2_DMA) += dma-g2.o +obj-$(CONFIG_SH_DMABRG) += dmabrg.o diff --git a/arch/sh/drivers/dma/dma-api.c b/arch/sh/drivers/dma/dma-api.c new file mode 100644 index 000000000..ab9170494 --- /dev/null +++ b/arch/sh/drivers/dma/dma-api.c @@ -0,0 +1,417 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * arch/sh/drivers/dma/dma-api.c + * + * SuperH-specific DMA management API + * + * Copyright (C) 2003, 2004, 2005 Paul Mundt + */ +#include <linux/init.h> +#include <linux/module.h> +#include <linux/spinlock.h> +#include <linux/proc_fs.h> +#include <linux/seq_file.h> +#include <linux/list.h> +#include <linux/platform_device.h> +#include <linux/mm.h> +#include <linux/sched.h> +#include <linux/slab.h> +#include <asm/dma.h> + +DEFINE_SPINLOCK(dma_spin_lock); +static LIST_HEAD(registered_dmac_list); + +struct dma_info *get_dma_info(unsigned int chan) +{ + struct dma_info *info; + + /* + * Look for each DMAC's range to determine who the owner of + * the channel is. + */ + list_for_each_entry(info, ®istered_dmac_list, list) { + if ((chan < info->first_vchannel_nr) || + (chan >= info->first_vchannel_nr + info->nr_channels)) + continue; + + return info; + } + + return NULL; +} +EXPORT_SYMBOL(get_dma_info); + +struct dma_info *get_dma_info_by_name(const char *dmac_name) +{ + struct dma_info *info; + + list_for_each_entry(info, ®istered_dmac_list, list) { + if (dmac_name && (strcmp(dmac_name, info->name) != 0)) + continue; + else + return info; + } + + return NULL; +} +EXPORT_SYMBOL(get_dma_info_by_name); + +static unsigned int get_nr_channels(void) +{ + struct dma_info *info; + unsigned int nr = 0; + + if (unlikely(list_empty(®istered_dmac_list))) + return nr; + + list_for_each_entry(info, ®istered_dmac_list, list) + nr += info->nr_channels; + + return nr; +} + +struct dma_channel *get_dma_channel(unsigned int chan) +{ + struct dma_info *info = get_dma_info(chan); + struct dma_channel *channel; + int i; + + if (unlikely(!info)) + return ERR_PTR(-EINVAL); + + for (i = 0; i < info->nr_channels; i++) { + channel = &info->channels[i]; + if (channel->vchan == chan) + return channel; + } + + return NULL; +} +EXPORT_SYMBOL(get_dma_channel); + +int get_dma_residue(unsigned int chan) +{ + struct dma_info *info = get_dma_info(chan); + struct dma_channel *channel = get_dma_channel(chan); + + if (info->ops->get_residue) + return info->ops->get_residue(channel); + + return 0; +} +EXPORT_SYMBOL(get_dma_residue); + +static int search_cap(const char **haystack, const char *needle) +{ + const char **p; + + for (p = haystack; *p; p++) + if (strcmp(*p, needle) == 0) + return 1; + + return 0; +} + +/** + * request_dma_bycap - Allocate a DMA channel based on its capabilities + * @dmac: List of DMA controllers to search + * @caps: List of capabilities + * + * Search all channels of all DMA controllers to find a channel which + * matches the requested capabilities. The result is the channel + * number if a match is found, or %-ENODEV if no match is found. 
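 *
 * As an illustrative sketch only (the "tei" capability string below is a
 * hypothetical example; real strings are whatever the DMAC registered
 * through register_chan_caps()), a caller might request any matching
 * channel on the on-chip controller like so:
 *
 *	const char *dmac[] = { "sh_dmac", NULL };
 *	const char *caps[] = { "tei", NULL };
 *	int ch = request_dma_bycap(dmac, caps, "my-driver"); /* >= 0 on success */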
+ * + * Note that not all DMA controllers export capabilities, in which + * case they can never be allocated using this API, and so + * request_dma() must be used specifying the channel number. + */ +int request_dma_bycap(const char **dmac, const char **caps, const char *dev_id) +{ + unsigned int found = 0; + struct dma_info *info; + const char **p; + int i; + + BUG_ON(!dmac || !caps); + + list_for_each_entry(info, ®istered_dmac_list, list) + if (strcmp(*dmac, info->name) == 0) { + found = 1; + break; + } + + if (!found) + return -ENODEV; + + for (i = 0; i < info->nr_channels; i++) { + struct dma_channel *channel = &info->channels[i]; + + if (unlikely(!channel->caps)) + continue; + + for (p = caps; *p; p++) { + if (!search_cap(channel->caps, *p)) + break; + if (request_dma(channel->chan, dev_id) == 0) + return channel->chan; + } + } + + return -EINVAL; +} +EXPORT_SYMBOL(request_dma_bycap); + +int dmac_search_free_channel(const char *dev_id) +{ + struct dma_channel *channel = { 0 }; + struct dma_info *info = get_dma_info(0); + int i; + + for (i = 0; i < info->nr_channels; i++) { + channel = &info->channels[i]; + if (unlikely(!channel)) + return -ENODEV; + + if (atomic_read(&channel->busy) == 0) + break; + } + + if (info->ops->request) { + int result = info->ops->request(channel); + if (result) + return result; + + atomic_set(&channel->busy, 1); + return channel->chan; + } + + return -ENOSYS; +} + +int request_dma(unsigned int chan, const char *dev_id) +{ + struct dma_channel *channel = { 0 }; + struct dma_info *info = get_dma_info(chan); + int result; + + channel = get_dma_channel(chan); + if (atomic_xchg(&channel->busy, 1)) + return -EBUSY; + + strlcpy(channel->dev_id, dev_id, sizeof(channel->dev_id)); + + if (info->ops->request) { + result = info->ops->request(channel); + if (result) + atomic_set(&channel->busy, 0); + + return result; + } + + return 0; +} +EXPORT_SYMBOL(request_dma); + +void free_dma(unsigned int chan) +{ + struct dma_info *info = get_dma_info(chan); + struct dma_channel *channel = get_dma_channel(chan); + + if (info->ops->free) + info->ops->free(channel); + + atomic_set(&channel->busy, 0); +} +EXPORT_SYMBOL(free_dma); + +void dma_wait_for_completion(unsigned int chan) +{ + struct dma_info *info = get_dma_info(chan); + struct dma_channel *channel = get_dma_channel(chan); + + if (channel->flags & DMA_TEI_CAPABLE) { + wait_event(channel->wait_queue, + (info->ops->get_residue(channel) == 0)); + return; + } + + while (info->ops->get_residue(channel)) + cpu_relax(); +} +EXPORT_SYMBOL(dma_wait_for_completion); + +int register_chan_caps(const char *dmac, struct dma_chan_caps *caps) +{ + struct dma_info *info; + unsigned int found = 0; + int i; + + list_for_each_entry(info, ®istered_dmac_list, list) + if (strcmp(dmac, info->name) == 0) { + found = 1; + break; + } + + if (unlikely(!found)) + return -ENODEV; + + for (i = 0; i < info->nr_channels; i++, caps++) { + struct dma_channel *channel; + + if ((info->first_channel_nr + i) != caps->ch_num) + return -EINVAL; + + channel = &info->channels[i]; + channel->caps = caps->caplist; + } + + return 0; +} +EXPORT_SYMBOL(register_chan_caps); + +void dma_configure_channel(unsigned int chan, unsigned long flags) +{ + struct dma_info *info = get_dma_info(chan); + struct dma_channel *channel = get_dma_channel(chan); + + if (info->ops->configure) + info->ops->configure(channel, flags); +} +EXPORT_SYMBOL(dma_configure_channel); + +int dma_xfer(unsigned int chan, unsigned long from, + unsigned long to, size_t size, unsigned int mode) +{ + struct 
dma_info *info = get_dma_info(chan); + struct dma_channel *channel = get_dma_channel(chan); + + channel->sar = from; + channel->dar = to; + channel->count = size; + channel->mode = mode; + + return info->ops->xfer(channel); +} +EXPORT_SYMBOL(dma_xfer); + +int dma_extend(unsigned int chan, unsigned long op, void *param) +{ + struct dma_info *info = get_dma_info(chan); + struct dma_channel *channel = get_dma_channel(chan); + + if (info->ops->extend) + return info->ops->extend(channel, op, param); + + return -ENOSYS; +} +EXPORT_SYMBOL(dma_extend); + +static int dma_proc_show(struct seq_file *m, void *v) +{ + struct dma_info *info = v; + + if (list_empty(®istered_dmac_list)) + return 0; + + /* + * Iterate over each registered DMAC + */ + list_for_each_entry(info, ®istered_dmac_list, list) { + int i; + + /* + * Iterate over each channel + */ + for (i = 0; i < info->nr_channels; i++) { + struct dma_channel *channel = info->channels + i; + + if (!(channel->flags & DMA_CONFIGURED)) + continue; + + seq_printf(m, "%2d: %14s %s\n", i, + info->name, channel->dev_id); + } + } + + return 0; +} + +int register_dmac(struct dma_info *info) +{ + unsigned int total_channels, i; + + INIT_LIST_HEAD(&info->list); + + printk(KERN_INFO "DMA: Registering %s handler (%d channel%s).\n", + info->name, info->nr_channels, info->nr_channels > 1 ? "s" : ""); + + BUG_ON((info->flags & DMAC_CHANNELS_CONFIGURED) && !info->channels); + + info->pdev = platform_device_register_simple(info->name, -1, + NULL, 0); + if (IS_ERR(info->pdev)) + return PTR_ERR(info->pdev); + + /* + * Don't touch pre-configured channels + */ + if (!(info->flags & DMAC_CHANNELS_CONFIGURED)) { + unsigned int size; + + size = sizeof(struct dma_channel) * info->nr_channels; + + info->channels = kzalloc(size, GFP_KERNEL); + if (!info->channels) + return -ENOMEM; + } + + total_channels = get_nr_channels(); + info->first_vchannel_nr = total_channels; + for (i = 0; i < info->nr_channels; i++) { + struct dma_channel *chan = &info->channels[i]; + + atomic_set(&chan->busy, 0); + + chan->chan = info->first_channel_nr + i; + chan->vchan = info->first_channel_nr + i + total_channels; + + memcpy(chan->dev_id, "Unused", 7); + + if (info->flags & DMAC_CHANNELS_TEI_CAPABLE) + chan->flags |= DMA_TEI_CAPABLE; + + init_waitqueue_head(&chan->wait_queue); + dma_create_sysfs_files(chan, info); + } + + list_add(&info->list, ®istered_dmac_list); + + return 0; +} +EXPORT_SYMBOL(register_dmac); + +void unregister_dmac(struct dma_info *info) +{ + unsigned int i; + + for (i = 0; i < info->nr_channels; i++) + dma_remove_sysfs_files(info->channels + i, info); + + if (!(info->flags & DMAC_CHANNELS_CONFIGURED)) + kfree(info->channels); + + list_del(&info->list); + platform_device_unregister(info->pdev); +} +EXPORT_SYMBOL(unregister_dmac); + +static int __init dma_api_init(void) +{ + printk(KERN_NOTICE "DMA: Registering DMA API.\n"); + return proc_create_single("dma", 0, NULL, dma_proc_show) ? 
0 : -ENOMEM; +} +subsys_initcall(dma_api_init); + +MODULE_AUTHOR("Paul Mundt <lethal@linux-sh.org>"); +MODULE_DESCRIPTION("DMA API for SuperH"); +MODULE_LICENSE("GPL v2"); diff --git a/arch/sh/drivers/dma/dma-g2.c b/arch/sh/drivers/dma/dma-g2.c new file mode 100644 index 000000000..52a8ae5e3 --- /dev/null +++ b/arch/sh/drivers/dma/dma-g2.c @@ -0,0 +1,197 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * arch/sh/drivers/dma/dma-g2.c + * + * G2 bus DMA support + * + * Copyright (C) 2003 - 2006 Paul Mundt + */ +#include <linux/init.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/interrupt.h> +#include <asm/cacheflush.h> +#include <mach/sysasic.h> +#include <mach/dma.h> +#include <asm/dma.h> + +struct g2_channel { + unsigned long g2_addr; /* G2 bus address */ + unsigned long root_addr; /* Root bus (SH-4) address */ + unsigned long size; /* Size (in bytes), 32-byte aligned */ + unsigned long direction; /* Transfer direction */ + unsigned long ctrl; /* Transfer control */ + unsigned long chan_enable; /* Channel enable */ + unsigned long xfer_enable; /* Transfer enable */ + unsigned long xfer_stat; /* Transfer status */ +} __attribute__ ((aligned(32))); + +struct g2_status { + unsigned long g2_addr; + unsigned long root_addr; + unsigned long size; + unsigned long status; +} __attribute__ ((aligned(16))); + +struct g2_dma_info { + struct g2_channel channel[G2_NR_DMA_CHANNELS]; + unsigned long pad1[G2_NR_DMA_CHANNELS]; + unsigned long wait_state; + unsigned long pad2[10]; + unsigned long magic; + struct g2_status status[G2_NR_DMA_CHANNELS]; +} __attribute__ ((aligned(256))); + +static volatile struct g2_dma_info *g2_dma = (volatile struct g2_dma_info *)0xa05f7800; + +#define g2_bytes_remaining(i) \ + ((g2_dma->channel[i].size - \ + g2_dma->status[i].size) & 0x0fffffff) + +static irqreturn_t g2_dma_interrupt(int irq, void *dev_id) +{ + int i; + + for (i = 0; i < G2_NR_DMA_CHANNELS; i++) { + if (g2_dma->status[i].status & 0x20000000) { + unsigned int bytes = g2_bytes_remaining(i); + + if (likely(bytes == 0)) { + struct dma_info *info = dev_id; + struct dma_channel *chan = info->channels + i; + + wake_up(&chan->wait_queue); + + return IRQ_HANDLED; + } + } + } + + return IRQ_NONE; +} + +static int g2_enable_dma(struct dma_channel *chan) +{ + unsigned int chan_nr = chan->chan; + + g2_dma->channel[chan_nr].chan_enable = 1; + g2_dma->channel[chan_nr].xfer_enable = 1; + + return 0; +} + +static int g2_disable_dma(struct dma_channel *chan) +{ + unsigned int chan_nr = chan->chan; + + g2_dma->channel[chan_nr].chan_enable = 0; + g2_dma->channel[chan_nr].xfer_enable = 0; + + return 0; +} + +static int g2_xfer_dma(struct dma_channel *chan) +{ + unsigned int chan_nr = chan->chan; + + if (chan->sar & 31) { + printk("g2dma: unaligned source 0x%lx\n", chan->sar); + return -EINVAL; + } + + if (chan->dar & 31) { + printk("g2dma: unaligned dest 0x%lx\n", chan->dar); + return -EINVAL; + } + + /* Align the count */ + if (chan->count & 31) + chan->count = (chan->count + (32 - 1)) & ~(32 - 1); + + /* Fixup destination */ + chan->dar += 0xa0800000; + + /* Fixup direction */ + chan->mode = !chan->mode; + + flush_icache_range((unsigned long)chan->sar, chan->count); + + g2_disable_dma(chan); + + g2_dma->channel[chan_nr].g2_addr = chan->dar & 0x1fffffe0; + g2_dma->channel[chan_nr].root_addr = chan->sar & 0x1fffffe0; + g2_dma->channel[chan_nr].size = (chan->count & ~31) | 0x80000000; + g2_dma->channel[chan_nr].direction = chan->mode; + + /* + * bit 0 - ??? 
+ * bit 1 - if set, generate a hardware event on transfer completion + * bit 2 - ??? something to do with suspend? + */ + g2_dma->channel[chan_nr].ctrl = 5; /* ?? */ + + g2_enable_dma(chan); + + /* debug cruft */ + pr_debug("count, sar, dar, mode, ctrl, chan, xfer: %ld, 0x%08lx, " + "0x%08lx, %ld, %ld, %ld, %ld\n", + g2_dma->channel[chan_nr].size, + g2_dma->channel[chan_nr].root_addr, + g2_dma->channel[chan_nr].g2_addr, + g2_dma->channel[chan_nr].direction, + g2_dma->channel[chan_nr].ctrl, + g2_dma->channel[chan_nr].chan_enable, + g2_dma->channel[chan_nr].xfer_enable); + + return 0; +} + +static int g2_get_residue(struct dma_channel *chan) +{ + return g2_bytes_remaining(chan->chan); +} + +static struct dma_ops g2_dma_ops = { + .xfer = g2_xfer_dma, + .get_residue = g2_get_residue, +}; + +static struct dma_info g2_dma_info = { + .name = "g2_dmac", + .nr_channels = 4, + .ops = &g2_dma_ops, + .flags = DMAC_CHANNELS_TEI_CAPABLE, +}; + +static int __init g2_dma_init(void) +{ + int ret; + + ret = request_irq(HW_EVENT_G2_DMA, g2_dma_interrupt, 0, + "g2 DMA handler", &g2_dma_info); + if (unlikely(ret)) + return -EINVAL; + + /* Magic */ + g2_dma->wait_state = 27; + g2_dma->magic = 0x4659404f; + + ret = register_dmac(&g2_dma_info); + if (unlikely(ret != 0)) + free_irq(HW_EVENT_G2_DMA, &g2_dma_info); + + return ret; +} + +static void __exit g2_dma_exit(void) +{ + free_irq(HW_EVENT_G2_DMA, &g2_dma_info); + unregister_dmac(&g2_dma_info); +} + +subsys_initcall(g2_dma_init); +module_exit(g2_dma_exit); + +MODULE_AUTHOR("Paul Mundt <lethal@linux-sh.org>"); +MODULE_DESCRIPTION("G2 bus DMA driver"); +MODULE_LICENSE("GPL v2"); diff --git a/arch/sh/drivers/dma/dma-pvr2.c b/arch/sh/drivers/dma/dma-pvr2.c new file mode 100644 index 000000000..21c347543 --- /dev/null +++ b/arch/sh/drivers/dma/dma-pvr2.c @@ -0,0 +1,102 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * arch/sh/drivers/dma/dma-pvr2.c + * + * NEC PowerVR 2 (Dreamcast) DMA support + * + * Copyright (C) 2003, 2004 Paul Mundt + */ +#include <linux/init.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/interrupt.h> +#include <mach/sysasic.h> +#include <mach/dma.h> +#include <asm/dma.h> +#include <asm/io.h> + +static unsigned int xfer_complete; +static int count; + +static irqreturn_t pvr2_dma_interrupt(int irq, void *dev_id) +{ + if (get_dma_residue(PVR2_CASCADE_CHAN)) { + printk(KERN_WARNING "DMA: SH DMAC did not complete transfer " + "on channel %d, waiting..\n", PVR2_CASCADE_CHAN); + dma_wait_for_completion(PVR2_CASCADE_CHAN); + } + + if (count++ < 10) + pr_debug("Got a pvr2 dma interrupt for channel %d\n", + irq - HW_EVENT_PVR2_DMA); + + xfer_complete = 1; + + return IRQ_HANDLED; +} + +static int pvr2_request_dma(struct dma_channel *chan) +{ + if (__raw_readl(PVR2_DMA_MODE) != 0) + return -EBUSY; + + __raw_writel(0, PVR2_DMA_LMMODE0); + + return 0; +} + +static int pvr2_get_dma_residue(struct dma_channel *chan) +{ + return xfer_complete == 0; +} + +static int pvr2_xfer_dma(struct dma_channel *chan) +{ + if (chan->sar || !chan->dar) + return -EINVAL; + + xfer_complete = 0; + + __raw_writel(chan->dar, PVR2_DMA_ADDR); + __raw_writel(chan->count, PVR2_DMA_COUNT); + __raw_writel(chan->mode & DMA_MODE_MASK, PVR2_DMA_MODE); + + return 0; +} + +static struct dma_ops pvr2_dma_ops = { + .request = pvr2_request_dma, + .get_residue = pvr2_get_dma_residue, + .xfer = pvr2_xfer_dma, +}; + +static struct dma_info pvr2_dma_info = { + .name = "pvr2_dmac", + .nr_channels = 1, + .ops = &pvr2_dma_ops, + .flags = DMAC_CHANNELS_TEI_CAPABLE, +}; + 
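/*
 * Minimal sketch of how a client drives the dma-api.c interface above.
 * The channel number, transfer mode value and physical addresses here are
 * illustrative assumptions, not values taken from this patch.
 */
static int __maybe_unused example_dma_copy(unsigned long src_phys,
					   unsigned long dst_phys, size_t len)
{
	unsigned int chan = 0;			/* assumed free channel */
	int ret;

	ret = request_dma(chan, "example-client");
	if (ret)
		return ret;			/* -EBUSY if already owned */

	/* 0 lets the controller's configure op fall back to its defaults */
	dma_configure_channel(chan, 0);

	ret = dma_xfer(chan, src_phys, dst_phys, len, 0 /* assumed mode */);
	if (ret == 0)
		dma_wait_for_completion(chan);	/* sleeps on TEI-capable channels */

	free_dma(chan);
	return ret;
}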
+static int __init pvr2_dma_init(void) +{ + if (request_irq(HW_EVENT_PVR2_DMA, pvr2_dma_interrupt, 0, + "pvr2 DMA handler", NULL)) + pr_err("Failed to register pvr2 DMA handler interrupt\n"); + request_dma(PVR2_CASCADE_CHAN, "pvr2 cascade"); + + return register_dmac(&pvr2_dma_info); +} + +static void __exit pvr2_dma_exit(void) +{ + free_dma(PVR2_CASCADE_CHAN); + free_irq(HW_EVENT_PVR2_DMA, 0); + unregister_dmac(&pvr2_dma_info); +} + +subsys_initcall(pvr2_dma_init); +module_exit(pvr2_dma_exit); + +MODULE_AUTHOR("Paul Mundt <lethal@linux-sh.org>"); +MODULE_DESCRIPTION("NEC PowerVR 2 DMA driver"); +MODULE_LICENSE("GPL v2"); diff --git a/arch/sh/drivers/dma/dma-sh.c b/arch/sh/drivers/dma/dma-sh.c new file mode 100644 index 000000000..306fba156 --- /dev/null +++ b/arch/sh/drivers/dma/dma-sh.c @@ -0,0 +1,425 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * arch/sh/drivers/dma/dma-sh.c + * + * SuperH On-chip DMAC Support + * + * Copyright (C) 2000 Takashi YOSHII + * Copyright (C) 2003, 2004 Paul Mundt + * Copyright (C) 2005 Andriy Skulysh + */ +#include <linux/init.h> +#include <linux/interrupt.h> +#include <linux/module.h> +#include <linux/io.h> +#include <mach-dreamcast/mach/dma.h> +#include <asm/dma.h> +#include <asm/dma-register.h> +#include <cpu/dma-register.h> +#include <cpu/dma.h> + +/* + * Some of the SoCs feature two DMAC modules. In such a case, the channels are + * distributed equally among them. + */ +#ifdef SH_DMAC_BASE1 +#define SH_DMAC_NR_MD_CH (CONFIG_NR_ONCHIP_DMA_CHANNELS / 2) +#else +#define SH_DMAC_NR_MD_CH CONFIG_NR_ONCHIP_DMA_CHANNELS +#endif + +#define SH_DMAC_CH_SZ 0x10 + +/* + * Define the default configuration for dual address memory-memory transfer. + * The 0x400 value represents auto-request, external->external. + */ +#define RS_DUAL (DM_INC | SM_INC | RS_AUTO | TS_INDEX2VAL(XMIT_SZ_32BIT)) + +static unsigned long dma_find_base(unsigned int chan) +{ + unsigned long base = SH_DMAC_BASE0; + +#ifdef SH_DMAC_BASE1 + if (chan >= SH_DMAC_NR_MD_CH) + base = SH_DMAC_BASE1; +#endif + + return base; +} + +static unsigned long dma_base_addr(unsigned int chan) +{ + unsigned long base = dma_find_base(chan); + + chan = (chan % SH_DMAC_NR_MD_CH) * SH_DMAC_CH_SZ; + + /* DMAOR is placed inside the channel register space. Step over it. */ + if (chan >= DMAOR) + base += SH_DMAC_CH_SZ; + + return base + chan; +} + +#ifdef CONFIG_SH_DMA_IRQ_MULTI +static inline unsigned int get_dmte_irq(unsigned int chan) +{ + return chan >= 6 ? DMTE6_IRQ : DMTE0_IRQ; +} +#else + +static unsigned int dmte_irq_map[] = { + DMTE0_IRQ, DMTE0_IRQ + 1, DMTE0_IRQ + 2, DMTE0_IRQ + 3, + +#ifdef DMTE4_IRQ + DMTE4_IRQ, DMTE4_IRQ + 1, +#endif + +#ifdef DMTE6_IRQ + DMTE6_IRQ, DMTE6_IRQ + 1, +#endif + +#ifdef DMTE8_IRQ + DMTE8_IRQ, DMTE9_IRQ, DMTE10_IRQ, DMTE11_IRQ, +#endif +}; + +static inline unsigned int get_dmte_irq(unsigned int chan) +{ + return dmte_irq_map[chan]; +} +#endif + +/* + * We determine the correct shift size based off of the CHCR transmit size + * for the given channel. Since we know that it will take: + * + * info->count >> ts_shift[transmit_size] + * + * iterations to complete the transfer. 
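 *
 * For instance, with a 32-bit (4-byte) transmit size the table yields a
 * shift of 2, so a 64-byte transfer is programmed into TCR as
 * 64 >> 2 = 16 iterations.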
+ */ +static unsigned int ts_shift[] = TS_SHIFT; + +static inline unsigned int calc_xmit_shift(struct dma_channel *chan) +{ + u32 chcr = __raw_readl(dma_base_addr(chan->chan) + CHCR); + int cnt = ((chcr & CHCR_TS_LOW_MASK) >> CHCR_TS_LOW_SHIFT) | + ((chcr & CHCR_TS_HIGH_MASK) >> CHCR_TS_HIGH_SHIFT); + + return ts_shift[cnt]; +} + +/* + * The transfer end interrupt must read the chcr register to end the + * hardware interrupt active condition. + * Besides that it needs to waken any waiting process, which should handle + * setting up the next transfer. + */ +static irqreturn_t dma_tei(int irq, void *dev_id) +{ + struct dma_channel *chan = dev_id; + u32 chcr; + + chcr = __raw_readl(dma_base_addr(chan->chan) + CHCR); + + if (!(chcr & CHCR_TE)) + return IRQ_NONE; + + chcr &= ~(CHCR_IE | CHCR_DE); + __raw_writel(chcr, (dma_base_addr(chan->chan) + CHCR)); + + wake_up(&chan->wait_queue); + + return IRQ_HANDLED; +} + +static int sh_dmac_request_dma(struct dma_channel *chan) +{ + if (unlikely(!(chan->flags & DMA_TEI_CAPABLE))) + return 0; + + return request_irq(get_dmte_irq(chan->chan), dma_tei, IRQF_SHARED, + chan->dev_id, chan); +} + +static void sh_dmac_free_dma(struct dma_channel *chan) +{ + free_irq(get_dmte_irq(chan->chan), chan); +} + +static int +sh_dmac_configure_channel(struct dma_channel *chan, unsigned long chcr) +{ + if (!chcr) + chcr = RS_DUAL | CHCR_IE; + + if (chcr & CHCR_IE) { + chcr &= ~CHCR_IE; + chan->flags |= DMA_TEI_CAPABLE; + } else { + chan->flags &= ~DMA_TEI_CAPABLE; + } + + __raw_writel(chcr, (dma_base_addr(chan->chan) + CHCR)); + + chan->flags |= DMA_CONFIGURED; + return 0; +} + +static void sh_dmac_enable_dma(struct dma_channel *chan) +{ + int irq; + u32 chcr; + + chcr = __raw_readl(dma_base_addr(chan->chan) + CHCR); + chcr |= CHCR_DE; + + if (chan->flags & DMA_TEI_CAPABLE) + chcr |= CHCR_IE; + + __raw_writel(chcr, (dma_base_addr(chan->chan) + CHCR)); + + if (chan->flags & DMA_TEI_CAPABLE) { + irq = get_dmte_irq(chan->chan); + enable_irq(irq); + } +} + +static void sh_dmac_disable_dma(struct dma_channel *chan) +{ + int irq; + u32 chcr; + + if (chan->flags & DMA_TEI_CAPABLE) { + irq = get_dmte_irq(chan->chan); + disable_irq(irq); + } + + chcr = __raw_readl(dma_base_addr(chan->chan) + CHCR); + chcr &= ~(CHCR_DE | CHCR_TE | CHCR_IE); + __raw_writel(chcr, (dma_base_addr(chan->chan) + CHCR)); +} + +static int sh_dmac_xfer_dma(struct dma_channel *chan) +{ + /* + * If we haven't pre-configured the channel with special flags, use + * the defaults. + */ + if (unlikely(!(chan->flags & DMA_CONFIGURED))) + sh_dmac_configure_channel(chan, 0); + + sh_dmac_disable_dma(chan); + + /* + * Single-address mode usage note! + * + * It's important that we don't accidentally write any value to SAR/DAR + * (this includes 0) that hasn't been directly specified by the user if + * we're in single-address mode. + * + * In this case, only one address can be defined, anything else will + * result in a DMA address error interrupt (at least on the SH-4), + * which will subsequently halt the transfer. + * + * Channel 2 on the Dreamcast is a special case, as this is used for + * cascading to the PVR2 DMAC. In this case, we still need to write + * SAR and DAR, regardless of value, in order for cascading to work. 
+ */ + if (chan->sar || (mach_is_dreamcast() && + chan->chan == PVR2_CASCADE_CHAN)) + __raw_writel(chan->sar, (dma_base_addr(chan->chan) + SAR)); + if (chan->dar || (mach_is_dreamcast() && + chan->chan == PVR2_CASCADE_CHAN)) + __raw_writel(chan->dar, (dma_base_addr(chan->chan) + DAR)); + + __raw_writel(chan->count >> calc_xmit_shift(chan), + (dma_base_addr(chan->chan) + TCR)); + + sh_dmac_enable_dma(chan); + + return 0; +} + +static int sh_dmac_get_dma_residue(struct dma_channel *chan) +{ + if (!(__raw_readl(dma_base_addr(chan->chan) + CHCR) & CHCR_DE)) + return 0; + + return __raw_readl(dma_base_addr(chan->chan) + TCR) + << calc_xmit_shift(chan); +} + +/* + * DMAOR handling + */ +#if defined(CONFIG_CPU_SUBTYPE_SH7723) || \ + defined(CONFIG_CPU_SUBTYPE_SH7724) || \ + defined(CONFIG_CPU_SUBTYPE_SH7780) || \ + defined(CONFIG_CPU_SUBTYPE_SH7785) +#define NR_DMAOR 2 +#else +#define NR_DMAOR 1 +#endif + +#define dmaor_read_reg(n) __raw_readw(dma_find_base((n) * \ + SH_DMAC_NR_MD_CH) + DMAOR) +#define dmaor_write_reg(n, data) __raw_writew(data, \ + dma_find_base((n) * \ + SH_DMAC_NR_MD_CH) + DMAOR) + +static inline int dmaor_reset(int no) +{ + unsigned long dmaor = dmaor_read_reg(no); + + /* Try to clear the error flags first, incase they are set */ + dmaor &= ~(DMAOR_NMIF | DMAOR_AE); + dmaor_write_reg(no, dmaor); + + dmaor |= DMAOR_INIT; + dmaor_write_reg(no, dmaor); + + /* See if we got an error again */ + if ((dmaor_read_reg(no) & (DMAOR_AE | DMAOR_NMIF))) { + printk(KERN_ERR "dma-sh: Can't initialize DMAOR.\n"); + return -EINVAL; + } + + return 0; +} + +/* + * DMAE handling + */ +#ifdef CONFIG_CPU_SH4 + +#if defined(DMAE1_IRQ) +#define NR_DMAE 2 +#else +#define NR_DMAE 1 +#endif + +static const char *dmae_name[] = { + "DMAC Address Error0", + "DMAC Address Error1" +}; + +#ifdef CONFIG_SH_DMA_IRQ_MULTI +static inline unsigned int get_dma_error_irq(int n) +{ + return get_dmte_irq(n * 6); +} +#else + +static unsigned int dmae_irq_map[] = { + DMAE0_IRQ, + +#ifdef DMAE1_IRQ + DMAE1_IRQ, +#endif +}; + +static inline unsigned int get_dma_error_irq(int n) +{ + return dmae_irq_map[n]; +} +#endif + +static irqreturn_t dma_err(int irq, void *dummy) +{ + int i; + + for (i = 0; i < NR_DMAOR; i++) + dmaor_reset(i); + + disable_irq(irq); + + return IRQ_HANDLED; +} + +static int dmae_irq_init(void) +{ + int n; + + for (n = 0; n < NR_DMAE; n++) { + int i = request_irq(get_dma_error_irq(n), dma_err, + IRQF_SHARED, dmae_name[n], (void *)dmae_name[n]); + if (unlikely(i < 0)) { + printk(KERN_ERR "%s request_irq fail\n", dmae_name[n]); + return i; + } + } + + return 0; +} + +static void dmae_irq_free(void) +{ + int n; + + for (n = 0; n < NR_DMAE; n++) + free_irq(get_dma_error_irq(n), NULL); +} +#else +static inline int dmae_irq_init(void) +{ + return 0; +} + +static void dmae_irq_free(void) +{ +} +#endif + +static struct dma_ops sh_dmac_ops = { + .request = sh_dmac_request_dma, + .free = sh_dmac_free_dma, + .get_residue = sh_dmac_get_dma_residue, + .xfer = sh_dmac_xfer_dma, + .configure = sh_dmac_configure_channel, +}; + +static struct dma_info sh_dmac_info = { + .name = "sh_dmac", + .nr_channels = CONFIG_NR_ONCHIP_DMA_CHANNELS, + .ops = &sh_dmac_ops, + .flags = DMAC_CHANNELS_TEI_CAPABLE, +}; + +static int __init sh_dmac_init(void) +{ + struct dma_info *info = &sh_dmac_info; + int i, rc; + + /* + * Initialize DMAE, for parts that support it. + */ + rc = dmae_irq_init(); + if (unlikely(rc != 0)) + return rc; + + /* + * Initialize DMAOR, and clean up any error flags that may have + * been set. 
+ */ + for (i = 0; i < NR_DMAOR; i++) { + rc = dmaor_reset(i); + if (unlikely(rc != 0)) + return rc; + } + + return register_dmac(info); +} + +static void __exit sh_dmac_exit(void) +{ + dmae_irq_free(); + unregister_dmac(&sh_dmac_info); +} + +subsys_initcall(sh_dmac_init); +module_exit(sh_dmac_exit); + +MODULE_AUTHOR("Takashi YOSHII, Paul Mundt, Andriy Skulysh"); +MODULE_DESCRIPTION("SuperH On-Chip DMAC Support"); +MODULE_LICENSE("GPL v2"); diff --git a/arch/sh/drivers/dma/dma-sysfs.c b/arch/sh/drivers/dma/dma-sysfs.c new file mode 100644 index 000000000..8ef318150 --- /dev/null +++ b/arch/sh/drivers/dma/dma-sysfs.c @@ -0,0 +1,164 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * arch/sh/drivers/dma/dma-sysfs.c + * + * sysfs interface for SH DMA API + * + * Copyright (C) 2004 - 2006 Paul Mundt + */ +#include <linux/kernel.h> +#include <linux/init.h> +#include <linux/stat.h> +#include <linux/device.h> +#include <linux/platform_device.h> +#include <linux/err.h> +#include <linux/string.h> +#include <asm/dma.h> + +static struct bus_type dma_subsys = { + .name = "dma", + .dev_name = "dma", +}; + +static ssize_t dma_show_devices(struct device *dev, + struct device_attribute *attr, char *buf) +{ + ssize_t len = 0; + int i; + + for (i = 0; i < 16; i++) { + struct dma_info *info = get_dma_info(i); + struct dma_channel *channel = get_dma_channel(i); + + if (unlikely(!info) || !channel) + continue; + + len += sprintf(buf + len, "%2d: %14s %s\n", + channel->chan, info->name, + channel->dev_id); + } + + return len; +} + +static DEVICE_ATTR(devices, S_IRUGO, dma_show_devices, NULL); + +static int __init dma_subsys_init(void) +{ + int ret; + + ret = subsys_system_register(&dma_subsys, NULL); + if (unlikely(ret)) + return ret; + + return device_create_file(dma_subsys.dev_root, &dev_attr_devices); +} +postcore_initcall(dma_subsys_init); + +static ssize_t dma_show_dev_id(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct dma_channel *channel = to_dma_channel(dev); + return sprintf(buf, "%s\n", channel->dev_id); +} + +static ssize_t dma_store_dev_id(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct dma_channel *channel = to_dma_channel(dev); + strcpy(channel->dev_id, buf); + return count; +} + +static DEVICE_ATTR(dev_id, S_IRUGO | S_IWUSR, dma_show_dev_id, dma_store_dev_id); + +static ssize_t dma_store_config(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct dma_channel *channel = to_dma_channel(dev); + unsigned long config; + + config = simple_strtoul(buf, NULL, 0); + dma_configure_channel(channel->vchan, config); + + return count; +} + +static DEVICE_ATTR(config, S_IWUSR, NULL, dma_store_config); + +static ssize_t dma_show_mode(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct dma_channel *channel = to_dma_channel(dev); + return sprintf(buf, "0x%08x\n", channel->mode); +} + +static ssize_t dma_store_mode(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct dma_channel *channel = to_dma_channel(dev); + channel->mode = simple_strtoul(buf, NULL, 0); + return count; +} + +static DEVICE_ATTR(mode, S_IRUGO | S_IWUSR, dma_show_mode, dma_store_mode); + +#define dma_ro_attr(field, fmt) \ +static ssize_t dma_show_##field(struct device *dev, \ + struct device_attribute *attr, char *buf)\ +{ \ + struct dma_channel *channel = to_dma_channel(dev); \ + return sprintf(buf, fmt, channel->field); \ +} \ +static DEVICE_ATTR(field, 
S_IRUGO, dma_show_##field, NULL); + +dma_ro_attr(count, "0x%08x\n"); +dma_ro_attr(flags, "0x%08lx\n"); + +int dma_create_sysfs_files(struct dma_channel *chan, struct dma_info *info) +{ + struct device *dev = &chan->dev; + char name[16]; + int ret; + + dev->id = chan->vchan; + dev->bus = &dma_subsys; + + ret = device_register(dev); + if (ret) + return ret; + + ret |= device_create_file(dev, &dev_attr_dev_id); + ret |= device_create_file(dev, &dev_attr_count); + ret |= device_create_file(dev, &dev_attr_mode); + ret |= device_create_file(dev, &dev_attr_flags); + ret |= device_create_file(dev, &dev_attr_config); + + if (unlikely(ret)) { + dev_err(&info->pdev->dev, "Failed creating attrs\n"); + return ret; + } + + snprintf(name, sizeof(name), "dma%d", chan->chan); + return sysfs_create_link(&info->pdev->dev.kobj, &dev->kobj, name); +} + +void dma_remove_sysfs_files(struct dma_channel *chan, struct dma_info *info) +{ + struct device *dev = &chan->dev; + char name[16]; + + device_remove_file(dev, &dev_attr_dev_id); + device_remove_file(dev, &dev_attr_count); + device_remove_file(dev, &dev_attr_mode); + device_remove_file(dev, &dev_attr_flags); + device_remove_file(dev, &dev_attr_config); + + snprintf(name, sizeof(name), "dma%d", chan->chan); + sysfs_remove_link(&info->pdev->dev.kobj, name); + + device_unregister(dev); +} diff --git a/arch/sh/drivers/dma/dmabrg.c b/arch/sh/drivers/dma/dmabrg.c new file mode 100644 index 000000000..5b2c1fd25 --- /dev/null +++ b/arch/sh/drivers/dma/dmabrg.c @@ -0,0 +1,196 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * SH7760 DMABRG IRQ handling + * + * (c) 2007 MSC Vertriebsges.m.b.H, Manuel Lauss <mlau@msc-ge.com> + */ + +#include <linux/interrupt.h> +#include <linux/kernel.h> +#include <linux/slab.h> +#include <asm/dma.h> +#include <asm/dmabrg.h> +#include <asm/io.h> + +/* + * The DMABRG is a special DMA unit within the SH7760. It does transfers + * from USB-SRAM/Audio units to main memory (and also the LCDC; but that + * part is sensibly placed in the LCDC registers and requires no irqs) + * It has 3 IRQ lines which trigger 10 events, and works independently + * from the traditional SH DMAC (although it blocks usage of DMAC 0) + * + * BRGIRQID | component | dir | meaning | source + * ----------------------------------------------------- + * 0 | USB-DMA | ... | xfer done | DMABRGI1 + * 1 | USB-UAE | ... | USB addr err.| DMABRGI0 + * 2 | HAC0/SSI0 | play| all done | DMABRGI1 + * 3 | HAC0/SSI0 | play| half done | DMABRGI2 + * 4 | HAC0/SSI0 | rec | all done | DMABRGI1 + * 5 | HAC0/SSI0 | rec | half done | DMABRGI2 + * 6 | HAC1/SSI1 | play| all done | DMABRGI1 + * 7 | HAC1/SSI1 | play| half done | DMABRGI2 + * 8 | HAC1/SSI1 | rec | all done | DMABRGI1 + * 9 | HAC1/SSI1 | rec | half done | DMABRGI2 + * + * all can be enabled/disabled in the DMABRGCR register, + * as well as checked if they occurred. + * + * DMABRGI0 services USB DMA Address errors, but it still must be + * enabled/acked in the DMABRGCR register. USB-DMA complete indicator + * is grouped together with the audio buffer end indicators, too bad... + * + * DMABRGCR: Bits 31-24: audio-dma ENABLE flags, + * Bits 23-16: audio-dma STATUS flags, + * Bits 9-8: USB error/xfer ENABLE, + * Bits 1-0: USB error/xfer STATUS. + * Ack an IRQ by writing 0 to the STATUS flag. + * Mask IRQ by writing 0 to ENABLE flag. 
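 *
 * Worked example: BRGIRQID 2 (HAC0/SSI0 playback "all done") is enabled by
 * setting DMABRGCR bit 24 (2 + 22) and is reported in status bit 16, per
 * the enable/status split described above.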
+ * + * Usage is almost like with any other IRQ: + * dmabrg_request_irq(BRGIRQID, handler, data) + * dmabrg_free_irq(BRGIRQID) + * + * handler prototype: void brgirqhandler(void *data) + */ + +#define DMARSRA 0xfe090000 +#define DMAOR 0xffa00040 +#define DMACHCR0 0xffa0000c +#define DMABRGCR 0xfe3c0000 + +#define DMAOR_BRG 0x0000c000 +#define DMAOR_DMEN 0x00000001 + +#define DMABRGI0 68 +#define DMABRGI1 69 +#define DMABRGI2 70 + +struct dmabrg_handler { + void (*handler)(void *); + void *data; +} *dmabrg_handlers; + +static inline void dmabrg_call_handler(int i) +{ + dmabrg_handlers[i].handler(dmabrg_handlers[i].data); +} + +/* + * main DMABRG irq handler. It acks irqs and then + * handles every set and unmasked bit sequentially. + * No locking and no validity checks; it should be + * as fast as possible (audio!) + */ +static irqreturn_t dmabrg_irq(int irq, void *data) +{ + unsigned long dcr; + unsigned int i; + + dcr = __raw_readl(DMABRGCR); + __raw_writel(dcr & ~0x00ff0003, DMABRGCR); /* ack all */ + dcr &= dcr >> 8; /* ignore masked */ + + /* USB stuff, get it out of the way first */ + if (dcr & 1) + dmabrg_call_handler(DMABRGIRQ_USBDMA); + if (dcr & 2) + dmabrg_call_handler(DMABRGIRQ_USBDMAERR); + + /* Audio */ + dcr >>= 16; + while (dcr) { + i = __ffs(dcr); + dcr &= dcr - 1; + dmabrg_call_handler(i + DMABRGIRQ_A0TXF); + } + return IRQ_HANDLED; +} + +static void dmabrg_disable_irq(unsigned int dmairq) +{ + unsigned long dcr; + dcr = __raw_readl(DMABRGCR); + dcr &= ~(1 << ((dmairq > 1) ? dmairq + 22 : dmairq + 8)); + __raw_writel(dcr, DMABRGCR); +} + +static void dmabrg_enable_irq(unsigned int dmairq) +{ + unsigned long dcr; + dcr = __raw_readl(DMABRGCR); + dcr |= (1 << ((dmairq > 1) ? dmairq + 22 : dmairq + 8)); + __raw_writel(dcr, DMABRGCR); +} + +int dmabrg_request_irq(unsigned int dmairq, void(*handler)(void*), + void *data) +{ + if ((dmairq > 9) || !handler) + return -ENOENT; + if (dmabrg_handlers[dmairq].handler) + return -EBUSY; + + dmabrg_handlers[dmairq].handler = handler; + dmabrg_handlers[dmairq].data = data; + + dmabrg_enable_irq(dmairq); + return 0; +} +EXPORT_SYMBOL_GPL(dmabrg_request_irq); + +void dmabrg_free_irq(unsigned int dmairq) +{ + if (likely(dmairq < 10)) { + dmabrg_disable_irq(dmairq); + dmabrg_handlers[dmairq].handler = NULL; + dmabrg_handlers[dmairq].data = NULL; + } +} +EXPORT_SYMBOL_GPL(dmabrg_free_irq); + +static int __init dmabrg_init(void) +{ + unsigned long or; + int ret; + + dmabrg_handlers = kcalloc(10, sizeof(struct dmabrg_handler), + GFP_KERNEL); + if (!dmabrg_handlers) + return -ENOMEM; + +#ifdef CONFIG_SH_DMA + /* request DMAC channel 0 before anyone else can get it */ + ret = request_dma(0, "DMAC 0 (DMABRG)"); + if (ret < 0) + printk(KERN_INFO "DMABRG: DMAC ch0 not reserved!\n"); +#endif + + __raw_writel(0, DMABRGCR); + __raw_writel(0, DMACHCR0); + __raw_writel(0x94000000, DMARSRA); /* enable DMABRG in DMAC 0 */ + + /* enable DMABRG mode, enable the DMAC */ + or = __raw_readl(DMAOR); + __raw_writel(or | DMAOR_BRG | DMAOR_DMEN, DMAOR); + + ret = request_irq(DMABRGI0, dmabrg_irq, 0, + "DMABRG USB address error", NULL); + if (ret) + goto out0; + + ret = request_irq(DMABRGI1, dmabrg_irq, 0, + "DMABRG Transfer End", NULL); + if (ret) + goto out1; + + ret = request_irq(DMABRGI2, dmabrg_irq, 0, + "DMABRG Transfer Half", NULL); + if (ret == 0) + return ret; + + free_irq(DMABRGI1, NULL); +out1: free_irq(DMABRGI0, NULL); +out0: kfree(dmabrg_handlers); + return ret; +} +subsys_initcall(dmabrg_init); diff --git a/arch/sh/drivers/heartbeat.c 
b/arch/sh/drivers/heartbeat.c new file mode 100644 index 000000000..24391b444 --- /dev/null +++ b/arch/sh/drivers/heartbeat.c @@ -0,0 +1,152 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Generic heartbeat driver for regular LED banks + * + * Copyright (C) 2007 - 2010 Paul Mundt + * + * Most SH reference boards include a number of individual LEDs that can + * be independently controlled (either via a pre-defined hardware + * function or via the LED class, if desired -- the hardware tends to + * encapsulate some of the same "triggers" that the LED class supports, + * so there's not too much value in it). + * + * Additionally, most of these boards also have a LED bank that we've + * traditionally used for strobing the load average. This use case is + * handled by this driver, rather than giving each LED bit position its + * own struct device. + */ +#include <linux/init.h> +#include <linux/platform_device.h> +#include <linux/sched.h> +#include <linux/sched/loadavg.h> +#include <linux/timer.h> +#include <linux/io.h> +#include <linux/slab.h> +#include <asm/heartbeat.h> + +#define DRV_NAME "heartbeat" +#define DRV_VERSION "0.1.2" + +static unsigned char default_bit_pos[] = { 0, 1, 2, 3, 4, 5, 6, 7 }; + +static inline void heartbeat_toggle_bit(struct heartbeat_data *hd, + unsigned bit, unsigned int inverted) +{ + unsigned int new; + + new = (1 << hd->bit_pos[bit]); + if (inverted) + new = ~new; + + new &= hd->mask; + + switch (hd->regsize) { + case 32: + new |= ioread32(hd->base) & ~hd->mask; + iowrite32(new, hd->base); + break; + case 16: + new |= ioread16(hd->base) & ~hd->mask; + iowrite16(new, hd->base); + break; + default: + new |= ioread8(hd->base) & ~hd->mask; + iowrite8(new, hd->base); + break; + } +} + +static void heartbeat_timer(struct timer_list *t) +{ + struct heartbeat_data *hd = from_timer(hd, t, timer); + static unsigned bit = 0, up = 1; + + heartbeat_toggle_bit(hd, bit, hd->flags & HEARTBEAT_INVERTED); + + bit += up; + if ((bit == 0) || (bit == (hd->nr_bits)-1)) + up = -up; + + mod_timer(&hd->timer, jiffies + (110 - ((300 << FSHIFT) / + ((avenrun[0] / 5) + (3 << FSHIFT))))); +} + +static int heartbeat_drv_probe(struct platform_device *pdev) +{ + struct resource *res; + struct heartbeat_data *hd; + int i; + + if (unlikely(pdev->num_resources != 1)) { + dev_err(&pdev->dev, "invalid number of resources\n"); + return -EINVAL; + } + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (unlikely(res == NULL)) { + dev_err(&pdev->dev, "invalid resource\n"); + return -EINVAL; + } + + if (pdev->dev.platform_data) { + hd = pdev->dev.platform_data; + } else { + hd = kzalloc(sizeof(struct heartbeat_data), GFP_KERNEL); + if (unlikely(!hd)) + return -ENOMEM; + } + + hd->base = ioremap(res->start, resource_size(res)); + if (unlikely(!hd->base)) { + dev_err(&pdev->dev, "ioremap failed\n"); + + if (!pdev->dev.platform_data) + kfree(hd); + + return -ENXIO; + } + + if (!hd->nr_bits) { + hd->bit_pos = default_bit_pos; + hd->nr_bits = ARRAY_SIZE(default_bit_pos); + } + + hd->mask = 0; + for (i = 0; i < hd->nr_bits; i++) + hd->mask |= (1 << hd->bit_pos[i]); + + if (!hd->regsize) { + switch (res->flags & IORESOURCE_MEM_TYPE_MASK) { + case IORESOURCE_MEM_32BIT: + hd->regsize = 32; + break; + case IORESOURCE_MEM_16BIT: + hd->regsize = 16; + break; + case IORESOURCE_MEM_8BIT: + default: + hd->regsize = 8; + break; + } + } + + timer_setup(&hd->timer, heartbeat_timer, 0); + platform_set_drvdata(pdev, hd); + + return mod_timer(&hd->timer, jiffies + 1); +} + +static struct platform_driver 
heartbeat_driver = { + .probe = heartbeat_drv_probe, + .driver = { + .name = DRV_NAME, + .suppress_bind_attrs = true, + }, +}; + +static int __init heartbeat_init(void) +{ + printk(KERN_NOTICE DRV_NAME ": version %s loaded\n", DRV_VERSION); + return platform_driver_register(&heartbeat_driver); +} +device_initcall(heartbeat_init); diff --git a/arch/sh/drivers/pci/Makefile b/arch/sh/drivers/pci/Makefile new file mode 100644 index 000000000..d313fd3ce --- /dev/null +++ b/arch/sh/drivers/pci/Makefile @@ -0,0 +1,27 @@ +# SPDX-License-Identifier: GPL-2.0 +# +# Makefile for the PCI specific kernel interface routines under Linux. +# +obj-y += common.o pci.o + +obj-$(CONFIG_CPU_SUBTYPE_SH7751) += pci-sh7751.o ops-sh4.o +obj-$(CONFIG_CPU_SUBTYPE_SH7751R) += pci-sh7751.o ops-sh4.o +obj-$(CONFIG_CPU_SUBTYPE_SH7763) += pci-sh7780.o ops-sh4.o +obj-$(CONFIG_CPU_SUBTYPE_SH7780) += pci-sh7780.o ops-sh4.o +obj-$(CONFIG_CPU_SUBTYPE_SH7785) += pci-sh7780.o ops-sh4.o +obj-$(CONFIG_CPU_SUBTYPE_SH7786) += pcie-sh7786.o ops-sh7786.o + +obj-$(CONFIG_SH_DREAMCAST) += ops-dreamcast.o fixups-dreamcast.o \ + pci-dreamcast.o +obj-$(CONFIG_SH_SECUREEDGE5410) += fixups-snapgear.o +obj-$(CONFIG_SH_7751_SOLUTION_ENGINE) += fixups-se7751.o +obj-$(CONFIG_SH_RTS7751R2D) += fixups-rts7751r2d.o +obj-$(CONFIG_SH_SH03) += fixups-sh03.o +obj-$(CONFIG_SH_HIGHLANDER) += fixups-r7780rp.o +obj-$(CONFIG_SH_SH7785LCR) += fixups-r7780rp.o +obj-$(CONFIG_SH_SDK7786) += fixups-sdk7786.o +obj-$(CONFIG_SH_SDK7780) += fixups-sdk7780.o +obj-$(CONFIG_SH_7780_SOLUTION_ENGINE) += fixups-sdk7780.o +obj-$(CONFIG_SH_TITAN) += fixups-titan.o +obj-$(CONFIG_SH_LANDISK) += fixups-landisk.o +obj-$(CONFIG_SH_LBOX_RE2) += fixups-rts7751r2d.o diff --git a/arch/sh/drivers/pci/common.c b/arch/sh/drivers/pci/common.c new file mode 100644 index 000000000..2fd2b77e1 --- /dev/null +++ b/arch/sh/drivers/pci/common.c @@ -0,0 +1,159 @@ +// SPDX-License-Identifier: GPL-2.0 +#include <linux/pci.h> +#include <linux/interrupt.h> +#include <linux/timer.h> +#include <linux/kernel.h> + +/* + * These functions are used early on before PCI scanning is done + * and all of the pci_dev and pci_bus structures have been created. + */ +static struct pci_dev *fake_pci_dev(struct pci_channel *hose, + int top_bus, int busnr, int devfn) +{ + static struct pci_dev dev; + static struct pci_bus bus; + + dev.bus = &bus; + dev.sysdata = hose; + dev.devfn = devfn; + bus.number = busnr; + bus.sysdata = hose; + bus.ops = hose->pci_ops; + + if(busnr != top_bus) + /* Fake a parent bus structure. 
*/ + bus.parent = &bus; + else + bus.parent = NULL; + + return &dev; +} + +#define EARLY_PCI_OP(rw, size, type) \ +int __init early_##rw##_config_##size(struct pci_channel *hose, \ + int top_bus, int bus, int devfn, int offset, type value) \ +{ \ + return pci_##rw##_config_##size( \ + fake_pci_dev(hose, top_bus, bus, devfn), \ + offset, value); \ +} + +EARLY_PCI_OP(read, byte, u8 *) +EARLY_PCI_OP(read, word, u16 *) +EARLY_PCI_OP(read, dword, u32 *) +EARLY_PCI_OP(write, byte, u8) +EARLY_PCI_OP(write, word, u16) +EARLY_PCI_OP(write, dword, u32) + +int __init pci_is_66mhz_capable(struct pci_channel *hose, + int top_bus, int current_bus) +{ + u32 pci_devfn; + unsigned short vid; + int cap66 = -1; + u16 stat; + + pr_info("PCI: Checking 66MHz capabilities...\n"); + + for (pci_devfn = 0; pci_devfn < 0xff; pci_devfn++) { + if (PCI_FUNC(pci_devfn)) + continue; + if (early_read_config_word(hose, top_bus, current_bus, + pci_devfn, PCI_VENDOR_ID, &vid) != + PCIBIOS_SUCCESSFUL) + continue; + if (vid == 0xffff) + continue; + + /* check 66MHz capability */ + if (cap66 < 0) + cap66 = 1; + if (cap66) { + early_read_config_word(hose, top_bus, current_bus, + pci_devfn, PCI_STATUS, &stat); + if (!(stat & PCI_STATUS_66MHZ)) { + printk(KERN_DEBUG + "PCI: %02x:%02x not 66MHz capable.\n", + current_bus, pci_devfn); + cap66 = 0; + break; + } + } + } + + return cap66 > 0; +} + +static void pcibios_enable_err(struct timer_list *t) +{ + struct pci_channel *hose = from_timer(hose, t, err_timer); + + del_timer(&hose->err_timer); + printk(KERN_DEBUG "PCI: re-enabling error IRQ.\n"); + enable_irq(hose->err_irq); +} + +static void pcibios_enable_serr(struct timer_list *t) +{ + struct pci_channel *hose = from_timer(hose, t, serr_timer); + + del_timer(&hose->serr_timer); + printk(KERN_DEBUG "PCI: re-enabling system error IRQ.\n"); + enable_irq(hose->serr_irq); +} + +void pcibios_enable_timers(struct pci_channel *hose) +{ + if (hose->err_irq) { + timer_setup(&hose->err_timer, pcibios_enable_err, 0); + } + + if (hose->serr_irq) { + timer_setup(&hose->serr_timer, pcibios_enable_serr, 0); + } +} + +/* + * A simple handler for the regular PCI status errors, called from IRQ + * context. 
+ */ +unsigned int pcibios_handle_status_errors(unsigned long addr, + unsigned int status, + struct pci_channel *hose) +{ + unsigned int cmd = 0; + + if (status & PCI_STATUS_REC_MASTER_ABORT) { + printk(KERN_DEBUG "PCI: master abort, pc=0x%08lx\n", addr); + cmd |= PCI_STATUS_REC_MASTER_ABORT; + } + + if (status & PCI_STATUS_REC_TARGET_ABORT) { + printk(KERN_DEBUG "PCI: target abort: "); + pcibios_report_status(PCI_STATUS_REC_TARGET_ABORT | + PCI_STATUS_SIG_TARGET_ABORT | + PCI_STATUS_REC_MASTER_ABORT, 1); + pr_cont("\n"); + + cmd |= PCI_STATUS_REC_TARGET_ABORT; + } + + if (status & (PCI_STATUS_PARITY | PCI_STATUS_DETECTED_PARITY)) { + printk(KERN_DEBUG "PCI: parity error detected: "); + pcibios_report_status(PCI_STATUS_PARITY | + PCI_STATUS_DETECTED_PARITY, 1); + pr_cont("\n"); + + cmd |= PCI_STATUS_PARITY | PCI_STATUS_DETECTED_PARITY; + + /* Now back off of the IRQ for awhile */ + if (hose->err_irq) { + disable_irq_nosync(hose->err_irq); + hose->err_timer.expires = jiffies + HZ; + add_timer(&hose->err_timer); + } + } + + return cmd; +} diff --git a/arch/sh/drivers/pci/fixups-dreamcast.c b/arch/sh/drivers/pci/fixups-dreamcast.c new file mode 100644 index 000000000..41e4daee8 --- /dev/null +++ b/arch/sh/drivers/pci/fixups-dreamcast.c @@ -0,0 +1,84 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * arch/sh/drivers/pci/fixups-dreamcast.c + * + * PCI fixups for the Sega Dreamcast + * + * Copyright (C) 2001, 2002 M. R. Brown + * Copyright (C) 2002, 2003, 2006 Paul Mundt + * + * This file originally bore the message (with enclosed-$): + * Id: pci.c,v 1.3 2003/05/04 19:29:46 lethal Exp + * Dreamcast PCI: Supports SEGA Broadband Adaptor only. + */ + +#include <linux/sched.h> +#include <linux/kernel.h> +#include <linux/param.h> +#include <linux/interrupt.h> +#include <linux/init.h> +#include <linux/irq.h> +#include <linux/pci.h> +#include <linux/dma-map-ops.h> + +#include <asm/io.h> +#include <asm/irq.h> +#include <mach/pci.h> + +static void gapspci_fixup_resources(struct pci_dev *dev) +{ + struct pci_channel *p = dev->sysdata; + struct resource res; + struct pci_bus_region region; + + printk(KERN_NOTICE "PCI: Fixing up device %s\n", pci_name(dev)); + + switch (dev->device) { + case PCI_DEVICE_ID_SEGA_BBA: + /* + * We also assume that dev->devfn == 0 + */ + dev->resource[1].start = p->resources[0].start + 0x100; + dev->resource[1].end = dev->resource[1].start + 0x200 - 1; + + /* + * This is not a normal BAR, prevent any attempts to move + * the BAR, as this will result in a bus lock. + */ + dev->resource[1].flags |= IORESOURCE_PCI_FIXED; + + /* + * Redirect dma memory allocations to special memory window. + * + * If this GAPSPCI region were mapped by a BAR, the CPU + * phys_addr_t would be pci_resource_start(), and the bus + * address would be pci_bus_address(pci_resource_start()). + * But apparently there's no BAR mapping it, so we just + * "know" its CPU address is GAPSPCI_DMA_BASE. + */ + res.start = GAPSPCI_DMA_BASE; + res.end = GAPSPCI_DMA_BASE + GAPSPCI_DMA_SIZE - 1; + res.flags = IORESOURCE_MEM; + pcibios_resource_to_bus(dev->bus, ®ion, &res); + BUG_ON(dma_declare_coherent_memory(&dev->dev, + res.start, + region.start, + resource_size(&res))); + break; + default: + printk("PCI: Failed resource fixup\n"); + } +} +DECLARE_PCI_FIXUP_HEADER(PCI_ANY_ID, PCI_ANY_ID, gapspci_fixup_resources); + +int pcibios_map_platform_irq(const struct pci_dev *dev, u8 slot, u8 pin) +{ + /* + * The interrupt routing semantics here are quite trivial. 
+ * + * We basically only support one interrupt, so we only bother + * updating a device's interrupt line with this single shared + * interrupt. Keeps routing quite simple, doesn't it? + */ + return GAPSPCI_IRQ; +} diff --git a/arch/sh/drivers/pci/fixups-landisk.c b/arch/sh/drivers/pci/fixups-landisk.c new file mode 100644 index 000000000..53fa2fc87 --- /dev/null +++ b/arch/sh/drivers/pci/fixups-landisk.c @@ -0,0 +1,57 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * arch/sh/drivers/pci/fixups-landisk.c + * + * PCI initialization for the I-O DATA Device, Inc. LANDISK board + * + * Copyright (C) 2006 kogiidena + * Copyright (C) 2010 Nobuhiro Iwamatsu + */ +#include <linux/kernel.h> +#include <linux/types.h> +#include <linux/init.h> +#include <linux/delay.h> +#include <linux/pci.h> +#include <linux/sh_intc.h> +#include "pci-sh4.h" + +#define PCIMCR_MRSET_OFF 0xBFFFFFFF +#define PCIMCR_RFSH_OFF 0xFFFFFFFB + +int pcibios_map_platform_irq(const struct pci_dev *pdev, u8 slot, u8 pin) +{ + /* + * slot0: pin1-4 = irq5,6,7,8 + * slot1: pin1-4 = irq6,7,8,5 + * slot2: pin1-4 = irq7,8,5,6 + * slot3: pin1-4 = irq8,5,6,7 + */ + int irq = ((slot + pin - 1) & 0x3) + evt2irq(0x2a0); + + if ((slot | (pin - 1)) > 0x3) { + printk(KERN_WARNING "PCI: Bad IRQ mapping request for slot %d pin %c\n", + slot, pin - 1 + 'A'); + return -1; + } + return irq; +} + +int pci_fixup_pcic(struct pci_channel *chan) +{ + unsigned long bcr1, mcr; + + bcr1 = __raw_readl(SH7751_BCR1); + bcr1 |= 0x40080000; /* Enable Bit 19 BREQEN, set PCIC to slave */ + pci_write_reg(chan, bcr1, SH4_PCIBCR1); + + mcr = __raw_readl(SH7751_MCR); + mcr = (mcr & PCIMCR_MRSET_OFF) & PCIMCR_RFSH_OFF; + pci_write_reg(chan, mcr, SH4_PCIMCR); + + pci_write_reg(chan, 0x0c000000, SH7751_PCICONF5); + pci_write_reg(chan, 0xd0000000, SH7751_PCICONF6); + pci_write_reg(chan, 0x0c000000, SH4_PCILAR0); + pci_write_reg(chan, 0x00000000, SH4_PCILAR1); + + return 0; +} diff --git a/arch/sh/drivers/pci/fixups-r7780rp.c b/arch/sh/drivers/pci/fixups-r7780rp.c new file mode 100644 index 000000000..3c9139c59 --- /dev/null +++ b/arch/sh/drivers/pci/fixups-r7780rp.c @@ -0,0 +1,18 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * arch/sh/drivers/pci/fixups-r7780rp.c + * + * Highlander R7780RP-1 PCI fixups + * + * Copyright (C) 2003 Lineo uSolutions, Inc. + * Copyright (C) 2004 - 2006 Paul Mundt + */ +#include <linux/pci.h> +#include <linux/io.h> +#include <linux/sh_intc.h> +#include "pci-sh4.h" + +int pcibios_map_platform_irq(const struct pci_dev *pdev, u8 slot, u8 pin) +{ + return evt2irq(0xa20) + slot; +} diff --git a/arch/sh/drivers/pci/fixups-rts7751r2d.c b/arch/sh/drivers/pci/fixups-rts7751r2d.c new file mode 100644 index 000000000..3f0a6fe16 --- /dev/null +++ b/arch/sh/drivers/pci/fixups-rts7751r2d.c @@ -0,0 +1,64 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * arch/sh/drivers/pci/fixups-rts7751r2d.c + * + * RTS7751R2D / LBOXRE2 PCI fixups + * + * Copyright (C) 2003 Lineo uSolutions, Inc. 
+ * Copyright (C) 2004 Paul Mundt + * Copyright (C) 2007 Nobuhiro Iwamatsu + */ +#include <linux/pci.h> +#include <mach/lboxre2.h> +#include <mach/r2d.h> +#include "pci-sh4.h" +#include <generated/machtypes.h> + +#define PCIMCR_MRSET_OFF 0xBFFFFFFF +#define PCIMCR_RFSH_OFF 0xFFFFFFFB + +static u8 rts7751r2d_irq_tab[] = { + IRQ_PCI_INTA, + IRQ_PCI_INTB, + IRQ_PCI_INTC, + IRQ_PCI_INTD, +}; + +static char lboxre2_irq_tab[] = { + IRQ_ETH0, IRQ_ETH1, IRQ_INTA, IRQ_INTD, +}; + +int pcibios_map_platform_irq(const struct pci_dev *pdev, u8 slot, u8 pin) +{ + if (mach_is_lboxre2()) + return lboxre2_irq_tab[slot]; + else + return rts7751r2d_irq_tab[slot]; +} + +int pci_fixup_pcic(struct pci_channel *chan) +{ + unsigned long bcr1, mcr; + + bcr1 = __raw_readl(SH7751_BCR1); + bcr1 |= 0x40080000; /* Enable Bit 19 BREQEN, set PCIC to slave */ + pci_write_reg(chan, bcr1, SH4_PCIBCR1); + + /* Enable all interrupts, so we known what to fix */ + pci_write_reg(chan, 0x0000c3ff, SH4_PCIINTM); + pci_write_reg(chan, 0x0000380f, SH4_PCIAINTM); + + pci_write_reg(chan, 0xfb900047, SH7751_PCICONF1); + pci_write_reg(chan, 0xab000001, SH7751_PCICONF4); + + mcr = __raw_readl(SH7751_MCR); + mcr = (mcr & PCIMCR_MRSET_OFF) & PCIMCR_RFSH_OFF; + pci_write_reg(chan, mcr, SH4_PCIMCR); + + pci_write_reg(chan, 0x0c000000, SH7751_PCICONF5); + pci_write_reg(chan, 0xd0000000, SH7751_PCICONF6); + pci_write_reg(chan, 0x0c000000, SH4_PCILAR0); + pci_write_reg(chan, 0x00000000, SH4_PCILAR1); + + return 0; +} diff --git a/arch/sh/drivers/pci/fixups-sdk7780.c b/arch/sh/drivers/pci/fixups-sdk7780.c new file mode 100644 index 000000000..c30604048 --- /dev/null +++ b/arch/sh/drivers/pci/fixups-sdk7780.c @@ -0,0 +1,40 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * arch/sh/drivers/pci/fixups-sdk7780.c + * + * PCI fixups for the SDK7780SE03 + * + * Copyright (C) 2003 Lineo uSolutions, Inc. + * Copyright (C) 2004 - 2006 Paul Mundt + * Copyright (C) 2006 Nobuhiro Iwamatsu + */ +#include <linux/pci.h> +#include <linux/io.h> +#include <linux/sh_intc.h> +#include "pci-sh4.h" + +#define IRQ_INTA evt2irq(0xa20) +#define IRQ_INTB evt2irq(0xa40) +#define IRQ_INTC evt2irq(0xa60) +#define IRQ_INTD evt2irq(0xa80) + +/* IDSEL [16][17][18][19][20][21][22][23][24][25][26][27][28][29][30][31] */ +static char sdk7780_irq_tab[4][16] = { + /* INTA */ + { IRQ_INTA, IRQ_INTD, IRQ_INTC, IRQ_INTD, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1 }, + /* INTB */ + { IRQ_INTB, IRQ_INTA, -1, IRQ_INTA, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1 }, + /* INTC */ + { IRQ_INTC, IRQ_INTB, -1, IRQ_INTB, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1 }, + /* INTD */ + { IRQ_INTD, IRQ_INTC, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1 }, +}; + +int pcibios_map_platform_irq(const struct pci_dev *pdev, u8 slot, u8 pin) +{ + return sdk7780_irq_tab[pin-1][slot]; +} diff --git a/arch/sh/drivers/pci/fixups-sdk7786.c b/arch/sh/drivers/pci/fixups-sdk7786.c new file mode 100644 index 000000000..6972af7b4 --- /dev/null +++ b/arch/sh/drivers/pci/fixups-sdk7786.c @@ -0,0 +1,64 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * SDK7786 FPGA PCIe mux handling + * + * Copyright (C) 2010 Paul Mundt + */ +#define pr_fmt(fmt) "PCI: " fmt + +#include <linux/init.h> +#include <linux/kernel.h> +#include <linux/pci.h> +#include <mach/fpga.h> + +/* + * The SDK7786 FPGA supports mangling of most of the slots in some way or + * another. Slots 3/4 are special in that only one can be supported at a + * time, and both appear on port 3 to the PCI bus scan. 
Enabling slot 4 + * (the horizontal edge connector) will disable slot 3 entirely. + * + * Misconfigurations can be detected through the FPGA via the slot + * resistors to determine card presence. Hotplug remains unsupported. + */ +static unsigned int slot4en __initdata; + +char *__init pcibios_setup(char *str) +{ + if (strcmp(str, "slot4en") == 0) { + slot4en = 1; + return NULL; + } + + return str; +} + +static int __init sdk7786_pci_init(void) +{ + u16 data = fpga_read_reg(PCIECR); + + /* + * Enable slot #4 if it's been specified on the command line. + * + * Optionally reroute if slot #4 has a card present while slot #3 + * does not, regardless of command line value. + * + * Card presence is logically inverted. + */ + slot4en ?: (!(data & PCIECR_PRST4) && (data & PCIECR_PRST3)); + if (slot4en) { + pr_info("Activating PCIe slot#4 (disabling slot#3)\n"); + + data &= ~PCIECR_PCIEMUX1; + fpga_write_reg(data, PCIECR); + + /* Warn about forced rerouting if slot#3 is occupied */ + if ((data & PCIECR_PRST3) == 0) { + pr_warn("Unreachable card detected in slot#3\n"); + return -EBUSY; + } + } else + pr_info("PCIe slot#4 disabled\n"); + + return 0; +} +postcore_initcall(sdk7786_pci_init); diff --git a/arch/sh/drivers/pci/fixups-se7751.c b/arch/sh/drivers/pci/fixups-se7751.c new file mode 100644 index 000000000..608f6521c --- /dev/null +++ b/arch/sh/drivers/pci/fixups-se7751.c @@ -0,0 +1,113 @@ +// SPDX-License-Identifier: GPL-2.0 +#include <linux/kernel.h> +#include <linux/types.h> +#include <linux/init.h> +#include <linux/delay.h> +#include <linux/pci.h> +#include <linux/io.h> +#include <linux/sh_intc.h> +#include "pci-sh4.h" + +int pcibios_map_platform_irq(const struct pci_dev *, u8 slot, u8 pin) +{ + switch (slot) { + case 0: return evt2irq(0x3a0); + case 1: return evt2irq(0x3a0); /* AMD Ethernet controller */ + case 2: return -1; + case 3: return -1; + case 4: return -1; + default: + printk("PCI: Bad IRQ mapping request for slot %d\n", slot); + return -1; + } +} + +#define PCIMCR_MRSET_OFF 0xBFFFFFFF +#define PCIMCR_RFSH_OFF 0xFFFFFFFB + +/* + * Only long word accesses of the PCIC's internal local registers and the + * configuration registers from the CPU is supported. + */ +#define PCIC_WRITE(x,v) writel((v), PCI_REG(x)) +#define PCIC_READ(x) readl(PCI_REG(x)) + +/* + * Description: This function sets up and initializes the pcic, sets + * up the BARS, maps the DRAM into the address space etc, etc. + */ +int pci_fixup_pcic(struct pci_channel *chan) +{ + unsigned long bcr1, wcr1, wcr2, wcr3, mcr; + unsigned short bcr2; + + /* + * Initialize the slave bus controller on the pcic. The values used + * here should not be hardcoded, but they should be taken from the bsc + * on the processor, to make this function as generic as possible. + * (i.e. Another sbc may usr different SDRAM timing settings -- in order + * for the pcic to work, its settings need to be exactly the same.) 
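+ *
+ * Before the MCR value is handed to the PCIC it is masked with
+ * PCIMCR_MRSET_OFF (0xBFFFFFFF, clears bit 30) and PCIMCR_RFSH_OFF
+ * (0xFFFFFFFB, clears bit 2). As a purely illustrative example, an
+ * MCR of 0x4000000c would be written out as 0x00000008.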
+ */ + bcr1 = (*(volatile unsigned long*)(SH7751_BCR1)); + bcr2 = (*(volatile unsigned short*)(SH7751_BCR2)); + wcr1 = (*(volatile unsigned long*)(SH7751_WCR1)); + wcr2 = (*(volatile unsigned long*)(SH7751_WCR2)); + wcr3 = (*(volatile unsigned long*)(SH7751_WCR3)); + mcr = (*(volatile unsigned long*)(SH7751_MCR)); + + bcr1 = bcr1 | 0x00080000; /* Enable Bit 19, BREQEN */ + (*(volatile unsigned long*)(SH7751_BCR1)) = bcr1; + + bcr1 = bcr1 | 0x40080000; /* Enable Bit 19 BREQEN, set PCIC to slave */ + PCIC_WRITE(SH7751_PCIBCR1, bcr1); /* PCIC BCR1 */ + PCIC_WRITE(SH7751_PCIBCR2, bcr2); /* PCIC BCR2 */ + PCIC_WRITE(SH7751_PCIWCR1, wcr1); /* PCIC WCR1 */ + PCIC_WRITE(SH7751_PCIWCR2, wcr2); /* PCIC WCR2 */ + PCIC_WRITE(SH7751_PCIWCR3, wcr3); /* PCIC WCR3 */ + mcr = (mcr & PCIMCR_MRSET_OFF) & PCIMCR_RFSH_OFF; + PCIC_WRITE(SH7751_PCIMCR, mcr); /* PCIC MCR */ + + + /* Enable all interrupts, so we know what to fix */ + PCIC_WRITE(SH7751_PCIINTM, 0x0000c3ff); + PCIC_WRITE(SH7751_PCIAINTM, 0x0000380f); + + /* Set up standard PCI config registers */ + PCIC_WRITE(SH7751_PCICONF1, 0xF39000C7); /* Bus Master, Mem & I/O access */ + PCIC_WRITE(SH7751_PCICONF2, 0x00000000); /* PCI Class code & Revision ID */ + PCIC_WRITE(SH7751_PCICONF4, 0xab000001); /* PCI I/O address (local regs) */ + PCIC_WRITE(SH7751_PCICONF5, 0x0c000000); /* PCI MEM address (local RAM) */ + PCIC_WRITE(SH7751_PCICONF6, 0xd0000000); /* PCI MEM address (unused) */ + PCIC_WRITE(SH7751_PCICONF11, 0x35051054); /* PCI Subsystem ID & Vendor ID */ + PCIC_WRITE(SH7751_PCILSR0, 0x03f00000); /* MEM (full 64M exposed) */ + PCIC_WRITE(SH7751_PCILSR1, 0x00000000); /* MEM (unused) */ + PCIC_WRITE(SH7751_PCILAR0, 0x0c000000); /* MEM (direct map from PCI) */ + PCIC_WRITE(SH7751_PCILAR1, 0x00000000); /* MEM (unused) */ + + /* Now turn it on... */ + PCIC_WRITE(SH7751_PCICR, 0xa5000001); + + /* + * Set PCIMBR and PCIIOBR here, assuming a single window + * (16M MEM, 256K IO) is enough. If a larger space is + * needed, the readx/writex and inx/outx functions will + * have to do more (e.g. setting registers for each call). + */ + + /* + * Set the MBR so PCI address is one-to-one with window, + * meaning all calls go straight through... use BUG_ON to + * catch erroneous assumption. + */ + BUG_ON(chan->resources[1].start != SH7751_PCI_MEMORY_BASE); + + PCIC_WRITE(SH7751_PCIMBR, chan->resources[1].start); + + /* Set IOBR for window containing area specified in pci.h */ + PCIC_WRITE(SH7751_PCIIOBR, (chan->resources[0].start & SH7751_PCIIOBR_MASK)); + + /* All done, may as well say so... 
*/ + printk("SH7751 PCI: Finished initialization of the PCI controller\n"); + + return 1; +} diff --git a/arch/sh/drivers/pci/fixups-sh03.c b/arch/sh/drivers/pci/fixups-sh03.c new file mode 100644 index 000000000..7ec4a74ab --- /dev/null +++ b/arch/sh/drivers/pci/fixups-sh03.c @@ -0,0 +1,33 @@ +// SPDX-License-Identifier: GPL-2.0 +#include <linux/kernel.h> +#include <linux/init.h> +#include <linux/types.h> +#include <linux/pci.h> +#include <linux/sh_intc.h> + +int pcibios_map_platform_irq(const struct pci_dev *dev, u8 slot, u8 pin) +{ + int irq; + + if (dev->bus->number == 0) { + switch (slot) { + case 4: return evt2irq(0x2a0); /* eth0 */ + case 8: return evt2irq(0x2a0); /* eth1 */ + case 6: return evt2irq(0x240); /* PCI bridge */ + default: + printk(KERN_ERR "PCI: Bad IRQ mapping request " + "for slot %d\n", slot); + return evt2irq(0x240); + } + } else { + switch (pin) { + case 0: irq = evt2irq(0x240); break; + case 1: irq = evt2irq(0x240); break; + case 2: irq = evt2irq(0x240); break; + case 3: irq = evt2irq(0x240); break; + case 4: irq = evt2irq(0x240); break; + default: irq = -1; break; + } + } + return irq; +} diff --git a/arch/sh/drivers/pci/fixups-snapgear.c b/arch/sh/drivers/pci/fixups-snapgear.c new file mode 100644 index 000000000..317225c09 --- /dev/null +++ b/arch/sh/drivers/pci/fixups-snapgear.c @@ -0,0 +1,37 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * arch/sh/drivers/pci/ops-snapgear.c + * + * Author: David McCullough <davidm@snapgear.com> + * + * Ported to new API by Paul Mundt <lethal@linux-sh.org> + * + * Highly leveraged from pci-bigsur.c, written by Dustin McIntire. + * + * PCI initialization for the SnapGear boards + */ +#include <linux/kernel.h> +#include <linux/types.h> +#include <linux/init.h> +#include <linux/pci.h> +#include <linux/sh_intc.h> +#include "pci-sh4.h" + +int pcibios_map_platform_irq(const struct pci_dev *pdev, u8 slot, u8 pin) +{ + int irq = -1; + + switch (slot) { + case 8: /* the PCI bridge */ break; + case 11: irq = evt2irq(0x300); break; /* USB */ + case 12: irq = evt2irq(0x360); break; /* PCMCIA */ + case 13: irq = evt2irq(0x2a0); break; /* eth0 */ + case 14: irq = evt2irq(0x300); break; /* eth1 */ + case 15: irq = evt2irq(0x360); break; /* safenet (unused) */ + } + + printk("PCI: Mapping SnapGear IRQ for slot %d, pin %c to irq %d\n", + slot, pin - 1 + 'A', irq); + + return irq; +} diff --git a/arch/sh/drivers/pci/fixups-titan.c b/arch/sh/drivers/pci/fixups-titan.c new file mode 100644 index 000000000..b5bb65caa --- /dev/null +++ b/arch/sh/drivers/pci/fixups-titan.c @@ -0,0 +1,36 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * arch/sh/drivers/pci/ops-titan.c + * + * Ported to new API by Paul Mundt <lethal@linux-sh.org> + * + * Modified from ops-snapgear.c written by David McCullough + * Highly leveraged from pci-bigsur.c, written by Dustin McIntire. 
+ * + * PCI initialization for the Titan boards + */ +#include <linux/kernel.h> +#include <linux/types.h> +#include <linux/init.h> +#include <linux/pci.h> +#include <linux/io.h> +#include <mach/titan.h> +#include "pci-sh4.h" + +static char titan_irq_tab[] = { + TITAN_IRQ_WAN, + TITAN_IRQ_LAN, + TITAN_IRQ_MPCIA, + TITAN_IRQ_MPCIB, + TITAN_IRQ_USB, +}; + +int pcibios_map_platform_irq(const struct pci_dev *pdev, u8 slot, u8 pin) +{ + int irq = titan_irq_tab[slot]; + + printk("PCI: Mapping TITAN IRQ for slot %d, pin %c to irq %d\n", + slot, pin - 1 + 'A', irq); + + return irq; +} diff --git a/arch/sh/drivers/pci/ops-dreamcast.c b/arch/sh/drivers/pci/ops-dreamcast.c new file mode 100644 index 000000000..517a8a970 --- /dev/null +++ b/arch/sh/drivers/pci/ops-dreamcast.c @@ -0,0 +1,79 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * PCI operations for the Sega Dreamcast + * + * Copyright (C) 2001, 2002 M. R. Brown + * Copyright (C) 2002, 2003 Paul Mundt + */ + +#include <linux/sched.h> +#include <linux/kernel.h> +#include <linux/param.h> +#include <linux/interrupt.h> +#include <linux/init.h> +#include <linux/irq.h> +#include <linux/pci.h> +#include <linux/module.h> +#include <linux/io.h> +#include <mach/pci.h> + +/* + * The !gapspci_config_access case really shouldn't happen, ever, unless + * someone implicitly messes around with the last devfn value.. otherwise we + * only support a single device anyways, and if we didn't have a BBA, we + * wouldn't make it terribly far through the PCI setup anyways. + * + * Also, we could very easily support both Type 0 and Type 1 configurations + * here, but since it doesn't seem that there is any such implementation in + * existence, we don't bother. + * + * I suppose if someone actually gets around to ripping the chip out of + * the BBA and hanging some more devices off of it, then this might be + * something to take into consideration. However, due to the cost of the BBA, + * and the general lack of activity by DC hardware hackers, this doesn't seem + * likely to happen anytime soon. + */ +static int gapspci_config_access(unsigned char bus, unsigned int devfn) +{ + return (bus == 0) && (devfn == 0); +} + +/* + * We can also actually read and write in b/w/l sizes! Thankfully this part + * was at least done right, and we don't have to do the stupid masking and + * shifting that we do on the 7751! Small wonders never cease to amaze. 
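+ *
+ * In practice that means a byte-sized config read of, say, the header
+ * type register (offset 0x0e) is simply inb(GAPSPCI_BBA_CONFIG + 0x0e),
+ * with no word-sized read-modify-write dance needed.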
+ */ +static int gapspci_read(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 *val) +{ + *val = 0xffffffff; + + if (!gapspci_config_access(bus->number, devfn)) + return PCIBIOS_DEVICE_NOT_FOUND; + + switch (size) { + case 1: *val = inb(GAPSPCI_BBA_CONFIG+where); break; + case 2: *val = inw(GAPSPCI_BBA_CONFIG+where); break; + case 4: *val = inl(GAPSPCI_BBA_CONFIG+where); break; + } + + return PCIBIOS_SUCCESSFUL; +} + +static int gapspci_write(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 val) +{ + if (!gapspci_config_access(bus->number, devfn)) + return PCIBIOS_DEVICE_NOT_FOUND; + + switch (size) { + case 1: outb(( u8)val, GAPSPCI_BBA_CONFIG+where); break; + case 2: outw((u16)val, GAPSPCI_BBA_CONFIG+where); break; + case 4: outl((u32)val, GAPSPCI_BBA_CONFIG+where); break; + } + + return PCIBIOS_SUCCESSFUL; +} + +struct pci_ops gapspci_pci_ops = { + .read = gapspci_read, + .write = gapspci_write, +}; diff --git a/arch/sh/drivers/pci/ops-sh4.c b/arch/sh/drivers/pci/ops-sh4.c new file mode 100644 index 000000000..a205be3bf --- /dev/null +++ b/arch/sh/drivers/pci/ops-sh4.c @@ -0,0 +1,105 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Generic SH-4 / SH-4A PCIC operations (SH7751, SH7780). + * + * Copyright (C) 2002 - 2009 Paul Mundt + */ +#include <linux/pci.h> +#include <linux/io.h> +#include <linux/spinlock.h> +#include <asm/addrspace.h> +#include "pci-sh4.h" + +/* + * Direct access to PCI hardware... + */ +#define CONFIG_CMD(bus, devfn, where) \ + (0x80000000 | (bus->number << 16) | (devfn << 8) | (where & ~3)) + +/* + * Functions for accessing PCI configuration space with type 1 accesses + */ +static int sh4_pci_read(struct pci_bus *bus, unsigned int devfn, + int where, int size, u32 *val) +{ + struct pci_channel *chan = bus->sysdata; + unsigned long flags; + u32 data; + + /* + * PCIPDR may only be accessed as 32 bit words, + * so we must do byte alignment by hand + */ + raw_spin_lock_irqsave(&pci_config_lock, flags); + pci_write_reg(chan, CONFIG_CMD(bus, devfn, where), SH4_PCIPAR); + data = pci_read_reg(chan, SH4_PCIPDR); + raw_spin_unlock_irqrestore(&pci_config_lock, flags); + + switch (size) { + case 1: + *val = (data >> ((where & 3) << 3)) & 0xff; + break; + case 2: + *val = (data >> ((where & 2) << 3)) & 0xffff; + break; + case 4: + *val = data; + break; + default: + return PCIBIOS_FUNC_NOT_SUPPORTED; + } + + return PCIBIOS_SUCCESSFUL; +} + +/* + * Since SH4 only does 32bit access we'll have to do a read, + * mask,write operation. + * We'll allow an odd byte offset, though it should be illegal. 
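+ *
+ * For example, a one byte write of 0xab to offset 0x41 fetches the
+ * 32-bit word at offset 0x40, clears bits 15:8, ORs in 0xab00 and
+ * writes the whole word back through SH4_PCIPDR.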
+ */ +static int sh4_pci_write(struct pci_bus *bus, unsigned int devfn, + int where, int size, u32 val) +{ + struct pci_channel *chan = bus->sysdata; + unsigned long flags; + int shift; + u32 data; + + raw_spin_lock_irqsave(&pci_config_lock, flags); + pci_write_reg(chan, CONFIG_CMD(bus, devfn, where), SH4_PCIPAR); + data = pci_read_reg(chan, SH4_PCIPDR); + raw_spin_unlock_irqrestore(&pci_config_lock, flags); + + switch (size) { + case 1: + shift = (where & 3) << 3; + data &= ~(0xff << shift); + data |= ((val & 0xff) << shift); + break; + case 2: + shift = (where & 2) << 3; + data &= ~(0xffff << shift); + data |= ((val & 0xffff) << shift); + break; + case 4: + data = val; + break; + default: + return PCIBIOS_FUNC_NOT_SUPPORTED; + } + + pci_write_reg(chan, data, SH4_PCIPDR); + + return PCIBIOS_SUCCESSFUL; +} + +struct pci_ops sh4_pci_ops = { + .read = sh4_pci_read, + .write = sh4_pci_write, +}; + +int __attribute__((weak)) pci_fixup_pcic(struct pci_channel *chan) +{ + /* Nothing to do. */ + return 0; +} diff --git a/arch/sh/drivers/pci/ops-sh7786.c b/arch/sh/drivers/pci/ops-sh7786.c new file mode 100644 index 000000000..a10f9f4eb --- /dev/null +++ b/arch/sh/drivers/pci/ops-sh7786.c @@ -0,0 +1,168 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Generic SH7786 PCI-Express operations. + * + * Copyright (C) 2009 - 2010 Paul Mundt + */ +#include <linux/kernel.h> +#include <linux/init.h> +#include <linux/pci.h> +#include <linux/io.h> +#include <linux/spinlock.h> +#include "pcie-sh7786.h" + +enum { + PCI_ACCESS_READ, + PCI_ACCESS_WRITE, +}; + +static int sh7786_pcie_config_access(unsigned char access_type, + struct pci_bus *bus, unsigned int devfn, int where, u32 *data) +{ + struct pci_channel *chan = bus->sysdata; + int dev, func, type, reg; + + dev = PCI_SLOT(devfn); + func = PCI_FUNC(devfn); + type = !!bus->parent; + reg = where & ~3; + + if (bus->number > 255 || dev > 31 || func > 7) + return PCIBIOS_FUNC_NOT_SUPPORTED; + + /* + * While each channel has its own memory-mapped extended config + * space, it's generally only accessible when in endpoint mode. + * When in root complex mode, the controller is unable to target + * itself with either type 0 or type 1 accesses, and indeed, any + * controller initiated target transfer to its own config space + * result in a completer abort. + * + * Each channel effectively only supports a single device, but as + * the same channel <-> device access works for any PCI_SLOT() + * value, we cheat a bit here and bind the controller's config + * space to devfn 0 in order to enable self-enumeration. In this + * case the regular PAR/PDR path is sidelined and the mangled + * config access itself is initiated as a SuperHyway transaction. 
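+ *
+ * Everything else goes through the regular PAR/PDR sequence below,
+ * with type (!!bus->parent) selecting Type 0 configuration requests
+ * on the root bus and Type 1 requests for devices behind a bridge.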
+ */ + if (pci_is_root_bus(bus)) { + if (dev == 0) { + if (access_type == PCI_ACCESS_READ) + *data = pci_read_reg(chan, PCI_REG(reg)); + else + pci_write_reg(chan, *data, PCI_REG(reg)); + + return PCIBIOS_SUCCESSFUL; + } else if (dev > 1) + return PCIBIOS_DEVICE_NOT_FOUND; + } + + /* Clear errors */ + pci_write_reg(chan, pci_read_reg(chan, SH4A_PCIEERRFR), SH4A_PCIEERRFR); + + /* Set the PIO address */ + pci_write_reg(chan, (bus->number << 24) | (dev << 19) | + (func << 16) | reg, SH4A_PCIEPAR); + + /* Enable the configuration access */ + pci_write_reg(chan, (1 << 31) | (type << 8), SH4A_PCIEPCTLR); + + /* Check for errors */ + if (pci_read_reg(chan, SH4A_PCIEERRFR) & 0x10) + return PCIBIOS_DEVICE_NOT_FOUND; + + /* Check for master and target aborts */ + if (pci_read_reg(chan, SH4A_PCIEPCICONF1) & ((1 << 29) | (1 << 28))) + return PCIBIOS_DEVICE_NOT_FOUND; + + if (access_type == PCI_ACCESS_READ) + *data = pci_read_reg(chan, SH4A_PCIEPDR); + else + pci_write_reg(chan, *data, SH4A_PCIEPDR); + + /* Disable the configuration access */ + pci_write_reg(chan, 0, SH4A_PCIEPCTLR); + + return PCIBIOS_SUCCESSFUL; +} + +static int sh7786_pcie_read(struct pci_bus *bus, unsigned int devfn, + int where, int size, u32 *val) +{ + unsigned long flags; + int ret; + u32 data; + + if ((size == 2) && (where & 1)) + return PCIBIOS_BAD_REGISTER_NUMBER; + else if ((size == 4) && (where & 3)) + return PCIBIOS_BAD_REGISTER_NUMBER; + + raw_spin_lock_irqsave(&pci_config_lock, flags); + ret = sh7786_pcie_config_access(PCI_ACCESS_READ, bus, + devfn, where, &data); + if (ret != PCIBIOS_SUCCESSFUL) { + *val = 0xffffffff; + goto out; + } + + if (size == 1) + *val = (data >> ((where & 3) << 3)) & 0xff; + else if (size == 2) + *val = (data >> ((where & 2) << 3)) & 0xffff; + else + *val = data; + + dev_dbg(&bus->dev, "pcie-config-read: bus=%3d devfn=0x%04x " + "where=0x%04x size=%d val=0x%08lx\n", bus->number, + devfn, where, size, (unsigned long)*val); + +out: + raw_spin_unlock_irqrestore(&pci_config_lock, flags); + return ret; +} + +static int sh7786_pcie_write(struct pci_bus *bus, unsigned int devfn, + int where, int size, u32 val) +{ + unsigned long flags; + int shift, ret; + u32 data; + + if ((size == 2) && (where & 1)) + return PCIBIOS_BAD_REGISTER_NUMBER; + else if ((size == 4) && (where & 3)) + return PCIBIOS_BAD_REGISTER_NUMBER; + + raw_spin_lock_irqsave(&pci_config_lock, flags); + ret = sh7786_pcie_config_access(PCI_ACCESS_READ, bus, + devfn, where, &data); + if (ret != PCIBIOS_SUCCESSFUL) + goto out; + + dev_dbg(&bus->dev, "pcie-config-write: bus=%3d devfn=0x%04x " + "where=0x%04x size=%d val=%08lx\n", bus->number, + devfn, where, size, (unsigned long)val); + + if (size == 1) { + shift = (where & 3) << 3; + data &= ~(0xff << shift); + data |= ((val & 0xff) << shift); + } else if (size == 2) { + shift = (where & 2) << 3; + data &= ~(0xffff << shift); + data |= ((val & 0xffff) << shift); + } else + data = val; + + ret = sh7786_pcie_config_access(PCI_ACCESS_WRITE, bus, + devfn, where, &data); +out: + raw_spin_unlock_irqrestore(&pci_config_lock, flags); + return ret; +} + +struct pci_ops sh7786_pci_ops = { + .read = sh7786_pcie_read, + .write = sh7786_pcie_write, +}; diff --git a/arch/sh/drivers/pci/pci-dreamcast.c b/arch/sh/drivers/pci/pci-dreamcast.c new file mode 100644 index 000000000..4cff2a810 --- /dev/null +++ b/arch/sh/drivers/pci/pci-dreamcast.c @@ -0,0 +1,97 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * PCI support for the Sega Dreamcast + * + * Copyright (C) 2001, 2002 M. R. 
Brown + * Copyright (C) 2002, 2003 Paul Mundt + * + * This file originally bore the message (with enclosed-$): + * Id: pci.c,v 1.3 2003/05/04 19:29:46 lethal Exp + * Dreamcast PCI: Supports SEGA Broadband Adaptor only. + */ + +#include <linux/sched.h> +#include <linux/kernel.h> +#include <linux/param.h> +#include <linux/interrupt.h> +#include <linux/init.h> +#include <linux/irq.h> +#include <linux/pci.h> +#include <linux/module.h> +#include <asm/io.h> +#include <asm/irq.h> +#include <mach/pci.h> + +static struct resource gapspci_resources[] = { + { + .name = "GAPSPCI IO", + .start = GAPSPCI_BBA_CONFIG, + .end = GAPSPCI_BBA_CONFIG + GAPSPCI_BBA_CONFIG_SIZE - 1, + .flags = IORESOURCE_IO, + }, { + .name = "GAPSPCI mem", + .start = GAPSPCI_DMA_BASE, + .end = GAPSPCI_DMA_BASE + GAPSPCI_DMA_SIZE - 1, + .flags = IORESOURCE_MEM, + }, +}; + +static struct pci_channel dreamcast_pci_controller = { + .pci_ops = &gapspci_pci_ops, + .resources = gapspci_resources, + .nr_resources = ARRAY_SIZE(gapspci_resources), + .io_offset = 0x00000000, + .mem_offset = 0x00000000, +}; + +/* + * gapspci init + */ + +static int __init gapspci_init(void) +{ + char idbuf[16]; + int i; + + /* + * FIXME: All of this wants documenting to some degree, + * even some basic register definitions would be nice. + * + * I haven't seen anything this ugly since.. maple. + */ + + for (i=0; i<16; i++) + idbuf[i] = inb(GAPSPCI_REGS+i); + + if (strncmp(idbuf, "GAPSPCI_BRIDGE_2", 16)) + return -ENODEV; + + outl(0x5a14a501, GAPSPCI_REGS+0x18); + + for (i=0; i<1000000; i++) + cpu_relax(); + + if (inl(GAPSPCI_REGS+0x18) != 1) + return -EINVAL; + + outl(0x01000000, GAPSPCI_REGS+0x20); + outl(0x01000000, GAPSPCI_REGS+0x24); + + outl(GAPSPCI_DMA_BASE, GAPSPCI_REGS+0x28); + outl(GAPSPCI_DMA_BASE+GAPSPCI_DMA_SIZE, GAPSPCI_REGS+0x2c); + + outl(1, GAPSPCI_REGS+0x14); + outl(1, GAPSPCI_REGS+0x34); + + /* Setting Broadband Adapter */ + outw(0xf900, GAPSPCI_BBA_CONFIG+0x06); + outl(0x00000000, GAPSPCI_BBA_CONFIG+0x30); + outb(0x00, GAPSPCI_BBA_CONFIG+0x3c); + outb(0xf0, GAPSPCI_BBA_CONFIG+0x0d); + outw(0x0006, GAPSPCI_BBA_CONFIG+0x04); + outl(0x00002001, GAPSPCI_BBA_CONFIG+0x10); + outl(0x01000000, GAPSPCI_BBA_CONFIG+0x14); + + return register_pci_controller(&dreamcast_pci_controller); +} +arch_initcall(gapspci_init); diff --git a/arch/sh/drivers/pci/pci-sh4.h b/arch/sh/drivers/pci/pci-sh4.h new file mode 100644 index 000000000..1543c50b6 --- /dev/null +++ b/arch/sh/drivers/pci/pci-sh4.h @@ -0,0 +1,182 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __PCI_SH4_H +#define __PCI_SH4_H + +#if defined(CONFIG_CPU_SUBTYPE_SH7780) || \ + defined(CONFIG_CPU_SUBTYPE_SH7785) || \ + defined(CONFIG_CPU_SUBTYPE_SH7763) +#include "pci-sh7780.h" +#else +#include "pci-sh7751.h" +#endif + +#include <asm/io.h> + +#define SH4_PCICR 0x100 /* PCI Control Register */ + #define SH4_PCICR_PREFIX 0xA5000000 /* CR prefix for write */ + #define SH4_PCICR_FTO 0x00000400 /* TRDY/IRDY Enable */ + #define SH4_PCICR_TRSB 0x00000200 /* Target Read Single */ + #define SH4_PCICR_BSWP 0x00000100 /* Target Byte Swap */ + #define SH4_PCICR_PLUP 0x00000080 /* Enable PCI Pullup */ + #define SH4_PCICR_ARBM 0x00000040 /* PCI Arbitration Mode */ + #define SH4_PCICR_MD 0x00000030 /* MD9 and MD10 status */ + #define SH4_PCICR_SERR 0x00000008 /* SERR output assert */ + #define SH4_PCICR_INTA 0x00000004 /* INTA output assert */ + #define SH4_PCICR_PRST 0x00000002 /* PCI Reset Assert */ + #define SH4_PCICR_CFIN 0x00000001 /* Central Fun. 
Init Done */ +#define SH4_PCILSR0 0x104 /* PCI Local Space Register0 */ +#define SH4_PCILSR1 0x108 /* PCI Local Space Register1 */ +#define SH4_PCILAR0 0x10C /* PCI Local Addr Register1 */ +#define SH4_PCILAR1 0x110 /* PCI Local Addr Register1 */ +#define SH4_PCIINT 0x114 /* PCI Interrupt Register */ + #define SH4_PCIINT_MLCK 0x00008000 /* Master Lock Error */ + #define SH4_PCIINT_TABT 0x00004000 /* Target Abort Error */ + #define SH4_PCIINT_TRET 0x00000200 /* Target Retry Error */ + #define SH4_PCIINT_MFDE 0x00000100 /* Master Func. Disable Error */ + #define SH4_PCIINT_PRTY 0x00000080 /* Address Parity Error */ + #define SH4_PCIINT_SERR 0x00000040 /* SERR Detection Error */ + #define SH4_PCIINT_TWDP 0x00000020 /* Tgt. Write Parity Error */ + #define SH4_PCIINT_TRDP 0x00000010 /* Tgt. Read Parity Err Det. */ + #define SH4_PCIINT_MTABT 0x00000008 /* Master-Tgt. Abort Error */ + #define SH4_PCIINT_MMABT 0x00000004 /* Master-Master Abort Error */ + #define SH4_PCIINT_MWPD 0x00000002 /* Master Write PERR Detect */ + #define SH4_PCIINT_MRPD 0x00000001 /* Master Read PERR Detect */ +#define SH4_PCIINTM 0x118 /* PCI Interrupt Mask */ + #define SH4_PCIINTM_TTADIM BIT(14) /* Target-target abort interrupt */ + #define SH4_PCIINTM_TMTOIM BIT(9) /* Target retry timeout */ + #define SH4_PCIINTM_MDEIM BIT(8) /* Master function disable error */ + #define SH4_PCIINTM_APEDIM BIT(7) /* Address parity error detection */ + #define SH4_PCIINTM_SDIM BIT(6) /* SERR detection */ + #define SH4_PCIINTM_DPEITWM BIT(5) /* Data parity error for target write */ + #define SH4_PCIINTM_PEDITRM BIT(4) /* PERR detection for target read */ + #define SH4_PCIINTM_TADIMM BIT(3) /* Target abort for master */ + #define SH4_PCIINTM_MADIMM BIT(2) /* Master abort for master */ + #define SH4_PCIINTM_MWPDIM BIT(1) /* Master write data parity error */ + #define SH4_PCIINTM_MRDPEIM BIT(0) /* Master read data parity error */ +#define SH4_PCIALR 0x11C /* Error Address Register */ +#define SH4_PCICLR 0x120 /* Error Command/Data */ + #define SH4_PCICLR_MPIO 0x80000000 + #define SH4_PCICLR_MDMA0 0x40000000 /* DMA0 Transfer Error */ + #define SH4_PCICLR_MDMA1 0x20000000 /* DMA1 Transfer Error */ + #define SH4_PCICLR_MDMA2 0x10000000 /* DMA2 Transfer Error */ + #define SH4_PCICLR_MDMA3 0x08000000 /* DMA3 Transfer Error */ + #define SH4_PCICLR_TGT 0x04000000 /* Target Transfer Error */ + #define SH4_PCICLR_CMDL 0x0000000F /* PCI Command at Error */ +#define SH4_PCIAINT 0x130 /* Arbiter Interrupt Register */ + #define SH4_PCIAINT_MBKN 0x00002000 /* Master Broken Interrupt */ + #define SH4_PCIAINT_TBTO 0x00001000 /* Target Bus Time Out */ + #define SH4_PCIAINT_MBTO 0x00000800 /* Master Bus Time Out */ + #define SH4_PCIAINT_TABT 0x00000008 /* Target Abort */ + #define SH4_PCIAINT_MABT 0x00000004 /* Master Abort */ + #define SH4_PCIAINT_RDPE 0x00000002 /* Read Data Parity Error */ + #define SH4_PCIAINT_WDPE 0x00000001 /* Write Data Parity Error */ +#define SH4_PCIAINTM 0x134 /* Arbiter Int. Mask Register */ +#define SH4_PCIBMLR 0x138 /* Error Bus Master Register */ + #define SH4_PCIBMLR_REQ4 0x00000010 /* REQ4 bus master at error */ + #define SH4_PCIBMLR_REQ3 0x00000008 /* REQ3 bus master at error */ + #define SH4_PCIBMLR_REQ2 0x00000004 /* REQ2 bus master at error */ + #define SH4_PCIBMLR_REQ1 0x00000002 /* REQ1 bus master at error */ + #define SH4_PCIBMLR_REQ0 0x00000001 /* REQ0 bus master at error */ +#define SH4_PCIDMABT 0x140 /* DMA Transfer Arb. 
Register */ + #define SH4_PCIDMABT_RRBN 0x00000001 /* DMA Arbitor Round-Robin */ +#define SH4_PCIDPA0 0x180 /* DMA0 Transfer Addr. */ +#define SH4_PCIDLA0 0x184 /* DMA0 Local Addr. */ +#define SH4_PCIDTC0 0x188 /* DMA0 Transfer Cnt. */ +#define SH4_PCIDCR0 0x18C /* DMA0 Control Register */ + #define SH4_PCIDCR_ALGN 0x00000600 /* DMA Alignment Mode */ + #define SH4_PCIDCR_MAST 0x00000100 /* DMA Termination Type */ + #define SH4_PCIDCR_INTM 0x00000080 /* DMA Interrupt Done Mask*/ + #define SH4_PCIDCR_INTS 0x00000040 /* DMA Interrupt Done Status */ + #define SH4_PCIDCR_LHLD 0x00000020 /* Local Address Control */ + #define SH4_PCIDCR_PHLD 0x00000010 /* PCI Address Control*/ + #define SH4_PCIDCR_IOSEL 0x00000008 /* PCI Address Space Type */ + #define SH4_PCIDCR_DIR 0x00000004 /* DMA Transfer Direction */ + #define SH4_PCIDCR_STOP 0x00000002 /* Force DMA Stop */ + #define SH4_PCIDCR_STRT 0x00000001 /* DMA Start */ +#define SH4_PCIDPA1 0x190 /* DMA1 Transfer Addr. */ +#define SH4_PCIDLA1 0x194 /* DMA1 Local Addr. */ +#define SH4_PCIDTC1 0x198 /* DMA1 Transfer Cnt. */ +#define SH4_PCIDCR1 0x19C /* DMA1 Control Register */ +#define SH4_PCIDPA2 0x1A0 /* DMA2 Transfer Addr. */ +#define SH4_PCIDLA2 0x1A4 /* DMA2 Local Addr. */ +#define SH4_PCIDTC2 0x1A8 /* DMA2 Transfer Cnt. */ +#define SH4_PCIDCR2 0x1AC /* DMA2 Control Register */ +#define SH4_PCIDPA3 0x1B0 /* DMA3 Transfer Addr. */ +#define SH4_PCIDLA3 0x1B4 /* DMA3 Local Addr. */ +#define SH4_PCIDTC3 0x1B8 /* DMA3 Transfer Cnt. */ +#define SH4_PCIDCR3 0x1BC /* DMA3 Control Register */ +#define SH4_PCIPAR 0x1C0 /* PIO Address Register */ + #define SH4_PCIPAR_CFGEN 0x80000000 /* Configuration Enable */ + #define SH4_PCIPAR_BUSNO 0x00FF0000 /* Config. Bus Number */ + #define SH4_PCIPAR_DEVNO 0x0000FF00 /* Config. Device Number */ + #define SH4_PCIPAR_REGAD 0x000000FC /* Register Address Number */ +#define SH4_PCIMBR 0x1C4 /* Memory Base Address */ + #define SH4_PCIMBR_MASK 0xFF000000 /* Memory Space Mask */ + #define SH4_PCIMBR_LOCK 0x00000001 /* Lock Memory Space */ +#define SH4_PCIIOBR 0x1C8 /* I/O Base Address Register */ + #define SH4_PCIIOBR_MASK 0xFFFC0000 /* IO Space Mask */ + #define SH4_PCIIOBR_LOCK 0x00000001 /* Lock IO Space */ +#define SH4_PCIPINT 0x1CC /* Power Mgmnt Int. Register */ + #define SH4_PCIPINT_D3 0x00000002 /* D3 Pwr Mgmt. Interrupt */ + #define SH4_PCIPINT_D0 0x00000001 /* D0 Pwr Mgmt. Interrupt */ +#define SH4_PCIPINTM 0x1D0 /* Power Mgmnt Mask Register */ +#define SH4_PCICLKR 0x1D4 /* Clock Ctrl. Register */ + #define SH4_PCICLKR_PCSTP 0x00000002 /* PCI Clock Stop */ + #define SH4_PCICLKR_BCSTP 0x00000001 /* BCLK Clock Stop */ +/* For definitions of BCR, MCR see ... 
*/ +#define SH4_PCIBCR1 0x1E0 /* Memory BCR1 Register */ + #define SH4_PCIMBR0 SH4_PCIBCR1 +#define SH4_PCIBCR2 0x1E4 /* Memory BCR2 Register */ + #define SH4_PCIMBMR0 SH4_PCIBCR2 +#define SH4_PCIWCR1 0x1E8 /* Wait Control 1 Register */ +#define SH4_PCIWCR2 0x1EC /* Wait Control 2 Register */ +#define SH4_PCIWCR3 0x1F0 /* Wait Control 3 Register */ + #define SH4_PCIMBR2 SH4_PCIWCR3 +#define SH4_PCIMCR 0x1F4 /* Memory Control Register */ +#define SH4_PCIBCR3 0x1f8 /* Memory BCR3 Register */ +#define SH4_PCIPCTR 0x200 /* Port Control Register */ + #define SH4_PCIPCTR_P2EN 0x000400000 /* Port 2 Enable */ + #define SH4_PCIPCTR_P1EN 0x000200000 /* Port 1 Enable */ + #define SH4_PCIPCTR_P0EN 0x000100000 /* Port 0 Enable */ + #define SH4_PCIPCTR_P2UP 0x000000020 /* Port2 Pull Up Enable */ + #define SH4_PCIPCTR_P2IO 0x000000010 /* Port2 Output Enable */ + #define SH4_PCIPCTR_P1UP 0x000000008 /* Port1 Pull Up Enable */ + #define SH4_PCIPCTR_P1IO 0x000000004 /* Port1 Output Enable */ + #define SH4_PCIPCTR_P0UP 0x000000002 /* Port0 Pull Up Enable */ + #define SH4_PCIPCTR_P0IO 0x000000001 /* Port0 Output Enable */ +#define SH4_PCIPDTR 0x204 /* Port Data Register */ + #define SH4_PCIPDTR_PB5 0x000000020 /* Port 5 Enable */ + #define SH4_PCIPDTR_PB4 0x000000010 /* Port 4 Enable */ + #define SH4_PCIPDTR_PB3 0x000000008 /* Port 3 Enable */ + #define SH4_PCIPDTR_PB2 0x000000004 /* Port 2 Enable */ + #define SH4_PCIPDTR_PB1 0x000000002 /* Port 1 Enable */ + #define SH4_PCIPDTR_PB0 0x000000001 /* Port 0 Enable */ +#define SH4_PCIPDR 0x220 /* Port IO Data Register */ + +/* arch/sh/kernel/drivers/pci/ops-sh4.c */ +extern struct pci_ops sh4_pci_ops; +int pci_fixup_pcic(struct pci_channel *chan); + +struct sh4_pci_address_space { + unsigned long base; + unsigned long size; +}; + +struct sh4_pci_address_map { + struct sh4_pci_address_space window0; + struct sh4_pci_address_space window1; +}; + +static inline void pci_write_reg(struct pci_channel *chan, + unsigned long val, unsigned long reg) +{ + __raw_writel(val, chan->reg_base + reg); +} + +static inline unsigned long pci_read_reg(struct pci_channel *chan, + unsigned long reg) +{ + return __raw_readl(chan->reg_base + reg); +} + +#endif /* __PCI_SH4_H */ diff --git a/arch/sh/drivers/pci/pci-sh7751.c b/arch/sh/drivers/pci/pci-sh7751.c new file mode 100644 index 000000000..11ed21c2e --- /dev/null +++ b/arch/sh/drivers/pci/pci-sh7751.c @@ -0,0 +1,179 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Low-Level PCI Support for the SH7751 + * + * Copyright (C) 2003 - 2009 Paul Mundt + * Copyright (C) 2001 Dustin McIntire + * + * With cleanup by Paul van Gool <pvangool@mimotech.com>, 2003. + */ +#include <linux/init.h> +#include <linux/pci.h> +#include <linux/types.h> +#include <linux/errno.h> +#include <linux/io.h> +#include "pci-sh4.h" +#include <asm/addrspace.h> +#include <linux/sizes.h> + +static int __init __area_sdram_check(struct pci_channel *chan, + unsigned int area) +{ + unsigned long word; + + word = __raw_readl(SH7751_BCR1); + /* check BCR for SDRAM in area */ + if (((word >> area) & 1) == 0) { + printk("PCI: Area %d is not configured for SDRAM. BCR1=0x%lx\n", + area, word); + return 0; + } + pci_write_reg(chan, word, SH4_PCIBCR1); + + word = __raw_readw(SH7751_BCR2); + /* check BCR2 for 32bit SDRAM interface*/ + if (((word >> (area << 1)) & 0x3) != 0x3) { + printk("PCI: Area %d is not 32 bit SDRAM. 
BCR2=0x%lx\n", + area, word); + return 0; + } + pci_write_reg(chan, word, SH4_PCIBCR2); + + return 1; +} + +static struct resource sh7751_pci_resources[] = { + { + .name = "SH7751_IO", + .start = 0x1000, + .end = SZ_4M - 1, + .flags = IORESOURCE_IO + }, { + .name = "SH7751_mem", + .start = SH7751_PCI_MEMORY_BASE, + .end = SH7751_PCI_MEMORY_BASE + SH7751_PCI_MEM_SIZE - 1, + .flags = IORESOURCE_MEM + }, +}; + +static struct pci_channel sh7751_pci_controller = { + .pci_ops = &sh4_pci_ops, + .resources = sh7751_pci_resources, + .nr_resources = ARRAY_SIZE(sh7751_pci_resources), + .mem_offset = 0x00000000, + .io_offset = 0x00000000, + .io_map_base = SH7751_PCI_IO_BASE, +}; + +static struct sh4_pci_address_map sh7751_pci_map = { + .window0 = { + .base = SH7751_CS3_BASE_ADDR, + .size = 0x04000000, + }, +}; + +static int __init sh7751_pci_init(void) +{ + struct pci_channel *chan = &sh7751_pci_controller; + unsigned int id; + u32 word, reg; + + printk(KERN_NOTICE "PCI: Starting initialization.\n"); + + chan->reg_base = 0xfe200000; + + /* check for SH7751/SH7751R hardware */ + id = pci_read_reg(chan, SH7751_PCICONF0); + if (id != ((SH7751_DEVICE_ID << 16) | SH7751_VENDOR_ID) && + id != ((SH7751R_DEVICE_ID << 16) | SH7751_VENDOR_ID)) { + pr_debug("PCI: This is not an SH7751(R) (%x)\n", id); + return -ENODEV; + } + + /* Set the BCR's to enable PCI access */ + reg = __raw_readl(SH7751_BCR1); + reg |= 0x80000; + __raw_writel(reg, SH7751_BCR1); + + /* Turn the clocks back on (not done in reset)*/ + pci_write_reg(chan, 0, SH4_PCICLKR); + /* Clear Powerdown IRQ's (not done in reset) */ + word = SH4_PCIPINT_D3 | SH4_PCIPINT_D0; + pci_write_reg(chan, word, SH4_PCIPINT); + + /* set the command/status bits to: + * Wait Cycle Control + Parity Enable + Bus Master + + * Mem space enable + */ + word = SH7751_PCICONF1_WCC | SH7751_PCICONF1_PER | + SH7751_PCICONF1_BUM | SH7751_PCICONF1_MES; + pci_write_reg(chan, word, SH7751_PCICONF1); + + /* define this host as the host bridge */ + word = PCI_BASE_CLASS_BRIDGE << 24; + pci_write_reg(chan, word, SH7751_PCICONF2); + + /* Set IO and Mem windows to local address + * Make PCI and local address the same for easy 1 to 1 mapping + */ + word = sh7751_pci_map.window0.size - 1; + pci_write_reg(chan, word, SH4_PCILSR0); + /* Set the values on window 0 PCI config registers */ + word = P2SEGADDR(sh7751_pci_map.window0.base); + pci_write_reg(chan, word, SH4_PCILAR0); + pci_write_reg(chan, word, SH7751_PCICONF5); + + /* Set the local 16MB PCI memory space window to + * the lowest PCI mapped address + */ + word = chan->resources[1].start & SH4_PCIMBR_MASK; + pr_debug("PCI: Setting upper bits of Memory window to 0x%x\n", word); + pci_write_reg(chan, word , SH4_PCIMBR); + + /* Make sure the MSB's of IO window are set to access PCI space + * correctly */ + word = chan->resources[0].start & SH4_PCIIOBR_MASK; + pr_debug("PCI: Setting upper bits of IO window to 0x%x\n", word); + pci_write_reg(chan, word, SH4_PCIIOBR); + + /* Set PCI WCRx, BCRx's, copy from BSC locations */ + + /* check BCR for SDRAM in specified area */ + switch (sh7751_pci_map.window0.base) { + case SH7751_CS0_BASE_ADDR: word = __area_sdram_check(chan, 0); break; + case SH7751_CS1_BASE_ADDR: word = __area_sdram_check(chan, 1); break; + case SH7751_CS2_BASE_ADDR: word = __area_sdram_check(chan, 2); break; + case SH7751_CS3_BASE_ADDR: word = __area_sdram_check(chan, 3); break; + case SH7751_CS4_BASE_ADDR: word = __area_sdram_check(chan, 4); break; + case SH7751_CS5_BASE_ADDR: word = __area_sdram_check(chan, 5); break; + 
case SH7751_CS6_BASE_ADDR: word = __area_sdram_check(chan, 6); break; + } + + if (!word) + return -1; + + /* configure the wait control registers */ + word = __raw_readl(SH7751_WCR1); + pci_write_reg(chan, word, SH4_PCIWCR1); + word = __raw_readl(SH7751_WCR2); + pci_write_reg(chan, word, SH4_PCIWCR2); + word = __raw_readl(SH7751_WCR3); + pci_write_reg(chan, word, SH4_PCIWCR3); + word = __raw_readl(SH7751_MCR); + pci_write_reg(chan, word, SH4_PCIMCR); + + /* NOTE: I'm ignoring the PCI error IRQs for now.. + * TODO: add support for the internal error interrupts and + * DMA interrupts... + */ + + pci_fixup_pcic(chan); + + /* SH7751 init done, set central function init complete */ + /* use round robin mode to stop a device starving/overruning */ + word = SH4_PCICR_PREFIX | SH4_PCICR_CFIN | SH4_PCICR_ARBM; + pci_write_reg(chan, word, SH4_PCICR); + + return register_pci_controller(chan); +} +arch_initcall(sh7751_pci_init); diff --git a/arch/sh/drivers/pci/pci-sh7751.h b/arch/sh/drivers/pci/pci-sh7751.h new file mode 100644 index 000000000..d1951e50e --- /dev/null +++ b/arch/sh/drivers/pci/pci-sh7751.h @@ -0,0 +1,126 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Low-Level PCI Support for SH7751 targets + * + * Dustin McIntire (dustin@sensoria.com) (c) 2001 + * Paul Mundt (lethal@linux-sh.org) (c) 2003 + */ + +#ifndef _PCI_SH7751_H_ +#define _PCI_SH7751_H_ + +/* Platform Specific Values */ +#define SH7751_VENDOR_ID 0x1054 +#define SH7751_DEVICE_ID 0x3505 +#define SH7751R_DEVICE_ID 0x350e + +/* SH7751 Specific Values */ +#define SH7751_PCI_CONFIG_BASE 0xFD000000 /* Config space base addr */ +#define SH7751_PCI_CONFIG_SIZE 0x1000000 /* Config space size */ +#define SH7751_PCI_MEMORY_BASE 0xFD000000 /* Memory space base addr */ +#define SH7751_PCI_MEM_SIZE 0x01000000 /* Size of Memory window */ +#define SH7751_PCI_IO_BASE 0xFE240000 /* IO space base address */ +#define SH7751_PCI_IO_SIZE 0x40000 /* Size of IO window */ + +#define SH7751_PCIREG_BASE 0xFE200000 /* PCI regs base address */ + +#define SH7751_PCICONF0 0x0 /* PCI Config Reg 0 */ + #define SH7751_PCICONF0_DEVID 0xFFFF0000 /* Device ID */ + #define SH7751_PCICONF0_VNDID 0x0000FFFF /* Vendor ID */ +#define SH7751_PCICONF1 0x4 /* PCI Config Reg 1 */ + #define SH7751_PCICONF1_DPE 0x80000000 /* Data Parity Error */ + #define SH7751_PCICONF1_SSE 0x40000000 /* System Error Status */ + #define SH7751_PCICONF1_RMA 0x20000000 /* Master Abort */ + #define SH7751_PCICONF1_RTA 0x10000000 /* Target Abort Rx Status */ + #define SH7751_PCICONF1_STA 0x08000000 /* Target Abort Exec Status */ + #define SH7751_PCICONF1_DEV 0x06000000 /* Timing Status */ + #define SH7751_PCICONF1_DPD 0x01000000 /* Data Parity Status */ + #define SH7751_PCICONF1_FBBC 0x00800000 /* Back 2 Back Status */ + #define SH7751_PCICONF1_UDF 0x00400000 /* User Defined Status */ + #define SH7751_PCICONF1_66M 0x00200000 /* 66Mhz Operation Status */ + #define SH7751_PCICONF1_PM 0x00100000 /* Power Management Status */ + #define SH7751_PCICONF1_PBBE 0x00000200 /* Back 2 Back Control */ + #define SH7751_PCICONF1_SER 0x00000100 /* SERR Output Control */ + #define SH7751_PCICONF1_WCC 0x00000080 /* Wait Cycle Control */ + #define SH7751_PCICONF1_PER 0x00000040 /* Parity Error Response */ + #define SH7751_PCICONF1_VPS 0x00000020 /* VGA Pallet Snoop */ + #define SH7751_PCICONF1_MWIE 0x00000010 /* Memory Write+Invalidate */ + #define SH7751_PCICONF1_SPC 0x00000008 /* Special Cycle Control */ + #define SH7751_PCICONF1_BUM 0x00000004 /* Bus Master Control */ + #define SH7751_PCICONF1_MES 0x00000002 
/* Memory Space Control */ + #define SH7751_PCICONF1_IOS 0x00000001 /* I/O Space Control */ +#define SH7751_PCICONF2 0x8 /* PCI Config Reg 2 */ + #define SH7751_PCICONF2_BCC 0xFF000000 /* Base Class Code */ + #define SH7751_PCICONF2_SCC 0x00FF0000 /* Sub-Class Code */ + #define SH7751_PCICONF2_RLPI 0x0000FF00 /* Programming Interface */ + #define SH7751_PCICONF2_REV 0x000000FF /* Revision ID */ +#define SH7751_PCICONF3 0xC /* PCI Config Reg 3 */ + #define SH7751_PCICONF3_BIST7 0x80000000 /* Bist Supported */ + #define SH7751_PCICONF3_BIST6 0x40000000 /* Bist Executing */ + #define SH7751_PCICONF3_BIST3_0 0x0F000000 /* Bist Passed */ + #define SH7751_PCICONF3_HD7 0x00800000 /* Single Function device */ + #define SH7751_PCICONF3_HD6_0 0x007F0000 /* Configuration Layout */ + #define SH7751_PCICONF3_LAT 0x0000FF00 /* Latency Timer */ + #define SH7751_PCICONF3_CLS 0x000000FF /* Cache Line Size */ +#define SH7751_PCICONF4 0x10 /* PCI Config Reg 4 */ + #define SH7751_PCICONF4_BASE 0xFFFFFFFC /* I/O Space Base Addr */ + #define SH7751_PCICONF4_ASI 0x00000001 /* Address Space Type */ +#define SH7751_PCICONF5 0x14 /* PCI Config Reg 5 */ + #define SH7751_PCICONF5_BASE 0xFFFFFFF0 /* Mem Space Base Addr */ + #define SH7751_PCICONF5_LAP 0x00000008 /* Prefetch Enabled */ + #define SH7751_PCICONF5_LAT 0x00000006 /* Local Memory type */ + #define SH7751_PCICONF5_ASI 0x00000001 /* Address Space Type */ +#define SH7751_PCICONF6 0x18 /* PCI Config Reg 6 */ + #define SH7751_PCICONF6_BASE 0xFFFFFFF0 /* Mem Space Base Addr */ + #define SH7751_PCICONF6_LAP 0x00000008 /* Prefetch Enabled */ + #define SH7751_PCICONF6_LAT 0x00000006 /* Local Memory type */ + #define SH7751_PCICONF6_ASI 0x00000001 /* Address Space Type */ +/* PCICONF7 - PCICONF10 are undefined */ +#define SH7751_PCICONF11 0x2C /* PCI Config Reg 11 */ + #define SH7751_PCICONF11_SSID 0xFFFF0000 /* Subsystem ID */ + #define SH7751_PCICONF11_SVID 0x0000FFFF /* Subsystem Vendor ID */ +/* PCICONF12 is undefined */ +#define SH7751_PCICONF13 0x34 /* PCI Config Reg 13 */ + #define SH7751_PCICONF13_CPTR 0x000000FF /* PM function pointer */ +/* PCICONF14 is undefined */ +#define SH7751_PCICONF15 0x3C /* PCI Config Reg 15 */ + #define SH7751_PCICONF15_IPIN 0x000000FF /* Interrupt Pin */ +#define SH7751_PCICONF16 0x40 /* PCI Config Reg 16 */ + #define SH7751_PCICONF16_PMES 0xF8000000 /* PME Support */ + #define SH7751_PCICONF16_D2S 0x04000000 /* D2 Support */ + #define SH7751_PCICONF16_D1S 0x02000000 /* D1 Support */ + #define SH7751_PCICONF16_DSI 0x00200000 /* Bit Device Init. */ + #define SH7751_PCICONF16_PMCK 0x00080000 /* Clock for PME req. 
*/ + #define SH7751_PCICONF16_VER 0x00070000 /* PM Version */ + #define SH7751_PCICONF16_NIP 0x0000FF00 /* Next Item Pointer */ + #define SH7751_PCICONF16_CID 0x000000FF /* Capability Identifier */ +#define SH7751_PCICONF17 0x44 /* PCI Config Reg 17 */ + #define SH7751_PCICONF17_DATA 0xFF000000 /* Data field for PM */ + #define SH7751_PCICONF17_PMES 0x00800000 /* PME Status */ + #define SH7751_PCICONF17_DSCL 0x00600000 /* Data Scaling Value */ + #define SH7751_PCICONF17_DSEL 0x001E0000 /* Data Select */ + #define SH7751_PCICONF17_PMEN 0x00010000 /* PME Enable */ + #define SH7751_PCICONF17_PWST 0x00000003 /* Power State */ +/* SH7715 Internal PCI Registers */ + +/* Memory Control Registers */ +#define SH7751_BCR1 0xFF800000 /* Memory BCR1 Register */ +#define SH7751_BCR2 0xFF800004 /* Memory BCR2 Register */ +#define SH7751_BCR3 0xFF800050 /* Memory BCR3 Register */ +#define SH7751_BCR4 0xFE0A00F0 /* Memory BCR4 Register */ +#define SH7751_WCR1 0xFF800008 /* Wait Control 1 Register */ +#define SH7751_WCR2 0xFF80000C /* Wait Control 2 Register */ +#define SH7751_WCR3 0xFF800010 /* Wait Control 3 Register */ +#define SH7751_MCR 0xFF800014 /* Memory Control Register */ + +/* General Memory Config Addresses */ +#define SH7751_CS0_BASE_ADDR 0x0 +#define SH7751_MEM_REGION_SIZE 0x04000000 +#define SH7751_CS1_BASE_ADDR (SH7751_CS0_BASE_ADDR + SH7751_MEM_REGION_SIZE) +#define SH7751_CS2_BASE_ADDR (SH7751_CS1_BASE_ADDR + SH7751_MEM_REGION_SIZE) +#define SH7751_CS3_BASE_ADDR (SH7751_CS2_BASE_ADDR + SH7751_MEM_REGION_SIZE) +#define SH7751_CS4_BASE_ADDR (SH7751_CS3_BASE_ADDR + SH7751_MEM_REGION_SIZE) +#define SH7751_CS5_BASE_ADDR (SH7751_CS4_BASE_ADDR + SH7751_MEM_REGION_SIZE) +#define SH7751_CS6_BASE_ADDR (SH7751_CS5_BASE_ADDR + SH7751_MEM_REGION_SIZE) + +#endif /* _PCI_SH7751_H_ */ diff --git a/arch/sh/drivers/pci/pci-sh7780.c b/arch/sh/drivers/pci/pci-sh7780.c new file mode 100644 index 000000000..9a624a6ee --- /dev/null +++ b/arch/sh/drivers/pci/pci-sh7780.c @@ -0,0 +1,407 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Low-Level PCI Support for the SH7780 + * + * Copyright (C) 2005 - 2010 Paul Mundt + */ +#include <linux/types.h> +#include <linux/kernel.h> +#include <linux/init.h> +#include <linux/pci.h> +#include <linux/interrupt.h> +#include <linux/timer.h> +#include <linux/irq.h> +#include <linux/errno.h> +#include <linux/delay.h> +#include <linux/log2.h> +#include "pci-sh4.h" +#include <asm/mmu.h> +#include <linux/sizes.h> + +#if defined(CONFIG_CPU_BIG_ENDIAN) +# define PCICR_ENDIANNESS SH4_PCICR_BSWP +#else +# define PCICR_ENDIANNESS 0 +#endif + + +static struct resource sh7785_pci_resources[] = { + { + .name = "PCI IO", + .start = 0x1000, + .end = SZ_4M - 1, + .flags = IORESOURCE_IO, + }, { + .name = "PCI MEM 0", + .start = 0xfd000000, + .end = 0xfd000000 + SZ_16M - 1, + .flags = IORESOURCE_MEM, + }, { + .name = "PCI MEM 1", + .start = 0x10000000, + .end = 0x10000000 + SZ_64M - 1, + .flags = IORESOURCE_MEM, + }, { + /* + * 32-bit only resources must be last. 
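+ * sh7780_pci_init() drops IORESOURCE_MEM_32BIT windows in 29-bit
+ * physical mode by decrementing nr_resources rather than reshuffling
+ * the array, which only works if they sit at the end.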
+ */ + .name = "PCI MEM 2", + .start = 0xc0000000, + .end = 0xc0000000 + SZ_512M - 1, + .flags = IORESOURCE_MEM | IORESOURCE_MEM_32BIT, + }, +}; + +static struct pci_channel sh7780_pci_controller = { + .pci_ops = &sh4_pci_ops, + .resources = sh7785_pci_resources, + .nr_resources = ARRAY_SIZE(sh7785_pci_resources), + .io_offset = 0, + .mem_offset = 0, + .io_map_base = 0xfe200000, + .serr_irq = evt2irq(0xa00), + .err_irq = evt2irq(0xaa0), +}; + +struct pci_errors { + unsigned int mask; + const char *str; +} pci_arbiter_errors[] = { + { SH4_PCIAINT_MBKN, "master broken" }, + { SH4_PCIAINT_TBTO, "target bus time out" }, + { SH4_PCIAINT_MBTO, "master bus time out" }, + { SH4_PCIAINT_TABT, "target abort" }, + { SH4_PCIAINT_MABT, "master abort" }, + { SH4_PCIAINT_RDPE, "read data parity error" }, + { SH4_PCIAINT_WDPE, "write data parity error" }, +}, pci_interrupt_errors[] = { + { SH4_PCIINT_MLCK, "master lock error" }, + { SH4_PCIINT_TABT, "target-target abort" }, + { SH4_PCIINT_TRET, "target retry time out" }, + { SH4_PCIINT_MFDE, "master function disable error" }, + { SH4_PCIINT_PRTY, "address parity error" }, + { SH4_PCIINT_SERR, "SERR" }, + { SH4_PCIINT_TWDP, "data parity error for target write" }, + { SH4_PCIINT_TRDP, "PERR detected for target read" }, + { SH4_PCIINT_MTABT, "target abort for master" }, + { SH4_PCIINT_MMABT, "master abort for master" }, + { SH4_PCIINT_MWPD, "master write data parity error" }, + { SH4_PCIINT_MRPD, "master read data parity error" }, +}; + +static irqreturn_t sh7780_pci_err_irq(int irq, void *dev_id) +{ + struct pci_channel *hose = dev_id; + unsigned long addr; + unsigned int status; + unsigned int cmd; + int i; + + addr = __raw_readl(hose->reg_base + SH4_PCIALR); + + /* + * Handle status errors. + */ + status = __raw_readw(hose->reg_base + PCI_STATUS); + if (status & (PCI_STATUS_PARITY | + PCI_STATUS_DETECTED_PARITY | + PCI_STATUS_SIG_TARGET_ABORT | + PCI_STATUS_REC_TARGET_ABORT | + PCI_STATUS_REC_MASTER_ABORT)) { + cmd = pcibios_handle_status_errors(addr, status, hose); + if (likely(cmd)) + __raw_writew(cmd, hose->reg_base + PCI_STATUS); + } + + /* + * Handle arbiter errors. + */ + status = __raw_readl(hose->reg_base + SH4_PCIAINT); + for (i = cmd = 0; i < ARRAY_SIZE(pci_arbiter_errors); i++) { + if (status & pci_arbiter_errors[i].mask) { + printk(KERN_DEBUG "PCI: %s, addr=%08lx\n", + pci_arbiter_errors[i].str, addr); + cmd |= pci_arbiter_errors[i].mask; + } + } + __raw_writel(cmd, hose->reg_base + SH4_PCIAINT); + + /* + * Handle the remaining PCI errors. 
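+ * Each detected source is logged at KERN_DEBUG and its mask bit
+ * collected into cmd, which is then written back to SH4_PCIINT.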
+ */ + status = __raw_readl(hose->reg_base + SH4_PCIINT); + for (i = cmd = 0; i < ARRAY_SIZE(pci_interrupt_errors); i++) { + if (status & pci_interrupt_errors[i].mask) { + printk(KERN_DEBUG "PCI: %s, addr=%08lx\n", + pci_interrupt_errors[i].str, addr); + cmd |= pci_interrupt_errors[i].mask; + } + } + __raw_writel(cmd, hose->reg_base + SH4_PCIINT); + + return IRQ_HANDLED; +} + +static irqreturn_t sh7780_pci_serr_irq(int irq, void *dev_id) +{ + struct pci_channel *hose = dev_id; + + printk(KERN_DEBUG "PCI: system error received: "); + pcibios_report_status(PCI_STATUS_SIG_SYSTEM_ERROR, 1); + pr_cont("\n"); + + /* Deassert SERR */ + __raw_writel(SH4_PCIINTM_SDIM, hose->reg_base + SH4_PCIINTM); + + /* Back off the IRQ for awhile */ + disable_irq_nosync(irq); + hose->serr_timer.expires = jiffies + HZ; + add_timer(&hose->serr_timer); + + return IRQ_HANDLED; +} + +static int __init sh7780_pci_setup_irqs(struct pci_channel *hose) +{ + int ret; + + /* Clear out PCI arbiter IRQs */ + __raw_writel(0, hose->reg_base + SH4_PCIAINT); + + /* Clear all error conditions */ + __raw_writew(PCI_STATUS_DETECTED_PARITY | \ + PCI_STATUS_SIG_SYSTEM_ERROR | \ + PCI_STATUS_REC_MASTER_ABORT | \ + PCI_STATUS_REC_TARGET_ABORT | \ + PCI_STATUS_SIG_TARGET_ABORT | \ + PCI_STATUS_PARITY, hose->reg_base + PCI_STATUS); + + ret = request_irq(hose->serr_irq, sh7780_pci_serr_irq, 0, + "PCI SERR interrupt", hose); + if (unlikely(ret)) { + pr_err("PCI: Failed hooking SERR IRQ\n"); + return ret; + } + + /* + * The PCI ERR IRQ needs to be IRQF_SHARED since all of the power + * down IRQ vectors are routed through the ERR IRQ vector. We + * only request_irq() once as there is only a single masking + * source for multiple events. + */ + ret = request_irq(hose->err_irq, sh7780_pci_err_irq, IRQF_SHARED, + "PCI ERR interrupt", hose); + if (unlikely(ret)) { + free_irq(hose->serr_irq, hose); + return ret; + } + + /* Unmask all of the arbiter IRQs. 
*/ + __raw_writel(SH4_PCIAINT_MBKN | SH4_PCIAINT_TBTO | SH4_PCIAINT_MBTO | \ + SH4_PCIAINT_TABT | SH4_PCIAINT_MABT | SH4_PCIAINT_RDPE | \ + SH4_PCIAINT_WDPE, hose->reg_base + SH4_PCIAINTM); + + /* Unmask all of the PCI IRQs */ + __raw_writel(SH4_PCIINTM_TTADIM | SH4_PCIINTM_TMTOIM | \ + SH4_PCIINTM_MDEIM | SH4_PCIINTM_APEDIM | \ + SH4_PCIINTM_SDIM | SH4_PCIINTM_DPEITWM | \ + SH4_PCIINTM_PEDITRM | SH4_PCIINTM_TADIMM | \ + SH4_PCIINTM_MADIMM | SH4_PCIINTM_MWPDIM | \ + SH4_PCIINTM_MRDPEIM, hose->reg_base + SH4_PCIINTM); + + return ret; +} + +static inline void __init sh7780_pci_teardown_irqs(struct pci_channel *hose) +{ + free_irq(hose->err_irq, hose); + free_irq(hose->serr_irq, hose); +} + +static void __init sh7780_pci66_init(struct pci_channel *hose) +{ + unsigned int tmp; + + if (!pci_is_66mhz_capable(hose, 0, 0)) + return; + + /* Enable register access */ + tmp = __raw_readl(hose->reg_base + SH4_PCICR); + tmp |= SH4_PCICR_PREFIX; + __raw_writel(tmp, hose->reg_base + SH4_PCICR); + + /* Enable 66MHz operation */ + tmp = __raw_readw(hose->reg_base + PCI_STATUS); + tmp |= PCI_STATUS_66MHZ; + __raw_writew(tmp, hose->reg_base + PCI_STATUS); + + /* Done */ + tmp = __raw_readl(hose->reg_base + SH4_PCICR); + tmp |= SH4_PCICR_PREFIX | SH4_PCICR_CFIN; + __raw_writel(tmp, hose->reg_base + SH4_PCICR); +} + +static int __init sh7780_pci_init(void) +{ + struct pci_channel *chan = &sh7780_pci_controller; + phys_addr_t memphys; + size_t memsize; + unsigned int id; + const char *type; + int ret, i; + + pr_notice("PCI: Starting initialization.\n"); + + chan->reg_base = 0xfe040000; + + /* Enable CPU access to the PCIC registers. */ + __raw_writel(PCIECR_ENBL, PCIECR); + + /* Reset */ + __raw_writel(SH4_PCICR_PREFIX | SH4_PCICR_PRST | PCICR_ENDIANNESS, + chan->reg_base + SH4_PCICR); + + /* + * Wait for it to come back up. The spec says to allow for up to + * 1 second after toggling the reset pin, but in practice 100ms + * is more than enough. + */ + mdelay(100); + + id = __raw_readw(chan->reg_base + PCI_VENDOR_ID); + if (id != PCI_VENDOR_ID_RENESAS) { + pr_err("PCI: Unknown vendor ID 0x%04x.\n", id); + return -ENODEV; + } + + id = __raw_readw(chan->reg_base + PCI_DEVICE_ID); + type = (id == PCI_DEVICE_ID_RENESAS_SH7763) ? "SH7763" : + (id == PCI_DEVICE_ID_RENESAS_SH7780) ? "SH7780" : + (id == PCI_DEVICE_ID_RENESAS_SH7781) ? "SH7781" : + (id == PCI_DEVICE_ID_RENESAS_SH7785) ? "SH7785" : + NULL; + if (unlikely(!type)) { + pr_err("PCI: Found an unsupported Renesas host controller, device id 0x%04x.\n", + id); + return -EINVAL; + } + + pr_notice("PCI: Found a Renesas %s host controller, revision %d.\n", + type, __raw_readb(chan->reg_base + PCI_REVISION_ID)); + + /* + * Now throw it in to register initialization mode and + * start the real work. + */ + __raw_writel(SH4_PCICR_PREFIX | PCICR_ENDIANNESS, + chan->reg_base + SH4_PCICR); + + memphys = __pa(memory_start); + memsize = roundup_pow_of_two(memory_end - memory_start); + + /* + * If there's more than 512MB of memory, we need to roll over to + * LAR1/LSR1. + */ + if (memsize > SZ_512M) { + __raw_writel(memphys + SZ_512M, chan->reg_base + SH4_PCILAR1); + __raw_writel((((memsize - SZ_512M) - SZ_1M) & 0x1ff00000) | 1, + chan->reg_base + SH4_PCILSR1); + memsize = SZ_512M; + } else { + /* + * Otherwise just zero it out and disable it. + */ + __raw_writel(0, chan->reg_base + SH4_PCILAR1); + __raw_writel(0, chan->reg_base + SH4_PCILSR1); + } + + /* + * LAR0/LSR0 covers up to the first 512MB, which is enough to + * cover all of lowmem on most platforms. 
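+ *
+ * The LSR value is (window size - 1MB) masked to 0x1ff00000 and ORed
+ * with 1; a 128MB system, for example, ends up with 0x07f00001 in
+ * SH4_PCILSR0.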
+ */ + __raw_writel(memphys, chan->reg_base + SH4_PCILAR0); + __raw_writel(((memsize - SZ_1M) & 0x1ff00000) | 1, + chan->reg_base + SH4_PCILSR0); + + /* + * Hook up the ERR and SERR IRQs. + */ + ret = sh7780_pci_setup_irqs(chan); + if (unlikely(ret)) + return ret; + + /* + * Disable the cache snoop controller for non-coherent DMA. + */ + __raw_writel(0, chan->reg_base + SH7780_PCICSCR0); + __raw_writel(0, chan->reg_base + SH7780_PCICSAR0); + __raw_writel(0, chan->reg_base + SH7780_PCICSCR1); + __raw_writel(0, chan->reg_base + SH7780_PCICSAR1); + + /* + * Setup the memory BARs + */ + for (i = 1; i < chan->nr_resources; i++) { + struct resource *res = chan->resources + i; + resource_size_t size; + + if (unlikely(res->flags & IORESOURCE_IO)) + continue; + + /* + * Make sure we're in the right physical addressing mode + * for dealing with the resource. + */ + if ((res->flags & IORESOURCE_MEM_32BIT) && __in_29bit_mode()) { + chan->nr_resources--; + continue; + } + + size = resource_size(res); + + /* + * The MBMR mask is calculated in units of 256kB, which + * keeps things pretty simple. + */ + __raw_writel(((roundup_pow_of_two(size) / SZ_256K) - 1) << 18, + chan->reg_base + SH7780_PCIMBMR(i - 1)); + __raw_writel(res->start, chan->reg_base + SH7780_PCIMBR(i - 1)); + } + + /* + * And I/O. + */ + __raw_writel(0, chan->reg_base + PCI_BASE_ADDRESS_0); + __raw_writel(0, chan->reg_base + SH7780_PCIIOBR); + __raw_writel(0, chan->reg_base + SH7780_PCIIOBMR); + + __raw_writew(PCI_COMMAND_SERR | PCI_COMMAND_WAIT | \ + PCI_COMMAND_PARITY | PCI_COMMAND_MASTER | \ + PCI_COMMAND_MEMORY, chan->reg_base + PCI_COMMAND); + + /* + * Initialization mode complete, release the control register and + * enable round robin mode to stop device overruns/starvation. + */ + __raw_writel(SH4_PCICR_PREFIX | SH4_PCICR_CFIN | SH4_PCICR_FTO | + PCICR_ENDIANNESS, + chan->reg_base + SH4_PCICR); + + ret = register_pci_controller(chan); + if (unlikely(ret)) + goto err; + + sh7780_pci66_init(chan); + + pr_notice("PCI: Running at %dMHz.\n", + (__raw_readw(chan->reg_base + PCI_STATUS) & PCI_STATUS_66MHZ) + ? 66 : 33); + + return 0; + +err: + sh7780_pci_teardown_irqs(chan); + return ret; +} +arch_initcall(sh7780_pci_init); diff --git a/arch/sh/drivers/pci/pci-sh7780.h b/arch/sh/drivers/pci/pci-sh7780.h new file mode 100644 index 000000000..e2ac770f8 --- /dev/null +++ b/arch/sh/drivers/pci/pci-sh7780.h @@ -0,0 +1,43 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Low-Level PCI Support for SH7780 targets + * + * Dustin McIntire (dustin@sensoria.com) (c) 2001 + * Paul Mundt (lethal@linux-sh.org) (c) 2003 + */ + +#ifndef _PCI_SH7780_H_ +#define _PCI_SH7780_H_ + +/* SH7780 Control Registers */ +#define PCIECR 0xFE000008 +#define PCIECR_ENBL 0x01 + +/* SH7780 Specific Values */ +#define SH7780_PCI_CONFIG_BASE 0xFD000000 /* Config space base addr */ +#define SH7780_PCI_CONFIG_SIZE 0x01000000 /* Config space size */ + +#define SH7780_PCIREG_BASE 0xFE040000 /* PCI regs base address */ + +/* SH7780 PCI Config Registers */ +#define SH7780_PCIIR 0x114 /* PCI Interrupt Register */ +#define SH7780_PCIIMR 0x118 /* PCI Interrupt Mask Register */ +#define SH7780_PCIAIR 0x11C /* Error Address Register */ +#define SH7780_PCICIR 0x120 /* Error Command/Data Register */ +#define SH7780_PCIAINT 0x130 /* Arbiter Interrupt Register */ +#define SH7780_PCIAINTM 0x134 /* Arbiter Int. 
Mask Register */ +#define SH7780_PCIBMIR 0x138 /* Error Bus Master Register */ +#define SH7780_PCIPAR 0x1C0 /* PIO Address Register */ +#define SH7780_PCIPINT 0x1CC /* Power Mgmnt Int. Register */ +#define SH7780_PCIPINTM 0x1D0 /* Power Mgmnt Mask Register */ + +#define SH7780_PCIMBR(x) (0x1E0 + ((x) * 8)) +#define SH7780_PCIMBMR(x) (0x1E4 + ((x) * 8)) +#define SH7780_PCIIOBR 0x1F8 +#define SH7780_PCIIOBMR 0x1FC +#define SH7780_PCICSCR0 0x210 /* Cache Snoop1 Cnt. Register */ +#define SH7780_PCICSCR1 0x214 /* Cache Snoop2 Cnt. Register */ +#define SH7780_PCICSAR0 0x218 /* Cache Snoop1 Addr. Register */ +#define SH7780_PCICSAR1 0x21C /* Cache Snoop2 Addr. Register */ + +#endif /* _PCI_SH7780_H_ */ diff --git a/arch/sh/drivers/pci/pci.c b/arch/sh/drivers/pci/pci.c new file mode 100644 index 000000000..a3903304f --- /dev/null +++ b/arch/sh/drivers/pci/pci.c @@ -0,0 +1,298 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * New-style PCI core. + * + * Copyright (c) 2004 - 2009 Paul Mundt + * Copyright (c) 2002 M. R. Brown + * + * Modelled after arch/mips/pci/pci.c: + * Copyright (C) 2003, 04 Ralf Baechle (ralf@linux-mips.org) + */ +#include <linux/kernel.h> +#include <linux/mm.h> +#include <linux/pci.h> +#include <linux/init.h> +#include <linux/types.h> +#include <linux/io.h> +#include <linux/mutex.h> +#include <linux/spinlock.h> +#include <linux/export.h> + +unsigned long PCIBIOS_MIN_IO = 0x0000; +unsigned long PCIBIOS_MIN_MEM = 0; + +/* + * The PCI controller list. + */ +static struct pci_channel *hose_head, **hose_tail = &hose_head; + +static int pci_initialized; + +static void pcibios_scanbus(struct pci_channel *hose) +{ + static int next_busno; + static int need_domain_info; + LIST_HEAD(resources); + struct resource *res; + resource_size_t offset; + int i, ret; + struct pci_host_bridge *bridge; + + bridge = pci_alloc_host_bridge(0); + if (!bridge) + return; + + for (i = 0; i < hose->nr_resources; i++) { + res = hose->resources + i; + offset = 0; + if (res->flags & IORESOURCE_DISABLED) + continue; + if (res->flags & IORESOURCE_IO) + offset = hose->io_offset; + else if (res->flags & IORESOURCE_MEM) + offset = hose->mem_offset; + pci_add_resource_offset(&resources, res, offset); + } + + list_splice_init(&resources, &bridge->windows); + bridge->dev.parent = NULL; + bridge->sysdata = hose; + bridge->busnr = next_busno; + bridge->ops = hose->pci_ops; + bridge->swizzle_irq = pci_common_swizzle; + bridge->map_irq = pcibios_map_platform_irq; + + ret = pci_scan_root_bus_bridge(bridge); + if (ret) { + pci_free_host_bridge(bridge); + return; + } + + hose->bus = bridge->bus; + + need_domain_info = need_domain_info || hose->index; + hose->need_domain_info = need_domain_info; + + next_busno = hose->bus->busn_res.end + 1; + /* Don't allow 8-bit bus number overflow inside the hose - + reserve some space for bridges. */ + if (next_busno > 224) { + next_busno = 0; + need_domain_info = 1; + } + + pci_bus_size_bridges(hose->bus); + pci_bus_assign_resources(hose->bus); + pci_bus_add_devices(hose->bus); +} + +/* + * This interrupt-safe spinlock protects all accesses to PCI + * configuration space. 
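+ * Both the SH4 PAR/PDR accessors (ops-sh4.c) and the SH7786 PCIe
+ * accessors (ops-sh7786.c) take this lock around their paired
+ * address/data register sequences.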
+ */ +DEFINE_RAW_SPINLOCK(pci_config_lock); +static DEFINE_MUTEX(pci_scan_mutex); + +int register_pci_controller(struct pci_channel *hose) +{ + int i; + + for (i = 0; i < hose->nr_resources; i++) { + struct resource *res = hose->resources + i; + + if (res->flags & IORESOURCE_DISABLED) + continue; + + if (res->flags & IORESOURCE_IO) { + if (request_resource(&ioport_resource, res) < 0) + goto out; + } else { + if (request_resource(&iomem_resource, res) < 0) + goto out; + } + } + + *hose_tail = hose; + hose_tail = &hose->next; + + /* + * Do not panic here but later - this might happen before console init. + */ + if (!hose->io_map_base) { + pr_warn("registering PCI controller with io_map_base unset\n"); + } + + /* + * Setup the ERR/PERR and SERR timers, if available. + */ + pcibios_enable_timers(hose); + + /* + * Scan the bus if it is register after the PCI subsystem + * initialization. + */ + if (pci_initialized) { + mutex_lock(&pci_scan_mutex); + pcibios_scanbus(hose); + mutex_unlock(&pci_scan_mutex); + } + + return 0; + +out: + for (--i; i >= 0; i--) + release_resource(&hose->resources[i]); + + pr_warn("Skipping PCI bus scan due to resource conflict\n"); + return -1; +} + +static int __init pcibios_init(void) +{ + struct pci_channel *hose; + + /* Scan all of the recorded PCI controllers. */ + for (hose = hose_head; hose; hose = hose->next) + pcibios_scanbus(hose); + + pci_initialized = 1; + + return 0; +} +subsys_initcall(pcibios_init); + +/* + * We need to avoid collisions with `mirrored' VGA ports + * and other strange ISA hardware, so we always want the + * addresses to be allocated in the 0x000-0x0ff region + * modulo 0x400. + */ +resource_size_t pcibios_align_resource(void *data, const struct resource *res, + resource_size_t size, resource_size_t align) +{ + struct pci_dev *dev = data; + struct pci_channel *hose = dev->sysdata; + resource_size_t start = res->start; + + if (res->flags & IORESOURCE_IO) { + if (start < PCIBIOS_MIN_IO + hose->resources[0].start) + start = PCIBIOS_MIN_IO + hose->resources[0].start; + + /* + * Put everything into 0x00-0xff region modulo 0x400. + */ + if (start & 0x300) + start = (start + 0x3ff) & ~0x3ff; + } + + return start; +} + +static void __init +pcibios_bus_report_status_early(struct pci_channel *hose, + int top_bus, int current_bus, + unsigned int status_mask, int warn) +{ + unsigned int pci_devfn; + u16 status; + int ret; + + for (pci_devfn = 0; pci_devfn < 0xff; pci_devfn++) { + if (PCI_FUNC(pci_devfn)) + continue; + ret = early_read_config_word(hose, top_bus, current_bus, + pci_devfn, PCI_STATUS, &status); + if (ret != PCIBIOS_SUCCESSFUL) + continue; + if (status == 0xffff) + continue; + + early_write_config_word(hose, top_bus, current_bus, + pci_devfn, PCI_STATUS, + status & status_mask); + if (warn) + pr_cont("(%02x:%02x: %04X) ", current_bus, pci_devfn, + status); + } +} + +/* + * We can't use pci_find_device() here since we are + * called from interrupt context. 
+ */ +static void __ref +pcibios_bus_report_status(struct pci_bus *bus, unsigned int status_mask, + int warn) +{ + struct pci_dev *dev; + + list_for_each_entry(dev, &bus->devices, bus_list) { + u16 status; + + /* + * ignore host bridge - we handle + * that separately + */ + if (dev->bus->number == 0 && dev->devfn == 0) + continue; + + pci_read_config_word(dev, PCI_STATUS, &status); + if (status == 0xffff) + continue; + + if ((status & status_mask) == 0) + continue; + + /* clear the status errors */ + pci_write_config_word(dev, PCI_STATUS, status & status_mask); + + if (warn) + pr_cont("(%s: %04X) ", pci_name(dev), status); + } + + list_for_each_entry(dev, &bus->devices, bus_list) + if (dev->subordinate) + pcibios_bus_report_status(dev->subordinate, status_mask, warn); +} + +void __ref pcibios_report_status(unsigned int status_mask, int warn) +{ + struct pci_channel *hose; + + for (hose = hose_head; hose; hose = hose->next) { + if (unlikely(!hose->bus)) + pcibios_bus_report_status_early(hose, hose_head->index, + hose->index, status_mask, warn); + else + pcibios_bus_report_status(hose->bus, status_mask, warn); + } +} + +#ifndef CONFIG_GENERIC_IOMAP + +void __iomem *__pci_ioport_map(struct pci_dev *dev, + unsigned long port, unsigned int nr) +{ + struct pci_channel *chan = dev->sysdata; + + if (unlikely(!chan->io_map_base)) { + chan->io_map_base = sh_io_port_base; + + if (pci_domains_supported) + panic("To avoid data corruption io_map_base MUST be " + "set with multiple PCI domains."); + } + + return (void __iomem *)(chan->io_map_base + port); +} + +void pci_iounmap(struct pci_dev *dev, void __iomem *addr) +{ + iounmap(addr); +} +EXPORT_SYMBOL(pci_iounmap); + +#endif /* CONFIG_GENERIC_IOMAP */ + +EXPORT_SYMBOL(PCIBIOS_MIN_IO); +EXPORT_SYMBOL(PCIBIOS_MIN_MEM); diff --git a/arch/sh/drivers/pci/pcie-sh7786.c b/arch/sh/drivers/pci/pcie-sh7786.c new file mode 100644 index 000000000..4468289ab --- /dev/null +++ b/arch/sh/drivers/pci/pcie-sh7786.c @@ -0,0 +1,609 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Low-Level PCI Express Support for the SH7786 + * + * Copyright (C) 2009 - 2011 Paul Mundt + */ +#define pr_fmt(fmt) "PCI: " fmt + +#include <linux/pci.h> +#include <linux/init.h> +#include <linux/kernel.h> +#include <linux/io.h> +#include <linux/async.h> +#include <linux/delay.h> +#include <linux/dma-mapping.h> +#include <linux/slab.h> +#include <linux/clk.h> +#include <linux/sh_clk.h> +#include <linux/sh_intc.h> +#include <cpu/sh7786.h> +#include "pcie-sh7786.h" +#include <linux/sizes.h> + +struct sh7786_pcie_port { + struct pci_channel *hose; + struct clk *fclk, phy_clk; + unsigned int index; + int endpoint; + int link; +}; + +static struct sh7786_pcie_port *sh7786_pcie_ports; +static unsigned int nr_ports; +static unsigned long dma_pfn_offset; +size_t memsize; +u64 memstart; + +static struct sh7786_pcie_hwops { + int (*core_init)(void); + async_func_t port_init_hw; +} *sh7786_pcie_hwops; + +static struct resource sh7786_pci0_resources[] = { + { + .name = "PCIe0 MEM 0", + .start = 0xfd000000, + .end = 0xfd000000 + SZ_8M - 1, + .flags = IORESOURCE_MEM, + }, { + .name = "PCIe0 MEM 1", + .start = 0xc0000000, + .end = 0xc0000000 + SZ_512M - 1, + .flags = IORESOURCE_MEM | IORESOURCE_MEM_32BIT, + }, { + .name = "PCIe0 MEM 2", + .start = 0x10000000, + .end = 0x10000000 + SZ_64M - 1, + .flags = IORESOURCE_MEM, + }, { + .name = "PCIe0 IO", + .start = 0xfe100000, + .end = 0xfe100000 + SZ_1M - 1, + .flags = IORESOURCE_IO, + }, +}; + +static struct resource sh7786_pci1_resources[] = { + { + .name = 
"PCIe1 MEM 0", + .start = 0xfd800000, + .end = 0xfd800000 + SZ_8M - 1, + .flags = IORESOURCE_MEM, + }, { + .name = "PCIe1 MEM 1", + .start = 0xa0000000, + .end = 0xa0000000 + SZ_512M - 1, + .flags = IORESOURCE_MEM | IORESOURCE_MEM_32BIT, + }, { + .name = "PCIe1 MEM 2", + .start = 0x30000000, + .end = 0x30000000 + SZ_256M - 1, + .flags = IORESOURCE_MEM | IORESOURCE_MEM_32BIT, + }, { + .name = "PCIe1 IO", + .start = 0xfe300000, + .end = 0xfe300000 + SZ_1M - 1, + .flags = IORESOURCE_IO, + }, +}; + +static struct resource sh7786_pci2_resources[] = { + { + .name = "PCIe2 MEM 0", + .start = 0xfc800000, + .end = 0xfc800000 + SZ_4M - 1, + .flags = IORESOURCE_MEM, + }, { + .name = "PCIe2 MEM 1", + .start = 0x80000000, + .end = 0x80000000 + SZ_512M - 1, + .flags = IORESOURCE_MEM | IORESOURCE_MEM_32BIT, + }, { + .name = "PCIe2 MEM 2", + .start = 0x20000000, + .end = 0x20000000 + SZ_256M - 1, + .flags = IORESOURCE_MEM | IORESOURCE_MEM_32BIT, + }, { + .name = "PCIe2 IO", + .start = 0xfcd00000, + .end = 0xfcd00000 + SZ_1M - 1, + .flags = IORESOURCE_IO, + }, +}; + +extern struct pci_ops sh7786_pci_ops; + +#define DEFINE_CONTROLLER(start, idx) \ +{ \ + .pci_ops = &sh7786_pci_ops, \ + .resources = sh7786_pci##idx##_resources, \ + .nr_resources = ARRAY_SIZE(sh7786_pci##idx##_resources), \ + .reg_base = start, \ + .mem_offset = 0, \ + .io_offset = 0, \ +} + +static struct pci_channel sh7786_pci_channels[] = { + DEFINE_CONTROLLER(0xfe000000, 0), + DEFINE_CONTROLLER(0xfe200000, 1), + DEFINE_CONTROLLER(0xfcc00000, 2), +}; + +static struct clk fixed_pciexclkp = { + .rate = 100000000, /* 100 MHz reference clock */ +}; + +static void sh7786_pci_fixup(struct pci_dev *dev) +{ + /* + * Prevent enumeration of root complex resources. + */ + if (pci_is_root_bus(dev->bus) && dev->devfn == 0) { + int i; + + for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) { + dev->resource[i].start = 0; + dev->resource[i].end = 0; + dev->resource[i].flags = 0; + } + } +} +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_RENESAS, PCI_DEVICE_ID_RENESAS_SH7786, + sh7786_pci_fixup); + +static int __init phy_wait_for_ack(struct pci_channel *chan) +{ + unsigned int timeout = 100; + + while (timeout--) { + if (pci_read_reg(chan, SH4A_PCIEPHYADRR) & (1 << BITS_ACK)) + return 0; + + udelay(100); + } + + return -ETIMEDOUT; +} + +static int __init pci_wait_for_irq(struct pci_channel *chan, unsigned int mask) +{ + unsigned int timeout = 100; + + while (timeout--) { + if ((pci_read_reg(chan, SH4A_PCIEINTR) & mask) == mask) + return 0; + + udelay(100); + } + + return -ETIMEDOUT; +} + +static void __init phy_write_reg(struct pci_channel *chan, unsigned int addr, + unsigned int lane, unsigned int data) +{ + unsigned long phyaddr; + + phyaddr = (1 << BITS_CMD) + ((lane & 0xf) << BITS_LANE) + + ((addr & 0xff) << BITS_ADR); + + /* Set write data */ + pci_write_reg(chan, data, SH4A_PCIEPHYDOUTR); + pci_write_reg(chan, phyaddr, SH4A_PCIEPHYADRR); + + phy_wait_for_ack(chan); + + /* Clear command */ + pci_write_reg(chan, 0, SH4A_PCIEPHYDOUTR); + pci_write_reg(chan, 0, SH4A_PCIEPHYADRR); + + phy_wait_for_ack(chan); +} + +static int __init pcie_clk_init(struct sh7786_pcie_port *port) +{ + struct pci_channel *chan = port->hose; + struct clk *clk; + char fclk_name[16]; + int ret; + + /* + * First register the fixed clock + */ + ret = clk_register(&fixed_pciexclkp); + if (unlikely(ret != 0)) + return ret; + + /* + * Grab the port's function clock, which the PHY clock depends + * on. clock lookups don't help us much at this point, since no + * dev_id is available this early. Lame. 
+ */ + snprintf(fclk_name, sizeof(fclk_name), "pcie%d_fck", port->index); + + port->fclk = clk_get(NULL, fclk_name); + if (IS_ERR(port->fclk)) { + ret = PTR_ERR(port->fclk); + goto err_fclk; + } + + clk_enable(port->fclk); + + /* + * And now, set up the PHY clock + */ + clk = &port->phy_clk; + + memset(clk, 0, sizeof(struct clk)); + + clk->parent = &fixed_pciexclkp; + clk->enable_reg = (void __iomem *)(chan->reg_base + SH4A_PCIEPHYCTLR); + clk->enable_bit = BITS_CKE; + + ret = sh_clk_mstp_register(clk, 1); + if (unlikely(ret < 0)) + goto err_phy; + + return 0; + +err_phy: + clk_disable(port->fclk); + clk_put(port->fclk); +err_fclk: + clk_unregister(&fixed_pciexclkp); + + return ret; +} + +static int __init phy_init(struct sh7786_pcie_port *port) +{ + struct pci_channel *chan = port->hose; + unsigned int timeout = 100; + + clk_enable(&port->phy_clk); + + /* Initialize the phy */ + phy_write_reg(chan, 0x60, 0xf, 0x004b008b); + phy_write_reg(chan, 0x61, 0xf, 0x00007b41); + phy_write_reg(chan, 0x64, 0xf, 0x00ff4f00); + phy_write_reg(chan, 0x65, 0xf, 0x09070907); + phy_write_reg(chan, 0x66, 0xf, 0x00000010); + phy_write_reg(chan, 0x74, 0xf, 0x0007001c); + phy_write_reg(chan, 0x79, 0xf, 0x01fc000d); + phy_write_reg(chan, 0xb0, 0xf, 0x00000610); + + /* Deassert Standby */ + phy_write_reg(chan, 0x67, 0x1, 0x00000400); + + /* Disable clock */ + clk_disable(&port->phy_clk); + + while (timeout--) { + if (pci_read_reg(chan, SH4A_PCIEPHYSR)) + return 0; + + udelay(100); + } + + return -ETIMEDOUT; +} + +static void __init pcie_reset(struct sh7786_pcie_port *port) +{ + struct pci_channel *chan = port->hose; + + pci_write_reg(chan, 1, SH4A_PCIESRSTR); + pci_write_reg(chan, 0, SH4A_PCIETCTLR); + pci_write_reg(chan, 0, SH4A_PCIESRSTR); + pci_write_reg(chan, 0, SH4A_PCIETXVC0SR); +} + +static int __init pcie_init(struct sh7786_pcie_port *port) +{ + struct pci_channel *chan = port->hose; + unsigned int data; + phys_addr_t memstart, memend; + int ret, i, win; + + /* Begin initialization */ + pcie_reset(port); + + /* + * Initial header for port config space is type 1, set the device + * class to match. Hardware takes care of propagating the IDSETR + * settings, so there is no need to bother with a quirk. + */ + pci_write_reg(chan, PCI_CLASS_BRIDGE_PCI << 16, SH4A_PCIEIDSETR1); + + /* Initialize default capabilities. */ + data = pci_read_reg(chan, SH4A_PCIEEXPCAP0); + data &= ~(PCI_EXP_FLAGS_TYPE << 16); + + if (port->endpoint) + data |= PCI_EXP_TYPE_ENDPOINT << 20; + else + data |= PCI_EXP_TYPE_ROOT_PORT << 20; + + data |= PCI_CAP_ID_EXP; + pci_write_reg(chan, data, SH4A_PCIEEXPCAP0); + + /* Enable data link layer active state reporting */ + pci_write_reg(chan, PCI_EXP_LNKCAP_DLLLARC, SH4A_PCIEEXPCAP3); + + /* Enable extended sync and ASPM L0s support */ + data = pci_read_reg(chan, SH4A_PCIEEXPCAP4); + data &= ~PCI_EXP_LNKCTL_ASPMC; + data |= PCI_EXP_LNKCTL_ES | 1; + pci_write_reg(chan, data, SH4A_PCIEEXPCAP4); + + /* Write out the physical slot number */ + data = pci_read_reg(chan, SH4A_PCIEEXPCAP5); + data &= ~PCI_EXP_SLTCAP_PSN; + data |= (port->index + 1) << 19; + pci_write_reg(chan, data, SH4A_PCIEEXPCAP5); + + /* Set the completion timer timeout to the maximum 32ms. */ + data = pci_read_reg(chan, SH4A_PCIETLCTLR); + data &= ~0x3f00; + data |= 0x32 << 8; + pci_write_reg(chan, data, SH4A_PCIETLCTLR); + + /* + * Set fast training sequences to the maximum 255, + * and enable MAC data scrambling. 
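 *
 * In terms of the bits touched below: 0xff << 16 programs the eight-bit
 * FTS count field starting at bit 16 to its maximum of 255, while
 * clearing PCIEMACCTLR_SCR_DIS (bit 27) leaves the scrambler enabled.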
+ */ + data = pci_read_reg(chan, SH4A_PCIEMACCTLR); + data &= ~PCIEMACCTLR_SCR_DIS; + data |= (0xff << 16); + pci_write_reg(chan, data, SH4A_PCIEMACCTLR); + + memstart = __pa(memory_start); + memend = __pa(memory_end); + memsize = roundup_pow_of_two(memend - memstart); + + /* + * The start address must be aligned on its size. So we round + * it down, and then recalculate the size so that it covers + * the entire memory. + */ + memstart = ALIGN_DOWN(memstart, memsize); + memsize = roundup_pow_of_two(memend - memstart); + + /* + * If there's more than 512MB of memory, we need to roll over to + * LAR1/LAMR1. + */ + if (memsize > SZ_512M) { + pci_write_reg(chan, memstart + SZ_512M, SH4A_PCIELAR1); + pci_write_reg(chan, ((memsize - SZ_512M) - SZ_256) | 1, + SH4A_PCIELAMR1); + memsize = SZ_512M; + } else { + /* + * Otherwise just zero it out and disable it. + */ + pci_write_reg(chan, 0, SH4A_PCIELAR1); + pci_write_reg(chan, 0, SH4A_PCIELAMR1); + } + + /* + * LAR0/LAMR0 covers up to the first 512MB, which is enough to + * cover all of lowmem on most platforms. + */ + pci_write_reg(chan, memstart, SH4A_PCIELAR0); + pci_write_reg(chan, (memsize - SZ_256) | 1, SH4A_PCIELAMR0); + + /* Finish initialization */ + data = pci_read_reg(chan, SH4A_PCIETCTLR); + data |= 0x1; + pci_write_reg(chan, data, SH4A_PCIETCTLR); + + /* Let things settle down a bit.. */ + mdelay(100); + + /* Enable DL_Active Interrupt generation */ + data = pci_read_reg(chan, SH4A_PCIEDLINTENR); + data |= PCIEDLINTENR_DLL_ACT_ENABLE; + pci_write_reg(chan, data, SH4A_PCIEDLINTENR); + + /* Disable MAC data scrambling. */ + data = pci_read_reg(chan, SH4A_PCIEMACCTLR); + data |= PCIEMACCTLR_SCR_DIS | (0xff << 16); + pci_write_reg(chan, data, SH4A_PCIEMACCTLR); + + /* + * This will timeout if we don't have a link, but we permit the + * port to register anyways in order to support hotplug on future + * hardware. + */ + ret = pci_wait_for_irq(chan, MASK_INT_TX_CTRL); + + data = pci_read_reg(chan, SH4A_PCIEPCICONF1); + data &= ~(PCI_STATUS_DEVSEL_MASK << 16); + data |= PCI_COMMAND_IO | PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER | + (PCI_STATUS_CAP_LIST | PCI_STATUS_DEVSEL_FAST) << 16; + pci_write_reg(chan, data, SH4A_PCIEPCICONF1); + + pci_write_reg(chan, 0x80888000, SH4A_PCIETXVC0DCTLR); + pci_write_reg(chan, 0x00222000, SH4A_PCIERXVC0DCTLR); + + wmb(); + + if (ret == 0) { + data = pci_read_reg(chan, SH4A_PCIEMACSR); + printk(KERN_NOTICE "PCI: PCIe#%d x%d link detected\n", + port->index, (data >> 20) & 0x3f); + } else + printk(KERN_NOTICE "PCI: PCIe#%d link down\n", + port->index); + + for (i = win = 0; i < chan->nr_resources; i++) { + struct resource *res = chan->resources + i; + resource_size_t size; + u32 mask; + + /* + * We can't use the 32-bit mode windows in legacy 29-bit + * mode, so just skip them entirely. + */ + if ((res->flags & IORESOURCE_MEM_32BIT) && __in_29bit_mode()) + res->flags |= IORESOURCE_DISABLED; + + if (res->flags & IORESOURCE_DISABLED) + continue; + + pci_write_reg(chan, 0x00000000, SH4A_PCIEPTCTLR(win)); + + /* + * The PAMR mask is calculated in units of 256kB, which + * keeps things pretty simple. 
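 *
 * Worked example: the 512MB "MEM 1" window rounds up to 512MB, so the
 * value written below is (SZ_512M / SZ_256K) - 1 = 0x7ff, shifted left
 * by 18 into the mask field.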
+ */ + size = resource_size(res); + mask = (roundup_pow_of_two(size) / SZ_256K) - 1; + pci_write_reg(chan, mask << 18, SH4A_PCIEPAMR(win)); + + pci_write_reg(chan, upper_32_bits(res->start), + SH4A_PCIEPARH(win)); + pci_write_reg(chan, lower_32_bits(res->start), + SH4A_PCIEPARL(win)); + + mask = MASK_PARE; + if (res->flags & IORESOURCE_IO) + mask |= MASK_SPC; + + pci_write_reg(chan, mask, SH4A_PCIEPTCTLR(win)); + + win++; + } + + return 0; +} + +int pcibios_map_platform_irq(const struct pci_dev *pdev, u8 slot, u8 pin) +{ + return evt2irq(0xae0); +} + +void pcibios_bus_add_device(struct pci_dev *pdev) +{ + dma_direct_set_offset(&pdev->dev, __pa(memory_start), + __pa(memory_start) - memstart, memsize); +} + +static int __init sh7786_pcie_core_init(void) +{ + /* Return the number of ports */ + return test_mode_pin(MODE_PIN12) ? 3 : 2; +} + +static void __init sh7786_pcie_init_hw(void *data, async_cookie_t cookie) +{ + struct sh7786_pcie_port *port = data; + int ret; + + /* + * Check if we are configured in endpoint or root complex mode, + * this is a fixed pin setting that applies to all PCIe ports. + */ + port->endpoint = test_mode_pin(MODE_PIN11); + + /* + * Setup clocks, needed both for PHY and PCIe registers. + */ + ret = pcie_clk_init(port); + if (unlikely(ret < 0)) { + pr_err("clock initialization failed for port#%d\n", + port->index); + return; + } + + ret = phy_init(port); + if (unlikely(ret < 0)) { + pr_err("phy initialization failed for port#%d\n", + port->index); + return; + } + + ret = pcie_init(port); + if (unlikely(ret < 0)) { + pr_err("core initialization failed for port#%d\n", + port->index); + return; + } + + /* In the interest of preserving device ordering, synchronize */ + async_synchronize_cookie(cookie); + + register_pci_controller(port->hose); +} + +static struct sh7786_pcie_hwops sh7786_65nm_pcie_hwops __initdata = { + .core_init = sh7786_pcie_core_init, + .port_init_hw = sh7786_pcie_init_hw, +}; + +static int __init sh7786_pcie_init(void) +{ + struct clk *platclk; + u32 mm_sel; + int i; + + printk(KERN_NOTICE "PCI: Starting initialization.\n"); + + sh7786_pcie_hwops = &sh7786_65nm_pcie_hwops; + + nr_ports = sh7786_pcie_hwops->core_init(); + BUG_ON(nr_ports > ARRAY_SIZE(sh7786_pci_channels)); + + if (unlikely(nr_ports == 0)) + return -ENODEV; + + sh7786_pcie_ports = kcalloc(nr_ports, sizeof(struct sh7786_pcie_port), + GFP_KERNEL); + if (unlikely(!sh7786_pcie_ports)) + return -ENOMEM; + + /* + * Fetch any optional platform clock associated with this block. + * + * This is a rather nasty hack for boards with spec-mocking FPGAs + * that have a secondary set of clocks outside of the on-chip + * ones that need to be accounted for before there is any chance + * of touching the existing MSTP bits or CPG clocks. + */ + platclk = clk_get(NULL, "pcie_plat_clk"); + if (IS_ERR(platclk)) { + /* Sane hardware should probably get a WARN_ON.. */ + platclk = NULL; + } + + clk_enable(platclk); + + mm_sel = sh7786_mm_sel(); + + /* + * Depending on the MMSELR register value, the PCIe0 MEM 1 + * area may not be available. See Table 13.11 of the SH7786 + * datasheet. 
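 *
 * (The entry disabled below is sh7786_pci0_resources[2], the 64MB
 * "PCIe0 MEM 2" window at physical address 0x10000000.)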
+ */ + if (mm_sel != 1 && mm_sel != 2 && mm_sel != 5 && mm_sel != 6) + sh7786_pci0_resources[2].flags |= IORESOURCE_DISABLED; + + printk(KERN_NOTICE "PCI: probing %d ports.\n", nr_ports); + + for (i = 0; i < nr_ports; i++) { + struct sh7786_pcie_port *port = sh7786_pcie_ports + i; + + port->index = i; + port->hose = sh7786_pci_channels + i; + port->hose->io_map_base = port->hose->resources[0].start; + + async_schedule(sh7786_pcie_hwops->port_init_hw, port); + } + + async_synchronize_full(); + + return 0; +} +arch_initcall(sh7786_pcie_init); diff --git a/arch/sh/drivers/pci/pcie-sh7786.h b/arch/sh/drivers/pci/pcie-sh7786.h new file mode 100644 index 000000000..ffe383681 --- /dev/null +++ b/arch/sh/drivers/pci/pcie-sh7786.h @@ -0,0 +1,577 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * SH7786 PCI-Express controller definitions. + * + * Copyright (C) 2008, 2009 Renesas Technology Corp. + * All rights reserved. + */ +#ifndef __PCI_SH7786_H +#define __PCI_SH7786_H + +/* PCIe bus-0(x4) on SH7786 */ // Rev1.171 +#define SH4A_PCIE_SPW_BASE 0xFE000000 /* spw config address for controller 0 */ +#define SH4A_PCIE_SPW_BASE1 0xFE200000 /* spw config address for controller 1 (Rev1.14)*/ +#define SH4A_PCIE_SPW_BASE2 0xFCC00000 /* spw config address for controller 2 (Rev1.171)*/ +#define SH4A_PCIE_SPW_BASE_LEN 0x00080000 + +#define SH4A_PCI_CNFG_BASE 0xFE040000 /* pci config address for controller 0 */ +#define SH4A_PCI_CNFG_BASE1 0xFE240000 /* pci config address for controller 1 (Rev1.14)*/ +#define SH4A_PCI_CNFG_BASE2 0xFCC40000 /* pci config address for controller 2 (Rev1.171)*/ +#define SH4A_PCI_CNFG_BASE_LEN 0x00040000 + +#define SH4A_PCIPIO_ADDR_OFFSET 0x000001c0 /* offset to pci config_address */ +#define SH4A_PCIPIO_DATA_OFFSET 0x00000220 /* offset to pci config_data */ + +/* + * for PEX8111(Max Payload Size=128B,PCIIO_SIZE=64K), + * for other(Max Payload Size=4096B,PCIIO_SIZE=8M) + */ + +/* PCI0: PCI memory target transfer 32-bit address translation value(Rev1.11T)*/ +#define SH4A_PCIBMSTR_TRANSLATION 0x20000000 + +/* SPVCR0 */ +#define SH4A_PCIEVCR0 (0x000000) /* R - 0x0000 0000 32 */ +#define BITS_TOP_MB (24) +#define MASK_TOP_MB (0xff<<BITS_TOP_MB) +#define BITS_BOT_MB (16) +#define MASK_BOT_MB (0xff<<BITS_BOT_MB) +#define BITS_VC_ID (0) +#define MASK_VC_ID (0xffff<<BITS_VC_ID) + +/* SPVCR1 */ +#define SH4A_PCIEVCR1 (0x000004) /* R - 0x0000 0000 32*/ +#define BITS_BADOPC (5) /* 5 BADOPC 0 R/W */ +#define MASK_BADOPC (1<<BITS_BADOPC) +#define BITS_BADDEST (4) /*4 BADDEST 0 R/W */ +#define MASK_BADDEST (1<<BITS_BADDEST) +#define BITS_UNSOLRESP (3) /* 3 UNSOLRESP 0 R/W */ +#define MASK_UNSOLRESP (1<<BITS_UNSOLRESP) +#define BITS_ERRSNT (1) /* 1 ERRSNT 0 */ +#define MASK_ERRSNT (1<<BITS_ERRSNT) +#define BITS_ERRRCV (0) /* 0 ERRRCV 0 */ +#define MASK_ERRRCV (1<<BITS_ERRRCV) + +/* PCIEENBLR */ +#define SH4A_PCIEENBLR (0x000008) /* R/W - 0x0000 0001 32 */ + +/* PCIEECR */ +#define SH4A_PCIEECR (0x00000C) /* R/W - 0x0000 0000 32 */ +#define BITS_ENBL (0) /* 0 ENBL 0 R/W */ +#define MASK_ENBL (1<<BITS_ENBL) + +/* PCIEPAR */ +#define SH4A_PCIEPAR (0x000010) /* R/W - 0x0000 0000 32 */ +#define BITS_BN (24) +#define MASK_BN (0xff<<BITS_BN) +#define BITS_DN (19) +#define MASK_DN (0x1f<<BITS_DN) +#define BITS_FN (16) +#define MASK_FN (0x7<<BITS_FN) +#define BITS_EREGNO (8) +#define MASK_EREGNO (0xff<<BITS_EREGNO) +#define BITS_REGNO (2) +#define MASK_REGNO (0x3f<<BITS_REGNO) + +/* PCIEPCTLR */ +#define SH4A_PCIEPCTLR (0x000018) /* R/W - 0x0000 0000 32 */ +#define BITS_CCIE (31) /* 31 CCIE */ +#define 
MASK_CCIE (1<<BITS_CCIE) +#define BITS_TYPE (8) +#define MASK_TYPE (1<<BITS_TYPE) +#define BITS_C_VC (0) +#define MASK_C_VC (1<<BITS_C_VC) + +/* PCIEPDR */ +#define SH4A_PCIEPDR (0x000020) /* R/W - 0x0000 0000 32 */ +#define BITS_PDR (0) +#define MASK_PDR (0xffffffff<<BITS_PDR) + +/* PCIEMSGALR */ +#define SH4A_PCIEMSGALR (0x000030) /* R/W - 0x0000 0000 32 */ +#define BITS_MSGADRL (0) +#define MASK_MSGADRL (0xffffffff<<BITS_MSGADRL) + +/* PCIEMSGAHR */ +#define SH4A_PCIEMSGAHR (0x000034) /* R/W - 0x0000 0000 32 */ +#define BITS_MSGADRH (0) +#define MASK_MSGADRH (0xffffffff<<BITS_MSGADRH) + +/* PCIEMSGCTLR */ +#define SH4A_PCIEMSGCTLR (0x000038) /* R/W - 0x0000 0000 32 */ +#define BITS_MSGIE (31) +#define MASK_MSGIE (1<<BITS_MSGIE) +#define BITS_MROUTE (16) +#define MASK_MROUTE (0x7<<BITS_MROUTE) +#define BITS_MCODE (8) +#define MASK_MCODE (0xff<<BITS_MCODE) +#define BITS_M_VC (0) +#define MASK_M_VC (1<<BITS_M_VC) + +/* PCIEMSG */ +#define SH4A_PCIEMSG (0x000040) /* W - - 32 */ +#define BITS_MDATA (0) +#define MASK_MDATA (0xffffffff<<BITS_MDATA) + +/* PCIEUNLOCKCR */ +#define SH4A_PCIEUNLOCKCR (0x000048) /* R/W - 0x0000 0000 32 */ + +/* PCIEIDR */ +#define SH4A_PCIEIDR (0x000060) /* R/W - 0x0101 1101 32 */ + +/* PCIEDBGCTLR */ +#define SH4A_PCIEDBGCTLR (0x000100) /* R/W - 0x0000 0000 32 */ + +/* PCIEINTXR */ +#define SH4A_PCIEINTXR (0x004000) /* R/W - 0x0000 0000 32 */ + +/* PCIERMSGR */ +#define SH4A_PCIERMSGR (0x004010) /* R/W - 0x0000 0000 32 */ + +/* PCIERSTR */ +#define SH4A_PCIERSTR(x) (0x008000 + ((x) * 0x4)) /* R/W - 0x0000 0000 32 */ + +/* PCIESRSTR */ +#define SH4A_PCIESRSTR (0x008040) /* R/W - 0x0000 0000 32 */ + +/* PCIEPHYCTLR */ +#define SH4A_PCIEPHYCTLR (0x010000) /* R/W - 0x0000 0000 32 */ +#define BITS_CKE (0) +#define MASK_CKE (1<<BITS_CKE) + +/* PCIERMSGIER */ +#define SH4A_PCIERMSGIER (0x004040) /* R/W - 0x0000 0000 32 */ + +/* PCIEPHYADRR */ +#define SH4A_PCIEPHYADRR (0x010004) /* R/W - 0x0000 0000 32 */ +#define BITS_ACK (24) // Rev1.171 +#define MASK_ACK (1<<BITS_ACK) // Rev1.171 +#define BITS_CMD (16) // Rev1.171 +#define MASK_CMD (0x03<<BITS_CMD) // Rev1.171 +#define BITS_LANE (8) +#define MASK_LANE (0x0f<<BITS_LANE) +#define BITS_ADR (0) +#define MASK_ADR (0xff<<BITS_ADR) + +/* PCIEPHYDINR */ // Rev1.171 start. +#define SH4A_PCIEPHYDINR (0x010008) /* R/W - 0x0000 0000 32 */ + +/* PCIEPHYDOUTR */ +#define SH4A_PCIEPHYDOUTR (0x01000C) /* R/W - 0x0000 0000 32 */ + +/* PCIEPHYSR */ +#define SH4A_PCIEPHYSR (0x010010) /* R/W - 0x0000 0000 32 */ // Rev1.171 end. 
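Taken together with phy_write_reg() in pcie-sh7786.c above, these fields define the command word written to SH4A_PCIEPHYADRR. A minimal sketch of the composition, using the illustrative values from phy_init() (PHY register 0x60, lane value 0xf):

	unsigned long phyaddr;

	/* write command, lane value 0xf, PHY register 0x60 */
	phyaddr = (1 << BITS_CMD) | ((0xf & 0xf) << BITS_LANE) |
		  ((0x60 & 0xff) << BITS_ADR);		/* == 0x00010f60 */

	pci_write_reg(chan, 0x004b008b, SH4A_PCIEPHYDOUTR);	/* data word first */
	pci_write_reg(chan, phyaddr, SH4A_PCIEPHYADRR);		/* then the command */
	/* phy_wait_for_ack() then polls SH4A_PCIEPHYADRR for the BITS_ACK bit */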
+ +/* PCIEPHYDATAR */ +#define SH4A_PCIEPHYDATAR (0x00008) /* R/W - 0xxxxx xxxx 32 */ +#define BITS_DATA (0) +#define MASK_DATA (0xffffffff<<BITS_DATA) + +/* PCIETCTLR */ +#define SH4A_PCIETCTLR (0x020000) /* R/W R/W 0x0000 0000 32 */ +#define BITS_CFINT (0) +#define MASK_CFINT (1<<BITS_CFINT) + +/* PCIETSTR */ +#define SH4A_PCIETSTR (0x020004) /* R 0x0000 0000 32 */ + +/* PCIEINTR */ +#define SH4A_PCIEINTR (0x020008) /* R/W R/W 0x0000 0000 32 */ +#define BITS_INT_RX_ERP (31) +#define MASK_INT_RX_ERP (1<<BITS_INT_RX_ERP) +#define BITS_INT_RX_VCX_Posted (30) +#define MASK_INT_RX_VCX_Posted (1<<BITS_INT_RX_VCX_Posted) +#define BITS_INT_RX_VCX_NonPosted (29) +#define MASK_INT_RX_VCX_NonPosted (1<<BITS_INT_RX_VCX_NonPosted) +#define BITS_INT_RX_VCX_CPL (28) +#define MASK_INT_RX_VCX_CPL (1<<BITS_INT_RX_VCX_CPL) +#define BITS_INT_TX_VCX_Posted (26) +#define MASK_INT_TX_VCX_Posted (1<<BITS_INT_TX_VCX_Posted) +#define BITS_INT_TX_VCX_NonPosted (25) +#define MASK_INT_TX_VCX_NonPosted (1<<BITS_INT_TX_VCX_NonPosted) +#define BITS_INT_TX_VCX_CPL (24) +#define MASK_INT_TX_VCX_CPL (1<<BITS_INT_TX_VCX_CPL) +#define BITS_INT_RX_VC0_Posted (22) +#define MASK_INT_RX_VC0_Posted (1<<BITS_INT_RX_VC0_Posted) +#define BITS_INT_RX_VC0_NonPosted (21) +#define MASK_INT_RX_VC0_NonPosted (1<<BITS_INT_RX_VC0_NonPosted) +#define BITS_INT_RX_VC0_CPL (20) +#define MASK_INT_RX_VC0_CPL (1<<BITS_INT_RX_VC0_CPL) +#define BITS_INT_TX_VC0_Posted (18) +#define MASK_INT_TX_VC0_Posted (1<<BITS_INT_TX_VC0_Posted) +#define BITS_INT_TX_VC0_NonPosted (17) +#define MASK_INT_TX_VC0_NonPosted (1<<BITS_INT_TX_VC0_NonPosted) +#define BITS_INT_TX_VC0_CPL (16) +#define MASK_INT_TX_VC0_CPL (1<<BITS_INT_TX_VC0_CPL) +#define BITS_INT_RX_CTRL (15) +#define MASK_INT_RX_CTRL (1<<BITS_INT_RX_CTRL) +#define BITS_INT_TX_CTRL (14) +#define MASK_INT_TX_CTRL (1<<BITS_INT_TX_CTRL) +#define BITS_INTTL (11) +#define MASK_INTTL (1<<BITS_INTTL) +#define BITS_INTDL (10) +#define MASK_INTDL (1<<BITS_INTDL) +#define BITS_INTMAC (9) +#define MASK_INTMAC (1<<BITS_INTMAC) +#define BITS_INTPM (8) +#define MASK_INTPM (1<<BITS_INTPM) + +/* PCIEINTER */ +#define SH4A_PCIEINTER (0x02000C) /* R/W R/W 0x0000 0000 32 */ +#define BITS_INT_RX_ERP (31) +#define MASK_INT_RX_ERP (1<<BITS_INT_RX_ERP) +#define BITS_INT_RX_VCX_Posted (30) +#define MASK_INT_RX_VCX_Posted (1<<BITS_INT_RX_VCX_Posted) +#define BITS_INT_RX_VCX_NonPosted (29) +#define MASK_INT_RX_VCX_NonPosted (1<<BITS_INT_RX_VCX_NonPosted) +#define BITS_INT_RX_VCX_CPL (28) +#define MASK_INT_RX_VCX_CPL (1<<BITS_INT_RX_VCX_CPL) +#define BITS_INT_TX_VCX_Posted (26) +#define MASK_INT_TX_VCX_Posted (1<<BITS_INT_TX_VCX_Posted) +#define BITS_INT_TX_VCX_NonPosted (25) +#define MASK_INT_TX_VCX_NonPosted (1<<BITS_INT_TX_VCX_NonPosted) +#define BITS_INT_TX_VCX_CPL (24) +#define MASK_INT_TX_VCX_CPL (1<<BITS_INT_TX_VCX_CPL) +#define BITS_INT_RX_VC0_Posted (22) +#define MASK_INT_RX_VC0_Posted (1<<BITS_INT_RX_VC0_Posted) +#define BITS_INT_RX_VC0_NonPosted (21) +#define MASK_INT_RX_VC0_NonPosted (1<<BITS_INT_RX_VC0_NonPosted) +#define BITS_INT_RX_VC0_CPL (20) +#define MASK_INT_RX_VC0_CPL (1<<BITS_INT_RX_VC0_CPL) +#define BITS_INT_TX_VC0_Posted (18) +#define MASK_INT_TX_VC0_Posted (1<<BITS_INT_TX_VC0_Posted) +#define BITS_INT_TX_VC0_NonPosted (17) +#define MASK_INT_TX_VC0_NonPosted (1<<BITS_INT_TX_VC0_NonPosted) +#define BITS_INT_TX_VC0_CPL (16) +#define MASK_INT_TX_VC0_CPL (1<<BITS_INT_TX_VC0_CPL) +#define BITS_INT_RX_CTRL (15) +#define MASK_INT_RX_CTRL (1<<BITS_INT_RX_CTRL) +#define BITS_INT_TX_CTRL (14) +#define MASK_INT_TX_CTRL 
(1<<BITS_INT_TX_CTRL) +#define BITS_INTTL (11) +#define MASK_INTTL (1<<BITS_INTTL) +#define BITS_INTDL (10) +#define MASK_INTDL (1<<BITS_INTDL) +#define BITS_INTMAC (9) +#define MASK_INTMAC (1<<BITS_INTMAC) +#define BITS_INTPM (8) +#define MASK_INTPM (1<<BITS_INTPM) + +/* PCIEEH0R */ +#define SH4A_PCIEEHR(x) (0x020010 + ((x) * 0x4)) /* R - 0x0000 0000 32 */ + +/* PCIEAIR */ +#define SH4A_PCIEAIR (SH4A_PCIE_BASE + 0x020010) /* R/W R/W 0xxxxx xxxx 32 */ + +/* PCIECIR */ +#define SH4A_PCIECIR (SH4A_PCIE_BASE) /* R/W R/W 0xxxxx xxxx 32 */ + +/* PCIEERRFR */ // Rev1.18 +#define SH4A_PCIEERRFR (0x020020) /* R/W R/W 0xxxxx xxxx 32 */ // Rev1.18 + +/* PCIEERRFER */ +#define SH4A_PCIEERRFER (0x020024) /* R/W R/W 0x0000 0000 32 */ + +/* PCIEERRFR2 */ +#define SH4A_PCIEERRFR2 (0x020028) /* R/W R/W 0x0000 0000 32 */ + +/* PCIEMSIR */ +#define SH4A_PCIEMSIR (0x020040) /* R/W - 0x0000 0000 32 */ + +/* PCIEMSIFR */ +#define SH4A_PCIEMSIFR (0x020044) /* R/W R/W 0x0000 0000 32 */ + +/* PCIEPWRCTLR */ +#define SH4A_PCIEPWRCTLR (0x020100) /* R/W - 0x0000 0000 32 */ + +/* PCIEPCCTLR */ +#define SH4A_PCIEPCCTLR (0x020180) /* R/W - 0x0000 0000 32 */ + + // Rev1.18 +/* PCIELAR0 */ +#define SH4A_PCIELAR0 (0x020200) /* R/W R/W 0x0000 0000 32 */ +#define BITS_LARn (20) +#define MASK_LARn (0xfff<<BITS_LARn) + +#define SH4A_PCIE_020204 (0x020204) /* R/W R/W 0x0000 0000 32 */ + +/* PCIELAMR0 */ +#define SH4A_PCIELAMR0 (0x020208) /* R/W R/W 0x0000 0000 32 */ +#define BITS_LAMRn (20) +#define MASK_LAMRn (0x1ff<<BITS_LAMRn) +#define BITS_LAREn (0) +#define MASK_LAREn (0x1<<BITS_LAREn) + +/* PCIECSCR0 */ +#define SH4A_PCIECSCR0 (0x020210) /* R/W R/W 0x0000 0000 32 */ +#define BITS_RANGE (2) +#define MASK_RANGE (0x7<<BITS_RANGE) +#define BITS_SNPMD (0) +#define MASK_SNPMD (0x3<<BITS_SNPMD) + +/* PCIECSAR0 */ +#define SH4A_PCIECSAR0 (0x020214) /* R/W R/W 0x0000 0000 32 */ +#define BITS_CSADR (0) +#define MASK_CSADR (0xffffffff<<BITS_CSADR) + +/* PCIESTCTLR0 */ +#define SH4A_PCIESTCTLR0 (0x020218) /* R/W R/W 0x0000 0000 32 */ +#define BITS_SHPRI (8) +#define MASK_SHPRI (0x0f<<BITS_SHPRI) + +#define SH4A_PCIE_020224 (0x020224) /* R/W R/W 0x0000 0000 32 */ + +#define SH4A_PCIELAR1 (0x020220) /* R/W R/W 0x0000 0000 32 */ +#define SH4A_PCIELAMR1 (0x020228) /* R/W R/W 0x0000 0000 32 */ +#define SH4A_PCIECSCR1 (0x020230) /* R/W R/W 0x0000 0000 32 */ +#define SH4A_PCIECSAR1 (0x020234) /* R/W R/W 0x0000 0000 32 */ +#define SH4A_PCIESTCTLR1 (0x020238) /* R/W R/W 0x0000 0000 32 */ + +#define SH4A_PCIELAR2 (0x020240) /* R/W R/W 0x0000 0000 32 */ +#define SH4A_PCIE_020244 (0x020244) /* R/W R/W 0x0000 0000 32 */ +#define SH4A_PCIELAMR2 (0x020248) /* R/W R/W 0x0000 0000 32 */ +#define SH4A_PCIECSCR2 (0x020250) /* R/W R/W 0x0000 0000 32 */ +#define SH4A_PCIECSAR2 (0x020254) /* R/W R/W 0x0000 0000 32 */ +#define SH4A_PCIESTCTLR2 (0x020258) /* R/W R/W 0x0000 0000 32 */ + +#define SH4A_PCIELAR3 (0x020260) /* R/W R/W 0x0000 0000 32 */ +#define SH4A_PCIE_020264 (0x020264) /* R/W R/W 0x0000 0000 32 */ +#define SH4A_PCIELAMR3 (0x020268) /* R/W R/W 0x0000 0000 32 */ +#define SH4A_PCIECSCR3 (0x020270) /* R/W R/W 0x0000 0000 32 */ +#define SH4A_PCIECSAR3 (0x020274) /* R/W R/W 0x0000 0000 32 */ +#define SH4A_PCIESTCTLR3 (0x020278) /* R/W R/W 0x0000 0000 32 */ + +#define SH4A_PCIELAR4 (0x020280) /* R/W R/W 0x0000 0000 32 */ +#define SH4A_PCIE_020284 (0x020284) /* R/W R/W 0x0000 0000 32 */ +#define SH4A_PCIELAMR4 (0x020288) /* R/W R/W 0x0000 0000 32 */ +#define SH4A_PCIECSCR4 (0x020290) /* R/W R/W 0x0000 0000 32 */ +#define SH4A_PCIECSAR4 (0x020294) /* 
R/W R/W 0x0000 0000 32 */ +#define SH4A_PCIESTCTLR4 (0x020298) /* R/W R/W 0x0000 0000 32 */ + +#define SH4A_PCIELAR5 (0x0202A0) /* R/W R/W 0x0000 0000 32 */ +#define SH4A_PCIE_0202A4 (0x0202A4) /* R/W R/W 0x0000 0000 32 */ +#define SH4A_PCIELAMR5 (0x0202A8) /* R/W R/W 0x0000 0000 32 */ +#define SH4A_PCIECSCR5 (0x0202B0) /* R/W R/W 0x0000 0000 32 */ +#define SH4A_PCIECSAR5 (0x0202B4) /* R/W R/W 0x0000 0000 32 */ +#define SH4A_PCIESTCTLR5 (0x0202B8) /* R/W R/W 0x0000 0000 32 */ + +/* PCIEPARL */ +#define SH4A_PCIEPARL(x) (0x020400 + ((x) * 0x20)) /* R/W R/W 0x0000 0000 32 */ +#define BITS_PAL (18) +#define MASK_PAL (0x3fff<<BITS_PAL) + +/* PCIEPARH */ +#define SH4A_PCIEPARH(x) (0x020404 + ((x) * 0x20)) /* R/W R/W 0x0000 0000 32 */ +#define BITS_PAH (0) +#define MASK_PAH (0xffffffff<<BITS_PAH) + +/* PCIEPAMR */ +#define SH4A_PCIEPAMR(x) (0x020408 + ((x) * 0x20)) /* R/W R/W 0x0000 0000 32 */ +#define BITS_PAM (18) +#define MASK_PAM (0x3fff<<BITS_PAM) + +/* PCIEPTCTLR */ +#define SH4A_PCIEPTCTLR(x) (0x02040C + ((x) * 0x20)) +#define BITS_PARE (31) +#define MASK_PARE (0x1<<BITS_PARE) +#define BITS_TC (20) +#define MASK_TC (0x7<<BITS_TC) +#define BITS_T_VC (16) +#define MASK_T_VC (0x1<<BITS_T_VC) +#define BITS_LOCK (12) +#define MASK_LOCK (0x1<<BITS_LOCK) +#define BITS_SPC (8) +#define MASK_SPC (0x1<<BITS_SPC) + +#define SH4A_PCIEDMAOR (0x021000) /* R/W R/W 0x0000 0000 32 */ +#define SH4A_PCIEDMSAR0 (0x021100) /* R/W R/W 0x0000 0000 32 */ +#define SH4A_PCIEDMSAHR0 (0x021104) /* R/W R/W 0x0000 0000 32 */ +#define SH4A_PCIEDMDAR0 (0x021108) /* R/W R/W 0x0000 0000 32 */ +#define SH4A_PCIEDMDAHR0 (0x02110C) /* R/W R/W 0x0000 0000 32 */ +#define SH4A_PCIEDMBCNTR0 (0x021110) /* R/W R/W 0x0000 0000 32 */ +#define SH4A_PCIEDMSBCNTR0 (0x021114) /* R/W R/W 0x0000 0000 32 */ +#define SH4A_PCIEDMSTRR0 (0x021118) /* R/W R/W 0x0000 0000 32 */ +#define SH4A_PCIEDMCCAR0 (0x02111C) /* R/W R/W 0x0000 0000 32 */ +#define SH4A_PCIEDMCCR0 (0x021120) /* R/W R/W 0x0000 0000 32 */ +#define SH4A_PCIEDMCC2R0 (0x021124) /* R/W R/W 0x0000 0000 - */ +#define SH4A_PCIEDMCCCR0 (0x021128) /* R/W R/W 0x0000 0000 32 */ +#define SH4A_PCIEDMCHSR0 (0x02112C) /* R/W - 0x0000 0000 32 */ +#define SH4A_PCIEDMSAR1 (0x021140) /* R/W R/W 0x0000 0000 32 */ +#define SH4A_PCIEDMSAHR1 (0x021144) /* R/W R/W 0x0000 0000 32 */ +#define SH4A_PCIEDMDAR1 (0x021148) /* R/W R/W 0x0000 0000 32 */ +#define SH4A_PCIEDMDAHR1 (0x02114C) /* R/W R/W 0x0000 0000 32 */ +#define SH4A_PCIEDMBCNTR1 (0x021150) /* R/W R/W 0x0000 0000 32 */ +#define SH4A_PCIEDMSBCNTR1 (0x021154) /* R/W R/W 0x0000 0000 32 */ +#define SH4A_PCIEDMSTRR1 (0x021158) /* R/W R/W 0x0000 0000 32 */ +#define SH4A_PCIEDMCCAR1 (0x02115C) /* R/W R/W 0x0000 0000 32 */ +#define SH4A_PCIEDMCCR1 (0x021160) /* R/W R/W 0x0000 0000 32 */ +#define SH4A_PCIEDMCC2R1 (0x021164) /* R/W R/W 0x0000 0000 - */ +#define SH4A_PCIEDMCCCR1 (0x021168) /* R/W R/W 0x0000 0000 32 */ +#define SH4A_PCIEDMCHSR1 (0x02116C) /* R/W - 0x0000 0000 32 */ +#define SH4A_PCIEDMSAR2 (0x021180) /* R/W R/W 0x0000 0000 32 */ +#define SH4A_PCIEDMSAHR2 (0x021184) /* R/W R/W 0x0000 0000 32 */ +#define SH4A_PCIEDMDAR2 (0x021188) /* R/W R/W 0x0000 0000 32 */ +#define SH4A_PCIEDMDAHR2 (0x02118C) /* R/W R/W 0x0000 0000 32 */ +#define SH4A_PCIEDMBCNTR2 (0x021190) /* R/W R/W 0x0000 0000 32 */ +#define SH4A_PCIEDMSBCNTR2 (0x021194) /* R/W R/W 0x0000 0000 32 */ +#define SH4A_PCIEDMSTRR2 (0x021198) /* R/W R/W 0x0000 0000 32 */ +#define SH4A_PCIEDMCCAR2 (0x02119C) /* R/W R/W 0x0000 0000 32 */ +#define SH4A_PCIEDMCCR2 (0x0211A0) /* R/W R/W 0x0000 
0000 32 */ +#define SH4A_PCIEDMCC2R2 (0x0211A4) /* R/W R/W 0x0000 0000 - */ +#define SH4A_PCIEDMCCCR2 (0x0211A8) /* R/W R/W 0x0000 0000 32 */ +#define SH4A_PCIEDMSAR3 (0x0211C0) /* R/W R/W 0x0000 0000 32 */ +#define SH4A_PCIEDMSAHR3 (0x0211C4) /* R/W R/W 0x0000 0000 32 */ +#define SH4A_PCIEDMDAR3 (0x0211C8) /* R/W R/W 0x0000 0000 32 */ +#define SH4A_PCIEDMDAHR3 (0x0211CC) /* R/W R/W 0x0000 0000 32 */ +#define SH4A_PCIEDMBCNTR3 (0x0211D0) /* R/W R/W 0x0000 0000 32 */ +#define SH4A_PCIEDMSBCNTR3 (0x0211D4) /* R/W R/W 0x0000 0000 32 */ +#define SH4A_PCIEDMSTRR3 (0x0211D8) /* R/W R/W 0x0000 0000 32 */ +#define SH4A_PCIEDMCCAR3 (0x0211DC) /* R/W R/W 0x0000 0000 32 */ +#define SH4A_PCIEDMCCR3 (0x0211E0) /* R/W R/W 0x0000 0000 32 */ +#define SH4A_PCIEDMCC2R3 (0x0211E4) /* R/W R/W 0x0000 0000 - */ +#define SH4A_PCIEDMCCCR3 (0x0211E8) /* R/W R/W 0x0000 0000 32 */ +#define SH4A_PCIEDMCHSR3 (0x0211EC) /* R/W R/W 0x0000 0000 32 */ +#define SH4A_PCIEPCICONF0 (0x040000) /* R R - 8/16/32 */ +#define SH4A_PCIEPCICONF1 (0x040004) /* R/W R/W 0x0008 0000 8/16/32 */ +#define SH4A_PCIEPCICONF2 (0x040008) /* R/W R/W 0xFF00 0000 8/16/32 */ +#define SH4A_PCIEPCICONF3 (0x04000C) /* R/W R/W 0x0000 0000 8/16/32 */ +#define SH4A_PCIEPCICONF4 (0x040010) /* - R/W - 8/16/32 */ +#define SH4A_PCIEPCICONF5 (0x040014) /* - R/W - 8/16/32 */ +#define SH4A_PCIEPCICONF6 (0x040018) /* - R/W - 8/16/32 */ +#define SH4A_PCIEPCICONF7 (0x04001C) /* - R/W - 8/16/32 */ +#define SH4A_PCIEPCICONF8 (0x040020) /* - R/W - 8/16/32 */ +#define SH4A_PCIEPCICONF9 (0x040024) /* - R/W - 8/16/32 */ +#define SH4A_PCIEPCICONF10 (0x040028) /* R/W R/W 0x0000 0000 8/16/32 */ +#define SH4A_PCIEPCICONF11 (0x04002C) /* R/W R/W 0x0000 0000 8/16/32 */ +#define SH4A_PCIEPCICONF12 (0x040030) /* R/W R/W 0x0000 0000 8/16/32 */ +#define SH4A_PCIEPCICONF13 (0x040034) /* R/W R/W 0x0000 0040 8/16/32 */ +#define SH4A_PCIEPCICONF14 (0x040038) /* R/W R/W 0x0000 0000 8/16/32 */ +#define SH4A_PCIEPCICONF15 (0x04003C) /* R/W R/W 0x0000 00FF 8/16/32 */ +#define SH4A_PCIEPMCAP0 (0x040040) /* R/W R 0x0003 5001 8/16/32 */ +#define SH4A_PCIEPMCAP1 (0x040044) /* R/W R/W 0x0000 0000 8/16/32 */ +#define SH4A_PCIEMSICAP0 (0x040050) /* R/W R/W 0x0180 7005 8/16/32 */ +#define SH4A_PCIEMSICAP1 (0x040054) /* R/W R/W 0x0000 0000 8/16/32 */ +#define SH4A_PCIEMSICAP2 (0x040058) /* R/W R/W 0x0000 0000 8/16/32 */ +#define SH4A_PCIEMSICAP3 (0x04005C) /* R/W R/W 0x0000 0000 8/16/32 */ +#define SH4A_PCIEMSICAP4 (0x040060) /* R/W R/W 0x0000 0000 8/16/32 */ +#define SH4A_PCIEMSICAP5 (0x040064) /* R/W R/W 0x0000 0000 8/16/32 */ +#define SH4A_PCIEEXPCAP0 (0x040070) /* R/W R/W 0x0001 0010 8/16/32 */ +#define SH4A_PCIEEXPCAP1 (0x040074) /* R/W R 0x0000 0005 8/16/32 */ +#define SH4A_PCIEEXPCAP2 (0x040078) /* R/W R/W 0x0000 0801 8/16/32 */ +#define SH4A_PCIEEXPCAP3 (0x04007C) /* R/W R 0x0003 F421 8/16/32 */ +#define SH4A_PCIEEXPCAP4 (0x040080) /* R/W R/W 0x0041 0000 8/16/32 */ +#define SH4A_PCIEEXPCAP5 (0x040084) /* R/W R/W 0x0000 0000 8/16/32 */ +#define SH4A_PCIEEXPCAP6 (0x040088) /* R/W R/W 0x0000 03C0 8/16/32 */ +#define SH4A_PCIEEXPCAP7 (0x04008C) /* R/W R/W 0x0000 0000 8/16/32 */ +#define SH4A_PCIEEXPCAP8 (0x040090) /* R/W R/W 0x0000 0000 8/16/32 */ +#define SH4A_PCIEVCCAP0 (0x040100) /* R/W R 0x1B01 0002 8/16/32 */ +#define SH4A_PCIEVCCAP1 (0x040104) /* R R 0x0000 0001 8/16/32 */ +#define SH4A_PCIEVCCAP2 (0x040108) /* R R 0x0000 0000 8/16/32 */ +#define SH4A_PCIEVCCAP3 (0x04010C) /* R R/W 0x0000 0000 8/16/32 */ +#define SH4A_PCIEVCCAP4 (0x040110) /* R/W R/W 0x0000 0000 8/16/32 */ +#define 
SH4A_PCIEVCCAP5 (0x040114) /* R/W R/W 0x8000 00FF 8/16/32 */ +#define SH4A_PCIEVCCAP6 (0x040118) /* R/W R 0x0002 0000 8/16/32 */ +#define SH4A_PCIEVCCAP7 (0x04011C) /* R/W R/W 0x0000 0000 8/16/32 */ +#define SH4A_PCIEVCCAP8 (0x040120) /* R/W R/W 0x0000 0000 8/16/32 */ +#define SH4A_PCIEVCCAP9 (0x040124) /* R/W R 0x0002 0000 8/16/32 */ +#define SH4A_PCIENUMCAP0 (0x0001B0) /* RW R 0x0001 0003 8/16/32 */ +#define SH4A_PCIENUMCAP1 (0x0001B4) /* R R 0x0000 0000 8/16/32 */ +#define SH4A_PCIENUMCAP2 (0x0001B8) /* R R 0x0000 0000 8/16/32 */ +#define SH4A_PCIEIDSETR0 (0x041000) /* R/W R 0x0000 FFFF 16/32 */ +#define SH4A_PCIEIDSETR1 (0x041004) /* R/W R 0xFF00 0000 16/32 */ +#define SH4A_PCIEBAR0SETR (0x041008) /* R/W R 0x0000 0000 16/32 */ +#define SH4A_PCIEBAR1SETR (0x04100C) /* R/W R 0x0000 0000 16/32 */ +#define SH4A_PCIEBAR2SETR (0x041010) /* R/W R 0x0000 0000 16/32 */ +#define SH4A_PCIEBAR3SETR (0x041014) /* R/W R 0x0000 0000 16/32 */ +#define SH4A_PCIEBAR4SETR (0x041018) /* R/W R 0x0000 0000 16/32 */ +#define SH4A_PCIEBAR5SETR (0x04101C) /* R/W R 0x0000 0000 16/32 */ +#define SH4A_PCIECISSETR (0x041020) /* R/W R 0x0000 0000 16/32 */ +#define SH4A_PCIEIDSETR2 (0x041024) /* R/W R 0x0000 0000 16/32 */ +#define SH4A_PCIEEROMSETR (0x041028) /* R/W R 0x0000 0000 16/32 */ +#define SH4A_PCIEDSERSETR0 (0x04102C) /* R/W R 0x0000 0000 16/32 */ +#define SH4A_PCIEDSERSETR1 (0x041030) /* R/W R 0x0000 0000 16/32 */ +#define SH4A_PCIECTLR (0x041040) /* R/W R 0x0000 0000 16/32 */ +#define SH4A_PCIETLSR (0x041044) /* R/W1C R 0x0000 0000 16/32 */ +#define SH4A_PCIETLCTLR (0x041048) /* R/W R 0x0000 0000 16/32 */ +#define SH4A_PCIEDLSR (0x04104C) /* R/W1C R 0x4003 0000 16/32 */ +#define SH4A_PCIEDLCTLR (0x041050) /* R R 0x0000 0000 16/32 */ +#define SH4A_PCIEMACSR (0x041054) /* R/W1C R 0x0041 0000 16/32 */ +#define SH4A_PCIEMACCTLR (0x041058) /* R/W R 0x0000 0000 16/32 */ +#define PCIEMACCTLR_SCR_DIS (1 << 27) /* scramble disable */ +#define SH4A_PCIEPMSTR (0x04105C) /* R/W1C R 0x0000 0000 16/32 */ +#define SH4A_PCIEPMCTLR (0x041060) /* R/W R 0x0000 0000 16/32 */ +#define SH4A_PCIETLINTENR (0x041064) /* R/W R 0x0000 0000 16/32 */ +#define SH4A_PCIEDLINTENR (0x041068) /* R/W R 0x0000 0000 16/32 */ +#define PCIEDLINTENR_DLL_ACT_ENABLE (1 << 31) /* DL active irq */ +#define SH4A_PCIEMACINTENR (0x04106C) /* R/W R 0x0000 0000 16/32 */ +#define SH4A_PCIEPMINTENR (0x041070) /* R/W R 0x0000 0000 16/32 */ +#define SH4A_PCIETXDCTLR (0x044000) /* R/W - H'00000000_00000000 32/64 */ +#define SH4A_PCIETXCTLR (0x044020) /* R/W - H'00000000_00000000 32/64 */ +#define SH4A_PCIETXSR (0x044028) /* R - H'00000000_00000000 32/64 */ +#define SH4A_PCIETXVC0DCTLR (0x044100) /* R/W - H'00000000_00000000 32/64 */ +#define SH4A_PCIETXVC0SR (0x044108) /* R/W - H'00888000_00000000 32/64 */ +#define SH4A_PCIEVC0PDTXR (0x044110) /* W - H'00000000_00000000 32/64 */ +#define SH4A_PCIEVC0PHTXR (0x044118) /* W - H'00000000_00000000 32/64 */ +#define SH4A_PCIEVC0NPDTXR (0x044120) /* W - H'00000000_00000000 32/64 */ +#define SH4A_PCIEVC0NPHTXR (0x044128) /* W - H'00000000_00000000 32/64 */ +#define SH4A_PCIEVC0CDTXR (0x044130) /* W - H'00000000_00000000 32/64 */ +#define SH4A_PCIEVC0CHTXR (0x044138) /* W - H'00000000_00000000 32/64 */ +#define SH4A_PCIETXVCXDCTLR (0x044200) /* R/W - H'00000000_00000000 32/64 */ +#define SH4A_PCIETXVCXSR (0x044208) /* R/W - H'00000000_00000000 32/64 */ +#define SH4A_PCIEVCXPDTXR (0x044210) /* W - H'00000000_00000000 32/64 */ +#define SH4A_PCIEVCXPHTXR (0x044218) /* W - H'00000000_00000000 32/64 */ +#define 
SH4A_PCIEVCXNPDTXR (0x044220) /* W - H'00000000_00000000 32/64 */ +#define SH4A_PCIEVCXNPHTXR (0x044228) /* W - H'00000000_00000000 32/64 */ +#define SH4A_PCIEVCXCDTXR (0x044230) /* W - H'00000000_00000000 32/64 */ +#define SH4A_PCIEVCXCHTXR (0x044238) /* W - H'00000000_00000000 32/64 */ +#define SH4A_PCIERDCTLR (0x046000) /* RW - H'00000000_00000000 32/64 */ +#define SH4A_PCIEERPCTLR (0x046008) /* RW - H'00000000_00000000 32/64 */ +#define SH4A_PCIEERPHR (0x046010) /* R - H'00000000_00000000 32/64 */ +#define SH4A_PCIEERPERR (0x046018) /* R - H'00000000_00000000 32/64 */ +#define SH4A_PCIERXVC0DCTLR (0x046100) /* RW - H'00000000_00000000 32/64 */ +#define SH4A_PCIERXVC0SR (0x046108) /* RW - H'00000000_00000000 32/64 */ +#define SH4A_PCIEVC0PDRXR (0x046140) /* R - H'00000000_00000000 32/64 */ +#define SH4A_PCIEVC0PHRXR (0x046148) /* R - H'00000000_00000000 32/64 */ +#define SH4A_PCIEVC0PERR (0x046150) /* R - H'00000000_00000000 32/64 */ +#define SH4A_PCIEVC0NPDRXR (0x046158) /* R - H'00000000_00000000 32/64 */ +#define SH4A_PCIEVC0NPHRXR (0x046160) /* R - H'00000000_00000000 32/64 */ +#define SH4A_PCIEVC0NPERR (0x046168) /* R - H'00000000_00000000 32/64 */ +#define SH4A_PCIEVC0CDRXR (0x046170) /* R - H'00000000_00000000 32/64 */ +#define SH4A_PCIEVC0CHRXR (0x046178) /* R - H'00000000_00000000 32/64 */ +#define SH4A_PCIEVC0CERR (0x046180) /* R - H'00000000_00000000 32/64 */ +#define SH4A_PCIERXVCXDCTLR (0x046200) /* RW - H'00000000_00000000 32/64 */ +#define SH4A_PCIERXVCXSR (0x046208) /* RW - H'00000000_00000000 32/64 */ +#define SH4A_PCIEVCXPDRXR (0x046240) /* R - H'00000000_00000000 32/64 */ +#define SH4A_PCIEVCXPHRXR (0x046248) /* R H'00000000_00000000 32/64 */ +#define SH4A_PCIEVCXPERR (0x046250) /* R H'00000000_00000000 32/64 */ +#define SH4A_PCIEVCXNPDRXR (0x046258) /* R H'00000000_00000000 32/64 */ +#define SH4A_PCIEVCXNPHRXR (0x046260) /* R H'00000000_00000000 32/64 */ +#define SH4A_PCIEVCXNPERR (0x046268) /* R H'00000000_00000000 32/64 */ +#define SH4A_PCIEVCXCDRXR (0x046270) /* R H'00000000_00000000 32/64 */ +#define SH4A_PCIEVCXCHRXR (0x046278) /* R H'00000000_00000000 32/64 */ +#define SH4A_PCIEVCXCERR (0x046280) /* R H'00000000_00000000 32/64 */ + +/* SSI Register Definition for MSI WORK AROUND --hamada */ +#define SH4A_PCI_SSI_BASE 0xFFE00000 /* spw config address */ +#define SH4A_PCI_SSI_BASE_LEN 0x00100000 /* 1MB */ + +#define SH4A_SSICR0 (0x000000) +#define SH4A_SSICR1 (0x010000) +#define SH4A_SSICR2 (0x020000) +#define SH4A_SSICR3 (0x030000) + +#define PCI_REG(x) ((x) + 0x40000) + +static inline void +pci_write_reg(struct pci_channel *chan, unsigned long val, unsigned long reg) +{ + __raw_writel(val, chan->reg_base + reg); +} + +static inline unsigned long +pci_read_reg(struct pci_channel *chan, unsigned long reg) +{ + return __raw_readl(chan->reg_base + reg); +} + +#endif /* __PCI_SH7786_H */ diff --git a/arch/sh/drivers/platform_early.c b/arch/sh/drivers/platform_early.c new file mode 100644 index 000000000..143747c45 --- /dev/null +++ b/arch/sh/drivers/platform_early.c @@ -0,0 +1,340 @@ +// SPDX--License-Identifier: GPL-2.0 + +#include <asm/platform_early.h> +#include <linux/mod_devicetable.h> +#include <linux/pm.h> + +static __initdata LIST_HEAD(sh_early_platform_driver_list); +static __initdata LIST_HEAD(sh_early_platform_device_list); + +static const struct platform_device_id * +platform_match_id(const struct platform_device_id *id, + struct platform_device *pdev) +{ + while (id->name[0]) { + if (strcmp(pdev->name, id->name) == 0) { + pdev->id_entry = id; + return 
id; + } + id++; + } + return NULL; +} + +static int platform_match(struct device *dev, struct device_driver *drv) +{ + struct platform_device *pdev = to_platform_device(dev); + struct platform_driver *pdrv = to_platform_driver(drv); + + /* When driver_override is set, only bind to the matching driver */ + if (pdev->driver_override) + return !strcmp(pdev->driver_override, drv->name); + + /* Then try to match against the id table */ + if (pdrv->id_table) + return platform_match_id(pdrv->id_table, pdev) != NULL; + + /* fall-back to driver name match */ + return (strcmp(pdev->name, drv->name) == 0); +} + +#ifdef CONFIG_PM +static void device_pm_init_common(struct device *dev) +{ + if (!dev->power.early_init) { + spin_lock_init(&dev->power.lock); + dev->power.qos = NULL; + dev->power.early_init = true; + } +} + +static void pm_runtime_early_init(struct device *dev) +{ + dev->power.disable_depth = 1; + device_pm_init_common(dev); +} +#else +static void pm_runtime_early_init(struct device *dev) {} +#endif + +/** + * sh_early_platform_driver_register - register early platform driver + * @epdrv: sh_early_platform driver structure + * @buf: string passed from early_param() + * + * Helper function for sh_early_platform_init() / sh_early_platform_init_buffer() + */ +int __init sh_early_platform_driver_register(struct sh_early_platform_driver *epdrv, + char *buf) +{ + char *tmp; + int n; + + /* Simply add the driver to the end of the global list. + * Drivers will by default be put on the list in compiled-in order. + */ + if (!epdrv->list.next) { + INIT_LIST_HEAD(&epdrv->list); + list_add_tail(&epdrv->list, &sh_early_platform_driver_list); + } + + /* If the user has specified device then make sure the driver + * gets prioritized. The driver of the last device specified on + * command line will be put first on the list. + */ + n = strlen(epdrv->pdrv->driver.name); + if (buf && !strncmp(buf, epdrv->pdrv->driver.name, n)) { + list_move(&epdrv->list, &sh_early_platform_driver_list); + + /* Allow passing parameters after device name */ + if (buf[n] == '\0' || buf[n] == ',') + epdrv->requested_id = -1; + else { + epdrv->requested_id = simple_strtoul(&buf[n + 1], + &tmp, 10); + + if (buf[n] != '.' || (tmp == &buf[n + 1])) { + epdrv->requested_id = EARLY_PLATFORM_ID_ERROR; + n = 0; + } else + n += strcspn(&buf[n + 1], ",") + 1; + } + + if (buf[n] == ',') + n++; + + if (epdrv->bufsize) { + memcpy(epdrv->buffer, &buf[n], + min_t(int, epdrv->bufsize, strlen(&buf[n]) + 1)); + epdrv->buffer[epdrv->bufsize - 1] = '\0'; + } + } + + return 0; +} + +/** + * sh_early_platform_add_devices - adds a number of early platform devices + * @devs: array of early platform devices to add + * @num: number of early platform devices in array + * + * Used by early architecture code to register early platform devices and + * their platform data. + */ +void __init sh_early_platform_add_devices(struct platform_device **devs, int num) +{ + struct device *dev; + int i; + + /* simply add the devices to list */ + for (i = 0; i < num; i++) { + dev = &devs[i]->dev; + + if (!dev->devres_head.next) { + pm_runtime_early_init(dev); + INIT_LIST_HEAD(&dev->devres_head); + list_add_tail(&dev->devres_head, + &sh_early_platform_device_list); + } + } +} + +/** + * sh_early_platform_driver_register_all - register early platform drivers + * @class_str: string to identify early platform driver class + * + * Used by architecture code to register all early platform drivers + * for a certain class. 
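 *
 * A typical flow (illustrative): an early driver declares itself with
 * sh_early_platform_init("earlytimer", &foo_platform_driver), CPU or board
 * setup code queues its devices via sh_early_platform_add_devices(), and
 * early setup then calls sh_early_platform_driver_probe() with the same
 * class string to bind them before the driver core is up.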
If omitted then only early platform drivers + * with matching kernel command line class parameters will be registered. + */ +void __init sh_early_platform_driver_register_all(char *class_str) +{ + /* The "class_str" parameter may or may not be present on the kernel + * command line. If it is present then there may be more than one + * matching parameter. + * + * Since we register our early platform drivers using early_param() + * we need to make sure that they also get registered in the case + * when the parameter is missing from the kernel command line. + * + * We use parse_early_options() to make sure the early_param() gets + * called at least once. The early_param() may be called more than + * once since the name of the preferred device may be specified on + * the kernel command line. sh_early_platform_driver_register() handles + * this case for us. + */ + parse_early_options(class_str); +} + +/** + * sh_early_platform_match - find early platform device matching driver + * @epdrv: early platform driver structure + * @id: id to match against + */ +static struct platform_device * __init +sh_early_platform_match(struct sh_early_platform_driver *epdrv, int id) +{ + struct platform_device *pd; + + list_for_each_entry(pd, &sh_early_platform_device_list, dev.devres_head) + if (platform_match(&pd->dev, &epdrv->pdrv->driver)) + if (pd->id == id) + return pd; + + return NULL; +} + +/** + * sh_early_platform_left - check if early platform driver has matching devices + * @epdrv: early platform driver structure + * @id: return true if id or above exists + */ +static int __init sh_early_platform_left(struct sh_early_platform_driver *epdrv, + int id) +{ + struct platform_device *pd; + + list_for_each_entry(pd, &sh_early_platform_device_list, dev.devres_head) + if (platform_match(&pd->dev, &epdrv->pdrv->driver)) + if (pd->id >= id) + return 1; + + return 0; +} + +/** + * sh_early_platform_driver_probe_id - probe drivers matching class_str and id + * @class_str: string to identify early platform driver class + * @id: id to match against + * @nr_probe: number of platform devices to successfully probe before exiting + */ +static int __init sh_early_platform_driver_probe_id(char *class_str, + int id, + int nr_probe) +{ + struct sh_early_platform_driver *epdrv; + struct platform_device *match; + int match_id; + int n = 0; + int left = 0; + + list_for_each_entry(epdrv, &sh_early_platform_driver_list, list) { + /* only use drivers matching our class_str */ + if (strcmp(class_str, epdrv->class_str)) + continue; + + if (id == -2) { + match_id = epdrv->requested_id; + left = 1; + + } else { + match_id = id; + left += sh_early_platform_left(epdrv, id); + + /* skip requested id */ + switch (epdrv->requested_id) { + case EARLY_PLATFORM_ID_ERROR: + case EARLY_PLATFORM_ID_UNSET: + break; + default: + if (epdrv->requested_id == id) + match_id = EARLY_PLATFORM_ID_UNSET; + } + } + + switch (match_id) { + case EARLY_PLATFORM_ID_ERROR: + pr_warn("%s: unable to parse %s parameter\n", + class_str, epdrv->pdrv->driver.name); + fallthrough; + case EARLY_PLATFORM_ID_UNSET: + match = NULL; + break; + default: + match = sh_early_platform_match(epdrv, match_id); + } + + if (match) { + /* + * Set up a sensible init_name to enable + * dev_name() and others to be used before the + * rest of the driver core is initialized. 
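 *
 * For example, a device registered as { .name = "sh-sci", .id = 1 } ends
 * up with init_name "sh-sci.1", while an id of -1 yields plain "sh-sci".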
+ */ + if (!match->dev.init_name && slab_is_available()) { + if (match->id != -1) + match->dev.init_name = + kasprintf(GFP_KERNEL, "%s.%d", + match->name, + match->id); + else + match->dev.init_name = + kasprintf(GFP_KERNEL, "%s", + match->name); + + if (!match->dev.init_name) + return -ENOMEM; + } + + if (epdrv->pdrv->probe(match)) + pr_warn("%s: unable to probe %s early.\n", + class_str, match->name); + else + n++; + } + + if (n >= nr_probe) + break; + } + + if (left) + return n; + else + return -ENODEV; +} + +/** + * sh_early_platform_driver_probe - probe a class of registered drivers + * @class_str: string to identify early platform driver class + * @nr_probe: number of platform devices to successfully probe before exiting + * @user_only: only probe user specified early platform devices + * + * Used by architecture code to probe registered early platform drivers + * within a certain class. For probe to happen a registered early platform + * device matching a registered early platform driver is needed. + */ +int __init sh_early_platform_driver_probe(char *class_str, + int nr_probe, + int user_only) +{ + int k, n, i; + + n = 0; + for (i = -2; n < nr_probe; i++) { + k = sh_early_platform_driver_probe_id(class_str, i, nr_probe - n); + + if (k < 0) + break; + + n += k; + + if (user_only) + break; + } + + return n; +} + +/** + * early_platform_cleanup - clean up early platform code + */ +void __init early_platform_cleanup(void) +{ + struct platform_device *pd, *pd2; + + /* clean up the devres list used to chain devices */ + list_for_each_entry_safe(pd, pd2, &sh_early_platform_device_list, + dev.devres_head) { + list_del(&pd->dev.devres_head); + memset(&pd->dev.devres_head, 0, sizeof(pd->dev.devres_head)); + } +} diff --git a/arch/sh/drivers/push-switch.c b/arch/sh/drivers/push-switch.c new file mode 100644 index 000000000..2813140fd --- /dev/null +++ b/arch/sh/drivers/push-switch.c @@ -0,0 +1,136 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Generic push-switch framework + * + * Copyright (C) 2006 Paul Mundt + */ +#include <linux/init.h> +#include <linux/slab.h> +#include <linux/module.h> +#include <linux/interrupt.h> +#include <linux/platform_device.h> +#include <asm/push-switch.h> + +#define DRV_NAME "push-switch" +#define DRV_VERSION "0.1.1" + +static ssize_t switch_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct push_switch_platform_info *psw_info = dev->platform_data; + return sprintf(buf, "%s\n", psw_info->name); +} +static DEVICE_ATTR_RO(switch); + +static void switch_timer(struct timer_list *t) +{ + struct push_switch *psw = from_timer(psw, t, debounce); + + schedule_work(&psw->work); +} + +static void switch_work_handler(struct work_struct *work) +{ + struct push_switch *psw = container_of(work, struct push_switch, work); + struct platform_device *pdev = psw->pdev; + + psw->state = 0; + + kobject_uevent(&pdev->dev.kobj, KOBJ_CHANGE); +} + +static int switch_drv_probe(struct platform_device *pdev) +{ + struct push_switch_platform_info *psw_info; + struct push_switch *psw; + int ret, irq; + + psw = kzalloc(sizeof(struct push_switch), GFP_KERNEL); + if (unlikely(!psw)) + return -ENOMEM; + + irq = platform_get_irq(pdev, 0); + if (unlikely(irq < 0)) { + ret = -ENODEV; + goto err; + } + + psw_info = pdev->dev.platform_data; + BUG_ON(!psw_info); + + ret = request_irq(irq, psw_info->irq_handler, + psw_info->irq_flags, + psw_info->name ? 
psw_info->name : DRV_NAME, pdev); + if (unlikely(ret < 0)) + goto err; + + if (psw_info->name) { + ret = device_create_file(&pdev->dev, &dev_attr_switch); + if (unlikely(ret)) { + dev_err(&pdev->dev, "Failed creating device attrs\n"); + ret = -EINVAL; + goto err_irq; + } + } + + INIT_WORK(&psw->work, switch_work_handler); + timer_setup(&psw->debounce, switch_timer, 0); + + /* Workqueue API brain-damage */ + psw->pdev = pdev; + + platform_set_drvdata(pdev, psw); + + return 0; + +err_irq: + free_irq(irq, pdev); +err: + kfree(psw); + return ret; +} + +static int switch_drv_remove(struct platform_device *pdev) +{ + struct push_switch *psw = platform_get_drvdata(pdev); + struct push_switch_platform_info *psw_info = pdev->dev.platform_data; + int irq = platform_get_irq(pdev, 0); + + if (psw_info->name) + device_remove_file(&pdev->dev, &dev_attr_switch); + + platform_set_drvdata(pdev, NULL); + flush_work(&psw->work); + del_timer_sync(&psw->debounce); + free_irq(irq, pdev); + + kfree(psw); + + return 0; +} + +static struct platform_driver switch_driver = { + .probe = switch_drv_probe, + .remove = switch_drv_remove, + .driver = { + .name = DRV_NAME, + }, +}; + +static int __init switch_init(void) +{ + printk(KERN_NOTICE DRV_NAME ": version %s loaded\n", DRV_VERSION); + return platform_driver_register(&switch_driver); +} + +static void __exit switch_exit(void) +{ + platform_driver_unregister(&switch_driver); +} +module_init(switch_init); +module_exit(switch_exit); + +MODULE_VERSION(DRV_VERSION); +MODULE_AUTHOR("Paul Mundt"); +MODULE_LICENSE("GPL v2"); diff --git a/arch/sh/drivers/superhyway/Makefile b/arch/sh/drivers/superhyway/Makefile new file mode 100644 index 000000000..aa6e3267c --- /dev/null +++ b/arch/sh/drivers/superhyway/Makefile @@ -0,0 +1,7 @@ +# SPDX-License-Identifier: GPL-2.0 +# +# Makefile for the SuperHyway specific kernel interface routines under Linux. 
+# + +obj-$(CONFIG_CPU_SUBTYPE_SH4_202) += ops-sh4-202.o + diff --git a/arch/sh/drivers/superhyway/ops-sh4-202.c b/arch/sh/drivers/superhyway/ops-sh4-202.c new file mode 100644 index 000000000..490142274 --- /dev/null +++ b/arch/sh/drivers/superhyway/ops-sh4-202.c @@ -0,0 +1,168 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * arch/sh/drivers/superhyway/ops-sh4-202.c + * + * SuperHyway bus support for SH4-202 + * + * Copyright (C) 2005 Paul Mundt + */ +#include <linux/kernel.h> +#include <linux/init.h> +#include <linux/superhyway.h> +#include <linux/string.h> +#include <asm/addrspace.h> +#include <asm/io.h> + +#define PHYS_EMI_CBLOCK P4SEGADDR(0x1ec00000) +#define PHYS_EMI_DBLOCK P4SEGADDR(0x08000000) +#define PHYS_FEMI_CBLOCK P4SEGADDR(0x1f800000) +#define PHYS_FEMI_DBLOCK P4SEGADDR(0x00000000) + +#define PHYS_EPBR_BLOCK P4SEGADDR(0x1de00000) +#define PHYS_DMAC_BLOCK P4SEGADDR(0x1fa00000) +#define PHYS_PBR_BLOCK P4SEGADDR(0x1fc00000) + +static struct resource emi_resources[] = { + [0] = { + .start = PHYS_EMI_CBLOCK, + .end = PHYS_EMI_CBLOCK + 0x00300000 - 1, + .flags = IORESOURCE_MEM, + }, + [1] = { + .start = PHYS_EMI_DBLOCK, + .end = PHYS_EMI_DBLOCK + 0x08000000 - 1, + .flags = IORESOURCE_MEM, + }, +}; + +static struct superhyway_device emi_device = { + .name = "emi", + .num_resources = ARRAY_SIZE(emi_resources), + .resource = emi_resources, +}; + +static struct resource femi_resources[] = { + [0] = { + .start = PHYS_FEMI_CBLOCK, + .end = PHYS_FEMI_CBLOCK + 0x00100000 - 1, + .flags = IORESOURCE_MEM, + }, + [1] = { + .start = PHYS_FEMI_DBLOCK, + .end = PHYS_FEMI_DBLOCK + 0x08000000 - 1, + .flags = IORESOURCE_MEM, + }, +}; + +static struct superhyway_device femi_device = { + .name = "femi", + .num_resources = ARRAY_SIZE(femi_resources), + .resource = femi_resources, +}; + +static struct resource epbr_resources[] = { + [0] = { + .start = P4SEGADDR(0x1e7ffff8), + .end = P4SEGADDR(0x1e7ffff8 + (sizeof(u32) * 2) - 1), + .flags = IORESOURCE_MEM, + }, + [1] = { + .start = PHYS_EPBR_BLOCK, + .end = PHYS_EPBR_BLOCK + 0x00a00000 - 1, + .flags = IORESOURCE_MEM, + }, +}; + +static struct superhyway_device epbr_device = { + .name = "epbr", + .num_resources = ARRAY_SIZE(epbr_resources), + .resource = epbr_resources, +}; + +static struct resource dmac_resource = { + .start = PHYS_DMAC_BLOCK, + .end = PHYS_DMAC_BLOCK + 0x00100000 - 1, + .flags = IORESOURCE_MEM, +}; + +static struct superhyway_device dmac_device = { + .name = "dmac", + .num_resources = 1, + .resource = &dmac_resource, +}; + +static struct resource pbr_resources[] = { + [0] = { + .start = P4SEGADDR(0x1ffffff8), + .end = P4SEGADDR(0x1ffffff8 + (sizeof(u32) * 2) - 1), + .flags = IORESOURCE_MEM, + }, + [1] = { + .start = PHYS_PBR_BLOCK, + .end = PHYS_PBR_BLOCK + 0x00400000 - (sizeof(u32) * 2) - 1, + .flags = IORESOURCE_MEM, + }, +}; + +static struct superhyway_device pbr_device = { + .name = "pbr", + .num_resources = ARRAY_SIZE(pbr_resources), + .resource = pbr_resources, +}; + +static struct superhyway_device *sh4202_devices[] __initdata = { + &emi_device, &femi_device, &epbr_device, &dmac_device, &pbr_device, +}; + +static int sh4202_read_vcr(unsigned long base, struct superhyway_vcr_info *vcr) +{ + u32 vcrh, vcrl; + u64 tmp; + + /* + * XXX: Even though the SH4-202 Evaluation Device documentation + * indicates that VCRL is mapped first with VCRH at a + 0x04 + * offset, the opposite seems to be true. 
+ * + * Some modules (PBR and ePBR for instance) also appear to have + * VCRL/VCRH flipped in the documentation, but on the SH4-202 + * itself it appears that these are all consistently mapped with + * VCRH preceding VCRL. + * + * Do not trust the documentation, for it is evil. + */ + vcrh = __raw_readl(base); + vcrl = __raw_readl(base + sizeof(u32)); + + tmp = ((u64)vcrh << 32) | vcrl; + memcpy(vcr, &tmp, sizeof(u64)); + + return 0; +} + +static int sh4202_write_vcr(unsigned long base, struct superhyway_vcr_info vcr) +{ + u64 tmp = *(u64 *)&vcr; + + __raw_writel((tmp >> 32) & 0xffffffff, base); + __raw_writel(tmp & 0xffffffff, base + sizeof(u32)); + + return 0; +} + +static struct superhyway_ops sh4202_superhyway_ops = { + .read_vcr = sh4202_read_vcr, + .write_vcr = sh4202_write_vcr, +}; + +struct superhyway_bus superhyway_channels[] = { + { &sh4202_superhyway_ops, }, + { 0, }, +}; + +int __init superhyway_scan_bus(struct superhyway_bus *bus) +{ + return superhyway_add_devices(bus, sh4202_devices, + ARRAY_SIZE(sh4202_devices)); +} + |
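For context, the push-switch driver added above is only the framework half: it binds to a platform device named "push-switch" whose platform_data supplies the switch name, IRQ flags and an IRQ handler, and it expects that handler to restart the debounce timer so the deferred work item emits the sysfs/uevent notification. Below is a minimal, hypothetical board-side sketch of that wiring, not part of the patch itself; the IRQ number BOARD_PSW_IRQ, the handler name board_psw_irq, the "button0" switch name, the trigger flags and the 50 ms debounce interval are illustrative assumptions rather than values taken from this tree.

/*
 * Hypothetical board support sketch for the push-switch framework.
 * Assumed names: BOARD_PSW_IRQ, board_psw_irq, "button0", 50 ms debounce.
 */
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/timer.h>
#include <linux/jiffies.h>
#include <asm/push-switch.h>

#define BOARD_PSW_IRQ	43	/* hypothetical board-specific IRQ number */

/*
 * IRQ handler supplied through platform_data.  The framework's probe
 * routine passes the platform_device as dev_id to request_irq(), so the
 * per-switch state can be fetched back with platform_get_drvdata().
 */
static irqreturn_t board_psw_irq(int irq, void *dev_id)
{
	struct platform_device *pdev = dev_id;
	struct push_switch *psw = platform_get_drvdata(pdev);

	/*
	 * Note the press and restart the debounce timer; when it expires
	 * the framework schedules switch_work_handler(), which clears the
	 * state and raises a KOBJ_CHANGE uevent.
	 */
	psw->state = 1;
	mod_timer(&psw->debounce, jiffies + msecs_to_jiffies(50));

	return IRQ_HANDLED;
}

static struct push_switch_platform_info board_psw_info = {
	.irq_handler	= board_psw_irq,
	.irq_flags	= IRQF_TRIGGER_FALLING,
	.name		= "button0",	/* reported by the "switch" sysfs attribute */
};

static struct resource board_psw_resources[] = {
	[0] = {
		.start	= BOARD_PSW_IRQ,
		.end	= BOARD_PSW_IRQ,
		.flags	= IORESOURCE_IRQ,
	},
};

static struct platform_device board_psw_device = {
	.name		= "push-switch",	/* must match the driver's DRV_NAME */
	.id		= 0,
	.num_resources	= ARRAY_SIZE(board_psw_resources),
	.resource	= board_psw_resources,
	.dev		= {
		.platform_data	= &board_psw_info,
	},
};

static int __init board_psw_setup(void)
{
	return platform_device_register(&board_psw_device);
}
device_initcall(board_psw_setup);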