author:    Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-27 10:05:51 +0000
committer: Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-27 10:05:51 +0000
commit:    5d1646d90e1f2cceb9f0828f4b28318cd0ec7744 (patch)
tree:      a94efe259b9009378be6d90eb30d2b019d95c194 /drivers/rapidio
parent:    Initial commit. (diff)
Adding upstream version 5.10.209.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'drivers/rapidio')
-rw-r--r-- | drivers/rapidio/Kconfig | 104
-rw-r--r-- | drivers/rapidio/Makefile | 14
-rw-r--r-- | drivers/rapidio/devices/Kconfig | 11
-rw-r--r-- | drivers/rapidio/devices/Makefile | 9
-rw-r--r-- | drivers/rapidio/devices/rio_mport_cdev.c | 2644
-rw-r--r-- | drivers/rapidio/devices/tsi721.c | 3002
-rw-r--r-- | drivers/rapidio/devices/tsi721.h | 923
-rw-r--r-- | drivers/rapidio/devices/tsi721_dma.c | 1042
-rw-r--r-- | drivers/rapidio/rio-access.c | 143
-rw-r--r-- | drivers/rapidio/rio-driver.c | 269
-rw-r--r-- | drivers/rapidio/rio-scan.c | 1156
-rw-r--r-- | drivers/rapidio/rio-sysfs.c | 366
-rw-r--r-- | drivers/rapidio/rio.c | 2332
-rw-r--r-- | drivers/rapidio/rio.h | 55
-rw-r--r-- | drivers/rapidio/rio_cm.c | 2376
-rw-r--r-- | drivers/rapidio/switches/Kconfig | 31
-rw-r--r-- | drivers/rapidio/switches/Makefile | 10
-rw-r--r-- | drivers/rapidio/switches/idt_gen2.c | 490
-rw-r--r-- | drivers/rapidio/switches/idt_gen3.c | 378
-rw-r--r-- | drivers/rapidio/switches/idtcps.c | 199
-rw-r--r-- | drivers/rapidio/switches/tsi568.c | 195
-rw-r--r-- | drivers/rapidio/switches/tsi57x.c | 365
22 files changed, 16114 insertions, 0 deletions
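
The bulk of this commit is the mport character device driver (drivers/rapidio/devices/rio_mport_cdev.c), whose ioctl and mmap handlers appear further down in the diff. For orientation only, here is a hedged user-space sketch (not part of the commit) that opens an mport node, queries its properties, and allocates and maps a DMA-coherent buffer through the interfaces handled by mport_cdev_ioctl() and mport_cdev_mmap(); the /dev/rio_mport0 path and the 64 KB buffer length are illustrative assumptions.

/*
 * Hypothetical user-space sketch, not part of this commit: exercise the
 * mport character device added by rio_mport_cdev.c. The /dev/rio_mport0
 * node name is an assumption about how the device appears on a given
 * system; the ioctls and structures are those handled by
 * mport_cdev_ioctl() and mport_cdev_mmap() below.
 */
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <linux/rio_mport_cdev.h>

int main(void)
{
	struct rio_mport_properties props;
	struct rio_dma_mem dbuf = { .length = 0x10000 };	/* assumed 64 KB */
	void *p;
	int fd;

	fd = open("/dev/rio_mport0", O_RDWR);
	if (fd < 0) {
		perror("open /dev/rio_mport0");
		return 1;
	}

	/* RIO_MPORT_GET_PROPERTIES fills struct rio_mport_properties */
	if (ioctl(fd, RIO_MPORT_GET_PROPERTIES, &props) < 0) {
		perror("RIO_MPORT_GET_PROPERTIES");
		close(fd);
		return 1;
	}
	printf("host device id: %u\n", (unsigned int)props.hdid);

	/* Allocate a kernel DMA-coherent buffer and map it into user space;
	 * mport_cdev_mmap() matches the mmap offset against the handle
	 * returned in dbuf.dma_handle.
	 */
	if (ioctl(fd, RIO_ALLOC_DMA, &dbuf) == 0) {
		p = mmap(NULL, dbuf.length, PROT_READ | PROT_WRITE,
			 MAP_SHARED, fd, dbuf.dma_handle);
		if (p != MAP_FAILED)
			munmap(p, dbuf.length);
		ioctl(fd, RIO_FREE_DMA, &dbuf.dma_handle);
	}

	close(fd);
	return 0;
}

A real application would go on to use RIO_TRANSFER or the maintenance ioctls dispatched in mport_cdev_ioctl() below; this sketch only touches the property and buffer-management calls.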
diff --git a/drivers/rapidio/Kconfig b/drivers/rapidio/Kconfig
new file mode 100644
index 000000000..b9f851490
--- /dev/null
+++ b/drivers/rapidio/Kconfig
@@ -0,0 +1,104 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# RapidIO configuration
+#
+
+config HAVE_RAPIDIO
+	bool
+
+menuconfig RAPIDIO
+	tristate "RapidIO support"
+	depends on HAVE_RAPIDIO || PCI
+	help
+	  If you say Y here, the kernel will include drivers and
+	  infrastructure code to support RapidIO interconnect devices.
+
+source "drivers/rapidio/devices/Kconfig"
+
+config RAPIDIO_DISC_TIMEOUT
+	int "Discovery timeout duration (seconds)"
+	depends on RAPIDIO
+	default "30"
+	help
+	  Amount of time a discovery node waits for a host to complete
+	  enumeration before giving up.
+
+config RAPIDIO_ENABLE_RX_TX_PORTS
+	bool "Enable RapidIO Input/Output Ports"
+	depends on RAPIDIO
+	help
+	  The RapidIO specification describes an Output port transmit
+	  enable and an Input port receive enable. The recommended state
+	  for Input ports and Output ports is disabled. When this
+	  switch is set, the RapidIO subsystem will enable all ports
+	  for Input/Output direction to allow traffic other than
+	  Maintenance transfers.
+
+config RAPIDIO_DMA_ENGINE
+	bool "DMA Engine support for RapidIO"
+	depends on RAPIDIO
+	depends on DMADEVICES
+	select DMA_ENGINE
+	help
+	  Say Y here if you want to use the DMA Engine framework for RapidIO
+	  data transfers to/from target RIO devices. RapidIO uses NREAD and
+	  NWRITE (NWRITE_R, SWRITE) requests to transfer data between local
+	  memory and memory on a remote target device. You need a DMA
+	  controller capable of performing data transfers to/from RapidIO.
+
+	  If you are unsure about this, say Y here.
+
+config RAPIDIO_DEBUG
+	bool "RapidIO subsystem debug messages"
+	depends on RAPIDIO
+	help
+	  Say Y here if you want the RapidIO subsystem to produce a bunch of
+	  debug messages to the system log. Select this if you are having a
+	  problem with the RapidIO subsystem and want to see more of what is
+	  going on.
+
+	  If you are unsure about this, say N here.
+
+choice
+	prompt "Enumeration method"
+	depends on RAPIDIO
+	default RAPIDIO_ENUM_BASIC
+	help
+	  There are different enumeration and discovery mechanisms offered
+	  for the RapidIO subsystem. You may select a single built-in method
+	  or any number of methods to be built as modules.
+	  Selecting a built-in method disables use of loadable methods.
+
+	  If unsure, select Basic built-in.
+
+config RAPIDIO_ENUM_BASIC
+	tristate "Basic"
+	help
+	  This option includes a basic RapidIO fabric enumeration and discovery
+	  mechanism similar to the one described in RapidIO specification Annex 1.
+
+endchoice
+
+config RAPIDIO_CHMAN
+	tristate "RapidIO Channelized Messaging driver"
+	depends on RAPIDIO
+	help
+	  This option includes the RapidIO channelized messaging driver, which
+	  provides a socket-like interface to allow sharing of a single RapidIO
+	  messaging mailbox between multiple user-space applications.
+	  See "Documentation/driver-api/rapidio/rio_cm.rst" for driver description.
+
+config RAPIDIO_MPORT_CDEV
+	tristate "RapidIO /dev mport device driver"
+	depends on RAPIDIO
+	help
+	  This option includes the generic RapidIO mport device driver, which
+	  allows user-space applications to perform RapidIO-specific
+	  operations through a selected RapidIO mport.
+
+menu "RapidIO Switch drivers"
+	depends on RAPIDIO
+
+source "drivers/rapidio/switches/Kconfig"
+
+endmenu
diff --git a/drivers/rapidio/Makefile b/drivers/rapidio/Makefile
new file mode 100644
index 000000000..a34b0254b
--- /dev/null
+++ b/drivers/rapidio/Makefile
@@ -0,0 +1,14 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for RapidIO interconnect services
+#
+obj-$(CONFIG_RAPIDIO) += rapidio.o
+rapidio-y := rio.o rio-access.o rio-driver.o rio-sysfs.o
+
+obj-$(CONFIG_RAPIDIO_ENUM_BASIC) += rio-scan.o
+obj-$(CONFIG_RAPIDIO_CHMAN) += rio_cm.o
+
+obj-$(CONFIG_RAPIDIO) += switches/
+obj-$(CONFIG_RAPIDIO) += devices/
+
+subdir-ccflags-$(CONFIG_RAPIDIO_DEBUG) := -DDEBUG
diff --git a/drivers/rapidio/devices/Kconfig b/drivers/rapidio/devices/Kconfig
new file mode 100644
index 000000000..c416531ad
--- /dev/null
+++ b/drivers/rapidio/devices/Kconfig
@@ -0,0 +1,11 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# RapidIO master port configuration
+#
+
+config RAPIDIO_TSI721
+	tristate "IDT Tsi721 PCI Express SRIO Controller support"
+	depends on RAPIDIO && PCIEPORTBUS
+	default "n"
+	help
+	  Include support for IDT Tsi721 PCI Express Serial RapidIO controller.
diff --git a/drivers/rapidio/devices/Makefile b/drivers/rapidio/devices/Makefile
new file mode 100644
index 000000000..bf0e2e4d0
--- /dev/null
+++ b/drivers/rapidio/devices/Makefile
@@ -0,0 +1,9 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Makefile for RapidIO devices
+#
+
+obj-$(CONFIG_RAPIDIO_TSI721) += tsi721_mport.o
+tsi721_mport-y := tsi721.o
+tsi721_mport-$(CONFIG_RAPIDIO_DMA_ENGINE) += tsi721_dma.o
+obj-$(CONFIG_RAPIDIO_MPORT_CDEV) += rio_mport_cdev.o
diff --git a/drivers/rapidio/devices/rio_mport_cdev.c b/drivers/rapidio/devices/rio_mport_cdev.c
new file mode 100644
index 000000000..5ac2dc1e2
--- /dev/null
+++ b/drivers/rapidio/devices/rio_mport_cdev.c
@@ -0,0 +1,2644 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * RapidIO mport character device
+ *
+ * Copyright 2014-2015 Integrated Device Technology, Inc.
+ * Alexandre Bounine <alexandre.bounine@idt.com> + * Copyright 2014-2015 Prodrive Technologies + * Andre van Herk <andre.van.herk@prodrive-technologies.com> + * Jerry Jacobs <jerry.jacobs@prodrive-technologies.com> + * Copyright (C) 2014 Texas Instruments Incorporated + * Aurelien Jacquiot <a-jacquiot@ti.com> + */ +#include <linux/module.h> +#include <linux/kernel.h> +#include <linux/cdev.h> +#include <linux/ioctl.h> +#include <linux/uaccess.h> +#include <linux/list.h> +#include <linux/fs.h> +#include <linux/err.h> +#include <linux/net.h> +#include <linux/poll.h> +#include <linux/spinlock.h> +#include <linux/sched.h> +#include <linux/kfifo.h> + +#include <linux/mm.h> +#include <linux/slab.h> +#include <linux/vmalloc.h> +#include <linux/mman.h> + +#include <linux/dma-mapping.h> +#ifdef CONFIG_RAPIDIO_DMA_ENGINE +#include <linux/dmaengine.h> +#endif + +#include <linux/rio.h> +#include <linux/rio_ids.h> +#include <linux/rio_drv.h> +#include <linux/rio_mport_cdev.h> + +#include "../rio.h" + +#define DRV_NAME "rio_mport" +#define DRV_PREFIX DRV_NAME ": " +#define DEV_NAME "rio_mport" +#define DRV_VERSION "1.0.0" + +/* Debug output filtering masks */ +enum { + DBG_NONE = 0, + DBG_INIT = BIT(0), /* driver init */ + DBG_EXIT = BIT(1), /* driver exit */ + DBG_MPORT = BIT(2), /* mport add/remove */ + DBG_RDEV = BIT(3), /* RapidIO device add/remove */ + DBG_DMA = BIT(4), /* DMA transfer messages */ + DBG_MMAP = BIT(5), /* mapping messages */ + DBG_IBW = BIT(6), /* inbound window */ + DBG_EVENT = BIT(7), /* event handling messages */ + DBG_OBW = BIT(8), /* outbound window messages */ + DBG_DBELL = BIT(9), /* doorbell messages */ + DBG_ALL = ~0, +}; + +#ifdef DEBUG +#define rmcd_debug(level, fmt, arg...) \ + do { \ + if (DBG_##level & dbg_level) \ + pr_debug(DRV_PREFIX "%s: " fmt "\n", __func__, ##arg); \ + } while (0) +#else +#define rmcd_debug(level, fmt, arg...) \ + no_printk(KERN_DEBUG pr_fmt(DRV_PREFIX fmt "\n"), ##arg) +#endif + +#define rmcd_warn(fmt, arg...) \ + pr_warn(DRV_PREFIX "%s WARNING " fmt "\n", __func__, ##arg) + +#define rmcd_error(fmt, arg...) 
\ + pr_err(DRV_PREFIX "%s ERROR " fmt "\n", __func__, ##arg) + +MODULE_AUTHOR("Jerry Jacobs <jerry.jacobs@prodrive-technologies.com>"); +MODULE_AUTHOR("Aurelien Jacquiot <a-jacquiot@ti.com>"); +MODULE_AUTHOR("Alexandre Bounine <alexandre.bounine@idt.com>"); +MODULE_AUTHOR("Andre van Herk <andre.van.herk@prodrive-technologies.com>"); +MODULE_DESCRIPTION("RapidIO mport character device driver"); +MODULE_LICENSE("GPL"); +MODULE_VERSION(DRV_VERSION); + +static int dma_timeout = 3000; /* DMA transfer timeout in msec */ +module_param(dma_timeout, int, S_IRUGO); +MODULE_PARM_DESC(dma_timeout, "DMA Transfer Timeout in msec (default: 3000)"); + +#ifdef DEBUG +static u32 dbg_level = DBG_NONE; +module_param(dbg_level, uint, S_IWUSR | S_IWGRP | S_IRUGO); +MODULE_PARM_DESC(dbg_level, "Debugging output level (default 0 = none)"); +#endif + +/* + * An internal DMA coherent buffer + */ +struct mport_dma_buf { + void *ib_base; + dma_addr_t ib_phys; + u32 ib_size; + u64 ib_rio_base; + bool ib_map; + struct file *filp; +}; + +/* + * Internal memory mapping structure + */ +enum rio_mport_map_dir { + MAP_INBOUND, + MAP_OUTBOUND, + MAP_DMA, +}; + +struct rio_mport_mapping { + struct list_head node; + struct mport_dev *md; + enum rio_mport_map_dir dir; + u16 rioid; + u64 rio_addr; + dma_addr_t phys_addr; /* for mmap */ + void *virt_addr; /* kernel address, for dma_free_coherent */ + u64 size; + struct kref ref; /* refcount of vmas sharing the mapping */ + struct file *filp; +}; + +struct rio_mport_dma_map { + int valid; + u64 length; + void *vaddr; + dma_addr_t paddr; +}; + +#define MPORT_MAX_DMA_BUFS 16 +#define MPORT_EVENT_DEPTH 10 + +/* + * mport_dev driver-specific structure that represents mport device + * @active mport device status flag + * @node list node to maintain list of registered mports + * @cdev character device + * @dev associated device object + * @mport associated subsystem's master port device object + * @buf_mutex lock for buffer handling + * @file_mutex - lock for open files list + * @file_list - list of open files on given mport + * @properties properties of this mport + * @portwrites queue of inbound portwrites + * @pw_lock lock for port write queue + * @mappings queue for memory mappings + * @dma_chan DMA channels associated with this device + * @dma_ref: + * @comp: + */ +struct mport_dev { + atomic_t active; + struct list_head node; + struct cdev cdev; + struct device dev; + struct rio_mport *mport; + struct mutex buf_mutex; + struct mutex file_mutex; + struct list_head file_list; + struct rio_mport_properties properties; + struct list_head doorbells; + spinlock_t db_lock; + struct list_head portwrites; + spinlock_t pw_lock; + struct list_head mappings; +#ifdef CONFIG_RAPIDIO_DMA_ENGINE + struct dma_chan *dma_chan; + struct kref dma_ref; + struct completion comp; +#endif +}; + +/* + * mport_cdev_priv - data structure specific to individual file object + * associated with an open device + * @md master port character device object + * @async_queue - asynchronous notification queue + * @list - file objects tracking list + * @db_filters inbound doorbell filters for this descriptor + * @pw_filters portwrite filters for this descriptor + * @event_fifo event fifo for this descriptor + * @event_rx_wait wait queue for this descriptor + * @fifo_lock lock for event_fifo + * @event_mask event mask for this descriptor + * @dmach DMA engine channel allocated for specific file object + */ +struct mport_cdev_priv { + struct mport_dev *md; + struct fasync_struct *async_queue; + struct list_head list; + 
struct list_head db_filters; + struct list_head pw_filters; + struct kfifo event_fifo; + wait_queue_head_t event_rx_wait; + spinlock_t fifo_lock; + u32 event_mask; /* RIO_DOORBELL, RIO_PORTWRITE */ +#ifdef CONFIG_RAPIDIO_DMA_ENGINE + struct dma_chan *dmach; + struct list_head async_list; + spinlock_t req_lock; + struct mutex dma_lock; + struct kref dma_ref; + struct completion comp; +#endif +}; + +/* + * rio_mport_pw_filter - structure to describe a portwrite filter + * md_node node in mport device's list + * priv_node node in private file object's list + * priv reference to private data + * filter actual portwrite filter + */ +struct rio_mport_pw_filter { + struct list_head md_node; + struct list_head priv_node; + struct mport_cdev_priv *priv; + struct rio_pw_filter filter; +}; + +/* + * rio_mport_db_filter - structure to describe a doorbell filter + * @data_node reference to device node + * @priv_node node in private data + * @priv reference to private data + * @filter actual doorbell filter + */ +struct rio_mport_db_filter { + struct list_head data_node; + struct list_head priv_node; + struct mport_cdev_priv *priv; + struct rio_doorbell_filter filter; +}; + +static LIST_HEAD(mport_devs); +static DEFINE_MUTEX(mport_devs_lock); + +#if (0) /* used by commented out portion of poll function : FIXME */ +static DECLARE_WAIT_QUEUE_HEAD(mport_cdev_wait); +#endif + +static struct class *dev_class; +static dev_t dev_number; + +static void mport_release_mapping(struct kref *ref); + +static int rio_mport_maint_rd(struct mport_cdev_priv *priv, void __user *arg, + int local) +{ + struct rio_mport *mport = priv->md->mport; + struct rio_mport_maint_io maint_io; + u32 *buffer; + u32 offset; + size_t length; + int ret, i; + + if (unlikely(copy_from_user(&maint_io, arg, sizeof(maint_io)))) + return -EFAULT; + + if ((maint_io.offset % 4) || + (maint_io.length == 0) || (maint_io.length % 4) || + (maint_io.length + maint_io.offset) > RIO_MAINT_SPACE_SZ) + return -EINVAL; + + buffer = vmalloc(maint_io.length); + if (buffer == NULL) + return -ENOMEM; + length = maint_io.length/sizeof(u32); + offset = maint_io.offset; + + for (i = 0; i < length; i++) { + if (local) + ret = __rio_local_read_config_32(mport, + offset, &buffer[i]); + else + ret = rio_mport_read_config_32(mport, maint_io.rioid, + maint_io.hopcount, offset, &buffer[i]); + if (ret) + goto out; + + offset += 4; + } + + if (unlikely(copy_to_user((void __user *)(uintptr_t)maint_io.buffer, + buffer, maint_io.length))) + ret = -EFAULT; +out: + vfree(buffer); + return ret; +} + +static int rio_mport_maint_wr(struct mport_cdev_priv *priv, void __user *arg, + int local) +{ + struct rio_mport *mport = priv->md->mport; + struct rio_mport_maint_io maint_io; + u32 *buffer; + u32 offset; + size_t length; + int ret = -EINVAL, i; + + if (unlikely(copy_from_user(&maint_io, arg, sizeof(maint_io)))) + return -EFAULT; + + if ((maint_io.offset % 4) || + (maint_io.length == 0) || (maint_io.length % 4) || + (maint_io.length + maint_io.offset) > RIO_MAINT_SPACE_SZ) + return -EINVAL; + + buffer = vmalloc(maint_io.length); + if (buffer == NULL) + return -ENOMEM; + length = maint_io.length; + + if (unlikely(copy_from_user(buffer, + (void __user *)(uintptr_t)maint_io.buffer, length))) { + ret = -EFAULT; + goto out; + } + + offset = maint_io.offset; + length /= sizeof(u32); + + for (i = 0; i < length; i++) { + if (local) + ret = __rio_local_write_config_32(mport, + offset, buffer[i]); + else + ret = rio_mport_write_config_32(mport, maint_io.rioid, + maint_io.hopcount, + offset, 
buffer[i]); + if (ret) + goto out; + + offset += 4; + } + +out: + vfree(buffer); + return ret; +} + + +/* + * Inbound/outbound memory mapping functions + */ +static int +rio_mport_create_outbound_mapping(struct mport_dev *md, struct file *filp, + u16 rioid, u64 raddr, u32 size, + dma_addr_t *paddr) +{ + struct rio_mport *mport = md->mport; + struct rio_mport_mapping *map; + int ret; + + rmcd_debug(OBW, "did=%d ra=0x%llx sz=0x%x", rioid, raddr, size); + + map = kzalloc(sizeof(*map), GFP_KERNEL); + if (map == NULL) + return -ENOMEM; + + ret = rio_map_outb_region(mport, rioid, raddr, size, 0, paddr); + if (ret < 0) + goto err_map_outb; + + map->dir = MAP_OUTBOUND; + map->rioid = rioid; + map->rio_addr = raddr; + map->size = size; + map->phys_addr = *paddr; + map->filp = filp; + map->md = md; + kref_init(&map->ref); + list_add_tail(&map->node, &md->mappings); + return 0; +err_map_outb: + kfree(map); + return ret; +} + +static int +rio_mport_get_outbound_mapping(struct mport_dev *md, struct file *filp, + u16 rioid, u64 raddr, u32 size, + dma_addr_t *paddr) +{ + struct rio_mport_mapping *map; + int err = -ENOMEM; + + mutex_lock(&md->buf_mutex); + list_for_each_entry(map, &md->mappings, node) { + if (map->dir != MAP_OUTBOUND) + continue; + if (rioid == map->rioid && + raddr == map->rio_addr && size == map->size) { + *paddr = map->phys_addr; + err = 0; + break; + } else if (rioid == map->rioid && + raddr < (map->rio_addr + map->size - 1) && + (raddr + size) > map->rio_addr) { + err = -EBUSY; + break; + } + } + + /* If not found, create new */ + if (err == -ENOMEM) + err = rio_mport_create_outbound_mapping(md, filp, rioid, raddr, + size, paddr); + mutex_unlock(&md->buf_mutex); + return err; +} + +static int rio_mport_obw_map(struct file *filp, void __user *arg) +{ + struct mport_cdev_priv *priv = filp->private_data; + struct mport_dev *data = priv->md; + struct rio_mmap map; + dma_addr_t paddr; + int ret; + + if (unlikely(copy_from_user(&map, arg, sizeof(map)))) + return -EFAULT; + + rmcd_debug(OBW, "did=%d ra=0x%llx sz=0x%llx", + map.rioid, map.rio_addr, map.length); + + ret = rio_mport_get_outbound_mapping(data, filp, map.rioid, + map.rio_addr, map.length, &paddr); + if (ret < 0) { + rmcd_error("Failed to set OBW err= %d", ret); + return ret; + } + + map.handle = paddr; + + if (unlikely(copy_to_user(arg, &map, sizeof(map)))) + return -EFAULT; + return 0; +} + +/* + * rio_mport_obw_free() - unmap an OutBound Window from RapidIO address space + * + * @priv: driver private data + * @arg: buffer handle returned by allocation routine + */ +static int rio_mport_obw_free(struct file *filp, void __user *arg) +{ + struct mport_cdev_priv *priv = filp->private_data; + struct mport_dev *md = priv->md; + u64 handle; + struct rio_mport_mapping *map, *_map; + + if (!md->mport->ops->unmap_outb) + return -EPROTONOSUPPORT; + + if (copy_from_user(&handle, arg, sizeof(handle))) + return -EFAULT; + + rmcd_debug(OBW, "h=0x%llx", handle); + + mutex_lock(&md->buf_mutex); + list_for_each_entry_safe(map, _map, &md->mappings, node) { + if (map->dir == MAP_OUTBOUND && map->phys_addr == handle) { + if (map->filp == filp) { + rmcd_debug(OBW, "kref_put h=0x%llx", handle); + map->filp = NULL; + kref_put(&map->ref, mport_release_mapping); + } + break; + } + } + mutex_unlock(&md->buf_mutex); + + return 0; +} + +/* + * maint_hdid_set() - Set the host Device ID + * @priv: driver private data + * @arg: Device Id + */ +static int maint_hdid_set(struct mport_cdev_priv *priv, void __user *arg) +{ + struct mport_dev *md = priv->md; + 
u16 hdid; + + if (copy_from_user(&hdid, arg, sizeof(hdid))) + return -EFAULT; + + md->mport->host_deviceid = hdid; + md->properties.hdid = hdid; + rio_local_set_device_id(md->mport, hdid); + + rmcd_debug(MPORT, "Set host device Id to %d", hdid); + + return 0; +} + +/* + * maint_comptag_set() - Set the host Component Tag + * @priv: driver private data + * @arg: Component Tag + */ +static int maint_comptag_set(struct mport_cdev_priv *priv, void __user *arg) +{ + struct mport_dev *md = priv->md; + u32 comptag; + + if (copy_from_user(&comptag, arg, sizeof(comptag))) + return -EFAULT; + + rio_local_write_config_32(md->mport, RIO_COMPONENT_TAG_CSR, comptag); + + rmcd_debug(MPORT, "Set host Component Tag to %d", comptag); + + return 0; +} + +#ifdef CONFIG_RAPIDIO_DMA_ENGINE + +struct mport_dma_req { + struct kref refcount; + struct list_head node; + struct file *filp; + struct mport_cdev_priv *priv; + enum rio_transfer_sync sync; + struct sg_table sgt; + struct page **page_list; + unsigned int nr_pages; + struct rio_mport_mapping *map; + struct dma_chan *dmach; + enum dma_data_direction dir; + dma_cookie_t cookie; + enum dma_status status; + struct completion req_comp; +}; + +static void mport_release_def_dma(struct kref *dma_ref) +{ + struct mport_dev *md = + container_of(dma_ref, struct mport_dev, dma_ref); + + rmcd_debug(EXIT, "DMA_%d", md->dma_chan->chan_id); + rio_release_dma(md->dma_chan); + md->dma_chan = NULL; +} + +static void mport_release_dma(struct kref *dma_ref) +{ + struct mport_cdev_priv *priv = + container_of(dma_ref, struct mport_cdev_priv, dma_ref); + + rmcd_debug(EXIT, "DMA_%d", priv->dmach->chan_id); + complete(&priv->comp); +} + +static void dma_req_free(struct kref *ref) +{ + struct mport_dma_req *req = container_of(ref, struct mport_dma_req, + refcount); + struct mport_cdev_priv *priv = req->priv; + + dma_unmap_sg(req->dmach->device->dev, + req->sgt.sgl, req->sgt.nents, req->dir); + sg_free_table(&req->sgt); + if (req->page_list) { + unpin_user_pages(req->page_list, req->nr_pages); + kfree(req->page_list); + } + + if (req->map) { + mutex_lock(&req->map->md->buf_mutex); + kref_put(&req->map->ref, mport_release_mapping); + mutex_unlock(&req->map->md->buf_mutex); + } + + kref_put(&priv->dma_ref, mport_release_dma); + + kfree(req); +} + +static void dma_xfer_callback(void *param) +{ + struct mport_dma_req *req = (struct mport_dma_req *)param; + struct mport_cdev_priv *priv = req->priv; + + req->status = dma_async_is_tx_complete(priv->dmach, req->cookie, + NULL, NULL); + complete(&req->req_comp); + kref_put(&req->refcount, dma_req_free); +} + +/* + * prep_dma_xfer() - Configure and send request to DMAengine to prepare DMA + * transfer object. + * Returns pointer to DMA transaction descriptor allocated by DMA driver on + * success or ERR_PTR (and/or NULL) if failed. Caller must check returned + * non-NULL pointer using IS_ERR macro. 
+ */ +static struct dma_async_tx_descriptor +*prep_dma_xfer(struct dma_chan *chan, struct rio_transfer_io *transfer, + struct sg_table *sgt, int nents, enum dma_transfer_direction dir, + enum dma_ctrl_flags flags) +{ + struct rio_dma_data tx_data; + + tx_data.sg = sgt->sgl; + tx_data.sg_len = nents; + tx_data.rio_addr_u = 0; + tx_data.rio_addr = transfer->rio_addr; + if (dir == DMA_MEM_TO_DEV) { + switch (transfer->method) { + case RIO_EXCHANGE_NWRITE: + tx_data.wr_type = RDW_ALL_NWRITE; + break; + case RIO_EXCHANGE_NWRITE_R_ALL: + tx_data.wr_type = RDW_ALL_NWRITE_R; + break; + case RIO_EXCHANGE_NWRITE_R: + tx_data.wr_type = RDW_LAST_NWRITE_R; + break; + case RIO_EXCHANGE_DEFAULT: + tx_data.wr_type = RDW_DEFAULT; + break; + default: + return ERR_PTR(-EINVAL); + } + } + + return rio_dma_prep_xfer(chan, transfer->rioid, &tx_data, dir, flags); +} + +/* Request DMA channel associated with this mport device. + * Try to request DMA channel for every new process that opened given + * mport. If a new DMA channel is not available use default channel + * which is the first DMA channel opened on mport device. + */ +static int get_dma_channel(struct mport_cdev_priv *priv) +{ + mutex_lock(&priv->dma_lock); + if (!priv->dmach) { + priv->dmach = rio_request_mport_dma(priv->md->mport); + if (!priv->dmach) { + /* Use default DMA channel if available */ + if (priv->md->dma_chan) { + priv->dmach = priv->md->dma_chan; + kref_get(&priv->md->dma_ref); + } else { + rmcd_error("Failed to get DMA channel"); + mutex_unlock(&priv->dma_lock); + return -ENODEV; + } + } else if (!priv->md->dma_chan) { + /* Register default DMA channel if we do not have one */ + priv->md->dma_chan = priv->dmach; + kref_init(&priv->md->dma_ref); + rmcd_debug(DMA, "Register DMA_chan %d as default", + priv->dmach->chan_id); + } + + kref_init(&priv->dma_ref); + init_completion(&priv->comp); + } + + kref_get(&priv->dma_ref); + mutex_unlock(&priv->dma_lock); + return 0; +} + +static void put_dma_channel(struct mport_cdev_priv *priv) +{ + kref_put(&priv->dma_ref, mport_release_dma); +} + +/* + * DMA transfer functions + */ +static int do_dma_request(struct mport_dma_req *req, + struct rio_transfer_io *xfer, + enum rio_transfer_sync sync, int nents) +{ + struct mport_cdev_priv *priv; + struct sg_table *sgt; + struct dma_chan *chan; + struct dma_async_tx_descriptor *tx; + dma_cookie_t cookie; + unsigned long tmo = msecs_to_jiffies(dma_timeout); + enum dma_transfer_direction dir; + long wret; + int ret = 0; + + priv = req->priv; + sgt = &req->sgt; + + chan = priv->dmach; + dir = (req->dir == DMA_FROM_DEVICE) ? 
DMA_DEV_TO_MEM : DMA_MEM_TO_DEV; + + rmcd_debug(DMA, "%s(%d) uses %s for DMA_%s", + current->comm, task_pid_nr(current), + dev_name(&chan->dev->device), + (dir == DMA_DEV_TO_MEM)?"READ":"WRITE"); + + /* Initialize DMA transaction request */ + tx = prep_dma_xfer(chan, xfer, sgt, nents, dir, + DMA_CTRL_ACK | DMA_PREP_INTERRUPT); + + if (!tx) { + rmcd_debug(DMA, "prep error for %s A:0x%llx L:0x%llx", + (dir == DMA_DEV_TO_MEM)?"READ":"WRITE", + xfer->rio_addr, xfer->length); + ret = -EIO; + goto err_out; + } else if (IS_ERR(tx)) { + ret = PTR_ERR(tx); + rmcd_debug(DMA, "prep error %d for %s A:0x%llx L:0x%llx", ret, + (dir == DMA_DEV_TO_MEM)?"READ":"WRITE", + xfer->rio_addr, xfer->length); + goto err_out; + } + + tx->callback = dma_xfer_callback; + tx->callback_param = req; + + req->status = DMA_IN_PROGRESS; + kref_get(&req->refcount); + + cookie = dmaengine_submit(tx); + req->cookie = cookie; + + rmcd_debug(DMA, "pid=%d DMA_%s tx_cookie = %d", task_pid_nr(current), + (dir == DMA_DEV_TO_MEM)?"READ":"WRITE", cookie); + + if (dma_submit_error(cookie)) { + rmcd_error("submit err=%d (addr:0x%llx len:0x%llx)", + cookie, xfer->rio_addr, xfer->length); + kref_put(&req->refcount, dma_req_free); + ret = -EIO; + goto err_out; + } + + dma_async_issue_pending(chan); + + if (sync == RIO_TRANSFER_ASYNC) { + spin_lock(&priv->req_lock); + list_add_tail(&req->node, &priv->async_list); + spin_unlock(&priv->req_lock); + return cookie; + } else if (sync == RIO_TRANSFER_FAF) + return 0; + + wret = wait_for_completion_interruptible_timeout(&req->req_comp, tmo); + + if (wret == 0) { + /* Timeout on wait occurred */ + rmcd_error("%s(%d) timed out waiting for DMA_%s %d", + current->comm, task_pid_nr(current), + (dir == DMA_DEV_TO_MEM)?"READ":"WRITE", cookie); + return -ETIMEDOUT; + } else if (wret == -ERESTARTSYS) { + /* Wait_for_completion was interrupted by a signal but DMA may + * be in progress + */ + rmcd_error("%s(%d) wait for DMA_%s %d was interrupted", + current->comm, task_pid_nr(current), + (dir == DMA_DEV_TO_MEM)?"READ":"WRITE", cookie); + return -EINTR; + } + + if (req->status != DMA_COMPLETE) { + /* DMA transaction completion was signaled with error */ + rmcd_error("%s(%d) DMA_%s %d completed with status %d (ret=%d)", + current->comm, task_pid_nr(current), + (dir == DMA_DEV_TO_MEM)?"READ":"WRITE", + cookie, req->status, ret); + ret = -EIO; + } + +err_out: + return ret; +} + +/* + * rio_dma_transfer() - Perform RapidIO DMA data transfer to/from + * the remote RapidIO device + * @filp: file pointer associated with the call + * @transfer_mode: DMA transfer mode + * @sync: synchronization mode + * @dir: DMA transfer direction (DMA_MEM_TO_DEV = write OR + * DMA_DEV_TO_MEM = read) + * @xfer: data transfer descriptor structure + */ +static int +rio_dma_transfer(struct file *filp, u32 transfer_mode, + enum rio_transfer_sync sync, enum dma_data_direction dir, + struct rio_transfer_io *xfer) +{ + struct mport_cdev_priv *priv = filp->private_data; + unsigned long nr_pages = 0; + struct page **page_list = NULL; + struct mport_dma_req *req; + struct mport_dev *md = priv->md; + struct dma_chan *chan; + int ret; + int nents; + + if (xfer->length == 0) + return -EINVAL; + req = kzalloc(sizeof(*req), GFP_KERNEL); + if (!req) + return -ENOMEM; + + ret = get_dma_channel(priv); + if (ret) { + kfree(req); + return ret; + } + chan = priv->dmach; + + kref_init(&req->refcount); + init_completion(&req->req_comp); + req->dir = dir; + req->filp = filp; + req->priv = priv; + req->dmach = chan; + req->sync = sync; + + /* + * If 
parameter loc_addr != NULL, we are transferring data from/to + * data buffer allocated in user-space: lock in memory user-space + * buffer pages and build an SG table for DMA transfer request + * + * Otherwise (loc_addr == NULL) contiguous kernel-space buffer is + * used for DMA data transfers: build single entry SG table using + * offset within the internal buffer specified by handle parameter. + */ + if (xfer->loc_addr) { + unsigned int offset; + long pinned; + + offset = lower_32_bits(offset_in_page(xfer->loc_addr)); + nr_pages = PAGE_ALIGN(xfer->length + offset) >> PAGE_SHIFT; + + page_list = kmalloc_array(nr_pages, + sizeof(*page_list), GFP_KERNEL); + if (page_list == NULL) { + ret = -ENOMEM; + goto err_req; + } + + pinned = pin_user_pages_fast( + (unsigned long)xfer->loc_addr & PAGE_MASK, + nr_pages, + dir == DMA_FROM_DEVICE ? FOLL_WRITE : 0, + page_list); + + if (pinned != nr_pages) { + if (pinned < 0) { + rmcd_error("pin_user_pages_fast err=%ld", + pinned); + nr_pages = 0; + } else { + rmcd_error("pinned %ld out of %ld pages", + pinned, nr_pages); + /* + * Set nr_pages up to mean "how many pages to unpin, in + * the error handler: + */ + nr_pages = pinned; + } + ret = -EFAULT; + goto err_pg; + } + + ret = sg_alloc_table_from_pages(&req->sgt, page_list, nr_pages, + offset, xfer->length, GFP_KERNEL); + if (ret) { + rmcd_error("sg_alloc_table failed with err=%d", ret); + goto err_pg; + } + + req->page_list = page_list; + req->nr_pages = nr_pages; + } else { + dma_addr_t baddr; + struct rio_mport_mapping *map; + + baddr = (dma_addr_t)xfer->handle; + + mutex_lock(&md->buf_mutex); + list_for_each_entry(map, &md->mappings, node) { + if (baddr >= map->phys_addr && + baddr < (map->phys_addr + map->size)) { + kref_get(&map->ref); + req->map = map; + break; + } + } + mutex_unlock(&md->buf_mutex); + + if (req->map == NULL) { + ret = -ENOMEM; + goto err_req; + } + + if (xfer->length + xfer->offset > map->size) { + ret = -EINVAL; + goto err_req; + } + + ret = sg_alloc_table(&req->sgt, 1, GFP_KERNEL); + if (unlikely(ret)) { + rmcd_error("sg_alloc_table failed for internal buf"); + goto err_req; + } + + sg_set_buf(req->sgt.sgl, + map->virt_addr + (baddr - map->phys_addr) + + xfer->offset, xfer->length); + } + + nents = dma_map_sg(chan->device->dev, + req->sgt.sgl, req->sgt.nents, dir); + if (nents == 0) { + rmcd_error("Failed to map SG list"); + ret = -EFAULT; + goto err_pg; + } + + ret = do_dma_request(req, xfer, sync, nents); + + if (ret >= 0) { + if (sync == RIO_TRANSFER_ASYNC) + return ret; /* return ASYNC cookie */ + } else { + rmcd_debug(DMA, "do_dma_request failed with err=%d", ret); + } + +err_pg: + if (!req->page_list) { + unpin_user_pages(page_list, nr_pages); + kfree(page_list); + } +err_req: + kref_put(&req->refcount, dma_req_free); + return ret; +} + +static int rio_mport_transfer_ioctl(struct file *filp, void __user *arg) +{ + struct mport_cdev_priv *priv = filp->private_data; + struct rio_transaction transaction; + struct rio_transfer_io *transfer; + enum dma_data_direction dir; + int i, ret = 0; + + if (unlikely(copy_from_user(&transaction, arg, sizeof(transaction)))) + return -EFAULT; + + if (transaction.count != 1) /* only single transfer for now */ + return -EINVAL; + + if ((transaction.transfer_mode & + priv->md->properties.transfer_mode) == 0) + return -ENODEV; + + transfer = vmalloc(array_size(sizeof(*transfer), transaction.count)); + if (!transfer) + return -ENOMEM; + + if (unlikely(copy_from_user(transfer, + (void __user *)(uintptr_t)transaction.block, + 
array_size(sizeof(*transfer), transaction.count)))) { + ret = -EFAULT; + goto out_free; + } + + dir = (transaction.dir == RIO_TRANSFER_DIR_READ) ? + DMA_FROM_DEVICE : DMA_TO_DEVICE; + for (i = 0; i < transaction.count && ret == 0; i++) + ret = rio_dma_transfer(filp, transaction.transfer_mode, + transaction.sync, dir, &transfer[i]); + + if (unlikely(copy_to_user((void __user *)(uintptr_t)transaction.block, + transfer, + array_size(sizeof(*transfer), transaction.count)))) + ret = -EFAULT; + +out_free: + vfree(transfer); + + return ret; +} + +static int rio_mport_wait_for_async_dma(struct file *filp, void __user *arg) +{ + struct mport_cdev_priv *priv; + struct rio_async_tx_wait w_param; + struct mport_dma_req *req; + dma_cookie_t cookie; + unsigned long tmo; + long wret; + int found = 0; + int ret; + + priv = (struct mport_cdev_priv *)filp->private_data; + + if (unlikely(copy_from_user(&w_param, arg, sizeof(w_param)))) + return -EFAULT; + + cookie = w_param.token; + if (w_param.timeout) + tmo = msecs_to_jiffies(w_param.timeout); + else /* Use default DMA timeout */ + tmo = msecs_to_jiffies(dma_timeout); + + spin_lock(&priv->req_lock); + list_for_each_entry(req, &priv->async_list, node) { + if (req->cookie == cookie) { + list_del(&req->node); + found = 1; + break; + } + } + spin_unlock(&priv->req_lock); + + if (!found) + return -EAGAIN; + + wret = wait_for_completion_interruptible_timeout(&req->req_comp, tmo); + + if (wret == 0) { + /* Timeout on wait occurred */ + rmcd_error("%s(%d) timed out waiting for ASYNC DMA_%s", + current->comm, task_pid_nr(current), + (req->dir == DMA_FROM_DEVICE)?"READ":"WRITE"); + ret = -ETIMEDOUT; + goto err_tmo; + } else if (wret == -ERESTARTSYS) { + /* Wait_for_completion was interrupted by a signal but DMA may + * be still in progress + */ + rmcd_error("%s(%d) wait for ASYNC DMA_%s was interrupted", + current->comm, task_pid_nr(current), + (req->dir == DMA_FROM_DEVICE)?"READ":"WRITE"); + ret = -EINTR; + goto err_tmo; + } + + if (req->status != DMA_COMPLETE) { + /* DMA transaction completion signaled with transfer error */ + rmcd_error("%s(%d) ASYNC DMA_%s completion with status %d", + current->comm, task_pid_nr(current), + (req->dir == DMA_FROM_DEVICE)?"READ":"WRITE", + req->status); + ret = -EIO; + } else + ret = 0; + + if (req->status != DMA_IN_PROGRESS && req->status != DMA_PAUSED) + kref_put(&req->refcount, dma_req_free); + + return ret; + +err_tmo: + /* Return request back into async queue */ + spin_lock(&priv->req_lock); + list_add_tail(&req->node, &priv->async_list); + spin_unlock(&priv->req_lock); + return ret; +} + +static int rio_mport_create_dma_mapping(struct mport_dev *md, struct file *filp, + u64 size, struct rio_mport_mapping **mapping) +{ + struct rio_mport_mapping *map; + + map = kzalloc(sizeof(*map), GFP_KERNEL); + if (map == NULL) + return -ENOMEM; + + map->virt_addr = dma_alloc_coherent(md->mport->dev.parent, size, + &map->phys_addr, GFP_KERNEL); + if (map->virt_addr == NULL) { + kfree(map); + return -ENOMEM; + } + + map->dir = MAP_DMA; + map->size = size; + map->filp = filp; + map->md = md; + kref_init(&map->ref); + mutex_lock(&md->buf_mutex); + list_add_tail(&map->node, &md->mappings); + mutex_unlock(&md->buf_mutex); + *mapping = map; + + return 0; +} + +static int rio_mport_alloc_dma(struct file *filp, void __user *arg) +{ + struct mport_cdev_priv *priv = filp->private_data; + struct mport_dev *md = priv->md; + struct rio_dma_mem map; + struct rio_mport_mapping *mapping = NULL; + int ret; + + if (unlikely(copy_from_user(&map, arg, 
sizeof(map)))) + return -EFAULT; + + ret = rio_mport_create_dma_mapping(md, filp, map.length, &mapping); + if (ret) + return ret; + + map.dma_handle = mapping->phys_addr; + + if (unlikely(copy_to_user(arg, &map, sizeof(map)))) { + mutex_lock(&md->buf_mutex); + kref_put(&mapping->ref, mport_release_mapping); + mutex_unlock(&md->buf_mutex); + return -EFAULT; + } + + return 0; +} + +static int rio_mport_free_dma(struct file *filp, void __user *arg) +{ + struct mport_cdev_priv *priv = filp->private_data; + struct mport_dev *md = priv->md; + u64 handle; + int ret = -EFAULT; + struct rio_mport_mapping *map, *_map; + + if (copy_from_user(&handle, arg, sizeof(handle))) + return -EFAULT; + rmcd_debug(EXIT, "filp=%p", filp); + + mutex_lock(&md->buf_mutex); + list_for_each_entry_safe(map, _map, &md->mappings, node) { + if (map->dir == MAP_DMA && map->phys_addr == handle && + map->filp == filp) { + kref_put(&map->ref, mport_release_mapping); + ret = 0; + break; + } + } + mutex_unlock(&md->buf_mutex); + + if (ret == -EFAULT) { + rmcd_debug(DMA, "ERR no matching mapping"); + return ret; + } + + return 0; +} +#else +static int rio_mport_transfer_ioctl(struct file *filp, void *arg) +{ + return -ENODEV; +} + +static int rio_mport_wait_for_async_dma(struct file *filp, void __user *arg) +{ + return -ENODEV; +} + +static int rio_mport_alloc_dma(struct file *filp, void __user *arg) +{ + return -ENODEV; +} + +static int rio_mport_free_dma(struct file *filp, void __user *arg) +{ + return -ENODEV; +} +#endif /* CONFIG_RAPIDIO_DMA_ENGINE */ + +/* + * Inbound/outbound memory mapping functions + */ + +static int +rio_mport_create_inbound_mapping(struct mport_dev *md, struct file *filp, + u64 raddr, u64 size, + struct rio_mport_mapping **mapping) +{ + struct rio_mport *mport = md->mport; + struct rio_mport_mapping *map; + int ret; + + /* rio_map_inb_region() accepts u32 size */ + if (size > 0xffffffff) + return -EINVAL; + + map = kzalloc(sizeof(*map), GFP_KERNEL); + if (map == NULL) + return -ENOMEM; + + map->virt_addr = dma_alloc_coherent(mport->dev.parent, size, + &map->phys_addr, GFP_KERNEL); + if (map->virt_addr == NULL) { + ret = -ENOMEM; + goto err_dma_alloc; + } + + if (raddr == RIO_MAP_ANY_ADDR) + raddr = map->phys_addr; + ret = rio_map_inb_region(mport, map->phys_addr, raddr, (u32)size, 0); + if (ret < 0) + goto err_map_inb; + + map->dir = MAP_INBOUND; + map->rio_addr = raddr; + map->size = size; + map->filp = filp; + map->md = md; + kref_init(&map->ref); + mutex_lock(&md->buf_mutex); + list_add_tail(&map->node, &md->mappings); + mutex_unlock(&md->buf_mutex); + *mapping = map; + return 0; + +err_map_inb: + dma_free_coherent(mport->dev.parent, size, + map->virt_addr, map->phys_addr); +err_dma_alloc: + kfree(map); + return ret; +} + +static int +rio_mport_get_inbound_mapping(struct mport_dev *md, struct file *filp, + u64 raddr, u64 size, + struct rio_mport_mapping **mapping) +{ + struct rio_mport_mapping *map; + int err = -ENOMEM; + + if (raddr == RIO_MAP_ANY_ADDR) + goto get_new; + + mutex_lock(&md->buf_mutex); + list_for_each_entry(map, &md->mappings, node) { + if (map->dir != MAP_INBOUND) + continue; + if (raddr == map->rio_addr && size == map->size) { + /* allow exact match only */ + *mapping = map; + err = 0; + break; + } else if (raddr < (map->rio_addr + map->size - 1) && + (raddr + size) > map->rio_addr) { + err = -EBUSY; + break; + } + } + mutex_unlock(&md->buf_mutex); + + if (err != -ENOMEM) + return err; +get_new: + /* not found, create new */ + return rio_mport_create_inbound_mapping(md, filp, 
raddr, size, mapping); +} + +static int rio_mport_map_inbound(struct file *filp, void __user *arg) +{ + struct mport_cdev_priv *priv = filp->private_data; + struct mport_dev *md = priv->md; + struct rio_mmap map; + struct rio_mport_mapping *mapping = NULL; + int ret; + + if (!md->mport->ops->map_inb) + return -EPROTONOSUPPORT; + if (unlikely(copy_from_user(&map, arg, sizeof(map)))) + return -EFAULT; + + rmcd_debug(IBW, "%s filp=%p", dev_name(&priv->md->dev), filp); + + ret = rio_mport_get_inbound_mapping(md, filp, map.rio_addr, + map.length, &mapping); + if (ret) + return ret; + + map.handle = mapping->phys_addr; + map.rio_addr = mapping->rio_addr; + + if (unlikely(copy_to_user(arg, &map, sizeof(map)))) { + /* Delete mapping if it was created by this request */ + if (ret == 0 && mapping->filp == filp) { + mutex_lock(&md->buf_mutex); + kref_put(&mapping->ref, mport_release_mapping); + mutex_unlock(&md->buf_mutex); + } + return -EFAULT; + } + + return 0; +} + +/* + * rio_mport_inbound_free() - unmap from RapidIO address space and free + * previously allocated inbound DMA coherent buffer + * @priv: driver private data + * @arg: buffer handle returned by allocation routine + */ +static int rio_mport_inbound_free(struct file *filp, void __user *arg) +{ + struct mport_cdev_priv *priv = filp->private_data; + struct mport_dev *md = priv->md; + u64 handle; + struct rio_mport_mapping *map, *_map; + + rmcd_debug(IBW, "%s filp=%p", dev_name(&priv->md->dev), filp); + + if (!md->mport->ops->unmap_inb) + return -EPROTONOSUPPORT; + + if (copy_from_user(&handle, arg, sizeof(handle))) + return -EFAULT; + + mutex_lock(&md->buf_mutex); + list_for_each_entry_safe(map, _map, &md->mappings, node) { + if (map->dir == MAP_INBOUND && map->phys_addr == handle) { + if (map->filp == filp) { + map->filp = NULL; + kref_put(&map->ref, mport_release_mapping); + } + break; + } + } + mutex_unlock(&md->buf_mutex); + + return 0; +} + +/* + * maint_port_idx_get() - Get the port index of the mport instance + * @priv: driver private data + * @arg: port index + */ +static int maint_port_idx_get(struct mport_cdev_priv *priv, void __user *arg) +{ + struct mport_dev *md = priv->md; + u32 port_idx = md->mport->index; + + rmcd_debug(MPORT, "port_index=%d", port_idx); + + if (copy_to_user(arg, &port_idx, sizeof(port_idx))) + return -EFAULT; + + return 0; +} + +static int rio_mport_add_event(struct mport_cdev_priv *priv, + struct rio_event *event) +{ + int overflow; + + if (!(priv->event_mask & event->header)) + return -EACCES; + + spin_lock(&priv->fifo_lock); + overflow = kfifo_avail(&priv->event_fifo) < sizeof(*event) + || kfifo_in(&priv->event_fifo, (unsigned char *)event, + sizeof(*event)) != sizeof(*event); + spin_unlock(&priv->fifo_lock); + + wake_up_interruptible(&priv->event_rx_wait); + + if (overflow) { + dev_warn(&priv->md->dev, DRV_NAME ": event fifo overflow\n"); + return -EBUSY; + } + + return 0; +} + +static void rio_mport_doorbell_handler(struct rio_mport *mport, void *dev_id, + u16 src, u16 dst, u16 info) +{ + struct mport_dev *data = dev_id; + struct mport_cdev_priv *priv; + struct rio_mport_db_filter *db_filter; + struct rio_event event; + int handled; + + event.header = RIO_DOORBELL; + event.u.doorbell.rioid = src; + event.u.doorbell.payload = info; + + handled = 0; + spin_lock(&data->db_lock); + list_for_each_entry(db_filter, &data->doorbells, data_node) { + if (((db_filter->filter.rioid == RIO_INVALID_DESTID || + db_filter->filter.rioid == src)) && + info >= db_filter->filter.low && + info <= db_filter->filter.high) 
{ + priv = db_filter->priv; + rio_mport_add_event(priv, &event); + handled = 1; + } + } + spin_unlock(&data->db_lock); + + if (!handled) + dev_warn(&data->dev, + "%s: spurious DB received from 0x%x, info=0x%04x\n", + __func__, src, info); +} + +static int rio_mport_add_db_filter(struct mport_cdev_priv *priv, + void __user *arg) +{ + struct mport_dev *md = priv->md; + struct rio_mport_db_filter *db_filter; + struct rio_doorbell_filter filter; + unsigned long flags; + int ret; + + if (copy_from_user(&filter, arg, sizeof(filter))) + return -EFAULT; + + if (filter.low > filter.high) + return -EINVAL; + + ret = rio_request_inb_dbell(md->mport, md, filter.low, filter.high, + rio_mport_doorbell_handler); + if (ret) { + rmcd_error("%s failed to register IBDB, err=%d", + dev_name(&md->dev), ret); + return ret; + } + + db_filter = kzalloc(sizeof(*db_filter), GFP_KERNEL); + if (db_filter == NULL) { + rio_release_inb_dbell(md->mport, filter.low, filter.high); + return -ENOMEM; + } + + db_filter->filter = filter; + db_filter->priv = priv; + spin_lock_irqsave(&md->db_lock, flags); + list_add_tail(&db_filter->priv_node, &priv->db_filters); + list_add_tail(&db_filter->data_node, &md->doorbells); + spin_unlock_irqrestore(&md->db_lock, flags); + + return 0; +} + +static void rio_mport_delete_db_filter(struct rio_mport_db_filter *db_filter) +{ + list_del(&db_filter->data_node); + list_del(&db_filter->priv_node); + kfree(db_filter); +} + +static int rio_mport_remove_db_filter(struct mport_cdev_priv *priv, + void __user *arg) +{ + struct rio_mport_db_filter *db_filter; + struct rio_doorbell_filter filter; + unsigned long flags; + int ret = -EINVAL; + + if (copy_from_user(&filter, arg, sizeof(filter))) + return -EFAULT; + + if (filter.low > filter.high) + return -EINVAL; + + spin_lock_irqsave(&priv->md->db_lock, flags); + list_for_each_entry(db_filter, &priv->db_filters, priv_node) { + if (db_filter->filter.rioid == filter.rioid && + db_filter->filter.low == filter.low && + db_filter->filter.high == filter.high) { + rio_mport_delete_db_filter(db_filter); + ret = 0; + break; + } + } + spin_unlock_irqrestore(&priv->md->db_lock, flags); + + if (!ret) + rio_release_inb_dbell(priv->md->mport, filter.low, filter.high); + + return ret; +} + +static int rio_mport_match_pw(union rio_pw_msg *msg, + struct rio_pw_filter *filter) +{ + if ((msg->em.comptag & filter->mask) < filter->low || + (msg->em.comptag & filter->mask) > filter->high) + return 0; + return 1; +} + +static int rio_mport_pw_handler(struct rio_mport *mport, void *context, + union rio_pw_msg *msg, int step) +{ + struct mport_dev *md = context; + struct mport_cdev_priv *priv; + struct rio_mport_pw_filter *pw_filter; + struct rio_event event; + int handled; + + event.header = RIO_PORTWRITE; + memcpy(event.u.portwrite.payload, msg->raw, RIO_PW_MSG_SIZE); + + handled = 0; + spin_lock(&md->pw_lock); + list_for_each_entry(pw_filter, &md->portwrites, md_node) { + if (rio_mport_match_pw(msg, &pw_filter->filter)) { + priv = pw_filter->priv; + rio_mport_add_event(priv, &event); + handled = 1; + } + } + spin_unlock(&md->pw_lock); + + if (!handled) { + printk_ratelimited(KERN_WARNING DRV_NAME + ": mport%d received spurious PW from 0x%08x\n", + mport->id, msg->em.comptag); + } + + return 0; +} + +static int rio_mport_add_pw_filter(struct mport_cdev_priv *priv, + void __user *arg) +{ + struct mport_dev *md = priv->md; + struct rio_mport_pw_filter *pw_filter; + struct rio_pw_filter filter; + unsigned long flags; + int hadd = 0; + + if (copy_from_user(&filter, arg, 
sizeof(filter))) + return -EFAULT; + + pw_filter = kzalloc(sizeof(*pw_filter), GFP_KERNEL); + if (pw_filter == NULL) + return -ENOMEM; + + pw_filter->filter = filter; + pw_filter->priv = priv; + spin_lock_irqsave(&md->pw_lock, flags); + if (list_empty(&md->portwrites)) + hadd = 1; + list_add_tail(&pw_filter->priv_node, &priv->pw_filters); + list_add_tail(&pw_filter->md_node, &md->portwrites); + spin_unlock_irqrestore(&md->pw_lock, flags); + + if (hadd) { + int ret; + + ret = rio_add_mport_pw_handler(md->mport, md, + rio_mport_pw_handler); + if (ret) { + dev_err(&md->dev, + "%s: failed to add IB_PW handler, err=%d\n", + __func__, ret); + return ret; + } + rio_pw_enable(md->mport, 1); + } + + return 0; +} + +static void rio_mport_delete_pw_filter(struct rio_mport_pw_filter *pw_filter) +{ + list_del(&pw_filter->md_node); + list_del(&pw_filter->priv_node); + kfree(pw_filter); +} + +static int rio_mport_match_pw_filter(struct rio_pw_filter *a, + struct rio_pw_filter *b) +{ + if ((a->mask == b->mask) && (a->low == b->low) && (a->high == b->high)) + return 1; + return 0; +} + +static int rio_mport_remove_pw_filter(struct mport_cdev_priv *priv, + void __user *arg) +{ + struct mport_dev *md = priv->md; + struct rio_mport_pw_filter *pw_filter; + struct rio_pw_filter filter; + unsigned long flags; + int ret = -EINVAL; + int hdel = 0; + + if (copy_from_user(&filter, arg, sizeof(filter))) + return -EFAULT; + + spin_lock_irqsave(&md->pw_lock, flags); + list_for_each_entry(pw_filter, &priv->pw_filters, priv_node) { + if (rio_mport_match_pw_filter(&pw_filter->filter, &filter)) { + rio_mport_delete_pw_filter(pw_filter); + ret = 0; + break; + } + } + + if (list_empty(&md->portwrites)) + hdel = 1; + spin_unlock_irqrestore(&md->pw_lock, flags); + + if (hdel) { + rio_del_mport_pw_handler(md->mport, priv->md, + rio_mport_pw_handler); + rio_pw_enable(md->mport, 0); + } + + return ret; +} + +/* + * rio_release_dev - release routine for kernel RIO device object + * @dev: kernel device object associated with a RIO device structure + * + * Frees a RIO device struct associated a RIO device struct. + * The RIO device struct is freed. + */ +static void rio_release_dev(struct device *dev) +{ + struct rio_dev *rdev; + + rdev = to_rio_dev(dev); + pr_info(DRV_PREFIX "%s: %s\n", __func__, rio_name(rdev)); + kfree(rdev); +} + + +static void rio_release_net(struct device *dev) +{ + struct rio_net *net; + + net = to_rio_net(dev); + rmcd_debug(RDEV, "net_%d", net->id); + kfree(net); +} + + +/* + * rio_mport_add_riodev - creates a kernel RIO device object + * + * Allocates a RIO device data structure and initializes required fields based + * on device's configuration space contents. + * If the device has switch capabilities, then a switch specific portion is + * allocated and configured. 
+ */ +static int rio_mport_add_riodev(struct mport_cdev_priv *priv, + void __user *arg) +{ + struct mport_dev *md = priv->md; + struct rio_rdev_info dev_info; + struct rio_dev *rdev; + struct rio_switch *rswitch = NULL; + struct rio_mport *mport; + struct device *dev; + size_t size; + u32 rval; + u32 swpinfo = 0; + u16 destid; + u8 hopcount; + int err; + + if (copy_from_user(&dev_info, arg, sizeof(dev_info))) + return -EFAULT; + dev_info.name[sizeof(dev_info.name) - 1] = '\0'; + + rmcd_debug(RDEV, "name:%s ct:0x%x did:0x%x hc:0x%x", dev_info.name, + dev_info.comptag, dev_info.destid, dev_info.hopcount); + + dev = bus_find_device_by_name(&rio_bus_type, NULL, dev_info.name); + if (dev) { + rmcd_debug(RDEV, "device %s already exists", dev_info.name); + put_device(dev); + return -EEXIST; + } + + size = sizeof(*rdev); + mport = md->mport; + destid = dev_info.destid; + hopcount = dev_info.hopcount; + + if (rio_mport_read_config_32(mport, destid, hopcount, + RIO_PEF_CAR, &rval)) + return -EIO; + + if (rval & RIO_PEF_SWITCH) { + rio_mport_read_config_32(mport, destid, hopcount, + RIO_SWP_INFO_CAR, &swpinfo); + size += struct_size(rswitch, nextdev, RIO_GET_TOTAL_PORTS(swpinfo)); + } + + rdev = kzalloc(size, GFP_KERNEL); + if (rdev == NULL) + return -ENOMEM; + + if (mport->net == NULL) { + struct rio_net *net; + + net = rio_alloc_net(mport); + if (!net) { + err = -ENOMEM; + rmcd_debug(RDEV, "failed to allocate net object"); + goto cleanup; + } + + net->id = mport->id; + net->hport = mport; + dev_set_name(&net->dev, "rnet_%d", net->id); + net->dev.parent = &mport->dev; + net->dev.release = rio_release_net; + err = rio_add_net(net); + if (err) { + rmcd_debug(RDEV, "failed to register net, err=%d", err); + kfree(net); + goto cleanup; + } + } + + rdev->net = mport->net; + rdev->pef = rval; + rdev->swpinfo = swpinfo; + rio_mport_read_config_32(mport, destid, hopcount, + RIO_DEV_ID_CAR, &rval); + rdev->did = rval >> 16; + rdev->vid = rval & 0xffff; + rio_mport_read_config_32(mport, destid, hopcount, RIO_DEV_INFO_CAR, + &rdev->device_rev); + rio_mport_read_config_32(mport, destid, hopcount, RIO_ASM_ID_CAR, + &rval); + rdev->asm_did = rval >> 16; + rdev->asm_vid = rval & 0xffff; + rio_mport_read_config_32(mport, destid, hopcount, RIO_ASM_INFO_CAR, + &rval); + rdev->asm_rev = rval >> 16; + + if (rdev->pef & RIO_PEF_EXT_FEATURES) { + rdev->efptr = rval & 0xffff; + rdev->phys_efptr = rio_mport_get_physefb(mport, 0, destid, + hopcount, &rdev->phys_rmap); + + rdev->em_efptr = rio_mport_get_feature(mport, 0, destid, + hopcount, RIO_EFB_ERR_MGMNT); + } + + rio_mport_read_config_32(mport, destid, hopcount, RIO_SRC_OPS_CAR, + &rdev->src_ops); + rio_mport_read_config_32(mport, destid, hopcount, RIO_DST_OPS_CAR, + &rdev->dst_ops); + + rdev->comp_tag = dev_info.comptag; + rdev->destid = destid; + /* hopcount is stored as specified by a caller, regardles of EP or SW */ + rdev->hopcount = hopcount; + + if (rdev->pef & RIO_PEF_SWITCH) { + rswitch = rdev->rswitch; + rswitch->route_table = NULL; + } + + if (strlen(dev_info.name)) + dev_set_name(&rdev->dev, "%s", dev_info.name); + else if (rdev->pef & RIO_PEF_SWITCH) + dev_set_name(&rdev->dev, "%02x:s:%04x", mport->id, + rdev->comp_tag & RIO_CTAG_UDEVID); + else + dev_set_name(&rdev->dev, "%02x:e:%04x", mport->id, + rdev->comp_tag & RIO_CTAG_UDEVID); + + INIT_LIST_HEAD(&rdev->net_list); + rdev->dev.parent = &mport->net->dev; + rio_attach_device(rdev); + rdev->dev.release = rio_release_dev; + + if (rdev->dst_ops & RIO_DST_OPS_DOORBELL) + 
rio_init_dbell_res(&rdev->riores[RIO_DOORBELL_RESOURCE], + 0, 0xffff); + err = rio_add_device(rdev); + if (err) { + put_device(&rdev->dev); + return err; + } + + rio_dev_get(rdev); + + return 0; +cleanup: + kfree(rdev); + return err; +} + +static int rio_mport_del_riodev(struct mport_cdev_priv *priv, void __user *arg) +{ + struct rio_rdev_info dev_info; + struct rio_dev *rdev = NULL; + struct device *dev; + struct rio_mport *mport; + struct rio_net *net; + + if (copy_from_user(&dev_info, arg, sizeof(dev_info))) + return -EFAULT; + dev_info.name[sizeof(dev_info.name) - 1] = '\0'; + + mport = priv->md->mport; + + /* If device name is specified, removal by name has priority */ + if (strlen(dev_info.name)) { + dev = bus_find_device_by_name(&rio_bus_type, NULL, + dev_info.name); + if (dev) + rdev = to_rio_dev(dev); + } else { + do { + rdev = rio_get_comptag(dev_info.comptag, rdev); + if (rdev && rdev->dev.parent == &mport->net->dev && + rdev->destid == dev_info.destid && + rdev->hopcount == dev_info.hopcount) + break; + } while (rdev); + } + + if (!rdev) { + rmcd_debug(RDEV, + "device name:%s ct:0x%x did:0x%x hc:0x%x not found", + dev_info.name, dev_info.comptag, dev_info.destid, + dev_info.hopcount); + return -ENODEV; + } + + net = rdev->net; + rio_dev_put(rdev); + rio_del_device(rdev, RIO_DEVICE_SHUTDOWN); + + if (list_empty(&net->devices)) { + rio_free_net(net); + mport->net = NULL; + } + + return 0; +} + +/* + * Mport cdev management + */ + +/* + * mport_cdev_open() - Open character device (mport) + */ +static int mport_cdev_open(struct inode *inode, struct file *filp) +{ + int ret; + int minor = iminor(inode); + struct mport_dev *chdev; + struct mport_cdev_priv *priv; + + /* Test for valid device */ + if (minor >= RIO_MAX_MPORTS) { + rmcd_error("Invalid minor device number"); + return -EINVAL; + } + + chdev = container_of(inode->i_cdev, struct mport_dev, cdev); + + rmcd_debug(INIT, "%s filp=%p", dev_name(&chdev->dev), filp); + + if (atomic_read(&chdev->active) == 0) + return -ENODEV; + + get_device(&chdev->dev); + + priv = kzalloc(sizeof(*priv), GFP_KERNEL); + if (!priv) { + put_device(&chdev->dev); + return -ENOMEM; + } + + priv->md = chdev; + + INIT_LIST_HEAD(&priv->db_filters); + INIT_LIST_HEAD(&priv->pw_filters); + spin_lock_init(&priv->fifo_lock); + init_waitqueue_head(&priv->event_rx_wait); + ret = kfifo_alloc(&priv->event_fifo, + sizeof(struct rio_event) * MPORT_EVENT_DEPTH, + GFP_KERNEL); + if (ret < 0) { + put_device(&chdev->dev); + dev_err(&chdev->dev, DRV_NAME ": kfifo_alloc failed\n"); + ret = -ENOMEM; + goto err_fifo; + } + +#ifdef CONFIG_RAPIDIO_DMA_ENGINE + INIT_LIST_HEAD(&priv->async_list); + spin_lock_init(&priv->req_lock); + mutex_init(&priv->dma_lock); +#endif + mutex_lock(&chdev->file_mutex); + list_add_tail(&priv->list, &chdev->file_list); + mutex_unlock(&chdev->file_mutex); + + filp->private_data = priv; + goto out; +err_fifo: + kfree(priv); +out: + return ret; +} + +static int mport_cdev_fasync(int fd, struct file *filp, int mode) +{ + struct mport_cdev_priv *priv = filp->private_data; + + return fasync_helper(fd, filp, mode, &priv->async_queue); +} + +#ifdef CONFIG_RAPIDIO_DMA_ENGINE +static void mport_cdev_release_dma(struct file *filp) +{ + struct mport_cdev_priv *priv = filp->private_data; + struct mport_dev *md; + struct mport_dma_req *req, *req_next; + unsigned long tmo = msecs_to_jiffies(dma_timeout); + long wret; + LIST_HEAD(list); + + rmcd_debug(EXIT, "from filp=%p %s(%d)", + filp, current->comm, task_pid_nr(current)); + + if (!priv->dmach) { + 
rmcd_debug(EXIT, "No DMA channel for filp=%p", filp); + return; + } + + md = priv->md; + + spin_lock(&priv->req_lock); + if (!list_empty(&priv->async_list)) { + rmcd_debug(EXIT, "async list not empty filp=%p %s(%d)", + filp, current->comm, task_pid_nr(current)); + list_splice_init(&priv->async_list, &list); + } + spin_unlock(&priv->req_lock); + + if (!list_empty(&list)) { + rmcd_debug(EXIT, "temp list not empty"); + list_for_each_entry_safe(req, req_next, &list, node) { + rmcd_debug(EXIT, "free req->filp=%p cookie=%d compl=%s", + req->filp, req->cookie, + completion_done(&req->req_comp)?"yes":"no"); + list_del(&req->node); + kref_put(&req->refcount, dma_req_free); + } + } + + put_dma_channel(priv); + wret = wait_for_completion_interruptible_timeout(&priv->comp, tmo); + + if (wret <= 0) { + rmcd_error("%s(%d) failed waiting for DMA release err=%ld", + current->comm, task_pid_nr(current), wret); + } + + if (priv->dmach != priv->md->dma_chan) { + rmcd_debug(EXIT, "Release DMA channel for filp=%p %s(%d)", + filp, current->comm, task_pid_nr(current)); + rio_release_dma(priv->dmach); + } else { + rmcd_debug(EXIT, "Adjust default DMA channel refcount"); + kref_put(&md->dma_ref, mport_release_def_dma); + } + + priv->dmach = NULL; +} +#else +#define mport_cdev_release_dma(priv) do {} while (0) +#endif + +/* + * mport_cdev_release() - Release character device + */ +static int mport_cdev_release(struct inode *inode, struct file *filp) +{ + struct mport_cdev_priv *priv = filp->private_data; + struct mport_dev *chdev; + struct rio_mport_pw_filter *pw_filter, *pw_filter_next; + struct rio_mport_db_filter *db_filter, *db_filter_next; + struct rio_mport_mapping *map, *_map; + unsigned long flags; + + rmcd_debug(EXIT, "%s filp=%p", dev_name(&priv->md->dev), filp); + + chdev = priv->md; + mport_cdev_release_dma(filp); + + priv->event_mask = 0; + + spin_lock_irqsave(&chdev->pw_lock, flags); + if (!list_empty(&priv->pw_filters)) { + list_for_each_entry_safe(pw_filter, pw_filter_next, + &priv->pw_filters, priv_node) + rio_mport_delete_pw_filter(pw_filter); + } + spin_unlock_irqrestore(&chdev->pw_lock, flags); + + spin_lock_irqsave(&chdev->db_lock, flags); + list_for_each_entry_safe(db_filter, db_filter_next, + &priv->db_filters, priv_node) { + rio_mport_delete_db_filter(db_filter); + } + spin_unlock_irqrestore(&chdev->db_lock, flags); + + kfifo_free(&priv->event_fifo); + + mutex_lock(&chdev->buf_mutex); + list_for_each_entry_safe(map, _map, &chdev->mappings, node) { + if (map->filp == filp) { + rmcd_debug(EXIT, "release mapping %p filp=%p", + map->virt_addr, filp); + kref_put(&map->ref, mport_release_mapping); + } + } + mutex_unlock(&chdev->buf_mutex); + + mport_cdev_fasync(-1, filp, 0); + filp->private_data = NULL; + mutex_lock(&chdev->file_mutex); + list_del(&priv->list); + mutex_unlock(&chdev->file_mutex); + put_device(&chdev->dev); + kfree(priv); + return 0; +} + +/* + * mport_cdev_ioctl() - IOCTLs for character device + */ +static long mport_cdev_ioctl(struct file *filp, + unsigned int cmd, unsigned long arg) +{ + int err = -EINVAL; + struct mport_cdev_priv *data = filp->private_data; + struct mport_dev *md = data->md; + + if (atomic_read(&md->active) == 0) + return -ENODEV; + + switch (cmd) { + case RIO_MPORT_MAINT_READ_LOCAL: + return rio_mport_maint_rd(data, (void __user *)arg, 1); + case RIO_MPORT_MAINT_WRITE_LOCAL: + return rio_mport_maint_wr(data, (void __user *)arg, 1); + case RIO_MPORT_MAINT_READ_REMOTE: + return rio_mport_maint_rd(data, (void __user *)arg, 0); + case RIO_MPORT_MAINT_WRITE_REMOTE: 
+ return rio_mport_maint_wr(data, (void __user *)arg, 0); + case RIO_MPORT_MAINT_HDID_SET: + return maint_hdid_set(data, (void __user *)arg); + case RIO_MPORT_MAINT_COMPTAG_SET: + return maint_comptag_set(data, (void __user *)arg); + case RIO_MPORT_MAINT_PORT_IDX_GET: + return maint_port_idx_get(data, (void __user *)arg); + case RIO_MPORT_GET_PROPERTIES: + md->properties.hdid = md->mport->host_deviceid; + if (copy_to_user((void __user *)arg, &(md->properties), + sizeof(md->properties))) + return -EFAULT; + return 0; + case RIO_ENABLE_DOORBELL_RANGE: + return rio_mport_add_db_filter(data, (void __user *)arg); + case RIO_DISABLE_DOORBELL_RANGE: + return rio_mport_remove_db_filter(data, (void __user *)arg); + case RIO_ENABLE_PORTWRITE_RANGE: + return rio_mport_add_pw_filter(data, (void __user *)arg); + case RIO_DISABLE_PORTWRITE_RANGE: + return rio_mport_remove_pw_filter(data, (void __user *)arg); + case RIO_SET_EVENT_MASK: + data->event_mask = (u32)arg; + return 0; + case RIO_GET_EVENT_MASK: + if (copy_to_user((void __user *)arg, &data->event_mask, + sizeof(u32))) + return -EFAULT; + return 0; + case RIO_MAP_OUTBOUND: + return rio_mport_obw_map(filp, (void __user *)arg); + case RIO_MAP_INBOUND: + return rio_mport_map_inbound(filp, (void __user *)arg); + case RIO_UNMAP_OUTBOUND: + return rio_mport_obw_free(filp, (void __user *)arg); + case RIO_UNMAP_INBOUND: + return rio_mport_inbound_free(filp, (void __user *)arg); + case RIO_ALLOC_DMA: + return rio_mport_alloc_dma(filp, (void __user *)arg); + case RIO_FREE_DMA: + return rio_mport_free_dma(filp, (void __user *)arg); + case RIO_WAIT_FOR_ASYNC: + return rio_mport_wait_for_async_dma(filp, (void __user *)arg); + case RIO_TRANSFER: + return rio_mport_transfer_ioctl(filp, (void __user *)arg); + case RIO_DEV_ADD: + return rio_mport_add_riodev(data, (void __user *)arg); + case RIO_DEV_DEL: + return rio_mport_del_riodev(data, (void __user *)arg); + default: + break; + } + + return err; +} + +/* + * mport_release_mapping - free mapping resources and info structure + * @ref: a pointer to the kref within struct rio_mport_mapping + * + * NOTE: Shall be called while holding buf_mutex. 
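As a rough illustration of how user space drives the ioctl table above — assuming the UAPI definitions (struct rio_mport_properties, RIO_MPORT_GET_PROPERTIES) from <linux/rio_mport_cdev.h> and a /dev/rio_mport0 device node name, neither of which is shown in this diff:

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/rio_mport_cdev.h>

/* Query the mport properties exposed by RIO_MPORT_GET_PROPERTIES (sketch). */
int query_mport(const char *devname)
{
	struct rio_mport_properties props;
	int fd = open(devname, O_RDWR);

	if (fd < 0)
		return -1;
	if (ioctl(fd, RIO_MPORT_GET_PROPERTIES, &props) == 0)
		printf("mport %u: hdid=%u link_speed=%u\n",
		       props.id, props.hdid, props.link_speed);
	close(fd);
	return 0;
}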
+ */ +static void mport_release_mapping(struct kref *ref) +{ + struct rio_mport_mapping *map = + container_of(ref, struct rio_mport_mapping, ref); + struct rio_mport *mport = map->md->mport; + + rmcd_debug(MMAP, "type %d mapping @ %p (phys = %pad) for %s", + map->dir, map->virt_addr, + &map->phys_addr, mport->name); + + list_del(&map->node); + + switch (map->dir) { + case MAP_INBOUND: + rio_unmap_inb_region(mport, map->phys_addr); + fallthrough; + case MAP_DMA: + dma_free_coherent(mport->dev.parent, map->size, + map->virt_addr, map->phys_addr); + break; + case MAP_OUTBOUND: + rio_unmap_outb_region(mport, map->rioid, map->rio_addr); + break; + } + kfree(map); +} + +static void mport_mm_open(struct vm_area_struct *vma) +{ + struct rio_mport_mapping *map = vma->vm_private_data; + + rmcd_debug(MMAP, "%pad", &map->phys_addr); + kref_get(&map->ref); +} + +static void mport_mm_close(struct vm_area_struct *vma) +{ + struct rio_mport_mapping *map = vma->vm_private_data; + + rmcd_debug(MMAP, "%pad", &map->phys_addr); + mutex_lock(&map->md->buf_mutex); + kref_put(&map->ref, mport_release_mapping); + mutex_unlock(&map->md->buf_mutex); +} + +static const struct vm_operations_struct vm_ops = { + .open = mport_mm_open, + .close = mport_mm_close, +}; + +static int mport_cdev_mmap(struct file *filp, struct vm_area_struct *vma) +{ + struct mport_cdev_priv *priv = filp->private_data; + struct mport_dev *md; + size_t size = vma->vm_end - vma->vm_start; + dma_addr_t baddr; + unsigned long offset; + int found = 0, ret; + struct rio_mport_mapping *map; + + rmcd_debug(MMAP, "0x%x bytes at offset 0x%lx", + (unsigned int)size, vma->vm_pgoff); + + md = priv->md; + baddr = ((dma_addr_t)vma->vm_pgoff << PAGE_SHIFT); + + mutex_lock(&md->buf_mutex); + list_for_each_entry(map, &md->mappings, node) { + if (baddr >= map->phys_addr && + baddr < (map->phys_addr + map->size)) { + found = 1; + break; + } + } + mutex_unlock(&md->buf_mutex); + + if (!found) + return -ENOMEM; + + offset = baddr - map->phys_addr; + + if (size + offset > map->size) + return -EINVAL; + + vma->vm_pgoff = offset >> PAGE_SHIFT; + rmcd_debug(MMAP, "MMAP adjusted offset = 0x%lx", vma->vm_pgoff); + + if (map->dir == MAP_INBOUND || map->dir == MAP_DMA) + ret = dma_mmap_coherent(md->mport->dev.parent, vma, + map->virt_addr, map->phys_addr, map->size); + else if (map->dir == MAP_OUTBOUND) { + vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); + ret = vm_iomap_memory(vma, map->phys_addr, map->size); + } else { + rmcd_error("Attempt to mmap unsupported mapping type"); + ret = -EIO; + } + + if (!ret) { + vma->vm_private_data = map; + vma->vm_ops = &vm_ops; + mport_mm_open(vma); + } else { + rmcd_error("MMAP exit with err=%d", ret); + } + + return ret; +} + +static __poll_t mport_cdev_poll(struct file *filp, poll_table *wait) +{ + struct mport_cdev_priv *priv = filp->private_data; + + poll_wait(filp, &priv->event_rx_wait, wait); + if (kfifo_len(&priv->event_fifo)) + return EPOLLIN | EPOLLRDNORM; + + return 0; +} + +static ssize_t mport_read(struct file *filp, char __user *buf, size_t count, + loff_t *ppos) +{ + struct mport_cdev_priv *priv = filp->private_data; + int copied; + ssize_t ret; + + if (!count) + return 0; + + if (kfifo_is_empty(&priv->event_fifo) && + (filp->f_flags & O_NONBLOCK)) + return -EAGAIN; + + if (count % sizeof(struct rio_event)) + return -EINVAL; + + ret = wait_event_interruptible(priv->event_rx_wait, + kfifo_len(&priv->event_fifo) != 0); + if (ret) + return ret; + + while (ret < count) { + if (kfifo_to_user(&priv->event_fifo, buf, + 
sizeof(struct rio_event), &copied)) + return -EFAULT; + ret += copied; + buf += copied; + } + + return ret; +} + +static ssize_t mport_write(struct file *filp, const char __user *buf, + size_t count, loff_t *ppos) +{ + struct mport_cdev_priv *priv = filp->private_data; + struct rio_mport *mport = priv->md->mport; + struct rio_event event; + int len, ret; + + if (!count) + return 0; + + if (count % sizeof(event)) + return -EINVAL; + + len = 0; + while ((count - len) >= (int)sizeof(event)) { + if (copy_from_user(&event, buf, sizeof(event))) + return -EFAULT; + + if (event.header != RIO_DOORBELL) + return -EINVAL; + + ret = rio_mport_send_doorbell(mport, + event.u.doorbell.rioid, + event.u.doorbell.payload); + if (ret < 0) + return ret; + + len += sizeof(event); + buf += sizeof(event); + } + + return len; +} + +static const struct file_operations mport_fops = { + .owner = THIS_MODULE, + .open = mport_cdev_open, + .release = mport_cdev_release, + .poll = mport_cdev_poll, + .read = mport_read, + .write = mport_write, + .mmap = mport_cdev_mmap, + .fasync = mport_cdev_fasync, + .unlocked_ioctl = mport_cdev_ioctl +}; + +/* + * Character device management + */ + +static void mport_device_release(struct device *dev) +{ + struct mport_dev *md; + + rmcd_debug(EXIT, "%s", dev_name(dev)); + md = container_of(dev, struct mport_dev, dev); + kfree(md); +} + +/* + * mport_cdev_add() - Create mport_dev from rio_mport + * @mport: RapidIO master port + */ +static struct mport_dev *mport_cdev_add(struct rio_mport *mport) +{ + int ret = 0; + struct mport_dev *md; + struct rio_mport_attr attr; + + md = kzalloc(sizeof(*md), GFP_KERNEL); + if (!md) { + rmcd_error("Unable allocate a device object"); + return NULL; + } + + md->mport = mport; + mutex_init(&md->buf_mutex); + mutex_init(&md->file_mutex); + INIT_LIST_HEAD(&md->file_list); + + device_initialize(&md->dev); + md->dev.devt = MKDEV(MAJOR(dev_number), mport->id); + md->dev.class = dev_class; + md->dev.parent = &mport->dev; + md->dev.release = mport_device_release; + dev_set_name(&md->dev, DEV_NAME "%d", mport->id); + atomic_set(&md->active, 1); + + cdev_init(&md->cdev, &mport_fops); + md->cdev.owner = THIS_MODULE; + + INIT_LIST_HEAD(&md->doorbells); + spin_lock_init(&md->db_lock); + INIT_LIST_HEAD(&md->portwrites); + spin_lock_init(&md->pw_lock); + INIT_LIST_HEAD(&md->mappings); + + md->properties.id = mport->id; + md->properties.sys_size = mport->sys_size; + md->properties.hdid = mport->host_deviceid; + md->properties.index = mport->index; + + /* The transfer_mode property will be returned through mport query + * interface + */ +#ifdef CONFIG_FSL_RIO /* for now: only on Freescale's SoCs */ + md->properties.transfer_mode |= RIO_TRANSFER_MODE_MAPPED; +#else + md->properties.transfer_mode |= RIO_TRANSFER_MODE_TRANSFER; +#endif + + ret = cdev_device_add(&md->cdev, &md->dev); + if (ret) { + rmcd_error("Failed to register mport %d (err=%d)", + mport->id, ret); + goto err_cdev; + } + ret = rio_query_mport(mport, &attr); + if (!ret) { + md->properties.flags = attr.flags; + md->properties.link_speed = attr.link_speed; + md->properties.link_width = attr.link_width; + md->properties.dma_max_sge = attr.dma_max_sge; + md->properties.dma_max_size = attr.dma_max_size; + md->properties.dma_align = attr.dma_align; + md->properties.cap_sys_size = 0; + md->properties.cap_transfer_mode = 0; + md->properties.cap_addr_size = 0; + } else + pr_info(DRV_PREFIX "Failed to obtain info for %s cdev(%d:%d)\n", + mport->name, MAJOR(dev_number), mport->id); + + mutex_lock(&mport_devs_lock); 
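mport_write() above accepts only RIO_DOORBELL events and forwards each one to rio_mport_send_doorbell(). A hedged user-space sketch of the sending side; the struct rio_event layout (header plus u.doorbell.rioid/payload) matches the checks in that handler, while the <linux/rio_mport_cdev.h> header name is assumed:

#include <string.h>
#include <unistd.h>
#include <linux/rio_mport_cdev.h>

/* Send one doorbell to destid with a 16-bit info payload (sketch). */
int send_doorbell(int mport_fd, unsigned short destid, unsigned short info)
{
	struct rio_event ev;

	memset(&ev, 0, sizeof(ev));
	ev.header = RIO_DOORBELL;	/* mport_write() rejects anything else */
	ev.u.doorbell.rioid = destid;
	ev.u.doorbell.payload = info;

	/* the write length must be a whole number of rio_event records */
	return write(mport_fd, &ev, sizeof(ev)) == sizeof(ev) ? 0 : -1;
}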
+ list_add_tail(&md->node, &mport_devs); + mutex_unlock(&mport_devs_lock); + + pr_info(DRV_PREFIX "Added %s cdev(%d:%d)\n", + mport->name, MAJOR(dev_number), mport->id); + + return md; + +err_cdev: + put_device(&md->dev); + return NULL; +} + +/* + * mport_cdev_terminate_dma() - Stop all active DMA data transfers and release + * associated DMA channels. + */ +static void mport_cdev_terminate_dma(struct mport_dev *md) +{ +#ifdef CONFIG_RAPIDIO_DMA_ENGINE + struct mport_cdev_priv *client; + + rmcd_debug(DMA, "%s", dev_name(&md->dev)); + + mutex_lock(&md->file_mutex); + list_for_each_entry(client, &md->file_list, list) { + if (client->dmach) { + dmaengine_terminate_all(client->dmach); + rio_release_dma(client->dmach); + } + } + mutex_unlock(&md->file_mutex); + + if (md->dma_chan) { + dmaengine_terminate_all(md->dma_chan); + rio_release_dma(md->dma_chan); + md->dma_chan = NULL; + } +#endif +} + + +/* + * mport_cdev_kill_fasync() - Send SIGIO signal to all processes with open + * mport_cdev files. + */ +static int mport_cdev_kill_fasync(struct mport_dev *md) +{ + unsigned int files = 0; + struct mport_cdev_priv *client; + + mutex_lock(&md->file_mutex); + list_for_each_entry(client, &md->file_list, list) { + if (client->async_queue) + kill_fasync(&client->async_queue, SIGIO, POLL_HUP); + files++; + } + mutex_unlock(&md->file_mutex); + return files; +} + +/* + * mport_cdev_remove() - Remove mport character device + * @dev: Mport device to remove + */ +static void mport_cdev_remove(struct mport_dev *md) +{ + struct rio_mport_mapping *map, *_map; + + rmcd_debug(EXIT, "Remove %s cdev", md->mport->name); + atomic_set(&md->active, 0); + mport_cdev_terminate_dma(md); + rio_del_mport_pw_handler(md->mport, md, rio_mport_pw_handler); + cdev_device_del(&md->cdev, &md->dev); + mport_cdev_kill_fasync(md); + + /* TODO: do we need to give clients some time to close file + * descriptors? Simple wait for XX, or kref? + */ + + /* + * Release DMA buffers allocated for the mport device. + * Disable associated inbound Rapidio requests mapping if applicable. 
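mport_cdev_mmap() above resolves the mmap offset back to a registered mapping by physical address, so user space passes the handle returned by RIO_ALLOC_DMA or RIO_MAP_INBOUND as the file offset. A minimal sketch; the handle is taken as a parameter here to avoid depending on the exact UAPI structure layout:

#include <stdint.h>
#include <sys/mman.h>

/* Map a buffer previously allocated through the mport cdev (sketch). */
void *map_dma_buffer(int mport_fd, uint64_t handle, size_t len)
{
	void *p = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED,
		       mport_fd, (off_t)handle);

	return (p == MAP_FAILED) ? NULL : p;
}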
+ */ + mutex_lock(&md->buf_mutex); + list_for_each_entry_safe(map, _map, &md->mappings, node) { + kref_put(&map->ref, mport_release_mapping); + } + mutex_unlock(&md->buf_mutex); + + if (!list_empty(&md->mappings)) + rmcd_warn("WARNING: %s pending mappings on removal", + md->mport->name); + + rio_release_inb_dbell(md->mport, 0, 0x0fff); + + put_device(&md->dev); +} + +/* + * RIO rio_mport_interface driver + */ + +/* + * mport_add_mport() - Add rio_mport from LDM device struct + * @dev: Linux device model struct + * @class_intf: Linux class_interface + */ +static int mport_add_mport(struct device *dev, + struct class_interface *class_intf) +{ + struct rio_mport *mport = NULL; + struct mport_dev *chdev = NULL; + + mport = to_rio_mport(dev); + if (!mport) + return -ENODEV; + + chdev = mport_cdev_add(mport); + if (!chdev) + return -ENODEV; + + return 0; +} + +/* + * mport_remove_mport() - Remove rio_mport from global list + * TODO remove device from global mport_dev list + */ +static void mport_remove_mport(struct device *dev, + struct class_interface *class_intf) +{ + struct rio_mport *mport = NULL; + struct mport_dev *chdev; + int found = 0; + + mport = to_rio_mport(dev); + rmcd_debug(EXIT, "Remove %s", mport->name); + + mutex_lock(&mport_devs_lock); + list_for_each_entry(chdev, &mport_devs, node) { + if (chdev->mport->id == mport->id) { + atomic_set(&chdev->active, 0); + list_del(&chdev->node); + found = 1; + break; + } + } + mutex_unlock(&mport_devs_lock); + + if (found) + mport_cdev_remove(chdev); +} + +/* the rio_mport_interface is used to handle local mport devices */ +static struct class_interface rio_mport_interface __refdata = { + .class = &rio_mport_class, + .add_dev = mport_add_mport, + .remove_dev = mport_remove_mport, +}; + +/* + * Linux kernel module + */ + +/* + * mport_init - Driver module loading + */ +static int __init mport_init(void) +{ + int ret; + + /* Create device class needed by udev */ + dev_class = class_create(THIS_MODULE, DRV_NAME); + if (IS_ERR(dev_class)) { + rmcd_error("Unable to create " DRV_NAME " class"); + return PTR_ERR(dev_class); + } + + ret = alloc_chrdev_region(&dev_number, 0, RIO_MAX_MPORTS, DRV_NAME); + if (ret < 0) + goto err_chr; + + rmcd_debug(INIT, "Registered class with major=%d", MAJOR(dev_number)); + + /* Register to rio_mport_interface */ + ret = class_interface_register(&rio_mport_interface); + if (ret) { + rmcd_error("class_interface_register() failed, err=%d", ret); + goto err_cli; + } + + return 0; + +err_cli: + unregister_chrdev_region(dev_number, RIO_MAX_MPORTS); +err_chr: + class_destroy(dev_class); + return ret; +} + +/** + * mport_exit - Driver module unloading + */ +static void __exit mport_exit(void) +{ + class_interface_unregister(&rio_mport_interface); + class_destroy(dev_class); + unregister_chrdev_region(dev_number, RIO_MAX_MPORTS); +} + +module_init(mport_init); +module_exit(mport_exit); diff --git a/drivers/rapidio/devices/tsi721.c b/drivers/rapidio/devices/tsi721.c new file mode 100644 index 000000000..4dd31dd9f --- /dev/null +++ b/drivers/rapidio/devices/tsi721.c @@ -0,0 +1,3002 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * RapidIO mport driver for Tsi721 PCIExpress-to-SRIO bridge + * + * Copyright 2011 Integrated Device Technology, Inc. 
+ * Alexandre Bounine <alexandre.bounine@idt.com> + * Chul Kim <chul.kim@idt.com> + */ + +#include <linux/io.h> +#include <linux/errno.h> +#include <linux/init.h> +#include <linux/ioport.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/pci.h> +#include <linux/rio.h> +#include <linux/rio_drv.h> +#include <linux/dma-mapping.h> +#include <linux/interrupt.h> +#include <linux/kfifo.h> +#include <linux/delay.h> + +#include "tsi721.h" + +#ifdef DEBUG +u32 tsi_dbg_level; +module_param_named(dbg_level, tsi_dbg_level, uint, S_IWUSR | S_IRUGO); +MODULE_PARM_DESC(dbg_level, "Debugging output level (default 0 = none)"); +#endif + +static int pcie_mrrs = -1; +module_param(pcie_mrrs, int, S_IRUGO); +MODULE_PARM_DESC(pcie_mrrs, "PCIe MRRS override value (0...5)"); + +static u8 mbox_sel = 0x0f; +module_param(mbox_sel, byte, S_IRUGO); +MODULE_PARM_DESC(mbox_sel, + "RIO Messaging MBOX Selection Mask (default: 0x0f = all)"); + +static DEFINE_SPINLOCK(tsi721_maint_lock); + +static void tsi721_omsg_handler(struct tsi721_device *priv, int ch); +static void tsi721_imsg_handler(struct tsi721_device *priv, int ch); + +/** + * tsi721_lcread - read from local SREP config space + * @mport: RapidIO master port info + * @index: ID of RapdiIO interface + * @offset: Offset into configuration space + * @len: Length (in bytes) of the maintenance transaction + * @data: Value to be read into + * + * Generates a local SREP space read. Returns %0 on + * success or %-EINVAL on failure. + */ +static int tsi721_lcread(struct rio_mport *mport, int index, u32 offset, + int len, u32 *data) +{ + struct tsi721_device *priv = mport->priv; + + if (len != sizeof(u32)) + return -EINVAL; /* only 32-bit access is supported */ + + *data = ioread32(priv->regs + offset); + + return 0; +} + +/** + * tsi721_lcwrite - write into local SREP config space + * @mport: RapidIO master port info + * @index: ID of RapdiIO interface + * @offset: Offset into configuration space + * @len: Length (in bytes) of the maintenance transaction + * @data: Value to be written + * + * Generates a local write into SREP configuration space. Returns %0 on + * success or %-EINVAL on failure. + */ +static int tsi721_lcwrite(struct rio_mport *mport, int index, u32 offset, + int len, u32 data) +{ + struct tsi721_device *priv = mport->priv; + + if (len != sizeof(u32)) + return -EINVAL; /* only 32-bit access is supported */ + + iowrite32(data, priv->regs + offset); + + return 0; +} + +/** + * tsi721_maint_dma - Helper function to generate RapidIO maintenance + * transactions using designated Tsi721 DMA channel. + * @priv: pointer to tsi721 private data + * @sys_size: RapdiIO transport system size + * @destid: Destination ID of transaction + * @hopcount: Number of hops to target device + * @offset: Offset into configuration space + * @len: Length (in bytes) of the maintenance transaction + * @data: Location to be read from or write into + * @do_wr: Operation flag (1 == MAINT_WR) + * + * Generates a RapidIO maintenance transaction (Read or Write). + * Returns %0 on success and %-EINVAL or %-EFAULT on failure. + */ +static int tsi721_maint_dma(struct tsi721_device *priv, u32 sys_size, + u16 destid, u8 hopcount, u32 offset, int len, + u32 *data, int do_wr) +{ + void __iomem *regs = priv->regs + TSI721_DMAC_BASE(priv->mdma.ch_id); + struct tsi721_dma_desc *bd_ptr; + u32 rd_count, swr_ptr, ch_stat; + unsigned long flags; + int i, err = 0; + u32 op = do_wr ? 
MAINT_WR : MAINT_RD; + + if (offset > (RIO_MAINT_SPACE_SZ - len) || (len != sizeof(u32))) + return -EINVAL; + + spin_lock_irqsave(&tsi721_maint_lock, flags); + + bd_ptr = priv->mdma.bd_base; + + rd_count = ioread32(regs + TSI721_DMAC_DRDCNT); + + /* Initialize DMA descriptor */ + bd_ptr[0].type_id = cpu_to_le32((DTYPE2 << 29) | (op << 19) | destid); + bd_ptr[0].bcount = cpu_to_le32((sys_size << 26) | 0x04); + bd_ptr[0].raddr_lo = cpu_to_le32((hopcount << 24) | offset); + bd_ptr[0].raddr_hi = 0; + if (do_wr) + bd_ptr[0].data[0] = cpu_to_be32p(data); + else + bd_ptr[0].data[0] = 0xffffffff; + + mb(); + + /* Start DMA operation */ + iowrite32(rd_count + 2, regs + TSI721_DMAC_DWRCNT); + ioread32(regs + TSI721_DMAC_DWRCNT); + i = 0; + + /* Wait until DMA transfer is finished */ + while ((ch_stat = ioread32(regs + TSI721_DMAC_STS)) + & TSI721_DMAC_STS_RUN) { + udelay(1); + if (++i >= 5000000) { + tsi_debug(MAINT, &priv->pdev->dev, + "DMA[%d] read timeout ch_status=%x", + priv->mdma.ch_id, ch_stat); + if (!do_wr) + *data = 0xffffffff; + err = -EIO; + goto err_out; + } + } + + if (ch_stat & TSI721_DMAC_STS_ABORT) { + /* If DMA operation aborted due to error, + * reinitialize DMA channel + */ + tsi_debug(MAINT, &priv->pdev->dev, "DMA ABORT ch_stat=%x", + ch_stat); + tsi_debug(MAINT, &priv->pdev->dev, + "OP=%d : destid=%x hc=%x off=%x", + do_wr ? MAINT_WR : MAINT_RD, + destid, hopcount, offset); + iowrite32(TSI721_DMAC_INT_ALL, regs + TSI721_DMAC_INT); + iowrite32(TSI721_DMAC_CTL_INIT, regs + TSI721_DMAC_CTL); + udelay(10); + iowrite32(0, regs + TSI721_DMAC_DWRCNT); + udelay(1); + if (!do_wr) + *data = 0xffffffff; + err = -EIO; + goto err_out; + } + + if (!do_wr) + *data = be32_to_cpu(bd_ptr[0].data[0]); + + /* + * Update descriptor status FIFO RD pointer. + * NOTE: Skipping check and clear FIFO entries because we are waiting + * for transfer to be completed. + */ + swr_ptr = ioread32(regs + TSI721_DMAC_DSWP); + iowrite32(swr_ptr, regs + TSI721_DMAC_DSRP); + +err_out: + spin_unlock_irqrestore(&tsi721_maint_lock, flags); + + return err; +} + +/** + * tsi721_cread_dma - Generate a RapidIO maintenance read transaction + * using Tsi721 BDMA engine. + * @mport: RapidIO master port control structure + * @index: ID of RapdiIO interface + * @destid: Destination ID of transaction + * @hopcount: Number of hops to target device + * @offset: Offset into configuration space + * @len: Length (in bytes) of the maintenance transaction + * @val: Location to be read into + * + * Generates a RapidIO maintenance read transaction. + * Returns %0 on success and %-EINVAL or %-EFAULT on failure. + */ +static int tsi721_cread_dma(struct rio_mport *mport, int index, u16 destid, + u8 hopcount, u32 offset, int len, u32 *data) +{ + struct tsi721_device *priv = mport->priv; + + return tsi721_maint_dma(priv, mport->sys_size, destid, hopcount, + offset, len, data, 0); +} + +/** + * tsi721_cwrite_dma - Generate a RapidIO maintenance write transaction + * using Tsi721 BDMA engine + * @mport: RapidIO master port control structure + * @index: ID of RapdiIO interface + * @destid: Destination ID of transaction + * @hopcount: Number of hops to target device + * @offset: Offset into configuration space + * @len: Length (in bytes) of the maintenance transaction + * @val: Value to be written + * + * Generates a RapidIO maintenance write transaction. + * Returns %0 on success and %-EINVAL or %-EFAULT on failure. 
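In-kernel clients normally reach these maintenance helpers through the generic RapidIO config-space accessors rather than calling them directly; on this adapter the mport's cread/cwrite operations point at tsi721_cread_dma()/tsi721_cwrite_dma(). A sketch, assuming rio_read_config_32() and RIO_DEV_ID_CAR keep their mainline definitions:

#include <linux/rio.h>
#include <linux/rio_drv.h>
#include <linux/rio_regs.h>

/* Read the Device Identity CAR (config offset 0x00) of a remote device. */
static int read_device_identity(struct rio_dev *rdev, u32 *did)
{
	/* resolves to the mport's cread op, i.e. tsi721_cread_dma() here */
	return rio_read_config_32(rdev, RIO_DEV_ID_CAR, did);
}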
+ */ +static int tsi721_cwrite_dma(struct rio_mport *mport, int index, u16 destid, + u8 hopcount, u32 offset, int len, u32 data) +{ + struct tsi721_device *priv = mport->priv; + u32 temp = data; + + return tsi721_maint_dma(priv, mport->sys_size, destid, hopcount, + offset, len, &temp, 1); +} + +/** + * tsi721_pw_handler - Tsi721 inbound port-write interrupt handler + * @priv: tsi721 device private structure + * + * Handles inbound port-write interrupts. Copies PW message from an internal + * buffer into PW message FIFO and schedules deferred routine to process + * queued messages. + */ +static int +tsi721_pw_handler(struct tsi721_device *priv) +{ + u32 pw_stat; + u32 pw_buf[TSI721_RIO_PW_MSG_SIZE/sizeof(u32)]; + + + pw_stat = ioread32(priv->regs + TSI721_RIO_PW_RX_STAT); + + if (pw_stat & TSI721_RIO_PW_RX_STAT_PW_VAL) { + pw_buf[0] = ioread32(priv->regs + TSI721_RIO_PW_RX_CAPT(0)); + pw_buf[1] = ioread32(priv->regs + TSI721_RIO_PW_RX_CAPT(1)); + pw_buf[2] = ioread32(priv->regs + TSI721_RIO_PW_RX_CAPT(2)); + pw_buf[3] = ioread32(priv->regs + TSI721_RIO_PW_RX_CAPT(3)); + + /* Queue PW message (if there is room in FIFO), + * otherwise discard it. + */ + spin_lock(&priv->pw_fifo_lock); + if (kfifo_avail(&priv->pw_fifo) >= TSI721_RIO_PW_MSG_SIZE) + kfifo_in(&priv->pw_fifo, pw_buf, + TSI721_RIO_PW_MSG_SIZE); + else + priv->pw_discard_count++; + spin_unlock(&priv->pw_fifo_lock); + } + + /* Clear pending PW interrupts */ + iowrite32(TSI721_RIO_PW_RX_STAT_PW_DISC | TSI721_RIO_PW_RX_STAT_PW_VAL, + priv->regs + TSI721_RIO_PW_RX_STAT); + + schedule_work(&priv->pw_work); + + return 0; +} + +static void tsi721_pw_dpc(struct work_struct *work) +{ + struct tsi721_device *priv = container_of(work, struct tsi721_device, + pw_work); + union rio_pw_msg pwmsg; + + /* + * Process port-write messages + */ + while (kfifo_out_spinlocked(&priv->pw_fifo, (unsigned char *)&pwmsg, + TSI721_RIO_PW_MSG_SIZE, &priv->pw_fifo_lock)) { + /* Pass the port-write message to RIO core for processing */ + rio_inb_pwrite_handler(&priv->mport, &pwmsg); + } +} + +/** + * tsi721_pw_enable - enable/disable port-write interface init + * @mport: Master port implementing the port write unit + * @enable: 1=enable; 0=disable port-write message handling + */ +static int tsi721_pw_enable(struct rio_mport *mport, int enable) +{ + struct tsi721_device *priv = mport->priv; + u32 rval; + + rval = ioread32(priv->regs + TSI721_RIO_EM_INT_ENABLE); + + if (enable) + rval |= TSI721_RIO_EM_INT_ENABLE_PW_RX; + else + rval &= ~TSI721_RIO_EM_INT_ENABLE_PW_RX; + + /* Clear pending PW interrupts */ + iowrite32(TSI721_RIO_PW_RX_STAT_PW_DISC | TSI721_RIO_PW_RX_STAT_PW_VAL, + priv->regs + TSI721_RIO_PW_RX_STAT); + /* Update enable bits */ + iowrite32(rval, priv->regs + TSI721_RIO_EM_INT_ENABLE); + + return 0; +} + +/** + * tsi721_dsend - Send a RapidIO doorbell + * @mport: RapidIO master port info + * @index: ID of RapidIO interface + * @destid: Destination ID of target device + * @data: 16-bit info field of RapidIO doorbell + * + * Sends a RapidIO doorbell message. Always returns %0. + */ +static int tsi721_dsend(struct rio_mport *mport, int index, + u16 destid, u16 data) +{ + struct tsi721_device *priv = mport->priv; + u32 offset; + + offset = (((mport->sys_size) ? 
RIO_TT_CODE_16 : RIO_TT_CODE_8) << 18) | + (destid << 2); + + tsi_debug(DBELL, &priv->pdev->dev, + "Send Doorbell 0x%04x to destID 0x%x", data, destid); + iowrite16be(data, priv->odb_base + offset); + + return 0; +} + +/** + * tsi721_dbell_handler - Tsi721 doorbell interrupt handler + * @priv: tsi721 device-specific data structure + * + * Handles inbound doorbell interrupts. Copies doorbell entry from an internal + * buffer into DB message FIFO and schedules deferred routine to process + * queued DBs. + */ +static int +tsi721_dbell_handler(struct tsi721_device *priv) +{ + u32 regval; + + /* Disable IDB interrupts */ + regval = ioread32(priv->regs + TSI721_SR_CHINTE(IDB_QUEUE)); + regval &= ~TSI721_SR_CHINT_IDBQRCV; + iowrite32(regval, + priv->regs + TSI721_SR_CHINTE(IDB_QUEUE)); + + schedule_work(&priv->idb_work); + + return 0; +} + +static void tsi721_db_dpc(struct work_struct *work) +{ + struct tsi721_device *priv = container_of(work, struct tsi721_device, + idb_work); + struct rio_mport *mport; + struct rio_dbell *dbell; + int found = 0; + u32 wr_ptr, rd_ptr; + u64 *idb_entry; + u32 regval; + union { + u64 msg; + u8 bytes[8]; + } idb; + + /* + * Process queued inbound doorbells + */ + mport = &priv->mport; + + wr_ptr = ioread32(priv->regs + TSI721_IDQ_WP(IDB_QUEUE)) % IDB_QSIZE; + rd_ptr = ioread32(priv->regs + TSI721_IDQ_RP(IDB_QUEUE)) % IDB_QSIZE; + + while (wr_ptr != rd_ptr) { + idb_entry = (u64 *)(priv->idb_base + + (TSI721_IDB_ENTRY_SIZE * rd_ptr)); + rd_ptr++; + rd_ptr %= IDB_QSIZE; + idb.msg = *idb_entry; + *idb_entry = 0; + + /* Process one doorbell */ + list_for_each_entry(dbell, &mport->dbells, node) { + if ((dbell->res->start <= DBELL_INF(idb.bytes)) && + (dbell->res->end >= DBELL_INF(idb.bytes))) { + found = 1; + break; + } + } + + if (found) { + dbell->dinb(mport, dbell->dev_id, DBELL_SID(idb.bytes), + DBELL_TID(idb.bytes), DBELL_INF(idb.bytes)); + } else { + tsi_debug(DBELL, &priv->pdev->dev, + "spurious IDB sid %2.2x tid %2.2x info %4.4x", + DBELL_SID(idb.bytes), DBELL_TID(idb.bytes), + DBELL_INF(idb.bytes)); + } + + wr_ptr = ioread32(priv->regs + + TSI721_IDQ_WP(IDB_QUEUE)) % IDB_QSIZE; + } + + iowrite32(rd_ptr & (IDB_QSIZE - 1), + priv->regs + TSI721_IDQ_RP(IDB_QUEUE)); + + /* Re-enable IDB interrupts */ + regval = ioread32(priv->regs + TSI721_SR_CHINTE(IDB_QUEUE)); + regval |= TSI721_SR_CHINT_IDBQRCV; + iowrite32(regval, + priv->regs + TSI721_SR_CHINTE(IDB_QUEUE)); + + wr_ptr = ioread32(priv->regs + TSI721_IDQ_WP(IDB_QUEUE)) % IDB_QSIZE; + if (wr_ptr != rd_ptr) + schedule_work(&priv->idb_work); +} + +/** + * tsi721_irqhandler - Tsi721 interrupt handler + * @irq: Linux interrupt number + * @ptr: Pointer to interrupt-specific data (tsi721_device structure) + * + * Handles Tsi721 interrupts signaled using MSI and INTA. Checks reported + * interrupt events and calls an event-specific handler(s). 
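tsi721_db_dpc() above matches each queued inbound doorbell against the ranges registered on mport->dbells and invokes the owner's dinb callback. A sketch of the consumer side, assuming rio_request_inb_dbell() from drivers/rapidio/rio.c keeps the signature implied by that callback:

#include <linux/kernel.h>
#include <linux/rio.h>
#include <linux/rio_drv.h>

/* Invoked from the doorbell DPC for info values inside the range 0..15. */
static void my_dbell_handler(struct rio_mport *mport, void *dev_id,
			     u16 src, u16 dst, u16 info)
{
	pr_info("doorbell from 0x%04x: info=0x%04x\n", src, info);
}

static int register_dbell_range(struct rio_mport *mport, void *dev_id)
{
	return rio_request_inb_dbell(mport, dev_id, 0, 15, my_dbell_handler);
}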
+ */ +static irqreturn_t tsi721_irqhandler(int irq, void *ptr) +{ + struct tsi721_device *priv = (struct tsi721_device *)ptr; + u32 dev_int; + u32 dev_ch_int; + u32 intval; + u32 ch_inte; + + /* For MSI mode disable all device-level interrupts */ + if (priv->flags & TSI721_USING_MSI) + iowrite32(0, priv->regs + TSI721_DEV_INTE); + + dev_int = ioread32(priv->regs + TSI721_DEV_INT); + if (!dev_int) + return IRQ_NONE; + + dev_ch_int = ioread32(priv->regs + TSI721_DEV_CHAN_INT); + + if (dev_int & TSI721_DEV_INT_SR2PC_CH) { + /* Service SR2PC Channel interrupts */ + if (dev_ch_int & TSI721_INT_SR2PC_CHAN(IDB_QUEUE)) { + /* Service Inbound Doorbell interrupt */ + intval = ioread32(priv->regs + + TSI721_SR_CHINT(IDB_QUEUE)); + if (intval & TSI721_SR_CHINT_IDBQRCV) + tsi721_dbell_handler(priv); + else + tsi_info(&priv->pdev->dev, + "Unsupported SR_CH_INT %x", intval); + + /* Clear interrupts */ + iowrite32(intval, + priv->regs + TSI721_SR_CHINT(IDB_QUEUE)); + ioread32(priv->regs + TSI721_SR_CHINT(IDB_QUEUE)); + } + } + + if (dev_int & TSI721_DEV_INT_SMSG_CH) { + int ch; + + /* + * Service channel interrupts from Messaging Engine + */ + + if (dev_ch_int & TSI721_INT_IMSG_CHAN_M) { /* Inbound Msg */ + /* Disable signaled OB MSG Channel interrupts */ + ch_inte = ioread32(priv->regs + TSI721_DEV_CHAN_INTE); + ch_inte &= ~(dev_ch_int & TSI721_INT_IMSG_CHAN_M); + iowrite32(ch_inte, priv->regs + TSI721_DEV_CHAN_INTE); + + /* + * Process Inbound Message interrupt for each MBOX + */ + for (ch = 4; ch < RIO_MAX_MBOX + 4; ch++) { + if (!(dev_ch_int & TSI721_INT_IMSG_CHAN(ch))) + continue; + tsi721_imsg_handler(priv, ch); + } + } + + if (dev_ch_int & TSI721_INT_OMSG_CHAN_M) { /* Outbound Msg */ + /* Disable signaled OB MSG Channel interrupts */ + ch_inte = ioread32(priv->regs + TSI721_DEV_CHAN_INTE); + ch_inte &= ~(dev_ch_int & TSI721_INT_OMSG_CHAN_M); + iowrite32(ch_inte, priv->regs + TSI721_DEV_CHAN_INTE); + + /* + * Process Outbound Message interrupts for each MBOX + */ + + for (ch = 0; ch < RIO_MAX_MBOX; ch++) { + if (!(dev_ch_int & TSI721_INT_OMSG_CHAN(ch))) + continue; + tsi721_omsg_handler(priv, ch); + } + } + } + + if (dev_int & TSI721_DEV_INT_SRIO) { + /* Service SRIO MAC interrupts */ + intval = ioread32(priv->regs + TSI721_RIO_EM_INT_STAT); + if (intval & TSI721_RIO_EM_INT_STAT_PW_RX) + tsi721_pw_handler(priv); + } + +#ifdef CONFIG_RAPIDIO_DMA_ENGINE + if (dev_int & TSI721_DEV_INT_BDMA_CH) { + int ch; + + if (dev_ch_int & TSI721_INT_BDMA_CHAN_M) { + tsi_debug(DMA, &priv->pdev->dev, + "IRQ from DMA channel 0x%08x", dev_ch_int); + + for (ch = 0; ch < TSI721_DMA_MAXCH; ch++) { + if (!(dev_ch_int & TSI721_INT_BDMA_CHAN(ch))) + continue; + tsi721_bdma_handler(&priv->bdma[ch]); + } + } + } +#endif + + /* For MSI mode re-enable device-level interrupts */ + if (priv->flags & TSI721_USING_MSI) { + dev_int = TSI721_DEV_INT_SR2PC_CH | TSI721_DEV_INT_SRIO | + TSI721_DEV_INT_SMSG_CH | TSI721_DEV_INT_BDMA_CH; + iowrite32(dev_int, priv->regs + TSI721_DEV_INTE); + } + + return IRQ_HANDLED; +} + +static void tsi721_interrupts_init(struct tsi721_device *priv) +{ + u32 intr; + + /* Enable IDB interrupts */ + iowrite32(TSI721_SR_CHINT_ALL, + priv->regs + TSI721_SR_CHINT(IDB_QUEUE)); + iowrite32(TSI721_SR_CHINT_IDBQRCV, + priv->regs + TSI721_SR_CHINTE(IDB_QUEUE)); + + /* Enable SRIO MAC interrupts */ + iowrite32(TSI721_RIO_EM_DEV_INT_EN_INT, + priv->regs + TSI721_RIO_EM_DEV_INT_EN); + + /* Enable interrupts from channels in use */ +#ifdef CONFIG_RAPIDIO_DMA_ENGINE + intr = TSI721_INT_SR2PC_CHAN(IDB_QUEUE) | + 
(TSI721_INT_BDMA_CHAN_M & + ~TSI721_INT_BDMA_CHAN(TSI721_DMACH_MAINT)); +#else + intr = TSI721_INT_SR2PC_CHAN(IDB_QUEUE); +#endif + iowrite32(intr, priv->regs + TSI721_DEV_CHAN_INTE); + + if (priv->flags & TSI721_USING_MSIX) + intr = TSI721_DEV_INT_SRIO; + else + intr = TSI721_DEV_INT_SR2PC_CH | TSI721_DEV_INT_SRIO | + TSI721_DEV_INT_SMSG_CH | TSI721_DEV_INT_BDMA_CH; + + iowrite32(intr, priv->regs + TSI721_DEV_INTE); + ioread32(priv->regs + TSI721_DEV_INTE); +} + +#ifdef CONFIG_PCI_MSI +/** + * tsi721_omsg_msix - MSI-X interrupt handler for outbound messaging + * @irq: Linux interrupt number + * @ptr: Pointer to interrupt-specific data (tsi721_device structure) + * + * Handles outbound messaging interrupts signaled using MSI-X. + */ +static irqreturn_t tsi721_omsg_msix(int irq, void *ptr) +{ + struct tsi721_device *priv = (struct tsi721_device *)ptr; + int mbox; + + mbox = (irq - priv->msix[TSI721_VECT_OMB0_DONE].vector) % RIO_MAX_MBOX; + tsi721_omsg_handler(priv, mbox); + return IRQ_HANDLED; +} + +/** + * tsi721_imsg_msix - MSI-X interrupt handler for inbound messaging + * @irq: Linux interrupt number + * @ptr: Pointer to interrupt-specific data (tsi721_device structure) + * + * Handles inbound messaging interrupts signaled using MSI-X. + */ +static irqreturn_t tsi721_imsg_msix(int irq, void *ptr) +{ + struct tsi721_device *priv = (struct tsi721_device *)ptr; + int mbox; + + mbox = (irq - priv->msix[TSI721_VECT_IMB0_RCV].vector) % RIO_MAX_MBOX; + tsi721_imsg_handler(priv, mbox + 4); + return IRQ_HANDLED; +} + +/** + * tsi721_srio_msix - Tsi721 MSI-X SRIO MAC interrupt handler + * @irq: Linux interrupt number + * @ptr: Pointer to interrupt-specific data (tsi721_device structure) + * + * Handles Tsi721 interrupts from SRIO MAC. + */ +static irqreturn_t tsi721_srio_msix(int irq, void *ptr) +{ + struct tsi721_device *priv = (struct tsi721_device *)ptr; + u32 srio_int; + + /* Service SRIO MAC interrupts */ + srio_int = ioread32(priv->regs + TSI721_RIO_EM_INT_STAT); + if (srio_int & TSI721_RIO_EM_INT_STAT_PW_RX) + tsi721_pw_handler(priv); + + return IRQ_HANDLED; +} + +/** + * tsi721_sr2pc_ch_msix - Tsi721 MSI-X SR2PC Channel interrupt handler + * @irq: Linux interrupt number + * @ptr: Pointer to interrupt-specific data (tsi721_device structure) + * + * Handles Tsi721 interrupts from SR2PC Channel. + * NOTE: At this moment services only one SR2PC channel associated with inbound + * doorbells. + */ +static irqreturn_t tsi721_sr2pc_ch_msix(int irq, void *ptr) +{ + struct tsi721_device *priv = (struct tsi721_device *)ptr; + u32 sr_ch_int; + + /* Service Inbound DB interrupt from SR2PC channel */ + sr_ch_int = ioread32(priv->regs + TSI721_SR_CHINT(IDB_QUEUE)); + if (sr_ch_int & TSI721_SR_CHINT_IDBQRCV) + tsi721_dbell_handler(priv); + + /* Clear interrupts */ + iowrite32(sr_ch_int, priv->regs + TSI721_SR_CHINT(IDB_QUEUE)); + /* Read back to ensure that interrupt was cleared */ + sr_ch_int = ioread32(priv->regs + TSI721_SR_CHINT(IDB_QUEUE)); + + return IRQ_HANDLED; +} + +/** + * tsi721_request_msix - register interrupt service for MSI-X mode. + * @priv: tsi721 device-specific data structure + * + * Registers MSI-X interrupt service routines for interrupts that are active + * immediately after mport initialization. Messaging interrupt service routines + * should be registered during corresponding open requests. 
+ */ +static int tsi721_request_msix(struct tsi721_device *priv) +{ + int err = 0; + + err = request_irq(priv->msix[TSI721_VECT_IDB].vector, + tsi721_sr2pc_ch_msix, 0, + priv->msix[TSI721_VECT_IDB].irq_name, (void *)priv); + if (err) + return err; + + err = request_irq(priv->msix[TSI721_VECT_PWRX].vector, + tsi721_srio_msix, 0, + priv->msix[TSI721_VECT_PWRX].irq_name, (void *)priv); + if (err) { + free_irq(priv->msix[TSI721_VECT_IDB].vector, (void *)priv); + return err; + } + + return 0; +} + +/** + * tsi721_enable_msix - Attempts to enable MSI-X support for Tsi721. + * @priv: pointer to tsi721 private data + * + * Configures MSI-X support for Tsi721. Supports only an exact number + * of requested vectors. + */ +static int tsi721_enable_msix(struct tsi721_device *priv) +{ + struct msix_entry entries[TSI721_VECT_MAX]; + int err; + int i; + + entries[TSI721_VECT_IDB].entry = TSI721_MSIX_SR2PC_IDBQ_RCV(IDB_QUEUE); + entries[TSI721_VECT_PWRX].entry = TSI721_MSIX_SRIO_MAC_INT; + + /* + * Initialize MSI-X entries for Messaging Engine: + * this driver supports four RIO mailboxes (inbound and outbound) + * NOTE: Inbound message MBOX 0...4 use IB channels 4...7. Therefore + * offset +4 is added to IB MBOX number. + */ + for (i = 0; i < RIO_MAX_MBOX; i++) { + entries[TSI721_VECT_IMB0_RCV + i].entry = + TSI721_MSIX_IMSG_DQ_RCV(i + 4); + entries[TSI721_VECT_IMB0_INT + i].entry = + TSI721_MSIX_IMSG_INT(i + 4); + entries[TSI721_VECT_OMB0_DONE + i].entry = + TSI721_MSIX_OMSG_DONE(i); + entries[TSI721_VECT_OMB0_INT + i].entry = + TSI721_MSIX_OMSG_INT(i); + } + +#ifdef CONFIG_RAPIDIO_DMA_ENGINE + /* + * Initialize MSI-X entries for Block DMA Engine: + * this driver supports XXX DMA channels + * (one is reserved for SRIO maintenance transactions) + */ + for (i = 0; i < TSI721_DMA_CHNUM; i++) { + entries[TSI721_VECT_DMA0_DONE + i].entry = + TSI721_MSIX_DMACH_DONE(i); + entries[TSI721_VECT_DMA0_INT + i].entry = + TSI721_MSIX_DMACH_INT(i); + } +#endif /* CONFIG_RAPIDIO_DMA_ENGINE */ + + err = pci_enable_msix_exact(priv->pdev, entries, ARRAY_SIZE(entries)); + if (err) { + tsi_err(&priv->pdev->dev, + "Failed to enable MSI-X (err=%d)", err); + return err; + } + + /* + * Copy MSI-X vector information into tsi721 private structure + */ + priv->msix[TSI721_VECT_IDB].vector = entries[TSI721_VECT_IDB].vector; + snprintf(priv->msix[TSI721_VECT_IDB].irq_name, IRQ_DEVICE_NAME_MAX, + DRV_NAME "-idb@pci:%s", pci_name(priv->pdev)); + priv->msix[TSI721_VECT_PWRX].vector = entries[TSI721_VECT_PWRX].vector; + snprintf(priv->msix[TSI721_VECT_PWRX].irq_name, IRQ_DEVICE_NAME_MAX, + DRV_NAME "-pwrx@pci:%s", pci_name(priv->pdev)); + + for (i = 0; i < RIO_MAX_MBOX; i++) { + priv->msix[TSI721_VECT_IMB0_RCV + i].vector = + entries[TSI721_VECT_IMB0_RCV + i].vector; + snprintf(priv->msix[TSI721_VECT_IMB0_RCV + i].irq_name, + IRQ_DEVICE_NAME_MAX, DRV_NAME "-imbr%d@pci:%s", + i, pci_name(priv->pdev)); + + priv->msix[TSI721_VECT_IMB0_INT + i].vector = + entries[TSI721_VECT_IMB0_INT + i].vector; + snprintf(priv->msix[TSI721_VECT_IMB0_INT + i].irq_name, + IRQ_DEVICE_NAME_MAX, DRV_NAME "-imbi%d@pci:%s", + i, pci_name(priv->pdev)); + + priv->msix[TSI721_VECT_OMB0_DONE + i].vector = + entries[TSI721_VECT_OMB0_DONE + i].vector; + snprintf(priv->msix[TSI721_VECT_OMB0_DONE + i].irq_name, + IRQ_DEVICE_NAME_MAX, DRV_NAME "-ombd%d@pci:%s", + i, pci_name(priv->pdev)); + + priv->msix[TSI721_VECT_OMB0_INT + i].vector = + entries[TSI721_VECT_OMB0_INT + i].vector; + snprintf(priv->msix[TSI721_VECT_OMB0_INT + i].irq_name, + IRQ_DEVICE_NAME_MAX, DRV_NAME 
"-ombi%d@pci:%s", + i, pci_name(priv->pdev)); + } + +#ifdef CONFIG_RAPIDIO_DMA_ENGINE + for (i = 0; i < TSI721_DMA_CHNUM; i++) { + priv->msix[TSI721_VECT_DMA0_DONE + i].vector = + entries[TSI721_VECT_DMA0_DONE + i].vector; + snprintf(priv->msix[TSI721_VECT_DMA0_DONE + i].irq_name, + IRQ_DEVICE_NAME_MAX, DRV_NAME "-dmad%d@pci:%s", + i, pci_name(priv->pdev)); + + priv->msix[TSI721_VECT_DMA0_INT + i].vector = + entries[TSI721_VECT_DMA0_INT + i].vector; + snprintf(priv->msix[TSI721_VECT_DMA0_INT + i].irq_name, + IRQ_DEVICE_NAME_MAX, DRV_NAME "-dmai%d@pci:%s", + i, pci_name(priv->pdev)); + } +#endif /* CONFIG_RAPIDIO_DMA_ENGINE */ + + return 0; +} +#endif /* CONFIG_PCI_MSI */ + +static int tsi721_request_irq(struct tsi721_device *priv) +{ + int err; + +#ifdef CONFIG_PCI_MSI + if (priv->flags & TSI721_USING_MSIX) + err = tsi721_request_msix(priv); + else +#endif + err = request_irq(priv->pdev->irq, tsi721_irqhandler, + (priv->flags & TSI721_USING_MSI) ? 0 : IRQF_SHARED, + DRV_NAME, (void *)priv); + + if (err) + tsi_err(&priv->pdev->dev, + "Unable to allocate interrupt, err=%d", err); + + return err; +} + +static void tsi721_free_irq(struct tsi721_device *priv) +{ +#ifdef CONFIG_PCI_MSI + if (priv->flags & TSI721_USING_MSIX) { + free_irq(priv->msix[TSI721_VECT_IDB].vector, (void *)priv); + free_irq(priv->msix[TSI721_VECT_PWRX].vector, (void *)priv); + } else +#endif + free_irq(priv->pdev->irq, (void *)priv); +} + +static int +tsi721_obw_alloc(struct tsi721_device *priv, struct tsi721_obw_bar *pbar, + u32 size, int *win_id) +{ + u64 win_base; + u64 bar_base; + u64 bar_end; + u32 align; + struct tsi721_ob_win *win; + struct tsi721_ob_win *new_win = NULL; + int new_win_idx = -1; + int i = 0; + + bar_base = pbar->base; + bar_end = bar_base + pbar->size; + win_base = bar_base; + align = size/TSI721_PC2SR_ZONES; + + while (i < TSI721_IBWIN_NUM) { + for (i = 0; i < TSI721_IBWIN_NUM; i++) { + if (!priv->ob_win[i].active) { + if (new_win == NULL) { + new_win = &priv->ob_win[i]; + new_win_idx = i; + } + continue; + } + + /* + * If this window belongs to the current BAR check it + * for overlap + */ + win = &priv->ob_win[i]; + + if (win->base >= bar_base && win->base < bar_end) { + if (win_base < (win->base + win->size) && + (win_base + size) > win->base) { + /* Overlap detected */ + win_base = win->base + win->size; + win_base = ALIGN(win_base, align); + break; + } + } + } + } + + if (win_base + size > bar_end) + return -ENOMEM; + + if (!new_win) { + tsi_err(&priv->pdev->dev, "OBW count tracking failed"); + return -EIO; + } + + new_win->active = true; + new_win->base = win_base; + new_win->size = size; + new_win->pbar = pbar; + priv->obwin_cnt--; + pbar->free -= size; + *win_id = new_win_idx; + return 0; +} + +static int tsi721_map_outb_win(struct rio_mport *mport, u16 destid, u64 rstart, + u32 size, u32 flags, dma_addr_t *laddr) +{ + struct tsi721_device *priv = mport->priv; + int i; + struct tsi721_obw_bar *pbar; + struct tsi721_ob_win *ob_win; + int obw = -1; + u32 rval; + u64 rio_addr; + u32 zsize; + int ret = -ENOMEM; + + tsi_debug(OBW, &priv->pdev->dev, + "did=%d ra=0x%llx sz=0x%x", destid, rstart, size); + + if (!is_power_of_2(size) || (size < 0x8000) || (rstart & (size - 1))) + return -EINVAL; + + if (priv->obwin_cnt == 0) + return -EBUSY; + + for (i = 0; i < 2; i++) { + if (priv->p2r_bar[i].free >= size) { + pbar = &priv->p2r_bar[i]; + ret = tsi721_obw_alloc(priv, pbar, size, &obw); + if (!ret) + break; + } + } + + if (ret) + return ret; + + WARN_ON(obw == -1); + ob_win = &priv->ob_win[obw]; + 
ob_win->destid = destid; + ob_win->rstart = rstart; + tsi_debug(OBW, &priv->pdev->dev, + "allocated OBW%d @%llx", obw, ob_win->base); + + /* + * Configure Outbound Window + */ + + zsize = size/TSI721_PC2SR_ZONES; + rio_addr = rstart; + + /* + * Program Address Translation Zones: + * This implementation uses all 8 zones associated wit window. + */ + for (i = 0; i < TSI721_PC2SR_ZONES; i++) { + + while (ioread32(priv->regs + TSI721_ZONE_SEL) & + TSI721_ZONE_SEL_GO) { + udelay(1); + } + + rval = (u32)(rio_addr & TSI721_LUT_DATA0_ADD) | + TSI721_LUT_DATA0_NREAD | TSI721_LUT_DATA0_NWR; + iowrite32(rval, priv->regs + TSI721_LUT_DATA0); + rval = (u32)(rio_addr >> 32); + iowrite32(rval, priv->regs + TSI721_LUT_DATA1); + rval = destid; + iowrite32(rval, priv->regs + TSI721_LUT_DATA2); + + rval = TSI721_ZONE_SEL_GO | (obw << 3) | i; + iowrite32(rval, priv->regs + TSI721_ZONE_SEL); + + rio_addr += zsize; + } + + iowrite32(TSI721_OBWIN_SIZE(size) << 8, + priv->regs + TSI721_OBWINSZ(obw)); + iowrite32((u32)(ob_win->base >> 32), priv->regs + TSI721_OBWINUB(obw)); + iowrite32((u32)(ob_win->base & TSI721_OBWINLB_BA) | TSI721_OBWINLB_WEN, + priv->regs + TSI721_OBWINLB(obw)); + + *laddr = ob_win->base; + return 0; +} + +static void tsi721_unmap_outb_win(struct rio_mport *mport, + u16 destid, u64 rstart) +{ + struct tsi721_device *priv = mport->priv; + struct tsi721_ob_win *ob_win; + int i; + + tsi_debug(OBW, &priv->pdev->dev, "did=%d ra=0x%llx", destid, rstart); + + for (i = 0; i < TSI721_OBWIN_NUM; i++) { + ob_win = &priv->ob_win[i]; + + if (ob_win->active && + ob_win->destid == destid && ob_win->rstart == rstart) { + tsi_debug(OBW, &priv->pdev->dev, + "free OBW%d @%llx", i, ob_win->base); + ob_win->active = false; + iowrite32(0, priv->regs + TSI721_OBWINLB(i)); + ob_win->pbar->free += ob_win->size; + priv->obwin_cnt++; + break; + } + } +} + +/** + * tsi721_init_pc2sr_mapping - initializes outbound (PCIe->SRIO) + * translation regions. + * @priv: pointer to tsi721 private data + * + * Disables SREP translation regions. + */ +static void tsi721_init_pc2sr_mapping(struct tsi721_device *priv) +{ + int i, z; + u32 rval; + + /* Disable all PC2SR translation windows */ + for (i = 0; i < TSI721_OBWIN_NUM; i++) + iowrite32(0, priv->regs + TSI721_OBWINLB(i)); + + /* Initialize zone lookup tables to avoid ECC errors on reads */ + iowrite32(0, priv->regs + TSI721_LUT_DATA0); + iowrite32(0, priv->regs + TSI721_LUT_DATA1); + iowrite32(0, priv->regs + TSI721_LUT_DATA2); + + for (i = 0; i < TSI721_OBWIN_NUM; i++) { + for (z = 0; z < TSI721_PC2SR_ZONES; z++) { + while (ioread32(priv->regs + TSI721_ZONE_SEL) & + TSI721_ZONE_SEL_GO) { + udelay(1); + } + rval = TSI721_ZONE_SEL_GO | (i << 3) | z; + iowrite32(rval, priv->regs + TSI721_ZONE_SEL); + } + } + + if (priv->p2r_bar[0].size == 0 && priv->p2r_bar[1].size == 0) { + priv->obwin_cnt = 0; + return; + } + + priv->p2r_bar[0].free = priv->p2r_bar[0].size; + priv->p2r_bar[1].free = priv->p2r_bar[1].size; + + for (i = 0; i < TSI721_OBWIN_NUM; i++) + priv->ob_win[i].active = false; + + priv->obwin_cnt = TSI721_OBWIN_NUM; +} + +/** + * tsi721_rio_map_inb_mem -- Mapping inbound memory region. + * @mport: RapidIO master port + * @lstart: Local memory space start address. + * @rstart: RapidIO space start address. + * @size: The mapping region size. + * @flags: Flags for mapping. 0 for using default flags. + * + * Return: 0 -- Success. + * + * This function will create the inbound mapping + * from rstart to lstart. 
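The zone programming above splits each outbound window evenly across TSI721_PC2SR_ZONES translation zones, so every zone covers size/8 of RapidIO address space starting at rstart. A small worked sketch of that arithmetic (editorial; the constant is hard-coded to 8 here):

#include <stdint.h>

#define NUM_ZONES 8	/* stands in for TSI721_PC2SR_ZONES in this sketch */

/* RapidIO base address translated by zone 'zone' of a window at rstart. */
static uint64_t zone_rio_base(uint64_t rstart, uint32_t win_size, int zone)
{
	uint32_t zsize = win_size / NUM_ZONES;	/* 0x20000 for a 1 MiB window */

	return rstart + (uint64_t)zone * zsize;
}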
+ */ +static int tsi721_rio_map_inb_mem(struct rio_mport *mport, dma_addr_t lstart, + u64 rstart, u64 size, u32 flags) +{ + struct tsi721_device *priv = mport->priv; + int i, avail = -1; + u32 regval; + struct tsi721_ib_win *ib_win; + bool direct = (lstart == rstart); + u64 ibw_size; + dma_addr_t loc_start; + u64 ibw_start; + struct tsi721_ib_win_mapping *map = NULL; + int ret = -EBUSY; + + /* Max IBW size supported by HW is 16GB */ + if (size > 0x400000000UL) + return -EINVAL; + + if (direct) { + /* Calculate minimal acceptable window size and base address */ + + ibw_size = roundup_pow_of_two(size); + ibw_start = lstart & ~(ibw_size - 1); + + tsi_debug(IBW, &priv->pdev->dev, + "Direct (RIO_0x%llx -> PCIe_%pad), size=0x%llx, ibw_start = 0x%llx", + rstart, &lstart, size, ibw_start); + + while ((lstart + size) > (ibw_start + ibw_size)) { + ibw_size *= 2; + ibw_start = lstart & ~(ibw_size - 1); + /* Check for crossing IBW max size 16GB */ + if (ibw_size > 0x400000000UL) + return -EBUSY; + } + + loc_start = ibw_start; + + map = kzalloc(sizeof(struct tsi721_ib_win_mapping), GFP_ATOMIC); + if (map == NULL) + return -ENOMEM; + + } else { + tsi_debug(IBW, &priv->pdev->dev, + "Translated (RIO_0x%llx -> PCIe_%pad), size=0x%llx", + rstart, &lstart, size); + + if (!is_power_of_2(size) || size < 0x1000 || + ((u64)lstart & (size - 1)) || (rstart & (size - 1))) + return -EINVAL; + if (priv->ibwin_cnt == 0) + return -EBUSY; + ibw_start = rstart; + ibw_size = size; + loc_start = lstart; + } + + /* + * Scan for overlapping with active regions and mark the first available + * IB window at the same time. + */ + for (i = 0; i < TSI721_IBWIN_NUM; i++) { + ib_win = &priv->ib_win[i]; + + if (!ib_win->active) { + if (avail == -1) { + avail = i; + ret = 0; + } + } else if (ibw_start < (ib_win->rstart + ib_win->size) && + (ibw_start + ibw_size) > ib_win->rstart) { + /* Return error if address translation involved */ + if (!direct || ib_win->xlat) { + ret = -EFAULT; + break; + } + + /* + * Direct mappings usually are larger than originally + * requested fragments - check if this new request fits + * into it. + */ + if (rstart >= ib_win->rstart && + (rstart + size) <= (ib_win->rstart + + ib_win->size)) { + /* We are in - no further mapping required */ + map->lstart = lstart; + list_add_tail(&map->node, &ib_win->mappings); + return 0; + } + + ret = -EFAULT; + break; + } + } + + if (ret) + goto out; + i = avail; + + /* Sanity check: available IB window must be disabled at this point */ + regval = ioread32(priv->regs + TSI721_IBWIN_LB(i)); + if (WARN_ON(regval & TSI721_IBWIN_LB_WEN)) { + ret = -EIO; + goto out; + } + + ib_win = &priv->ib_win[i]; + ib_win->active = true; + ib_win->rstart = ibw_start; + ib_win->lstart = loc_start; + ib_win->size = ibw_size; + ib_win->xlat = (lstart != rstart); + INIT_LIST_HEAD(&ib_win->mappings); + + /* + * When using direct IBW mapping and have larger than requested IBW size + * we can have multiple local memory blocks mapped through the same IBW + * To handle this situation we maintain list of "clients" for such IBWs. 
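For the direct-mapping branch above, the inbound window is widened to a power of two that both aligns to and covers the requested range. As a worked example: lstart = 0x12340000 with size = 0x30000 gives ibw_size = roundup_pow_of_two(0x30000) = 0x40000 and ibw_start = lstart & ~(ibw_size - 1) = 0x12340000; because lstart + size = 0x12370000 stays below ibw_start + ibw_size = 0x12380000, no further doubling is needed and a 256 KiB window is programmed.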
+ */ + if (direct) { + map->lstart = lstart; + list_add_tail(&map->node, &ib_win->mappings); + } + + iowrite32(TSI721_IBWIN_SIZE(ibw_size) << 8, + priv->regs + TSI721_IBWIN_SZ(i)); + + iowrite32(((u64)loc_start >> 32), priv->regs + TSI721_IBWIN_TUA(i)); + iowrite32(((u64)loc_start & TSI721_IBWIN_TLA_ADD), + priv->regs + TSI721_IBWIN_TLA(i)); + + iowrite32(ibw_start >> 32, priv->regs + TSI721_IBWIN_UB(i)); + iowrite32((ibw_start & TSI721_IBWIN_LB_BA) | TSI721_IBWIN_LB_WEN, + priv->regs + TSI721_IBWIN_LB(i)); + + priv->ibwin_cnt--; + + tsi_debug(IBW, &priv->pdev->dev, + "Configured IBWIN%d (RIO_0x%llx -> PCIe_%pad), size=0x%llx", + i, ibw_start, &loc_start, ibw_size); + + return 0; +out: + kfree(map); + return ret; +} + +/** + * tsi721_rio_unmap_inb_mem -- Unmapping inbound memory region. + * @mport: RapidIO master port + * @lstart: Local memory space start address. + */ +static void tsi721_rio_unmap_inb_mem(struct rio_mport *mport, + dma_addr_t lstart) +{ + struct tsi721_device *priv = mport->priv; + struct tsi721_ib_win *ib_win; + int i; + + tsi_debug(IBW, &priv->pdev->dev, + "Unmap IBW mapped to PCIe_%pad", &lstart); + + /* Search for matching active inbound translation window */ + for (i = 0; i < TSI721_IBWIN_NUM; i++) { + ib_win = &priv->ib_win[i]; + + /* Address translating IBWs must to be an exact march */ + if (!ib_win->active || + (ib_win->xlat && lstart != ib_win->lstart)) + continue; + + if (lstart >= ib_win->lstart && + lstart < (ib_win->lstart + ib_win->size)) { + + if (!ib_win->xlat) { + struct tsi721_ib_win_mapping *map; + int found = 0; + + list_for_each_entry(map, + &ib_win->mappings, node) { + if (map->lstart == lstart) { + list_del(&map->node); + kfree(map); + found = 1; + break; + } + } + + if (!found) + continue; + + if (!list_empty(&ib_win->mappings)) + break; + } + + tsi_debug(IBW, &priv->pdev->dev, "Disable IBWIN_%d", i); + iowrite32(0, priv->regs + TSI721_IBWIN_LB(i)); + ib_win->active = false; + priv->ibwin_cnt++; + break; + } + } + + if (i == TSI721_IBWIN_NUM) + tsi_debug(IBW, &priv->pdev->dev, + "IB window mapped to %pad not found", &lstart); +} + +/** + * tsi721_init_sr2pc_mapping - initializes inbound (SRIO->PCIe) + * translation regions. + * @priv: pointer to tsi721 private data + * + * Disables inbound windows. + */ +static void tsi721_init_sr2pc_mapping(struct tsi721_device *priv) +{ + int i; + + /* Disable all SR2PC inbound windows */ + for (i = 0; i < TSI721_IBWIN_NUM; i++) + iowrite32(0, priv->regs + TSI721_IBWIN_LB(i)); + priv->ibwin_cnt = TSI721_IBWIN_NUM; +} + +/* + * tsi721_close_sr2pc_mapping - closes all active inbound (SRIO->PCIe) + * translation regions. + * @priv: pointer to tsi721 device private data + */ +static void tsi721_close_sr2pc_mapping(struct tsi721_device *priv) +{ + struct tsi721_ib_win *ib_win; + int i; + + /* Disable all active SR2PC inbound windows */ + for (i = 0; i < TSI721_IBWIN_NUM; i++) { + ib_win = &priv->ib_win[i]; + if (ib_win->active) { + iowrite32(0, priv->regs + TSI721_IBWIN_LB(i)); + ib_win->active = false; + } + } +} + +/** + * tsi721_port_write_init - Inbound port write interface init + * @priv: pointer to tsi721 private data + * + * Initializes inbound port write handler. + * Returns %0 on success or %-ENOMEM on failure. 
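A kernel client typically lands in tsi721_rio_map_inb_mem() through the generic rio_map_inb_region() helper; for the translated (non-direct) branch the size must be a power of two of at least 0x1000 with both addresses aligned to it, per the checks above. A sketch, with the helper's signature assumed from drivers/rapidio/rio.c:

#include <linux/dma-mapping.h>
#include <linux/gfp.h>
#include <linux/rio.h>
#include <linux/rio_drv.h>

/* Expose a coherent buffer at a fixed RapidIO address (translated IBW). */
static void *expose_inbound_buffer(struct rio_mport *mport,
				   struct device *dev, u64 rio_base,
				   u32 size, dma_addr_t *dma)
{
	void *buf = dma_alloc_coherent(dev, size, dma, GFP_KERNEL);

	if (!buf)
		return NULL;
	if (rio_map_inb_region(mport, *dma, rio_base, size, 0)) {
		dma_free_coherent(dev, size, buf, *dma);
		return NULL;
	}
	return buf;
}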
+ */ +static int tsi721_port_write_init(struct tsi721_device *priv) +{ + priv->pw_discard_count = 0; + INIT_WORK(&priv->pw_work, tsi721_pw_dpc); + spin_lock_init(&priv->pw_fifo_lock); + if (kfifo_alloc(&priv->pw_fifo, + TSI721_RIO_PW_MSG_SIZE * 32, GFP_KERNEL)) { + tsi_err(&priv->pdev->dev, "PW FIFO allocation failed"); + return -ENOMEM; + } + + /* Use reliable port-write capture mode */ + iowrite32(TSI721_RIO_PW_CTL_PWC_REL, priv->regs + TSI721_RIO_PW_CTL); + return 0; +} + +static void tsi721_port_write_free(struct tsi721_device *priv) +{ + kfifo_free(&priv->pw_fifo); +} + +static int tsi721_doorbell_init(struct tsi721_device *priv) +{ + /* Outbound Doorbells do not require any setup. + * Tsi721 uses dedicated PCI BAR1 to generate doorbells. + * That BAR1 was mapped during the probe routine. + */ + + /* Initialize Inbound Doorbell processing DPC and queue */ + priv->db_discard_count = 0; + INIT_WORK(&priv->idb_work, tsi721_db_dpc); + + /* Allocate buffer for inbound doorbells queue */ + priv->idb_base = dma_alloc_coherent(&priv->pdev->dev, + IDB_QSIZE * TSI721_IDB_ENTRY_SIZE, + &priv->idb_dma, GFP_KERNEL); + if (!priv->idb_base) + return -ENOMEM; + + tsi_debug(DBELL, &priv->pdev->dev, + "Allocated IDB buffer @ %p (phys = %pad)", + priv->idb_base, &priv->idb_dma); + + iowrite32(TSI721_IDQ_SIZE_VAL(IDB_QSIZE), + priv->regs + TSI721_IDQ_SIZE(IDB_QUEUE)); + iowrite32(((u64)priv->idb_dma >> 32), + priv->regs + TSI721_IDQ_BASEU(IDB_QUEUE)); + iowrite32(((u64)priv->idb_dma & TSI721_IDQ_BASEL_ADDR), + priv->regs + TSI721_IDQ_BASEL(IDB_QUEUE)); + /* Enable accepting all inbound doorbells */ + iowrite32(0, priv->regs + TSI721_IDQ_MASK(IDB_QUEUE)); + + iowrite32(TSI721_IDQ_INIT, priv->regs + TSI721_IDQ_CTL(IDB_QUEUE)); + + iowrite32(0, priv->regs + TSI721_IDQ_RP(IDB_QUEUE)); + + return 0; +} + +static void tsi721_doorbell_free(struct tsi721_device *priv) +{ + if (priv->idb_base == NULL) + return; + + /* Free buffer allocated for inbound doorbell queue */ + dma_free_coherent(&priv->pdev->dev, IDB_QSIZE * TSI721_IDB_ENTRY_SIZE, + priv->idb_base, priv->idb_dma); + priv->idb_base = NULL; +} + +/** + * tsi721_bdma_maint_init - Initialize maintenance request BDMA channel. + * @priv: pointer to tsi721 private data + * + * Initialize BDMA channel allocated for RapidIO maintenance read/write + * request generation + * Returns %0 on success or %-ENOMEM on failure. + */ +static int tsi721_bdma_maint_init(struct tsi721_device *priv) +{ + struct tsi721_dma_desc *bd_ptr; + u64 *sts_ptr; + dma_addr_t bd_phys, sts_phys; + int sts_size; + int bd_num = 2; + void __iomem *regs; + + tsi_debug(MAINT, &priv->pdev->dev, + "Init BDMA_%d Maintenance requests", TSI721_DMACH_MAINT); + + /* + * Initialize DMA channel for maintenance requests + */ + + priv->mdma.ch_id = TSI721_DMACH_MAINT; + regs = priv->regs + TSI721_DMAC_BASE(TSI721_DMACH_MAINT); + + /* Allocate space for DMA descriptors */ + bd_ptr = dma_alloc_coherent(&priv->pdev->dev, + bd_num * sizeof(struct tsi721_dma_desc), + &bd_phys, GFP_KERNEL); + if (!bd_ptr) + return -ENOMEM; + + priv->mdma.bd_num = bd_num; + priv->mdma.bd_phys = bd_phys; + priv->mdma.bd_base = bd_ptr; + + tsi_debug(MAINT, &priv->pdev->dev, "DMA descriptors @ %p (phys = %pad)", + bd_ptr, &bd_phys); + + /* Allocate space for descriptor status FIFO */ + sts_size = (bd_num >= TSI721_DMA_MINSTSSZ) ? 
+ bd_num : TSI721_DMA_MINSTSSZ; + sts_size = roundup_pow_of_two(sts_size); + sts_ptr = dma_alloc_coherent(&priv->pdev->dev, + sts_size * sizeof(struct tsi721_dma_sts), + &sts_phys, GFP_KERNEL); + if (!sts_ptr) { + /* Free space allocated for DMA descriptors */ + dma_free_coherent(&priv->pdev->dev, + bd_num * sizeof(struct tsi721_dma_desc), + bd_ptr, bd_phys); + priv->mdma.bd_base = NULL; + return -ENOMEM; + } + + priv->mdma.sts_phys = sts_phys; + priv->mdma.sts_base = sts_ptr; + priv->mdma.sts_size = sts_size; + + tsi_debug(MAINT, &priv->pdev->dev, + "desc status FIFO @ %p (phys = %pad) size=0x%x", + sts_ptr, &sts_phys, sts_size); + + /* Initialize DMA descriptors ring */ + bd_ptr[bd_num - 1].type_id = cpu_to_le32(DTYPE3 << 29); + bd_ptr[bd_num - 1].next_lo = cpu_to_le32((u64)bd_phys & + TSI721_DMAC_DPTRL_MASK); + bd_ptr[bd_num - 1].next_hi = cpu_to_le32((u64)bd_phys >> 32); + + /* Setup DMA descriptor pointers */ + iowrite32(((u64)bd_phys >> 32), regs + TSI721_DMAC_DPTRH); + iowrite32(((u64)bd_phys & TSI721_DMAC_DPTRL_MASK), + regs + TSI721_DMAC_DPTRL); + + /* Setup descriptor status FIFO */ + iowrite32(((u64)sts_phys >> 32), regs + TSI721_DMAC_DSBH); + iowrite32(((u64)sts_phys & TSI721_DMAC_DSBL_MASK), + regs + TSI721_DMAC_DSBL); + iowrite32(TSI721_DMAC_DSSZ_SIZE(sts_size), + regs + TSI721_DMAC_DSSZ); + + /* Clear interrupt bits */ + iowrite32(TSI721_DMAC_INT_ALL, regs + TSI721_DMAC_INT); + + ioread32(regs + TSI721_DMAC_INT); + + /* Toggle DMA channel initialization */ + iowrite32(TSI721_DMAC_CTL_INIT, regs + TSI721_DMAC_CTL); + ioread32(regs + TSI721_DMAC_CTL); + udelay(10); + + return 0; +} + +static int tsi721_bdma_maint_free(struct tsi721_device *priv) +{ + u32 ch_stat; + struct tsi721_bdma_maint *mdma = &priv->mdma; + void __iomem *regs = priv->regs + TSI721_DMAC_BASE(mdma->ch_id); + + if (mdma->bd_base == NULL) + return 0; + + /* Check if DMA channel still running */ + ch_stat = ioread32(regs + TSI721_DMAC_STS); + if (ch_stat & TSI721_DMAC_STS_RUN) + return -EFAULT; + + /* Put DMA channel into init state */ + iowrite32(TSI721_DMAC_CTL_INIT, regs + TSI721_DMAC_CTL); + + /* Free space allocated for DMA descriptors */ + dma_free_coherent(&priv->pdev->dev, + mdma->bd_num * sizeof(struct tsi721_dma_desc), + mdma->bd_base, mdma->bd_phys); + mdma->bd_base = NULL; + + /* Free space allocated for status FIFO */ + dma_free_coherent(&priv->pdev->dev, + mdma->sts_size * sizeof(struct tsi721_dma_sts), + mdma->sts_base, mdma->sts_phys); + mdma->sts_base = NULL; + return 0; +} + +/* Enable Inbound Messaging Interrupts */ +static void +tsi721_imsg_interrupt_enable(struct tsi721_device *priv, int ch, + u32 inte_mask) +{ + u32 rval; + + if (!inte_mask) + return; + + /* Clear pending Inbound Messaging interrupts */ + iowrite32(inte_mask, priv->regs + TSI721_IBDMAC_INT(ch)); + + /* Enable Inbound Messaging interrupts */ + rval = ioread32(priv->regs + TSI721_IBDMAC_INTE(ch)); + iowrite32(rval | inte_mask, priv->regs + TSI721_IBDMAC_INTE(ch)); + + if (priv->flags & TSI721_USING_MSIX) + return; /* Finished if we are in MSI-X mode */ + + /* + * For MSI and INTA interrupt signalling we need to enable next levels + */ + + /* Enable Device Channel Interrupt */ + rval = ioread32(priv->regs + TSI721_DEV_CHAN_INTE); + iowrite32(rval | TSI721_INT_IMSG_CHAN(ch), + priv->regs + TSI721_DEV_CHAN_INTE); +} + +/* Disable Inbound Messaging Interrupts */ +static void +tsi721_imsg_interrupt_disable(struct tsi721_device *priv, int ch, + u32 inte_mask) +{ + u32 rval; + + if (!inte_mask) + return; + + /* Clear pending 
Inbound Messaging interrupts */ + iowrite32(inte_mask, priv->regs + TSI721_IBDMAC_INT(ch)); + + /* Disable Inbound Messaging interrupts */ + rval = ioread32(priv->regs + TSI721_IBDMAC_INTE(ch)); + rval &= ~inte_mask; + iowrite32(rval, priv->regs + TSI721_IBDMAC_INTE(ch)); + + if (priv->flags & TSI721_USING_MSIX) + return; /* Finished if we are in MSI-X mode */ + + /* + * For MSI and INTA interrupt signalling we need to disable next levels + */ + + /* Disable Device Channel Interrupt */ + rval = ioread32(priv->regs + TSI721_DEV_CHAN_INTE); + rval &= ~TSI721_INT_IMSG_CHAN(ch); + iowrite32(rval, priv->regs + TSI721_DEV_CHAN_INTE); +} + +/* Enable Outbound Messaging interrupts */ +static void +tsi721_omsg_interrupt_enable(struct tsi721_device *priv, int ch, + u32 inte_mask) +{ + u32 rval; + + if (!inte_mask) + return; + + /* Clear pending Outbound Messaging interrupts */ + iowrite32(inte_mask, priv->regs + TSI721_OBDMAC_INT(ch)); + + /* Enable Outbound Messaging channel interrupts */ + rval = ioread32(priv->regs + TSI721_OBDMAC_INTE(ch)); + iowrite32(rval | inte_mask, priv->regs + TSI721_OBDMAC_INTE(ch)); + + if (priv->flags & TSI721_USING_MSIX) + return; /* Finished if we are in MSI-X mode */ + + /* + * For MSI and INTA interrupt signalling we need to enable next levels + */ + + /* Enable Device Channel Interrupt */ + rval = ioread32(priv->regs + TSI721_DEV_CHAN_INTE); + iowrite32(rval | TSI721_INT_OMSG_CHAN(ch), + priv->regs + TSI721_DEV_CHAN_INTE); +} + +/* Disable Outbound Messaging interrupts */ +static void +tsi721_omsg_interrupt_disable(struct tsi721_device *priv, int ch, + u32 inte_mask) +{ + u32 rval; + + if (!inte_mask) + return; + + /* Clear pending Outbound Messaging interrupts */ + iowrite32(inte_mask, priv->regs + TSI721_OBDMAC_INT(ch)); + + /* Disable Outbound Messaging interrupts */ + rval = ioread32(priv->regs + TSI721_OBDMAC_INTE(ch)); + rval &= ~inte_mask; + iowrite32(rval, priv->regs + TSI721_OBDMAC_INTE(ch)); + + if (priv->flags & TSI721_USING_MSIX) + return; /* Finished if we are in MSI-X mode */ + + /* + * For MSI and INTA interrupt signalling we need to disable next levels + */ + + /* Disable Device Channel Interrupt */ + rval = ioread32(priv->regs + TSI721_DEV_CHAN_INTE); + rval &= ~TSI721_INT_OMSG_CHAN(ch); + iowrite32(rval, priv->regs + TSI721_DEV_CHAN_INTE); +} + +/** + * tsi721_add_outb_message - Add message to the Tsi721 outbound message queue + * @mport: Master port with outbound message queue + * @rdev: Target of outbound message + * @mbox: Outbound mailbox + * @buffer: Message to add to outbound queue + * @len: Length of message + */ +static int +tsi721_add_outb_message(struct rio_mport *mport, struct rio_dev *rdev, int mbox, + void *buffer, size_t len) +{ + struct tsi721_device *priv = mport->priv; + struct tsi721_omsg_desc *desc; + u32 tx_slot; + unsigned long flags; + + if (!priv->omsg_init[mbox] || + len > TSI721_MSG_MAX_SIZE || len < 8) + return -EINVAL; + + spin_lock_irqsave(&priv->omsg_ring[mbox].lock, flags); + + tx_slot = priv->omsg_ring[mbox].tx_slot; + + /* Copy copy message into transfer buffer */ + memcpy(priv->omsg_ring[mbox].omq_base[tx_slot], buffer, len); + + if (len & 0x7) + len += 8; + + /* Build descriptor associated with buffer */ + desc = priv->omsg_ring[mbox].omd_base; + desc[tx_slot].type_id = cpu_to_le32((DTYPE4 << 29) | rdev->destid); +#ifdef TSI721_OMSG_DESC_INT + /* Request IOF_DONE interrupt generation for each N-th frame in queue */ + if (tx_slot % 4 == 0) + desc[tx_slot].type_id |= cpu_to_le32(TSI721_OMD_IOF); +#endif + 
desc[tx_slot].msg_info = + cpu_to_le32((mport->sys_size << 26) | (mbox << 22) | + (0xe << 12) | (len & 0xff8)); + desc[tx_slot].bufptr_lo = + cpu_to_le32((u64)priv->omsg_ring[mbox].omq_phys[tx_slot] & + 0xffffffff); + desc[tx_slot].bufptr_hi = + cpu_to_le32((u64)priv->omsg_ring[mbox].omq_phys[tx_slot] >> 32); + + priv->omsg_ring[mbox].wr_count++; + + /* Go to next descriptor */ + if (++priv->omsg_ring[mbox].tx_slot == priv->omsg_ring[mbox].size) { + priv->omsg_ring[mbox].tx_slot = 0; + /* Move through the ring link descriptor at the end */ + priv->omsg_ring[mbox].wr_count++; + } + + mb(); + + /* Set new write count value */ + iowrite32(priv->omsg_ring[mbox].wr_count, + priv->regs + TSI721_OBDMAC_DWRCNT(mbox)); + ioread32(priv->regs + TSI721_OBDMAC_DWRCNT(mbox)); + + spin_unlock_irqrestore(&priv->omsg_ring[mbox].lock, flags); + + return 0; +} + +/** + * tsi721_omsg_handler - Outbound Message Interrupt Handler + * @priv: pointer to tsi721 private data + * @ch: number of OB MSG channel to service + * + * Services channel interrupts from outbound messaging engine. + */ +static void tsi721_omsg_handler(struct tsi721_device *priv, int ch) +{ + u32 omsg_int; + struct rio_mport *mport = &priv->mport; + void *dev_id = NULL; + u32 tx_slot = 0xffffffff; + int do_callback = 0; + + spin_lock(&priv->omsg_ring[ch].lock); + + omsg_int = ioread32(priv->regs + TSI721_OBDMAC_INT(ch)); + + if (omsg_int & TSI721_OBDMAC_INT_ST_FULL) + tsi_info(&priv->pdev->dev, + "OB MBOX%d: Status FIFO is full", ch); + + if (omsg_int & (TSI721_OBDMAC_INT_DONE | TSI721_OBDMAC_INT_IOF_DONE)) { + u32 srd_ptr; + u64 *sts_ptr, last_ptr = 0, prev_ptr = 0; + int i, j; + + /* + * Find last successfully processed descriptor + */ + + /* Check and clear descriptor status FIFO entries */ + srd_ptr = priv->omsg_ring[ch].sts_rdptr; + sts_ptr = priv->omsg_ring[ch].sts_base; + j = srd_ptr * 8; + while (sts_ptr[j]) { + for (i = 0; i < 8 && sts_ptr[j]; i++, j++) { + prev_ptr = last_ptr; + last_ptr = le64_to_cpu(sts_ptr[j]); + sts_ptr[j] = 0; + } + + ++srd_ptr; + srd_ptr %= priv->omsg_ring[ch].sts_size; + j = srd_ptr * 8; + } + + if (last_ptr == 0) + goto no_sts_update; + + priv->omsg_ring[ch].sts_rdptr = srd_ptr; + iowrite32(srd_ptr, priv->regs + TSI721_OBDMAC_DSRP(ch)); + + if (!mport->outb_msg[ch].mcback) + goto no_sts_update; + + /* Inform upper layer about transfer completion */ + + tx_slot = (last_ptr - (u64)priv->omsg_ring[ch].omd_phys)/ + sizeof(struct tsi721_omsg_desc); + + /* + * Check if this is a Link Descriptor (LD). + * If yes, ignore LD and use descriptor processed + * before LD. 
+ */ + if (tx_slot == priv->omsg_ring[ch].size) { + if (prev_ptr) + tx_slot = (prev_ptr - + (u64)priv->omsg_ring[ch].omd_phys)/ + sizeof(struct tsi721_omsg_desc); + else + goto no_sts_update; + } + + if (tx_slot >= priv->omsg_ring[ch].size) + tsi_debug(OMSG, &priv->pdev->dev, + "OB_MSG tx_slot=%x > size=%x", + tx_slot, priv->omsg_ring[ch].size); + WARN_ON(tx_slot >= priv->omsg_ring[ch].size); + + /* Move slot index to the next message to be sent */ + ++tx_slot; + if (tx_slot == priv->omsg_ring[ch].size) + tx_slot = 0; + + dev_id = priv->omsg_ring[ch].dev_id; + do_callback = 1; + } + +no_sts_update: + + if (omsg_int & TSI721_OBDMAC_INT_ERROR) { + /* + * Outbound message operation aborted due to error, + * reinitialize OB MSG channel + */ + + tsi_debug(OMSG, &priv->pdev->dev, "OB MSG ABORT ch_stat=%x", + ioread32(priv->regs + TSI721_OBDMAC_STS(ch))); + + iowrite32(TSI721_OBDMAC_INT_ERROR, + priv->regs + TSI721_OBDMAC_INT(ch)); + iowrite32(TSI721_OBDMAC_CTL_RETRY_THR | TSI721_OBDMAC_CTL_INIT, + priv->regs + TSI721_OBDMAC_CTL(ch)); + ioread32(priv->regs + TSI721_OBDMAC_CTL(ch)); + + /* Inform upper level to clear all pending tx slots */ + dev_id = priv->omsg_ring[ch].dev_id; + tx_slot = priv->omsg_ring[ch].tx_slot; + do_callback = 1; + + /* Synch tx_slot tracking */ + iowrite32(priv->omsg_ring[ch].tx_slot, + priv->regs + TSI721_OBDMAC_DRDCNT(ch)); + ioread32(priv->regs + TSI721_OBDMAC_DRDCNT(ch)); + priv->omsg_ring[ch].wr_count = priv->omsg_ring[ch].tx_slot; + priv->omsg_ring[ch].sts_rdptr = 0; + } + + /* Clear channel interrupts */ + iowrite32(omsg_int, priv->regs + TSI721_OBDMAC_INT(ch)); + + if (!(priv->flags & TSI721_USING_MSIX)) { + u32 ch_inte; + + /* Re-enable channel interrupts */ + ch_inte = ioread32(priv->regs + TSI721_DEV_CHAN_INTE); + ch_inte |= TSI721_INT_OMSG_CHAN(ch); + iowrite32(ch_inte, priv->regs + TSI721_DEV_CHAN_INTE); + } + + spin_unlock(&priv->omsg_ring[ch].lock); + + if (mport->outb_msg[ch].mcback && do_callback) + mport->outb_msg[ch].mcback(mport, dev_id, ch, tx_slot); +} + +/** + * tsi721_open_outb_mbox - Initialize Tsi721 outbound mailbox + * @mport: Master port implementing Outbound Messaging Engine + * @dev_id: Device specific pointer to pass on event + * @mbox: Mailbox to open + * @entries: Number of entries in the outbound mailbox ring + */ +static int tsi721_open_outb_mbox(struct rio_mport *mport, void *dev_id, + int mbox, int entries) +{ + struct tsi721_device *priv = mport->priv; + struct tsi721_omsg_desc *bd_ptr; + int i, rc = 0; + + if ((entries < TSI721_OMSGD_MIN_RING_SIZE) || + (entries > (TSI721_OMSGD_RING_SIZE)) || + (!is_power_of_2(entries)) || mbox >= RIO_MAX_MBOX) { + rc = -EINVAL; + goto out; + } + + if ((mbox_sel & (1 << mbox)) == 0) { + rc = -ENODEV; + goto out; + } + + priv->omsg_ring[mbox].dev_id = dev_id; + priv->omsg_ring[mbox].size = entries; + priv->omsg_ring[mbox].sts_rdptr = 0; + spin_lock_init(&priv->omsg_ring[mbox].lock); + + /* Outbound Msg Buffer allocation based on + the number of maximum descriptor entries */ + for (i = 0; i < entries; i++) { + priv->omsg_ring[mbox].omq_base[i] = + dma_alloc_coherent( + &priv->pdev->dev, TSI721_MSG_BUFFER_SIZE, + &priv->omsg_ring[mbox].omq_phys[i], + GFP_KERNEL); + if (priv->omsg_ring[mbox].omq_base[i] == NULL) { + tsi_debug(OMSG, &priv->pdev->dev, + "ENOMEM for OB_MSG_%d data buffer", mbox); + rc = -ENOMEM; + goto out_buf; + } + } + + /* Outbound message descriptor allocation */ + priv->omsg_ring[mbox].omd_base = dma_alloc_coherent( + &priv->pdev->dev, + (entries + 1) * sizeof(struct 
tsi721_omsg_desc), + &priv->omsg_ring[mbox].omd_phys, GFP_KERNEL); + if (priv->omsg_ring[mbox].omd_base == NULL) { + tsi_debug(OMSG, &priv->pdev->dev, + "ENOMEM for OB_MSG_%d descriptor memory", mbox); + rc = -ENOMEM; + goto out_buf; + } + + priv->omsg_ring[mbox].tx_slot = 0; + + /* Outbound message descriptor status FIFO allocation */ + priv->omsg_ring[mbox].sts_size = roundup_pow_of_two(entries + 1); + priv->omsg_ring[mbox].sts_base = dma_alloc_coherent(&priv->pdev->dev, + priv->omsg_ring[mbox].sts_size * sizeof(struct tsi721_dma_sts), + &priv->omsg_ring[mbox].sts_phys, + GFP_KERNEL); + if (priv->omsg_ring[mbox].sts_base == NULL) { + tsi_debug(OMSG, &priv->pdev->dev, + "ENOMEM for OB_MSG_%d status FIFO", mbox); + rc = -ENOMEM; + goto out_desc; + } + + /* + * Configure Outbound Messaging Engine + */ + + /* Setup Outbound Message descriptor pointer */ + iowrite32(((u64)priv->omsg_ring[mbox].omd_phys >> 32), + priv->regs + TSI721_OBDMAC_DPTRH(mbox)); + iowrite32(((u64)priv->omsg_ring[mbox].omd_phys & + TSI721_OBDMAC_DPTRL_MASK), + priv->regs + TSI721_OBDMAC_DPTRL(mbox)); + + /* Setup Outbound Message descriptor status FIFO */ + iowrite32(((u64)priv->omsg_ring[mbox].sts_phys >> 32), + priv->regs + TSI721_OBDMAC_DSBH(mbox)); + iowrite32(((u64)priv->omsg_ring[mbox].sts_phys & + TSI721_OBDMAC_DSBL_MASK), + priv->regs + TSI721_OBDMAC_DSBL(mbox)); + iowrite32(TSI721_DMAC_DSSZ_SIZE(priv->omsg_ring[mbox].sts_size), + priv->regs + (u32)TSI721_OBDMAC_DSSZ(mbox)); + + /* Enable interrupts */ + +#ifdef CONFIG_PCI_MSI + if (priv->flags & TSI721_USING_MSIX) { + int idx = TSI721_VECT_OMB0_DONE + mbox; + + /* Request interrupt service if we are in MSI-X mode */ + rc = request_irq(priv->msix[idx].vector, tsi721_omsg_msix, 0, + priv->msix[idx].irq_name, (void *)priv); + + if (rc) { + tsi_debug(OMSG, &priv->pdev->dev, + "Unable to get MSI-X IRQ for OBOX%d-DONE", + mbox); + goto out_stat; + } + + idx = TSI721_VECT_OMB0_INT + mbox; + rc = request_irq(priv->msix[idx].vector, tsi721_omsg_msix, 0, + priv->msix[idx].irq_name, (void *)priv); + + if (rc) { + tsi_debug(OMSG, &priv->pdev->dev, + "Unable to get MSI-X IRQ for MBOX%d-INT", mbox); + idx = TSI721_VECT_OMB0_DONE + mbox; + free_irq(priv->msix[idx].vector, (void *)priv); + goto out_stat; + } + } +#endif /* CONFIG_PCI_MSI */ + + tsi721_omsg_interrupt_enable(priv, mbox, TSI721_OBDMAC_INT_ALL); + + /* Initialize Outbound Message descriptors ring */ + bd_ptr = priv->omsg_ring[mbox].omd_base; + bd_ptr[entries].type_id = cpu_to_le32(DTYPE5 << 29); + bd_ptr[entries].msg_info = 0; + bd_ptr[entries].next_lo = + cpu_to_le32((u64)priv->omsg_ring[mbox].omd_phys & + TSI721_OBDMAC_DPTRL_MASK); + bd_ptr[entries].next_hi = + cpu_to_le32((u64)priv->omsg_ring[mbox].omd_phys >> 32); + priv->omsg_ring[mbox].wr_count = 0; + mb(); + + /* Initialize Outbound Message engine */ + iowrite32(TSI721_OBDMAC_CTL_RETRY_THR | TSI721_OBDMAC_CTL_INIT, + priv->regs + TSI721_OBDMAC_CTL(mbox)); + ioread32(priv->regs + TSI721_OBDMAC_DWRCNT(mbox)); + udelay(10); + + priv->omsg_init[mbox] = 1; + + return 0; + +#ifdef CONFIG_PCI_MSI +out_stat: + dma_free_coherent(&priv->pdev->dev, + priv->omsg_ring[mbox].sts_size * sizeof(struct tsi721_dma_sts), + priv->omsg_ring[mbox].sts_base, + priv->omsg_ring[mbox].sts_phys); + + priv->omsg_ring[mbox].sts_base = NULL; +#endif /* CONFIG_PCI_MSI */ + +out_desc: + dma_free_coherent(&priv->pdev->dev, + (entries + 1) * sizeof(struct tsi721_omsg_desc), + priv->omsg_ring[mbox].omd_base, + priv->omsg_ring[mbox].omd_phys); + + priv->omsg_ring[mbox].omd_base = NULL; + 
+out_buf: + for (i = 0; i < priv->omsg_ring[mbox].size; i++) { + if (priv->omsg_ring[mbox].omq_base[i]) { + dma_free_coherent(&priv->pdev->dev, + TSI721_MSG_BUFFER_SIZE, + priv->omsg_ring[mbox].omq_base[i], + priv->omsg_ring[mbox].omq_phys[i]); + + priv->omsg_ring[mbox].omq_base[i] = NULL; + } + } + +out: + return rc; +} + +/** + * tsi721_close_outb_mbox - Close Tsi721 outbound mailbox + * @mport: Master port implementing the outbound message unit + * @mbox: Mailbox to close + */ +static void tsi721_close_outb_mbox(struct rio_mport *mport, int mbox) +{ + struct tsi721_device *priv = mport->priv; + u32 i; + + if (!priv->omsg_init[mbox]) + return; + priv->omsg_init[mbox] = 0; + + /* Disable Interrupts */ + + tsi721_omsg_interrupt_disable(priv, mbox, TSI721_OBDMAC_INT_ALL); + +#ifdef CONFIG_PCI_MSI + if (priv->flags & TSI721_USING_MSIX) { + free_irq(priv->msix[TSI721_VECT_OMB0_DONE + mbox].vector, + (void *)priv); + free_irq(priv->msix[TSI721_VECT_OMB0_INT + mbox].vector, + (void *)priv); + } +#endif /* CONFIG_PCI_MSI */ + + /* Free OMSG Descriptor Status FIFO */ + dma_free_coherent(&priv->pdev->dev, + priv->omsg_ring[mbox].sts_size * sizeof(struct tsi721_dma_sts), + priv->omsg_ring[mbox].sts_base, + priv->omsg_ring[mbox].sts_phys); + + priv->omsg_ring[mbox].sts_base = NULL; + + /* Free OMSG descriptors */ + dma_free_coherent(&priv->pdev->dev, + (priv->omsg_ring[mbox].size + 1) * + sizeof(struct tsi721_omsg_desc), + priv->omsg_ring[mbox].omd_base, + priv->omsg_ring[mbox].omd_phys); + + priv->omsg_ring[mbox].omd_base = NULL; + + /* Free message buffers */ + for (i = 0; i < priv->omsg_ring[mbox].size; i++) { + if (priv->omsg_ring[mbox].omq_base[i]) { + dma_free_coherent(&priv->pdev->dev, + TSI721_MSG_BUFFER_SIZE, + priv->omsg_ring[mbox].omq_base[i], + priv->omsg_ring[mbox].omq_phys[i]); + + priv->omsg_ring[mbox].omq_base[i] = NULL; + } + } +} + +/** + * tsi721_imsg_handler - Inbound Message Interrupt Handler + * @priv: pointer to tsi721 private data + * @ch: inbound message channel number to service + * + * Services channel interrupts from inbound messaging engine. 
+ */ +static void tsi721_imsg_handler(struct tsi721_device *priv, int ch) +{ + u32 mbox = ch - 4; + u32 imsg_int; + struct rio_mport *mport = &priv->mport; + + spin_lock(&priv->imsg_ring[mbox].lock); + + imsg_int = ioread32(priv->regs + TSI721_IBDMAC_INT(ch)); + + if (imsg_int & TSI721_IBDMAC_INT_SRTO) + tsi_info(&priv->pdev->dev, "IB MBOX%d SRIO timeout", mbox); + + if (imsg_int & TSI721_IBDMAC_INT_PC_ERROR) + tsi_info(&priv->pdev->dev, "IB MBOX%d PCIe error", mbox); + + if (imsg_int & TSI721_IBDMAC_INT_FQ_LOW) + tsi_info(&priv->pdev->dev, "IB MBOX%d IB free queue low", mbox); + + /* Clear IB channel interrupts */ + iowrite32(imsg_int, priv->regs + TSI721_IBDMAC_INT(ch)); + + /* If an IB Msg is received notify the upper layer */ + if (imsg_int & TSI721_IBDMAC_INT_DQ_RCV && + mport->inb_msg[mbox].mcback) + mport->inb_msg[mbox].mcback(mport, + priv->imsg_ring[mbox].dev_id, mbox, -1); + + if (!(priv->flags & TSI721_USING_MSIX)) { + u32 ch_inte; + + /* Re-enable channel interrupts */ + ch_inte = ioread32(priv->regs + TSI721_DEV_CHAN_INTE); + ch_inte |= TSI721_INT_IMSG_CHAN(ch); + iowrite32(ch_inte, priv->regs + TSI721_DEV_CHAN_INTE); + } + + spin_unlock(&priv->imsg_ring[mbox].lock); +} + +/** + * tsi721_open_inb_mbox - Initialize Tsi721 inbound mailbox + * @mport: Master port implementing the Inbound Messaging Engine + * @dev_id: Device specific pointer to pass on event + * @mbox: Mailbox to open + * @entries: Number of entries in the inbound mailbox ring + */ +static int tsi721_open_inb_mbox(struct rio_mport *mport, void *dev_id, + int mbox, int entries) +{ + struct tsi721_device *priv = mport->priv; + int ch = mbox + 4; + int i; + u64 *free_ptr; + int rc = 0; + + if ((entries < TSI721_IMSGD_MIN_RING_SIZE) || + (entries > TSI721_IMSGD_RING_SIZE) || + (!is_power_of_2(entries)) || mbox >= RIO_MAX_MBOX) { + rc = -EINVAL; + goto out; + } + + if ((mbox_sel & (1 << mbox)) == 0) { + rc = -ENODEV; + goto out; + } + + /* Initialize IB Messaging Ring */ + priv->imsg_ring[mbox].dev_id = dev_id; + priv->imsg_ring[mbox].size = entries; + priv->imsg_ring[mbox].rx_slot = 0; + priv->imsg_ring[mbox].desc_rdptr = 0; + priv->imsg_ring[mbox].fq_wrptr = 0; + for (i = 0; i < priv->imsg_ring[mbox].size; i++) + priv->imsg_ring[mbox].imq_base[i] = NULL; + spin_lock_init(&priv->imsg_ring[mbox].lock); + + /* Allocate buffers for incoming messages */ + priv->imsg_ring[mbox].buf_base = + dma_alloc_coherent(&priv->pdev->dev, + entries * TSI721_MSG_BUFFER_SIZE, + &priv->imsg_ring[mbox].buf_phys, + GFP_KERNEL); + + if (priv->imsg_ring[mbox].buf_base == NULL) { + tsi_err(&priv->pdev->dev, + "Failed to allocate buffers for IB MBOX%d", mbox); + rc = -ENOMEM; + goto out; + } + + /* Allocate memory for circular free list */ + priv->imsg_ring[mbox].imfq_base = + dma_alloc_coherent(&priv->pdev->dev, + entries * 8, + &priv->imsg_ring[mbox].imfq_phys, + GFP_KERNEL); + + if (priv->imsg_ring[mbox].imfq_base == NULL) { + tsi_err(&priv->pdev->dev, + "Failed to allocate free queue for IB MBOX%d", mbox); + rc = -ENOMEM; + goto out_buf; + } + + /* Allocate memory for Inbound message descriptors */ + priv->imsg_ring[mbox].imd_base = + dma_alloc_coherent(&priv->pdev->dev, + entries * sizeof(struct tsi721_imsg_desc), + &priv->imsg_ring[mbox].imd_phys, GFP_KERNEL); + + if (priv->imsg_ring[mbox].imd_base == NULL) { + tsi_err(&priv->pdev->dev, + "Failed to allocate descriptor memory for IB MBOX%d", + mbox); + rc = -ENOMEM; + goto out_dma; + } + + /* Fill free buffer pointer list */ + free_ptr = priv->imsg_ring[mbox].imfq_base; + for (i = 0; i < 
entries; i++) + free_ptr[i] = cpu_to_le64( + (u64)(priv->imsg_ring[mbox].buf_phys) + + i * 0x1000); + + mb(); + + /* + * For mapping of inbound SRIO Messages into appropriate queues we need + * to set Inbound Device ID register in the messaging engine. We do it + * once when first inbound mailbox is requested. + */ + if (!(priv->flags & TSI721_IMSGID_SET)) { + iowrite32((u32)priv->mport.host_deviceid, + priv->regs + TSI721_IB_DEVID); + priv->flags |= TSI721_IMSGID_SET; + } + + /* + * Configure Inbound Messaging channel (ch = mbox + 4) + */ + + /* Setup Inbound Message free queue */ + iowrite32(((u64)priv->imsg_ring[mbox].imfq_phys >> 32), + priv->regs + TSI721_IBDMAC_FQBH(ch)); + iowrite32(((u64)priv->imsg_ring[mbox].imfq_phys & + TSI721_IBDMAC_FQBL_MASK), + priv->regs+TSI721_IBDMAC_FQBL(ch)); + iowrite32(TSI721_DMAC_DSSZ_SIZE(entries), + priv->regs + TSI721_IBDMAC_FQSZ(ch)); + + /* Setup Inbound Message descriptor queue */ + iowrite32(((u64)priv->imsg_ring[mbox].imd_phys >> 32), + priv->regs + TSI721_IBDMAC_DQBH(ch)); + iowrite32(((u32)priv->imsg_ring[mbox].imd_phys & + (u32)TSI721_IBDMAC_DQBL_MASK), + priv->regs+TSI721_IBDMAC_DQBL(ch)); + iowrite32(TSI721_DMAC_DSSZ_SIZE(entries), + priv->regs + TSI721_IBDMAC_DQSZ(ch)); + + /* Enable interrupts */ + +#ifdef CONFIG_PCI_MSI + if (priv->flags & TSI721_USING_MSIX) { + int idx = TSI721_VECT_IMB0_RCV + mbox; + + /* Request interrupt service if we are in MSI-X mode */ + rc = request_irq(priv->msix[idx].vector, tsi721_imsg_msix, 0, + priv->msix[idx].irq_name, (void *)priv); + + if (rc) { + tsi_debug(IMSG, &priv->pdev->dev, + "Unable to get MSI-X IRQ for IBOX%d-DONE", + mbox); + goto out_desc; + } + + idx = TSI721_VECT_IMB0_INT + mbox; + rc = request_irq(priv->msix[idx].vector, tsi721_imsg_msix, 0, + priv->msix[idx].irq_name, (void *)priv); + + if (rc) { + tsi_debug(IMSG, &priv->pdev->dev, + "Unable to get MSI-X IRQ for IBOX%d-INT", mbox); + free_irq( + priv->msix[TSI721_VECT_IMB0_RCV + mbox].vector, + (void *)priv); + goto out_desc; + } + } +#endif /* CONFIG_PCI_MSI */ + + tsi721_imsg_interrupt_enable(priv, ch, TSI721_IBDMAC_INT_ALL); + + /* Initialize Inbound Message Engine */ + iowrite32(TSI721_IBDMAC_CTL_INIT, priv->regs + TSI721_IBDMAC_CTL(ch)); + ioread32(priv->regs + TSI721_IBDMAC_CTL(ch)); + udelay(10); + priv->imsg_ring[mbox].fq_wrptr = entries - 1; + iowrite32(entries - 1, priv->regs + TSI721_IBDMAC_FQWP(ch)); + + priv->imsg_init[mbox] = 1; + return 0; + +#ifdef CONFIG_PCI_MSI +out_desc: + dma_free_coherent(&priv->pdev->dev, + priv->imsg_ring[mbox].size * sizeof(struct tsi721_imsg_desc), + priv->imsg_ring[mbox].imd_base, + priv->imsg_ring[mbox].imd_phys); + + priv->imsg_ring[mbox].imd_base = NULL; +#endif /* CONFIG_PCI_MSI */ + +out_dma: + dma_free_coherent(&priv->pdev->dev, + priv->imsg_ring[mbox].size * 8, + priv->imsg_ring[mbox].imfq_base, + priv->imsg_ring[mbox].imfq_phys); + + priv->imsg_ring[mbox].imfq_base = NULL; + +out_buf: + dma_free_coherent(&priv->pdev->dev, + priv->imsg_ring[mbox].size * TSI721_MSG_BUFFER_SIZE, + priv->imsg_ring[mbox].buf_base, + priv->imsg_ring[mbox].buf_phys); + + priv->imsg_ring[mbox].buf_base = NULL; + +out: + return rc; +} + +/** + * tsi721_close_inb_mbox - Shut down Tsi721 inbound mailbox + * @mport: Master port implementing the Inbound Messaging Engine + * @mbox: Mailbox to close + */ +static void tsi721_close_inb_mbox(struct rio_mport *mport, int mbox) +{ + struct tsi721_device *priv = mport->priv; + u32 rx_slot; + int ch = mbox + 4; + + if (!priv->imsg_init[mbox]) /* mbox isn't initialized yet */ + 
return; + priv->imsg_init[mbox] = 0; + + /* Disable Inbound Messaging Engine */ + + /* Disable Interrupts */ + tsi721_imsg_interrupt_disable(priv, ch, TSI721_OBDMAC_INT_MASK); + +#ifdef CONFIG_PCI_MSI + if (priv->flags & TSI721_USING_MSIX) { + free_irq(priv->msix[TSI721_VECT_IMB0_RCV + mbox].vector, + (void *)priv); + free_irq(priv->msix[TSI721_VECT_IMB0_INT + mbox].vector, + (void *)priv); + } +#endif /* CONFIG_PCI_MSI */ + + /* Clear Inbound Buffer Queue */ + for (rx_slot = 0; rx_slot < priv->imsg_ring[mbox].size; rx_slot++) + priv->imsg_ring[mbox].imq_base[rx_slot] = NULL; + + /* Free memory allocated for message buffers */ + dma_free_coherent(&priv->pdev->dev, + priv->imsg_ring[mbox].size * TSI721_MSG_BUFFER_SIZE, + priv->imsg_ring[mbox].buf_base, + priv->imsg_ring[mbox].buf_phys); + + priv->imsg_ring[mbox].buf_base = NULL; + + /* Free memory allocated for free pointr list */ + dma_free_coherent(&priv->pdev->dev, + priv->imsg_ring[mbox].size * 8, + priv->imsg_ring[mbox].imfq_base, + priv->imsg_ring[mbox].imfq_phys); + + priv->imsg_ring[mbox].imfq_base = NULL; + + /* Free memory allocated for RX descriptors */ + dma_free_coherent(&priv->pdev->dev, + priv->imsg_ring[mbox].size * sizeof(struct tsi721_imsg_desc), + priv->imsg_ring[mbox].imd_base, + priv->imsg_ring[mbox].imd_phys); + + priv->imsg_ring[mbox].imd_base = NULL; +} + +/** + * tsi721_add_inb_buffer - Add buffer to the Tsi721 inbound message queue + * @mport: Master port implementing the Inbound Messaging Engine + * @mbox: Inbound mailbox number + * @buf: Buffer to add to inbound queue + */ +static int tsi721_add_inb_buffer(struct rio_mport *mport, int mbox, void *buf) +{ + struct tsi721_device *priv = mport->priv; + u32 rx_slot; + int rc = 0; + + rx_slot = priv->imsg_ring[mbox].rx_slot; + if (priv->imsg_ring[mbox].imq_base[rx_slot]) { + tsi_err(&priv->pdev->dev, + "Error adding inbound buffer %d, buffer exists", + rx_slot); + rc = -EINVAL; + goto out; + } + + priv->imsg_ring[mbox].imq_base[rx_slot] = buf; + + if (++priv->imsg_ring[mbox].rx_slot == priv->imsg_ring[mbox].size) + priv->imsg_ring[mbox].rx_slot = 0; + +out: + return rc; +} + +/** + * tsi721_get_inb_message - Fetch inbound message from the Tsi721 MSG Queue + * @mport: Master port implementing the Inbound Messaging Engine + * @mbox: Inbound mailbox number + * + * Returns pointer to the message on success or NULL on failure. 
+ */ +static void *tsi721_get_inb_message(struct rio_mport *mport, int mbox) +{ + struct tsi721_device *priv = mport->priv; + struct tsi721_imsg_desc *desc; + u32 rx_slot; + void *rx_virt = NULL; + u64 rx_phys; + void *buf = NULL; + u64 *free_ptr; + int ch = mbox + 4; + int msg_size; + + if (!priv->imsg_init[mbox]) + return NULL; + + desc = priv->imsg_ring[mbox].imd_base; + desc += priv->imsg_ring[mbox].desc_rdptr; + + if (!(le32_to_cpu(desc->msg_info) & TSI721_IMD_HO)) + goto out; + + rx_slot = priv->imsg_ring[mbox].rx_slot; + while (priv->imsg_ring[mbox].imq_base[rx_slot] == NULL) { + if (++rx_slot == priv->imsg_ring[mbox].size) + rx_slot = 0; + } + + rx_phys = ((u64)le32_to_cpu(desc->bufptr_hi) << 32) | + le32_to_cpu(desc->bufptr_lo); + + rx_virt = priv->imsg_ring[mbox].buf_base + + (rx_phys - (u64)priv->imsg_ring[mbox].buf_phys); + + buf = priv->imsg_ring[mbox].imq_base[rx_slot]; + msg_size = le32_to_cpu(desc->msg_info) & TSI721_IMD_BCOUNT; + if (msg_size == 0) + msg_size = RIO_MAX_MSG_SIZE; + + memcpy(buf, rx_virt, msg_size); + priv->imsg_ring[mbox].imq_base[rx_slot] = NULL; + + desc->msg_info &= cpu_to_le32(~TSI721_IMD_HO); + if (++priv->imsg_ring[mbox].desc_rdptr == priv->imsg_ring[mbox].size) + priv->imsg_ring[mbox].desc_rdptr = 0; + + iowrite32(priv->imsg_ring[mbox].desc_rdptr, + priv->regs + TSI721_IBDMAC_DQRP(ch)); + + /* Return free buffer into the pointer list */ + free_ptr = priv->imsg_ring[mbox].imfq_base; + free_ptr[priv->imsg_ring[mbox].fq_wrptr] = cpu_to_le64(rx_phys); + + if (++priv->imsg_ring[mbox].fq_wrptr == priv->imsg_ring[mbox].size) + priv->imsg_ring[mbox].fq_wrptr = 0; + + iowrite32(priv->imsg_ring[mbox].fq_wrptr, + priv->regs + TSI721_IBDMAC_FQWP(ch)); +out: + return buf; +} + +/** + * tsi721_messages_init - Initialization of Messaging Engine + * @priv: pointer to tsi721 private data + * + * Configures Tsi721 messaging engine. + */ +static int tsi721_messages_init(struct tsi721_device *priv) +{ + int ch; + + iowrite32(0, priv->regs + TSI721_SMSG_ECC_LOG); + iowrite32(0, priv->regs + TSI721_RETRY_GEN_CNT); + iowrite32(0, priv->regs + TSI721_RETRY_RX_CNT); + + /* Set SRIO Message Request/Response Timeout */ + iowrite32(TSI721_RQRPTO_VAL, priv->regs + TSI721_RQRPTO); + + /* Initialize Inbound Messaging Engine Registers */ + for (ch = 0; ch < TSI721_IMSG_CHNUM; ch++) { + /* Clear interrupt bits */ + iowrite32(TSI721_IBDMAC_INT_MASK, + priv->regs + TSI721_IBDMAC_INT(ch)); + /* Clear Status */ + iowrite32(0, priv->regs + TSI721_IBDMAC_STS(ch)); + + iowrite32(TSI721_SMSG_ECC_COR_LOG_MASK, + priv->regs + TSI721_SMSG_ECC_COR_LOG(ch)); + iowrite32(TSI721_SMSG_ECC_NCOR_MASK, + priv->regs + TSI721_SMSG_ECC_NCOR(ch)); + } + + return 0; +} + +/** + * tsi721_query_mport - Fetch inbound message from the Tsi721 MSG Queue + * @mport: Master port implementing the Inbound Messaging Engine + * @mbox: Inbound mailbox number + * + * Returns pointer to the message on success or NULL on failure. 
+ */ +static int tsi721_query_mport(struct rio_mport *mport, + struct rio_mport_attr *attr) +{ + struct tsi721_device *priv = mport->priv; + u32 rval; + + rval = ioread32(priv->regs + 0x100 + RIO_PORT_N_ERR_STS_CSR(0, 0)); + if (rval & RIO_PORT_N_ERR_STS_PORT_OK) { + rval = ioread32(priv->regs + 0x100 + RIO_PORT_N_CTL2_CSR(0, 0)); + attr->link_speed = (rval & RIO_PORT_N_CTL2_SEL_BAUD) >> 28; + rval = ioread32(priv->regs + 0x100 + RIO_PORT_N_CTL_CSR(0, 0)); + attr->link_width = (rval & RIO_PORT_N_CTL_IPW) >> 27; + } else + attr->link_speed = RIO_LINK_DOWN; + +#ifdef CONFIG_RAPIDIO_DMA_ENGINE + attr->flags = RIO_MPORT_DMA | RIO_MPORT_DMA_SG; + attr->dma_max_sge = 0; + attr->dma_max_size = TSI721_BDMA_MAX_BCOUNT; + attr->dma_align = 0; +#else + attr->flags = 0; +#endif + return 0; +} + +/** + * tsi721_disable_ints - disables all device interrupts + * @priv: pointer to tsi721 private data + */ +static void tsi721_disable_ints(struct tsi721_device *priv) +{ + int ch; + + /* Disable all device level interrupts */ + iowrite32(0, priv->regs + TSI721_DEV_INTE); + + /* Disable all Device Channel interrupts */ + iowrite32(0, priv->regs + TSI721_DEV_CHAN_INTE); + + /* Disable all Inbound Msg Channel interrupts */ + for (ch = 0; ch < TSI721_IMSG_CHNUM; ch++) + iowrite32(0, priv->regs + TSI721_IBDMAC_INTE(ch)); + + /* Disable all Outbound Msg Channel interrupts */ + for (ch = 0; ch < TSI721_OMSG_CHNUM; ch++) + iowrite32(0, priv->regs + TSI721_OBDMAC_INTE(ch)); + + /* Disable all general messaging interrupts */ + iowrite32(0, priv->regs + TSI721_SMSG_INTE); + + /* Disable all BDMA Channel interrupts */ + for (ch = 0; ch < TSI721_DMA_MAXCH; ch++) + iowrite32(0, + priv->regs + TSI721_DMAC_BASE(ch) + TSI721_DMAC_INTE); + + /* Disable all general BDMA interrupts */ + iowrite32(0, priv->regs + TSI721_BDMA_INTE); + + /* Disable all SRIO Channel interrupts */ + for (ch = 0; ch < TSI721_SRIO_MAXCH; ch++) + iowrite32(0, priv->regs + TSI721_SR_CHINTE(ch)); + + /* Disable all general SR2PC interrupts */ + iowrite32(0, priv->regs + TSI721_SR2PC_GEN_INTE); + + /* Disable all PC2SR interrupts */ + iowrite32(0, priv->regs + TSI721_PC2SR_INTE); + + /* Disable all I2C interrupts */ + iowrite32(0, priv->regs + TSI721_I2C_INT_ENABLE); + + /* Disable SRIO MAC interrupts */ + iowrite32(0, priv->regs + TSI721_RIO_EM_INT_ENABLE); + iowrite32(0, priv->regs + TSI721_RIO_EM_DEV_INT_EN); +} + +static struct rio_ops tsi721_rio_ops = { + .lcread = tsi721_lcread, + .lcwrite = tsi721_lcwrite, + .cread = tsi721_cread_dma, + .cwrite = tsi721_cwrite_dma, + .dsend = tsi721_dsend, + .open_inb_mbox = tsi721_open_inb_mbox, + .close_inb_mbox = tsi721_close_inb_mbox, + .open_outb_mbox = tsi721_open_outb_mbox, + .close_outb_mbox = tsi721_close_outb_mbox, + .add_outb_message = tsi721_add_outb_message, + .add_inb_buffer = tsi721_add_inb_buffer, + .get_inb_message = tsi721_get_inb_message, + .map_inb = tsi721_rio_map_inb_mem, + .unmap_inb = tsi721_rio_unmap_inb_mem, + .pwenable = tsi721_pw_enable, + .query_mport = tsi721_query_mport, + .map_outb = tsi721_map_outb_win, + .unmap_outb = tsi721_unmap_outb_win, +}; + +static void tsi721_mport_release(struct device *dev) +{ + struct rio_mport *mport = to_rio_mport(dev); + + tsi_debug(EXIT, dev, "%s id=%d", mport->name, mport->id); +} + +/** + * tsi721_setup_mport - Setup Tsi721 as RapidIO subsystem master port + * @priv: pointer to tsi721 private data + * + * Configures Tsi721 as RapidIO master port. 
+ */ +static int tsi721_setup_mport(struct tsi721_device *priv) +{ + struct pci_dev *pdev = priv->pdev; + int err = 0; + struct rio_mport *mport = &priv->mport; + + err = rio_mport_initialize(mport); + if (err) + return err; + + mport->ops = &tsi721_rio_ops; + mport->index = 0; + mport->sys_size = 0; /* small system */ + mport->priv = (void *)priv; + mport->phys_efptr = 0x100; + mport->phys_rmap = 1; + mport->dev.parent = &pdev->dev; + mport->dev.release = tsi721_mport_release; + + INIT_LIST_HEAD(&mport->dbells); + + rio_init_dbell_res(&mport->riores[RIO_DOORBELL_RESOURCE], 0, 0xffff); + rio_init_mbox_res(&mport->riores[RIO_INB_MBOX_RESOURCE], 0, 3); + rio_init_mbox_res(&mport->riores[RIO_OUTB_MBOX_RESOURCE], 0, 3); + snprintf(mport->name, RIO_MAX_MPORT_NAME, "%s(%s)", + dev_driver_string(&pdev->dev), dev_name(&pdev->dev)); + + /* Hook up interrupt handler */ + +#ifdef CONFIG_PCI_MSI + if (!tsi721_enable_msix(priv)) + priv->flags |= TSI721_USING_MSIX; + else if (!pci_enable_msi(pdev)) + priv->flags |= TSI721_USING_MSI; + else + tsi_debug(MPORT, &pdev->dev, + "MSI/MSI-X is not available. Using legacy INTx."); +#endif /* CONFIG_PCI_MSI */ + + err = tsi721_request_irq(priv); + + if (err) { + tsi_err(&pdev->dev, "Unable to get PCI IRQ %02X (err=0x%x)", + pdev->irq, err); + return err; + } + +#ifdef CONFIG_RAPIDIO_DMA_ENGINE + err = tsi721_register_dma(priv); + if (err) + goto err_exit; +#endif + /* Enable SRIO link */ + iowrite32(ioread32(priv->regs + TSI721_DEVCTL) | + TSI721_DEVCTL_SRBOOT_CMPL, + priv->regs + TSI721_DEVCTL); + + if (mport->host_deviceid >= 0) + iowrite32(RIO_PORT_GEN_HOST | RIO_PORT_GEN_MASTER | + RIO_PORT_GEN_DISCOVERED, + priv->regs + (0x100 + RIO_PORT_GEN_CTL_CSR)); + else + iowrite32(0, priv->regs + (0x100 + RIO_PORT_GEN_CTL_CSR)); + + err = rio_register_mport(mport); + if (err) { + tsi721_unregister_dma(priv); + goto err_exit; + } + + return 0; + +err_exit: + tsi721_free_irq(priv); + return err; +} + +static int tsi721_probe(struct pci_dev *pdev, + const struct pci_device_id *id) +{ + struct tsi721_device *priv; + int err; + + priv = kzalloc(sizeof(struct tsi721_device), GFP_KERNEL); + if (!priv) { + err = -ENOMEM; + goto err_exit; + } + + err = pci_enable_device(pdev); + if (err) { + tsi_err(&pdev->dev, "Failed to enable PCI device"); + goto err_clean; + } + + priv->pdev = pdev; + +#ifdef DEBUG + { + int i; + + for (i = 0; i < PCI_STD_NUM_BARS; i++) { + tsi_debug(INIT, &pdev->dev, "res%d %pR", + i, &pdev->resource[i]); + } + } +#endif + /* + * Verify BAR configuration + */ + + /* BAR_0 (registers) must be 512KB+ in 32-bit address space */ + if (!(pci_resource_flags(pdev, BAR_0) & IORESOURCE_MEM) || + pci_resource_flags(pdev, BAR_0) & IORESOURCE_MEM_64 || + pci_resource_len(pdev, BAR_0) < TSI721_REG_SPACE_SIZE) { + tsi_err(&pdev->dev, "Missing or misconfigured CSR BAR0"); + err = -ENODEV; + goto err_disable_pdev; + } + + /* BAR_1 (outbound doorbells) must be 16MB+ in 32-bit address space */ + if (!(pci_resource_flags(pdev, BAR_1) & IORESOURCE_MEM) || + pci_resource_flags(pdev, BAR_1) & IORESOURCE_MEM_64 || + pci_resource_len(pdev, BAR_1) < TSI721_DB_WIN_SIZE) { + tsi_err(&pdev->dev, "Missing or misconfigured Doorbell BAR1"); + err = -ENODEV; + goto err_disable_pdev; + } + + /* + * BAR_2 and BAR_4 (outbound translation) must be in 64-bit PCIe address + * space. + * NOTE: BAR_2 and BAR_4 are not used by this version of driver. + * It may be a good idea to keep them disabled using HW configuration + * to save PCI memory space. 
+ */ + + priv->p2r_bar[0].size = priv->p2r_bar[1].size = 0; + + if (pci_resource_flags(pdev, BAR_2) & IORESOURCE_MEM_64) { + if (pci_resource_flags(pdev, BAR_2) & IORESOURCE_PREFETCH) + tsi_debug(INIT, &pdev->dev, + "Prefetchable OBW BAR2 will not be used"); + else { + priv->p2r_bar[0].base = pci_resource_start(pdev, BAR_2); + priv->p2r_bar[0].size = pci_resource_len(pdev, BAR_2); + } + } + + if (pci_resource_flags(pdev, BAR_4) & IORESOURCE_MEM_64) { + if (pci_resource_flags(pdev, BAR_4) & IORESOURCE_PREFETCH) + tsi_debug(INIT, &pdev->dev, + "Prefetchable OBW BAR4 will not be used"); + else { + priv->p2r_bar[1].base = pci_resource_start(pdev, BAR_4); + priv->p2r_bar[1].size = pci_resource_len(pdev, BAR_4); + } + } + + err = pci_request_regions(pdev, DRV_NAME); + if (err) { + tsi_err(&pdev->dev, "Unable to obtain PCI resources"); + goto err_disable_pdev; + } + + pci_set_master(pdev); + + priv->regs = pci_ioremap_bar(pdev, BAR_0); + if (!priv->regs) { + tsi_err(&pdev->dev, "Unable to map device registers space"); + err = -ENOMEM; + goto err_free_res; + } + + priv->odb_base = pci_ioremap_bar(pdev, BAR_1); + if (!priv->odb_base) { + tsi_err(&pdev->dev, "Unable to map outbound doorbells space"); + err = -ENOMEM; + goto err_unmap_bars; + } + + /* Configure DMA attributes. */ + if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) { + err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); + if (err) { + tsi_err(&pdev->dev, "Unable to set DMA mask"); + goto err_unmap_bars; + } + + if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32))) + tsi_info(&pdev->dev, "Unable to set consistent DMA mask"); + } else { + err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); + if (err) + tsi_info(&pdev->dev, "Unable to set consistent DMA mask"); + } + + BUG_ON(!pci_is_pcie(pdev)); + + /* Clear "no snoop" and "relaxed ordering" bits. 
*/ + pcie_capability_clear_and_set_word(pdev, PCI_EXP_DEVCTL, + PCI_EXP_DEVCTL_RELAX_EN | PCI_EXP_DEVCTL_NOSNOOP_EN, 0); + + /* Override PCIe Maximum Read Request Size setting if requested */ + if (pcie_mrrs >= 0) { + if (pcie_mrrs <= 5) + pcie_capability_clear_and_set_word(pdev, PCI_EXP_DEVCTL, + PCI_EXP_DEVCTL_READRQ, pcie_mrrs << 12); + else + tsi_info(&pdev->dev, + "Invalid MRRS override value %d", pcie_mrrs); + } + + /* Set PCIe completion timeout to 1-10ms */ + pcie_capability_clear_and_set_word(pdev, PCI_EXP_DEVCTL2, + PCI_EXP_DEVCTL2_COMP_TIMEOUT, 0x2); + + /* + * FIXUP: correct offsets of MSI-X tables in the MSI-X Capability Block + */ + pci_write_config_dword(pdev, TSI721_PCIECFG_EPCTL, 0x01); + pci_write_config_dword(pdev, TSI721_PCIECFG_MSIXTBL, + TSI721_MSIXTBL_OFFSET); + pci_write_config_dword(pdev, TSI721_PCIECFG_MSIXPBA, + TSI721_MSIXPBA_OFFSET); + pci_write_config_dword(pdev, TSI721_PCIECFG_EPCTL, 0); + /* End of FIXUP */ + + tsi721_disable_ints(priv); + + tsi721_init_pc2sr_mapping(priv); + tsi721_init_sr2pc_mapping(priv); + + if (tsi721_bdma_maint_init(priv)) { + tsi_err(&pdev->dev, "BDMA initialization failed"); + err = -ENOMEM; + goto err_unmap_bars; + } + + err = tsi721_doorbell_init(priv); + if (err) + goto err_free_bdma; + + tsi721_port_write_init(priv); + + err = tsi721_messages_init(priv); + if (err) + goto err_free_consistent; + + err = tsi721_setup_mport(priv); + if (err) + goto err_free_consistent; + + pci_set_drvdata(pdev, priv); + tsi721_interrupts_init(priv); + + return 0; + +err_free_consistent: + tsi721_port_write_free(priv); + tsi721_doorbell_free(priv); +err_free_bdma: + tsi721_bdma_maint_free(priv); +err_unmap_bars: + if (priv->regs) + iounmap(priv->regs); + if (priv->odb_base) + iounmap(priv->odb_base); +err_free_res: + pci_release_regions(pdev); + pci_clear_master(pdev); +err_disable_pdev: + pci_disable_device(pdev); +err_clean: + kfree(priv); +err_exit: + return err; +} + +static void tsi721_remove(struct pci_dev *pdev) +{ + struct tsi721_device *priv = pci_get_drvdata(pdev); + + tsi_debug(EXIT, &pdev->dev, "enter"); + + tsi721_disable_ints(priv); + tsi721_free_irq(priv); + flush_scheduled_work(); + rio_unregister_mport(&priv->mport); + + tsi721_unregister_dma(priv); + tsi721_bdma_maint_free(priv); + tsi721_doorbell_free(priv); + tsi721_port_write_free(priv); + tsi721_close_sr2pc_mapping(priv); + + if (priv->regs) + iounmap(priv->regs); + if (priv->odb_base) + iounmap(priv->odb_base); +#ifdef CONFIG_PCI_MSI + if (priv->flags & TSI721_USING_MSIX) + pci_disable_msix(priv->pdev); + else if (priv->flags & TSI721_USING_MSI) + pci_disable_msi(priv->pdev); +#endif + pci_release_regions(pdev); + pci_clear_master(pdev); + pci_disable_device(pdev); + pci_set_drvdata(pdev, NULL); + kfree(priv); + tsi_debug(EXIT, &pdev->dev, "exit"); +} + +static void tsi721_shutdown(struct pci_dev *pdev) +{ + struct tsi721_device *priv = pci_get_drvdata(pdev); + + tsi_debug(EXIT, &pdev->dev, "enter"); + + tsi721_disable_ints(priv); + tsi721_dma_stop_all(priv); + pci_clear_master(pdev); + pci_disable_device(pdev); +} + +static const struct pci_device_id tsi721_pci_tbl[] = { + { PCI_DEVICE(PCI_VENDOR_ID_IDT, PCI_DEVICE_ID_TSI721) }, + { 0, } /* terminate list */ +}; + +MODULE_DEVICE_TABLE(pci, tsi721_pci_tbl); + +static struct pci_driver tsi721_driver = { + .name = "tsi721", + .id_table = tsi721_pci_tbl, + .probe = tsi721_probe, + .remove = tsi721_remove, + .shutdown = tsi721_shutdown, +}; + +module_pci_driver(tsi721_driver); + +MODULE_DESCRIPTION("IDT Tsi721 PCIExpress-to-SRIO 
bridge driver"); +MODULE_AUTHOR("Integrated Device Technology, Inc."); +MODULE_LICENSE("GPL"); diff --git a/drivers/rapidio/devices/tsi721.h b/drivers/rapidio/devices/tsi721.h new file mode 100644 index 000000000..4f996ce62 --- /dev/null +++ b/drivers/rapidio/devices/tsi721.h @@ -0,0 +1,923 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Tsi721 PCIExpress-to-SRIO bridge definitions + * + * Copyright 2011, Integrated Device Technology, Inc. + */ + +#ifndef __TSI721_H +#define __TSI721_H + +/* Debug output filtering masks */ +enum { + DBG_NONE = 0, + DBG_INIT = BIT(0), /* driver init */ + DBG_EXIT = BIT(1), /* driver exit */ + DBG_MPORT = BIT(2), /* mport add/remove */ + DBG_MAINT = BIT(3), /* maintenance ops messages */ + DBG_DMA = BIT(4), /* DMA transfer messages */ + DBG_DMAV = BIT(5), /* verbose DMA transfer messages */ + DBG_IBW = BIT(6), /* inbound window */ + DBG_EVENT = BIT(7), /* event handling messages */ + DBG_OBW = BIT(8), /* outbound window messages */ + DBG_DBELL = BIT(9), /* doorbell messages */ + DBG_OMSG = BIT(10), /* doorbell messages */ + DBG_IMSG = BIT(11), /* doorbell messages */ + DBG_ALL = ~0, +}; + +#ifdef DEBUG +extern u32 tsi_dbg_level; + +#define tsi_debug(level, dev, fmt, arg...) \ + do { \ + if (DBG_##level & tsi_dbg_level) \ + dev_dbg(dev, "%s: " fmt "\n", __func__, ##arg); \ + } while (0) +#else +#define tsi_debug(level, dev, fmt, arg...) \ + no_printk(KERN_DEBUG "%s: " fmt "\n", __func__, ##arg) +#endif + +#define tsi_info(dev, fmt, arg...) \ + dev_info(dev, "%s: " fmt "\n", __func__, ##arg) + +#define tsi_warn(dev, fmt, arg...) \ + dev_warn(dev, "%s: WARNING " fmt "\n", __func__, ##arg) + +#define tsi_err(dev, fmt, arg...) \ + dev_err(dev, "%s: ERROR " fmt "\n", __func__, ##arg) + +#define DRV_NAME "tsi721" + +#define DEFAULT_HOPCOUNT 0xff +#define DEFAULT_DESTID 0xff + +/* PCI device ID */ +#define PCI_DEVICE_ID_TSI721 0x80ab + +#define BAR_0 0 +#define BAR_1 1 +#define BAR_2 2 +#define BAR_4 4 + +#define TSI721_PC2SR_BARS 2 +#define TSI721_PC2SR_WINS 8 +#define TSI721_PC2SR_ZONES 8 +#define TSI721_MAINT_WIN 0 /* Window for outbound maintenance requests */ +#define IDB_QUEUE 0 /* Inbound Doorbell Queue to use */ +#define IDB_QSIZE 512 /* Inbound Doorbell Queue size */ + +/* Memory space sizes */ +#define TSI721_REG_SPACE_SIZE (512 * 1024) /* 512K */ +#define TSI721_DB_WIN_SIZE (16 * 1024 * 1024) /* 16MB */ + +#define RIO_TT_CODE_8 0x00000000 +#define RIO_TT_CODE_16 0x00000001 + +#define TSI721_DMA_MAXCH 8 +#define TSI721_DMA_MINSTSSZ 32 +#define TSI721_DMA_STSBLKSZ 8 + +#define TSI721_SRIO_MAXCH 8 + +#define DBELL_SID(buf) (((u8)buf[2] << 8) | (u8)buf[3]) +#define DBELL_TID(buf) (((u8)buf[4] << 8) | (u8)buf[5]) +#define DBELL_INF(buf) (((u8)buf[0] << 8) | (u8)buf[1]) + +#define TSI721_RIO_PW_MSG_SIZE 16 /* Tsi721 saves only 16 bytes of PW msg */ + +/* Register definitions */ + +/* + * Registers in PCIe configuration space + */ + +#define TSI721_PCIECFG_MSIXTBL 0x0a4 +#define TSI721_MSIXTBL_OFFSET 0x2c000 +#define TSI721_PCIECFG_MSIXPBA 0x0a8 +#define TSI721_MSIXPBA_OFFSET 0x2a000 +#define TSI721_PCIECFG_EPCTL 0x400 + +/* + * Event Management Registers + */ + +#define TSI721_RIO_EM_INT_STAT 0x10910 +#define TSI721_RIO_EM_INT_STAT_PW_RX 0x00010000 + +#define TSI721_RIO_EM_INT_ENABLE 0x10914 +#define TSI721_RIO_EM_INT_ENABLE_PW_RX 0x00010000 + +#define TSI721_RIO_EM_DEV_INT_EN 0x10930 +#define TSI721_RIO_EM_DEV_INT_EN_INT 0x00000001 + +/* + * Port-Write Block Registers + */ + +#define TSI721_RIO_PW_CTL 0x10a04 +#define TSI721_RIO_PW_CTL_PW_TIMER 
0xf0000000 +#define TSI721_RIO_PW_CTL_PWT_DIS (0 << 28) +#define TSI721_RIO_PW_CTL_PWT_103 (1 << 28) +#define TSI721_RIO_PW_CTL_PWT_205 (1 << 29) +#define TSI721_RIO_PW_CTL_PWT_410 (1 << 30) +#define TSI721_RIO_PW_CTL_PWT_820 (1 << 31) +#define TSI721_RIO_PW_CTL_PWC_MODE 0x01000000 +#define TSI721_RIO_PW_CTL_PWC_CONT 0x00000000 +#define TSI721_RIO_PW_CTL_PWC_REL 0x01000000 + +#define TSI721_RIO_PW_RX_STAT 0x10a10 +#define TSI721_RIO_PW_RX_STAT_WR_SIZE 0x0000f000 +#define TSI_RIO_PW_RX_STAT_WDPTR 0x00000100 +#define TSI721_RIO_PW_RX_STAT_PW_SHORT 0x00000008 +#define TSI721_RIO_PW_RX_STAT_PW_TRUNC 0x00000004 +#define TSI721_RIO_PW_RX_STAT_PW_DISC 0x00000002 +#define TSI721_RIO_PW_RX_STAT_PW_VAL 0x00000001 + +#define TSI721_RIO_PW_RX_CAPT(x) (0x10a20 + (x)*4) + +/* + * Inbound Doorbells + */ + +#define TSI721_IDB_ENTRY_SIZE 64 + +#define TSI721_IDQ_CTL(x) (0x20000 + (x) * 0x1000) +#define TSI721_IDQ_SUSPEND 0x00000002 +#define TSI721_IDQ_INIT 0x00000001 + +#define TSI721_IDQ_STS(x) (0x20004 + (x) * 0x1000) +#define TSI721_IDQ_RUN 0x00200000 + +#define TSI721_IDQ_MASK(x) (0x20008 + (x) * 0x1000) +#define TSI721_IDQ_MASK_MASK 0xffff0000 +#define TSI721_IDQ_MASK_PATT 0x0000ffff + +#define TSI721_IDQ_RP(x) (0x2000c + (x) * 0x1000) +#define TSI721_IDQ_RP_PTR 0x0007ffff + +#define TSI721_IDQ_WP(x) (0x20010 + (x) * 0x1000) +#define TSI721_IDQ_WP_PTR 0x0007ffff + +#define TSI721_IDQ_BASEL(x) (0x20014 + (x) * 0x1000) +#define TSI721_IDQ_BASEL_ADDR 0xffffffc0 +#define TSI721_IDQ_BASEU(x) (0x20018 + (x) * 0x1000) +#define TSI721_IDQ_SIZE(x) (0x2001c + (x) * 0x1000) +#define TSI721_IDQ_SIZE_VAL(size) (__fls(size) - 4) +#define TSI721_IDQ_SIZE_MIN 512 +#define TSI721_IDQ_SIZE_MAX (512 * 1024) + +#define TSI721_SR_CHINT(x) (0x20040 + (x) * 0x1000) +#define TSI721_SR_CHINTE(x) (0x20044 + (x) * 0x1000) +#define TSI721_SR_CHINTSET(x) (0x20048 + (x) * 0x1000) +#define TSI721_SR_CHINT_ODBOK 0x00000020 +#define TSI721_SR_CHINT_IDBQRCV 0x00000010 +#define TSI721_SR_CHINT_SUSP 0x00000008 +#define TSI721_SR_CHINT_ODBTO 0x00000004 +#define TSI721_SR_CHINT_ODBRTRY 0x00000002 +#define TSI721_SR_CHINT_ODBERR 0x00000001 +#define TSI721_SR_CHINT_ALL 0x0000003f + +#define TSI721_IBWIN_NUM 8 + +#define TSI721_IBWIN_LB(x) (0x29000 + (x) * 0x20) +#define TSI721_IBWIN_LB_BA 0xfffff000 +#define TSI721_IBWIN_LB_WEN 0x00000001 + +#define TSI721_IBWIN_UB(x) (0x29004 + (x) * 0x20) +#define TSI721_IBWIN_SZ(x) (0x29008 + (x) * 0x20) +#define TSI721_IBWIN_SZ_SIZE 0x00001f00 +#define TSI721_IBWIN_SIZE(size) (__fls(size) - 12) + +#define TSI721_IBWIN_TLA(x) (0x2900c + (x) * 0x20) +#define TSI721_IBWIN_TLA_ADD 0xfffff000 +#define TSI721_IBWIN_TUA(x) (0x29010 + (x) * 0x20) + +#define TSI721_SR2PC_GEN_INTE 0x29800 +#define TSI721_SR2PC_PWE 0x29804 +#define TSI721_SR2PC_GEN_INT 0x29808 + +#define TSI721_DEV_INTE 0x29840 +#define TSI721_DEV_INT 0x29844 +#define TSI721_DEV_INTSET 0x29848 +#define TSI721_DEV_INT_BDMA_CH 0x00002000 +#define TSI721_DEV_INT_BDMA_NCH 0x00001000 +#define TSI721_DEV_INT_SMSG_CH 0x00000800 +#define TSI721_DEV_INT_SMSG_NCH 0x00000400 +#define TSI721_DEV_INT_SR2PC_CH 0x00000200 +#define TSI721_DEV_INT_SRIO 0x00000020 + +#define TSI721_DEV_CHAN_INTE 0x2984c +#define TSI721_DEV_CHAN_INT 0x29850 + +#define TSI721_INT_SR2PC_CHAN_M 0xff000000 +#define TSI721_INT_SR2PC_CHAN(x) (1 << (24 + (x))) +#define TSI721_INT_IMSG_CHAN_M 0x00ff0000 +#define TSI721_INT_IMSG_CHAN(x) (1 << (16 + (x))) +#define TSI721_INT_OMSG_CHAN_M 0x0000ff00 +#define TSI721_INT_OMSG_CHAN(x) (1 << (8 + (x))) +#define TSI721_INT_BDMA_CHAN_M 0x000000ff 
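The TSI721_DEV_CHAN_INT/INTE masks above imply a single 32-bit word that packs per-channel interrupt bits by block type: BDMA channels in bits 7:0, outbound messaging in 15:8, inbound messaging in 23:16 and SR2PC in 31:24. The tsi721_imsg/omsg_interrupt_enable/disable helpers earlier in tsi721.c read-modify-write this word using the *_CHAN(x) macros when running in MSI/INTx mode. As a rough aid, the following stand-alone sketch (plain user-space C, not part of the driver) shows how those bits compose; the macro values are copied from the header, while the main() harness and its expected value are purely illustrative.

#include <stdint.h>
#include <stdio.h>

#define TSI721_INT_SR2PC_CHAN(x)  (1u << (24 + (x)))
#define TSI721_INT_IMSG_CHAN(x)   (1u << (16 + (x)))
#define TSI721_INT_OMSG_CHAN(x)   (1u << (8 + (x)))
#define TSI721_INT_BDMA_CHAN(x)   (1u << (x))

int main(void)
{
	uint32_t inte = 0;

	/* Inbound mailbox 0 uses IB channel 4 (ch = mbox + 4 in the driver) */
	inte |= TSI721_INT_IMSG_CHAN(4);
	/* Outbound mailbox 0 uses OB channel 0 */
	inte |= TSI721_INT_OMSG_CHAN(0);

	/* Expected composite enable word: 0x00100100 */
	printf("DEV_CHAN_INTE = 0x%08x\n", inte);
	return 0;
}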
+#define TSI721_INT_BDMA_CHAN(x) (1 << (x)) + +/* + * PC2SR block registers + */ +#define TSI721_OBWIN_NUM TSI721_PC2SR_WINS + +#define TSI721_OBWINLB(x) (0x40000 + (x) * 0x20) +#define TSI721_OBWINLB_BA 0xffff8000 +#define TSI721_OBWINLB_WEN 0x00000001 + +#define TSI721_OBWINUB(x) (0x40004 + (x) * 0x20) + +#define TSI721_OBWINSZ(x) (0x40008 + (x) * 0x20) +#define TSI721_OBWINSZ_SIZE 0x00001f00 +#define TSI721_OBWIN_SIZE(size) (__fls(size) - 15) + +#define TSI721_ZONE_SEL 0x41300 +#define TSI721_ZONE_SEL_RD_WRB 0x00020000 +#define TSI721_ZONE_SEL_GO 0x00010000 +#define TSI721_ZONE_SEL_WIN 0x00000038 +#define TSI721_ZONE_SEL_ZONE 0x00000007 + +#define TSI721_LUT_DATA0 0x41304 +#define TSI721_LUT_DATA0_ADD 0xfffff000 +#define TSI721_LUT_DATA0_RDTYPE 0x00000f00 +#define TSI721_LUT_DATA0_NREAD 0x00000100 +#define TSI721_LUT_DATA0_MNTRD 0x00000200 +#define TSI721_LUT_DATA0_RDCRF 0x00000020 +#define TSI721_LUT_DATA0_WRCRF 0x00000010 +#define TSI721_LUT_DATA0_WRTYPE 0x0000000f +#define TSI721_LUT_DATA0_NWR 0x00000001 +#define TSI721_LUT_DATA0_MNTWR 0x00000002 +#define TSI721_LUT_DATA0_NWR_R 0x00000004 + +#define TSI721_LUT_DATA1 0x41308 + +#define TSI721_LUT_DATA2 0x4130c +#define TSI721_LUT_DATA2_HC 0xff000000 +#define TSI721_LUT_DATA2_ADD65 0x000c0000 +#define TSI721_LUT_DATA2_TT 0x00030000 +#define TSI721_LUT_DATA2_DSTID 0x0000ffff + +#define TSI721_PC2SR_INTE 0x41310 + +#define TSI721_DEVCTL 0x48004 +#define TSI721_DEVCTL_SRBOOT_CMPL 0x00000004 + +#define TSI721_I2C_INT_ENABLE 0x49120 + +/* + * Block DMA Engine Registers + * x = 0..7 + */ + +#define TSI721_DMAC_BASE(x) (0x51000 + (x) * 0x1000) + +#define TSI721_DMAC_DWRCNT 0x000 +#define TSI721_DMAC_DRDCNT 0x004 + +#define TSI721_DMAC_CTL 0x008 +#define TSI721_DMAC_CTL_SUSP 0x00000002 +#define TSI721_DMAC_CTL_INIT 0x00000001 + +#define TSI721_DMAC_INT 0x00c +#define TSI721_DMAC_INT_STFULL 0x00000010 +#define TSI721_DMAC_INT_DONE 0x00000008 +#define TSI721_DMAC_INT_SUSP 0x00000004 +#define TSI721_DMAC_INT_ERR 0x00000002 +#define TSI721_DMAC_INT_IOFDONE 0x00000001 +#define TSI721_DMAC_INT_ALL 0x0000001f + +#define TSI721_DMAC_INTSET 0x010 + +#define TSI721_DMAC_STS 0x014 +#define TSI721_DMAC_STS_ABORT 0x00400000 +#define TSI721_DMAC_STS_RUN 0x00200000 +#define TSI721_DMAC_STS_CS 0x001f0000 + +#define TSI721_DMAC_INTE 0x018 + +#define TSI721_DMAC_DPTRL 0x024 +#define TSI721_DMAC_DPTRL_MASK 0xffffffe0 + +#define TSI721_DMAC_DPTRH 0x028 + +#define TSI721_DMAC_DSBL 0x02c +#define TSI721_DMAC_DSBL_MASK 0xffffffc0 + +#define TSI721_DMAC_DSBH 0x030 + +#define TSI721_DMAC_DSSZ 0x034 +#define TSI721_DMAC_DSSZ_SIZE_M 0x0000000f +#define TSI721_DMAC_DSSZ_SIZE(size) (__fls(size) - 4) + +#define TSI721_DMAC_DSRP 0x038 +#define TSI721_DMAC_DSRP_MASK 0x0007ffff + +#define TSI721_DMAC_DSWP 0x03c +#define TSI721_DMAC_DSWP_MASK 0x0007ffff + +#define TSI721_BDMA_INTE 0x5f000 + +/* + * Messaging definitions + */ +#define TSI721_MSG_BUFFER_SIZE RIO_MAX_MSG_SIZE +#define TSI721_MSG_MAX_SIZE RIO_MAX_MSG_SIZE +#define TSI721_IMSG_MAXCH 8 +#define TSI721_IMSG_CHNUM TSI721_IMSG_MAXCH +#define TSI721_IMSGD_MIN_RING_SIZE 32 +#define TSI721_IMSGD_RING_SIZE 512 + +#define TSI721_OMSG_CHNUM 4 /* One channel per MBOX */ +#define TSI721_OMSGD_MIN_RING_SIZE 32 +#define TSI721_OMSGD_RING_SIZE 512 + +/* + * Outbound Messaging Engine Registers + * x = 0..7 + */ + +#define TSI721_OBDMAC_DWRCNT(x) (0x61000 + (x) * 0x1000) + +#define TSI721_OBDMAC_DRDCNT(x) (0x61004 + (x) * 0x1000) + +#define TSI721_OBDMAC_CTL(x) (0x61008 + (x) * 0x1000) +#define TSI721_OBDMAC_CTL_MASK 0x00000007 
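The outbound messaging registers above repeat with a fixed 0x1000 stride per channel, and TSI721_OBDMAC_DWRCNT(x) is the descriptor write-count "doorbell" that tsi721_add_outb_message() updates after filling a slot. Because each ring holds 'size' data descriptors plus one DTYPE5 link descriptor, the write count is bumped twice when the slot index wraps so the engine also steps over the link descriptor. The following stand-alone sketch (not kernel code; struct and function names are illustrative only) mirrors that accounting under those assumptions.

#include <stdint.h>
#include <stdio.h>

struct omsg_ring {
	uint32_t size;     /* number of data descriptors in the ring */
	uint32_t tx_slot;  /* next descriptor slot to fill */
	uint32_t wr_count; /* value the driver writes to OBDMAC_DWRCNT */
};

static void post_message(struct omsg_ring *r)
{
	r->wr_count++;			/* account for the data descriptor */
	if (++r->tx_slot == r->size) {
		r->tx_slot = 0;
		r->wr_count++;		/* step over the DTYPE5 link descriptor */
	}
	/* the driver would now write r->wr_count to TSI721_OBDMAC_DWRCNT(mbox) */
}

int main(void)
{
	struct omsg_ring r = { .size = 4 };

	for (int i = 0; i < 6; i++) {
		post_message(&r);
		printf("msg %d: tx_slot=%u wr_count=%u\n", i, r.tx_slot, r.wr_count);
	}
	return 0;
}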
+#define TSI721_OBDMAC_CTL_RETRY_THR 0x00000004 +#define TSI721_OBDMAC_CTL_SUSPEND 0x00000002 +#define TSI721_OBDMAC_CTL_INIT 0x00000001 + +#define TSI721_OBDMAC_INT(x) (0x6100c + (x) * 0x1000) +#define TSI721_OBDMAC_INTSET(x) (0x61010 + (x) * 0x1000) +#define TSI721_OBDMAC_INTE(x) (0x61018 + (x) * 0x1000) +#define TSI721_OBDMAC_INT_MASK 0x0000001F +#define TSI721_OBDMAC_INT_ST_FULL 0x00000010 +#define TSI721_OBDMAC_INT_DONE 0x00000008 +#define TSI721_OBDMAC_INT_SUSPENDED 0x00000004 +#define TSI721_OBDMAC_INT_ERROR 0x00000002 +#define TSI721_OBDMAC_INT_IOF_DONE 0x00000001 +#define TSI721_OBDMAC_INT_ALL TSI721_OBDMAC_INT_MASK + +#define TSI721_OBDMAC_STS(x) (0x61014 + (x) * 0x1000) +#define TSI721_OBDMAC_STS_MASK 0x007f0000 +#define TSI721_OBDMAC_STS_ABORT 0x00400000 +#define TSI721_OBDMAC_STS_RUN 0x00200000 +#define TSI721_OBDMAC_STS_CS 0x001f0000 + +#define TSI721_OBDMAC_PWE(x) (0x6101c + (x) * 0x1000) +#define TSI721_OBDMAC_PWE_MASK 0x00000002 +#define TSI721_OBDMAC_PWE_ERROR_EN 0x00000002 + +#define TSI721_OBDMAC_DPTRL(x) (0x61020 + (x) * 0x1000) +#define TSI721_OBDMAC_DPTRL_MASK 0xfffffff0 + +#define TSI721_OBDMAC_DPTRH(x) (0x61024 + (x) * 0x1000) +#define TSI721_OBDMAC_DPTRH_MASK 0xffffffff + +#define TSI721_OBDMAC_DSBL(x) (0x61040 + (x) * 0x1000) +#define TSI721_OBDMAC_DSBL_MASK 0xffffffc0 + +#define TSI721_OBDMAC_DSBH(x) (0x61044 + (x) * 0x1000) +#define TSI721_OBDMAC_DSBH_MASK 0xffffffff + +#define TSI721_OBDMAC_DSSZ(x) (0x61048 + (x) * 0x1000) +#define TSI721_OBDMAC_DSSZ_MASK 0x0000000f + +#define TSI721_OBDMAC_DSRP(x) (0x6104c + (x) * 0x1000) +#define TSI721_OBDMAC_DSRP_MASK 0x0007ffff + +#define TSI721_OBDMAC_DSWP(x) (0x61050 + (x) * 0x1000) +#define TSI721_OBDMAC_DSWP_MASK 0x0007ffff + +#define TSI721_RQRPTO 0x60010 +#define TSI721_RQRPTO_MASK 0x00ffffff +#define TSI721_RQRPTO_VAL 400 /* Response TO value */ + +/* + * Inbound Messaging Engine Registers + * x = 0..7 + */ + +#define TSI721_IB_DEVID_GLOBAL 0xffff +#define TSI721_IBDMAC_FQBL(x) (0x61200 + (x) * 0x1000) +#define TSI721_IBDMAC_FQBL_MASK 0xffffffc0 + +#define TSI721_IBDMAC_FQBH(x) (0x61204 + (x) * 0x1000) +#define TSI721_IBDMAC_FQBH_MASK 0xffffffff + +#define TSI721_IBDMAC_FQSZ_ENTRY_INX TSI721_IMSGD_RING_SIZE +#define TSI721_IBDMAC_FQSZ(x) (0x61208 + (x) * 0x1000) +#define TSI721_IBDMAC_FQSZ_MASK 0x0000000f + +#define TSI721_IBDMAC_FQRP(x) (0x6120c + (x) * 0x1000) +#define TSI721_IBDMAC_FQRP_MASK 0x0007ffff + +#define TSI721_IBDMAC_FQWP(x) (0x61210 + (x) * 0x1000) +#define TSI721_IBDMAC_FQWP_MASK 0x0007ffff + +#define TSI721_IBDMAC_FQTH(x) (0x61214 + (x) * 0x1000) +#define TSI721_IBDMAC_FQTH_MASK 0x0007ffff + +#define TSI721_IB_DEVID 0x60020 +#define TSI721_IB_DEVID_MASK 0x0000ffff + +#define TSI721_IBDMAC_CTL(x) (0x61240 + (x) * 0x1000) +#define TSI721_IBDMAC_CTL_MASK 0x00000003 +#define TSI721_IBDMAC_CTL_SUSPEND 0x00000002 +#define TSI721_IBDMAC_CTL_INIT 0x00000001 + +#define TSI721_IBDMAC_STS(x) (0x61244 + (x) * 0x1000) +#define TSI721_IBDMAC_STS_MASK 0x007f0000 +#define TSI721_IBSMAC_STS_ABORT 0x00400000 +#define TSI721_IBSMAC_STS_RUN 0x00200000 +#define TSI721_IBSMAC_STS_CS 0x001f0000 + +#define TSI721_IBDMAC_INT(x) (0x61248 + (x) * 0x1000) +#define TSI721_IBDMAC_INTSET(x) (0x6124c + (x) * 0x1000) +#define TSI721_IBDMAC_INTE(x) (0x61250 + (x) * 0x1000) +#define TSI721_IBDMAC_INT_MASK 0x0000100f +#define TSI721_IBDMAC_INT_SRTO 0x00001000 +#define TSI721_IBDMAC_INT_SUSPENDED 0x00000008 +#define TSI721_IBDMAC_INT_PC_ERROR 0x00000004 +#define TSI721_IBDMAC_INT_FQ_LOW 0x00000002 +#define TSI721_IBDMAC_INT_DQ_RCV 
0x00000001 +#define TSI721_IBDMAC_INT_ALL TSI721_IBDMAC_INT_MASK + +#define TSI721_IBDMAC_PWE(x) (0x61254 + (x) * 0x1000) +#define TSI721_IBDMAC_PWE_MASK 0x00001700 +#define TSI721_IBDMAC_PWE_SRTO 0x00001000 +#define TSI721_IBDMAC_PWE_ILL_FMT 0x00000400 +#define TSI721_IBDMAC_PWE_ILL_DEC 0x00000200 +#define TSI721_IBDMAC_PWE_IMP_SP 0x00000100 + +#define TSI721_IBDMAC_DQBL(x) (0x61300 + (x) * 0x1000) +#define TSI721_IBDMAC_DQBL_MASK 0xffffffc0 +#define TSI721_IBDMAC_DQBL_ADDR 0xffffffc0 + +#define TSI721_IBDMAC_DQBH(x) (0x61304 + (x) * 0x1000) +#define TSI721_IBDMAC_DQBH_MASK 0xffffffff + +#define TSI721_IBDMAC_DQRP(x) (0x61308 + (x) * 0x1000) +#define TSI721_IBDMAC_DQRP_MASK 0x0007ffff + +#define TSI721_IBDMAC_DQWR(x) (0x6130c + (x) * 0x1000) +#define TSI721_IBDMAC_DQWR_MASK 0x0007ffff + +#define TSI721_IBDMAC_DQSZ(x) (0x61314 + (x) * 0x1000) +#define TSI721_IBDMAC_DQSZ_MASK 0x0000000f + +/* + * Messaging Engine Interrupts + */ + +#define TSI721_SMSG_PWE 0x6a004 + +#define TSI721_SMSG_INTE 0x6a000 +#define TSI721_SMSG_INT 0x6a008 +#define TSI721_SMSG_INTSET 0x6a010 +#define TSI721_SMSG_INT_MASK 0x0086ffff +#define TSI721_SMSG_INT_UNS_RSP 0x00800000 +#define TSI721_SMSG_INT_ECC_NCOR 0x00040000 +#define TSI721_SMSG_INT_ECC_COR 0x00020000 +#define TSI721_SMSG_INT_ECC_NCOR_CH 0x0000ff00 +#define TSI721_SMSG_INT_ECC_COR_CH 0x000000ff + +#define TSI721_SMSG_ECC_LOG 0x6a014 +#define TSI721_SMSG_ECC_LOG_MASK 0x00070007 +#define TSI721_SMSG_ECC_LOG_ECC_NCOR_M 0x00070000 +#define TSI721_SMSG_ECC_LOG_ECC_COR_M 0x00000007 + +#define TSI721_RETRY_GEN_CNT 0x6a100 +#define TSI721_RETRY_GEN_CNT_MASK 0xffffffff + +#define TSI721_RETRY_RX_CNT 0x6a104 +#define TSI721_RETRY_RX_CNT_MASK 0xffffffff + +#define TSI721_SMSG_ECC_COR_LOG(x) (0x6a300 + (x) * 4) +#define TSI721_SMSG_ECC_COR_LOG_MASK 0x000000ff + +#define TSI721_SMSG_ECC_NCOR(x) (0x6a340 + (x) * 4) +#define TSI721_SMSG_ECC_NCOR_MASK 0x000000ff + +/* + * Block DMA Descriptors + */ + +struct tsi721_dma_desc { + __le32 type_id; + +#define TSI721_DMAD_DEVID 0x0000ffff +#define TSI721_DMAD_CRF 0x00010000 +#define TSI721_DMAD_PRIO 0x00060000 +#define TSI721_DMAD_RTYPE 0x00780000 +#define TSI721_DMAD_IOF 0x08000000 +#define TSI721_DMAD_DTYPE 0xe0000000 + + __le32 bcount; + +#define TSI721_DMAD_BCOUNT1 0x03ffffff /* if DTYPE == 1 */ +#define TSI721_DMAD_BCOUNT2 0x0000000f /* if DTYPE == 2 */ +#define TSI721_DMAD_TT 0x0c000000 +#define TSI721_DMAD_RADDR0 0xc0000000 + + union { + __le32 raddr_lo; /* if DTYPE == (1 || 2) */ + __le32 next_lo; /* if DTYPE == 3 */ + }; + +#define TSI721_DMAD_CFGOFF 0x00ffffff +#define TSI721_DMAD_HOPCNT 0xff000000 + + union { + __le32 raddr_hi; /* if DTYPE == (1 || 2) */ + __le32 next_hi; /* if DTYPE == 3 */ + }; + + union { + struct { /* if DTYPE == 1 */ + __le32 bufptr_lo; + __le32 bufptr_hi; + __le32 s_dist; + __le32 s_size; + } t1; + __le32 data[4]; /* if DTYPE == 2 */ + u32 reserved[4]; /* if DTYPE == 3 */ + }; +} __aligned(32); + +/* + * Inbound Messaging Descriptor + */ +struct tsi721_imsg_desc { + __le32 type_id; + +#define TSI721_IMD_DEVID 0x0000ffff +#define TSI721_IMD_CRF 0x00010000 +#define TSI721_IMD_PRIO 0x00060000 +#define TSI721_IMD_TT 0x00180000 +#define TSI721_IMD_DTYPE 0xe0000000 + + __le32 msg_info; + +#define TSI721_IMD_BCOUNT 0x00000ff8 +#define TSI721_IMD_SSIZE 0x0000f000 +#define TSI721_IMD_LETER 0x00030000 +#define TSI721_IMD_XMBOX 0x003c0000 +#define TSI721_IMD_MBOX 0x00c00000 +#define TSI721_IMD_CS 0x78000000 +#define TSI721_IMD_HO 0x80000000 + + __le32 bufptr_lo; + __le32 bufptr_hi; + u32 reserved[12]; + 
+} __aligned(64); + +/* + * Outbound Messaging Descriptor + */ +struct tsi721_omsg_desc { + __le32 type_id; + +#define TSI721_OMD_DEVID 0x0000ffff +#define TSI721_OMD_CRF 0x00010000 +#define TSI721_OMD_PRIO 0x00060000 +#define TSI721_OMD_IOF 0x08000000 +#define TSI721_OMD_DTYPE 0xe0000000 +#define TSI721_OMD_RSRVD 0x17f80000 + + __le32 msg_info; + +#define TSI721_OMD_BCOUNT 0x00000ff8 +#define TSI721_OMD_SSIZE 0x0000f000 +#define TSI721_OMD_LETER 0x00030000 +#define TSI721_OMD_XMBOX 0x003c0000 +#define TSI721_OMD_MBOX 0x00c00000 +#define TSI721_OMD_TT 0x0c000000 + + union { + __le32 bufptr_lo; /* if DTYPE == 4 */ + __le32 next_lo; /* if DTYPE == 5 */ + }; + + union { + __le32 bufptr_hi; /* if DTYPE == 4 */ + __le32 next_hi; /* if DTYPE == 5 */ + }; + +} __aligned(16); + +struct tsi721_dma_sts { + __le64 desc_sts[8]; +} __aligned(64); + +struct tsi721_desc_sts_fifo { + union { + __le64 da64; + struct { + __le32 lo; + __le32 hi; + } da32; + } stat[8]; +} __aligned(64); + +/* Descriptor types for BDMA and Messaging blocks */ +enum dma_dtype { + DTYPE1 = 1, /* Data Transfer DMA Descriptor */ + DTYPE2 = 2, /* Immediate Data Transfer DMA Descriptor */ + DTYPE3 = 3, /* Block Pointer DMA Descriptor */ + DTYPE4 = 4, /* Outbound Msg DMA Descriptor */ + DTYPE5 = 5, /* OB Messaging Block Pointer Descriptor */ + DTYPE6 = 6 /* Inbound Messaging Descriptor */ +}; + +enum dma_rtype { + NREAD = 0, + LAST_NWRITE_R = 1, + ALL_NWRITE = 2, + ALL_NWRITE_R = 3, + MAINT_RD = 4, + MAINT_WR = 5 +}; + +/* + * mport Driver Definitions + */ +#define TSI721_DMA_CHNUM TSI721_DMA_MAXCH + +#define TSI721_DMACH_MAINT 7 /* DMA channel for maint requests */ +#define TSI721_DMACH_MAINT_NBD 32 /* Number of BDs for maint requests */ + +#define TSI721_DMACH_DMA 1 /* DMA channel for data transfers */ + +#define MSG_DMA_ENTRY_INX_TO_SIZE(x) ((0x10 << (x)) & 0xFFFF0) + +enum tsi721_smsg_int_flag { + SMSG_INT_NONE = 0x00000000, + SMSG_INT_ECC_COR_CH = 0x000000ff, + SMSG_INT_ECC_NCOR_CH = 0x0000ff00, + SMSG_INT_ECC_COR = 0x00020000, + SMSG_INT_ECC_NCOR = 0x00040000, + SMSG_INT_UNS_RSP = 0x00800000, + SMSG_INT_ALL = 0x0006ffff +}; + +/* Structures */ + +#ifdef CONFIG_RAPIDIO_DMA_ENGINE + +#define TSI721_BDMA_MAX_BCOUNT (TSI721_DMAD_BCOUNT1 + 1) + +struct tsi721_tx_desc { + struct dma_async_tx_descriptor txd; + u16 destid; + /* low 64-bits of 66-bit RIO address */ + u64 rio_addr; + /* upper 2-bits of 66-bit RIO address */ + u8 rio_addr_u; + enum dma_rtype rtype; + struct list_head desc_node; + struct scatterlist *sg; + unsigned int sg_len; + enum dma_status status; +}; + +struct tsi721_bdma_chan { + int id; + void __iomem *regs; + int bd_num; /* number of HW buffer descriptors */ + void *bd_base; /* start of DMA descriptors */ + dma_addr_t bd_phys; + void *sts_base; /* start of DMA BD status FIFO */ + dma_addr_t sts_phys; + int sts_size; + u32 sts_rdptr; + u32 wr_count; + u32 wr_count_next; + + struct dma_chan dchan; + struct tsi721_tx_desc *tx_desc; + spinlock_t lock; + struct tsi721_tx_desc *active_tx; + struct list_head queue; + struct list_head free_list; + struct tasklet_struct tasklet; + bool active; +}; + +#endif /* CONFIG_RAPIDIO_DMA_ENGINE */ + +struct tsi721_bdma_maint { + int ch_id; /* BDMA channel number */ + int bd_num; /* number of buffer descriptors */ + void *bd_base; /* start of DMA descriptors */ + dma_addr_t bd_phys; + void *sts_base; /* start of DMA BD status FIFO */ + dma_addr_t sts_phys; + int sts_size; +}; + +struct tsi721_imsg_ring { + u32 size; + /* VA/PA of data buffers for incoming messages */ + void 
*buf_base; + dma_addr_t buf_phys; + /* VA/PA of circular free buffer list */ + void *imfq_base; + dma_addr_t imfq_phys; + /* VA/PA of Inbound message descriptors */ + void *imd_base; + dma_addr_t imd_phys; + /* Inbound Queue buffer pointers */ + void *imq_base[TSI721_IMSGD_RING_SIZE]; + + u32 rx_slot; + void *dev_id; + u32 fq_wrptr; + u32 desc_rdptr; + spinlock_t lock; +}; + +struct tsi721_omsg_ring { + u32 size; + /* VA/PA of OB Msg descriptors */ + void *omd_base; + dma_addr_t omd_phys; + /* VA/PA of OB Msg data buffers */ + void *omq_base[TSI721_OMSGD_RING_SIZE]; + dma_addr_t omq_phys[TSI721_OMSGD_RING_SIZE]; + /* VA/PA of OB Msg descriptor status FIFO */ + void *sts_base; + dma_addr_t sts_phys; + u32 sts_size; /* # of allocated status entries */ + u32 sts_rdptr; + + u32 tx_slot; + void *dev_id; + u32 wr_count; + spinlock_t lock; +}; + +enum tsi721_flags { + TSI721_USING_MSI = (1 << 0), + TSI721_USING_MSIX = (1 << 1), + TSI721_IMSGID_SET = (1 << 2), +}; + +#ifdef CONFIG_PCI_MSI +/* + * MSI-X Table Entries (0 ... 69) + */ +#define TSI721_MSIX_DMACH_DONE(x) (0 + (x)) +#define TSI721_MSIX_DMACH_INT(x) (8 + (x)) +#define TSI721_MSIX_BDMA_INT 16 +#define TSI721_MSIX_OMSG_DONE(x) (17 + (x)) +#define TSI721_MSIX_OMSG_INT(x) (25 + (x)) +#define TSI721_MSIX_IMSG_DQ_RCV(x) (33 + (x)) +#define TSI721_MSIX_IMSG_INT(x) (41 + (x)) +#define TSI721_MSIX_MSG_INT 49 +#define TSI721_MSIX_SR2PC_IDBQ_RCV(x) (50 + (x)) +#define TSI721_MSIX_SR2PC_CH_INT(x) (58 + (x)) +#define TSI721_MSIX_SR2PC_INT 66 +#define TSI721_MSIX_PC2SR_INT 67 +#define TSI721_MSIX_SRIO_MAC_INT 68 +#define TSI721_MSIX_I2C_INT 69 + +/* MSI-X vector and init table entry indexes */ +enum tsi721_msix_vect { + TSI721_VECT_IDB, + TSI721_VECT_PWRX, /* PW_RX is part of SRIO MAC Interrupt reporting */ + TSI721_VECT_OMB0_DONE, + TSI721_VECT_OMB1_DONE, + TSI721_VECT_OMB2_DONE, + TSI721_VECT_OMB3_DONE, + TSI721_VECT_OMB0_INT, + TSI721_VECT_OMB1_INT, + TSI721_VECT_OMB2_INT, + TSI721_VECT_OMB3_INT, + TSI721_VECT_IMB0_RCV, + TSI721_VECT_IMB1_RCV, + TSI721_VECT_IMB2_RCV, + TSI721_VECT_IMB3_RCV, + TSI721_VECT_IMB0_INT, + TSI721_VECT_IMB1_INT, + TSI721_VECT_IMB2_INT, + TSI721_VECT_IMB3_INT, +#ifdef CONFIG_RAPIDIO_DMA_ENGINE + TSI721_VECT_DMA0_DONE, + TSI721_VECT_DMA1_DONE, + TSI721_VECT_DMA2_DONE, + TSI721_VECT_DMA3_DONE, + TSI721_VECT_DMA4_DONE, + TSI721_VECT_DMA5_DONE, + TSI721_VECT_DMA6_DONE, + TSI721_VECT_DMA7_DONE, + TSI721_VECT_DMA0_INT, + TSI721_VECT_DMA1_INT, + TSI721_VECT_DMA2_INT, + TSI721_VECT_DMA3_INT, + TSI721_VECT_DMA4_INT, + TSI721_VECT_DMA5_INT, + TSI721_VECT_DMA6_INT, + TSI721_VECT_DMA7_INT, +#endif /* CONFIG_RAPIDIO_DMA_ENGINE */ + TSI721_VECT_MAX +}; + +#define IRQ_DEVICE_NAME_MAX 64 + +struct msix_irq { + u16 vector; + char irq_name[IRQ_DEVICE_NAME_MAX]; +}; +#endif /* CONFIG_PCI_MSI */ + +struct tsi721_ib_win_mapping { + struct list_head node; + dma_addr_t lstart; +}; + +struct tsi721_ib_win { + u64 rstart; + u32 size; + dma_addr_t lstart; + bool active; + bool xlat; + struct list_head mappings; +}; + +struct tsi721_obw_bar { + u64 base; + u64 size; + u64 free; +}; + +struct tsi721_ob_win { + u64 base; + u32 size; + u16 destid; + u64 rstart; + bool active; + struct tsi721_obw_bar *pbar; +}; + +struct tsi721_device { + struct pci_dev *pdev; + struct rio_mport mport; + u32 flags; + void __iomem *regs; +#ifdef CONFIG_PCI_MSI + struct msix_irq msix[TSI721_VECT_MAX]; +#endif + /* Doorbells */ + void __iomem *odb_base; + void *idb_base; + dma_addr_t idb_dma; + struct work_struct idb_work; + u32 db_discard_count; + + /* Inbound Port-Write 
*/ + struct work_struct pw_work; + struct kfifo pw_fifo; + spinlock_t pw_fifo_lock; + u32 pw_discard_count; + + /* BDMA Engine */ + struct tsi721_bdma_maint mdma; /* Maintenance rd/wr request channel */ + +#ifdef CONFIG_RAPIDIO_DMA_ENGINE + struct tsi721_bdma_chan bdma[TSI721_DMA_CHNUM]; +#endif + + /* Inbound Messaging */ + int imsg_init[TSI721_IMSG_CHNUM]; + struct tsi721_imsg_ring imsg_ring[TSI721_IMSG_CHNUM]; + + /* Outbound Messaging */ + int omsg_init[TSI721_OMSG_CHNUM]; + struct tsi721_omsg_ring omsg_ring[TSI721_OMSG_CHNUM]; + + /* Inbound Mapping Windows */ + struct tsi721_ib_win ib_win[TSI721_IBWIN_NUM]; + int ibwin_cnt; + + /* Outbound Mapping Windows */ + struct tsi721_obw_bar p2r_bar[2]; + struct tsi721_ob_win ob_win[TSI721_OBWIN_NUM]; + int obwin_cnt; +}; + +#ifdef CONFIG_RAPIDIO_DMA_ENGINE +extern void tsi721_bdma_handler(struct tsi721_bdma_chan *bdma_chan); +extern int tsi721_register_dma(struct tsi721_device *priv); +extern void tsi721_unregister_dma(struct tsi721_device *priv); +extern void tsi721_dma_stop_all(struct tsi721_device *priv); +#else +#define tsi721_dma_stop_all(priv) do {} while (0) +#define tsi721_unregister_dma(priv) do {} while (0) +#endif + +#endif diff --git a/drivers/rapidio/devices/tsi721_dma.c b/drivers/rapidio/devices/tsi721_dma.c new file mode 100644 index 000000000..d375c0205 --- /dev/null +++ b/drivers/rapidio/devices/tsi721_dma.c @@ -0,0 +1,1042 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * DMA Engine support for Tsi721 PCIExpress-to-SRIO bridge + * + * Copyright (c) 2011-2014 Integrated Device Technology, Inc. + * Alexandre Bounine <alexandre.bounine@idt.com> + */ + +#include <linux/io.h> +#include <linux/errno.h> +#include <linux/init.h> +#include <linux/ioport.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/pci.h> +#include <linux/rio.h> +#include <linux/rio_drv.h> +#include <linux/dma-mapping.h> +#include <linux/interrupt.h> +#include <linux/kfifo.h> +#include <linux/sched.h> +#include <linux/delay.h> +#include "../../dma/dmaengine.h" + +#include "tsi721.h" + +#ifdef CONFIG_PCI_MSI +static irqreturn_t tsi721_bdma_msix(int irq, void *ptr); +#endif +static int tsi721_submit_sg(struct tsi721_tx_desc *desc); + +static unsigned int dma_desc_per_channel = 128; +module_param(dma_desc_per_channel, uint, S_IRUGO); +MODULE_PARM_DESC(dma_desc_per_channel, + "Number of DMA descriptors per channel (default: 128)"); + +static unsigned int dma_txqueue_sz = 16; +module_param(dma_txqueue_sz, uint, S_IRUGO); +MODULE_PARM_DESC(dma_txqueue_sz, + "DMA Transactions Queue Size (default: 16)"); + +static u8 dma_sel = 0x7f; +module_param(dma_sel, byte, S_IRUGO); +MODULE_PARM_DESC(dma_sel, + "DMA Channel Selection Mask (default: 0x7f = all)"); + +static inline struct tsi721_bdma_chan *to_tsi721_chan(struct dma_chan *chan) +{ + return container_of(chan, struct tsi721_bdma_chan, dchan); +} + +static inline struct tsi721_device *to_tsi721(struct dma_device *ddev) +{ + return container_of(ddev, struct rio_mport, dma)->priv; +} + +static inline +struct tsi721_tx_desc *to_tsi721_desc(struct dma_async_tx_descriptor *txd) +{ + return container_of(txd, struct tsi721_tx_desc, txd); +} + +static int tsi721_bdma_ch_init(struct tsi721_bdma_chan *bdma_chan, int bd_num) +{ + struct tsi721_dma_desc *bd_ptr; + struct device *dev = bdma_chan->dchan.device->dev; + u64 *sts_ptr; + dma_addr_t bd_phys; + dma_addr_t sts_phys; + int sts_size; +#ifdef CONFIG_PCI_MSI + struct tsi721_device *priv = to_tsi721(bdma_chan->dchan.device); +#endif + + 
tsi_debug(DMA, &bdma_chan->dchan.dev->device, "DMAC%d", bdma_chan->id); + + /* + * Allocate space for DMA descriptors + * (add an extra element for link descriptor) + */ + bd_ptr = dma_alloc_coherent(dev, + (bd_num + 1) * sizeof(struct tsi721_dma_desc), + &bd_phys, GFP_ATOMIC); + if (!bd_ptr) + return -ENOMEM; + + bdma_chan->bd_num = bd_num; + bdma_chan->bd_phys = bd_phys; + bdma_chan->bd_base = bd_ptr; + + tsi_debug(DMA, &bdma_chan->dchan.dev->device, + "DMAC%d descriptors @ %p (phys = %pad)", + bdma_chan->id, bd_ptr, &bd_phys); + + /* Allocate space for descriptor status FIFO */ + sts_size = ((bd_num + 1) >= TSI721_DMA_MINSTSSZ) ? + (bd_num + 1) : TSI721_DMA_MINSTSSZ; + sts_size = roundup_pow_of_two(sts_size); + sts_ptr = dma_alloc_coherent(dev, + sts_size * sizeof(struct tsi721_dma_sts), + &sts_phys, GFP_ATOMIC); + if (!sts_ptr) { + /* Free space allocated for DMA descriptors */ + dma_free_coherent(dev, + (bd_num + 1) * sizeof(struct tsi721_dma_desc), + bd_ptr, bd_phys); + bdma_chan->bd_base = NULL; + return -ENOMEM; + } + + bdma_chan->sts_phys = sts_phys; + bdma_chan->sts_base = sts_ptr; + bdma_chan->sts_size = sts_size; + + tsi_debug(DMA, &bdma_chan->dchan.dev->device, + "DMAC%d desc status FIFO @ %p (phys = %pad) size=0x%x", + bdma_chan->id, sts_ptr, &sts_phys, sts_size); + + /* Initialize DMA descriptors ring using added link descriptor */ + bd_ptr[bd_num].type_id = cpu_to_le32(DTYPE3 << 29); + bd_ptr[bd_num].next_lo = cpu_to_le32((u64)bd_phys & + TSI721_DMAC_DPTRL_MASK); + bd_ptr[bd_num].next_hi = cpu_to_le32((u64)bd_phys >> 32); + + /* Setup DMA descriptor pointers */ + iowrite32(((u64)bd_phys >> 32), + bdma_chan->regs + TSI721_DMAC_DPTRH); + iowrite32(((u64)bd_phys & TSI721_DMAC_DPTRL_MASK), + bdma_chan->regs + TSI721_DMAC_DPTRL); + + /* Setup descriptor status FIFO */ + iowrite32(((u64)sts_phys >> 32), + bdma_chan->regs + TSI721_DMAC_DSBH); + iowrite32(((u64)sts_phys & TSI721_DMAC_DSBL_MASK), + bdma_chan->regs + TSI721_DMAC_DSBL); + iowrite32(TSI721_DMAC_DSSZ_SIZE(sts_size), + bdma_chan->regs + TSI721_DMAC_DSSZ); + + /* Clear interrupt bits */ + iowrite32(TSI721_DMAC_INT_ALL, + bdma_chan->regs + TSI721_DMAC_INT); + + ioread32(bdma_chan->regs + TSI721_DMAC_INT); + +#ifdef CONFIG_PCI_MSI + /* Request interrupt service if we are in MSI-X mode */ + if (priv->flags & TSI721_USING_MSIX) { + int rc, idx; + + idx = TSI721_VECT_DMA0_DONE + bdma_chan->id; + + rc = request_irq(priv->msix[idx].vector, tsi721_bdma_msix, 0, + priv->msix[idx].irq_name, (void *)bdma_chan); + + if (rc) { + tsi_debug(DMA, &bdma_chan->dchan.dev->device, + "Unable to get MSI-X for DMAC%d-DONE", + bdma_chan->id); + goto err_out; + } + + idx = TSI721_VECT_DMA0_INT + bdma_chan->id; + + rc = request_irq(priv->msix[idx].vector, tsi721_bdma_msix, 0, + priv->msix[idx].irq_name, (void *)bdma_chan); + + if (rc) { + tsi_debug(DMA, &bdma_chan->dchan.dev->device, + "Unable to get MSI-X for DMAC%d-INT", + bdma_chan->id); + free_irq( + priv->msix[TSI721_VECT_DMA0_DONE + + bdma_chan->id].vector, + (void *)bdma_chan); + } + +err_out: + if (rc) { + /* Free space allocated for DMA descriptors */ + dma_free_coherent(dev, + (bd_num + 1) * sizeof(struct tsi721_dma_desc), + bd_ptr, bd_phys); + bdma_chan->bd_base = NULL; + + /* Free space allocated for status descriptors */ + dma_free_coherent(dev, + sts_size * sizeof(struct tsi721_dma_sts), + sts_ptr, sts_phys); + bdma_chan->sts_base = NULL; + + return -EIO; + } + } +#endif /* CONFIG_PCI_MSI */ + + /* Toggle DMA channel initialization */ + iowrite32(TSI721_DMAC_CTL_INIT, bdma_chan->regs 
+ TSI721_DMAC_CTL); + ioread32(bdma_chan->regs + TSI721_DMAC_CTL); + bdma_chan->wr_count = bdma_chan->wr_count_next = 0; + bdma_chan->sts_rdptr = 0; + udelay(10); + + return 0; +} + +static int tsi721_bdma_ch_free(struct tsi721_bdma_chan *bdma_chan) +{ + u32 ch_stat; +#ifdef CONFIG_PCI_MSI + struct tsi721_device *priv = to_tsi721(bdma_chan->dchan.device); +#endif + + if (!bdma_chan->bd_base) + return 0; + + /* Check if DMA channel still running */ + ch_stat = ioread32(bdma_chan->regs + TSI721_DMAC_STS); + if (ch_stat & TSI721_DMAC_STS_RUN) + return -EFAULT; + + /* Put DMA channel into init state */ + iowrite32(TSI721_DMAC_CTL_INIT, bdma_chan->regs + TSI721_DMAC_CTL); + +#ifdef CONFIG_PCI_MSI + if (priv->flags & TSI721_USING_MSIX) { + free_irq(priv->msix[TSI721_VECT_DMA0_DONE + + bdma_chan->id].vector, (void *)bdma_chan); + free_irq(priv->msix[TSI721_VECT_DMA0_INT + + bdma_chan->id].vector, (void *)bdma_chan); + } +#endif /* CONFIG_PCI_MSI */ + + /* Free space allocated for DMA descriptors */ + dma_free_coherent(bdma_chan->dchan.device->dev, + (bdma_chan->bd_num + 1) * sizeof(struct tsi721_dma_desc), + bdma_chan->bd_base, bdma_chan->bd_phys); + bdma_chan->bd_base = NULL; + + /* Free space allocated for status FIFO */ + dma_free_coherent(bdma_chan->dchan.device->dev, + bdma_chan->sts_size * sizeof(struct tsi721_dma_sts), + bdma_chan->sts_base, bdma_chan->sts_phys); + bdma_chan->sts_base = NULL; + return 0; +} + +static void +tsi721_bdma_interrupt_enable(struct tsi721_bdma_chan *bdma_chan, int enable) +{ + if (enable) { + /* Clear pending BDMA channel interrupts */ + iowrite32(TSI721_DMAC_INT_ALL, + bdma_chan->regs + TSI721_DMAC_INT); + ioread32(bdma_chan->regs + TSI721_DMAC_INT); + /* Enable BDMA channel interrupts */ + iowrite32(TSI721_DMAC_INT_ALL, + bdma_chan->regs + TSI721_DMAC_INTE); + } else { + /* Disable BDMA channel interrupts */ + iowrite32(0, bdma_chan->regs + TSI721_DMAC_INTE); + /* Clear pending BDMA channel interrupts */ + iowrite32(TSI721_DMAC_INT_ALL, + bdma_chan->regs + TSI721_DMAC_INT); + } + +} + +static bool tsi721_dma_is_idle(struct tsi721_bdma_chan *bdma_chan) +{ + u32 sts; + + sts = ioread32(bdma_chan->regs + TSI721_DMAC_STS); + return ((sts & TSI721_DMAC_STS_RUN) == 0); +} + +void tsi721_bdma_handler(struct tsi721_bdma_chan *bdma_chan) +{ + /* Disable BDMA channel interrupts */ + iowrite32(0, bdma_chan->regs + TSI721_DMAC_INTE); + if (bdma_chan->active) + tasklet_hi_schedule(&bdma_chan->tasklet); +} + +#ifdef CONFIG_PCI_MSI +/** + * tsi721_omsg_msix - MSI-X interrupt handler for BDMA channels + * @irq: Linux interrupt number + * @ptr: Pointer to interrupt-specific data (BDMA channel structure) + * + * Handles BDMA channel interrupts signaled using MSI-X. 
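+ *
+ * Returns: %IRQ_HANDLED unconditionally; actual servicing is deferred to the
+ * channel tasklet.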
+ */ +static irqreturn_t tsi721_bdma_msix(int irq, void *ptr) +{ + struct tsi721_bdma_chan *bdma_chan = ptr; + + if (bdma_chan->active) + tasklet_hi_schedule(&bdma_chan->tasklet); + return IRQ_HANDLED; +} +#endif /* CONFIG_PCI_MSI */ + +/* Must be called with the spinlock held */ +static void tsi721_start_dma(struct tsi721_bdma_chan *bdma_chan) +{ + if (!tsi721_dma_is_idle(bdma_chan)) { + tsi_err(&bdma_chan->dchan.dev->device, + "DMAC%d Attempt to start non-idle channel", + bdma_chan->id); + return; + } + + if (bdma_chan->wr_count == bdma_chan->wr_count_next) { + tsi_err(&bdma_chan->dchan.dev->device, + "DMAC%d Attempt to start DMA with no BDs ready %d", + bdma_chan->id, task_pid_nr(current)); + return; + } + + tsi_debug(DMA, &bdma_chan->dchan.dev->device, "DMAC%d (wrc=%d) %d", + bdma_chan->id, bdma_chan->wr_count_next, + task_pid_nr(current)); + + iowrite32(bdma_chan->wr_count_next, + bdma_chan->regs + TSI721_DMAC_DWRCNT); + ioread32(bdma_chan->regs + TSI721_DMAC_DWRCNT); + + bdma_chan->wr_count = bdma_chan->wr_count_next; +} + +static int +tsi721_desc_fill_init(struct tsi721_tx_desc *desc, + struct tsi721_dma_desc *bd_ptr, + struct scatterlist *sg, u32 sys_size) +{ + u64 rio_addr; + + if (!bd_ptr) + return -EINVAL; + + /* Initialize DMA descriptor */ + bd_ptr->type_id = cpu_to_le32((DTYPE1 << 29) | + (desc->rtype << 19) | desc->destid); + bd_ptr->bcount = cpu_to_le32(((desc->rio_addr & 0x3) << 30) | + (sys_size << 26)); + rio_addr = (desc->rio_addr >> 2) | + ((u64)(desc->rio_addr_u & 0x3) << 62); + bd_ptr->raddr_lo = cpu_to_le32(rio_addr & 0xffffffff); + bd_ptr->raddr_hi = cpu_to_le32(rio_addr >> 32); + bd_ptr->t1.bufptr_lo = cpu_to_le32( + (u64)sg_dma_address(sg) & 0xffffffff); + bd_ptr->t1.bufptr_hi = cpu_to_le32((u64)sg_dma_address(sg) >> 32); + bd_ptr->t1.s_dist = 0; + bd_ptr->t1.s_size = 0; + + return 0; +} + +static int +tsi721_desc_fill_end(struct tsi721_dma_desc *bd_ptr, u32 bcount, bool interrupt) +{ + if (!bd_ptr) + return -EINVAL; + + /* Update DMA descriptor */ + if (interrupt) + bd_ptr->type_id |= cpu_to_le32(TSI721_DMAD_IOF); + bd_ptr->bcount |= cpu_to_le32(bcount & TSI721_DMAD_BCOUNT1); + + return 0; +} + +static void tsi721_dma_tx_err(struct tsi721_bdma_chan *bdma_chan, + struct tsi721_tx_desc *desc) +{ + struct dma_async_tx_descriptor *txd = &desc->txd; + dma_async_tx_callback callback = txd->callback; + void *param = txd->callback_param; + + list_move(&desc->desc_node, &bdma_chan->free_list); + + if (callback) + callback(param); +} + +static void tsi721_clr_stat(struct tsi721_bdma_chan *bdma_chan) +{ + u32 srd_ptr; + u64 *sts_ptr; + int i, j; + + /* Check and clear descriptor status FIFO entries */ + srd_ptr = bdma_chan->sts_rdptr; + sts_ptr = bdma_chan->sts_base; + j = srd_ptr * 8; + while (sts_ptr[j]) { + for (i = 0; i < 8 && sts_ptr[j]; i++, j++) + sts_ptr[j] = 0; + + ++srd_ptr; + srd_ptr %= bdma_chan->sts_size; + j = srd_ptr * 8; + } + + iowrite32(srd_ptr, bdma_chan->regs + TSI721_DMAC_DSRP); + bdma_chan->sts_rdptr = srd_ptr; +} + +/* Must be called with the channel spinlock held */ +static int tsi721_submit_sg(struct tsi721_tx_desc *desc) +{ + struct dma_chan *dchan = desc->txd.chan; + struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(dchan); + u32 sys_size; + u64 rio_addr; + dma_addr_t next_addr; + u32 bcount; + struct scatterlist *sg; + unsigned int i; + int err = 0; + struct tsi721_dma_desc *bd_ptr = NULL; + u32 idx, rd_idx; + u32 add_count = 0; + struct device *ch_dev = &dchan->dev->device; + + if (!tsi721_dma_is_idle(bdma_chan)) { + tsi_err(ch_dev, "DMAC%d 
ERR: Attempt to use non-idle channel", + bdma_chan->id); + return -EIO; + } + + /* + * Fill DMA channel's hardware buffer descriptors. + * (NOTE: RapidIO destination address is limited to 64 bits for now) + */ + rio_addr = desc->rio_addr; + next_addr = -1; + bcount = 0; + sys_size = dma_to_mport(dchan->device)->sys_size; + + rd_idx = ioread32(bdma_chan->regs + TSI721_DMAC_DRDCNT); + rd_idx %= (bdma_chan->bd_num + 1); + + idx = bdma_chan->wr_count_next % (bdma_chan->bd_num + 1); + if (idx == bdma_chan->bd_num) { + /* wrap around link descriptor */ + idx = 0; + add_count++; + } + + tsi_debug(DMA, ch_dev, "DMAC%d BD ring status: rdi=%d wri=%d", + bdma_chan->id, rd_idx, idx); + + for_each_sg(desc->sg, sg, desc->sg_len, i) { + + tsi_debug(DMAV, ch_dev, "DMAC%d sg%d/%d addr: 0x%llx len: %d", + bdma_chan->id, i, desc->sg_len, + (unsigned long long)sg_dma_address(sg), sg_dma_len(sg)); + + if (sg_dma_len(sg) > TSI721_BDMA_MAX_BCOUNT) { + tsi_err(ch_dev, "DMAC%d SG entry %d is too large", + bdma_chan->id, i); + err = -EINVAL; + break; + } + + /* + * If this sg entry forms contiguous block with previous one, + * try to merge it into existing DMA descriptor + */ + if (next_addr == sg_dma_address(sg) && + bcount + sg_dma_len(sg) <= TSI721_BDMA_MAX_BCOUNT) { + /* Adjust byte count of the descriptor */ + bcount += sg_dma_len(sg); + goto entry_done; + } else if (next_addr != -1) { + /* Finalize descriptor using total byte count value */ + tsi721_desc_fill_end(bd_ptr, bcount, 0); + tsi_debug(DMAV, ch_dev, "DMAC%d prev desc final len: %d", + bdma_chan->id, bcount); + } + + desc->rio_addr = rio_addr; + + if (i && idx == rd_idx) { + tsi_debug(DMAV, ch_dev, + "DMAC%d HW descriptor ring is full @ %d", + bdma_chan->id, i); + desc->sg = sg; + desc->sg_len -= i; + break; + } + + bd_ptr = &((struct tsi721_dma_desc *)bdma_chan->bd_base)[idx]; + err = tsi721_desc_fill_init(desc, bd_ptr, sg, sys_size); + if (err) { + tsi_err(ch_dev, "Failed to build desc: err=%d", err); + break; + } + + tsi_debug(DMAV, ch_dev, "DMAC%d bd_ptr = %p did=%d raddr=0x%llx", + bdma_chan->id, bd_ptr, desc->destid, desc->rio_addr); + + next_addr = sg_dma_address(sg); + bcount = sg_dma_len(sg); + + add_count++; + if (++idx == bdma_chan->bd_num) { + /* wrap around link descriptor */ + idx = 0; + add_count++; + } + +entry_done: + if (sg_is_last(sg)) { + tsi721_desc_fill_end(bd_ptr, bcount, 0); + tsi_debug(DMAV, ch_dev, + "DMAC%d last desc final len: %d", + bdma_chan->id, bcount); + desc->sg_len = 0; + } else { + rio_addr += sg_dma_len(sg); + next_addr += sg_dma_len(sg); + } + } + + if (!err) + bdma_chan->wr_count_next += add_count; + + return err; +} + +static void tsi721_advance_work(struct tsi721_bdma_chan *bdma_chan, + struct tsi721_tx_desc *desc) +{ + int err; + + tsi_debug(DMA, &bdma_chan->dchan.dev->device, "DMAC%d", bdma_chan->id); + + if (!tsi721_dma_is_idle(bdma_chan)) + return; + + /* + * If there is no data transfer in progress, fetch new descriptor from + * the pending queue. 
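+ * The fetched descriptor becomes the channel's active transaction before the
+ * engine is (re)started.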
+ */ + if (!desc && !bdma_chan->active_tx && !list_empty(&bdma_chan->queue)) { + desc = list_first_entry(&bdma_chan->queue, + struct tsi721_tx_desc, desc_node); + list_del_init((&desc->desc_node)); + bdma_chan->active_tx = desc; + } + + if (desc) { + err = tsi721_submit_sg(desc); + if (!err) + tsi721_start_dma(bdma_chan); + else { + tsi721_dma_tx_err(bdma_chan, desc); + tsi_debug(DMA, &bdma_chan->dchan.dev->device, + "DMAC%d ERR: tsi721_submit_sg failed with err=%d", + bdma_chan->id, err); + } + } + + tsi_debug(DMA, &bdma_chan->dchan.dev->device, "DMAC%d Exit", + bdma_chan->id); +} + +static void tsi721_dma_tasklet(unsigned long data) +{ + struct tsi721_bdma_chan *bdma_chan = (struct tsi721_bdma_chan *)data; + u32 dmac_int, dmac_sts; + + dmac_int = ioread32(bdma_chan->regs + TSI721_DMAC_INT); + tsi_debug(DMA, &bdma_chan->dchan.dev->device, "DMAC%d_INT = 0x%x", + bdma_chan->id, dmac_int); + /* Clear channel interrupts */ + iowrite32(dmac_int, bdma_chan->regs + TSI721_DMAC_INT); + + if (dmac_int & TSI721_DMAC_INT_ERR) { + int i = 10000; + struct tsi721_tx_desc *desc; + + desc = bdma_chan->active_tx; + dmac_sts = ioread32(bdma_chan->regs + TSI721_DMAC_STS); + tsi_err(&bdma_chan->dchan.dev->device, + "DMAC%d_STS = 0x%x did=%d raddr=0x%llx", + bdma_chan->id, dmac_sts, desc->destid, desc->rio_addr); + + /* Re-initialize DMA channel if possible */ + + if ((dmac_sts & TSI721_DMAC_STS_ABORT) == 0) + goto err_out; + + tsi721_clr_stat(bdma_chan); + + spin_lock(&bdma_chan->lock); + + /* Put DMA channel into init state */ + iowrite32(TSI721_DMAC_CTL_INIT, + bdma_chan->regs + TSI721_DMAC_CTL); + do { + udelay(1); + dmac_sts = ioread32(bdma_chan->regs + TSI721_DMAC_STS); + i--; + } while ((dmac_sts & TSI721_DMAC_STS_ABORT) && i); + + if (dmac_sts & TSI721_DMAC_STS_ABORT) { + tsi_err(&bdma_chan->dchan.dev->device, + "Failed to re-initiate DMAC%d", bdma_chan->id); + spin_unlock(&bdma_chan->lock); + goto err_out; + } + + /* Setup DMA descriptor pointers */ + iowrite32(((u64)bdma_chan->bd_phys >> 32), + bdma_chan->regs + TSI721_DMAC_DPTRH); + iowrite32(((u64)bdma_chan->bd_phys & TSI721_DMAC_DPTRL_MASK), + bdma_chan->regs + TSI721_DMAC_DPTRL); + + /* Setup descriptor status FIFO */ + iowrite32(((u64)bdma_chan->sts_phys >> 32), + bdma_chan->regs + TSI721_DMAC_DSBH); + iowrite32(((u64)bdma_chan->sts_phys & TSI721_DMAC_DSBL_MASK), + bdma_chan->regs + TSI721_DMAC_DSBL); + iowrite32(TSI721_DMAC_DSSZ_SIZE(bdma_chan->sts_size), + bdma_chan->regs + TSI721_DMAC_DSSZ); + + /* Clear interrupt bits */ + iowrite32(TSI721_DMAC_INT_ALL, + bdma_chan->regs + TSI721_DMAC_INT); + + ioread32(bdma_chan->regs + TSI721_DMAC_INT); + + bdma_chan->wr_count = bdma_chan->wr_count_next = 0; + bdma_chan->sts_rdptr = 0; + udelay(10); + + desc = bdma_chan->active_tx; + desc->status = DMA_ERROR; + dma_cookie_complete(&desc->txd); + list_add(&desc->desc_node, &bdma_chan->free_list); + bdma_chan->active_tx = NULL; + if (bdma_chan->active) + tsi721_advance_work(bdma_chan, NULL); + spin_unlock(&bdma_chan->lock); + } + + if (dmac_int & TSI721_DMAC_INT_STFULL) { + tsi_err(&bdma_chan->dchan.dev->device, + "DMAC%d descriptor status FIFO is full", + bdma_chan->id); + } + + if (dmac_int & (TSI721_DMAC_INT_DONE | TSI721_DMAC_INT_IOFDONE)) { + struct tsi721_tx_desc *desc; + + tsi721_clr_stat(bdma_chan); + spin_lock(&bdma_chan->lock); + desc = bdma_chan->active_tx; + + if (desc->sg_len == 0) { + dma_async_tx_callback callback = NULL; + void *param = NULL; + + desc->status = DMA_COMPLETE; + dma_cookie_complete(&desc->txd); + if (desc->txd.flags & 
DMA_PREP_INTERRUPT) { + callback = desc->txd.callback; + param = desc->txd.callback_param; + } + list_add(&desc->desc_node, &bdma_chan->free_list); + bdma_chan->active_tx = NULL; + if (bdma_chan->active) + tsi721_advance_work(bdma_chan, NULL); + spin_unlock(&bdma_chan->lock); + if (callback) + callback(param); + } else { + if (bdma_chan->active) + tsi721_advance_work(bdma_chan, + bdma_chan->active_tx); + spin_unlock(&bdma_chan->lock); + } + } +err_out: + /* Re-Enable BDMA channel interrupts */ + iowrite32(TSI721_DMAC_INT_ALL, bdma_chan->regs + TSI721_DMAC_INTE); +} + +static dma_cookie_t tsi721_tx_submit(struct dma_async_tx_descriptor *txd) +{ + struct tsi721_tx_desc *desc = to_tsi721_desc(txd); + struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(txd->chan); + dma_cookie_t cookie; + + /* Check if the descriptor is detached from any lists */ + if (!list_empty(&desc->desc_node)) { + tsi_err(&bdma_chan->dchan.dev->device, + "DMAC%d wrong state of descriptor %p", + bdma_chan->id, txd); + return -EIO; + } + + spin_lock_bh(&bdma_chan->lock); + + if (!bdma_chan->active) { + spin_unlock_bh(&bdma_chan->lock); + return -ENODEV; + } + + cookie = dma_cookie_assign(txd); + desc->status = DMA_IN_PROGRESS; + list_add_tail(&desc->desc_node, &bdma_chan->queue); + tsi721_advance_work(bdma_chan, NULL); + + spin_unlock_bh(&bdma_chan->lock); + return cookie; +} + +static int tsi721_alloc_chan_resources(struct dma_chan *dchan) +{ + struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(dchan); + struct tsi721_tx_desc *desc; + int i; + + tsi_debug(DMA, &dchan->dev->device, "DMAC%d", bdma_chan->id); + + if (bdma_chan->bd_base) + return dma_txqueue_sz; + + /* Initialize BDMA channel */ + if (tsi721_bdma_ch_init(bdma_chan, dma_desc_per_channel)) { + tsi_err(&dchan->dev->device, "Unable to initialize DMAC%d", + bdma_chan->id); + return -ENODEV; + } + + /* Allocate queue of transaction descriptors */ + desc = kcalloc(dma_txqueue_sz, sizeof(struct tsi721_tx_desc), + GFP_ATOMIC); + if (!desc) { + tsi721_bdma_ch_free(bdma_chan); + return -ENOMEM; + } + + bdma_chan->tx_desc = desc; + + for (i = 0; i < dma_txqueue_sz; i++) { + dma_async_tx_descriptor_init(&desc[i].txd, dchan); + desc[i].txd.tx_submit = tsi721_tx_submit; + desc[i].txd.flags = DMA_CTRL_ACK; + list_add(&desc[i].desc_node, &bdma_chan->free_list); + } + + dma_cookie_init(dchan); + + bdma_chan->active = true; + tsi721_bdma_interrupt_enable(bdma_chan, 1); + + return dma_txqueue_sz; +} + +static void tsi721_sync_dma_irq(struct tsi721_bdma_chan *bdma_chan) +{ + struct tsi721_device *priv = to_tsi721(bdma_chan->dchan.device); + +#ifdef CONFIG_PCI_MSI + if (priv->flags & TSI721_USING_MSIX) { + synchronize_irq(priv->msix[TSI721_VECT_DMA0_DONE + + bdma_chan->id].vector); + synchronize_irq(priv->msix[TSI721_VECT_DMA0_INT + + bdma_chan->id].vector); + } else +#endif + synchronize_irq(priv->pdev->irq); +} + +static void tsi721_free_chan_resources(struct dma_chan *dchan) +{ + struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(dchan); + + tsi_debug(DMA, &dchan->dev->device, "DMAC%d", bdma_chan->id); + + if (!bdma_chan->bd_base) + return; + + tsi721_bdma_interrupt_enable(bdma_chan, 0); + bdma_chan->active = false; + tsi721_sync_dma_irq(bdma_chan); + tasklet_kill(&bdma_chan->tasklet); + INIT_LIST_HEAD(&bdma_chan->free_list); + kfree(bdma_chan->tx_desc); + tsi721_bdma_ch_free(bdma_chan); +} + +static +enum dma_status tsi721_tx_status(struct dma_chan *dchan, dma_cookie_t cookie, + struct dma_tx_state *txstate) +{ + struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(dchan); + 
enum dma_status status; + + spin_lock_bh(&bdma_chan->lock); + status = dma_cookie_status(dchan, cookie, txstate); + spin_unlock_bh(&bdma_chan->lock); + return status; +} + +static void tsi721_issue_pending(struct dma_chan *dchan) +{ + struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(dchan); + + tsi_debug(DMA, &dchan->dev->device, "DMAC%d", bdma_chan->id); + + spin_lock_bh(&bdma_chan->lock); + if (tsi721_dma_is_idle(bdma_chan) && bdma_chan->active) { + tsi721_advance_work(bdma_chan, NULL); + } + spin_unlock_bh(&bdma_chan->lock); +} + +static +struct dma_async_tx_descriptor *tsi721_prep_rio_sg(struct dma_chan *dchan, + struct scatterlist *sgl, unsigned int sg_len, + enum dma_transfer_direction dir, unsigned long flags, + void *tinfo) +{ + struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(dchan); + struct tsi721_tx_desc *desc; + struct rio_dma_ext *rext = tinfo; + enum dma_rtype rtype; + struct dma_async_tx_descriptor *txd = NULL; + + if (!sgl || !sg_len) { + tsi_err(&dchan->dev->device, "DMAC%d No SG list", + bdma_chan->id); + return ERR_PTR(-EINVAL); + } + + tsi_debug(DMA, &dchan->dev->device, "DMAC%d %s", bdma_chan->id, + (dir == DMA_DEV_TO_MEM)?"READ":"WRITE"); + + if (dir == DMA_DEV_TO_MEM) + rtype = NREAD; + else if (dir == DMA_MEM_TO_DEV) { + switch (rext->wr_type) { + case RDW_ALL_NWRITE: + rtype = ALL_NWRITE; + break; + case RDW_ALL_NWRITE_R: + rtype = ALL_NWRITE_R; + break; + case RDW_LAST_NWRITE_R: + default: + rtype = LAST_NWRITE_R; + break; + } + } else { + tsi_err(&dchan->dev->device, + "DMAC%d Unsupported DMA direction option", + bdma_chan->id); + return ERR_PTR(-EINVAL); + } + + spin_lock_bh(&bdma_chan->lock); + + if (!list_empty(&bdma_chan->free_list)) { + desc = list_first_entry(&bdma_chan->free_list, + struct tsi721_tx_desc, desc_node); + list_del_init(&desc->desc_node); + desc->destid = rext->destid; + desc->rio_addr = rext->rio_addr; + desc->rio_addr_u = 0; + desc->rtype = rtype; + desc->sg_len = sg_len; + desc->sg = sgl; + txd = &desc->txd; + txd->flags = flags; + } + + spin_unlock_bh(&bdma_chan->lock); + + if (!txd) { + tsi_debug(DMA, &dchan->dev->device, + "DMAC%d free TXD is not available", bdma_chan->id); + return ERR_PTR(-EBUSY); + } + + return txd; +} + +static int tsi721_terminate_all(struct dma_chan *dchan) +{ + struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(dchan); + struct tsi721_tx_desc *desc, *_d; + LIST_HEAD(list); + + tsi_debug(DMA, &dchan->dev->device, "DMAC%d", bdma_chan->id); + + spin_lock_bh(&bdma_chan->lock); + + bdma_chan->active = false; + + while (!tsi721_dma_is_idle(bdma_chan)) { + + udelay(5); +#if (0) + /* make sure to stop the transfer */ + iowrite32(TSI721_DMAC_CTL_SUSP, + bdma_chan->regs + TSI721_DMAC_CTL); + + /* Wait until DMA channel stops */ + do { + dmac_int = ioread32(bdma_chan->regs + TSI721_DMAC_INT); + } while ((dmac_int & TSI721_DMAC_INT_SUSP) == 0); +#endif + } + + if (bdma_chan->active_tx) + list_add(&bdma_chan->active_tx->desc_node, &list); + list_splice_init(&bdma_chan->queue, &list); + + list_for_each_entry_safe(desc, _d, &list, desc_node) + tsi721_dma_tx_err(bdma_chan, desc); + + spin_unlock_bh(&bdma_chan->lock); + + return 0; +} + +static void tsi721_dma_stop(struct tsi721_bdma_chan *bdma_chan) +{ + if (!bdma_chan->active) + return; + spin_lock_bh(&bdma_chan->lock); + if (!tsi721_dma_is_idle(bdma_chan)) { + int timeout = 100000; + + /* stop the transfer in progress */ + iowrite32(TSI721_DMAC_CTL_SUSP, + bdma_chan->regs + TSI721_DMAC_CTL); + + /* Wait until DMA channel stops */ + while (!tsi721_dma_is_idle(bdma_chan) 
&& --timeout) + udelay(1); + } + + spin_unlock_bh(&bdma_chan->lock); +} + +void tsi721_dma_stop_all(struct tsi721_device *priv) +{ + int i; + + for (i = 0; i < TSI721_DMA_MAXCH; i++) { + if ((i != TSI721_DMACH_MAINT) && (dma_sel & (1 << i))) + tsi721_dma_stop(&priv->bdma[i]); + } +} + +int tsi721_register_dma(struct tsi721_device *priv) +{ + int i; + int nr_channels = 0; + int err; + struct rio_mport *mport = &priv->mport; + + INIT_LIST_HEAD(&mport->dma.channels); + + for (i = 0; i < TSI721_DMA_MAXCH; i++) { + struct tsi721_bdma_chan *bdma_chan = &priv->bdma[i]; + + if ((i == TSI721_DMACH_MAINT) || (dma_sel & (1 << i)) == 0) + continue; + + bdma_chan->regs = priv->regs + TSI721_DMAC_BASE(i); + + bdma_chan->dchan.device = &mport->dma; + bdma_chan->dchan.cookie = 1; + bdma_chan->dchan.chan_id = i; + bdma_chan->id = i; + bdma_chan->active = false; + + spin_lock_init(&bdma_chan->lock); + + bdma_chan->active_tx = NULL; + INIT_LIST_HEAD(&bdma_chan->queue); + INIT_LIST_HEAD(&bdma_chan->free_list); + + tasklet_init(&bdma_chan->tasklet, tsi721_dma_tasklet, + (unsigned long)bdma_chan); + list_add_tail(&bdma_chan->dchan.device_node, + &mport->dma.channels); + nr_channels++; + } + + mport->dma.chancnt = nr_channels; + dma_cap_zero(mport->dma.cap_mask); + dma_cap_set(DMA_PRIVATE, mport->dma.cap_mask); + dma_cap_set(DMA_SLAVE, mport->dma.cap_mask); + + mport->dma.dev = &priv->pdev->dev; + mport->dma.device_alloc_chan_resources = tsi721_alloc_chan_resources; + mport->dma.device_free_chan_resources = tsi721_free_chan_resources; + mport->dma.device_tx_status = tsi721_tx_status; + mport->dma.device_issue_pending = tsi721_issue_pending; + mport->dma.device_prep_slave_sg = tsi721_prep_rio_sg; + mport->dma.device_terminate_all = tsi721_terminate_all; + + err = dma_async_device_register(&mport->dma); + if (err) + tsi_err(&priv->pdev->dev, "Failed to register DMA device"); + + return err; +} + +void tsi721_unregister_dma(struct tsi721_device *priv) +{ + struct rio_mport *mport = &priv->mport; + struct dma_chan *chan, *_c; + struct tsi721_bdma_chan *bdma_chan; + + tsi721_dma_stop_all(priv); + dma_async_device_unregister(&mport->dma); + + list_for_each_entry_safe(chan, _c, &mport->dma.channels, + device_node) { + bdma_chan = to_tsi721_chan(chan); + if (bdma_chan->active) { + tsi721_bdma_interrupt_enable(bdma_chan, 0); + bdma_chan->active = false; + tsi721_sync_dma_irq(bdma_chan); + tasklet_kill(&bdma_chan->tasklet); + INIT_LIST_HEAD(&bdma_chan->free_list); + kfree(bdma_chan->tx_desc); + tsi721_bdma_ch_free(bdma_chan); + } + + list_del(&chan->device_node); + } +} diff --git a/drivers/rapidio/rio-access.c b/drivers/rapidio/rio-access.c new file mode 100644 index 000000000..f9e10647f --- /dev/null +++ b/drivers/rapidio/rio-access.c @@ -0,0 +1,143 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * RapidIO configuration space access support + * + * Copyright 2005 MontaVista Software, Inc. + * Matt Porter <mporter@kernel.crashing.org> + */ + +#include <linux/rio.h> +#include <linux/module.h> + +#include <linux/rio_drv.h> + +/* + * Wrappers for all RIO configuration access functions. They just check + * alignment and call the low-level functions pointed to by rio_mport->ops. 
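+ *
+ * Minimal usage sketch (illustrative only; mport, destid and hopcount are
+ * assumed to come from the caller's context):
+ *
+ *	u32 result;
+ *
+ *	if (rio_mport_read_config_32(mport, destid, hopcount,
+ *				     RIO_PEF_CAR, &result))
+ *		return -EIO;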
+ */ + +#define RIO_8_BAD 0 +#define RIO_16_BAD (offset & 1) +#define RIO_32_BAD (offset & 3) + +/** + * RIO_LOP_READ - Generate rio_local_read_config_* functions + * @size: Size of configuration space read (8, 16, 32 bits) + * @type: C type of value argument + * @len: Length of configuration space read (1, 2, 4 bytes) + * + * Generates rio_local_read_config_* functions used to access + * configuration space registers on the local device. + */ +#define RIO_LOP_READ(size,type,len) \ +int __rio_local_read_config_##size \ + (struct rio_mport *mport, u32 offset, type *value) \ +{ \ + int res; \ + u32 data = 0; \ + if (RIO_##size##_BAD) return RIO_BAD_SIZE; \ + res = mport->ops->lcread(mport, mport->id, offset, len, &data); \ + *value = (type)data; \ + return res; \ +} + +/** + * RIO_LOP_WRITE - Generate rio_local_write_config_* functions + * @size: Size of configuration space write (8, 16, 32 bits) + * @type: C type of value argument + * @len: Length of configuration space write (1, 2, 4 bytes) + * + * Generates rio_local_write_config_* functions used to access + * configuration space registers on the local device. + */ +#define RIO_LOP_WRITE(size,type,len) \ +int __rio_local_write_config_##size \ + (struct rio_mport *mport, u32 offset, type value) \ +{ \ + if (RIO_##size##_BAD) return RIO_BAD_SIZE; \ + return mport->ops->lcwrite(mport, mport->id, offset, len, value);\ +} + +RIO_LOP_READ(8, u8, 1) +RIO_LOP_READ(16, u16, 2) +RIO_LOP_READ(32, u32, 4) +RIO_LOP_WRITE(8, u8, 1) +RIO_LOP_WRITE(16, u16, 2) +RIO_LOP_WRITE(32, u32, 4) + +EXPORT_SYMBOL_GPL(__rio_local_read_config_8); +EXPORT_SYMBOL_GPL(__rio_local_read_config_16); +EXPORT_SYMBOL_GPL(__rio_local_read_config_32); +EXPORT_SYMBOL_GPL(__rio_local_write_config_8); +EXPORT_SYMBOL_GPL(__rio_local_write_config_16); +EXPORT_SYMBOL_GPL(__rio_local_write_config_32); + +/** + * RIO_OP_READ - Generate rio_mport_read_config_* functions + * @size: Size of configuration space read (8, 16, 32 bits) + * @type: C type of value argument + * @len: Length of configuration space read (1, 2, 4 bytes) + * + * Generates rio_mport_read_config_* functions used to access + * configuration space registers on the local device. + */ +#define RIO_OP_READ(size,type,len) \ +int rio_mport_read_config_##size \ + (struct rio_mport *mport, u16 destid, u8 hopcount, u32 offset, type *value) \ +{ \ + int res; \ + u32 data = 0; \ + if (RIO_##size##_BAD) return RIO_BAD_SIZE; \ + res = mport->ops->cread(mport, mport->id, destid, hopcount, offset, len, &data); \ + *value = (type)data; \ + return res; \ +} + +/** + * RIO_OP_WRITE - Generate rio_mport_write_config_* functions + * @size: Size of configuration space write (8, 16, 32 bits) + * @type: C type of value argument + * @len: Length of configuration space write (1, 2, 4 bytes) + * + * Generates rio_mport_write_config_* functions used to access + * configuration space registers on the local device. 
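+ *
+ * The generated accessors target a remote device selected by destination ID
+ * and hopcount, and return %RIO_BAD_SIZE when the register offset is not
+ * naturally aligned for the access width.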
+ */ +#define RIO_OP_WRITE(size,type,len) \ +int rio_mport_write_config_##size \ + (struct rio_mport *mport, u16 destid, u8 hopcount, u32 offset, type value) \ +{ \ + if (RIO_##size##_BAD) return RIO_BAD_SIZE; \ + return mport->ops->cwrite(mport, mport->id, destid, hopcount, \ + offset, len, value); \ +} + +RIO_OP_READ(8, u8, 1) +RIO_OP_READ(16, u16, 2) +RIO_OP_READ(32, u32, 4) +RIO_OP_WRITE(8, u8, 1) +RIO_OP_WRITE(16, u16, 2) +RIO_OP_WRITE(32, u32, 4) + +EXPORT_SYMBOL_GPL(rio_mport_read_config_8); +EXPORT_SYMBOL_GPL(rio_mport_read_config_16); +EXPORT_SYMBOL_GPL(rio_mport_read_config_32); +EXPORT_SYMBOL_GPL(rio_mport_write_config_8); +EXPORT_SYMBOL_GPL(rio_mport_write_config_16); +EXPORT_SYMBOL_GPL(rio_mport_write_config_32); + +/** + * rio_mport_send_doorbell - Send a doorbell message + * + * @mport: RIO master port + * @destid: RIO device destination ID + * @data: Doorbell message data + * + * Send a doorbell message to a RIO device. The doorbell message + * has a 16-bit info field provided by the data argument. + */ +int rio_mport_send_doorbell(struct rio_mport *mport, u16 destid, u16 data) +{ + return mport->ops->dsend(mport, mport->id, destid, data); +} + +EXPORT_SYMBOL_GPL(rio_mport_send_doorbell); diff --git a/drivers/rapidio/rio-driver.c b/drivers/rapidio/rio-driver.c new file mode 100644 index 000000000..728741539 --- /dev/null +++ b/drivers/rapidio/rio-driver.c @@ -0,0 +1,269 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * RapidIO driver support + * + * Copyright 2005 MontaVista Software, Inc. + * Matt Porter <mporter@kernel.crashing.org> + */ + +#include <linux/init.h> +#include <linux/module.h> +#include <linux/rio.h> +#include <linux/rio_ids.h> +#include <linux/rio_drv.h> + +#include "rio.h" + +/** + * rio_match_device - Tell if a RIO device has a matching RIO device id structure + * @id: the RIO device id structure to match against + * @rdev: the RIO device structure to match against + * + * Used from driver probe and bus matching to check whether a RIO device + * matches a device id structure provided by a RIO driver. Returns the + * matching &struct rio_device_id or %NULL if there is no match. + */ +static const struct rio_device_id *rio_match_device(const struct rio_device_id + *id, + const struct rio_dev *rdev) +{ + while (id->vid || id->asm_vid) { + if (((id->vid == RIO_ANY_ID) || (id->vid == rdev->vid)) && + ((id->did == RIO_ANY_ID) || (id->did == rdev->did)) && + ((id->asm_vid == RIO_ANY_ID) + || (id->asm_vid == rdev->asm_vid)) + && ((id->asm_did == RIO_ANY_ID) + || (id->asm_did == rdev->asm_did))) + return id; + id++; + } + return NULL; +} + +/** + * rio_dev_get - Increments the reference count of the RIO device structure + * + * @rdev: RIO device being referenced + * + * Each live reference to a device should be refcounted. + * + * Drivers for RIO devices should normally record such references in + * their probe() methods, when they bind to a device, and release + * them by calling rio_dev_put(), in their disconnect() methods. + */ +struct rio_dev *rio_dev_get(struct rio_dev *rdev) +{ + if (rdev) + get_device(&rdev->dev); + + return rdev; +} + +/** + * rio_dev_put - Release a use of the RIO device structure + * + * @rdev: RIO device being disconnected + * + * Must be called when a user of a device is finished with it. + * When the last user of the device calls this function, the + * memory of the device is freed. 
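+ * The actual freeing happens in the device release callback
+ * (see rio_release_dev()).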
+ */ +void rio_dev_put(struct rio_dev *rdev) +{ + if (rdev) + put_device(&rdev->dev); +} + +/** + * rio_device_probe - Tell if a RIO device structure has a matching RIO device id structure + * @dev: the RIO device structure to match against + * + * return 0 and set rio_dev->driver when drv claims rio_dev, else error + */ +static int rio_device_probe(struct device *dev) +{ + struct rio_driver *rdrv = to_rio_driver(dev->driver); + struct rio_dev *rdev = to_rio_dev(dev); + int error = -ENODEV; + const struct rio_device_id *id; + + if (!rdev->driver && rdrv->probe) { + if (!rdrv->id_table) + return error; + id = rio_match_device(rdrv->id_table, rdev); + rio_dev_get(rdev); + if (id) + error = rdrv->probe(rdev, id); + if (error >= 0) { + rdev->driver = rdrv; + error = 0; + } else + rio_dev_put(rdev); + } + return error; +} + +/** + * rio_device_remove - Remove a RIO device from the system + * + * @dev: the RIO device structure to match against + * + * Remove a RIO device from the system. If it has an associated + * driver, then run the driver remove() method. Then update + * the reference count. + */ +static int rio_device_remove(struct device *dev) +{ + struct rio_dev *rdev = to_rio_dev(dev); + struct rio_driver *rdrv = rdev->driver; + + if (rdrv) { + if (rdrv->remove) + rdrv->remove(rdev); + rdev->driver = NULL; + } + + rio_dev_put(rdev); + + return 0; +} + +static void rio_device_shutdown(struct device *dev) +{ + struct rio_dev *rdev = to_rio_dev(dev); + struct rio_driver *rdrv = rdev->driver; + + dev_dbg(dev, "RIO: %s\n", __func__); + + if (rdrv && rdrv->shutdown) + rdrv->shutdown(rdev); +} + +/** + * rio_register_driver - register a new RIO driver + * @rdrv: the RIO driver structure to register + * + * Adds a &struct rio_driver to the list of registered drivers. + * Returns a negative value on error, otherwise 0. If no error + * occurred, the driver remains registered even if no device + * was claimed during registration. + */ +int rio_register_driver(struct rio_driver *rdrv) +{ + /* initialize common driver fields */ + rdrv->driver.name = rdrv->name; + rdrv->driver.bus = &rio_bus_type; + + /* register with core */ + return driver_register(&rdrv->driver); +} + +/** + * rio_unregister_driver - unregister a RIO driver + * @rdrv: the RIO driver structure to unregister + * + * Deletes the &struct rio_driver from the list of registered RIO + * drivers, gives it a chance to clean up by calling its remove() + * function for each device it was responsible for, and marks those + * devices as driverless. + */ +void rio_unregister_driver(struct rio_driver *rdrv) +{ + driver_unregister(&rdrv->driver); +} + +void rio_attach_device(struct rio_dev *rdev) +{ + rdev->dev.bus = &rio_bus_type; +} +EXPORT_SYMBOL_GPL(rio_attach_device); + +/** + * rio_match_bus - Tell if a RIO device structure has a matching RIO driver device id structure + * @dev: the standard device structure to match against + * @drv: the standard driver structure containing the ids to match against + * + * Used by a driver to check whether a RIO device present in the + * system is in its list of supported devices. Returns 1 if + * there is a matching &struct rio_device_id or 0 if there is + * no match. 
+ */ +static int rio_match_bus(struct device *dev, struct device_driver *drv) +{ + struct rio_dev *rdev = to_rio_dev(dev); + struct rio_driver *rdrv = to_rio_driver(drv); + const struct rio_device_id *id = rdrv->id_table; + const struct rio_device_id *found_id; + + if (!id) + goto out; + + found_id = rio_match_device(id, rdev); + + if (found_id) + return 1; + + out:return 0; +} + +static int rio_uevent(struct device *dev, struct kobj_uevent_env *env) +{ + struct rio_dev *rdev; + + if (!dev) + return -ENODEV; + + rdev = to_rio_dev(dev); + if (!rdev) + return -ENODEV; + + if (add_uevent_var(env, "MODALIAS=rapidio:v%04Xd%04Xav%04Xad%04X", + rdev->vid, rdev->did, rdev->asm_vid, rdev->asm_did)) + return -ENOMEM; + return 0; +} + +struct class rio_mport_class = { + .name = "rapidio_port", + .owner = THIS_MODULE, + .dev_groups = rio_mport_groups, +}; +EXPORT_SYMBOL_GPL(rio_mport_class); + +struct bus_type rio_bus_type = { + .name = "rapidio", + .match = rio_match_bus, + .dev_groups = rio_dev_groups, + .bus_groups = rio_bus_groups, + .probe = rio_device_probe, + .remove = rio_device_remove, + .shutdown = rio_device_shutdown, + .uevent = rio_uevent, +}; + +/** + * rio_bus_init - Register the RapidIO bus with the device model + * + * Registers the RIO mport device class and RIO bus type with the Linux + * device model. + */ +static int __init rio_bus_init(void) +{ + int ret; + + ret = class_register(&rio_mport_class); + if (!ret) { + ret = bus_register(&rio_bus_type); + if (ret) + class_unregister(&rio_mport_class); + } + return ret; +} + +postcore_initcall(rio_bus_init); + +EXPORT_SYMBOL_GPL(rio_register_driver); +EXPORT_SYMBOL_GPL(rio_unregister_driver); +EXPORT_SYMBOL_GPL(rio_bus_type); +EXPORT_SYMBOL_GPL(rio_dev_get); +EXPORT_SYMBOL_GPL(rio_dev_put); diff --git a/drivers/rapidio/rio-scan.c b/drivers/rapidio/rio-scan.c new file mode 100644 index 000000000..fdcf742b2 --- /dev/null +++ b/drivers/rapidio/rio-scan.c @@ -0,0 +1,1156 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * RapidIO enumeration and discovery support + * + * Copyright 2005 MontaVista Software, Inc. + * Matt Porter <mporter@kernel.crashing.org> + * + * Copyright 2009 Integrated Device Technology, Inc. + * Alex Bounine <alexandre.bounine@idt.com> + * - Added Port-Write/Error Management initialization and handling + * + * Copyright 2009 Sysgo AG + * Thomas Moll <thomas.moll@sysgo.com> + * - Added Input- Output- enable functionality, to allow full communication + */ + +#include <linux/types.h> +#include <linux/kernel.h> + +#include <linux/delay.h> +#include <linux/dma-mapping.h> +#include <linux/init.h> +#include <linux/rio.h> +#include <linux/rio_drv.h> +#include <linux/rio_ids.h> +#include <linux/rio_regs.h> +#include <linux/module.h> +#include <linux/spinlock.h> +#include <linux/timer.h> +#include <linux/sched.h> +#include <linux/jiffies.h> +#include <linux/slab.h> + +#include "rio.h" + +static void rio_init_em(struct rio_dev *rdev); + +struct rio_id_table { + u16 start; /* logical minimal id */ + u32 max; /* max number of IDs in table */ + spinlock_t lock; + unsigned long table[]; +}; + +static int next_destid = 0; +static int next_comptag = 1; + +/** + * rio_destid_alloc - Allocate next available destID for given network + * @net: RIO network + * + * Returns next available device destination ID for the specified RIO network. + * Marks allocated ID as one in use. + * Returns RIO_INVALID_DESTID if new destID is not available. 
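+ * Access to the ID bitmap is serialized by the table's spinlock.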
+ */ +static u16 rio_destid_alloc(struct rio_net *net) +{ + int destid; + struct rio_id_table *idtab = (struct rio_id_table *)net->enum_data; + + spin_lock(&idtab->lock); + destid = find_first_zero_bit(idtab->table, idtab->max); + + if (destid < idtab->max) { + set_bit(destid, idtab->table); + destid += idtab->start; + } else + destid = RIO_INVALID_DESTID; + + spin_unlock(&idtab->lock); + return (u16)destid; +} + +/** + * rio_destid_reserve - Reserve the specified destID + * @net: RIO network + * @destid: destID to reserve + * + * Tries to reserve the specified destID. + * Returns 0 if successful. + */ +static int rio_destid_reserve(struct rio_net *net, u16 destid) +{ + int oldbit; + struct rio_id_table *idtab = (struct rio_id_table *)net->enum_data; + + destid -= idtab->start; + spin_lock(&idtab->lock); + oldbit = test_and_set_bit(destid, idtab->table); + spin_unlock(&idtab->lock); + return oldbit; +} + +/** + * rio_destid_free - free a previously allocated destID + * @net: RIO network + * @destid: destID to free + * + * Makes the specified destID available for use. + */ +static void rio_destid_free(struct rio_net *net, u16 destid) +{ + struct rio_id_table *idtab = (struct rio_id_table *)net->enum_data; + + destid -= idtab->start; + spin_lock(&idtab->lock); + clear_bit(destid, idtab->table); + spin_unlock(&idtab->lock); +} + +/** + * rio_destid_first - return first destID in use + * @net: RIO network + */ +static u16 rio_destid_first(struct rio_net *net) +{ + int destid; + struct rio_id_table *idtab = (struct rio_id_table *)net->enum_data; + + spin_lock(&idtab->lock); + destid = find_first_bit(idtab->table, idtab->max); + if (destid >= idtab->max) + destid = RIO_INVALID_DESTID; + else + destid += idtab->start; + spin_unlock(&idtab->lock); + return (u16)destid; +} + +/** + * rio_destid_next - return next destID in use + * @net: RIO network + * @from: destination ID from which search shall continue + */ +static u16 rio_destid_next(struct rio_net *net, u16 from) +{ + int destid; + struct rio_id_table *idtab = (struct rio_id_table *)net->enum_data; + + spin_lock(&idtab->lock); + destid = find_next_bit(idtab->table, idtab->max, from); + if (destid >= idtab->max) + destid = RIO_INVALID_DESTID; + else + destid += idtab->start; + spin_unlock(&idtab->lock); + return (u16)destid; +} + +/** + * rio_get_device_id - Get the base/extended device id for a device + * @port: RIO master port + * @destid: Destination ID of device + * @hopcount: Hopcount to device + * + * Reads the base/extended device id from a device. Returns the + * 8/16-bit device ID. + */ +static u16 rio_get_device_id(struct rio_mport *port, u16 destid, u8 hopcount) +{ + u32 result; + + rio_mport_read_config_32(port, destid, hopcount, RIO_DID_CSR, &result); + + return RIO_GET_DID(port->sys_size, result); +} + +/** + * rio_set_device_id - Set the base/extended device id for a device + * @port: RIO master port + * @destid: Destination ID of device + * @hopcount: Hopcount to device + * @did: Device ID value to be written + * + * Writes the base/extended device id from a device. + */ +static void rio_set_device_id(struct rio_mport *port, u16 destid, u8 hopcount, u16 did) +{ + rio_mport_write_config_32(port, destid, hopcount, RIO_DID_CSR, + RIO_SET_DID(port->sys_size, did)); +} + +/** + * rio_clear_locks- Release all host locks and signal enumeration complete + * @net: RIO network to run on + * + * Marks the component tag CSR on each device with the enumeration + * complete flag. 
When complete, it then release the host locks on + * each device. Returns 0 on success or %-EINVAL on failure. + */ +static int rio_clear_locks(struct rio_net *net) +{ + struct rio_mport *port = net->hport; + struct rio_dev *rdev; + u32 result; + int ret = 0; + + /* Release host device id locks */ + rio_local_write_config_32(port, RIO_HOST_DID_LOCK_CSR, + port->host_deviceid); + rio_local_read_config_32(port, RIO_HOST_DID_LOCK_CSR, &result); + if ((result & 0xffff) != 0xffff) { + printk(KERN_INFO + "RIO: badness when releasing host lock on master port, result %8.8x\n", + result); + ret = -EINVAL; + } + list_for_each_entry(rdev, &net->devices, net_list) { + rio_write_config_32(rdev, RIO_HOST_DID_LOCK_CSR, + port->host_deviceid); + rio_read_config_32(rdev, RIO_HOST_DID_LOCK_CSR, &result); + if ((result & 0xffff) != 0xffff) { + printk(KERN_INFO + "RIO: badness when releasing host lock on vid %4.4x did %4.4x\n", + rdev->vid, rdev->did); + ret = -EINVAL; + } + + /* Mark device as discovered and enable master */ + rio_read_config_32(rdev, + rdev->phys_efptr + RIO_PORT_GEN_CTL_CSR, + &result); + result |= RIO_PORT_GEN_DISCOVERED | RIO_PORT_GEN_MASTER; + rio_write_config_32(rdev, + rdev->phys_efptr + RIO_PORT_GEN_CTL_CSR, + result); + } + + return ret; +} + +/** + * rio_enum_host- Set host lock and initialize host destination ID + * @port: Master port to issue transaction + * + * Sets the local host master port lock and destination ID register + * with the host device ID value. The host device ID value is provided + * by the platform. Returns %0 on success or %-1 on failure. + */ +static int rio_enum_host(struct rio_mport *port) +{ + u32 result; + + /* Set master port host device id lock */ + rio_local_write_config_32(port, RIO_HOST_DID_LOCK_CSR, + port->host_deviceid); + + rio_local_read_config_32(port, RIO_HOST_DID_LOCK_CSR, &result); + if ((result & 0xffff) != port->host_deviceid) + return -1; + + /* Set master port destid and init destid ctr */ + rio_local_set_device_id(port, port->host_deviceid); + return 0; +} + +/** + * rio_device_has_destid- Test if a device contains a destination ID register + * @port: Master port to issue transaction + * @src_ops: RIO device source operations + * @dst_ops: RIO device destination operations + * + * Checks the provided @src_ops and @dst_ops for the necessary transaction + * capabilities that indicate whether or not a device will implement a + * destination ID register. Returns 1 if true or 0 if false. + */ +static int rio_device_has_destid(struct rio_mport *port, int src_ops, + int dst_ops) +{ + u32 mask = RIO_OPS_READ | RIO_OPS_WRITE | RIO_OPS_ATOMIC_TST_SWP | RIO_OPS_ATOMIC_INC | RIO_OPS_ATOMIC_DEC | RIO_OPS_ATOMIC_SET | RIO_OPS_ATOMIC_CLR; + + return !!((src_ops | dst_ops) & mask); +} + +/** + * rio_release_dev- Frees a RIO device struct + * @dev: LDM device associated with a RIO device struct + * + * Gets the RIO device struct associated a RIO device struct. + * The RIO device struct is freed. + */ +static void rio_release_dev(struct device *dev) +{ + struct rio_dev *rdev; + + rdev = to_rio_dev(dev); + kfree(rdev); +} + +/** + * rio_is_switch- Tests if a RIO device has switch capabilities + * @rdev: RIO device + * + * Gets the RIO device Processing Element Features register + * contents and tests for switch capabilities. Returns 1 if + * the device is a switch or 0 if it is not a switch. + * The RIO device struct is freed. 
+ */ +static int rio_is_switch(struct rio_dev *rdev) +{ + if (rdev->pef & RIO_PEF_SWITCH) + return 1; + return 0; +} + +/** + * rio_setup_device- Allocates and sets up a RIO device + * @net: RIO network + * @port: Master port to send transactions + * @destid: Current destination ID + * @hopcount: Current hopcount + * @do_enum: Enumeration/Discovery mode flag + * + * Allocates a RIO device and configures fields based on configuration + * space contents. If device has a destination ID register, a destination + * ID is either assigned in enumeration mode or read from configuration + * space in discovery mode. If the device has switch capabilities, then + * a switch is allocated and configured appropriately. Returns a pointer + * to a RIO device on success or NULL on failure. + * + */ +static struct rio_dev *rio_setup_device(struct rio_net *net, + struct rio_mport *port, u16 destid, + u8 hopcount, int do_enum) +{ + int ret = 0; + struct rio_dev *rdev; + struct rio_switch *rswitch = NULL; + int result, rdid; + size_t size; + u32 swpinfo = 0; + + size = sizeof(*rdev); + if (rio_mport_read_config_32(port, destid, hopcount, + RIO_PEF_CAR, &result)) + return NULL; + + if (result & (RIO_PEF_SWITCH | RIO_PEF_MULTIPORT)) { + rio_mport_read_config_32(port, destid, hopcount, + RIO_SWP_INFO_CAR, &swpinfo); + if (result & RIO_PEF_SWITCH) + size += struct_size(rswitch, nextdev, RIO_GET_TOTAL_PORTS(swpinfo)); + } + + rdev = kzalloc(size, GFP_KERNEL); + if (!rdev) + return NULL; + + rdev->net = net; + rdev->pef = result; + rdev->swpinfo = swpinfo; + rio_mport_read_config_32(port, destid, hopcount, RIO_DEV_ID_CAR, + &result); + rdev->did = result >> 16; + rdev->vid = result & 0xffff; + rio_mport_read_config_32(port, destid, hopcount, RIO_DEV_INFO_CAR, + &rdev->device_rev); + rio_mport_read_config_32(port, destid, hopcount, RIO_ASM_ID_CAR, + &result); + rdev->asm_did = result >> 16; + rdev->asm_vid = result & 0xffff; + rio_mport_read_config_32(port, destid, hopcount, RIO_ASM_INFO_CAR, + &result); + rdev->asm_rev = result >> 16; + if (rdev->pef & RIO_PEF_EXT_FEATURES) { + rdev->efptr = result & 0xffff; + rdev->phys_efptr = rio_mport_get_physefb(port, 0, destid, + hopcount, &rdev->phys_rmap); + pr_debug("RIO: %s Register Map %d device\n", + __func__, rdev->phys_rmap); + + rdev->em_efptr = rio_mport_get_feature(port, 0, destid, + hopcount, RIO_EFB_ERR_MGMNT); + if (!rdev->em_efptr) + rdev->em_efptr = rio_mport_get_feature(port, 0, destid, + hopcount, RIO_EFB_ERR_MGMNT_HS); + } + + rio_mport_read_config_32(port, destid, hopcount, RIO_SRC_OPS_CAR, + &rdev->src_ops); + rio_mport_read_config_32(port, destid, hopcount, RIO_DST_OPS_CAR, + &rdev->dst_ops); + + if (do_enum) { + /* Assign component tag to device */ + if (next_comptag >= 0x10000) { + pr_err("RIO: Component Tag Counter Overflow\n"); + goto cleanup; + } + rio_mport_write_config_32(port, destid, hopcount, + RIO_COMPONENT_TAG_CSR, next_comptag); + rdev->comp_tag = next_comptag++; + rdev->do_enum = true; + } else { + rio_mport_read_config_32(port, destid, hopcount, + RIO_COMPONENT_TAG_CSR, + &rdev->comp_tag); + } + + if (rio_device_has_destid(port, rdev->src_ops, rdev->dst_ops)) { + if (do_enum) { + rio_set_device_id(port, destid, hopcount, next_destid); + rdev->destid = next_destid; + next_destid = rio_destid_alloc(net); + } else + rdev->destid = rio_get_device_id(port, destid, hopcount); + + rdev->hopcount = 0xff; + } else { + /* Switch device has an associated destID which + * will be adjusted later + */ + rdev->destid = destid; + rdev->hopcount = hopcount; 
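+		/*
+		 * The hopcount is kept so that maintenance transactions can
+		 * still be routed to this device before the enumerator or
+		 * discoverer assigns its final destid.
+		 */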
+ } + + /* If a PE has both switch and other functions, show it as a switch */ + if (rio_is_switch(rdev)) { + rswitch = rdev->rswitch; + rswitch->port_ok = 0; + spin_lock_init(&rswitch->lock); + rswitch->route_table = + kzalloc(RIO_MAX_ROUTE_ENTRIES(port->sys_size), + GFP_KERNEL); + if (!rswitch->route_table) + goto cleanup; + /* Initialize switch route table */ + for (rdid = 0; rdid < RIO_MAX_ROUTE_ENTRIES(port->sys_size); + rdid++) + rswitch->route_table[rdid] = RIO_INVALID_ROUTE; + dev_set_name(&rdev->dev, "%02x:s:%04x", rdev->net->id, + rdev->comp_tag & RIO_CTAG_UDEVID); + + if (do_enum) + rio_route_clr_table(rdev, RIO_GLOBAL_TABLE, 0); + } else { + if (do_enum) + /*Enable Input Output Port (transmitter receiver)*/ + rio_enable_rx_tx_port(port, 0, destid, hopcount, 0); + + dev_set_name(&rdev->dev, "%02x:e:%04x", rdev->net->id, + rdev->comp_tag & RIO_CTAG_UDEVID); + } + + rdev->dev.parent = &net->dev; + rio_attach_device(rdev); + rdev->dev.release = rio_release_dev; + rdev->dma_mask = DMA_BIT_MASK(32); + rdev->dev.dma_mask = &rdev->dma_mask; + rdev->dev.coherent_dma_mask = DMA_BIT_MASK(32); + + if (rdev->dst_ops & RIO_DST_OPS_DOORBELL) + rio_init_dbell_res(&rdev->riores[RIO_DOORBELL_RESOURCE], + 0, 0xffff); + + ret = rio_add_device(rdev); + if (ret) { + if (rswitch) + kfree(rswitch->route_table); + put_device(&rdev->dev); + return NULL; + } + + rio_dev_get(rdev); + + return rdev; + +cleanup: + if (rswitch) + kfree(rswitch->route_table); + + kfree(rdev); + return NULL; +} + +/** + * rio_sport_is_active- Tests if a switch port has an active connection. + * @rdev: RapidIO device object + * @sp: Switch port number + * + * Reads the port error status CSR for a particular switch port to + * determine if the port has an active link. Returns + * %RIO_PORT_N_ERR_STS_PORT_OK if the port is active or %0 if it is + * inactive. + */ +static int +rio_sport_is_active(struct rio_dev *rdev, int sp) +{ + u32 result = 0; + + rio_read_config_32(rdev, RIO_DEV_PORT_N_ERR_STS_CSR(rdev, sp), + &result); + + return result & RIO_PORT_N_ERR_STS_PORT_OK; +} + +/** + * rio_get_host_deviceid_lock- Reads the Host Device ID Lock CSR on a device + * @port: Master port to send transaction + * @hopcount: Number of hops to the device + * + * Used during enumeration to read the Host Device ID Lock CSR on a + * RIO device. Returns the value of the lock register. + */ +static u16 rio_get_host_deviceid_lock(struct rio_mport *port, u8 hopcount) +{ + u32 result; + + rio_mport_read_config_32(port, RIO_ANY_DESTID(port->sys_size), hopcount, + RIO_HOST_DID_LOCK_CSR, &result); + + return (u16) (result & 0xffff); +} + +/** + * rio_enum_peer- Recursively enumerate a RIO network through a master port + * @net: RIO network being enumerated + * @port: Master port to send transactions + * @hopcount: Number of hops into the network + * @prev: Previous RIO device connected to the enumerated one + * @prev_port: Port on previous RIO device + * + * Recursively enumerates a RIO network. Transactions are sent via the + * master port passed in @port. 
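+ *
+ * Returns %0 on success or %-1 on failure.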
+ */ +static int rio_enum_peer(struct rio_net *net, struct rio_mport *port, + u8 hopcount, struct rio_dev *prev, int prev_port) +{ + struct rio_dev *rdev; + u32 regval; + int tmp; + + if (rio_mport_chk_dev_access(port, + RIO_ANY_DESTID(port->sys_size), hopcount)) { + pr_debug("RIO: device access check failed\n"); + return -1; + } + + if (rio_get_host_deviceid_lock(port, hopcount) == port->host_deviceid) { + pr_debug("RIO: PE already discovered by this host\n"); + /* + * Already discovered by this host. Add it as another + * link to the existing device. + */ + rio_mport_read_config_32(port, RIO_ANY_DESTID(port->sys_size), + hopcount, RIO_COMPONENT_TAG_CSR, ®val); + + if (regval) { + rdev = rio_get_comptag((regval & 0xffff), NULL); + + if (rdev && prev && rio_is_switch(prev)) { + pr_debug("RIO: redundant path to %s\n", + rio_name(rdev)); + prev->rswitch->nextdev[prev_port] = rdev; + } + } + + return 0; + } + + /* Attempt to acquire device lock */ + rio_mport_write_config_32(port, RIO_ANY_DESTID(port->sys_size), + hopcount, + RIO_HOST_DID_LOCK_CSR, port->host_deviceid); + while ((tmp = rio_get_host_deviceid_lock(port, hopcount)) + < port->host_deviceid) { + /* Delay a bit */ + mdelay(1); + /* Attempt to acquire device lock again */ + rio_mport_write_config_32(port, RIO_ANY_DESTID(port->sys_size), + hopcount, + RIO_HOST_DID_LOCK_CSR, + port->host_deviceid); + } + + if (rio_get_host_deviceid_lock(port, hopcount) > port->host_deviceid) { + pr_debug( + "RIO: PE locked by a higher priority host...retreating\n"); + return -1; + } + + /* Setup new RIO device */ + rdev = rio_setup_device(net, port, RIO_ANY_DESTID(port->sys_size), + hopcount, 1); + if (rdev) { + rdev->prev = prev; + if (prev && rio_is_switch(prev)) + prev->rswitch->nextdev[prev_port] = rdev; + } else + return -1; + + if (rio_is_switch(rdev)) { + int sw_destid; + int cur_destid; + int sw_inport; + u16 destid; + int port_num; + + sw_inport = RIO_GET_PORT_NUM(rdev->swpinfo); + rio_route_add_entry(rdev, RIO_GLOBAL_TABLE, + port->host_deviceid, sw_inport, 0); + rdev->rswitch->route_table[port->host_deviceid] = sw_inport; + + destid = rio_destid_first(net); + while (destid != RIO_INVALID_DESTID && destid < next_destid) { + if (destid != port->host_deviceid) { + rio_route_add_entry(rdev, RIO_GLOBAL_TABLE, + destid, sw_inport, 0); + rdev->rswitch->route_table[destid] = sw_inport; + } + destid = rio_destid_next(net, destid + 1); + } + pr_debug( + "RIO: found %s (vid %4.4x did %4.4x) with %d ports\n", + rio_name(rdev), rdev->vid, rdev->did, + RIO_GET_TOTAL_PORTS(rdev->swpinfo)); + sw_destid = next_destid; + for (port_num = 0; + port_num < RIO_GET_TOTAL_PORTS(rdev->swpinfo); + port_num++) { + if (sw_inport == port_num) { + rio_enable_rx_tx_port(port, 0, + RIO_ANY_DESTID(port->sys_size), + hopcount, port_num); + rdev->rswitch->port_ok |= (1 << port_num); + continue; + } + + cur_destid = next_destid; + + if (rio_sport_is_active(rdev, port_num)) { + pr_debug( + "RIO: scanning device on port %d\n", + port_num); + rio_enable_rx_tx_port(port, 0, + RIO_ANY_DESTID(port->sys_size), + hopcount, port_num); + rdev->rswitch->port_ok |= (1 << port_num); + rio_route_add_entry(rdev, RIO_GLOBAL_TABLE, + RIO_ANY_DESTID(port->sys_size), + port_num, 0); + + if (rio_enum_peer(net, port, hopcount + 1, + rdev, port_num) < 0) + return -1; + + /* Update routing tables */ + destid = rio_destid_next(net, cur_destid + 1); + if (destid != RIO_INVALID_DESTID) { + for (destid = cur_destid; + destid < next_destid;) { + if (destid != port->host_deviceid) { + 
rio_route_add_entry(rdev, + RIO_GLOBAL_TABLE, + destid, + port_num, + 0); + rdev->rswitch-> + route_table[destid] = + port_num; + } + destid = rio_destid_next(net, + destid + 1); + } + } + } else { + /* If switch supports Error Management, + * set PORT_LOCKOUT bit for unused port + */ + if (rdev->em_efptr) + rio_set_port_lockout(rdev, port_num, 1); + + rdev->rswitch->port_ok &= ~(1 << port_num); + } + } + + /* Direct Port-write messages to the enumeratiing host */ + if ((rdev->src_ops & RIO_SRC_OPS_PORT_WRITE) && + (rdev->em_efptr)) { + rio_write_config_32(rdev, + rdev->em_efptr + RIO_EM_PW_TGT_DEVID, + (port->host_deviceid << 16) | + (port->sys_size << 15)); + } + + rio_init_em(rdev); + + /* Check for empty switch */ + if (next_destid == sw_destid) + next_destid = rio_destid_alloc(net); + + rdev->destid = sw_destid; + } else + pr_debug("RIO: found %s (vid %4.4x did %4.4x)\n", + rio_name(rdev), rdev->vid, rdev->did); + + return 0; +} + +/** + * rio_enum_complete- Tests if enumeration of a network is complete + * @port: Master port to send transaction + * + * Tests the PGCCSR discovered bit for non-zero value (enumeration + * complete flag). Return %1 if enumeration is complete or %0 if + * enumeration is incomplete. + */ +static int rio_enum_complete(struct rio_mport *port) +{ + u32 regval; + + rio_local_read_config_32(port, port->phys_efptr + RIO_PORT_GEN_CTL_CSR, + ®val); + return (regval & RIO_PORT_GEN_DISCOVERED) ? 1 : 0; +} + +/** + * rio_disc_peer- Recursively discovers a RIO network through a master port + * @net: RIO network being discovered + * @port: Master port to send transactions + * @destid: Current destination ID in network + * @hopcount: Number of hops into the network + * @prev: previous rio_dev + * @prev_port: previous port number + * + * Recursively discovers a RIO network. Transactions are sent via the + * master port passed in @port. 
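+ *
+ * Returns %0 on success or %-1 on failure.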
+ */ +static int +rio_disc_peer(struct rio_net *net, struct rio_mport *port, u16 destid, + u8 hopcount, struct rio_dev *prev, int prev_port) +{ + u8 port_num, route_port; + struct rio_dev *rdev; + u16 ndestid; + + /* Setup new RIO device */ + if ((rdev = rio_setup_device(net, port, destid, hopcount, 0))) { + rdev->prev = prev; + if (prev && rio_is_switch(prev)) + prev->rswitch->nextdev[prev_port] = rdev; + } else + return -1; + + if (rio_is_switch(rdev)) { + /* Associated destid is how we accessed this switch */ + rdev->destid = destid; + + pr_debug( + "RIO: found %s (vid %4.4x did %4.4x) with %d ports\n", + rio_name(rdev), rdev->vid, rdev->did, + RIO_GET_TOTAL_PORTS(rdev->swpinfo)); + for (port_num = 0; + port_num < RIO_GET_TOTAL_PORTS(rdev->swpinfo); + port_num++) { + if (RIO_GET_PORT_NUM(rdev->swpinfo) == port_num) + continue; + + if (rio_sport_is_active(rdev, port_num)) { + pr_debug( + "RIO: scanning device on port %d\n", + port_num); + + rio_lock_device(port, destid, hopcount, 1000); + + for (ndestid = 0; + ndestid < RIO_ANY_DESTID(port->sys_size); + ndestid++) { + rio_route_get_entry(rdev, + RIO_GLOBAL_TABLE, + ndestid, + &route_port, 0); + if (route_port == port_num) + break; + } + + if (ndestid == RIO_ANY_DESTID(port->sys_size)) + continue; + rio_unlock_device(port, destid, hopcount); + if (rio_disc_peer(net, port, ndestid, + hopcount + 1, rdev, port_num) < 0) + return -1; + } + } + } else + pr_debug("RIO: found %s (vid %4.4x did %4.4x)\n", + rio_name(rdev), rdev->vid, rdev->did); + + return 0; +} + +/** + * rio_mport_is_active- Tests if master port link is active + * @port: Master port to test + * + * Reads the port error status CSR for the master port to + * determine if the port has an active link. Returns + * %RIO_PORT_N_ERR_STS_PORT_OK if the master port is active + * or %0 if it is inactive. + */ +static int rio_mport_is_active(struct rio_mport *port) +{ + u32 result = 0; + + rio_local_read_config_32(port, + port->phys_efptr + + RIO_PORT_N_ERR_STS_CSR(port->index, port->phys_rmap), + &result); + return result & RIO_PORT_N_ERR_STS_PORT_OK; +} + +static void rio_scan_release_net(struct rio_net *net) +{ + pr_debug("RIO-SCAN: %s: net_%d\n", __func__, net->id); + kfree(net->enum_data); +} + +static void rio_scan_release_dev(struct device *dev) +{ + struct rio_net *net; + + net = to_rio_net(dev); + pr_debug("RIO-SCAN: %s: net_%d\n", __func__, net->id); + kfree(net); +} + +/* + * rio_scan_alloc_net - Allocate and configure a new RIO network + * @mport: Master port associated with the RIO network + * @do_enum: Enumeration/Discovery mode flag + * @start: logical minimal start id for new net + * + * Allocates a new RIO network structure and initializes enumerator-specific + * part of it (if required). + * Returns a RIO network pointer on success or %NULL on failure. 
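+ *
+ * In enumeration mode this also allocates the per-network destID bitmap,
+ * sized for RIO_MAX_ROUTE_ENTRIES() of the mport's system size and
+ * starting at @start.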
+ */ +static struct rio_net *rio_scan_alloc_net(struct rio_mport *mport, + int do_enum, u16 start) +{ + struct rio_net *net; + + net = rio_alloc_net(mport); + + if (net && do_enum) { + struct rio_id_table *idtab; + size_t size; + + size = sizeof(struct rio_id_table) + + BITS_TO_LONGS( + RIO_MAX_ROUTE_ENTRIES(mport->sys_size) + ) * sizeof(long); + + idtab = kzalloc(size, GFP_KERNEL); + + if (idtab == NULL) { + pr_err("RIO: failed to allocate destID table\n"); + rio_free_net(net); + net = NULL; + } else { + net->enum_data = idtab; + net->release = rio_scan_release_net; + idtab->start = start; + idtab->max = RIO_MAX_ROUTE_ENTRIES(mport->sys_size); + spin_lock_init(&idtab->lock); + } + } + + if (net) { + net->id = mport->id; + net->hport = mport; + dev_set_name(&net->dev, "rnet_%d", net->id); + net->dev.parent = &mport->dev; + net->dev.release = rio_scan_release_dev; + rio_add_net(net); + } + + return net; +} + +/** + * rio_update_route_tables- Updates route tables in switches + * @net: RIO network to run update on + * + * For each enumerated device, ensure that each switch in a system + * has correct routing entries. Add routes for devices that where + * unknown during the first enumeration pass through the switch. + */ +static void rio_update_route_tables(struct rio_net *net) +{ + struct rio_dev *rdev, *swrdev; + struct rio_switch *rswitch; + u8 sport; + u16 destid; + + list_for_each_entry(rdev, &net->devices, net_list) { + + destid = rdev->destid; + + list_for_each_entry(rswitch, &net->switches, node) { + + if (rio_is_switch(rdev) && (rdev->rswitch == rswitch)) + continue; + + if (RIO_INVALID_ROUTE == rswitch->route_table[destid]) { + swrdev = sw_to_rio_dev(rswitch); + + /* Skip if destid ends in empty switch*/ + if (swrdev->destid == destid) + continue; + + sport = RIO_GET_PORT_NUM(swrdev->swpinfo); + + rio_route_add_entry(swrdev, RIO_GLOBAL_TABLE, + destid, sport, 0); + rswitch->route_table[destid] = sport; + } + } + } +} + +/** + * rio_init_em - Initializes RIO Error Management (for switches) + * @rdev: RIO device + * + * For each enumerated switch, call device-specific error management + * initialization routine (if supplied by the switch driver). + */ +static void rio_init_em(struct rio_dev *rdev) +{ + if (rio_is_switch(rdev) && (rdev->em_efptr) && + rdev->rswitch->ops && rdev->rswitch->ops->em_init) { + rdev->rswitch->ops->em_init(rdev); + } +} + +/** + * rio_enum_mport- Start enumeration through a master port + * @mport: Master port to send transactions + * @flags: Enumeration control flags + * + * Starts the enumeration process. If somebody has enumerated our + * master port device, then give up. If not and we have an active + * link, then start recursive peer enumeration. Returns %0 if + * enumeration succeeds or %-EBUSY if enumeration fails. + */ +static int rio_enum_mport(struct rio_mport *mport, u32 flags) +{ + struct rio_net *net = NULL; + int rc = 0; + + printk(KERN_INFO "RIO: enumerate master port %d, %s\n", mport->id, + mport->name); + + /* + * To avoid multiple start requests (repeat enumeration is not supported + * by this method) check if enumeration/discovery was performed for this + * mport: if mport was added into the list of mports for a net exit + * with error. + */ + if (mport->nnode.next || mport->nnode.prev) + return -EBUSY; + + /* If somebody else enumerated our master port device, bail. 
*/ + if (rio_enum_host(mport) < 0) { + printk(KERN_INFO + "RIO: master port %d device has been enumerated by a remote host\n", + mport->id); + rc = -EBUSY; + goto out; + } + + /* If master port has an active link, allocate net and enum peers */ + if (rio_mport_is_active(mport)) { + net = rio_scan_alloc_net(mport, 1, 0); + if (!net) { + printk(KERN_ERR "RIO: failed to allocate new net\n"); + rc = -ENOMEM; + goto out; + } + + /* reserve mport destID in new net */ + rio_destid_reserve(net, mport->host_deviceid); + + /* Enable Input Output Port (transmitter receiver) */ + rio_enable_rx_tx_port(mport, 1, 0, 0, 0); + + /* Set component tag for host */ + rio_local_write_config_32(mport, RIO_COMPONENT_TAG_CSR, + next_comptag++); + + next_destid = rio_destid_alloc(net); + + if (rio_enum_peer(net, mport, 0, NULL, 0) < 0) { + /* A higher priority host won enumeration, bail. */ + printk(KERN_INFO + "RIO: master port %d device has lost enumeration to a remote host\n", + mport->id); + rio_clear_locks(net); + rc = -EBUSY; + goto out; + } + /* free the last allocated destID (unused) */ + rio_destid_free(net, next_destid); + rio_update_route_tables(net); + rio_clear_locks(net); + rio_pw_enable(mport, 1); + } else { + printk(KERN_INFO "RIO: master port %d link inactive\n", + mport->id); + rc = -EINVAL; + } + + out: + return rc; +} + +/** + * rio_build_route_tables- Generate route tables from switch route entries + * @net: RIO network to run route tables scan on + * + * For each switch device, generate a route table by copying existing + * route entries from the switch. + */ +static void rio_build_route_tables(struct rio_net *net) +{ + struct rio_switch *rswitch; + struct rio_dev *rdev; + int i; + u8 sport; + + list_for_each_entry(rswitch, &net->switches, node) { + rdev = sw_to_rio_dev(rswitch); + + rio_lock_device(net->hport, rdev->destid, + rdev->hopcount, 1000); + for (i = 0; + i < RIO_MAX_ROUTE_ENTRIES(net->hport->sys_size); + i++) { + if (rio_route_get_entry(rdev, RIO_GLOBAL_TABLE, + i, &sport, 0) < 0) + continue; + rswitch->route_table[i] = sport; + } + + rio_unlock_device(net->hport, rdev->destid, rdev->hopcount); + } +} + +/** + * rio_disc_mport- Start discovery through a master port + * @mport: Master port to send transactions + * @flags: discovery control flags + * + * Starts the discovery process. If we have an active link, + * then wait for the signal that enumeration is complete (if wait + * is allowed). + * When enumeration completion is signaled, start recursive + * peer discovery. Returns %0 if discovery succeeds or %-EBUSY + * on failure. + */ +static int rio_disc_mport(struct rio_mport *mport, u32 flags) +{ + struct rio_net *net = NULL; + unsigned long to_end; + + printk(KERN_INFO "RIO: discover master port %d, %s\n", mport->id, + mport->name); + + /* If master port has an active link, allocate net and discover peers */ + if (rio_mport_is_active(mport)) { + if (rio_enum_complete(mport)) + goto enum_done; + else if (flags & RIO_SCAN_ENUM_NO_WAIT) + return -EAGAIN; + + pr_debug("RIO: wait for enumeration to complete...\n"); + + to_end = jiffies + CONFIG_RAPIDIO_DISC_TIMEOUT * HZ; + while (time_before(jiffies, to_end)) { + if (rio_enum_complete(mport)) + goto enum_done; + msleep(10); + } + + pr_debug("RIO: discovery timeout on mport %d %s\n", + mport->id, mport->name); + goto bail; +enum_done: + pr_debug("RIO: ... 
enumeration done\n"); + + net = rio_scan_alloc_net(mport, 0, 0); + if (!net) { + printk(KERN_ERR "RIO: Failed to allocate new net\n"); + goto bail; + } + + /* Read DestID assigned by enumerator */ + rio_local_read_config_32(mport, RIO_DID_CSR, + &mport->host_deviceid); + mport->host_deviceid = RIO_GET_DID(mport->sys_size, + mport->host_deviceid); + + if (rio_disc_peer(net, mport, RIO_ANY_DESTID(mport->sys_size), + 0, NULL, 0) < 0) { + printk(KERN_INFO + "RIO: master port %d device has failed discovery\n", + mport->id); + goto bail; + } + + rio_build_route_tables(net); + } + + return 0; +bail: + return -EBUSY; +} + +static struct rio_scan rio_scan_ops = { + .owner = THIS_MODULE, + .enumerate = rio_enum_mport, + .discover = rio_disc_mport, +}; + +static bool scan; +module_param(scan, bool, 0); +MODULE_PARM_DESC(scan, "Start RapidIO network enumeration/discovery " + "(default = 0)"); + +/** + * rio_basic_attach: + * + * When this enumeration/discovery method is loaded as a module this function + * registers its specific enumeration and discover routines for all available + * RapidIO mport devices. The "scan" command line parameter controls ability of + * the module to start RapidIO enumeration/discovery automatically. + * + * Returns 0 for success or -EIO if unable to register itself. + * + * This enumeration/discovery method cannot be unloaded and therefore does not + * provide a matching cleanup_module routine. + */ + +static int __init rio_basic_attach(void) +{ + if (rio_register_scan(RIO_MPORT_ANY, &rio_scan_ops)) + return -EIO; + if (scan) + rio_init_mports(); + return 0; +} + +late_initcall(rio_basic_attach); + +MODULE_DESCRIPTION("Basic RapidIO enumeration/discovery"); +MODULE_LICENSE("GPL"); diff --git a/drivers/rapidio/rio-sysfs.c b/drivers/rapidio/rio-sysfs.c new file mode 100644 index 000000000..f76796024 --- /dev/null +++ b/drivers/rapidio/rio-sysfs.c @@ -0,0 +1,366 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * RapidIO sysfs attributes and support + * + * Copyright 2005 MontaVista Software, Inc. + * Matt Porter <mporter@kernel.crashing.org> + */ + +#include <linux/kernel.h> +#include <linux/rio.h> +#include <linux/rio_drv.h> +#include <linux/stat.h> +#include <linux/capability.h> + +#include "rio.h" + +/* Sysfs support */ +#define rio_config_attr(field, format_string) \ +static ssize_t \ +field##_show(struct device *dev, struct device_attribute *attr, char *buf) \ +{ \ + struct rio_dev *rdev = to_rio_dev(dev); \ + \ + return sprintf(buf, format_string, rdev->field); \ +} \ +static DEVICE_ATTR_RO(field); + +rio_config_attr(did, "0x%04x\n"); +rio_config_attr(vid, "0x%04x\n"); +rio_config_attr(device_rev, "0x%08x\n"); +rio_config_attr(asm_did, "0x%04x\n"); +rio_config_attr(asm_vid, "0x%04x\n"); +rio_config_attr(asm_rev, "0x%04x\n"); +rio_config_attr(destid, "0x%04x\n"); +rio_config_attr(hopcount, "0x%02x\n"); + +static ssize_t routes_show(struct device *dev, struct device_attribute *attr, char *buf) +{ + struct rio_dev *rdev = to_rio_dev(dev); + char *str = buf; + int i; + + for (i = 0; i < RIO_MAX_ROUTE_ENTRIES(rdev->net->hport->sys_size); + i++) { + if (rdev->rswitch->route_table[i] == RIO_INVALID_ROUTE) + continue; + str += + sprintf(str, "%04x %02x\n", i, + rdev->rswitch->route_table[i]); + } + + return (str - buf); +} +static DEVICE_ATTR_RO(routes); + +static ssize_t lprev_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct rio_dev *rdev = to_rio_dev(dev); + + return sprintf(buf, "%s\n", + (rdev->prev) ? 
rio_name(rdev->prev) : "root"); +} +static DEVICE_ATTR_RO(lprev); + +static ssize_t lnext_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct rio_dev *rdev = to_rio_dev(dev); + char *str = buf; + int i; + + if (rdev->pef & RIO_PEF_SWITCH) { + for (i = 0; i < RIO_GET_TOTAL_PORTS(rdev->swpinfo); i++) { + if (rdev->rswitch->nextdev[i]) + str += sprintf(str, "%s\n", + rio_name(rdev->rswitch->nextdev[i])); + else + str += sprintf(str, "null\n"); + } + } + + return str - buf; +} +static DEVICE_ATTR_RO(lnext); + +static ssize_t modalias_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct rio_dev *rdev = to_rio_dev(dev); + + return sprintf(buf, "rapidio:v%04Xd%04Xav%04Xad%04X\n", + rdev->vid, rdev->did, rdev->asm_vid, rdev->asm_did); +} +static DEVICE_ATTR_RO(modalias); + +static struct attribute *rio_dev_attrs[] = { + &dev_attr_did.attr, + &dev_attr_vid.attr, + &dev_attr_device_rev.attr, + &dev_attr_asm_did.attr, + &dev_attr_asm_vid.attr, + &dev_attr_asm_rev.attr, + &dev_attr_lprev.attr, + &dev_attr_destid.attr, + &dev_attr_modalias.attr, + + /* Switch-only attributes */ + &dev_attr_routes.attr, + &dev_attr_lnext.attr, + &dev_attr_hopcount.attr, + NULL, +}; + +static ssize_t +rio_read_config(struct file *filp, struct kobject *kobj, + struct bin_attribute *bin_attr, + char *buf, loff_t off, size_t count) +{ + struct rio_dev *dev = to_rio_dev(kobj_to_dev(kobj)); + unsigned int size = 0x100; + loff_t init_off = off; + u8 *data = (u8 *) buf; + + /* Several chips lock up trying to read undefined config space */ + if (capable(CAP_SYS_ADMIN)) + size = RIO_MAINT_SPACE_SZ; + + if (off >= size) + return 0; + if (off + count > size) { + size -= off; + count = size; + } else { + size = count; + } + + if ((off & 1) && size) { + u8 val; + rio_read_config_8(dev, off, &val); + data[off - init_off] = val; + off++; + size--; + } + + if ((off & 3) && size > 2) { + u16 val; + rio_read_config_16(dev, off, &val); + data[off - init_off] = (val >> 8) & 0xff; + data[off - init_off + 1] = val & 0xff; + off += 2; + size -= 2; + } + + while (size > 3) { + u32 val; + rio_read_config_32(dev, off, &val); + data[off - init_off] = (val >> 24) & 0xff; + data[off - init_off + 1] = (val >> 16) & 0xff; + data[off - init_off + 2] = (val >> 8) & 0xff; + data[off - init_off + 3] = val & 0xff; + off += 4; + size -= 4; + } + + if (size >= 2) { + u16 val; + rio_read_config_16(dev, off, &val); + data[off - init_off] = (val >> 8) & 0xff; + data[off - init_off + 1] = val & 0xff; + off += 2; + size -= 2; + } + + if (size > 0) { + u8 val; + rio_read_config_8(dev, off, &val); + data[off - init_off] = val; + off++; + --size; + } + + return count; +} + +static ssize_t +rio_write_config(struct file *filp, struct kobject *kobj, + struct bin_attribute *bin_attr, + char *buf, loff_t off, size_t count) +{ + struct rio_dev *dev = to_rio_dev(kobj_to_dev(kobj)); + unsigned int size = count; + loff_t init_off = off; + u8 *data = (u8 *) buf; + + if (off >= RIO_MAINT_SPACE_SZ) + return 0; + if (off + count > RIO_MAINT_SPACE_SZ) { + size = RIO_MAINT_SPACE_SZ - off; + count = size; + } + + if ((off & 1) && size) { + rio_write_config_8(dev, off, data[off - init_off]); + off++; + size--; + } + + if ((off & 3) && (size > 2)) { + u16 val = data[off - init_off + 1]; + val |= (u16) data[off - init_off] << 8; + rio_write_config_16(dev, off, val); + off += 2; + size -= 2; + } + + while (size > 3) { + u32 val = data[off - init_off + 3]; + val |= (u32) data[off - init_off + 2] << 8; + val |= (u32) data[off - 
init_off + 1] << 16; + val |= (u32) data[off - init_off] << 24; + rio_write_config_32(dev, off, val); + off += 4; + size -= 4; + } + + if (size >= 2) { + u16 val = data[off - init_off + 1]; + val |= (u16) data[off - init_off] << 8; + rio_write_config_16(dev, off, val); + off += 2; + size -= 2; + } + + if (size) { + rio_write_config_8(dev, off, data[off - init_off]); + off++; + --size; + } + + return count; +} + +static struct bin_attribute rio_config_attr = { + .attr = { + .name = "config", + .mode = S_IRUGO | S_IWUSR, + }, + .size = RIO_MAINT_SPACE_SZ, + .read = rio_read_config, + .write = rio_write_config, +}; + +static struct bin_attribute *rio_dev_bin_attrs[] = { + &rio_config_attr, + NULL, +}; + +static umode_t rio_dev_is_attr_visible(struct kobject *kobj, + struct attribute *attr, int n) +{ + struct rio_dev *rdev = to_rio_dev(kobj_to_dev(kobj)); + umode_t mode = attr->mode; + + if (!(rdev->pef & RIO_PEF_SWITCH) && + (attr == &dev_attr_routes.attr || + attr == &dev_attr_lnext.attr || + attr == &dev_attr_hopcount.attr)) { + /* + * Hide switch-specific attributes for a non-switch device. + */ + mode = 0; + } + + return mode; +} + +static const struct attribute_group rio_dev_group = { + .attrs = rio_dev_attrs, + .is_visible = rio_dev_is_attr_visible, + .bin_attrs = rio_dev_bin_attrs, +}; + +const struct attribute_group *rio_dev_groups[] = { + &rio_dev_group, + NULL, +}; + +static ssize_t scan_store(struct bus_type *bus, const char *buf, size_t count) +{ + long val; + int rc; + + if (kstrtol(buf, 0, &val) < 0) + return -EINVAL; + + if (val == RIO_MPORT_ANY) { + rc = rio_init_mports(); + goto exit; + } + + if (val < 0 || val >= RIO_MAX_MPORTS) + return -EINVAL; + + rc = rio_mport_scan((int)val); +exit: + if (!rc) + rc = count; + + return rc; +} +static BUS_ATTR_WO(scan); + +static struct attribute *rio_bus_attrs[] = { + &bus_attr_scan.attr, + NULL, +}; + +static const struct attribute_group rio_bus_group = { + .attrs = rio_bus_attrs, +}; + +const struct attribute_group *rio_bus_groups[] = { + &rio_bus_group, + NULL, +}; + +static ssize_t +port_destid_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct rio_mport *mport = to_rio_mport(dev); + + if (mport) + return sprintf(buf, "0x%04x\n", mport->host_deviceid); + else + return -ENODEV; +} +static DEVICE_ATTR_RO(port_destid); + +static ssize_t sys_size_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct rio_mport *mport = to_rio_mport(dev); + + if (mport) + return sprintf(buf, "%u\n", mport->sys_size); + else + return -ENODEV; +} +static DEVICE_ATTR_RO(sys_size); + +static struct attribute *rio_mport_attrs[] = { + &dev_attr_port_destid.attr, + &dev_attr_sys_size.attr, + NULL, +}; + +static const struct attribute_group rio_mport_group = { + .attrs = rio_mport_attrs, +}; + +const struct attribute_group *rio_mport_groups[] = { + &rio_mport_group, + NULL, +}; diff --git a/drivers/rapidio/rio.c b/drivers/rapidio/rio.c new file mode 100644 index 000000000..fcab174e5 --- /dev/null +++ b/drivers/rapidio/rio.c @@ -0,0 +1,2332 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * RapidIO interconnect services + * (RapidIO Interconnect Specification, http://www.rapidio.org) + * + * Copyright 2005 MontaVista Software, Inc. + * Matt Porter <mporter@kernel.crashing.org> + * + * Copyright 2009 - 2013 Integrated Device Technology, Inc. 
+ * Alex Bounine <alexandre.bounine@idt.com> + */ + +#include <linux/types.h> +#include <linux/kernel.h> + +#include <linux/delay.h> +#include <linux/init.h> +#include <linux/rio.h> +#include <linux/rio_drv.h> +#include <linux/rio_ids.h> +#include <linux/rio_regs.h> +#include <linux/module.h> +#include <linux/spinlock.h> +#include <linux/slab.h> +#include <linux/interrupt.h> + +#include "rio.h" + +/* + * struct rio_pwrite - RIO portwrite event + * @node: Node in list of doorbell events + * @pwcback: Doorbell event callback + * @context: Handler specific context to pass on event + */ +struct rio_pwrite { + struct list_head node; + + int (*pwcback)(struct rio_mport *mport, void *context, + union rio_pw_msg *msg, int step); + void *context; +}; + +MODULE_DESCRIPTION("RapidIO Subsystem Core"); +MODULE_AUTHOR("Matt Porter <mporter@kernel.crashing.org>"); +MODULE_AUTHOR("Alexandre Bounine <alexandre.bounine@idt.com>"); +MODULE_LICENSE("GPL"); + +static int hdid[RIO_MAX_MPORTS]; +static int ids_num; +module_param_array(hdid, int, &ids_num, 0); +MODULE_PARM_DESC(hdid, + "Destination ID assignment to local RapidIO controllers"); + +static LIST_HEAD(rio_devices); +static LIST_HEAD(rio_nets); +static DEFINE_SPINLOCK(rio_global_list_lock); + +static LIST_HEAD(rio_mports); +static LIST_HEAD(rio_scans); +static DEFINE_MUTEX(rio_mport_list_lock); +static unsigned char next_portid; +static DEFINE_SPINLOCK(rio_mmap_lock); + +/** + * rio_local_get_device_id - Get the base/extended device id for a port + * @port: RIO master port from which to get the deviceid + * + * Reads the base/extended device id from the local device + * implementing the master port. Returns the 8/16-bit device + * id. + */ +u16 rio_local_get_device_id(struct rio_mport *port) +{ + u32 result; + + rio_local_read_config_32(port, RIO_DID_CSR, &result); + + return (RIO_GET_DID(port->sys_size, result)); +} +EXPORT_SYMBOL_GPL(rio_local_get_device_id); + +/** + * rio_query_mport - Query mport device attributes + * @port: mport device to query + * @mport_attr: mport attributes data structure + * + * Returns attributes of specified mport through the + * pointer to attributes data structure. + */ +int rio_query_mport(struct rio_mport *port, + struct rio_mport_attr *mport_attr) +{ + if (!port->ops->query_mport) + return -ENODATA; + return port->ops->query_mport(port, mport_attr); +} +EXPORT_SYMBOL(rio_query_mport); + +/** + * rio_alloc_net- Allocate and initialize a new RIO network data structure + * @mport: Master port associated with the RIO network + * + * Allocates a RIO network structure, initializes per-network + * list heads, and adds the associated master port to the + * network list of associated master ports. Returns a + * RIO network pointer on success or %NULL on failure. 
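+ *
+ * A minimal enumerator pairs this with rio_add_net(), as rio_scan_alloc_net()
+ * in rio-scan.c does (sketch; the release callback name is hypothetical):
+ *
+ *	net = rio_alloc_net(mport);
+ *	if (net) {
+ *		net->id = mport->id;
+ *		net->hport = mport;
+ *		dev_set_name(&net->dev, "rnet_%d", net->id);
+ *		net->dev.parent = &mport->dev;
+ *		net->dev.release = my_net_release;
+ *		rio_add_net(net);
+ *	}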
+ */ +struct rio_net *rio_alloc_net(struct rio_mport *mport) +{ + struct rio_net *net = kzalloc(sizeof(*net), GFP_KERNEL); + + if (net) { + INIT_LIST_HEAD(&net->node); + INIT_LIST_HEAD(&net->devices); + INIT_LIST_HEAD(&net->switches); + INIT_LIST_HEAD(&net->mports); + mport->net = net; + } + return net; +} +EXPORT_SYMBOL_GPL(rio_alloc_net); + +int rio_add_net(struct rio_net *net) +{ + int err; + + err = device_register(&net->dev); + if (err) + return err; + spin_lock(&rio_global_list_lock); + list_add_tail(&net->node, &rio_nets); + spin_unlock(&rio_global_list_lock); + + return 0; +} +EXPORT_SYMBOL_GPL(rio_add_net); + +void rio_free_net(struct rio_net *net) +{ + spin_lock(&rio_global_list_lock); + if (!list_empty(&net->node)) + list_del(&net->node); + spin_unlock(&rio_global_list_lock); + if (net->release) + net->release(net); + device_unregister(&net->dev); +} +EXPORT_SYMBOL_GPL(rio_free_net); + +/** + * rio_local_set_device_id - Set the base/extended device id for a port + * @port: RIO master port + * @did: Device ID value to be written + * + * Writes the base/extended device id from a device. + */ +void rio_local_set_device_id(struct rio_mport *port, u16 did) +{ + rio_local_write_config_32(port, RIO_DID_CSR, + RIO_SET_DID(port->sys_size, did)); +} +EXPORT_SYMBOL_GPL(rio_local_set_device_id); + +/** + * rio_add_device- Adds a RIO device to the device model + * @rdev: RIO device + * + * Adds the RIO device to the global device list and adds the RIO + * device to the RIO device list. Creates the generic sysfs nodes + * for an RIO device. + */ +int rio_add_device(struct rio_dev *rdev) +{ + int err; + + atomic_set(&rdev->state, RIO_DEVICE_RUNNING); + err = device_register(&rdev->dev); + if (err) + return err; + + spin_lock(&rio_global_list_lock); + list_add_tail(&rdev->global_list, &rio_devices); + if (rdev->net) { + list_add_tail(&rdev->net_list, &rdev->net->devices); + if (rdev->pef & RIO_PEF_SWITCH) + list_add_tail(&rdev->rswitch->node, + &rdev->net->switches); + } + spin_unlock(&rio_global_list_lock); + + return 0; +} +EXPORT_SYMBOL_GPL(rio_add_device); + +/* + * rio_del_device - removes a RIO device from the device model + * @rdev: RIO device + * @state: device state to set during removal process + * + * Removes the RIO device to the kernel device list and subsystem's device list. + * Clears sysfs entries for the removed device. + */ +void rio_del_device(struct rio_dev *rdev, enum rio_device_state state) +{ + pr_debug("RIO: %s: removing %s\n", __func__, rio_name(rdev)); + atomic_set(&rdev->state, state); + spin_lock(&rio_global_list_lock); + list_del(&rdev->global_list); + if (rdev->net) { + list_del(&rdev->net_list); + if (rdev->pef & RIO_PEF_SWITCH) { + list_del(&rdev->rswitch->node); + kfree(rdev->rswitch->route_table); + } + } + spin_unlock(&rio_global_list_lock); + device_unregister(&rdev->dev); +} +EXPORT_SYMBOL_GPL(rio_del_device); + +/** + * rio_request_inb_mbox - request inbound mailbox service + * @mport: RIO master port from which to allocate the mailbox resource + * @dev_id: Device specific pointer to pass on event + * @mbox: Mailbox number to claim + * @entries: Number of entries in inbound mailbox queue + * @minb: Callback to execute when inbound message is received + * + * Requests ownership of an inbound mailbox resource and binds + * a callback function to the resource. Returns %0 on success. 
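+ *
+ * Hypothetical usage from a messaging driver, with a driver-defined
+ * callback my_inb_msg_cb, mailbox 0 and a 32-entry receive queue:
+ *
+ *	rc = rio_request_inb_mbox(mport, my_dev, 0, 32, my_inb_msg_cb);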
+ */ +int rio_request_inb_mbox(struct rio_mport *mport, + void *dev_id, + int mbox, + int entries, + void (*minb) (struct rio_mport * mport, void *dev_id, int mbox, + int slot)) +{ + int rc = -ENOSYS; + struct resource *res; + + if (!mport->ops->open_inb_mbox) + goto out; + + res = kzalloc(sizeof(*res), GFP_KERNEL); + if (res) { + rio_init_mbox_res(res, mbox, mbox); + + /* Make sure this mailbox isn't in use */ + rc = request_resource(&mport->riores[RIO_INB_MBOX_RESOURCE], + res); + if (rc < 0) { + kfree(res); + goto out; + } + + mport->inb_msg[mbox].res = res; + + /* Hook the inbound message callback */ + mport->inb_msg[mbox].mcback = minb; + + rc = mport->ops->open_inb_mbox(mport, dev_id, mbox, entries); + if (rc) { + mport->inb_msg[mbox].mcback = NULL; + mport->inb_msg[mbox].res = NULL; + release_resource(res); + kfree(res); + } + } else + rc = -ENOMEM; + + out: + return rc; +} +EXPORT_SYMBOL_GPL(rio_request_inb_mbox); + +/** + * rio_release_inb_mbox - release inbound mailbox message service + * @mport: RIO master port from which to release the mailbox resource + * @mbox: Mailbox number to release + * + * Releases ownership of an inbound mailbox resource. Returns 0 + * if the request has been satisfied. + */ +int rio_release_inb_mbox(struct rio_mport *mport, int mbox) +{ + int rc; + + if (!mport->ops->close_inb_mbox || !mport->inb_msg[mbox].res) + return -EINVAL; + + mport->ops->close_inb_mbox(mport, mbox); + mport->inb_msg[mbox].mcback = NULL; + + rc = release_resource(mport->inb_msg[mbox].res); + if (rc) + return rc; + + kfree(mport->inb_msg[mbox].res); + mport->inb_msg[mbox].res = NULL; + + return 0; +} +EXPORT_SYMBOL_GPL(rio_release_inb_mbox); + +/** + * rio_request_outb_mbox - request outbound mailbox service + * @mport: RIO master port from which to allocate the mailbox resource + * @dev_id: Device specific pointer to pass on event + * @mbox: Mailbox number to claim + * @entries: Number of entries in outbound mailbox queue + * @moutb: Callback to execute when outbound message is sent + * + * Requests ownership of an outbound mailbox resource and binds + * a callback function to the resource. Returns 0 on success. + */ +int rio_request_outb_mbox(struct rio_mport *mport, + void *dev_id, + int mbox, + int entries, + void (*moutb) (struct rio_mport * mport, void *dev_id, int mbox, int slot)) +{ + int rc = -ENOSYS; + struct resource *res; + + if (!mport->ops->open_outb_mbox) + goto out; + + res = kzalloc(sizeof(*res), GFP_KERNEL); + if (res) { + rio_init_mbox_res(res, mbox, mbox); + + /* Make sure this outbound mailbox isn't in use */ + rc = request_resource(&mport->riores[RIO_OUTB_MBOX_RESOURCE], + res); + if (rc < 0) { + kfree(res); + goto out; + } + + mport->outb_msg[mbox].res = res; + + /* Hook the inbound message callback */ + mport->outb_msg[mbox].mcback = moutb; + + rc = mport->ops->open_outb_mbox(mport, dev_id, mbox, entries); + if (rc) { + mport->outb_msg[mbox].mcback = NULL; + mport->outb_msg[mbox].res = NULL; + release_resource(res); + kfree(res); + } + } else + rc = -ENOMEM; + + out: + return rc; +} +EXPORT_SYMBOL_GPL(rio_request_outb_mbox); + +/** + * rio_release_outb_mbox - release outbound mailbox message service + * @mport: RIO master port from which to release the mailbox resource + * @mbox: Mailbox number to release + * + * Releases ownership of an inbound mailbox resource. Returns 0 + * if the request has been satisfied. 
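+ * The mailbox must previously have been claimed with
+ * rio_request_outb_mbox(); otherwise %-EINVAL is returned.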
+ */ +int rio_release_outb_mbox(struct rio_mport *mport, int mbox) +{ + int rc; + + if (!mport->ops->close_outb_mbox || !mport->outb_msg[mbox].res) + return -EINVAL; + + mport->ops->close_outb_mbox(mport, mbox); + mport->outb_msg[mbox].mcback = NULL; + + rc = release_resource(mport->outb_msg[mbox].res); + if (rc) + return rc; + + kfree(mport->outb_msg[mbox].res); + mport->outb_msg[mbox].res = NULL; + + return 0; +} +EXPORT_SYMBOL_GPL(rio_release_outb_mbox); + +/** + * rio_setup_inb_dbell - bind inbound doorbell callback + * @mport: RIO master port to bind the doorbell callback + * @dev_id: Device specific pointer to pass on event + * @res: Doorbell message resource + * @dinb: Callback to execute when doorbell is received + * + * Adds a doorbell resource/callback pair into a port's + * doorbell event list. Returns 0 if the request has been + * satisfied. + */ +static int +rio_setup_inb_dbell(struct rio_mport *mport, void *dev_id, struct resource *res, + void (*dinb) (struct rio_mport * mport, void *dev_id, u16 src, u16 dst, + u16 info)) +{ + struct rio_dbell *dbell = kmalloc(sizeof(*dbell), GFP_KERNEL); + + if (!dbell) + return -ENOMEM; + + dbell->res = res; + dbell->dinb = dinb; + dbell->dev_id = dev_id; + + mutex_lock(&mport->lock); + list_add_tail(&dbell->node, &mport->dbells); + mutex_unlock(&mport->lock); + return 0; +} + +/** + * rio_request_inb_dbell - request inbound doorbell message service + * @mport: RIO master port from which to allocate the doorbell resource + * @dev_id: Device specific pointer to pass on event + * @start: Doorbell info range start + * @end: Doorbell info range end + * @dinb: Callback to execute when doorbell is received + * + * Requests ownership of an inbound doorbell resource and binds + * a callback function to the resource. Returns 0 if the request + * has been satisfied. + */ +int rio_request_inb_dbell(struct rio_mport *mport, + void *dev_id, + u16 start, + u16 end, + void (*dinb) (struct rio_mport * mport, void *dev_id, u16 src, + u16 dst, u16 info)) +{ + int rc; + struct resource *res = kzalloc(sizeof(*res), GFP_KERNEL); + + if (res) { + rio_init_dbell_res(res, start, end); + + /* Make sure these doorbells aren't in use */ + rc = request_resource(&mport->riores[RIO_DOORBELL_RESOURCE], + res); + if (rc < 0) { + kfree(res); + goto out; + } + + /* Hook the doorbell callback */ + rc = rio_setup_inb_dbell(mport, dev_id, res, dinb); + } else + rc = -ENOMEM; + + out: + return rc; +} +EXPORT_SYMBOL_GPL(rio_request_inb_dbell); + +/** + * rio_release_inb_dbell - release inbound doorbell message service + * @mport: RIO master port from which to release the doorbell resource + * @start: Doorbell info range start + * @end: Doorbell info range end + * + * Releases ownership of an inbound doorbell resource and removes + * callback from the doorbell event list. Returns 0 if the request + * has been satisfied. 
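+ *
+ * The @start/@end pair must exactly match the range passed to
+ * rio_request_inb_dbell(); a partially overlapping range is rejected
+ * with %-EINVAL.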
+ */ +int rio_release_inb_dbell(struct rio_mport *mport, u16 start, u16 end) +{ + int rc = 0, found = 0; + struct rio_dbell *dbell; + + mutex_lock(&mport->lock); + list_for_each_entry(dbell, &mport->dbells, node) { + if ((dbell->res->start == start) && (dbell->res->end == end)) { + list_del(&dbell->node); + found = 1; + break; + } + } + mutex_unlock(&mport->lock); + + /* If we can't find an exact match, fail */ + if (!found) { + rc = -EINVAL; + goto out; + } + + /* Release the doorbell resource */ + rc = release_resource(dbell->res); + + /* Free the doorbell event */ + kfree(dbell); + + out: + return rc; +} +EXPORT_SYMBOL_GPL(rio_release_inb_dbell); + +/** + * rio_request_outb_dbell - request outbound doorbell message range + * @rdev: RIO device from which to allocate the doorbell resource + * @start: Doorbell message range start + * @end: Doorbell message range end + * + * Requests ownership of a doorbell message range. Returns a resource + * if the request has been satisfied or %NULL on failure. + */ +struct resource *rio_request_outb_dbell(struct rio_dev *rdev, u16 start, + u16 end) +{ + struct resource *res = kzalloc(sizeof(struct resource), GFP_KERNEL); + + if (res) { + rio_init_dbell_res(res, start, end); + + /* Make sure these doorbells aren't in use */ + if (request_resource(&rdev->riores[RIO_DOORBELL_RESOURCE], res) + < 0) { + kfree(res); + res = NULL; + } + } + + return res; +} +EXPORT_SYMBOL_GPL(rio_request_outb_dbell); + +/** + * rio_release_outb_dbell - release outbound doorbell message range + * @rdev: RIO device from which to release the doorbell resource + * @res: Doorbell resource to be freed + * + * Releases ownership of a doorbell message range. Returns 0 if the + * request has been satisfied. + */ +int rio_release_outb_dbell(struct rio_dev *rdev, struct resource *res) +{ + int rc = release_resource(res); + + kfree(res); + + return rc; +} +EXPORT_SYMBOL_GPL(rio_release_outb_dbell); + +/** + * rio_add_mport_pw_handler - add port-write message handler into the list + * of mport specific pw handlers + * @mport: RIO master port to bind the portwrite callback + * @context: Handler specific context to pass on event + * @pwcback: Callback to execute when portwrite is received + * + * Returns 0 if the request has been satisfied. + */ +int rio_add_mport_pw_handler(struct rio_mport *mport, void *context, + int (*pwcback)(struct rio_mport *mport, + void *context, union rio_pw_msg *msg, int step)) +{ + struct rio_pwrite *pwrite = kzalloc(sizeof(*pwrite), GFP_KERNEL); + + if (!pwrite) + return -ENOMEM; + + pwrite->pwcback = pwcback; + pwrite->context = context; + mutex_lock(&mport->lock); + list_add_tail(&pwrite->node, &mport->pwrites); + mutex_unlock(&mport->lock); + return 0; +} +EXPORT_SYMBOL_GPL(rio_add_mport_pw_handler); + +/** + * rio_del_mport_pw_handler - remove port-write message handler from the list + * of mport specific pw handlers + * @mport: RIO master port to bind the portwrite callback + * @context: Registered handler specific context to pass on event + * @pwcback: Registered callback function + * + * Returns 0 if the request has been satisfied. 
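+ * Returns %-EINVAL if no handler registered with the same @pwcback and
+ * @context pair is found.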
+ */ +int rio_del_mport_pw_handler(struct rio_mport *mport, void *context, + int (*pwcback)(struct rio_mport *mport, + void *context, union rio_pw_msg *msg, int step)) +{ + int rc = -EINVAL; + struct rio_pwrite *pwrite; + + mutex_lock(&mport->lock); + list_for_each_entry(pwrite, &mport->pwrites, node) { + if (pwrite->pwcback == pwcback && pwrite->context == context) { + list_del(&pwrite->node); + kfree(pwrite); + rc = 0; + break; + } + } + mutex_unlock(&mport->lock); + + return rc; +} +EXPORT_SYMBOL_GPL(rio_del_mport_pw_handler); + +/** + * rio_request_inb_pwrite - request inbound port-write message service for + * specific RapidIO device + * @rdev: RIO device to which register inbound port-write callback routine + * @pwcback: Callback routine to execute when port-write is received + * + * Binds a port-write callback function to the RapidIO device. + * Returns 0 if the request has been satisfied. + */ +int rio_request_inb_pwrite(struct rio_dev *rdev, + int (*pwcback)(struct rio_dev *rdev, union rio_pw_msg *msg, int step)) +{ + int rc = 0; + + spin_lock(&rio_global_list_lock); + if (rdev->pwcback) + rc = -ENOMEM; + else + rdev->pwcback = pwcback; + + spin_unlock(&rio_global_list_lock); + return rc; +} +EXPORT_SYMBOL_GPL(rio_request_inb_pwrite); + +/** + * rio_release_inb_pwrite - release inbound port-write message service + * associated with specific RapidIO device + * @rdev: RIO device which registered for inbound port-write callback + * + * Removes callback from the rio_dev structure. Returns 0 if the request + * has been satisfied. + */ +int rio_release_inb_pwrite(struct rio_dev *rdev) +{ + int rc = -ENOMEM; + + spin_lock(&rio_global_list_lock); + if (rdev->pwcback) { + rdev->pwcback = NULL; + rc = 0; + } + + spin_unlock(&rio_global_list_lock); + return rc; +} +EXPORT_SYMBOL_GPL(rio_release_inb_pwrite); + +/** + * rio_pw_enable - Enables/disables port-write handling by a master port + * @mport: Master port associated with port-write handling + * @enable: 1=enable, 0=disable + */ +void rio_pw_enable(struct rio_mport *mport, int enable) +{ + if (mport->ops->pwenable) { + mutex_lock(&mport->lock); + + if ((enable && ++mport->pwe_refcnt == 1) || + (!enable && mport->pwe_refcnt && --mport->pwe_refcnt == 0)) + mport->ops->pwenable(mport, enable); + mutex_unlock(&mport->lock); + } +} +EXPORT_SYMBOL_GPL(rio_pw_enable); + +/** + * rio_map_inb_region -- Map inbound memory region. + * @mport: Master port. + * @local: physical address of memory region to be mapped + * @rbase: RIO base address assigned to this window + * @size: Size of the memory region + * @rflags: Flags for mapping. + * + * Return: 0 -- Success. + * + * This function will create the mapping from RIO space to local memory. 
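+ *
+ * Illustrative call (buffer names hypothetical) exposing a local DMA
+ * buffer at inbound RIO address 0x10000000 with default flags:
+ *
+ *	rc = rio_map_inb_region(mport, buf_dma, 0x10000000, buf_size, 0);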
+ */ +int rio_map_inb_region(struct rio_mport *mport, dma_addr_t local, + u64 rbase, u32 size, u32 rflags) +{ + int rc; + unsigned long flags; + + if (!mport->ops->map_inb) + return -1; + spin_lock_irqsave(&rio_mmap_lock, flags); + rc = mport->ops->map_inb(mport, local, rbase, size, rflags); + spin_unlock_irqrestore(&rio_mmap_lock, flags); + return rc; +} +EXPORT_SYMBOL_GPL(rio_map_inb_region); + +/** + * rio_unmap_inb_region -- Unmap the inbound memory region + * @mport: Master port + * @lstart: physical address of memory region to be unmapped + */ +void rio_unmap_inb_region(struct rio_mport *mport, dma_addr_t lstart) +{ + unsigned long flags; + if (!mport->ops->unmap_inb) + return; + spin_lock_irqsave(&rio_mmap_lock, flags); + mport->ops->unmap_inb(mport, lstart); + spin_unlock_irqrestore(&rio_mmap_lock, flags); +} +EXPORT_SYMBOL_GPL(rio_unmap_inb_region); + +/** + * rio_map_outb_region -- Map outbound memory region. + * @mport: Master port. + * @destid: destination id window points to + * @rbase: RIO base address window translates to + * @size: Size of the memory region + * @rflags: Flags for mapping. + * @local: physical address of memory region mapped + * + * Return: 0 -- Success. + * + * This function will create the mapping from RIO space to local memory. + */ +int rio_map_outb_region(struct rio_mport *mport, u16 destid, u64 rbase, + u32 size, u32 rflags, dma_addr_t *local) +{ + int rc; + unsigned long flags; + + if (!mport->ops->map_outb) + return -ENODEV; + + spin_lock_irqsave(&rio_mmap_lock, flags); + rc = mport->ops->map_outb(mport, destid, rbase, size, + rflags, local); + spin_unlock_irqrestore(&rio_mmap_lock, flags); + + return rc; +} +EXPORT_SYMBOL_GPL(rio_map_outb_region); + +/** + * rio_unmap_inb_region -- Unmap the inbound memory region + * @mport: Master port + * @destid: destination id mapping points to + * @rstart: RIO base address window translates to + */ +void rio_unmap_outb_region(struct rio_mport *mport, u16 destid, u64 rstart) +{ + unsigned long flags; + + if (!mport->ops->unmap_outb) + return; + + spin_lock_irqsave(&rio_mmap_lock, flags); + mport->ops->unmap_outb(mport, destid, rstart); + spin_unlock_irqrestore(&rio_mmap_lock, flags); +} +EXPORT_SYMBOL_GPL(rio_unmap_outb_region); + +/** + * rio_mport_get_physefb - Helper function that returns register offset + * for Physical Layer Extended Features Block. 
+ * @port: Master port to issue transaction + * @local: Indicate a local master port or remote device access + * @destid: Destination ID of the device + * @hopcount: Number of switch hops to the device + * @rmap: pointer to location to store register map type info + */ +u32 +rio_mport_get_physefb(struct rio_mport *port, int local, + u16 destid, u8 hopcount, u32 *rmap) +{ + u32 ext_ftr_ptr; + u32 ftr_header; + + ext_ftr_ptr = rio_mport_get_efb(port, local, destid, hopcount, 0); + + while (ext_ftr_ptr) { + if (local) + rio_local_read_config_32(port, ext_ftr_ptr, + &ftr_header); + else + rio_mport_read_config_32(port, destid, hopcount, + ext_ftr_ptr, &ftr_header); + + ftr_header = RIO_GET_BLOCK_ID(ftr_header); + switch (ftr_header) { + + case RIO_EFB_SER_EP_ID: + case RIO_EFB_SER_EP_REC_ID: + case RIO_EFB_SER_EP_FREE_ID: + case RIO_EFB_SER_EP_M1_ID: + case RIO_EFB_SER_EP_SW_M1_ID: + case RIO_EFB_SER_EPF_M1_ID: + case RIO_EFB_SER_EPF_SW_M1_ID: + *rmap = 1; + return ext_ftr_ptr; + + case RIO_EFB_SER_EP_M2_ID: + case RIO_EFB_SER_EP_SW_M2_ID: + case RIO_EFB_SER_EPF_M2_ID: + case RIO_EFB_SER_EPF_SW_M2_ID: + *rmap = 2; + return ext_ftr_ptr; + + default: + break; + } + + ext_ftr_ptr = rio_mport_get_efb(port, local, destid, + hopcount, ext_ftr_ptr); + } + + return ext_ftr_ptr; +} +EXPORT_SYMBOL_GPL(rio_mport_get_physefb); + +/** + * rio_get_comptag - Begin or continue searching for a RIO device by component tag + * @comp_tag: RIO component tag to match + * @from: Previous RIO device found in search, or %NULL for new search + * + * Iterates through the list of known RIO devices. If a RIO device is + * found with a matching @comp_tag, a pointer to its device + * structure is returned. Otherwise, %NULL is returned. A new search + * is initiated by passing %NULL to the @from argument. Otherwise, if + * @from is not %NULL, searches continue from next device on the global + * list. + */ +struct rio_dev *rio_get_comptag(u32 comp_tag, struct rio_dev *from) +{ + struct list_head *n; + struct rio_dev *rdev; + + spin_lock(&rio_global_list_lock); + n = from ? from->global_list.next : rio_devices.next; + + while (n && (n != &rio_devices)) { + rdev = rio_dev_g(n); + if (rdev->comp_tag == comp_tag) + goto exit; + n = n->next; + } + rdev = NULL; +exit: + spin_unlock(&rio_global_list_lock); + return rdev; +} +EXPORT_SYMBOL_GPL(rio_get_comptag); + +/** + * rio_set_port_lockout - Sets/clears LOCKOUT bit (RIO EM 1.3) for a switch port. 
+ * @rdev: Pointer to RIO device control structure + * @pnum: Switch port number to set LOCKOUT bit + * @lock: Operation : set (=1) or clear (=0) + */ +int rio_set_port_lockout(struct rio_dev *rdev, u32 pnum, int lock) +{ + u32 regval; + + rio_read_config_32(rdev, + RIO_DEV_PORT_N_CTL_CSR(rdev, pnum), + ®val); + if (lock) + regval |= RIO_PORT_N_CTL_LOCKOUT; + else + regval &= ~RIO_PORT_N_CTL_LOCKOUT; + + rio_write_config_32(rdev, + RIO_DEV_PORT_N_CTL_CSR(rdev, pnum), + regval); + return 0; +} +EXPORT_SYMBOL_GPL(rio_set_port_lockout); + +/** + * rio_enable_rx_tx_port - enable input receiver and output transmitter of + * given port + * @port: Master port associated with the RIO network + * @local: local=1 select local port otherwise a far device is reached + * @destid: Destination ID of the device to check host bit + * @hopcount: Number of hops to reach the target + * @port_num: Port (-number on switch) to enable on a far end device + * + * Returns 0 or 1 from on General Control Command and Status Register + * (EXT_PTR+0x3C) + */ +int rio_enable_rx_tx_port(struct rio_mport *port, + int local, u16 destid, + u8 hopcount, u8 port_num) +{ +#ifdef CONFIG_RAPIDIO_ENABLE_RX_TX_PORTS + u32 regval; + u32 ext_ftr_ptr; + u32 rmap; + + /* + * enable rx input tx output port + */ + pr_debug("rio_enable_rx_tx_port(local = %d, destid = %d, hopcount = " + "%d, port_num = %d)\n", local, destid, hopcount, port_num); + + ext_ftr_ptr = rio_mport_get_physefb(port, local, destid, + hopcount, &rmap); + + if (local) { + rio_local_read_config_32(port, + ext_ftr_ptr + RIO_PORT_N_CTL_CSR(0, rmap), + ®val); + } else { + if (rio_mport_read_config_32(port, destid, hopcount, + ext_ftr_ptr + RIO_PORT_N_CTL_CSR(port_num, rmap), + ®val) < 0) + return -EIO; + } + + regval = regval | RIO_PORT_N_CTL_EN_RX | RIO_PORT_N_CTL_EN_TX; + + if (local) { + rio_local_write_config_32(port, + ext_ftr_ptr + RIO_PORT_N_CTL_CSR(0, rmap), regval); + } else { + if (rio_mport_write_config_32(port, destid, hopcount, + ext_ftr_ptr + RIO_PORT_N_CTL_CSR(port_num, rmap), + regval) < 0) + return -EIO; + } +#endif + return 0; +} +EXPORT_SYMBOL_GPL(rio_enable_rx_tx_port); + + +/** + * rio_chk_dev_route - Validate route to the specified device. + * @rdev: RIO device failed to respond + * @nrdev: Last active device on the route to rdev + * @npnum: nrdev's port number on the route to rdev + * + * Follows a route to the specified RIO device to determine the last available + * device (and corresponding RIO port) on the route. + */ +static int +rio_chk_dev_route(struct rio_dev *rdev, struct rio_dev **nrdev, int *npnum) +{ + u32 result; + int p_port, rc = -EIO; + struct rio_dev *prev = NULL; + + /* Find switch with failed RIO link */ + while (rdev->prev && (rdev->prev->pef & RIO_PEF_SWITCH)) { + if (!rio_read_config_32(rdev->prev, RIO_DEV_ID_CAR, &result)) { + prev = rdev->prev; + break; + } + rdev = rdev->prev; + } + + if (!prev) + goto err_out; + + p_port = prev->rswitch->route_table[rdev->destid]; + + if (p_port != RIO_INVALID_ROUTE) { + pr_debug("RIO: link failed on [%s]-P%d\n", + rio_name(prev), p_port); + *nrdev = prev; + *npnum = p_port; + rc = 0; + } else + pr_debug("RIO: failed to trace route to %s\n", rio_name(rdev)); +err_out: + return rc; +} + +/** + * rio_mport_chk_dev_access - Validate access to the specified device. 
+ * @mport: Master port to send transactions
+ * @destid: Device destination ID in network
+ * @hopcount: Number of hops into the network
+ */
+int
+rio_mport_chk_dev_access(struct rio_mport *mport, u16 destid, u8 hopcount)
+{
+ int i = 0;
+ u32 tmp;
+
+ while (rio_mport_read_config_32(mport, destid, hopcount,
+ RIO_DEV_ID_CAR, &tmp)) {
+ i++;
+ if (i == RIO_MAX_CHK_RETRY)
+ return -EIO;
+ mdelay(1);
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(rio_mport_chk_dev_access);
+
+/**
+ * rio_chk_dev_access - Validate access to the specified device.
+ * @rdev: Pointer to RIO device control structure
+ */
+static int rio_chk_dev_access(struct rio_dev *rdev)
+{
+ return rio_mport_chk_dev_access(rdev->net->hport,
+ rdev->destid, rdev->hopcount);
+}
+
+/**
+ * rio_get_input_status - Sends a Link-Request/Input-Status control symbol and
+ * returns link-response (if requested).
+ * @rdev: RIO device to issue Input-status command
+ * @pnum: Device port number to issue the command
+ * @lnkresp: Response from a link partner
+ */
+static int
+rio_get_input_status(struct rio_dev *rdev, int pnum, u32 *lnkresp)
+{
+ u32 regval;
+ int checkcount;
+
+ if (lnkresp) {
+ /* Read from link maintenance response register
+ * to clear valid bit */
+ rio_read_config_32(rdev,
+ RIO_DEV_PORT_N_MNT_RSP_CSR(rdev, pnum),
+ &regval);
+ udelay(50);
+ }
+
+ /* Issue Input-status command */
+ rio_write_config_32(rdev,
+ RIO_DEV_PORT_N_MNT_REQ_CSR(rdev, pnum),
+ RIO_MNT_REQ_CMD_IS);
+
+ /* Exit if the response is not expected */
+ if (!lnkresp)
+ return 0;
+
+ checkcount = 3;
+ while (checkcount--) {
+ udelay(50);
+ rio_read_config_32(rdev,
+ RIO_DEV_PORT_N_MNT_RSP_CSR(rdev, pnum),
+ &regval);
+ if (regval & RIO_PORT_N_MNT_RSP_RVAL) {
+ *lnkresp = regval;
+ return 0;
+ }
+ }
+
+ return -EIO;
+}
+
+/**
+ * rio_clr_err_stopped - Clears port Error-stopped states.
+ * @rdev: Pointer to RIO device control structure
+ * @pnum: Switch port number to clear errors
+ * @err_status: port error status (if 0 reads register from device)
+ *
+ * TODO: Currently this routine is not compatible with the recovery process
+ * specified for idt_gen3 RapidIO switch devices. It has to be reviewed
+ * to implement a universal recovery process that is compatible with the
+ * full range of available devices.
+ * IDT gen3 switch driver now implements HW-specific error handler that
+ * issues soft port reset to the port to reset ERR_STOP bits and ackIDs.
+ */
+static int rio_clr_err_stopped(struct rio_dev *rdev, u32 pnum, u32 err_status)
+{
+ struct rio_dev *nextdev = rdev->rswitch->nextdev[pnum];
+ u32 regval;
+ u32 far_ackid, far_linkstat, near_ackid;
+
+ if (err_status == 0)
+ rio_read_config_32(rdev,
+ RIO_DEV_PORT_N_ERR_STS_CSR(rdev, pnum),
+ &err_status);
+
+ if (err_status & RIO_PORT_N_ERR_STS_OUT_ES) {
+ pr_debug("RIO_EM: servicing Output Error-Stopped state\n");
+ /*
+ * Send a Link-Request/Input-Status control symbol
+ */
+ if (rio_get_input_status(rdev, pnum, &regval)) {
+ pr_debug("RIO_EM: Input-status response timeout\n");
+ goto rd_err;
+ }
+
+ pr_debug("RIO_EM: SP%d Input-status response=0x%08x\n",
+ pnum, regval);
+ far_ackid = (regval & RIO_PORT_N_MNT_RSP_ASTAT) >> 5;
+ far_linkstat = regval & RIO_PORT_N_MNT_RSP_LSTAT;
+ rio_read_config_32(rdev,
+ RIO_DEV_PORT_N_ACK_STS_CSR(rdev, pnum),
+ &regval);
+ pr_debug("RIO_EM: SP%d_ACK_STS_CSR=0x%08x\n", pnum, regval);
+ near_ackid = (regval & RIO_PORT_N_ACK_INBOUND) >> 24;
+ pr_debug("RIO_EM: SP%d far_ackID=0x%02x far_linkstat=0x%02x" \
+ " near_ackID=0x%02x\n",
+ pnum, far_ackid, far_linkstat, near_ackid);
+
+ /*
+ * If required, synchronize ackIDs of near and
+ * far sides.
+ */
+ if ((far_ackid != ((regval & RIO_PORT_N_ACK_OUTSTAND) >> 8)) ||
+ (far_ackid != (regval & RIO_PORT_N_ACK_OUTBOUND))) {
+ /* Align near outstanding/outbound ackIDs with
+ * far inbound.
+ */
+ rio_write_config_32(rdev,
+ RIO_DEV_PORT_N_ACK_STS_CSR(rdev, pnum),
+ (near_ackid << 24) |
+ (far_ackid << 8) | far_ackid);
+ /* Align far outstanding/outbound ackIDs with
+ * near inbound.
+ */
+ far_ackid++;
+ if (!nextdev) {
+ pr_debug("RIO_EM: nextdev pointer == NULL\n");
+ goto rd_err;
+ }
+
+ rio_write_config_32(nextdev,
+ RIO_DEV_PORT_N_ACK_STS_CSR(nextdev,
+ RIO_GET_PORT_NUM(nextdev->swpinfo)),
+ (far_ackid << 24) |
+ (near_ackid << 8) | near_ackid);
+ }
+rd_err:
+ rio_read_config_32(rdev, RIO_DEV_PORT_N_ERR_STS_CSR(rdev, pnum),
+ &err_status);
+ pr_debug("RIO_EM: SP%d_ERR_STS_CSR=0x%08x\n", pnum, err_status);
+ }
+
+ if ((err_status & RIO_PORT_N_ERR_STS_INP_ES) && nextdev) {
+ pr_debug("RIO_EM: servicing Input Error-Stopped state\n");
+ rio_get_input_status(nextdev,
+ RIO_GET_PORT_NUM(nextdev->swpinfo), NULL);
+ udelay(50);
+
+ rio_read_config_32(rdev, RIO_DEV_PORT_N_ERR_STS_CSR(rdev, pnum),
+ &err_status);
+ pr_debug("RIO_EM: SP%d_ERR_STS_CSR=0x%08x\n", pnum, err_status);
+ }
+
+ return (err_status & (RIO_PORT_N_ERR_STS_OUT_ES |
+ RIO_PORT_N_ERR_STS_INP_ES)) ? 1 : 0;
+}
+
+/**
+ * rio_inb_pwrite_handler - inbound port-write message handler
+ * @mport: mport device associated with port-write
+ * @pw_msg: pointer to inbound port-write message
+ *
+ * Processes an inbound port-write message. Returns 0 if the request
+ * has been satisfied.
+ */ +int rio_inb_pwrite_handler(struct rio_mport *mport, union rio_pw_msg *pw_msg) +{ + struct rio_dev *rdev; + u32 err_status, em_perrdet, em_ltlerrdet; + int rc, portnum; + struct rio_pwrite *pwrite; + +#ifdef DEBUG_PW + { + u32 i; + + pr_debug("%s: PW to mport_%d:\n", __func__, mport->id); + for (i = 0; i < RIO_PW_MSG_SIZE / sizeof(u32); i = i + 4) { + pr_debug("0x%02x: %08x %08x %08x %08x\n", + i * 4, pw_msg->raw[i], pw_msg->raw[i + 1], + pw_msg->raw[i + 2], pw_msg->raw[i + 3]); + } + } +#endif + + rdev = rio_get_comptag((pw_msg->em.comptag & RIO_CTAG_UDEVID), NULL); + if (rdev) { + pr_debug("RIO: Port-Write message from %s\n", rio_name(rdev)); + } else { + pr_debug("RIO: %s No matching device for CTag 0x%08x\n", + __func__, pw_msg->em.comptag); + } + + /* Call a device-specific handler (if it is registered for the device). + * This may be the service for endpoints that send device-specific + * port-write messages. End-point messages expected to be handled + * completely by EP specific device driver. + * For switches rc==0 signals that no standard processing required. + */ + if (rdev && rdev->pwcback) { + rc = rdev->pwcback(rdev, pw_msg, 0); + if (rc == 0) + return 0; + } + + mutex_lock(&mport->lock); + list_for_each_entry(pwrite, &mport->pwrites, node) + pwrite->pwcback(mport, pwrite->context, pw_msg, 0); + mutex_unlock(&mport->lock); + + if (!rdev) + return 0; + + /* + * FIXME: The code below stays as it was before for now until we decide + * how to do default PW handling in combination with per-mport callbacks + */ + + portnum = pw_msg->em.is_port & 0xFF; + + /* Check if device and route to it are functional: + * Sometimes devices may send PW message(s) just before being + * powered down (or link being lost). + */ + if (rio_chk_dev_access(rdev)) { + pr_debug("RIO: device access failed - get link partner\n"); + /* Scan route to the device and identify failed link. + * This will replace device and port reported in PW message. + * PW message should not be used after this point. + */ + if (rio_chk_dev_route(rdev, &rdev, &portnum)) { + pr_err("RIO: Route trace for %s failed\n", + rio_name(rdev)); + return -EIO; + } + pw_msg = NULL; + } + + /* For End-point devices processing stops here */ + if (!(rdev->pef & RIO_PEF_SWITCH)) + return 0; + + if (rdev->phys_efptr == 0) { + pr_err("RIO_PW: Bad switch initialization for %s\n", + rio_name(rdev)); + return 0; + } + + /* + * Process the port-write notification from switch + */ + if (rdev->rswitch->ops && rdev->rswitch->ops->em_handle) + rdev->rswitch->ops->em_handle(rdev, portnum); + + rio_read_config_32(rdev, RIO_DEV_PORT_N_ERR_STS_CSR(rdev, portnum), + &err_status); + pr_debug("RIO_PW: SP%d_ERR_STS_CSR=0x%08x\n", portnum, err_status); + + if (err_status & RIO_PORT_N_ERR_STS_PORT_OK) { + + if (!(rdev->rswitch->port_ok & (1 << portnum))) { + rdev->rswitch->port_ok |= (1 << portnum); + rio_set_port_lockout(rdev, portnum, 0); + /* Schedule Insertion Service */ + pr_debug("RIO_PW: Device Insertion on [%s]-P%d\n", + rio_name(rdev), portnum); + } + + /* Clear error-stopped states (if reported). + * Depending on the link partner state, two attempts + * may be needed for successful recovery. 
+ */
+ if (err_status & (RIO_PORT_N_ERR_STS_OUT_ES |
+ RIO_PORT_N_ERR_STS_INP_ES)) {
+ if (rio_clr_err_stopped(rdev, portnum, err_status))
+ rio_clr_err_stopped(rdev, portnum, 0);
+ }
+ } else { /* if (err_status & RIO_PORT_N_ERR_STS_PORT_UNINIT) */
+
+ if (rdev->rswitch->port_ok & (1 << portnum)) {
+ rdev->rswitch->port_ok &= ~(1 << portnum);
+ rio_set_port_lockout(rdev, portnum, 1);
+
+ if (rdev->phys_rmap == 1) {
+ rio_write_config_32(rdev,
+ RIO_DEV_PORT_N_ACK_STS_CSR(rdev, portnum),
+ RIO_PORT_N_ACK_CLEAR);
+ } else {
+ rio_write_config_32(rdev,
+ RIO_DEV_PORT_N_OB_ACK_CSR(rdev, portnum),
+ RIO_PORT_N_OB_ACK_CLEAR);
+ rio_write_config_32(rdev,
+ RIO_DEV_PORT_N_IB_ACK_CSR(rdev, portnum),
+ 0);
+ }
+
+ /* Schedule Extraction Service */
+ pr_debug("RIO_PW: Device Extraction on [%s]-P%d\n",
+ rio_name(rdev), portnum);
+ }
+ }
+
+ rio_read_config_32(rdev,
+ rdev->em_efptr + RIO_EM_PN_ERR_DETECT(portnum), &em_perrdet);
+ if (em_perrdet) {
+ pr_debug("RIO_PW: RIO_EM_P%d_ERR_DETECT=0x%08x\n",
+ portnum, em_perrdet);
+ /* Clear EM Port N Error Detect CSR */
+ rio_write_config_32(rdev,
+ rdev->em_efptr + RIO_EM_PN_ERR_DETECT(portnum), 0);
+ }
+
+ rio_read_config_32(rdev,
+ rdev->em_efptr + RIO_EM_LTL_ERR_DETECT, &em_ltlerrdet);
+ if (em_ltlerrdet) {
+ pr_debug("RIO_PW: RIO_EM_LTL_ERR_DETECT=0x%08x\n",
+ em_ltlerrdet);
+ /* Clear EM L/T Layer Error Detect CSR */
+ rio_write_config_32(rdev,
+ rdev->em_efptr + RIO_EM_LTL_ERR_DETECT, 0);
+ }
+
+ /* Clear remaining error bits and Port-Write Pending bit */
+ rio_write_config_32(rdev, RIO_DEV_PORT_N_ERR_STS_CSR(rdev, portnum),
+ err_status);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(rio_inb_pwrite_handler);
+
+/**
+ * rio_mport_get_efb - get pointer to next extended features block
+ * @port: Master port to issue transaction
+ * @local: Indicate a local master port or remote device access
+ * @destid: Destination ID of the device
+ * @hopcount: Number of switch hops to the device
+ * @from: Offset of current Extended Feature block header (if 0 starts
+ * from ExtFeaturePtr)
+ */
+u32
+rio_mport_get_efb(struct rio_mport *port, int local, u16 destid,
+ u8 hopcount, u32 from)
+{
+ u32 reg_val;
+
+ if (from == 0) {
+ if (local)
+ rio_local_read_config_32(port, RIO_ASM_INFO_CAR,
+ &reg_val);
+ else
+ rio_mport_read_config_32(port, destid, hopcount,
+ RIO_ASM_INFO_CAR, &reg_val);
+ return reg_val & RIO_EXT_FTR_PTR_MASK;
+ } else {
+ if (local)
+ rio_local_read_config_32(port, from, &reg_val);
+ else
+ rio_mport_read_config_32(port, destid, hopcount,
+ from, &reg_val);
+ return RIO_GET_BLOCK_ID(reg_val);
+ }
+}
+EXPORT_SYMBOL_GPL(rio_mport_get_efb);
+
+/**
+ * rio_mport_get_feature - query for devices' extended features
+ * @port: Master port to issue transaction
+ * @local: Indicate a local master port or remote device access
+ * @destid: Destination ID of the device
+ * @hopcount: Number of switch hops to the device
+ * @ftr: Extended feature code
+ *
+ * Tell if a device supports a given RapidIO capability.
+ * Returns the offset of the requested extended feature
+ * block within the device's RIO configuration space or
+ * 0 in case the device does not support it.
+ */
+u32
+rio_mport_get_feature(struct rio_mport * port, int local, u16 destid,
+ u8 hopcount, int ftr)
+{
+ u32 asm_info, ext_ftr_ptr, ftr_header;
+
+ if (local)
+ rio_local_read_config_32(port, RIO_ASM_INFO_CAR, &asm_info);
+ else
+ rio_mport_read_config_32(port, destid, hopcount,
+ RIO_ASM_INFO_CAR, &asm_info);
+
+ ext_ftr_ptr = asm_info & RIO_EXT_FTR_PTR_MASK;
+
+ while (ext_ftr_ptr) {
+ if (local)
+ rio_local_read_config_32(port, ext_ftr_ptr,
+ &ftr_header);
+ else
+ rio_mport_read_config_32(port, destid, hopcount,
+ ext_ftr_ptr, &ftr_header);
+ if (RIO_GET_BLOCK_ID(ftr_header) == ftr)
+ return ext_ftr_ptr;
+
+ ext_ftr_ptr = RIO_GET_BLOCK_PTR(ftr_header);
+ if (!ext_ftr_ptr)
+ break;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(rio_mport_get_feature);
+
+/**
+ * rio_get_asm - Begin or continue searching for a RIO device by vid/did/asm_vid/asm_did
+ * @vid: RIO vid to match or %RIO_ANY_ID to match all vids
+ * @did: RIO did to match or %RIO_ANY_ID to match all dids
+ * @asm_vid: RIO asm_vid to match or %RIO_ANY_ID to match all asm_vids
+ * @asm_did: RIO asm_did to match or %RIO_ANY_ID to match all asm_dids
+ * @from: Previous RIO device found in search, or %NULL for new search
+ *
+ * Iterates through the list of known RIO devices. If a RIO device is
+ * found with a matching @vid, @did, @asm_vid, @asm_did, the reference
+ * count to the device is incremented and a pointer to its device
+ * structure is returned. Otherwise, %NULL is returned. A new search
+ * is initiated by passing %NULL to the @from argument. Otherwise, if
+ * @from is not %NULL, searches continue from next device on the global
+ * list. The reference count for @from is always decremented if it is
+ * not %NULL.
+ */
+struct rio_dev *rio_get_asm(u16 vid, u16 did,
+ u16 asm_vid, u16 asm_did, struct rio_dev *from)
+{
+ struct list_head *n;
+ struct rio_dev *rdev;
+
+ WARN_ON(in_interrupt());
+ spin_lock(&rio_global_list_lock);
+ n = from ? from->global_list.next : rio_devices.next;
+
+ while (n && (n != &rio_devices)) {
+ rdev = rio_dev_g(n);
+ if ((vid == RIO_ANY_ID || rdev->vid == vid) &&
+ (did == RIO_ANY_ID || rdev->did == did) &&
+ (asm_vid == RIO_ANY_ID || rdev->asm_vid == asm_vid) &&
+ (asm_did == RIO_ANY_ID || rdev->asm_did == asm_did))
+ goto exit;
+ n = n->next;
+ }
+ rdev = NULL;
+ exit:
+ rio_dev_put(from);
+ rdev = rio_dev_get(rdev);
+ spin_unlock(&rio_global_list_lock);
+ return rdev;
+}
+EXPORT_SYMBOL_GPL(rio_get_asm);
+
+/**
+ * rio_get_device - Begin or continue searching for a RIO device by vid/did
+ * @vid: RIO vid to match or %RIO_ANY_ID to match all vids
+ * @did: RIO did to match or %RIO_ANY_ID to match all dids
+ * @from: Previous RIO device found in search, or %NULL for new search
+ *
+ * Iterates through the list of known RIO devices. If a RIO device is
+ * found with a matching @vid and @did, the reference count to the
+ * device is incremented and a pointer to its device structure is returned.
+ * Otherwise, %NULL is returned. A new search is initiated by passing %NULL
+ * to the @from argument. Otherwise, if @from is not %NULL, searches
+ * continue from next device on the global list. The reference count for
+ * @from is always decremented if it is not %NULL.
+ */
+struct rio_dev *rio_get_device(u16 vid, u16 did, struct rio_dev *from)
+{
+ return rio_get_asm(vid, did, RIO_ANY_ID, RIO_ANY_ID, from);
+}
+EXPORT_SYMBOL_GPL(rio_get_device);
+
+/**
+ * rio_std_route_add_entry - Add switch route table entry using standard
+ * registers defined in RIO specification rev.1.3
+ * @mport: Master port to issue transaction
+ * @destid: Destination ID of the device
+ * @hopcount: Number of switch hops to the device
+ * @table: routing table ID (global or port-specific)
+ * @route_destid: destID entry in the RT
+ * @route_port: destination port for specified destID
+ */
+static int
+rio_std_route_add_entry(struct rio_mport *mport, u16 destid, u8 hopcount,
+ u16 table, u16 route_destid, u8 route_port)
+{
+ if (table == RIO_GLOBAL_TABLE) {
+ rio_mport_write_config_32(mport, destid, hopcount,
+ RIO_STD_RTE_CONF_DESTID_SEL_CSR,
+ (u32)route_destid);
+ rio_mport_write_config_32(mport, destid, hopcount,
+ RIO_STD_RTE_CONF_PORT_SEL_CSR,
+ (u32)route_port);
+ }
+
+ udelay(10);
+ return 0;
+}
+
+/**
+ * rio_std_route_get_entry - Read switch route table entry (port number)
+ * associated with specified destID using standard registers defined in RIO
+ * specification rev.1.3
+ * @mport: Master port to issue transaction
+ * @destid: Destination ID of the device
+ * @hopcount: Number of switch hops to the device
+ * @table: routing table ID (global or port-specific)
+ * @route_destid: destID entry in the RT
+ * @route_port: returned destination port for specified destID
+ */
+static int
+rio_std_route_get_entry(struct rio_mport *mport, u16 destid, u8 hopcount,
+ u16 table, u16 route_destid, u8 *route_port)
+{
+ u32 result;
+
+ if (table == RIO_GLOBAL_TABLE) {
+ rio_mport_write_config_32(mport, destid, hopcount,
+ RIO_STD_RTE_CONF_DESTID_SEL_CSR, route_destid);
+ rio_mport_read_config_32(mport, destid, hopcount,
+ RIO_STD_RTE_CONF_PORT_SEL_CSR, &result);
+
+ *route_port = (u8)result;
+ }
+
+ return 0;
+}
+
+/**
+ * rio_std_route_clr_table - Clear switch route table using standard registers
+ * defined in RIO specification rev.1.3.
+ * @mport: Master port to issue transaction
+ * @destid: Destination ID of the device
+ * @hopcount: Number of switch hops to the device
+ * @table: routing table ID (global or port-specific)
+ */
+static int
+rio_std_route_clr_table(struct rio_mport *mport, u16 destid, u8 hopcount,
+ u16 table)
+{
+ u32 max_destid = 0xff;
+ u32 i, pef, id_inc = 1, ext_cfg = 0;
+ u32 port_sel = RIO_INVALID_ROUTE;
+
+ if (table == RIO_GLOBAL_TABLE) {
+ rio_mport_read_config_32(mport, destid, hopcount,
+ RIO_PEF_CAR, &pef);
+
+ if (mport->sys_size) {
+ rio_mport_read_config_32(mport, destid, hopcount,
+ RIO_SWITCH_RT_LIMIT,
+ &max_destid);
+ max_destid &= RIO_RT_MAX_DESTID;
+ }
+
+ if (pef & RIO_PEF_EXT_RT) {
+ ext_cfg = 0x80000000;
+ id_inc = 4;
+ port_sel = (RIO_INVALID_ROUTE << 24) |
+ (RIO_INVALID_ROUTE << 16) |
+ (RIO_INVALID_ROUTE << 8) |
+ RIO_INVALID_ROUTE;
+ }
+
+ for (i = 0; i <= max_destid;) {
+ rio_mport_write_config_32(mport, destid, hopcount,
+ RIO_STD_RTE_CONF_DESTID_SEL_CSR,
+ ext_cfg | i);
+ rio_mport_write_config_32(mport, destid, hopcount,
+ RIO_STD_RTE_CONF_PORT_SEL_CSR,
+ port_sel);
+ i += id_inc;
+ }
+ }
+
+ udelay(10);
+ return 0;
+}
+
+/**
+ * rio_lock_device - Acquires host device lock for specified device
+ * @port: Master port to send transaction
+ * @destid: Destination ID for device/switch
+ * @hopcount: Hopcount to reach switch
+ * @wait_ms: Max wait time in msec (0 = no timeout)
+ *
+ * Attempts to acquire host device lock for specified device.
+ * Returns 0 if device lock acquired or -EINVAL if timeout expires.
+ */
+int rio_lock_device(struct rio_mport *port, u16 destid,
+ u8 hopcount, int wait_ms)
+{
+ u32 result;
+ int tcnt = 0;
+
+ /* Attempt to acquire device lock */
+ rio_mport_write_config_32(port, destid, hopcount,
+ RIO_HOST_DID_LOCK_CSR, port->host_deviceid);
+ rio_mport_read_config_32(port, destid, hopcount,
+ RIO_HOST_DID_LOCK_CSR, &result);
+
+ while (result != port->host_deviceid) {
+ if (wait_ms != 0 && tcnt == wait_ms) {
+ pr_debug("RIO: timeout when locking device %x:%x\n",
+ destid, hopcount);
+ return -EINVAL;
+ }
+
+ /* Delay a bit */
+ mdelay(1);
+ tcnt++;
+ /* Try to acquire device lock again */
+ rio_mport_write_config_32(port, destid,
+ hopcount,
+ RIO_HOST_DID_LOCK_CSR,
+ port->host_deviceid);
+ rio_mport_read_config_32(port, destid,
+ hopcount,
+ RIO_HOST_DID_LOCK_CSR, &result);
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(rio_lock_device);
+
+/**
+ * rio_unlock_device - Releases host device lock for specified device
+ * @port: Master port to send transaction
+ * @destid: Destination ID for device/switch
+ * @hopcount: Hopcount to reach switch
+ *
+ * Returns 0 if device lock released or EINVAL if fails.
+ */ +int rio_unlock_device(struct rio_mport *port, u16 destid, u8 hopcount) +{ + u32 result; + + /* Release device lock */ + rio_mport_write_config_32(port, destid, + hopcount, + RIO_HOST_DID_LOCK_CSR, + port->host_deviceid); + rio_mport_read_config_32(port, destid, hopcount, + RIO_HOST_DID_LOCK_CSR, &result); + if ((result & 0xffff) != 0xffff) { + pr_debug("RIO: badness when releasing device lock %x:%x\n", + destid, hopcount); + return -EINVAL; + } + + return 0; +} +EXPORT_SYMBOL_GPL(rio_unlock_device); + +/** + * rio_route_add_entry- Add a route entry to a switch routing table + * @rdev: RIO device + * @table: Routing table ID + * @route_destid: Destination ID to be routed + * @route_port: Port number to be routed + * @lock: apply a hardware lock on switch device flag (1=lock, 0=no_lock) + * + * If available calls the switch specific add_entry() method to add a route + * entry into a switch routing table. Otherwise uses standard RT update method + * as defined by RapidIO specification. A specific routing table can be selected + * using the @table argument if a switch has per port routing tables or + * the standard (or global) table may be used by passing + * %RIO_GLOBAL_TABLE in @table. + * + * Returns %0 on success or %-EINVAL on failure. + */ +int rio_route_add_entry(struct rio_dev *rdev, + u16 table, u16 route_destid, u8 route_port, int lock) +{ + int rc = -EINVAL; + struct rio_switch_ops *ops = rdev->rswitch->ops; + + if (lock) { + rc = rio_lock_device(rdev->net->hport, rdev->destid, + rdev->hopcount, 1000); + if (rc) + return rc; + } + + spin_lock(&rdev->rswitch->lock); + + if (!ops || !ops->add_entry) { + rc = rio_std_route_add_entry(rdev->net->hport, rdev->destid, + rdev->hopcount, table, + route_destid, route_port); + } else if (try_module_get(ops->owner)) { + rc = ops->add_entry(rdev->net->hport, rdev->destid, + rdev->hopcount, table, route_destid, + route_port); + module_put(ops->owner); + } + + spin_unlock(&rdev->rswitch->lock); + + if (lock) + rio_unlock_device(rdev->net->hport, rdev->destid, + rdev->hopcount); + + return rc; +} +EXPORT_SYMBOL_GPL(rio_route_add_entry); + +/** + * rio_route_get_entry- Read an entry from a switch routing table + * @rdev: RIO device + * @table: Routing table ID + * @route_destid: Destination ID to be routed + * @route_port: Pointer to read port number into + * @lock: apply a hardware lock on switch device flag (1=lock, 0=no_lock) + * + * If available calls the switch specific get_entry() method to fetch a route + * entry from a switch routing table. Otherwise uses standard RT read method + * as defined by RapidIO specification. A specific routing table can be selected + * using the @table argument if a switch has per port routing tables or + * the standard (or global) table may be used by passing + * %RIO_GLOBAL_TABLE in @table. + * + * Returns %0 on success or %-EINVAL on failure. 
+ */ +int rio_route_get_entry(struct rio_dev *rdev, u16 table, + u16 route_destid, u8 *route_port, int lock) +{ + int rc = -EINVAL; + struct rio_switch_ops *ops = rdev->rswitch->ops; + + if (lock) { + rc = rio_lock_device(rdev->net->hport, rdev->destid, + rdev->hopcount, 1000); + if (rc) + return rc; + } + + spin_lock(&rdev->rswitch->lock); + + if (!ops || !ops->get_entry) { + rc = rio_std_route_get_entry(rdev->net->hport, rdev->destid, + rdev->hopcount, table, + route_destid, route_port); + } else if (try_module_get(ops->owner)) { + rc = ops->get_entry(rdev->net->hport, rdev->destid, + rdev->hopcount, table, route_destid, + route_port); + module_put(ops->owner); + } + + spin_unlock(&rdev->rswitch->lock); + + if (lock) + rio_unlock_device(rdev->net->hport, rdev->destid, + rdev->hopcount); + return rc; +} +EXPORT_SYMBOL_GPL(rio_route_get_entry); + +/** + * rio_route_clr_table - Clear a switch routing table + * @rdev: RIO device + * @table: Routing table ID + * @lock: apply a hardware lock on switch device flag (1=lock, 0=no_lock) + * + * If available calls the switch specific clr_table() method to clear a switch + * routing table. Otherwise uses standard RT write method as defined by RapidIO + * specification. A specific routing table can be selected using the @table + * argument if a switch has per port routing tables or the standard (or global) + * table may be used by passing %RIO_GLOBAL_TABLE in @table. + * + * Returns %0 on success or %-EINVAL on failure. + */ +int rio_route_clr_table(struct rio_dev *rdev, u16 table, int lock) +{ + int rc = -EINVAL; + struct rio_switch_ops *ops = rdev->rswitch->ops; + + if (lock) { + rc = rio_lock_device(rdev->net->hport, rdev->destid, + rdev->hopcount, 1000); + if (rc) + return rc; + } + + spin_lock(&rdev->rswitch->lock); + + if (!ops || !ops->clr_table) { + rc = rio_std_route_clr_table(rdev->net->hport, rdev->destid, + rdev->hopcount, table); + } else if (try_module_get(ops->owner)) { + rc = ops->clr_table(rdev->net->hport, rdev->destid, + rdev->hopcount, table); + + module_put(ops->owner); + } + + spin_unlock(&rdev->rswitch->lock); + + if (lock) + rio_unlock_device(rdev->net->hport, rdev->destid, + rdev->hopcount); + + return rc; +} +EXPORT_SYMBOL_GPL(rio_route_clr_table); + +#ifdef CONFIG_RAPIDIO_DMA_ENGINE + +static bool rio_chan_filter(struct dma_chan *chan, void *arg) +{ + struct rio_mport *mport = arg; + + /* Check that DMA device belongs to the right MPORT */ + return mport == container_of(chan->device, struct rio_mport, dma); +} + +/** + * rio_request_mport_dma - request RapidIO capable DMA channel associated + * with specified local RapidIO mport device. + * @mport: RIO mport to perform DMA data transfers + * + * Returns pointer to allocated DMA channel or NULL if failed. + */ +struct dma_chan *rio_request_mport_dma(struct rio_mport *mport) +{ + dma_cap_mask_t mask; + + dma_cap_zero(mask); + dma_cap_set(DMA_SLAVE, mask); + return dma_request_channel(mask, rio_chan_filter, mport); +} +EXPORT_SYMBOL_GPL(rio_request_mport_dma); + +/** + * rio_request_dma - request RapidIO capable DMA channel that supports + * specified target RapidIO device. + * @rdev: RIO device associated with DMA transfer + * + * Returns pointer to allocated DMA channel or NULL if failed. 
+ */ +struct dma_chan *rio_request_dma(struct rio_dev *rdev) +{ + return rio_request_mport_dma(rdev->net->hport); +} +EXPORT_SYMBOL_GPL(rio_request_dma); + +/** + * rio_release_dma - release specified DMA channel + * @dchan: DMA channel to release + */ +void rio_release_dma(struct dma_chan *dchan) +{ + dma_release_channel(dchan); +} +EXPORT_SYMBOL_GPL(rio_release_dma); + +/** + * rio_dma_prep_xfer - RapidIO specific wrapper + * for device_prep_slave_sg callback defined by DMAENGINE. + * @dchan: DMA channel to configure + * @destid: target RapidIO device destination ID + * @data: RIO specific data descriptor + * @direction: DMA data transfer direction (TO or FROM the device) + * @flags: dmaengine defined flags + * + * Initializes RapidIO capable DMA channel for the specified data transfer. + * Uses DMA channel private extension to pass information related to remote + * target RIO device. + * + * Returns: pointer to DMA transaction descriptor if successful, + * error-valued pointer or NULL if failed. + */ +struct dma_async_tx_descriptor *rio_dma_prep_xfer(struct dma_chan *dchan, + u16 destid, struct rio_dma_data *data, + enum dma_transfer_direction direction, unsigned long flags) +{ + struct rio_dma_ext rio_ext; + + if (!dchan->device->device_prep_slave_sg) { + pr_err("%s: prep_rio_sg == NULL\n", __func__); + return NULL; + } + + rio_ext.destid = destid; + rio_ext.rio_addr_u = data->rio_addr_u; + rio_ext.rio_addr = data->rio_addr; + rio_ext.wr_type = data->wr_type; + + return dmaengine_prep_rio_sg(dchan, data->sg, data->sg_len, + direction, flags, &rio_ext); +} +EXPORT_SYMBOL_GPL(rio_dma_prep_xfer); + +/** + * rio_dma_prep_slave_sg - RapidIO specific wrapper + * for device_prep_slave_sg callback defined by DMAENGINE. + * @rdev: RIO device control structure + * @dchan: DMA channel to configure + * @data: RIO specific data descriptor + * @direction: DMA data transfer direction (TO or FROM the device) + * @flags: dmaengine defined flags + * + * Initializes RapidIO capable DMA channel for the specified data transfer. + * Uses DMA channel private extension to pass information related to remote + * target RIO device. + * + * Returns: pointer to DMA transaction descriptor if successful, + * error-valued pointer or NULL if failed. + */ +struct dma_async_tx_descriptor *rio_dma_prep_slave_sg(struct rio_dev *rdev, + struct dma_chan *dchan, struct rio_dma_data *data, + enum dma_transfer_direction direction, unsigned long flags) +{ + return rio_dma_prep_xfer(dchan, rdev->destid, data, direction, flags); +} +EXPORT_SYMBOL_GPL(rio_dma_prep_slave_sg); + +#endif /* CONFIG_RAPIDIO_DMA_ENGINE */ + +/** + * rio_find_mport - find RIO mport by its ID + * @mport_id: number (ID) of mport device + * + * Given a RIO mport number, the desired mport is located + * in the global list of mports. If the mport is found, a pointer to its + * data structure is returned. If no mport is found, %NULL is returned. 
+ */ +struct rio_mport *rio_find_mport(int mport_id) +{ + struct rio_mport *port; + + mutex_lock(&rio_mport_list_lock); + list_for_each_entry(port, &rio_mports, node) { + if (port->id == mport_id) + goto found; + } + port = NULL; +found: + mutex_unlock(&rio_mport_list_lock); + + return port; +} + +/** + * rio_register_scan - enumeration/discovery method registration interface + * @mport_id: mport device ID for which fabric scan routine has to be set + * (RIO_MPORT_ANY = set for all available mports) + * @scan_ops: enumeration/discovery operations structure + * + * Registers enumeration/discovery operations with RapidIO subsystem and + * attaches it to the specified mport device (or all available mports + * if RIO_MPORT_ANY is specified). + * + * Returns error if the mport already has an enumerator attached to it. + * In case of RIO_MPORT_ANY skips mports with valid scan routines (no error). + */ +int rio_register_scan(int mport_id, struct rio_scan *scan_ops) +{ + struct rio_mport *port; + struct rio_scan_node *scan; + int rc = 0; + + pr_debug("RIO: %s for mport_id=%d\n", __func__, mport_id); + + if ((mport_id != RIO_MPORT_ANY && mport_id >= RIO_MAX_MPORTS) || + !scan_ops) + return -EINVAL; + + mutex_lock(&rio_mport_list_lock); + + /* + * Check if there is another enumerator already registered for + * the same mport ID (including RIO_MPORT_ANY). Multiple enumerators + * for the same mport ID are not supported. + */ + list_for_each_entry(scan, &rio_scans, node) { + if (scan->mport_id == mport_id) { + rc = -EBUSY; + goto err_out; + } + } + + /* + * Allocate and initialize new scan registration node. + */ + scan = kzalloc(sizeof(*scan), GFP_KERNEL); + if (!scan) { + rc = -ENOMEM; + goto err_out; + } + + scan->mport_id = mport_id; + scan->ops = scan_ops; + + /* + * Traverse the list of registered mports to attach this new scan. + * + * The new scan with matching mport ID overrides any previously attached + * scan assuming that old scan (if any) is the default one (based on the + * enumerator registration check above). + * If the new scan is the global one, it will be attached only to mports + * that do not have their own individual operations already attached. + */ + list_for_each_entry(port, &rio_mports, node) { + if (port->id == mport_id) { + port->nscan = scan_ops; + break; + } else if (mport_id == RIO_MPORT_ANY && !port->nscan) + port->nscan = scan_ops; + } + + list_add_tail(&scan->node, &rio_scans); + +err_out: + mutex_unlock(&rio_mport_list_lock); + + return rc; +} +EXPORT_SYMBOL_GPL(rio_register_scan); + +/** + * rio_unregister_scan - removes enumeration/discovery method from mport + * @mport_id: mport device ID for which fabric scan routine has to be + * unregistered (RIO_MPORT_ANY = apply to all mports that use + * the specified scan_ops) + * @scan_ops: enumeration/discovery operations structure + * + * Removes enumeration or discovery method assigned to the specified mport + * device. If RIO_MPORT_ANY is specified, removes the specified operations from + * all mports that have them attached. 
+ */ +int rio_unregister_scan(int mport_id, struct rio_scan *scan_ops) +{ + struct rio_mport *port; + struct rio_scan_node *scan; + + pr_debug("RIO: %s for mport_id=%d\n", __func__, mport_id); + + if (mport_id != RIO_MPORT_ANY && mport_id >= RIO_MAX_MPORTS) + return -EINVAL; + + mutex_lock(&rio_mport_list_lock); + + list_for_each_entry(port, &rio_mports, node) + if (port->id == mport_id || + (mport_id == RIO_MPORT_ANY && port->nscan == scan_ops)) + port->nscan = NULL; + + list_for_each_entry(scan, &rio_scans, node) { + if (scan->mport_id == mport_id) { + list_del(&scan->node); + kfree(scan); + break; + } + } + + mutex_unlock(&rio_mport_list_lock); + + return 0; +} +EXPORT_SYMBOL_GPL(rio_unregister_scan); + +/** + * rio_mport_scan - execute enumeration/discovery on the specified mport + * @mport_id: number (ID) of mport device + */ +int rio_mport_scan(int mport_id) +{ + struct rio_mport *port = NULL; + int rc; + + mutex_lock(&rio_mport_list_lock); + list_for_each_entry(port, &rio_mports, node) { + if (port->id == mport_id) + goto found; + } + mutex_unlock(&rio_mport_list_lock); + return -ENODEV; +found: + if (!port->nscan) { + mutex_unlock(&rio_mport_list_lock); + return -EINVAL; + } + + if (!try_module_get(port->nscan->owner)) { + mutex_unlock(&rio_mport_list_lock); + return -ENODEV; + } + + mutex_unlock(&rio_mport_list_lock); + + if (port->host_deviceid >= 0) + rc = port->nscan->enumerate(port, 0); + else + rc = port->nscan->discover(port, RIO_SCAN_ENUM_NO_WAIT); + + module_put(port->nscan->owner); + return rc; +} + +static void rio_fixup_device(struct rio_dev *dev) +{ +} + +static int rio_init(void) +{ + struct rio_dev *dev = NULL; + + while ((dev = rio_get_device(RIO_ANY_ID, RIO_ANY_ID, dev)) != NULL) { + rio_fixup_device(dev); + } + return 0; +} + +static struct workqueue_struct *rio_wq; + +struct rio_disc_work { + struct work_struct work; + struct rio_mport *mport; +}; + +static void disc_work_handler(struct work_struct *_work) +{ + struct rio_disc_work *work; + + work = container_of(_work, struct rio_disc_work, work); + pr_debug("RIO: discovery work for mport %d %s\n", + work->mport->id, work->mport->name); + if (try_module_get(work->mport->nscan->owner)) { + work->mport->nscan->discover(work->mport, 0); + module_put(work->mport->nscan->owner); + } +} + +int rio_init_mports(void) +{ + struct rio_mport *port; + struct rio_disc_work *work; + int n = 0; + + if (!next_portid) + return -ENODEV; + + /* + * First, run enumerations and check if we need to perform discovery + * on any of the registered mports. + */ + mutex_lock(&rio_mport_list_lock); + list_for_each_entry(port, &rio_mports, node) { + if (port->host_deviceid >= 0) { + if (port->nscan && try_module_get(port->nscan->owner)) { + port->nscan->enumerate(port, 0); + module_put(port->nscan->owner); + } + } else + n++; + } + mutex_unlock(&rio_mport_list_lock); + + if (!n) + goto no_disc; + + /* + * If we have mports that require discovery schedule a discovery work + * for each of them. If the code below fails to allocate needed + * resources, exit without error to keep results of enumeration + * process (if any). + * TODO: Implement restart of discovery process for all or + * individual discovering mports. 
+ */ + rio_wq = alloc_workqueue("riodisc", 0, 0); + if (!rio_wq) { + pr_err("RIO: unable allocate rio_wq\n"); + goto no_disc; + } + + work = kcalloc(n, sizeof *work, GFP_KERNEL); + if (!work) { + destroy_workqueue(rio_wq); + goto no_disc; + } + + n = 0; + mutex_lock(&rio_mport_list_lock); + list_for_each_entry(port, &rio_mports, node) { + if (port->host_deviceid < 0 && port->nscan) { + work[n].mport = port; + INIT_WORK(&work[n].work, disc_work_handler); + queue_work(rio_wq, &work[n].work); + n++; + } + } + + flush_workqueue(rio_wq); + mutex_unlock(&rio_mport_list_lock); + pr_debug("RIO: destroy discovery workqueue\n"); + destroy_workqueue(rio_wq); + kfree(work); + +no_disc: + rio_init(); + + return 0; +} +EXPORT_SYMBOL_GPL(rio_init_mports); + +static int rio_get_hdid(int index) +{ + if (ids_num == 0 || ids_num <= index || index >= RIO_MAX_MPORTS) + return -1; + + return hdid[index]; +} + +int rio_mport_initialize(struct rio_mport *mport) +{ + if (next_portid >= RIO_MAX_MPORTS) { + pr_err("RIO: reached specified max number of mports\n"); + return -ENODEV; + } + + atomic_set(&mport->state, RIO_DEVICE_INITIALIZING); + mport->id = next_portid++; + mport->host_deviceid = rio_get_hdid(mport->id); + mport->nscan = NULL; + mutex_init(&mport->lock); + mport->pwe_refcnt = 0; + INIT_LIST_HEAD(&mport->pwrites); + + return 0; +} +EXPORT_SYMBOL_GPL(rio_mport_initialize); + +int rio_register_mport(struct rio_mport *port) +{ + struct rio_scan_node *scan = NULL; + int res = 0; + + mutex_lock(&rio_mport_list_lock); + + /* + * Check if there are any registered enumeration/discovery operations + * that have to be attached to the added mport. + */ + list_for_each_entry(scan, &rio_scans, node) { + if (port->id == scan->mport_id || + scan->mport_id == RIO_MPORT_ANY) { + port->nscan = scan->ops; + if (port->id == scan->mport_id) + break; + } + } + + list_add_tail(&port->node, &rio_mports); + mutex_unlock(&rio_mport_list_lock); + + dev_set_name(&port->dev, "rapidio%d", port->id); + port->dev.class = &rio_mport_class; + atomic_set(&port->state, RIO_DEVICE_RUNNING); + + res = device_register(&port->dev); + if (res) { + dev_err(&port->dev, "RIO: mport%d registration failed ERR=%d\n", + port->id, res); + mutex_lock(&rio_mport_list_lock); + list_del(&port->node); + mutex_unlock(&rio_mport_list_lock); + put_device(&port->dev); + } else { + dev_dbg(&port->dev, "RIO: registered mport%d\n", port->id); + } + + return res; +} +EXPORT_SYMBOL_GPL(rio_register_mport); + +static int rio_mport_cleanup_callback(struct device *dev, void *data) +{ + struct rio_dev *rdev = to_rio_dev(dev); + + if (dev->bus == &rio_bus_type) + rio_del_device(rdev, RIO_DEVICE_SHUTDOWN); + return 0; +} + +static int rio_net_remove_children(struct rio_net *net) +{ + /* + * Unregister all RapidIO devices residing on this net (this will + * invoke notification of registered subsystem interfaces as well). 
+ */ + device_for_each_child(&net->dev, NULL, rio_mport_cleanup_callback); + return 0; +} + +int rio_unregister_mport(struct rio_mport *port) +{ + pr_debug("RIO: %s %s id=%d\n", __func__, port->name, port->id); + + /* Transition mport to the SHUTDOWN state */ + if (atomic_cmpxchg(&port->state, + RIO_DEVICE_RUNNING, + RIO_DEVICE_SHUTDOWN) != RIO_DEVICE_RUNNING) { + pr_err("RIO: %s unexpected state transition for mport %s\n", + __func__, port->name); + } + + if (port->net && port->net->hport == port) { + rio_net_remove_children(port->net); + rio_free_net(port->net); + } + + /* + * Unregister all RapidIO devices attached to this mport (this will + * invoke notification of registered subsystem interfaces as well). + */ + mutex_lock(&rio_mport_list_lock); + list_del(&port->node); + mutex_unlock(&rio_mport_list_lock); + device_unregister(&port->dev); + + return 0; +} +EXPORT_SYMBOL_GPL(rio_unregister_mport); diff --git a/drivers/rapidio/rio.h b/drivers/rapidio/rio.h new file mode 100644 index 000000000..f482de0d0 --- /dev/null +++ b/drivers/rapidio/rio.h @@ -0,0 +1,55 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * RapidIO interconnect services + * + * Copyright 2005 MontaVista Software, Inc. + * Matt Porter <mporter@kernel.crashing.org> + */ + +#include <linux/device.h> +#include <linux/list.h> +#include <linux/rio.h> + +#define RIO_MAX_CHK_RETRY 3 +#define RIO_MPORT_ANY (-1) + +/* Functions internal to the RIO core code */ + +extern u32 rio_mport_get_feature(struct rio_mport *mport, int local, u16 destid, + u8 hopcount, int ftr); +extern u32 rio_mport_get_physefb(struct rio_mport *port, int local, + u16 destid, u8 hopcount, u32 *rmap); +extern u32 rio_mport_get_efb(struct rio_mport *port, int local, u16 destid, + u8 hopcount, u32 from); +extern int rio_mport_chk_dev_access(struct rio_mport *mport, u16 destid, + u8 hopcount); +extern int rio_lock_device(struct rio_mport *port, u16 destid, + u8 hopcount, int wait_ms); +extern int rio_unlock_device(struct rio_mport *port, u16 destid, u8 hopcount); +extern int rio_route_add_entry(struct rio_dev *rdev, + u16 table, u16 route_destid, u8 route_port, int lock); +extern int rio_route_get_entry(struct rio_dev *rdev, u16 table, + u16 route_destid, u8 *route_port, int lock); +extern int rio_route_clr_table(struct rio_dev *rdev, u16 table, int lock); +extern int rio_set_port_lockout(struct rio_dev *rdev, u32 pnum, int lock); +extern struct rio_dev *rio_get_comptag(u32 comp_tag, struct rio_dev *from); +extern struct rio_net *rio_alloc_net(struct rio_mport *mport); +extern int rio_add_net(struct rio_net *net); +extern void rio_free_net(struct rio_net *net); +extern int rio_add_device(struct rio_dev *rdev); +extern void rio_del_device(struct rio_dev *rdev, enum rio_device_state state); +extern int rio_enable_rx_tx_port(struct rio_mport *port, int local, u16 destid, + u8 hopcount, u8 port_num); +extern int rio_register_scan(int mport_id, struct rio_scan *scan_ops); +extern int rio_unregister_scan(int mport_id, struct rio_scan *scan_ops); +extern void rio_attach_device(struct rio_dev *rdev); +extern struct rio_mport *rio_find_mport(int mport_id); +extern int rio_mport_scan(int mport_id); + +/* Structures internal to the RIO core code */ +extern const struct attribute_group *rio_dev_groups[]; +extern const struct attribute_group *rio_bus_groups[]; +extern const struct attribute_group *rio_mport_groups[]; + +#define RIO_GET_DID(size, x) (size ? (x & 0xffff) : ((x & 0x00ff0000) >> 16)) +#define RIO_SET_DID(size, x) (size ? 
(x & 0xffff) : ((x & 0x000000ff) << 16)) diff --git a/drivers/rapidio/rio_cm.c b/drivers/rapidio/rio_cm.c new file mode 100644 index 000000000..db4c26528 --- /dev/null +++ b/drivers/rapidio/rio_cm.c @@ -0,0 +1,2376 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * rio_cm - RapidIO Channelized Messaging Driver + * + * Copyright 2013-2016 Integrated Device Technology, Inc. + * Copyright (c) 2015, Prodrive Technologies + * Copyright (c) 2015, RapidIO Trade Association + */ + +#include <linux/module.h> +#include <linux/kernel.h> +#include <linux/dma-mapping.h> +#include <linux/delay.h> +#include <linux/sched.h> +#include <linux/rio.h> +#include <linux/rio_drv.h> +#include <linux/slab.h> +#include <linux/idr.h> +#include <linux/interrupt.h> +#include <linux/cdev.h> +#include <linux/fs.h> +#include <linux/poll.h> +#include <linux/reboot.h> +#include <linux/bitops.h> +#include <linux/printk.h> +#include <linux/rio_cm_cdev.h> + +#define DRV_NAME "rio_cm" +#define DRV_VERSION "1.0.0" +#define DRV_AUTHOR "Alexandre Bounine <alexandre.bounine@idt.com>" +#define DRV_DESC "RapidIO Channelized Messaging Driver" +#define DEV_NAME "rio_cm" + +/* Debug output filtering masks */ +enum { + DBG_NONE = 0, + DBG_INIT = BIT(0), /* driver init */ + DBG_EXIT = BIT(1), /* driver exit */ + DBG_MPORT = BIT(2), /* mport add/remove */ + DBG_RDEV = BIT(3), /* RapidIO device add/remove */ + DBG_CHOP = BIT(4), /* channel operations */ + DBG_WAIT = BIT(5), /* waiting for events */ + DBG_TX = BIT(6), /* message TX */ + DBG_TX_EVENT = BIT(7), /* message TX event */ + DBG_RX_DATA = BIT(8), /* inbound data messages */ + DBG_RX_CMD = BIT(9), /* inbound REQ/ACK/NACK messages */ + DBG_ALL = ~0, +}; + +#ifdef DEBUG +#define riocm_debug(level, fmt, arg...) \ + do { \ + if (DBG_##level & dbg_level) \ + pr_debug(DRV_NAME ": %s " fmt "\n", \ + __func__, ##arg); \ + } while (0) +#else +#define riocm_debug(level, fmt, arg...) \ + no_printk(KERN_DEBUG pr_fmt(DRV_NAME fmt "\n"), ##arg) +#endif + +#define riocm_warn(fmt, arg...) \ + pr_warn(DRV_NAME ": %s WARNING " fmt "\n", __func__, ##arg) + +#define riocm_error(fmt, arg...) 
\ + pr_err(DRV_NAME ": %s ERROR " fmt "\n", __func__, ##arg) + + +static int cmbox = 1; +module_param(cmbox, int, S_IRUGO); +MODULE_PARM_DESC(cmbox, "RapidIO Mailbox number (default 1)"); + +static int chstart = 256; +module_param(chstart, int, S_IRUGO); +MODULE_PARM_DESC(chstart, + "Start channel number for dynamic allocation (default 256)"); + +#ifdef DEBUG +static u32 dbg_level = DBG_NONE; +module_param(dbg_level, uint, S_IWUSR | S_IRUGO); +MODULE_PARM_DESC(dbg_level, "Debugging output level (default 0 = none)"); +#endif + +MODULE_AUTHOR(DRV_AUTHOR); +MODULE_DESCRIPTION(DRV_DESC); +MODULE_LICENSE("GPL"); +MODULE_VERSION(DRV_VERSION); + +#define RIOCM_TX_RING_SIZE 128 +#define RIOCM_RX_RING_SIZE 128 +#define RIOCM_CONNECT_TO 3 /* connect response TO (in sec) */ + +#define RIOCM_MAX_CHNUM 0xffff /* Use full range of u16 field */ +#define RIOCM_CHNUM_AUTO 0 +#define RIOCM_MAX_EP_COUNT 0x10000 /* Max number of endpoints */ + +enum rio_cm_state { + RIO_CM_IDLE, + RIO_CM_CONNECT, + RIO_CM_CONNECTED, + RIO_CM_DISCONNECT, + RIO_CM_CHAN_BOUND, + RIO_CM_LISTEN, + RIO_CM_DESTROYING, +}; + +enum rio_cm_pkt_type { + RIO_CM_SYS = 0xaa, + RIO_CM_CHAN = 0x55, +}; + +enum rio_cm_chop { + CM_CONN_REQ, + CM_CONN_ACK, + CM_CONN_CLOSE, + CM_DATA_MSG, +}; + +struct rio_ch_base_bhdr { + u32 src_id; + u32 dst_id; +#define RIO_HDR_LETTER_MASK 0xffff0000 +#define RIO_HDR_MBOX_MASK 0x0000ffff + u8 src_mbox; + u8 dst_mbox; + u8 type; +} __attribute__((__packed__)); + +struct rio_ch_chan_hdr { + struct rio_ch_base_bhdr bhdr; + u8 ch_op; + u16 dst_ch; + u16 src_ch; + u16 msg_len; + u16 rsrvd; +} __attribute__((__packed__)); + +struct tx_req { + struct list_head node; + struct rio_dev *rdev; + void *buffer; + size_t len; +}; + +struct cm_dev { + struct list_head list; + struct rio_mport *mport; + void *rx_buf[RIOCM_RX_RING_SIZE]; + int rx_slots; + struct mutex rx_lock; + + void *tx_buf[RIOCM_TX_RING_SIZE]; + int tx_slot; + int tx_cnt; + int tx_ack_slot; + struct list_head tx_reqs; + spinlock_t tx_lock; + + struct list_head peers; + u32 npeers; + struct workqueue_struct *rx_wq; + struct work_struct rx_work; +}; + +struct chan_rx_ring { + void *buf[RIOCM_RX_RING_SIZE]; + int head; + int tail; + int count; + + /* Tracking RX buffers reported to upper level */ + void *inuse[RIOCM_RX_RING_SIZE]; + int inuse_cnt; +}; + +struct rio_channel { + u16 id; /* local channel ID */ + struct kref ref; /* channel refcount */ + struct file *filp; + struct cm_dev *cmdev; /* associated CM device object */ + struct rio_dev *rdev; /* remote RapidIO device */ + enum rio_cm_state state; + int error; + spinlock_t lock; + void *context; + u32 loc_destid; /* local destID */ + u32 rem_destid; /* remote destID */ + u16 rem_channel; /* remote channel ID */ + struct list_head accept_queue; + struct list_head ch_node; + struct completion comp; + struct completion comp_close; + struct chan_rx_ring rx_ring; +}; + +struct cm_peer { + struct list_head node; + struct rio_dev *rdev; +}; + +struct rio_cm_work { + struct work_struct work; + struct cm_dev *cm; + void *data; +}; + +struct conn_req { + struct list_head node; + u32 destid; /* requester destID */ + u16 chan; /* requester channel ID */ + struct cm_dev *cmdev; +}; + +/* + * A channel_dev structure represents a CM_CDEV + * @cdev Character device + * @dev Associated device object + */ +struct channel_dev { + struct cdev cdev; + struct device *dev; +}; + +static struct rio_channel *riocm_ch_alloc(u16 ch_num); +static void riocm_ch_free(struct kref *ref); +static int riocm_post_send(struct cm_dev 
*cm, struct rio_dev *rdev, + void *buffer, size_t len); +static int riocm_ch_close(struct rio_channel *ch); + +static DEFINE_SPINLOCK(idr_lock); +static DEFINE_IDR(ch_idr); + +static LIST_HEAD(cm_dev_list); +static DECLARE_RWSEM(rdev_sem); + +static struct class *dev_class; +static unsigned int dev_major; +static unsigned int dev_minor_base; +static dev_t dev_number; +static struct channel_dev riocm_cdev; + +#define is_msg_capable(src_ops, dst_ops) \ + ((src_ops & RIO_SRC_OPS_DATA_MSG) && \ + (dst_ops & RIO_DST_OPS_DATA_MSG)) +#define dev_cm_capable(dev) \ + is_msg_capable(dev->src_ops, dev->dst_ops) + +static int riocm_cmp(struct rio_channel *ch, enum rio_cm_state cmp) +{ + int ret; + + spin_lock_bh(&ch->lock); + ret = (ch->state == cmp); + spin_unlock_bh(&ch->lock); + return ret; +} + +static int riocm_cmp_exch(struct rio_channel *ch, + enum rio_cm_state cmp, enum rio_cm_state exch) +{ + int ret; + + spin_lock_bh(&ch->lock); + ret = (ch->state == cmp); + if (ret) + ch->state = exch; + spin_unlock_bh(&ch->lock); + return ret; +} + +static enum rio_cm_state riocm_exch(struct rio_channel *ch, + enum rio_cm_state exch) +{ + enum rio_cm_state old; + + spin_lock_bh(&ch->lock); + old = ch->state; + ch->state = exch; + spin_unlock_bh(&ch->lock); + return old; +} + +static struct rio_channel *riocm_get_channel(u16 nr) +{ + struct rio_channel *ch; + + spin_lock_bh(&idr_lock); + ch = idr_find(&ch_idr, nr); + if (ch) + kref_get(&ch->ref); + spin_unlock_bh(&idr_lock); + return ch; +} + +static void riocm_put_channel(struct rio_channel *ch) +{ + kref_put(&ch->ref, riocm_ch_free); +} + +static void *riocm_rx_get_msg(struct cm_dev *cm) +{ + void *msg; + int i; + + msg = rio_get_inb_message(cm->mport, cmbox); + if (msg) { + for (i = 0; i < RIOCM_RX_RING_SIZE; i++) { + if (cm->rx_buf[i] == msg) { + cm->rx_buf[i] = NULL; + cm->rx_slots++; + break; + } + } + + if (i == RIOCM_RX_RING_SIZE) + riocm_warn("no record for buffer 0x%p", msg); + } + + return msg; +} + +/* + * riocm_rx_fill - fills a ring of receive buffers for given cm device + * @cm: cm_dev object + * @nent: max number of entries to fill + * + * Returns: none + */ +static void riocm_rx_fill(struct cm_dev *cm, int nent) +{ + int i; + + if (cm->rx_slots == 0) + return; + + for (i = 0; i < RIOCM_RX_RING_SIZE && cm->rx_slots && nent; i++) { + if (cm->rx_buf[i] == NULL) { + cm->rx_buf[i] = kmalloc(RIO_MAX_MSG_SIZE, GFP_KERNEL); + if (cm->rx_buf[i] == NULL) + break; + rio_add_inb_buffer(cm->mport, cmbox, cm->rx_buf[i]); + cm->rx_slots--; + nent--; + } + } +} + +/* + * riocm_rx_free - frees all receive buffers associated with given cm device + * @cm: cm_dev object + * + * Returns: none + */ +static void riocm_rx_free(struct cm_dev *cm) +{ + int i; + + for (i = 0; i < RIOCM_RX_RING_SIZE; i++) { + if (cm->rx_buf[i] != NULL) { + kfree(cm->rx_buf[i]); + cm->rx_buf[i] = NULL; + } + } +} + +/* + * riocm_req_handler - connection request handler + * @cm: cm_dev object + * @req_data: pointer to the request packet + * + * Returns: 0 if success, or + * -EINVAL if channel is not in correct state, + * -ENODEV if cannot find a channel with specified ID, + * -ENOMEM if unable to allocate memory to store the request + */ +static int riocm_req_handler(struct cm_dev *cm, void *req_data) +{ + struct rio_channel *ch; + struct conn_req *req; + struct rio_ch_chan_hdr *hh = req_data; + u16 chnum; + + chnum = ntohs(hh->dst_ch); + + ch = riocm_get_channel(chnum); + + if (!ch) + return -ENODEV; + + if (ch->state != RIO_CM_LISTEN) { + riocm_debug(RX_CMD, "channel %d is not in 
listen state", chnum); + riocm_put_channel(ch); + return -EINVAL; + } + + req = kzalloc(sizeof(*req), GFP_KERNEL); + if (!req) { + riocm_put_channel(ch); + return -ENOMEM; + } + + req->destid = ntohl(hh->bhdr.src_id); + req->chan = ntohs(hh->src_ch); + req->cmdev = cm; + + spin_lock_bh(&ch->lock); + list_add_tail(&req->node, &ch->accept_queue); + spin_unlock_bh(&ch->lock); + complete(&ch->comp); + riocm_put_channel(ch); + + return 0; +} + +/* + * riocm_resp_handler - response to connection request handler + * @resp_data: pointer to the response packet + * + * Returns: 0 if success, or + * -EINVAL if channel is not in correct state, + * -ENODEV if cannot find a channel with specified ID, + */ +static int riocm_resp_handler(void *resp_data) +{ + struct rio_channel *ch; + struct rio_ch_chan_hdr *hh = resp_data; + u16 chnum; + + chnum = ntohs(hh->dst_ch); + ch = riocm_get_channel(chnum); + if (!ch) + return -ENODEV; + + if (ch->state != RIO_CM_CONNECT) { + riocm_put_channel(ch); + return -EINVAL; + } + + riocm_exch(ch, RIO_CM_CONNECTED); + ch->rem_channel = ntohs(hh->src_ch); + complete(&ch->comp); + riocm_put_channel(ch); + + return 0; +} + +/* + * riocm_close_handler - channel close request handler + * @req_data: pointer to the request packet + * + * Returns: 0 if success, or + * -ENODEV if cannot find a channel with specified ID, + * + error codes returned by riocm_ch_close. + */ +static int riocm_close_handler(void *data) +{ + struct rio_channel *ch; + struct rio_ch_chan_hdr *hh = data; + int ret; + + riocm_debug(RX_CMD, "for ch=%d", ntohs(hh->dst_ch)); + + spin_lock_bh(&idr_lock); + ch = idr_find(&ch_idr, ntohs(hh->dst_ch)); + if (!ch) { + spin_unlock_bh(&idr_lock); + return -ENODEV; + } + idr_remove(&ch_idr, ch->id); + spin_unlock_bh(&idr_lock); + + riocm_exch(ch, RIO_CM_DISCONNECT); + + ret = riocm_ch_close(ch); + if (ret) + riocm_debug(RX_CMD, "riocm_ch_close() returned %d", ret); + + return 0; +} + +/* + * rio_cm_handler - function that services request (non-data) packets + * @cm: cm_dev object + * @data: pointer to the packet + */ +static void rio_cm_handler(struct cm_dev *cm, void *data) +{ + struct rio_ch_chan_hdr *hdr; + + if (!rio_mport_is_running(cm->mport)) + goto out; + + hdr = data; + + riocm_debug(RX_CMD, "OP=%x for ch=%d from %d", + hdr->ch_op, ntohs(hdr->dst_ch), ntohs(hdr->src_ch)); + + switch (hdr->ch_op) { + case CM_CONN_REQ: + riocm_req_handler(cm, data); + break; + case CM_CONN_ACK: + riocm_resp_handler(data); + break; + case CM_CONN_CLOSE: + riocm_close_handler(data); + break; + default: + riocm_error("Invalid packet header"); + break; + } +out: + kfree(data); +} + +/* + * rio_rx_data_handler - received data packet handler + * @cm: cm_dev object + * @buf: data packet + * + * Returns: 0 if success, or + * -ENODEV if cannot find a channel with specified ID, + * -EIO if channel is not in CONNECTED state, + * -ENOMEM if channel RX queue is full (packet discarded) + */ +static int rio_rx_data_handler(struct cm_dev *cm, void *buf) +{ + struct rio_ch_chan_hdr *hdr; + struct rio_channel *ch; + + hdr = buf; + + riocm_debug(RX_DATA, "for ch=%d", ntohs(hdr->dst_ch)); + + ch = riocm_get_channel(ntohs(hdr->dst_ch)); + if (!ch) { + /* Discard data message for non-existing channel */ + kfree(buf); + return -ENODEV; + } + + /* Place pointer to the buffer into channel's RX queue */ + spin_lock(&ch->lock); + + if (ch->state != RIO_CM_CONNECTED) { + /* Channel is not ready to receive data, discard a packet */ + riocm_debug(RX_DATA, "ch=%d is in wrong state=%d", + ch->id, ch->state); + 
spin_unlock(&ch->lock); + kfree(buf); + riocm_put_channel(ch); + return -EIO; + } + + if (ch->rx_ring.count == RIOCM_RX_RING_SIZE) { + /* If RX ring is full, discard a packet */ + riocm_debug(RX_DATA, "ch=%d is full", ch->id); + spin_unlock(&ch->lock); + kfree(buf); + riocm_put_channel(ch); + return -ENOMEM; + } + + ch->rx_ring.buf[ch->rx_ring.head] = buf; + ch->rx_ring.head++; + ch->rx_ring.count++; + ch->rx_ring.head %= RIOCM_RX_RING_SIZE; + + complete(&ch->comp); + + spin_unlock(&ch->lock); + riocm_put_channel(ch); + + return 0; +} + +/* + * rio_ibmsg_handler - inbound message packet handler + */ +static void rio_ibmsg_handler(struct work_struct *work) +{ + struct cm_dev *cm = container_of(work, struct cm_dev, rx_work); + void *data; + struct rio_ch_chan_hdr *hdr; + + if (!rio_mport_is_running(cm->mport)) + return; + + while (1) { + mutex_lock(&cm->rx_lock); + data = riocm_rx_get_msg(cm); + if (data) + riocm_rx_fill(cm, 1); + mutex_unlock(&cm->rx_lock); + + if (data == NULL) + break; + + hdr = data; + + if (hdr->bhdr.type != RIO_CM_CHAN) { + /* For now simply discard packets other than channel */ + riocm_error("Unsupported TYPE code (0x%x). Msg dropped", + hdr->bhdr.type); + kfree(data); + continue; + } + + /* Process a channel message */ + if (hdr->ch_op == CM_DATA_MSG) + rio_rx_data_handler(cm, data); + else + rio_cm_handler(cm, data); + } +} + +static void riocm_inb_msg_event(struct rio_mport *mport, void *dev_id, + int mbox, int slot) +{ + struct cm_dev *cm = dev_id; + + if (rio_mport_is_running(cm->mport) && !work_pending(&cm->rx_work)) + queue_work(cm->rx_wq, &cm->rx_work); +} + +/* + * rio_txcq_handler - TX completion handler + * @cm: cm_dev object + * @slot: TX queue slot + * + * TX completion handler also ensures that pending request packets are placed + * into transmit queue as soon as a free slot becomes available. This is done + * to give higher priority to request packets during high intensity data flow. + */ +static void rio_txcq_handler(struct cm_dev *cm, int slot) +{ + int ack_slot; + + /* ATTN: Add TX completion notification if/when direct buffer + * transfer is implemented. At this moment only correct tracking + * of tx_count is important. 
+ */ + riocm_debug(TX_EVENT, "for mport_%d slot %d tx_cnt %d", + cm->mport->id, slot, cm->tx_cnt); + + spin_lock(&cm->tx_lock); + ack_slot = cm->tx_ack_slot; + + if (ack_slot == slot) + riocm_debug(TX_EVENT, "slot == ack_slot"); + + while (cm->tx_cnt && ((ack_slot != slot) || + (cm->tx_cnt == RIOCM_TX_RING_SIZE))) { + + cm->tx_buf[ack_slot] = NULL; + ++ack_slot; + ack_slot &= (RIOCM_TX_RING_SIZE - 1); + cm->tx_cnt--; + } + + if (cm->tx_cnt < 0 || cm->tx_cnt > RIOCM_TX_RING_SIZE) + riocm_error("tx_cnt %d out of sync", cm->tx_cnt); + + WARN_ON((cm->tx_cnt < 0) || (cm->tx_cnt > RIOCM_TX_RING_SIZE)); + + cm->tx_ack_slot = ack_slot; + + /* + * If there are pending requests, insert them into transmit queue + */ + if (!list_empty(&cm->tx_reqs) && (cm->tx_cnt < RIOCM_TX_RING_SIZE)) { + struct tx_req *req, *_req; + int rc; + + list_for_each_entry_safe(req, _req, &cm->tx_reqs, node) { + list_del(&req->node); + cm->tx_buf[cm->tx_slot] = req->buffer; + rc = rio_add_outb_message(cm->mport, req->rdev, cmbox, + req->buffer, req->len); + kfree(req->buffer); + kfree(req); + + ++cm->tx_cnt; + ++cm->tx_slot; + cm->tx_slot &= (RIOCM_TX_RING_SIZE - 1); + if (cm->tx_cnt == RIOCM_TX_RING_SIZE) + break; + } + } + + spin_unlock(&cm->tx_lock); +} + +static void riocm_outb_msg_event(struct rio_mport *mport, void *dev_id, + int mbox, int slot) +{ + struct cm_dev *cm = dev_id; + + if (cm && rio_mport_is_running(cm->mport)) + rio_txcq_handler(cm, slot); +} + +static int riocm_queue_req(struct cm_dev *cm, struct rio_dev *rdev, + void *buffer, size_t len) +{ + unsigned long flags; + struct tx_req *treq; + + treq = kzalloc(sizeof(*treq), GFP_KERNEL); + if (treq == NULL) + return -ENOMEM; + + treq->rdev = rdev; + treq->buffer = buffer; + treq->len = len; + + spin_lock_irqsave(&cm->tx_lock, flags); + list_add_tail(&treq->node, &cm->tx_reqs); + spin_unlock_irqrestore(&cm->tx_lock, flags); + return 0; +} + +/* + * riocm_post_send - helper function that places packet into msg TX queue + * @cm: cm_dev object + * @rdev: target RapidIO device object (required by outbound msg interface) + * @buffer: pointer to a packet buffer to send + * @len: length of data to transfer + * @req: request priority flag + * + * Returns: 0 if success, or error code otherwise. + */ +static int riocm_post_send(struct cm_dev *cm, struct rio_dev *rdev, + void *buffer, size_t len) +{ + int rc; + unsigned long flags; + + spin_lock_irqsave(&cm->tx_lock, flags); + + if (cm->mport == NULL) { + rc = -ENODEV; + goto err_out; + } + + if (cm->tx_cnt == RIOCM_TX_RING_SIZE) { + riocm_debug(TX, "Tx Queue is full"); + rc = -EBUSY; + goto err_out; + } + + cm->tx_buf[cm->tx_slot] = buffer; + rc = rio_add_outb_message(cm->mport, rdev, cmbox, buffer, len); + + riocm_debug(TX, "Add buf@%p destid=%x tx_slot=%d tx_cnt=%d", + buffer, rdev->destid, cm->tx_slot, cm->tx_cnt); + + ++cm->tx_cnt; + ++cm->tx_slot; + cm->tx_slot &= (RIOCM_TX_RING_SIZE - 1); + +err_out: + spin_unlock_irqrestore(&cm->tx_lock, flags); + return rc; +} + +/* + * riocm_ch_send - sends a data packet to a remote device + * @ch_id: local channel ID + * @buf: pointer to a data buffer to send (including CM header) + * @len: length of data to transfer (including CM header) + * + * ATTN: ASSUMES THAT THE HEADER SPACE IS RESERVED PART OF THE DATA PACKET + * + * Returns: 0 if success, or + * -EINVAL if one or more input parameters is/are not valid, + * -ENODEV if cannot find a channel with specified ID, + * -EAGAIN if a channel is not in CONNECTED state, + * + error codes returned by HW send routine. 
+ */ +static int riocm_ch_send(u16 ch_id, void *buf, int len) +{ + struct rio_channel *ch; + struct rio_ch_chan_hdr *hdr; + int ret; + + if (buf == NULL || ch_id == 0 || len == 0 || len > RIO_MAX_MSG_SIZE) + return -EINVAL; + + ch = riocm_get_channel(ch_id); + if (!ch) { + riocm_error("%s(%d) ch_%d not found", current->comm, + task_pid_nr(current), ch_id); + return -ENODEV; + } + + if (!riocm_cmp(ch, RIO_CM_CONNECTED)) { + ret = -EAGAIN; + goto err_out; + } + + /* + * Fill buffer header section with corresponding channel data + */ + hdr = buf; + + hdr->bhdr.src_id = htonl(ch->loc_destid); + hdr->bhdr.dst_id = htonl(ch->rem_destid); + hdr->bhdr.src_mbox = cmbox; + hdr->bhdr.dst_mbox = cmbox; + hdr->bhdr.type = RIO_CM_CHAN; + hdr->ch_op = CM_DATA_MSG; + hdr->dst_ch = htons(ch->rem_channel); + hdr->src_ch = htons(ch->id); + hdr->msg_len = htons((u16)len); + + /* ATTN: the function call below relies on the fact that underlying + * HW-specific add_outb_message() routine copies TX data into its own + * internal transfer buffer (true for all RIONET compatible mport + * drivers). Must be reviewed if mport driver uses the buffer directly. + */ + + ret = riocm_post_send(ch->cmdev, ch->rdev, buf, len); + if (ret) + riocm_debug(TX, "ch %d send_err=%d", ch->id, ret); +err_out: + riocm_put_channel(ch); + return ret; +} + +static int riocm_ch_free_rxbuf(struct rio_channel *ch, void *buf) +{ + int i, ret = -EINVAL; + + spin_lock_bh(&ch->lock); + + for (i = 0; i < RIOCM_RX_RING_SIZE; i++) { + if (ch->rx_ring.inuse[i] == buf) { + ch->rx_ring.inuse[i] = NULL; + ch->rx_ring.inuse_cnt--; + ret = 0; + break; + } + } + + spin_unlock_bh(&ch->lock); + + if (!ret) + kfree(buf); + + return ret; +} + +/* + * riocm_ch_receive - fetch a data packet received for the specified channel + * @ch: local channel ID + * @buf: pointer to a packet buffer + * @timeout: timeout to wait for incoming packet (in jiffies) + * + * Returns: 0 and valid buffer pointer if success, or NULL pointer and one of: + * -EAGAIN if a channel is not in CONNECTED state, + * -ENOMEM if in-use tracking queue is full, + * -ETIME if wait timeout expired, + * -EINTR if wait was interrupted. + */ +static int riocm_ch_receive(struct rio_channel *ch, void **buf, long timeout) +{ + void *rxmsg = NULL; + int i, ret = 0; + long wret; + + if (!riocm_cmp(ch, RIO_CM_CONNECTED)) { + ret = -EAGAIN; + goto out; + } + + if (ch->rx_ring.inuse_cnt == RIOCM_RX_RING_SIZE) { + /* If we do not have entries to track buffers given to upper + * layer, reject request. + */ + ret = -ENOMEM; + goto out; + } + + wret = wait_for_completion_interruptible_timeout(&ch->comp, timeout); + + riocm_debug(WAIT, "wait on %d returned %ld", ch->id, wret); + + if (!wret) + ret = -ETIME; + else if (wret == -ERESTARTSYS) + ret = -EINTR; + else + ret = riocm_cmp(ch, RIO_CM_CONNECTED) ? 
0 : -ECONNRESET; + + if (ret) + goto out; + + spin_lock_bh(&ch->lock); + + rxmsg = ch->rx_ring.buf[ch->rx_ring.tail]; + ch->rx_ring.buf[ch->rx_ring.tail] = NULL; + ch->rx_ring.count--; + ch->rx_ring.tail++; + ch->rx_ring.tail %= RIOCM_RX_RING_SIZE; + ret = -ENOMEM; + + for (i = 0; i < RIOCM_RX_RING_SIZE; i++) { + if (ch->rx_ring.inuse[i] == NULL) { + ch->rx_ring.inuse[i] = rxmsg; + ch->rx_ring.inuse_cnt++; + ret = 0; + break; + } + } + + if (ret) { + /* We have no entry to store pending message: drop it */ + kfree(rxmsg); + rxmsg = NULL; + } + + spin_unlock_bh(&ch->lock); +out: + *buf = rxmsg; + return ret; +} + +/* + * riocm_ch_connect - sends a connect request to a remote device + * @loc_ch: local channel ID + * @cm: CM device to send connect request + * @peer: target RapidIO device + * @rem_ch: remote channel ID + * + * Returns: 0 if success, or + * -EINVAL if the channel is not in IDLE state, + * -EAGAIN if no connection request available immediately, + * -ETIME if ACK response timeout expired, + * -EINTR if wait for response was interrupted. + */ +static int riocm_ch_connect(u16 loc_ch, struct cm_dev *cm, + struct cm_peer *peer, u16 rem_ch) +{ + struct rio_channel *ch = NULL; + struct rio_ch_chan_hdr *hdr; + int ret; + long wret; + + ch = riocm_get_channel(loc_ch); + if (!ch) + return -ENODEV; + + if (!riocm_cmp_exch(ch, RIO_CM_IDLE, RIO_CM_CONNECT)) { + ret = -EINVAL; + goto conn_done; + } + + ch->cmdev = cm; + ch->rdev = peer->rdev; + ch->context = NULL; + ch->loc_destid = cm->mport->host_deviceid; + ch->rem_channel = rem_ch; + + /* + * Send connect request to the remote RapidIO device + */ + + hdr = kzalloc(sizeof(*hdr), GFP_KERNEL); + if (hdr == NULL) { + ret = -ENOMEM; + goto conn_done; + } + + hdr->bhdr.src_id = htonl(ch->loc_destid); + hdr->bhdr.dst_id = htonl(peer->rdev->destid); + hdr->bhdr.src_mbox = cmbox; + hdr->bhdr.dst_mbox = cmbox; + hdr->bhdr.type = RIO_CM_CHAN; + hdr->ch_op = CM_CONN_REQ; + hdr->dst_ch = htons(rem_ch); + hdr->src_ch = htons(loc_ch); + + /* ATTN: the function call below relies on the fact that underlying + * HW-specific add_outb_message() routine copies TX data into its + * internal transfer buffer. Must be reviewed if mport driver uses + * this buffer directly. + */ + ret = riocm_post_send(cm, peer->rdev, hdr, sizeof(*hdr)); + + if (ret != -EBUSY) { + kfree(hdr); + } else { + ret = riocm_queue_req(cm, peer->rdev, hdr, sizeof(*hdr)); + if (ret) + kfree(hdr); + } + + if (ret) { + riocm_cmp_exch(ch, RIO_CM_CONNECT, RIO_CM_IDLE); + goto conn_done; + } + + /* Wait for connect response from the remote device */ + wret = wait_for_completion_interruptible_timeout(&ch->comp, + RIOCM_CONNECT_TO * HZ); + riocm_debug(WAIT, "wait on %d returns %ld", ch->id, wret); + + if (!wret) + ret = -ETIME; + else if (wret == -ERESTARTSYS) + ret = -EINTR; + else + ret = riocm_cmp(ch, RIO_CM_CONNECTED) ? 0 : -1; + +conn_done: + riocm_put_channel(ch); + return ret; +} + +static int riocm_send_ack(struct rio_channel *ch) +{ + struct rio_ch_chan_hdr *hdr; + int ret; + + hdr = kzalloc(sizeof(*hdr), GFP_KERNEL); + if (hdr == NULL) + return -ENOMEM; + + hdr->bhdr.src_id = htonl(ch->loc_destid); + hdr->bhdr.dst_id = htonl(ch->rem_destid); + hdr->dst_ch = htons(ch->rem_channel); + hdr->src_ch = htons(ch->id); + hdr->bhdr.src_mbox = cmbox; + hdr->bhdr.dst_mbox = cmbox; + hdr->bhdr.type = RIO_CM_CHAN; + hdr->ch_op = CM_CONN_ACK; + + /* ATTN: the function call below relies on the fact that underlying + * add_outb_message() routine copies TX data into its internal transfer + * buffer. 
Review if switching to direct buffer version. + */ + ret = riocm_post_send(ch->cmdev, ch->rdev, hdr, sizeof(*hdr)); + + if (ret == -EBUSY && !riocm_queue_req(ch->cmdev, + ch->rdev, hdr, sizeof(*hdr))) + return 0; + kfree(hdr); + + if (ret) + riocm_error("send ACK to ch_%d on %s failed (ret=%d)", + ch->id, rio_name(ch->rdev), ret); + return ret; +} + +/* + * riocm_ch_accept - accept incoming connection request + * @ch_id: channel ID + * @new_ch_id: local mport device + * @timeout: wait timeout (if 0 non-blocking call, do not wait if connection + * request is not available). + * + * Returns: pointer to new channel struct if success, or error-valued pointer: + * -ENODEV - cannot find specified channel or mport, + * -EINVAL - the channel is not in IDLE state, + * -EAGAIN - no connection request available immediately (timeout=0), + * -ENOMEM - unable to allocate new channel, + * -ETIME - wait timeout expired, + * -EINTR - wait was interrupted. + */ +static struct rio_channel *riocm_ch_accept(u16 ch_id, u16 *new_ch_id, + long timeout) +{ + struct rio_channel *ch; + struct rio_channel *new_ch; + struct conn_req *req; + struct cm_peer *peer; + int found = 0; + int err = 0; + long wret; + + ch = riocm_get_channel(ch_id); + if (!ch) + return ERR_PTR(-EINVAL); + + if (!riocm_cmp(ch, RIO_CM_LISTEN)) { + err = -EINVAL; + goto err_put; + } + + /* Don't sleep if this is a non blocking call */ + if (!timeout) { + if (!try_wait_for_completion(&ch->comp)) { + err = -EAGAIN; + goto err_put; + } + } else { + riocm_debug(WAIT, "on %d", ch->id); + + wret = wait_for_completion_interruptible_timeout(&ch->comp, + timeout); + if (!wret) { + err = -ETIME; + goto err_put; + } else if (wret == -ERESTARTSYS) { + err = -EINTR; + goto err_put; + } + } + + spin_lock_bh(&ch->lock); + + if (ch->state != RIO_CM_LISTEN) { + err = -ECANCELED; + } else if (list_empty(&ch->accept_queue)) { + riocm_debug(WAIT, "on %d accept_queue is empty on completion", + ch->id); + err = -EIO; + } + + spin_unlock_bh(&ch->lock); + + if (err) { + riocm_debug(WAIT, "on %d returns %d", ch->id, err); + goto err_put; + } + + /* Create new channel for this connection */ + new_ch = riocm_ch_alloc(RIOCM_CHNUM_AUTO); + + if (IS_ERR(new_ch)) { + riocm_error("failed to get channel for new req (%ld)", + PTR_ERR(new_ch)); + err = -ENOMEM; + goto err_put; + } + + spin_lock_bh(&ch->lock); + + req = list_first_entry(&ch->accept_queue, struct conn_req, node); + list_del(&req->node); + new_ch->cmdev = ch->cmdev; + new_ch->loc_destid = ch->loc_destid; + new_ch->rem_destid = req->destid; + new_ch->rem_channel = req->chan; + + spin_unlock_bh(&ch->lock); + riocm_put_channel(ch); + ch = NULL; + kfree(req); + + down_read(&rdev_sem); + /* Find requester's device object */ + list_for_each_entry(peer, &new_ch->cmdev->peers, node) { + if (peer->rdev->destid == new_ch->rem_destid) { + riocm_debug(RX_CMD, "found matching device(%s)", + rio_name(peer->rdev)); + found = 1; + break; + } + } + up_read(&rdev_sem); + + if (!found) { + /* If peer device object not found, simply ignore the request */ + err = -ENODEV; + goto err_put_new_ch; + } + + new_ch->rdev = peer->rdev; + new_ch->state = RIO_CM_CONNECTED; + spin_lock_init(&new_ch->lock); + + /* Acknowledge the connection request. 
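(Editorial aside, not part of this patch.) riocm_ch_accept() above is reached from user space through the RIO_CM_CHAN_ACCEPT ioctl handled later in this file. Below is a hedged sketch of the server-side sequence, assuming the uapi definitions from <linux/rio_cm_cdev.h> and the /dev/rio_cm node created by this driver; the channel number 23 and mport 0 are arbitrary illustrative choices.

#include <stdio.h>
#include <stdint.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/rio_cm_cdev.h>

int main(void)
{
    struct rio_cm_channel chan = { 0 };
    struct rio_cm_accept acc = { 0 };
    uint16_t ch_num = 23;                /* hypothetical well-known server channel */
    int fd;

    fd = open("/dev/rio_cm", O_RDWR);
    if (fd < 0) {
        perror("open");
        return 1;
    }

    if (ioctl(fd, RIO_CM_CHAN_CREATE, &ch_num))   /* reserve channel ID 23 */
        goto fail;

    chan.id = ch_num;
    chan.mport_id = 0;                   /* bind to local mport 0 */
    if (ioctl(fd, RIO_CM_CHAN_BIND, &chan))
        goto fail;

    if (ioctl(fd, RIO_CM_CHAN_LISTEN, &ch_num))
        goto fail;

    acc.ch_num = ch_num;
    acc.wait_to = 60000;                 /* wait up to 60 s; 0 means do not block */
    if (ioctl(fd, RIO_CM_CHAN_ACCEPT, &acc))
        goto fail;

    /* acc.ch_num now holds the ID of the new, connected channel */
    printf("accepted connection, data channel is ch_%u\n", acc.ch_num);

    ioctl(fd, RIO_CM_CHAN_CLOSE, &acc.ch_num);
    ioctl(fd, RIO_CM_CHAN_CLOSE, &ch_num);
    close(fd);
    return 0;
fail:
    perror("ioctl");
    close(fd);
    return 1;
}

Incoming payloads on the accepted channel would then be fetched with RIO_CM_CHAN_RECEIVE, which ends up in riocm_ch_receive() above.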
*/ + riocm_send_ack(new_ch); + + *new_ch_id = new_ch->id; + return new_ch; + +err_put_new_ch: + spin_lock_bh(&idr_lock); + idr_remove(&ch_idr, new_ch->id); + spin_unlock_bh(&idr_lock); + riocm_put_channel(new_ch); + +err_put: + if (ch) + riocm_put_channel(ch); + *new_ch_id = 0; + return ERR_PTR(err); +} + +/* + * riocm_ch_listen - puts a channel into LISTEN state + * @ch_id: channel ID + * + * Returns: 0 if success, or + * -EINVAL if the specified channel does not exists or + * is not in CHAN_BOUND state. + */ +static int riocm_ch_listen(u16 ch_id) +{ + struct rio_channel *ch = NULL; + int ret = 0; + + riocm_debug(CHOP, "(ch_%d)", ch_id); + + ch = riocm_get_channel(ch_id); + if (!ch) + return -EINVAL; + if (!riocm_cmp_exch(ch, RIO_CM_CHAN_BOUND, RIO_CM_LISTEN)) + ret = -EINVAL; + riocm_put_channel(ch); + return ret; +} + +/* + * riocm_ch_bind - associate a channel object and an mport device + * @ch_id: channel ID + * @mport_id: local mport device ID + * @context: pointer to the additional caller's context + * + * Returns: 0 if success, or + * -ENODEV if cannot find specified mport, + * -EINVAL if the specified channel does not exist or + * is not in IDLE state. + */ +static int riocm_ch_bind(u16 ch_id, u8 mport_id, void *context) +{ + struct rio_channel *ch = NULL; + struct cm_dev *cm; + int rc = -ENODEV; + + riocm_debug(CHOP, "ch_%d to mport_%d", ch_id, mport_id); + + /* Find matching cm_dev object */ + down_read(&rdev_sem); + list_for_each_entry(cm, &cm_dev_list, list) { + if ((cm->mport->id == mport_id) && + rio_mport_is_running(cm->mport)) { + rc = 0; + break; + } + } + + if (rc) + goto exit; + + ch = riocm_get_channel(ch_id); + if (!ch) { + rc = -EINVAL; + goto exit; + } + + spin_lock_bh(&ch->lock); + if (ch->state != RIO_CM_IDLE) { + spin_unlock_bh(&ch->lock); + rc = -EINVAL; + goto err_put; + } + + ch->cmdev = cm; + ch->loc_destid = cm->mport->host_deviceid; + ch->context = context; + ch->state = RIO_CM_CHAN_BOUND; + spin_unlock_bh(&ch->lock); +err_put: + riocm_put_channel(ch); +exit: + up_read(&rdev_sem); + return rc; +} + +/* + * riocm_ch_alloc - channel object allocation helper routine + * @ch_num: channel ID (1 ... RIOCM_MAX_CHNUM, 0 = automatic) + * + * Return value: pointer to newly created channel object, + * or error-valued pointer + */ +static struct rio_channel *riocm_ch_alloc(u16 ch_num) +{ + int id; + int start, end; + struct rio_channel *ch; + + ch = kzalloc(sizeof(*ch), GFP_KERNEL); + if (!ch) + return ERR_PTR(-ENOMEM); + + if (ch_num) { + /* If requested, try to obtain the specified channel ID */ + start = ch_num; + end = ch_num + 1; + } else { + /* Obtain channel ID from the dynamic allocation range */ + start = chstart; + end = RIOCM_MAX_CHNUM + 1; + } + + idr_preload(GFP_KERNEL); + spin_lock_bh(&idr_lock); + id = idr_alloc_cyclic(&ch_idr, ch, start, end, GFP_NOWAIT); + spin_unlock_bh(&idr_lock); + idr_preload_end(); + + if (id < 0) { + kfree(ch); + return ERR_PTR(id == -ENOSPC ? -EBUSY : id); + } + + ch->id = (u16)id; + ch->state = RIO_CM_IDLE; + spin_lock_init(&ch->lock); + INIT_LIST_HEAD(&ch->accept_queue); + INIT_LIST_HEAD(&ch->ch_node); + init_completion(&ch->comp); + init_completion(&ch->comp_close); + kref_init(&ch->ref); + ch->rx_ring.head = 0; + ch->rx_ring.tail = 0; + ch->rx_ring.count = 0; + ch->rx_ring.inuse_cnt = 0; + + return ch; +} + +/* + * riocm_ch_create - creates a new channel object and allocates ID for it + * @ch_num: channel ID (1 ... RIOCM_MAX_CHNUM, 0 = automatic) + * + * Allocates and initializes a new channel object. 
If the parameter ch_num > 0 + * and is within the valid range, riocm_ch_create tries to allocate the + * specified ID for the new channel. If ch_num = 0, channel ID will be assigned + * automatically from the range (chstart ... RIOCM_MAX_CHNUM). + * Module parameter 'chstart' defines start of an ID range available for dynamic + * allocation. Range below 'chstart' is reserved for pre-defined ID numbers. + * Available channel numbers are limited by 16-bit size of channel numbers used + * in the packet header. + * + * Return value: PTR to rio_channel structure if successful (with channel number + * updated via pointer) or error-valued pointer if error. + */ +static struct rio_channel *riocm_ch_create(u16 *ch_num) +{ + struct rio_channel *ch = NULL; + + ch = riocm_ch_alloc(*ch_num); + + if (IS_ERR(ch)) + riocm_debug(CHOP, "Failed to allocate channel %d (err=%ld)", + *ch_num, PTR_ERR(ch)); + else + *ch_num = ch->id; + + return ch; +} + +/* + * riocm_ch_free - channel object release routine + * @ref: pointer to a channel's kref structure + */ +static void riocm_ch_free(struct kref *ref) +{ + struct rio_channel *ch = container_of(ref, struct rio_channel, ref); + int i; + + riocm_debug(CHOP, "(ch_%d)", ch->id); + + if (ch->rx_ring.inuse_cnt) { + for (i = 0; + i < RIOCM_RX_RING_SIZE && ch->rx_ring.inuse_cnt; i++) { + if (ch->rx_ring.inuse[i] != NULL) { + kfree(ch->rx_ring.inuse[i]); + ch->rx_ring.inuse_cnt--; + } + } + } + + if (ch->rx_ring.count) + for (i = 0; i < RIOCM_RX_RING_SIZE && ch->rx_ring.count; i++) { + if (ch->rx_ring.buf[i] != NULL) { + kfree(ch->rx_ring.buf[i]); + ch->rx_ring.count--; + } + } + + complete(&ch->comp_close); +} + +static int riocm_send_close(struct rio_channel *ch) +{ + struct rio_ch_chan_hdr *hdr; + int ret; + + /* + * Send CH_CLOSE notification to the remote RapidIO device + */ + + hdr = kzalloc(sizeof(*hdr), GFP_KERNEL); + if (hdr == NULL) + return -ENOMEM; + + hdr->bhdr.src_id = htonl(ch->loc_destid); + hdr->bhdr.dst_id = htonl(ch->rem_destid); + hdr->bhdr.src_mbox = cmbox; + hdr->bhdr.dst_mbox = cmbox; + hdr->bhdr.type = RIO_CM_CHAN; + hdr->ch_op = CM_CONN_CLOSE; + hdr->dst_ch = htons(ch->rem_channel); + hdr->src_ch = htons(ch->id); + + /* ATTN: the function call below relies on the fact that underlying + * add_outb_message() routine copies TX data into its internal transfer + * buffer. Needs to be reviewed if switched to direct buffer mode. 
+ */ + ret = riocm_post_send(ch->cmdev, ch->rdev, hdr, sizeof(*hdr)); + + if (ret == -EBUSY && !riocm_queue_req(ch->cmdev, ch->rdev, + hdr, sizeof(*hdr))) + return 0; + kfree(hdr); + + if (ret) + riocm_error("ch(%d) send CLOSE failed (ret=%d)", ch->id, ret); + + return ret; +} + +/* + * riocm_ch_close - closes a channel object with specified ID (by local request) + * @ch: channel to be closed + */ +static int riocm_ch_close(struct rio_channel *ch) +{ + unsigned long tmo = msecs_to_jiffies(3000); + enum rio_cm_state state; + long wret; + int ret = 0; + + riocm_debug(CHOP, "ch_%d by %s(%d)", + ch->id, current->comm, task_pid_nr(current)); + + state = riocm_exch(ch, RIO_CM_DESTROYING); + if (state == RIO_CM_CONNECTED) + riocm_send_close(ch); + + complete_all(&ch->comp); + + riocm_put_channel(ch); + wret = wait_for_completion_interruptible_timeout(&ch->comp_close, tmo); + + riocm_debug(WAIT, "wait on %d returns %ld", ch->id, wret); + + if (wret == 0) { + /* Timeout on wait occurred */ + riocm_debug(CHOP, "%s(%d) timed out waiting for ch %d", + current->comm, task_pid_nr(current), ch->id); + ret = -ETIMEDOUT; + } else if (wret == -ERESTARTSYS) { + /* Wait_for_completion was interrupted by a signal */ + riocm_debug(CHOP, "%s(%d) wait for ch %d was interrupted", + current->comm, task_pid_nr(current), ch->id); + ret = -EINTR; + } + + if (!ret) { + riocm_debug(CHOP, "ch_%d resources released", ch->id); + kfree(ch); + } else { + riocm_debug(CHOP, "failed to release ch_%d resources", ch->id); + } + + return ret; +} + +/* + * riocm_cdev_open() - Open character device + */ +static int riocm_cdev_open(struct inode *inode, struct file *filp) +{ + riocm_debug(INIT, "by %s(%d) filp=%p ", + current->comm, task_pid_nr(current), filp); + + if (list_empty(&cm_dev_list)) + return -ENODEV; + + return 0; +} + +/* + * riocm_cdev_release() - Release character device + */ +static int riocm_cdev_release(struct inode *inode, struct file *filp) +{ + struct rio_channel *ch, *_c; + unsigned int i; + LIST_HEAD(list); + + riocm_debug(EXIT, "by %s(%d) filp=%p", + current->comm, task_pid_nr(current), filp); + + /* Check if there are channels associated with this file descriptor */ + spin_lock_bh(&idr_lock); + idr_for_each_entry(&ch_idr, ch, i) { + if (ch && ch->filp == filp) { + riocm_debug(EXIT, "ch_%d not released by %s(%d)", + ch->id, current->comm, + task_pid_nr(current)); + idr_remove(&ch_idr, ch->id); + list_add(&ch->ch_node, &list); + } + } + spin_unlock_bh(&idr_lock); + + if (!list_empty(&list)) { + list_for_each_entry_safe(ch, _c, &list, ch_node) { + list_del(&ch->ch_node); + riocm_ch_close(ch); + } + } + + return 0; +} + +/* + * cm_ep_get_list_size() - Reports number of endpoints in the network + */ +static int cm_ep_get_list_size(void __user *arg) +{ + u32 __user *p = arg; + u32 mport_id; + u32 count = 0; + struct cm_dev *cm; + + if (get_user(mport_id, p)) + return -EFAULT; + if (mport_id >= RIO_MAX_MPORTS) + return -EINVAL; + + /* Find a matching cm_dev object */ + down_read(&rdev_sem); + list_for_each_entry(cm, &cm_dev_list, list) { + if (cm->mport->id == mport_id) { + count = cm->npeers; + up_read(&rdev_sem); + if (copy_to_user(arg, &count, sizeof(u32))) + return -EFAULT; + return 0; + } + } + up_read(&rdev_sem); + + return -ENODEV; +} + +/* + * cm_ep_get_list() - Returns list of attached endpoints + */ +static int cm_ep_get_list(void __user *arg) +{ + struct cm_dev *cm; + struct cm_peer *peer; + u32 info[2]; + void *buf; + u32 nent; + u32 *entry_ptr; + u32 i = 0; + int ret = 0; + + if (copy_from_user(&info, 
arg, sizeof(info))) + return -EFAULT; + + if (info[1] >= RIO_MAX_MPORTS || info[0] > RIOCM_MAX_EP_COUNT) + return -EINVAL; + + /* Find a matching cm_dev object */ + down_read(&rdev_sem); + list_for_each_entry(cm, &cm_dev_list, list) + if (cm->mport->id == (u8)info[1]) + goto found; + + up_read(&rdev_sem); + return -ENODEV; + +found: + nent = min(info[0], cm->npeers); + buf = kcalloc(nent + 2, sizeof(u32), GFP_KERNEL); + if (!buf) { + up_read(&rdev_sem); + return -ENOMEM; + } + + entry_ptr = (u32 *)((uintptr_t)buf + 2*sizeof(u32)); + + list_for_each_entry(peer, &cm->peers, node) { + *entry_ptr = (u32)peer->rdev->destid; + entry_ptr++; + if (++i == nent) + break; + } + up_read(&rdev_sem); + + ((u32 *)buf)[0] = i; /* report an updated number of entries */ + ((u32 *)buf)[1] = info[1]; /* put back an mport ID */ + if (copy_to_user(arg, buf, sizeof(u32) * (info[0] + 2))) + ret = -EFAULT; + + kfree(buf); + return ret; +} + +/* + * cm_mport_get_list() - Returns list of available local mport devices + */ +static int cm_mport_get_list(void __user *arg) +{ + int ret = 0; + u32 entries; + void *buf; + struct cm_dev *cm; + u32 *entry_ptr; + int count = 0; + + if (copy_from_user(&entries, arg, sizeof(entries))) + return -EFAULT; + if (entries == 0 || entries > RIO_MAX_MPORTS) + return -EINVAL; + buf = kcalloc(entries + 1, sizeof(u32), GFP_KERNEL); + if (!buf) + return -ENOMEM; + + /* Scan all registered cm_dev objects */ + entry_ptr = (u32 *)((uintptr_t)buf + sizeof(u32)); + down_read(&rdev_sem); + list_for_each_entry(cm, &cm_dev_list, list) { + if (count++ < entries) { + *entry_ptr = (cm->mport->id << 16) | + cm->mport->host_deviceid; + entry_ptr++; + } + } + up_read(&rdev_sem); + + *((u32 *)buf) = count; /* report a real number of entries */ + if (copy_to_user(arg, buf, sizeof(u32) * (count + 1))) + ret = -EFAULT; + + kfree(buf); + return ret; +} + +/* + * cm_chan_create() - Create a message exchange channel + */ +static int cm_chan_create(struct file *filp, void __user *arg) +{ + u16 __user *p = arg; + u16 ch_num; + struct rio_channel *ch; + + if (get_user(ch_num, p)) + return -EFAULT; + + riocm_debug(CHOP, "ch_%d requested by %s(%d)", + ch_num, current->comm, task_pid_nr(current)); + ch = riocm_ch_create(&ch_num); + if (IS_ERR(ch)) + return PTR_ERR(ch); + + ch->filp = filp; + riocm_debug(CHOP, "ch_%d created by %s(%d)", + ch_num, current->comm, task_pid_nr(current)); + return put_user(ch_num, p); +} + +/* + * cm_chan_close() - Close channel + * @filp: Pointer to file object + * @arg: Channel to close + */ +static int cm_chan_close(struct file *filp, void __user *arg) +{ + u16 __user *p = arg; + u16 ch_num; + struct rio_channel *ch; + + if (get_user(ch_num, p)) + return -EFAULT; + + riocm_debug(CHOP, "ch_%d by %s(%d)", + ch_num, current->comm, task_pid_nr(current)); + + spin_lock_bh(&idr_lock); + ch = idr_find(&ch_idr, ch_num); + if (!ch) { + spin_unlock_bh(&idr_lock); + return 0; + } + if (ch->filp != filp) { + spin_unlock_bh(&idr_lock); + return -EINVAL; + } + idr_remove(&ch_idr, ch->id); + spin_unlock_bh(&idr_lock); + + return riocm_ch_close(ch); +} + +/* + * cm_chan_bind() - Bind channel + * @arg: Channel number + */ +static int cm_chan_bind(void __user *arg) +{ + struct rio_cm_channel chan; + + if (copy_from_user(&chan, arg, sizeof(chan))) + return -EFAULT; + if (chan.mport_id >= RIO_MAX_MPORTS) + return -EINVAL; + + return riocm_ch_bind(chan.id, chan.mport_id, NULL); +} + +/* + * cm_chan_listen() - Listen on channel + * @arg: Channel number + */ +static int cm_chan_listen(void __user *arg) +{ + 
u16 __user *p = arg; + u16 ch_num; + + if (get_user(ch_num, p)) + return -EFAULT; + + return riocm_ch_listen(ch_num); +} + +/* + * cm_chan_accept() - Accept incoming connection + * @filp: Pointer to file object + * @arg: Channel number + */ +static int cm_chan_accept(struct file *filp, void __user *arg) +{ + struct rio_cm_accept param; + long accept_to; + struct rio_channel *ch; + + if (copy_from_user(¶m, arg, sizeof(param))) + return -EFAULT; + + riocm_debug(CHOP, "on ch_%d by %s(%d)", + param.ch_num, current->comm, task_pid_nr(current)); + + accept_to = param.wait_to ? + msecs_to_jiffies(param.wait_to) : 0; + + ch = riocm_ch_accept(param.ch_num, ¶m.ch_num, accept_to); + if (IS_ERR(ch)) + return PTR_ERR(ch); + ch->filp = filp; + + riocm_debug(CHOP, "new ch_%d for %s(%d)", + ch->id, current->comm, task_pid_nr(current)); + + if (copy_to_user(arg, ¶m, sizeof(param))) + return -EFAULT; + return 0; +} + +/* + * cm_chan_connect() - Connect on channel + * @arg: Channel information + */ +static int cm_chan_connect(void __user *arg) +{ + struct rio_cm_channel chan; + struct cm_dev *cm; + struct cm_peer *peer; + int ret = -ENODEV; + + if (copy_from_user(&chan, arg, sizeof(chan))) + return -EFAULT; + if (chan.mport_id >= RIO_MAX_MPORTS) + return -EINVAL; + + down_read(&rdev_sem); + + /* Find matching cm_dev object */ + list_for_each_entry(cm, &cm_dev_list, list) { + if (cm->mport->id == chan.mport_id) { + ret = 0; + break; + } + } + + if (ret) + goto err_out; + + if (chan.remote_destid >= RIO_ANY_DESTID(cm->mport->sys_size)) { + ret = -EINVAL; + goto err_out; + } + + /* Find corresponding RapidIO endpoint device object */ + ret = -ENODEV; + + list_for_each_entry(peer, &cm->peers, node) { + if (peer->rdev->destid == chan.remote_destid) { + ret = 0; + break; + } + } + + if (ret) + goto err_out; + + up_read(&rdev_sem); + + return riocm_ch_connect(chan.id, cm, peer, chan.remote_channel); +err_out: + up_read(&rdev_sem); + return ret; +} + +/* + * cm_chan_msg_send() - Send a message through channel + * @arg: Outbound message information + */ +static int cm_chan_msg_send(void __user *arg) +{ + struct rio_cm_msg msg; + void *buf; + int ret; + + if (copy_from_user(&msg, arg, sizeof(msg))) + return -EFAULT; + if (msg.size > RIO_MAX_MSG_SIZE) + return -EINVAL; + + buf = memdup_user((void __user *)(uintptr_t)msg.msg, msg.size); + if (IS_ERR(buf)) + return PTR_ERR(buf); + + ret = riocm_ch_send(msg.ch_num, buf, msg.size); + + kfree(buf); + return ret; +} + +/* + * cm_chan_msg_rcv() - Receive a message through channel + * @arg: Inbound message information + */ +static int cm_chan_msg_rcv(void __user *arg) +{ + struct rio_cm_msg msg; + struct rio_channel *ch; + void *buf; + long rxto; + int ret = 0, msg_size; + + if (copy_from_user(&msg, arg, sizeof(msg))) + return -EFAULT; + + if (msg.ch_num == 0 || msg.size == 0) + return -EINVAL; + + ch = riocm_get_channel(msg.ch_num); + if (!ch) + return -ENODEV; + + rxto = msg.rxto ? 
msecs_to_jiffies(msg.rxto) : MAX_SCHEDULE_TIMEOUT; + + ret = riocm_ch_receive(ch, &buf, rxto); + if (ret) + goto out; + + msg_size = min(msg.size, (u16)(RIO_MAX_MSG_SIZE)); + + if (copy_to_user((void __user *)(uintptr_t)msg.msg, buf, msg_size)) + ret = -EFAULT; + + riocm_ch_free_rxbuf(ch, buf); +out: + riocm_put_channel(ch); + return ret; +} + +/* + * riocm_cdev_ioctl() - IOCTL requests handler + */ +static long +riocm_cdev_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) +{ + switch (cmd) { + case RIO_CM_EP_GET_LIST_SIZE: + return cm_ep_get_list_size((void __user *)arg); + case RIO_CM_EP_GET_LIST: + return cm_ep_get_list((void __user *)arg); + case RIO_CM_CHAN_CREATE: + return cm_chan_create(filp, (void __user *)arg); + case RIO_CM_CHAN_CLOSE: + return cm_chan_close(filp, (void __user *)arg); + case RIO_CM_CHAN_BIND: + return cm_chan_bind((void __user *)arg); + case RIO_CM_CHAN_LISTEN: + return cm_chan_listen((void __user *)arg); + case RIO_CM_CHAN_ACCEPT: + return cm_chan_accept(filp, (void __user *)arg); + case RIO_CM_CHAN_CONNECT: + return cm_chan_connect((void __user *)arg); + case RIO_CM_CHAN_SEND: + return cm_chan_msg_send((void __user *)arg); + case RIO_CM_CHAN_RECEIVE: + return cm_chan_msg_rcv((void __user *)arg); + case RIO_CM_MPORT_GET_LIST: + return cm_mport_get_list((void __user *)arg); + default: + break; + } + + return -EINVAL; +} + +static const struct file_operations riocm_cdev_fops = { + .owner = THIS_MODULE, + .open = riocm_cdev_open, + .release = riocm_cdev_release, + .unlocked_ioctl = riocm_cdev_ioctl, +}; + +/* + * riocm_add_dev - add new remote RapidIO device into channel management core + * @dev: device object associated with RapidIO device + * @sif: subsystem interface + * + * Adds the specified RapidIO device (if applicable) into peers list of + * the corresponding channel management device (cm_dev). + */ +static int riocm_add_dev(struct device *dev, struct subsys_interface *sif) +{ + struct cm_peer *peer; + struct rio_dev *rdev = to_rio_dev(dev); + struct cm_dev *cm; + + /* Check if the remote device has capabilities required to support CM */ + if (!dev_cm_capable(rdev)) + return 0; + + riocm_debug(RDEV, "(%s)", rio_name(rdev)); + + peer = kmalloc(sizeof(*peer), GFP_KERNEL); + if (!peer) + return -ENOMEM; + + /* Find a corresponding cm_dev object */ + down_write(&rdev_sem); + list_for_each_entry(cm, &cm_dev_list, list) { + if (cm->mport == rdev->net->hport) + goto found; + } + + up_write(&rdev_sem); + kfree(peer); + return -ENODEV; + +found: + peer->rdev = rdev; + list_add_tail(&peer->node, &cm->peers); + cm->npeers++; + + up_write(&rdev_sem); + return 0; +} + +/* + * riocm_remove_dev - remove remote RapidIO device from channel management core + * @dev: device object associated with RapidIO device + * @sif: subsystem interface + * + * Removes the specified RapidIO device (if applicable) from peers list of + * the corresponding channel management device (cm_dev). 
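(Editorial aside, not part of this patch.) riocm_cdev_ioctl() above is the entire user-space surface of this driver. The hedged sketch below shows the matching client-side sequence, again assuming <linux/rio_cm_cdev.h> and /dev/rio_cm; the destination ID and channel 23 are arbitrary and would normally come from RIO_CM_EP_GET_LIST. Note that RIO_CM_CHAN_CONNECT expects the channel still in IDLE state, so the client does not issue RIO_CM_CHAN_BIND (that step is only needed before LISTEN on the server side).

#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/rio_cm_cdev.h>

int main(void)
{
    struct rio_cm_channel chan = { 0 };
    struct rio_cm_msg msg = { 0 };
    char buf[256];                        /* driver overwrites the start with its CM header */
    uint16_t ch_num = 0;                  /* 0 = let riocm_ch_create() pick an ID */
    int fd;

    fd = open("/dev/rio_cm", O_RDWR);
    if (fd < 0) {
        perror("open");
        return 1;
    }

    if (ioctl(fd, RIO_CM_CHAN_CREATE, &ch_num))   /* returns the assigned channel ID */
        goto fail;

    chan.id = ch_num;
    chan.mport_id = 0;                    /* local mport to use */
    chan.remote_destid = 0x01;            /* hypothetical peer destination ID */
    chan.remote_channel = 23;             /* hypothetical well-known server channel */
    if (ioctl(fd, RIO_CM_CHAN_CONNECT, &chan))
        goto fail;

    memset(buf, 0, sizeof(buf));
    strcpy(buf + 32, "hello");            /* payload placed past the header area;
                                           * 32 bytes is a generous assumption for
                                           * sizeof(struct rio_ch_chan_hdr) */
    msg.ch_num = ch_num;
    msg.size = sizeof(buf);
    msg.msg = (uintptr_t)buf;             /* buffer must include room for the CM header */
    if (ioctl(fd, RIO_CM_CHAN_SEND, &msg))
        goto fail;

    ioctl(fd, RIO_CM_CHAN_CLOSE, &ch_num);
    close(fd);
    return 0;
fail:
    perror("ioctl");
    close(fd);
    return 1;
}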
+ */ +static void riocm_remove_dev(struct device *dev, struct subsys_interface *sif) +{ + struct rio_dev *rdev = to_rio_dev(dev); + struct cm_dev *cm; + struct cm_peer *peer; + struct rio_channel *ch, *_c; + unsigned int i; + bool found = false; + LIST_HEAD(list); + + /* Check if the remote device has capabilities required to support CM */ + if (!dev_cm_capable(rdev)) + return; + + riocm_debug(RDEV, "(%s)", rio_name(rdev)); + + /* Find matching cm_dev object */ + down_write(&rdev_sem); + list_for_each_entry(cm, &cm_dev_list, list) { + if (cm->mport == rdev->net->hport) { + found = true; + break; + } + } + + if (!found) { + up_write(&rdev_sem); + return; + } + + /* Remove remote device from the list of peers */ + found = false; + list_for_each_entry(peer, &cm->peers, node) { + if (peer->rdev == rdev) { + riocm_debug(RDEV, "removing peer %s", rio_name(rdev)); + found = true; + list_del(&peer->node); + cm->npeers--; + kfree(peer); + break; + } + } + + up_write(&rdev_sem); + + if (!found) + return; + + /* + * Release channels associated with this peer + */ + + spin_lock_bh(&idr_lock); + idr_for_each_entry(&ch_idr, ch, i) { + if (ch && ch->rdev == rdev) { + if (atomic_read(&rdev->state) != RIO_DEVICE_SHUTDOWN) + riocm_exch(ch, RIO_CM_DISCONNECT); + idr_remove(&ch_idr, ch->id); + list_add(&ch->ch_node, &list); + } + } + spin_unlock_bh(&idr_lock); + + if (!list_empty(&list)) { + list_for_each_entry_safe(ch, _c, &list, ch_node) { + list_del(&ch->ch_node); + riocm_ch_close(ch); + } + } +} + +/* + * riocm_cdev_add() - Create rio_cm char device + * @devno: device number assigned to device (MAJ + MIN) + */ +static int riocm_cdev_add(dev_t devno) +{ + int ret; + + cdev_init(&riocm_cdev.cdev, &riocm_cdev_fops); + riocm_cdev.cdev.owner = THIS_MODULE; + ret = cdev_add(&riocm_cdev.cdev, devno, 1); + if (ret < 0) { + riocm_error("Cannot register a device with error %d", ret); + return ret; + } + + riocm_cdev.dev = device_create(dev_class, NULL, devno, NULL, DEV_NAME); + if (IS_ERR(riocm_cdev.dev)) { + cdev_del(&riocm_cdev.cdev); + return PTR_ERR(riocm_cdev.dev); + } + + riocm_debug(MPORT, "Added %s cdev(%d:%d)", + DEV_NAME, MAJOR(devno), MINOR(devno)); + + return 0; +} + +/* + * riocm_add_mport - add new local mport device into channel management core + * @dev: device object associated with mport + * @class_intf: class interface + * + * When a new mport device is added, CM immediately reserves inbound and + * outbound RapidIO mailboxes that will be used. 
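(Editorial aside, not part of this patch.) Every mport registered by riocm_add_mport() below becomes visible to user space through RIO_CM_MPORT_GET_LIST, handled by cm_mport_get_list() above: word 0 of the buffer carries the requested/returned entry count and each following word packs (mport_id << 16 | host destID). A hedged sketch of decoding that layout, assuming the same /dev/rio_cm node:

#include <stdio.h>
#include <stdint.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/rio_cm_cdev.h>

int main(void)
{
    uint32_t buf[9];   /* [0] = requested/returned count, then up to 8 mport words */
    uint32_t i, n;
    int fd;

    fd = open("/dev/rio_cm", O_RDWR);
    if (fd < 0) {
        perror("open");
        return 1;
    }

    buf[0] = 8;        /* request at most 8 entries (the driver caps at RIO_MAX_MPORTS) */
    if (ioctl(fd, RIO_CM_MPORT_GET_LIST, buf)) {
        perror("RIO_CM_MPORT_GET_LIST");
        close(fd);
        return 1;
    }

    n = buf[0];        /* the driver rewrites word 0 with the real count */
    for (i = 0; i < n && i < 8; i++)
        printf("mport %u: host destID 0x%04x\n",
               buf[i + 1] >> 16, buf[i + 1] & 0xffff);

    close(fd);
    return 0;
}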
+ */ +static int riocm_add_mport(struct device *dev, + struct class_interface *class_intf) +{ + int rc; + int i; + struct cm_dev *cm; + struct rio_mport *mport = to_rio_mport(dev); + + riocm_debug(MPORT, "add mport %s", mport->name); + + cm = kzalloc(sizeof(*cm), GFP_KERNEL); + if (!cm) + return -ENOMEM; + + cm->mport = mport; + + rc = rio_request_outb_mbox(mport, cm, cmbox, + RIOCM_TX_RING_SIZE, riocm_outb_msg_event); + if (rc) { + riocm_error("failed to allocate OBMBOX_%d on %s", + cmbox, mport->name); + kfree(cm); + return -ENODEV; + } + + rc = rio_request_inb_mbox(mport, cm, cmbox, + RIOCM_RX_RING_SIZE, riocm_inb_msg_event); + if (rc) { + riocm_error("failed to allocate IBMBOX_%d on %s", + cmbox, mport->name); + rio_release_outb_mbox(mport, cmbox); + kfree(cm); + return -ENODEV; + } + + cm->rx_wq = create_workqueue(DRV_NAME "/rxq"); + if (!cm->rx_wq) { + rio_release_inb_mbox(mport, cmbox); + rio_release_outb_mbox(mport, cmbox); + kfree(cm); + return -ENOMEM; + } + + /* + * Allocate and register inbound messaging buffers to be ready + * to receive channel and system management requests + */ + for (i = 0; i < RIOCM_RX_RING_SIZE; i++) + cm->rx_buf[i] = NULL; + + cm->rx_slots = RIOCM_RX_RING_SIZE; + mutex_init(&cm->rx_lock); + riocm_rx_fill(cm, RIOCM_RX_RING_SIZE); + INIT_WORK(&cm->rx_work, rio_ibmsg_handler); + + cm->tx_slot = 0; + cm->tx_cnt = 0; + cm->tx_ack_slot = 0; + spin_lock_init(&cm->tx_lock); + + INIT_LIST_HEAD(&cm->peers); + cm->npeers = 0; + INIT_LIST_HEAD(&cm->tx_reqs); + + down_write(&rdev_sem); + list_add_tail(&cm->list, &cm_dev_list); + up_write(&rdev_sem); + + return 0; +} + +/* + * riocm_remove_mport - remove local mport device from channel management core + * @dev: device object associated with mport + * @class_intf: class interface + * + * Removes a local mport device from the list of registered devices that provide + * channel management services. Returns an error if the specified mport is not + * registered with the CM core. 
+ */ +static void riocm_remove_mport(struct device *dev, + struct class_interface *class_intf) +{ + struct rio_mport *mport = to_rio_mport(dev); + struct cm_dev *cm; + struct cm_peer *peer, *temp; + struct rio_channel *ch, *_c; + unsigned int i; + bool found = false; + LIST_HEAD(list); + + riocm_debug(MPORT, "%s", mport->name); + + /* Find a matching cm_dev object */ + down_write(&rdev_sem); + list_for_each_entry(cm, &cm_dev_list, list) { + if (cm->mport == mport) { + list_del(&cm->list); + found = true; + break; + } + } + up_write(&rdev_sem); + if (!found) + return; + + flush_workqueue(cm->rx_wq); + destroy_workqueue(cm->rx_wq); + + /* Release channels bound to this mport */ + spin_lock_bh(&idr_lock); + idr_for_each_entry(&ch_idr, ch, i) { + if (ch->cmdev == cm) { + riocm_debug(RDEV, "%s drop ch_%d", + mport->name, ch->id); + idr_remove(&ch_idr, ch->id); + list_add(&ch->ch_node, &list); + } + } + spin_unlock_bh(&idr_lock); + + if (!list_empty(&list)) { + list_for_each_entry_safe(ch, _c, &list, ch_node) { + list_del(&ch->ch_node); + riocm_ch_close(ch); + } + } + + rio_release_inb_mbox(mport, cmbox); + rio_release_outb_mbox(mport, cmbox); + + /* Remove and free peer entries */ + if (!list_empty(&cm->peers)) + riocm_debug(RDEV, "ATTN: peer list not empty"); + list_for_each_entry_safe(peer, temp, &cm->peers, node) { + riocm_debug(RDEV, "removing peer %s", rio_name(peer->rdev)); + list_del(&peer->node); + kfree(peer); + } + + riocm_rx_free(cm); + kfree(cm); + riocm_debug(MPORT, "%s done", mport->name); +} + +static int rio_cm_shutdown(struct notifier_block *nb, unsigned long code, + void *unused) +{ + struct rio_channel *ch; + unsigned int i; + LIST_HEAD(list); + + riocm_debug(EXIT, "."); + + /* + * If there are any channels left in connected state send + * close notification to the connection partner. + * First build a list of channels that require a closing + * notification because function riocm_send_close() should + * be called outside of spinlock protected code. 
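(Editorial aside, not part of this patch.) The shutdown notifier below repeats the pattern already used by riocm_cdev_release() and riocm_remove_dev(): channels are first moved to a private list while the IDR spinlock is held, and the potentially sleeping close/notify calls run only after the lock is dropped. A user-space analogue of that collect-under-lock, act-outside-lock idiom (a pthread spinlock standing in for idr_lock):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

struct item {
    int id;
    struct item *next;
};

static pthread_spinlock_t lock;
static struct item *active;          /* list protected by 'lock' */

/* Stand-in for riocm_send_close()/riocm_ch_close(): may block, so must run unlocked */
static void slow_close(struct item *it)
{
    usleep(1000);
    printf("closed %d\n", it->id);
}

static void shutdown_all(void)
{
    struct item *grabbed, *next;

    pthread_spin_lock(&lock);        /* phase 1: detach everything, cheap and non-blocking */
    grabbed = active;
    active = NULL;
    pthread_spin_unlock(&lock);

    for (; grabbed; grabbed = next) {  /* phase 2: blocking work, lock already released */
        next = grabbed->next;
        slow_close(grabbed);
        free(grabbed);
    }
}

int main(void)
{
    int i;

    pthread_spin_init(&lock, PTHREAD_PROCESS_PRIVATE);
    for (i = 0; i < 3; i++) {
        struct item *it = malloc(sizeof(*it));

        it->id = i;
        it->next = active;
        active = it;
    }
    shutdown_all();
    return 0;
}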
+ */ + spin_lock_bh(&idr_lock); + idr_for_each_entry(&ch_idr, ch, i) { + if (ch->state == RIO_CM_CONNECTED) { + riocm_debug(EXIT, "close ch %d", ch->id); + idr_remove(&ch_idr, ch->id); + list_add(&ch->ch_node, &list); + } + } + spin_unlock_bh(&idr_lock); + + list_for_each_entry(ch, &list, ch_node) + riocm_send_close(ch); + + return NOTIFY_DONE; +} + +/* + * riocm_interface handles addition/removal of remote RapidIO devices + */ +static struct subsys_interface riocm_interface = { + .name = "rio_cm", + .subsys = &rio_bus_type, + .add_dev = riocm_add_dev, + .remove_dev = riocm_remove_dev, +}; + +/* + * rio_mport_interface handles addition/removal local mport devices + */ +static struct class_interface rio_mport_interface __refdata = { + .class = &rio_mport_class, + .add_dev = riocm_add_mport, + .remove_dev = riocm_remove_mport, +}; + +static struct notifier_block rio_cm_notifier = { + .notifier_call = rio_cm_shutdown, +}; + +static int __init riocm_init(void) +{ + int ret; + + /* Create device class needed by udev */ + dev_class = class_create(THIS_MODULE, DRV_NAME); + if (IS_ERR(dev_class)) { + riocm_error("Cannot create " DRV_NAME " class"); + return PTR_ERR(dev_class); + } + + ret = alloc_chrdev_region(&dev_number, 0, 1, DRV_NAME); + if (ret) { + class_destroy(dev_class); + return ret; + } + + dev_major = MAJOR(dev_number); + dev_minor_base = MINOR(dev_number); + riocm_debug(INIT, "Registered class with %d major", dev_major); + + /* + * Register as rapidio_port class interface to get notifications about + * mport additions and removals. + */ + ret = class_interface_register(&rio_mport_interface); + if (ret) { + riocm_error("class_interface_register error: %d", ret); + goto err_reg; + } + + /* + * Register as RapidIO bus interface to get notifications about + * addition/removal of remote RapidIO devices. + */ + ret = subsys_interface_register(&riocm_interface); + if (ret) { + riocm_error("subsys_interface_register error: %d", ret); + goto err_cl; + } + + ret = register_reboot_notifier(&rio_cm_notifier); + if (ret) { + riocm_error("failed to register reboot notifier (err=%d)", ret); + goto err_sif; + } + + ret = riocm_cdev_add(dev_number); + if (ret) { + unregister_reboot_notifier(&rio_cm_notifier); + ret = -ENODEV; + goto err_sif; + } + + return 0; +err_sif: + subsys_interface_unregister(&riocm_interface); +err_cl: + class_interface_unregister(&rio_mport_interface); +err_reg: + unregister_chrdev_region(dev_number, 1); + class_destroy(dev_class); + return ret; +} + +static void __exit riocm_exit(void) +{ + riocm_debug(EXIT, "enter"); + unregister_reboot_notifier(&rio_cm_notifier); + subsys_interface_unregister(&riocm_interface); + class_interface_unregister(&rio_mport_interface); + idr_destroy(&ch_idr); + + device_unregister(riocm_cdev.dev); + cdev_del(&(riocm_cdev.cdev)); + + class_destroy(dev_class); + unregister_chrdev_region(dev_number, 1); +} + +late_initcall(riocm_init); +module_exit(riocm_exit); diff --git a/drivers/rapidio/switches/Kconfig b/drivers/rapidio/switches/Kconfig new file mode 100644 index 000000000..3e18f9c51 --- /dev/null +++ b/drivers/rapidio/switches/Kconfig @@ -0,0 +1,31 @@ +# SPDX-License-Identifier: GPL-2.0-only +# +# RapidIO switches configuration +# +config RAPIDIO_TSI57X + tristate "IDT Tsi57x SRIO switches support" + help + Includes support for IDT Tsi57x family of serial RapidIO switches. + +config RAPIDIO_CPS_XX + tristate "IDT CPS-xx SRIO switches support" + help + Includes support for IDT CPS-16/12/10/8 serial RapidIO switches. 
+ +config RAPIDIO_TSI568 + tristate "Tsi568 SRIO switch support" + default n + help + Includes support for IDT Tsi568 serial RapidIO switch. + +config RAPIDIO_CPS_GEN2 + tristate "IDT CPS Gen.2 SRIO switch support" + default n + help + Includes support for ITD CPS Gen.2 serial RapidIO switches. + +config RAPIDIO_RXS_GEN3 + tristate "IDT RXS Gen.3 SRIO switch support" + default n + help + Includes support for ITD RXS Gen.3 serial RapidIO switches. diff --git a/drivers/rapidio/switches/Makefile b/drivers/rapidio/switches/Makefile new file mode 100644 index 000000000..69e7de31e --- /dev/null +++ b/drivers/rapidio/switches/Makefile @@ -0,0 +1,10 @@ +# SPDX-License-Identifier: GPL-2.0 +# +# Makefile for RIO switches +# + +obj-$(CONFIG_RAPIDIO_TSI57X) += tsi57x.o +obj-$(CONFIG_RAPIDIO_CPS_XX) += idtcps.o +obj-$(CONFIG_RAPIDIO_TSI568) += tsi568.o +obj-$(CONFIG_RAPIDIO_CPS_GEN2) += idt_gen2.o +obj-$(CONFIG_RAPIDIO_RXS_GEN3) += idt_gen3.o diff --git a/drivers/rapidio/switches/idt_gen2.c b/drivers/rapidio/switches/idt_gen2.c new file mode 100644 index 000000000..8a89bba17 --- /dev/null +++ b/drivers/rapidio/switches/idt_gen2.c @@ -0,0 +1,490 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * IDT CPS Gen.2 Serial RapidIO switch family support + * + * Copyright 2010 Integrated Device Technology, Inc. + * Alexandre Bounine <alexandre.bounine@idt.com> + */ + +#include <linux/stat.h> +#include <linux/module.h> +#include <linux/rio.h> +#include <linux/rio_drv.h> +#include <linux/rio_ids.h> +#include <linux/delay.h> + +#include <asm/page.h> +#include "../rio.h" + +#define LOCAL_RTE_CONF_DESTID_SEL 0x010070 +#define LOCAL_RTE_CONF_DESTID_SEL_PSEL 0x0000001f + +#define IDT_LT_ERR_REPORT_EN 0x03100c + +#define IDT_PORT_ERR_REPORT_EN(n) (0x031044 + (n)*0x40) +#define IDT_PORT_ERR_REPORT_EN_BC 0x03ff04 + +#define IDT_PORT_ISERR_REPORT_EN(n) (0x03104C + (n)*0x40) +#define IDT_PORT_ISERR_REPORT_EN_BC 0x03ff0c +#define IDT_PORT_INIT_TX_ACQUIRED 0x00000020 + +#define IDT_LANE_ERR_REPORT_EN(n) (0x038010 + (n)*0x100) +#define IDT_LANE_ERR_REPORT_EN_BC 0x03ff10 + +#define IDT_DEV_CTRL_1 0xf2000c +#define IDT_DEV_CTRL_1_GENPW 0x02000000 +#define IDT_DEV_CTRL_1_PRSTBEH 0x00000001 + +#define IDT_CFGBLK_ERR_CAPTURE_EN 0x020008 +#define IDT_CFGBLK_ERR_REPORT 0xf20014 +#define IDT_CFGBLK_ERR_REPORT_GENPW 0x00000002 + +#define IDT_AUX_PORT_ERR_CAP_EN 0x020000 +#define IDT_AUX_ERR_REPORT_EN 0xf20018 +#define IDT_AUX_PORT_ERR_LOG_I2C 0x00000002 +#define IDT_AUX_PORT_ERR_LOG_JTAG 0x00000001 + +#define IDT_ISLTL_ADDRESS_CAP 0x021014 + +#define IDT_RIO_DOMAIN 0xf20020 +#define IDT_RIO_DOMAIN_MASK 0x000000ff + +#define IDT_PW_INFO_CSR 0xf20024 + +#define IDT_SOFT_RESET 0xf20040 +#define IDT_SOFT_RESET_REQ 0x00030097 + +#define IDT_I2C_MCTRL 0xf20050 +#define IDT_I2C_MCTRL_GENPW 0x04000000 + +#define IDT_JTAG_CTRL 0xf2005c +#define IDT_JTAG_CTRL_GENPW 0x00000002 + +#define IDT_LANE_CTRL(n) (0xff8000 + (n)*0x100) +#define IDT_LANE_CTRL_BC 0xffff00 +#define IDT_LANE_CTRL_GENPW 0x00200000 +#define IDT_LANE_DFE_1_BC 0xffff18 +#define IDT_LANE_DFE_2_BC 0xffff1c + +#define IDT_PORT_OPS(n) (0xf40004 + (n)*0x100) +#define IDT_PORT_OPS_GENPW 0x08000000 +#define IDT_PORT_OPS_PL_ELOG 0x00000040 +#define IDT_PORT_OPS_LL_ELOG 0x00000020 +#define IDT_PORT_OPS_LT_ELOG 0x00000010 +#define IDT_PORT_OPS_BC 0xf4ff04 + +#define IDT_PORT_ISERR_DET(n) (0xf40008 + (n)*0x100) + +#define IDT_ERR_CAP 0xfd0000 +#define IDT_ERR_CAP_LOG_OVERWR 0x00000004 + +#define IDT_ERR_RD 0xfd0004 + +#define IDT_DEFAULT_ROUTE 0xde +#define IDT_NO_ROUTE 0xdf + +static 
int +idtg2_route_add_entry(struct rio_mport *mport, u16 destid, u8 hopcount, + u16 table, u16 route_destid, u8 route_port) +{ + /* + * Select routing table to update + */ + if (table == RIO_GLOBAL_TABLE) + table = 0; + else + table++; + + if (route_port == RIO_INVALID_ROUTE) + route_port = IDT_DEFAULT_ROUTE; + + rio_mport_write_config_32(mport, destid, hopcount, + LOCAL_RTE_CONF_DESTID_SEL, table); + + /* + * Program destination port for the specified destID + */ + rio_mport_write_config_32(mport, destid, hopcount, + RIO_STD_RTE_CONF_DESTID_SEL_CSR, + (u32)route_destid); + + rio_mport_write_config_32(mport, destid, hopcount, + RIO_STD_RTE_CONF_PORT_SEL_CSR, + (u32)route_port); + udelay(10); + + return 0; +} + +static int +idtg2_route_get_entry(struct rio_mport *mport, u16 destid, u8 hopcount, + u16 table, u16 route_destid, u8 *route_port) +{ + u32 result; + + /* + * Select routing table to read + */ + if (table == RIO_GLOBAL_TABLE) + table = 0; + else + table++; + + rio_mport_write_config_32(mport, destid, hopcount, + LOCAL_RTE_CONF_DESTID_SEL, table); + + rio_mport_write_config_32(mport, destid, hopcount, + RIO_STD_RTE_CONF_DESTID_SEL_CSR, + route_destid); + + rio_mport_read_config_32(mport, destid, hopcount, + RIO_STD_RTE_CONF_PORT_SEL_CSR, &result); + + if (IDT_DEFAULT_ROUTE == (u8)result || IDT_NO_ROUTE == (u8)result) + *route_port = RIO_INVALID_ROUTE; + else + *route_port = (u8)result; + + return 0; +} + +static int +idtg2_route_clr_table(struct rio_mport *mport, u16 destid, u8 hopcount, + u16 table) +{ + u32 i; + + /* + * Select routing table to read + */ + if (table == RIO_GLOBAL_TABLE) + table = 0; + else + table++; + + rio_mport_write_config_32(mport, destid, hopcount, + LOCAL_RTE_CONF_DESTID_SEL, table); + + for (i = RIO_STD_RTE_CONF_EXTCFGEN; + i <= (RIO_STD_RTE_CONF_EXTCFGEN | 0xff);) { + rio_mport_write_config_32(mport, destid, hopcount, + RIO_STD_RTE_CONF_DESTID_SEL_CSR, i); + rio_mport_write_config_32(mport, destid, hopcount, + RIO_STD_RTE_CONF_PORT_SEL_CSR, + (IDT_DEFAULT_ROUTE << 24) | (IDT_DEFAULT_ROUTE << 16) | + (IDT_DEFAULT_ROUTE << 8) | IDT_DEFAULT_ROUTE); + i += 4; + } + + return 0; +} + + +static int +idtg2_set_domain(struct rio_mport *mport, u16 destid, u8 hopcount, + u8 sw_domain) +{ + /* + * Switch domain configuration operates only at global level + */ + rio_mport_write_config_32(mport, destid, hopcount, + IDT_RIO_DOMAIN, (u32)sw_domain); + return 0; +} + +static int +idtg2_get_domain(struct rio_mport *mport, u16 destid, u8 hopcount, + u8 *sw_domain) +{ + u32 regval; + + /* + * Switch domain configuration operates only at global level + */ + rio_mport_read_config_32(mport, destid, hopcount, + IDT_RIO_DOMAIN, ®val); + + *sw_domain = (u8)(regval & 0xff); + + return 0; +} + +static int +idtg2_em_init(struct rio_dev *rdev) +{ + u32 regval; + int i, tmp; + + /* + * This routine performs device-specific initialization only. + * All standard EM configuration should be performed at upper level. + */ + + pr_debug("RIO: %s [%d:%d]\n", __func__, rdev->destid, rdev->hopcount); + + /* Set Port-Write info CSR: PRIO=3 and CRF=1 */ + rio_write_config_32(rdev, IDT_PW_INFO_CSR, 0x0000e000); + + /* + * Configure LT LAYER error reporting. + */ + + /* Enable standard (RIO.p8) error reporting */ + rio_write_config_32(rdev, IDT_LT_ERR_REPORT_EN, + REM_LTL_ERR_ILLTRAN | REM_LTL_ERR_UNSOLR | + REM_LTL_ERR_UNSUPTR); + + /* Use Port-Writes for LT layer error reporting. 
+ * Enable per-port reset + */ + rio_read_config_32(rdev, IDT_DEV_CTRL_1, ®val); + rio_write_config_32(rdev, IDT_DEV_CTRL_1, + regval | IDT_DEV_CTRL_1_GENPW | IDT_DEV_CTRL_1_PRSTBEH); + + /* + * Configure PORT error reporting. + */ + + /* Report all RIO.p8 errors supported by device */ + rio_write_config_32(rdev, IDT_PORT_ERR_REPORT_EN_BC, 0x807e8037); + + /* Configure reporting of implementation specific errors/events */ + rio_write_config_32(rdev, IDT_PORT_ISERR_REPORT_EN_BC, + IDT_PORT_INIT_TX_ACQUIRED); + + /* Use Port-Writes for port error reporting and enable error logging */ + tmp = RIO_GET_TOTAL_PORTS(rdev->swpinfo); + for (i = 0; i < tmp; i++) { + rio_read_config_32(rdev, IDT_PORT_OPS(i), ®val); + rio_write_config_32(rdev, + IDT_PORT_OPS(i), regval | IDT_PORT_OPS_GENPW | + IDT_PORT_OPS_PL_ELOG | + IDT_PORT_OPS_LL_ELOG | + IDT_PORT_OPS_LT_ELOG); + } + /* Overwrite error log if full */ + rio_write_config_32(rdev, IDT_ERR_CAP, IDT_ERR_CAP_LOG_OVERWR); + + /* + * Configure LANE error reporting. + */ + + /* Disable line error reporting */ + rio_write_config_32(rdev, IDT_LANE_ERR_REPORT_EN_BC, 0); + + /* Use Port-Writes for lane error reporting (when enabled) + * (do per-lane update because lanes may have different configuration) + */ + tmp = (rdev->did == RIO_DID_IDTCPS1848) ? 48 : 16; + for (i = 0; i < tmp; i++) { + rio_read_config_32(rdev, IDT_LANE_CTRL(i), ®val); + rio_write_config_32(rdev, IDT_LANE_CTRL(i), + regval | IDT_LANE_CTRL_GENPW); + } + + /* + * Configure AUX error reporting. + */ + + /* Disable JTAG and I2C Error capture */ + rio_write_config_32(rdev, IDT_AUX_PORT_ERR_CAP_EN, 0); + + /* Disable JTAG and I2C Error reporting/logging */ + rio_write_config_32(rdev, IDT_AUX_ERR_REPORT_EN, 0); + + /* Disable Port-Write notification from JTAG */ + rio_write_config_32(rdev, IDT_JTAG_CTRL, 0); + + /* Disable Port-Write notification from I2C */ + rio_read_config_32(rdev, IDT_I2C_MCTRL, ®val); + rio_write_config_32(rdev, IDT_I2C_MCTRL, regval & ~IDT_I2C_MCTRL_GENPW); + + /* + * Configure CFG_BLK error reporting. 
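(Editorial aside, not part of this patch.) A little further below, idtg2_show_errlog() drains the switch error log through a read-only sysfs attribute named errlog, printing one 8-digit hex word per line until the log is empty. Reading it from user space is then just file I/O; the device name in the path is an assumption and must be replaced by the switch's actual RapidIO device name:

#include <stdio.h>

int main(int argc, char **argv)
{
    char path[256], line[32];
    FILE *f;

    /* argv[1]: RapidIO device name of the switch as it appears under sysfs */
    snprintf(path, sizeof(path), "/sys/bus/rapidio/devices/%s/errlog",
             argc > 1 ? argv[1] : "SWITCH_DEVICE_NAME");
    f = fopen(path, "r");
    if (!f) {
        perror(path);
        return 1;
    }
    while (fgets(line, sizeof(line), f))
        printf("error log word: %s", line);
    fclose(f);
    return 0;
}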
+ */ + + /* Disable Configuration Block error capture */ + rio_write_config_32(rdev, IDT_CFGBLK_ERR_CAPTURE_EN, 0); + + /* Disable Port-Writes for Configuration Block error reporting */ + rio_read_config_32(rdev, IDT_CFGBLK_ERR_REPORT, ®val); + rio_write_config_32(rdev, IDT_CFGBLK_ERR_REPORT, + regval & ~IDT_CFGBLK_ERR_REPORT_GENPW); + + /* set TVAL = ~50us */ + rio_write_config_32(rdev, + rdev->phys_efptr + RIO_PORT_LINKTO_CTL_CSR, 0x8e << 8); + + return 0; +} + +static int +idtg2_em_handler(struct rio_dev *rdev, u8 portnum) +{ + u32 regval, em_perrdet, em_ltlerrdet; + + rio_read_config_32(rdev, + rdev->em_efptr + RIO_EM_LTL_ERR_DETECT, &em_ltlerrdet); + if (em_ltlerrdet) { + /* Service Logical/Transport Layer Error(s) */ + if (em_ltlerrdet & REM_LTL_ERR_IMPSPEC) { + /* Implementation specific error reported */ + rio_read_config_32(rdev, + IDT_ISLTL_ADDRESS_CAP, ®val); + + pr_debug("RIO: %s Implementation Specific LTL errors" \ + " 0x%x @(0x%x)\n", + rio_name(rdev), em_ltlerrdet, regval); + + /* Clear implementation specific address capture CSR */ + rio_write_config_32(rdev, IDT_ISLTL_ADDRESS_CAP, 0); + + } + } + + rio_read_config_32(rdev, + rdev->em_efptr + RIO_EM_PN_ERR_DETECT(portnum), &em_perrdet); + if (em_perrdet) { + /* Service Port-Level Error(s) */ + if (em_perrdet & REM_PED_IMPL_SPEC) { + /* Implementation Specific port error reported */ + + /* Get IS errors reported */ + rio_read_config_32(rdev, + IDT_PORT_ISERR_DET(portnum), ®val); + + pr_debug("RIO: %s Implementation Specific Port" \ + " errors 0x%x\n", rio_name(rdev), regval); + + /* Clear all implementation specific events */ + rio_write_config_32(rdev, + IDT_PORT_ISERR_DET(portnum), 0); + } + } + + return 0; +} + +static ssize_t +idtg2_show_errlog(struct device *dev, struct device_attribute *attr, char *buf) +{ + struct rio_dev *rdev = to_rio_dev(dev); + ssize_t len = 0; + u32 regval; + + while (!rio_read_config_32(rdev, IDT_ERR_RD, ®val)) { + if (!regval) /* 0 = end of log */ + break; + len += snprintf(buf + len, PAGE_SIZE - len, + "%08x\n", regval); + if (len >= (PAGE_SIZE - 10)) + break; + } + + return len; +} + +static DEVICE_ATTR(errlog, S_IRUGO, idtg2_show_errlog, NULL); + +static int idtg2_sysfs(struct rio_dev *rdev, bool create) +{ + struct device *dev = &rdev->dev; + int err = 0; + + if (create) { + /* Initialize sysfs entries */ + err = device_create_file(dev, &dev_attr_errlog); + if (err) + dev_err(dev, "Unable create sysfs errlog file\n"); + } else + device_remove_file(dev, &dev_attr_errlog); + + return err; +} + +static struct rio_switch_ops idtg2_switch_ops = { + .owner = THIS_MODULE, + .add_entry = idtg2_route_add_entry, + .get_entry = idtg2_route_get_entry, + .clr_table = idtg2_route_clr_table, + .set_domain = idtg2_set_domain, + .get_domain = idtg2_get_domain, + .em_init = idtg2_em_init, + .em_handle = idtg2_em_handler, +}; + +static int idtg2_probe(struct rio_dev *rdev, const struct rio_device_id *id) +{ + pr_debug("RIO: %s for %s\n", __func__, rio_name(rdev)); + + spin_lock(&rdev->rswitch->lock); + + if (rdev->rswitch->ops) { + spin_unlock(&rdev->rswitch->lock); + return -EINVAL; + } + + rdev->rswitch->ops = &idtg2_switch_ops; + + if (rdev->do_enum) { + /* Ensure that default routing is disabled on startup */ + rio_write_config_32(rdev, + RIO_STD_RTE_DEFAULT_PORT, IDT_NO_ROUTE); + } + + spin_unlock(&rdev->rswitch->lock); + + /* Create device-specific sysfs attributes */ + idtg2_sysfs(rdev, true); + + return 0; +} + +static void idtg2_remove(struct rio_dev *rdev) +{ + pr_debug("RIO: %s for %s\n", 
__func__, rio_name(rdev)); + spin_lock(&rdev->rswitch->lock); + if (rdev->rswitch->ops != &idtg2_switch_ops) { + spin_unlock(&rdev->rswitch->lock); + return; + } + rdev->rswitch->ops = NULL; + spin_unlock(&rdev->rswitch->lock); + /* Remove device-specific sysfs attributes */ + idtg2_sysfs(rdev, false); +} + +static const struct rio_device_id idtg2_id_table[] = { + {RIO_DEVICE(RIO_DID_IDTCPS1848, RIO_VID_IDT)}, + {RIO_DEVICE(RIO_DID_IDTCPS1616, RIO_VID_IDT)}, + {RIO_DEVICE(RIO_DID_IDTVPS1616, RIO_VID_IDT)}, + {RIO_DEVICE(RIO_DID_IDTSPS1616, RIO_VID_IDT)}, + {RIO_DEVICE(RIO_DID_IDTCPS1432, RIO_VID_IDT)}, + { 0, } /* terminate list */ +}; + +static struct rio_driver idtg2_driver = { + .name = "idt_gen2", + .id_table = idtg2_id_table, + .probe = idtg2_probe, + .remove = idtg2_remove, +}; + +static int __init idtg2_init(void) +{ + return rio_register_driver(&idtg2_driver); +} + +static void __exit idtg2_exit(void) +{ + pr_debug("RIO: %s\n", __func__); + rio_unregister_driver(&idtg2_driver); + pr_debug("RIO: %s done\n", __func__); +} + +device_initcall(idtg2_init); +module_exit(idtg2_exit); + +MODULE_DESCRIPTION("IDT CPS Gen.2 Serial RapidIO switch family driver"); +MODULE_AUTHOR("Integrated Device Technology, Inc."); +MODULE_LICENSE("GPL"); diff --git a/drivers/rapidio/switches/idt_gen3.c b/drivers/rapidio/switches/idt_gen3.c new file mode 100644 index 000000000..d7537e621 --- /dev/null +++ b/drivers/rapidio/switches/idt_gen3.c @@ -0,0 +1,378 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * IDT RXS Gen.3 Serial RapidIO switch family support + * + * Copyright 2016 Integrated Device Technology, Inc. + */ + +#include <linux/stat.h> +#include <linux/module.h> +#include <linux/rio.h> +#include <linux/rio_drv.h> +#include <linux/rio_ids.h> +#include <linux/delay.h> + +#include <asm/page.h> +#include "../rio.h" + +#define RIO_EM_PW_STAT 0x40020 +#define RIO_PW_CTL 0x40204 +#define RIO_PW_CTL_PW_TMR 0xffffff00 +#define RIO_PW_ROUTE 0x40208 + +#define RIO_EM_DEV_INT_EN 0x40030 + +#define RIO_PLM_SPx_IMP_SPEC_CTL(x) (0x10100 + (x)*0x100) +#define RIO_PLM_SPx_IMP_SPEC_CTL_SOFT_RST 0x02000000 + +#define RIO_PLM_SPx_PW_EN(x) (0x10118 + (x)*0x100) +#define RIO_PLM_SPx_PW_EN_OK2U 0x40000000 +#define RIO_PLM_SPx_PW_EN_LINIT 0x10000000 + +#define RIO_BC_L2_Gn_ENTRYx_CSR(n, x) (0x31000 + (n)*0x400 + (x)*0x4) +#define RIO_SPx_L2_Gn_ENTRYy_CSR(x, n, y) \ + (0x51000 + (x)*0x2000 + (n)*0x400 + (y)*0x4) + +static int +idtg3_route_add_entry(struct rio_mport *mport, u16 destid, u8 hopcount, + u16 table, u16 route_destid, u8 route_port) +{ + u32 rval; + u32 entry = route_port; + int err = 0; + + pr_debug("RIO: %s t=0x%x did_%x to p_%x\n", + __func__, table, route_destid, entry); + + if (route_destid > 0xFF) + return -EINVAL; + + if (route_port == RIO_INVALID_ROUTE) + entry = RIO_RT_ENTRY_DROP_PKT; + + if (table == RIO_GLOBAL_TABLE) { + /* Use broadcast register to update all per-port tables */ + err = rio_mport_write_config_32(mport, destid, hopcount, + RIO_BC_L2_Gn_ENTRYx_CSR(0, route_destid), + entry); + return err; + } + + /* + * Verify that specified port/table number is valid + */ + err = rio_mport_read_config_32(mport, destid, hopcount, + RIO_SWP_INFO_CAR, &rval); + if (err) + return err; + + if (table >= RIO_GET_TOTAL_PORTS(rval)) + return -EINVAL; + + err = rio_mport_write_config_32(mport, destid, hopcount, + RIO_SPx_L2_Gn_ENTRYy_CSR(table, 0, route_destid), + entry); + return err; +} + +static int +idtg3_route_get_entry(struct rio_mport *mport, u16 destid, u8 hopcount, + u16 table, u16 route_destid, u8 
*route_port) +{ + u32 rval; + int err; + + if (route_destid > 0xFF) + return -EINVAL; + + err = rio_mport_read_config_32(mport, destid, hopcount, + RIO_SWP_INFO_CAR, &rval); + if (err) + return err; + + /* + * This switch device does not have the dedicated global routing table. + * It is substituted by reading routing table of the ingress port of + * maintenance read requests. + */ + if (table == RIO_GLOBAL_TABLE) + table = RIO_GET_PORT_NUM(rval); + else if (table >= RIO_GET_TOTAL_PORTS(rval)) + return -EINVAL; + + err = rio_mport_read_config_32(mport, destid, hopcount, + RIO_SPx_L2_Gn_ENTRYy_CSR(table, 0, route_destid), + &rval); + if (err) + return err; + + if (rval == RIO_RT_ENTRY_DROP_PKT) + *route_port = RIO_INVALID_ROUTE; + else + *route_port = (u8)rval; + + return 0; +} + +static int +idtg3_route_clr_table(struct rio_mport *mport, u16 destid, u8 hopcount, + u16 table) +{ + u32 i; + u32 rval; + int err; + + if (table == RIO_GLOBAL_TABLE) { + for (i = 0; i <= 0xff; i++) { + err = rio_mport_write_config_32(mport, destid, hopcount, + RIO_BC_L2_Gn_ENTRYx_CSR(0, i), + RIO_RT_ENTRY_DROP_PKT); + if (err) + break; + } + + return err; + } + + err = rio_mport_read_config_32(mport, destid, hopcount, + RIO_SWP_INFO_CAR, &rval); + if (err) + return err; + + if (table >= RIO_GET_TOTAL_PORTS(rval)) + return -EINVAL; + + for (i = 0; i <= 0xff; i++) { + err = rio_mport_write_config_32(mport, destid, hopcount, + RIO_SPx_L2_Gn_ENTRYy_CSR(table, 0, i), + RIO_RT_ENTRY_DROP_PKT); + if (err) + break; + } + + return err; +} + +/* + * This routine performs device-specific initialization only. + * All standard EM configuration should be performed at upper level. + */ +static int +idtg3_em_init(struct rio_dev *rdev) +{ + int i, tmp; + u32 rval; + + pr_debug("RIO: %s [%d:%d]\n", __func__, rdev->destid, rdev->hopcount); + + /* Disable assertion of interrupt signal */ + rio_write_config_32(rdev, RIO_EM_DEV_INT_EN, 0); + + /* Disable port-write event notifications during initialization */ + rio_write_config_32(rdev, rdev->em_efptr + RIO_EM_PW_TX_CTRL, + RIO_EM_PW_TX_CTRL_PW_DIS); + + /* Configure Port-Write notifications for hot-swap events */ + tmp = RIO_GET_TOTAL_PORTS(rdev->swpinfo); + for (i = 0; i < tmp; i++) { + + rio_read_config_32(rdev, + RIO_DEV_PORT_N_ERR_STS_CSR(rdev, i), + &rval); + if (rval & RIO_PORT_N_ERR_STS_PORT_UA) + continue; + + /* Clear events signaled before enabling notification */ + rio_write_config_32(rdev, + rdev->em_efptr + RIO_EM_PN_ERR_DETECT(i), 0); + + /* Enable event notifications */ + rio_write_config_32(rdev, + rdev->em_efptr + RIO_EM_PN_ERRRATE_EN(i), + RIO_EM_PN_ERRRATE_EN_OK2U | RIO_EM_PN_ERRRATE_EN_U2OK); + /* Enable port-write generation on events */ + rio_write_config_32(rdev, RIO_PLM_SPx_PW_EN(i), + RIO_PLM_SPx_PW_EN_OK2U | RIO_PLM_SPx_PW_EN_LINIT); + + } + + /* Set Port-Write destination port */ + tmp = RIO_GET_PORT_NUM(rdev->swpinfo); + rio_write_config_32(rdev, RIO_PW_ROUTE, 1 << tmp); + + + /* Enable sending port-write event notifications */ + rio_write_config_32(rdev, rdev->em_efptr + RIO_EM_PW_TX_CTRL, 0); + + /* set TVAL = ~50us */ + rio_write_config_32(rdev, + rdev->phys_efptr + RIO_PORT_LINKTO_CTL_CSR, 0x8e << 8); + return 0; +} + + +/* + * idtg3_em_handler - device-specific error handler + * + * If the link is down (PORT_UNINIT) does nothing - this is considered + * as link partner removal from the port. + * + * If the link is up (PORT_OK) - situation is handled as *new* device insertion. 
+ * In this case ERR_STOP bits are cleared by issuing soft reset command to the + * reporting port. Inbound and outbound ackIDs are cleared by the reset as well. + * This way the port is synchronized with freshly inserted device (assuming it + * was reset/powered-up on insertion). + * + * TODO: This is not sufficient in a situation when a link between two devices + * was down and up again (e.g. cable disconnect). For that situation full ackID + * realignment process has to be implemented. + */ +static int +idtg3_em_handler(struct rio_dev *rdev, u8 pnum) +{ + u32 err_status; + u32 rval; + + rio_read_config_32(rdev, + RIO_DEV_PORT_N_ERR_STS_CSR(rdev, pnum), + &err_status); + + /* Do nothing for device/link removal */ + if (err_status & RIO_PORT_N_ERR_STS_PORT_UNINIT) + return 0; + + /* When link is OK we have a device insertion. + * Request port soft reset to clear errors if they present. + * Inbound and outbound ackIDs will be 0 after reset. + */ + if (err_status & (RIO_PORT_N_ERR_STS_OUT_ES | + RIO_PORT_N_ERR_STS_INP_ES)) { + rio_read_config_32(rdev, RIO_PLM_SPx_IMP_SPEC_CTL(pnum), &rval); + rio_write_config_32(rdev, RIO_PLM_SPx_IMP_SPEC_CTL(pnum), + rval | RIO_PLM_SPx_IMP_SPEC_CTL_SOFT_RST); + udelay(10); + rio_write_config_32(rdev, RIO_PLM_SPx_IMP_SPEC_CTL(pnum), rval); + msleep(500); + } + + return 0; +} + +static struct rio_switch_ops idtg3_switch_ops = { + .owner = THIS_MODULE, + .add_entry = idtg3_route_add_entry, + .get_entry = idtg3_route_get_entry, + .clr_table = idtg3_route_clr_table, + .em_init = idtg3_em_init, + .em_handle = idtg3_em_handler, +}; + +static int idtg3_probe(struct rio_dev *rdev, const struct rio_device_id *id) +{ + pr_debug("RIO: %s for %s\n", __func__, rio_name(rdev)); + + spin_lock(&rdev->rswitch->lock); + + if (rdev->rswitch->ops) { + spin_unlock(&rdev->rswitch->lock); + return -EINVAL; + } + + rdev->rswitch->ops = &idtg3_switch_ops; + + if (rdev->do_enum) { + /* Disable hierarchical routing support: Existing fabric + * enumeration/discovery process (see rio-scan.c) uses 8-bit + * flat destination ID routing only. + */ + rio_write_config_32(rdev, 0x5000 + RIO_BC_RT_CTL_CSR, 0); + } + + spin_unlock(&rdev->rswitch->lock); + + return 0; +} + +static void idtg3_remove(struct rio_dev *rdev) +{ + pr_debug("RIO: %s for %s\n", __func__, rio_name(rdev)); + spin_lock(&rdev->rswitch->lock); + if (rdev->rswitch->ops == &idtg3_switch_ops) + rdev->rswitch->ops = NULL; + spin_unlock(&rdev->rswitch->lock); +} + +/* + * Gen3 switches repeat sending PW messages until a corresponding event flag + * is cleared. Use shutdown notification to disable generation of port-write + * messages if their destination node is shut down. 
+ */ +static void idtg3_shutdown(struct rio_dev *rdev) +{ + int i; + u32 rval; + u16 destid; + + /* Currently the enumerator node acts also as PW handler */ + if (!rdev->do_enum) + return; + + pr_debug("RIO: %s(%s)\n", __func__, rio_name(rdev)); + + rio_read_config_32(rdev, RIO_PW_ROUTE, &rval); + i = RIO_GET_PORT_NUM(rdev->swpinfo); + + /* Check port-write destination port */ + if (!((1 << i) & rval)) + return; + + /* Disable sending port-write event notifications if PW destID + * matches to one of the enumerator node + */ + rio_read_config_32(rdev, rdev->em_efptr + RIO_EM_PW_TGT_DEVID, &rval); + + if (rval & RIO_EM_PW_TGT_DEVID_DEV16) + destid = rval >> 16; + else + destid = ((rval & RIO_EM_PW_TGT_DEVID_D8) >> 16); + + if (rdev->net->hport->host_deviceid == destid) { + rio_write_config_32(rdev, + rdev->em_efptr + RIO_EM_PW_TX_CTRL, 0); + pr_debug("RIO: %s(%s) PW transmission disabled\n", + __func__, rio_name(rdev)); + } +} + +static const struct rio_device_id idtg3_id_table[] = { + {RIO_DEVICE(RIO_DID_IDTRXS1632, RIO_VID_IDT)}, + {RIO_DEVICE(RIO_DID_IDTRXS2448, RIO_VID_IDT)}, + { 0, } /* terminate list */ +}; + +static struct rio_driver idtg3_driver = { + .name = "idt_gen3", + .id_table = idtg3_id_table, + .probe = idtg3_probe, + .remove = idtg3_remove, + .shutdown = idtg3_shutdown, +}; + +static int __init idtg3_init(void) +{ + return rio_register_driver(&idtg3_driver); +} + +static void __exit idtg3_exit(void) +{ + pr_debug("RIO: %s\n", __func__); + rio_unregister_driver(&idtg3_driver); + pr_debug("RIO: %s done\n", __func__); +} + +device_initcall(idtg3_init); +module_exit(idtg3_exit); + +MODULE_DESCRIPTION("IDT RXS Gen.3 Serial RapidIO switch family driver"); +MODULE_AUTHOR("Integrated Device Technology, Inc."); +MODULE_LICENSE("GPL"); diff --git a/drivers/rapidio/switches/idtcps.c b/drivers/rapidio/switches/idtcps.c new file mode 100644 index 000000000..c825728eb --- /dev/null +++ b/drivers/rapidio/switches/idtcps.c @@ -0,0 +1,199 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * IDT CPS RapidIO switches support + * + * Copyright 2009-2010 Integrated Device Technology, Inc. 
+ * Alexandre Bounine <alexandre.bounine@idt.com> + */ + +#include <linux/rio.h> +#include <linux/rio_drv.h> +#include <linux/rio_ids.h> +#include <linux/module.h> +#include "../rio.h" + +#define CPS_DEFAULT_ROUTE 0xde +#define CPS_NO_ROUTE 0xdf + +#define IDTCPS_RIO_DOMAIN 0xf20020 + +static int +idtcps_route_add_entry(struct rio_mport *mport, u16 destid, u8 hopcount, + u16 table, u16 route_destid, u8 route_port) +{ + u32 result; + + if (route_port == RIO_INVALID_ROUTE) + route_port = CPS_DEFAULT_ROUTE; + + if (table == RIO_GLOBAL_TABLE) { + rio_mport_write_config_32(mport, destid, hopcount, + RIO_STD_RTE_CONF_DESTID_SEL_CSR, route_destid); + + rio_mport_read_config_32(mport, destid, hopcount, + RIO_STD_RTE_CONF_PORT_SEL_CSR, &result); + + result = (0xffffff00 & result) | (u32)route_port; + rio_mport_write_config_32(mport, destid, hopcount, + RIO_STD_RTE_CONF_PORT_SEL_CSR, result); + } + + return 0; +} + +static int +idtcps_route_get_entry(struct rio_mport *mport, u16 destid, u8 hopcount, + u16 table, u16 route_destid, u8 *route_port) +{ + u32 result; + + if (table == RIO_GLOBAL_TABLE) { + rio_mport_write_config_32(mport, destid, hopcount, + RIO_STD_RTE_CONF_DESTID_SEL_CSR, route_destid); + + rio_mport_read_config_32(mport, destid, hopcount, + RIO_STD_RTE_CONF_PORT_SEL_CSR, &result); + + if (CPS_DEFAULT_ROUTE == (u8)result || + CPS_NO_ROUTE == (u8)result) + *route_port = RIO_INVALID_ROUTE; + else + *route_port = (u8)result; + } + + return 0; +} + +static int +idtcps_route_clr_table(struct rio_mport *mport, u16 destid, u8 hopcount, + u16 table) +{ + u32 i; + + if (table == RIO_GLOBAL_TABLE) { + for (i = 0x80000000; i <= 0x800000ff;) { + rio_mport_write_config_32(mport, destid, hopcount, + RIO_STD_RTE_CONF_DESTID_SEL_CSR, i); + rio_mport_write_config_32(mport, destid, hopcount, + RIO_STD_RTE_CONF_PORT_SEL_CSR, + (CPS_DEFAULT_ROUTE << 24) | + (CPS_DEFAULT_ROUTE << 16) | + (CPS_DEFAULT_ROUTE << 8) | CPS_DEFAULT_ROUTE); + i += 4; + } + } + + return 0; +} + +static int +idtcps_set_domain(struct rio_mport *mport, u16 destid, u8 hopcount, + u8 sw_domain) +{ + /* + * Switch domain configuration operates only at global level + */ + rio_mport_write_config_32(mport, destid, hopcount, + IDTCPS_RIO_DOMAIN, (u32)sw_domain); + return 0; +} + +static int +idtcps_get_domain(struct rio_mport *mport, u16 destid, u8 hopcount, + u8 *sw_domain) +{ + u32 regval; + + /* + * Switch domain configuration operates only at global level + */ + rio_mport_read_config_32(mport, destid, hopcount, + IDTCPS_RIO_DOMAIN, ®val); + + *sw_domain = (u8)(regval & 0xff); + + return 0; +} + +static struct rio_switch_ops idtcps_switch_ops = { + .owner = THIS_MODULE, + .add_entry = idtcps_route_add_entry, + .get_entry = idtcps_route_get_entry, + .clr_table = idtcps_route_clr_table, + .set_domain = idtcps_set_domain, + .get_domain = idtcps_get_domain, + .em_init = NULL, + .em_handle = NULL, +}; + +static int idtcps_probe(struct rio_dev *rdev, const struct rio_device_id *id) +{ + pr_debug("RIO: %s for %s\n", __func__, rio_name(rdev)); + + spin_lock(&rdev->rswitch->lock); + + if (rdev->rswitch->ops) { + spin_unlock(&rdev->rswitch->lock); + return -EINVAL; + } + + rdev->rswitch->ops = &idtcps_switch_ops; + + if (rdev->do_enum) { + /* set TVAL = ~50us */ + rio_write_config_32(rdev, + rdev->phys_efptr + RIO_PORT_LINKTO_CTL_CSR, 0x8e << 8); + /* Ensure that default routing is disabled on startup */ + rio_write_config_32(rdev, + RIO_STD_RTE_DEFAULT_PORT, CPS_NO_ROUTE); + } + + spin_unlock(&rdev->rswitch->lock); + return 0; +} + +static void 
idtcps_remove(struct rio_dev *rdev) +{ + pr_debug("RIO: %s for %s\n", __func__, rio_name(rdev)); + spin_lock(&rdev->rswitch->lock); + if (rdev->rswitch->ops != &idtcps_switch_ops) { + spin_unlock(&rdev->rswitch->lock); + return; + } + rdev->rswitch->ops = NULL; + spin_unlock(&rdev->rswitch->lock); +} + +static const struct rio_device_id idtcps_id_table[] = { + {RIO_DEVICE(RIO_DID_IDTCPS6Q, RIO_VID_IDT)}, + {RIO_DEVICE(RIO_DID_IDTCPS8, RIO_VID_IDT)}, + {RIO_DEVICE(RIO_DID_IDTCPS10Q, RIO_VID_IDT)}, + {RIO_DEVICE(RIO_DID_IDTCPS12, RIO_VID_IDT)}, + {RIO_DEVICE(RIO_DID_IDTCPS16, RIO_VID_IDT)}, + {RIO_DEVICE(RIO_DID_IDT70K200, RIO_VID_IDT)}, + { 0, } /* terminate list */ +}; + +static struct rio_driver idtcps_driver = { + .name = "idtcps", + .id_table = idtcps_id_table, + .probe = idtcps_probe, + .remove = idtcps_remove, +}; + +static int __init idtcps_init(void) +{ + return rio_register_driver(&idtcps_driver); +} + +static void __exit idtcps_exit(void) +{ + rio_unregister_driver(&idtcps_driver); +} + +device_initcall(idtcps_init); +module_exit(idtcps_exit); + +MODULE_DESCRIPTION("IDT CPS Gen.1 Serial RapidIO switch family driver"); +MODULE_AUTHOR("Integrated Device Technology, Inc."); +MODULE_LICENSE("GPL"); diff --git a/drivers/rapidio/switches/tsi568.c b/drivers/rapidio/switches/tsi568.c new file mode 100644 index 000000000..103b48a24 --- /dev/null +++ b/drivers/rapidio/switches/tsi568.c @@ -0,0 +1,195 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * RapidIO Tsi568 switch support + * + * Copyright 2009-2010 Integrated Device Technology, Inc. + * Alexandre Bounine <alexandre.bounine@idt.com> + * - Added EM support + * - Modified switch operations initialization. + * + * Copyright 2005 MontaVista Software, Inc. + * Matt Porter <mporter@kernel.crashing.org> + */ + +#include <linux/rio.h> +#include <linux/rio_drv.h> +#include <linux/rio_ids.h> +#include <linux/delay.h> +#include <linux/module.h> +#include "../rio.h" + +/* Global (broadcast) route registers */ +#define SPBC_ROUTE_CFG_DESTID 0x10070 +#define SPBC_ROUTE_CFG_PORT 0x10074 + +/* Per port route registers */ +#define SPP_ROUTE_CFG_DESTID(n) (0x11070 + 0x100*n) +#define SPP_ROUTE_CFG_PORT(n) (0x11074 + 0x100*n) + +#define TSI568_SP_MODE(n) (0x11004 + 0x100*n) +#define TSI568_SP_MODE_PW_DIS 0x08000000 + +static int +tsi568_route_add_entry(struct rio_mport *mport, u16 destid, u8 hopcount, + u16 table, u16 route_destid, u8 route_port) +{ + if (table == RIO_GLOBAL_TABLE) { + rio_mport_write_config_32(mport, destid, hopcount, + SPBC_ROUTE_CFG_DESTID, route_destid); + rio_mport_write_config_32(mport, destid, hopcount, + SPBC_ROUTE_CFG_PORT, route_port); + } else { + rio_mport_write_config_32(mport, destid, hopcount, + SPP_ROUTE_CFG_DESTID(table), + route_destid); + rio_mport_write_config_32(mport, destid, hopcount, + SPP_ROUTE_CFG_PORT(table), route_port); + } + + udelay(10); + + return 0; +} + +static int +tsi568_route_get_entry(struct rio_mport *mport, u16 destid, u8 hopcount, + u16 table, u16 route_destid, u8 *route_port) +{ + int ret = 0; + u32 result; + + if (table == RIO_GLOBAL_TABLE) { + rio_mport_write_config_32(mport, destid, hopcount, + SPBC_ROUTE_CFG_DESTID, route_destid); + rio_mport_read_config_32(mport, destid, hopcount, + SPBC_ROUTE_CFG_PORT, &result); + } else { + rio_mport_write_config_32(mport, destid, hopcount, + SPP_ROUTE_CFG_DESTID(table), + route_destid); + rio_mport_read_config_32(mport, destid, hopcount, + SPP_ROUTE_CFG_PORT(table), &result); + } + + *route_port = result; + if (*route_port > 15) + ret = -1; + + 
return ret; +} + +static int +tsi568_route_clr_table(struct rio_mport *mport, u16 destid, u8 hopcount, + u16 table) +{ + u32 route_idx; + u32 lut_size; + + lut_size = (mport->sys_size) ? 0x1ff : 0xff; + + if (table == RIO_GLOBAL_TABLE) { + rio_mport_write_config_32(mport, destid, hopcount, + SPBC_ROUTE_CFG_DESTID, 0x80000000); + for (route_idx = 0; route_idx <= lut_size; route_idx++) + rio_mport_write_config_32(mport, destid, hopcount, + SPBC_ROUTE_CFG_PORT, + RIO_INVALID_ROUTE); + } else { + rio_mport_write_config_32(mport, destid, hopcount, + SPP_ROUTE_CFG_DESTID(table), + 0x80000000); + for (route_idx = 0; route_idx <= lut_size; route_idx++) + rio_mport_write_config_32(mport, destid, hopcount, + SPP_ROUTE_CFG_PORT(table), + RIO_INVALID_ROUTE); + } + + return 0; +} + +static int +tsi568_em_init(struct rio_dev *rdev) +{ + u32 regval; + int portnum; + + pr_debug("TSI568 %s [%d:%d]\n", __func__, rdev->destid, rdev->hopcount); + + /* Make sure that Port-Writes are disabled (for all ports) */ + for (portnum = 0; + portnum < RIO_GET_TOTAL_PORTS(rdev->swpinfo); portnum++) { + rio_read_config_32(rdev, TSI568_SP_MODE(portnum), ®val); + rio_write_config_32(rdev, TSI568_SP_MODE(portnum), + regval | TSI568_SP_MODE_PW_DIS); + } + + return 0; +} + +static struct rio_switch_ops tsi568_switch_ops = { + .owner = THIS_MODULE, + .add_entry = tsi568_route_add_entry, + .get_entry = tsi568_route_get_entry, + .clr_table = tsi568_route_clr_table, + .set_domain = NULL, + .get_domain = NULL, + .em_init = tsi568_em_init, + .em_handle = NULL, +}; + +static int tsi568_probe(struct rio_dev *rdev, const struct rio_device_id *id) +{ + pr_debug("RIO: %s for %s\n", __func__, rio_name(rdev)); + + spin_lock(&rdev->rswitch->lock); + + if (rdev->rswitch->ops) { + spin_unlock(&rdev->rswitch->lock); + return -EINVAL; + } + + rdev->rswitch->ops = &tsi568_switch_ops; + spin_unlock(&rdev->rswitch->lock); + return 0; +} + +static void tsi568_remove(struct rio_dev *rdev) +{ + pr_debug("RIO: %s for %s\n", __func__, rio_name(rdev)); + spin_lock(&rdev->rswitch->lock); + if (rdev->rswitch->ops != &tsi568_switch_ops) { + spin_unlock(&rdev->rswitch->lock); + return; + } + rdev->rswitch->ops = NULL; + spin_unlock(&rdev->rswitch->lock); +} + +static const struct rio_device_id tsi568_id_table[] = { + {RIO_DEVICE(RIO_DID_TSI568, RIO_VID_TUNDRA)}, + { 0, } /* terminate list */ +}; + +static struct rio_driver tsi568_driver = { + .name = "tsi568", + .id_table = tsi568_id_table, + .probe = tsi568_probe, + .remove = tsi568_remove, +}; + +static int __init tsi568_init(void) +{ + return rio_register_driver(&tsi568_driver); +} + +static void __exit tsi568_exit(void) +{ + rio_unregister_driver(&tsi568_driver); +} + +device_initcall(tsi568_init); +module_exit(tsi568_exit); + +MODULE_DESCRIPTION("IDT Tsi568 Serial RapidIO switch driver"); +MODULE_AUTHOR("Integrated Device Technology, Inc."); +MODULE_LICENSE("GPL"); diff --git a/drivers/rapidio/switches/tsi57x.c b/drivers/rapidio/switches/tsi57x.c new file mode 100644 index 000000000..271762046 --- /dev/null +++ b/drivers/rapidio/switches/tsi57x.c @@ -0,0 +1,365 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * RapidIO Tsi57x switch family support + * + * Copyright 2009-2010 Integrated Device Technology, Inc. + * Alexandre Bounine <alexandre.bounine@idt.com> + * - Added EM support + * - Modified switch operations initialization. + * + * Copyright 2005 MontaVista Software, Inc. 
+ * Matt Porter <mporter@kernel.crashing.org> + */ + +#include <linux/rio.h> +#include <linux/rio_drv.h> +#include <linux/rio_ids.h> +#include <linux/delay.h> +#include <linux/module.h> +#include "../rio.h" + +/* Global (broadcast) route registers */ +#define SPBC_ROUTE_CFG_DESTID 0x10070 +#define SPBC_ROUTE_CFG_PORT 0x10074 + +/* Per port route registers */ +#define SPP_ROUTE_CFG_DESTID(n) (0x11070 + 0x100*n) +#define SPP_ROUTE_CFG_PORT(n) (0x11074 + 0x100*n) + +#define TSI578_SP_MODE(n) (0x11004 + n*0x100) +#define TSI578_SP_MODE_GLBL 0x10004 +#define TSI578_SP_MODE_PW_DIS 0x08000000 +#define TSI578_SP_MODE_LUT_512 0x01000000 + +#define TSI578_SP_CTL_INDEP(n) (0x13004 + n*0x100) +#define TSI578_SP_LUT_PEINF(n) (0x13010 + n*0x100) +#define TSI578_SP_CS_TX(n) (0x13014 + n*0x100) +#define TSI578_SP_INT_STATUS(n) (0x13018 + n*0x100) + +#define TSI578_GLBL_ROUTE_BASE 0x10078 + +static int +tsi57x_route_add_entry(struct rio_mport *mport, u16 destid, u8 hopcount, + u16 table, u16 route_destid, u8 route_port) +{ + if (table == RIO_GLOBAL_TABLE) { + rio_mport_write_config_32(mport, destid, hopcount, + SPBC_ROUTE_CFG_DESTID, route_destid); + rio_mport_write_config_32(mport, destid, hopcount, + SPBC_ROUTE_CFG_PORT, route_port); + } else { + rio_mport_write_config_32(mport, destid, hopcount, + SPP_ROUTE_CFG_DESTID(table), route_destid); + rio_mport_write_config_32(mport, destid, hopcount, + SPP_ROUTE_CFG_PORT(table), route_port); + } + + udelay(10); + + return 0; +} + +static int +tsi57x_route_get_entry(struct rio_mport *mport, u16 destid, u8 hopcount, + u16 table, u16 route_destid, u8 *route_port) +{ + int ret = 0; + u32 result; + + if (table == RIO_GLOBAL_TABLE) { + /* Use local RT of the ingress port to avoid possible + race condition */ + rio_mport_read_config_32(mport, destid, hopcount, + RIO_SWP_INFO_CAR, &result); + table = (result & RIO_SWP_INFO_PORT_NUM_MASK); + } + + rio_mport_write_config_32(mport, destid, hopcount, + SPP_ROUTE_CFG_DESTID(table), route_destid); + rio_mport_read_config_32(mport, destid, hopcount, + SPP_ROUTE_CFG_PORT(table), &result); + + *route_port = (u8)result; + if (*route_port > 15) + ret = -1; + + return ret; +} + +static int +tsi57x_route_clr_table(struct rio_mport *mport, u16 destid, u8 hopcount, + u16 table) +{ + u32 route_idx; + u32 lut_size; + + lut_size = (mport->sys_size) ? 
0x1ff : 0xff; + + if (table == RIO_GLOBAL_TABLE) { + rio_mport_write_config_32(mport, destid, hopcount, + SPBC_ROUTE_CFG_DESTID, 0x80000000); + for (route_idx = 0; route_idx <= lut_size; route_idx++) + rio_mport_write_config_32(mport, destid, hopcount, + SPBC_ROUTE_CFG_PORT, + RIO_INVALID_ROUTE); + } else { + rio_mport_write_config_32(mport, destid, hopcount, + SPP_ROUTE_CFG_DESTID(table), 0x80000000); + for (route_idx = 0; route_idx <= lut_size; route_idx++) + rio_mport_write_config_32(mport, destid, hopcount, + SPP_ROUTE_CFG_PORT(table) , RIO_INVALID_ROUTE); + } + + return 0; +} + +static int +tsi57x_set_domain(struct rio_mport *mport, u16 destid, u8 hopcount, + u8 sw_domain) +{ + u32 regval; + + /* + * Switch domain configuration operates only at global level + */ + + /* Turn off flat (LUT_512) mode */ + rio_mport_read_config_32(mport, destid, hopcount, + TSI578_SP_MODE_GLBL, ®val); + rio_mport_write_config_32(mport, destid, hopcount, TSI578_SP_MODE_GLBL, + regval & ~TSI578_SP_MODE_LUT_512); + /* Set switch domain base */ + rio_mport_write_config_32(mport, destid, hopcount, + TSI578_GLBL_ROUTE_BASE, + (u32)(sw_domain << 24)); + return 0; +} + +static int +tsi57x_get_domain(struct rio_mport *mport, u16 destid, u8 hopcount, + u8 *sw_domain) +{ + u32 regval; + + /* + * Switch domain configuration operates only at global level + */ + rio_mport_read_config_32(mport, destid, hopcount, + TSI578_GLBL_ROUTE_BASE, ®val); + + *sw_domain = (u8)(regval >> 24); + + return 0; +} + +static int +tsi57x_em_init(struct rio_dev *rdev) +{ + u32 regval; + int portnum; + + pr_debug("TSI578 %s [%d:%d]\n", __func__, rdev->destid, rdev->hopcount); + + for (portnum = 0; + portnum < RIO_GET_TOTAL_PORTS(rdev->swpinfo); portnum++) { + /* Make sure that Port-Writes are enabled (for all ports) */ + rio_read_config_32(rdev, + TSI578_SP_MODE(portnum), ®val); + rio_write_config_32(rdev, + TSI578_SP_MODE(portnum), + regval & ~TSI578_SP_MODE_PW_DIS); + + /* Clear all pending interrupts */ + rio_read_config_32(rdev, + RIO_DEV_PORT_N_ERR_STS_CSR(rdev, portnum), + ®val); + rio_write_config_32(rdev, + RIO_DEV_PORT_N_ERR_STS_CSR(rdev, portnum), + regval & 0x07120214); + + rio_read_config_32(rdev, + TSI578_SP_INT_STATUS(portnum), ®val); + rio_write_config_32(rdev, + TSI578_SP_INT_STATUS(portnum), + regval & 0x000700bd); + + /* Enable all interrupts to allow ports to send a port-write */ + rio_read_config_32(rdev, + TSI578_SP_CTL_INDEP(portnum), ®val); + rio_write_config_32(rdev, + TSI578_SP_CTL_INDEP(portnum), + regval | 0x000b0000); + + /* Skip next (odd) port if the current port is in x4 mode */ + rio_read_config_32(rdev, + RIO_DEV_PORT_N_CTL_CSR(rdev, portnum), + ®val); + if ((regval & RIO_PORT_N_CTL_PWIDTH) == RIO_PORT_N_CTL_PWIDTH_4) + portnum++; + } + + /* set TVAL = ~50us */ + rio_write_config_32(rdev, + rdev->phys_efptr + RIO_PORT_LINKTO_CTL_CSR, 0x9a << 8); + + return 0; +} + +static int +tsi57x_em_handler(struct rio_dev *rdev, u8 portnum) +{ + struct rio_mport *mport = rdev->net->hport; + u32 intstat, err_status; + int sendcount, checkcount; + u8 route_port; + u32 regval; + + rio_read_config_32(rdev, + RIO_DEV_PORT_N_ERR_STS_CSR(rdev, portnum), + &err_status); + + if ((err_status & RIO_PORT_N_ERR_STS_PORT_OK) && + (err_status & (RIO_PORT_N_ERR_STS_OUT_ES | + RIO_PORT_N_ERR_STS_INP_ES))) { + /* Remove any queued packets by locking/unlocking port */ + rio_read_config_32(rdev, + RIO_DEV_PORT_N_CTL_CSR(rdev, portnum), + ®val); + if (!(regval & RIO_PORT_N_CTL_LOCKOUT)) { + rio_write_config_32(rdev, + 
RIO_DEV_PORT_N_CTL_CSR(rdev, portnum), + regval | RIO_PORT_N_CTL_LOCKOUT); + udelay(50); + rio_write_config_32(rdev, + RIO_DEV_PORT_N_CTL_CSR(rdev, portnum), + regval); + } + + /* Read from link maintenance response register to clear + * valid bit + */ + rio_read_config_32(rdev, + RIO_DEV_PORT_N_MNT_RSP_CSR(rdev, portnum), + ®val); + + /* Send a Packet-Not-Accepted/Link-Request-Input-Status control + * symbol to recover from IES/OES + */ + sendcount = 3; + while (sendcount) { + rio_write_config_32(rdev, + TSI578_SP_CS_TX(portnum), 0x40fc8000); + checkcount = 3; + while (checkcount--) { + udelay(50); + rio_read_config_32(rdev, + RIO_DEV_PORT_N_MNT_RSP_CSR(rdev, + portnum), + ®val); + if (regval & RIO_PORT_N_MNT_RSP_RVAL) + goto exit_es; + } + + sendcount--; + } + } + +exit_es: + /* Clear implementation specific error status bits */ + rio_read_config_32(rdev, TSI578_SP_INT_STATUS(portnum), &intstat); + pr_debug("TSI578[%x:%x] SP%d_INT_STATUS=0x%08x\n", + rdev->destid, rdev->hopcount, portnum, intstat); + + if (intstat & 0x10000) { + rio_read_config_32(rdev, + TSI578_SP_LUT_PEINF(portnum), ®val); + regval = (mport->sys_size) ? (regval >> 16) : (regval >> 24); + route_port = rdev->rswitch->route_table[regval]; + pr_debug("RIO: TSI578[%s] P%d LUT Parity Error (destID=%d)\n", + rio_name(rdev), portnum, regval); + tsi57x_route_add_entry(mport, rdev->destid, rdev->hopcount, + RIO_GLOBAL_TABLE, regval, route_port); + } + + rio_write_config_32(rdev, TSI578_SP_INT_STATUS(portnum), + intstat & 0x000700bd); + + return 0; +} + +static struct rio_switch_ops tsi57x_switch_ops = { + .owner = THIS_MODULE, + .add_entry = tsi57x_route_add_entry, + .get_entry = tsi57x_route_get_entry, + .clr_table = tsi57x_route_clr_table, + .set_domain = tsi57x_set_domain, + .get_domain = tsi57x_get_domain, + .em_init = tsi57x_em_init, + .em_handle = tsi57x_em_handler, +}; + +static int tsi57x_probe(struct rio_dev *rdev, const struct rio_device_id *id) +{ + pr_debug("RIO: %s for %s\n", __func__, rio_name(rdev)); + + spin_lock(&rdev->rswitch->lock); + + if (rdev->rswitch->ops) { + spin_unlock(&rdev->rswitch->lock); + return -EINVAL; + } + rdev->rswitch->ops = &tsi57x_switch_ops; + + if (rdev->do_enum) { + /* Ensure that default routing is disabled on startup */ + rio_write_config_32(rdev, RIO_STD_RTE_DEFAULT_PORT, + RIO_INVALID_ROUTE); + } + + spin_unlock(&rdev->rswitch->lock); + return 0; +} + +static void tsi57x_remove(struct rio_dev *rdev) +{ + pr_debug("RIO: %s for %s\n", __func__, rio_name(rdev)); + spin_lock(&rdev->rswitch->lock); + if (rdev->rswitch->ops != &tsi57x_switch_ops) { + spin_unlock(&rdev->rswitch->lock); + return; + } + rdev->rswitch->ops = NULL; + spin_unlock(&rdev->rswitch->lock); +} + +static const struct rio_device_id tsi57x_id_table[] = { + {RIO_DEVICE(RIO_DID_TSI572, RIO_VID_TUNDRA)}, + {RIO_DEVICE(RIO_DID_TSI574, RIO_VID_TUNDRA)}, + {RIO_DEVICE(RIO_DID_TSI577, RIO_VID_TUNDRA)}, + {RIO_DEVICE(RIO_DID_TSI578, RIO_VID_TUNDRA)}, + { 0, } /* terminate list */ +}; + +static struct rio_driver tsi57x_driver = { + .name = "tsi57x", + .id_table = tsi57x_id_table, + .probe = tsi57x_probe, + .remove = tsi57x_remove, +}; + +static int __init tsi57x_init(void) +{ + return rio_register_driver(&tsi57x_driver); +} + +static void __exit tsi57x_exit(void) +{ + rio_unregister_driver(&tsi57x_driver); +} + +device_initcall(tsi57x_init); +module_exit(tsi57x_exit); + +MODULE_DESCRIPTION("IDT Tsi57x Serial RapidIO switch family driver"); +MODULE_AUTHOR("Integrated Device Technology, Inc."); +MODULE_LICENSE("GPL"); |
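
Editor's note (not part of the upstream diff): all five switch drivers added above (idt_gen2, idt_gen3, idtcps, tsi568, tsi57x) share one registration pattern. probe() claims the switch by installing a struct rio_switch_ops pointer under rdev->rswitch->lock and refuses with -EINVAL if another driver already owns it, remove() releases the ops pointer under the same lock, and a struct rio_driver binds the ops to a device ID table via rio_register_driver(). The sketch below condenses that shared skeleton for reference only; the "example_*" names and the device/vendor IDs are hypothetical placeholders, and a real driver would also provide get_entry/clr_table and any error-management hooks, as the drivers in this diff do.

// SPDX-License-Identifier: GPL-2.0-or-later
/* Illustrative skeleton of a RapidIO switch driver (hypothetical IDs/names). */

#include <linux/module.h>
#include <linux/rio.h>
#include <linux/rio_drv.h>
#include <linux/rio_ids.h>
#include "../rio.h"

static int example_route_add_entry(struct rio_mport *mport, u16 destid,
				   u8 hopcount, u16 table, u16 route_destid,
				   u8 route_port)
{
	/* Program one routing-table entry via maintenance writes here. */
	return 0;
}

static struct rio_switch_ops example_switch_ops = {
	.owner     = THIS_MODULE,
	.add_entry = example_route_add_entry,
	/* A complete driver also fills in .get_entry, .clr_table, etc. */
};

static int example_probe(struct rio_dev *rdev, const struct rio_device_id *id)
{
	/* Claim the switch only if no other driver has installed its ops. */
	spin_lock(&rdev->rswitch->lock);
	if (rdev->rswitch->ops) {
		spin_unlock(&rdev->rswitch->lock);
		return -EINVAL;
	}
	rdev->rswitch->ops = &example_switch_ops;
	spin_unlock(&rdev->rswitch->lock);
	return 0;
}

static void example_remove(struct rio_dev *rdev)
{
	/* Release the switch only if this driver owns it. */
	spin_lock(&rdev->rswitch->lock);
	if (rdev->rswitch->ops == &example_switch_ops)
		rdev->rswitch->ops = NULL;
	spin_unlock(&rdev->rswitch->lock);
}

static const struct rio_device_id example_id_table[] = {
	{RIO_DEVICE(0x1234, 0x5678)},	/* hypothetical device/vendor IDs */
	{ 0, }				/* terminate list */
};

static struct rio_driver example_driver = {
	.name     = "example_switch",
	.id_table = example_id_table,
	.probe    = example_probe,
	.remove   = example_remove,
};

static int __init example_init(void)
{
	return rio_register_driver(&example_driver);
}

static void __exit example_exit(void)
{
	rio_unregister_driver(&example_driver);
}

device_initcall(example_init);
module_exit(example_exit);

MODULE_DESCRIPTION("Illustrative RapidIO switch driver skeleton");
MODULE_LICENSE("GPL");

As in the drivers above, the ops structure is what rio-scan and the RapidIO core call back into for routing-table maintenance, so the probe/remove locking around rswitch->ops is what prevents two drivers (or a driver and a module unload) from racing on the same switch device.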