author     Daniel Baumann <daniel.baumann@progress-linux.org>    2024-04-27 10:05:51 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>    2024-04-27 10:05:51 +0000
commit     5d1646d90e1f2cceb9f0828f4b28318cd0ec7744 (patch)
tree       a94efe259b9009378be6d90eb30d2b019d95c194 /drivers/dma/dw-edma
parent     Initial commit. (diff)
Adding upstream version 5.10.209.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'drivers/dma/dw-edma')
-rw-r--r--   drivers/dma/dw-edma/Kconfig              |  19
-rw-r--r--   drivers/dma/dw-edma/Makefile             |   7
-rw-r--r--   drivers/dma/dw-edma/dw-edma-core.c       | 969
-rw-r--r--   drivers/dma/dw-edma/dw-edma-core.h       | 169
-rw-r--r--   drivers/dma/dw-edma/dw-edma-pcie.c       | 239
-rw-r--r--   drivers/dma/dw-edma/dw-edma-v0-core.c    | 350
-rw-r--r--   drivers/dma/dw-edma/dw-edma-v0-core.h    |  28
-rw-r--r--   drivers/dma/dw-edma/dw-edma-v0-debugfs.c | 311
-rw-r--r--   drivers/dma/dw-edma/dw-edma-v0-debugfs.h |  27
-rw-r--r--   drivers/dma/dw-edma/dw-edma-v0-regs.h    | 158
10 files changed, 2277 insertions(+), 0 deletions(-)
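
For orientation before the diff itself: the core added in dw-edma-core.c is driven through a struct dw_edma_chip handed to dw_edma_probe(), which is exactly what the PCIe glue (dw-edma-pcie.c) below does. The following is a minimal sketch of how other glue code could do the same; the my_irq_vector()/my_edma_register() names, the platform-device IRQ lookup and all region addresses and sizes are hypothetical, and only the dw_edma_chip/dw_edma fields filled in below are taken from dw-edma-core.h in this patch.

/*
 * Hypothetical glue driver (not part of this commit): register an eDMA
 * controller with the core, mirroring what dw-edma-pcie.c does below.
 */
#include <linux/device.h>
#include <linux/platform_device.h>
#include <linux/dma/edma.h>

#include "dw-edma-core.h"

static int my_irq_vector(struct device *dev, unsigned int nr)
{
	/* Hypothetical mapping from logical IRQ index to a Linux IRQ. */
	return platform_get_irq(to_platform_device(dev), nr);
}

static const struct dw_edma_core_ops my_ops = {
	.irq_vector = my_irq_vector,
};

static int my_edma_register(struct device *dev, void __iomem *regs,
			    void __iomem *ll_virt, phys_addr_t ll_phys,
			    size_t ll_sz)
{
	struct dw_edma_chip *chip;
	struct dw_edma *dw;

	chip = devm_kzalloc(dev, sizeof(*chip), GFP_KERNEL);
	dw = devm_kzalloc(dev, sizeof(*dw), GFP_KERNEL);
	if (!chip || !dw)
		return -ENOMEM;

	chip->dev = dev;
	chip->id = 0;
	chip->dw = dw;

	dw->rg_region.vaddr = regs;	/* eDMA register block */
	dw->ll_region.vaddr = ll_virt;	/* linked-list memory  */
	dw->ll_region.paddr = ll_phys;
	dw->ll_region.sz = ll_sz;
	/*
	 * dt_region, version and mode must also be filled in, exactly as
	 * dw-edma-pcie.c does, before the core is probed.
	 */
	dw->mode = EDMA_MODE_UNROLL;
	dw->nr_irqs = 1;
	dw->ops = &my_ops;

	dw->irq = devm_kcalloc(dev, dw->nr_irqs, sizeof(*dw->irq),
			       GFP_KERNEL);
	if (!dw->irq)
		return -ENOMEM;

	return dw_edma_probe(chip);
}
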
diff --git a/drivers/dma/dw-edma/Kconfig b/drivers/dma/dw-edma/Kconfig new file mode 100644 index 000000000..7ff17b2db --- /dev/null +++ b/drivers/dma/dw-edma/Kconfig @@ -0,0 +1,19 @@ +# SPDX-License-Identifier: GPL-2.0 + +config DW_EDMA + tristate "Synopsys DesignWare eDMA controller driver" + depends on PCI && PCI_MSI + select DMA_ENGINE + select DMA_VIRTUAL_CHANNELS + help + Support the Synopsys DesignWare eDMA controller, normally + implemented on endpoints SoCs. + +config DW_EDMA_PCIE + tristate "Synopsys DesignWare eDMA PCIe driver" + depends on PCI && PCI_MSI + select DW_EDMA + help + Provides a glue-logic between the Synopsys DesignWare + eDMA controller and an endpoint PCIe device. This also serves + as a reference design to whom desires to use this IP. diff --git a/drivers/dma/dw-edma/Makefile b/drivers/dma/dw-edma/Makefile new file mode 100644 index 000000000..8d45c0d56 --- /dev/null +++ b/drivers/dma/dw-edma/Makefile @@ -0,0 +1,7 @@ +# SPDX-License-Identifier: GPL-2.0 + +obj-$(CONFIG_DW_EDMA) += dw-edma.o +dw-edma-$(CONFIG_DEBUG_FS) := dw-edma-v0-debugfs.o +dw-edma-objs := dw-edma-core.o \ + dw-edma-v0-core.o $(dw-edma-y) +obj-$(CONFIG_DW_EDMA_PCIE) += dw-edma-pcie.o diff --git a/drivers/dma/dw-edma/dw-edma-core.c b/drivers/dma/dw-edma/dw-edma-core.c new file mode 100644 index 000000000..f91dbf43a --- /dev/null +++ b/drivers/dma/dw-edma/dw-edma-core.c @@ -0,0 +1,969 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2018-2019 Synopsys, Inc. and/or its affiliates. + * Synopsys DesignWare eDMA core driver + * + * Author: Gustavo Pimentel <gustavo.pimentel@synopsys.com> + */ + +#include <linux/module.h> +#include <linux/device.h> +#include <linux/kernel.h> +#include <linux/pm_runtime.h> +#include <linux/dmaengine.h> +#include <linux/err.h> +#include <linux/interrupt.h> +#include <linux/irq.h> +#include <linux/dma/edma.h> +#include <linux/dma-mapping.h> + +#include "dw-edma-core.h" +#include "dw-edma-v0-core.h" +#include "../dmaengine.h" +#include "../virt-dma.h" + +static inline +struct device *dchan2dev(struct dma_chan *dchan) +{ + return &dchan->dev->device; +} + +static inline +struct device *chan2dev(struct dw_edma_chan *chan) +{ + return &chan->vc.chan.dev->device; +} + +static inline +struct dw_edma_desc *vd2dw_edma_desc(struct virt_dma_desc *vd) +{ + return container_of(vd, struct dw_edma_desc, vd); +} + +static struct dw_edma_burst *dw_edma_alloc_burst(struct dw_edma_chunk *chunk) +{ + struct dw_edma_burst *burst; + + burst = kzalloc(sizeof(*burst), GFP_NOWAIT); + if (unlikely(!burst)) + return NULL; + + INIT_LIST_HEAD(&burst->list); + if (chunk->burst) { + /* Create and add new element into the linked list */ + chunk->bursts_alloc++; + list_add_tail(&burst->list, &chunk->burst->list); + } else { + /* List head */ + chunk->bursts_alloc = 0; + chunk->burst = burst; + } + + return burst; +} + +static struct dw_edma_chunk *dw_edma_alloc_chunk(struct dw_edma_desc *desc) +{ + struct dw_edma_chan *chan = desc->chan; + struct dw_edma *dw = chan->chip->dw; + struct dw_edma_chunk *chunk; + + chunk = kzalloc(sizeof(*chunk), GFP_NOWAIT); + if (unlikely(!chunk)) + return NULL; + + INIT_LIST_HEAD(&chunk->list); + chunk->chan = chan; + /* Toggling change bit (CB) in each chunk, this is a mechanism to + * inform the eDMA HW block that this is a new linked list ready + * to be consumed. 
+ * - Odd chunks originate CB equal to 0 + * - Even chunks originate CB equal to 1 + */ + chunk->cb = !(desc->chunks_alloc % 2); + chunk->ll_region.paddr = dw->ll_region.paddr + chan->ll_off; + chunk->ll_region.vaddr = dw->ll_region.vaddr + chan->ll_off; + + if (desc->chunk) { + /* Create and add new element into the linked list */ + if (!dw_edma_alloc_burst(chunk)) { + kfree(chunk); + return NULL; + } + desc->chunks_alloc++; + list_add_tail(&chunk->list, &desc->chunk->list); + } else { + /* List head */ + chunk->burst = NULL; + desc->chunks_alloc = 0; + desc->chunk = chunk; + } + + return chunk; +} + +static struct dw_edma_desc *dw_edma_alloc_desc(struct dw_edma_chan *chan) +{ + struct dw_edma_desc *desc; + + desc = kzalloc(sizeof(*desc), GFP_NOWAIT); + if (unlikely(!desc)) + return NULL; + + desc->chan = chan; + if (!dw_edma_alloc_chunk(desc)) { + kfree(desc); + return NULL; + } + + return desc; +} + +static void dw_edma_free_burst(struct dw_edma_chunk *chunk) +{ + struct dw_edma_burst *child, *_next; + + /* Remove all the list elements */ + list_for_each_entry_safe(child, _next, &chunk->burst->list, list) { + list_del(&child->list); + kfree(child); + chunk->bursts_alloc--; + } + + /* Remove the list head */ + kfree(child); + chunk->burst = NULL; +} + +static void dw_edma_free_chunk(struct dw_edma_desc *desc) +{ + struct dw_edma_chunk *child, *_next; + + if (!desc->chunk) + return; + + /* Remove all the list elements */ + list_for_each_entry_safe(child, _next, &desc->chunk->list, list) { + dw_edma_free_burst(child); + list_del(&child->list); + kfree(child); + desc->chunks_alloc--; + } + + /* Remove the list head */ + kfree(child); + desc->chunk = NULL; +} + +static void dw_edma_free_desc(struct dw_edma_desc *desc) +{ + dw_edma_free_chunk(desc); + kfree(desc); +} + +static void vchan_free_desc(struct virt_dma_desc *vdesc) +{ + dw_edma_free_desc(vd2dw_edma_desc(vdesc)); +} + +static int dw_edma_start_transfer(struct dw_edma_chan *chan) +{ + struct dw_edma_chunk *child; + struct dw_edma_desc *desc; + struct virt_dma_desc *vd; + + vd = vchan_next_desc(&chan->vc); + if (!vd) + return 0; + + desc = vd2dw_edma_desc(vd); + if (!desc) + return 0; + + child = list_first_entry_or_null(&desc->chunk->list, + struct dw_edma_chunk, list); + if (!child) + return 0; + + dw_edma_v0_core_start(child, !desc->xfer_sz); + desc->xfer_sz += child->ll_region.sz; + dw_edma_free_burst(child); + list_del(&child->list); + kfree(child); + desc->chunks_alloc--; + + return 1; +} + +static int dw_edma_device_config(struct dma_chan *dchan, + struct dma_slave_config *config) +{ + struct dw_edma_chan *chan = dchan2dw_edma_chan(dchan); + + memcpy(&chan->config, config, sizeof(*config)); + chan->configured = true; + + return 0; +} + +static int dw_edma_device_pause(struct dma_chan *dchan) +{ + struct dw_edma_chan *chan = dchan2dw_edma_chan(dchan); + int err = 0; + + if (!chan->configured) + err = -EPERM; + else if (chan->status != EDMA_ST_BUSY) + err = -EPERM; + else if (chan->request != EDMA_REQ_NONE) + err = -EPERM; + else + chan->request = EDMA_REQ_PAUSE; + + return err; +} + +static int dw_edma_device_resume(struct dma_chan *dchan) +{ + struct dw_edma_chan *chan = dchan2dw_edma_chan(dchan); + int err = 0; + + if (!chan->configured) { + err = -EPERM; + } else if (chan->status != EDMA_ST_PAUSE) { + err = -EPERM; + } else if (chan->request != EDMA_REQ_NONE) { + err = -EPERM; + } else { + chan->status = EDMA_ST_BUSY; + dw_edma_start_transfer(chan); + } + + return err; +} + +static int dw_edma_device_terminate_all(struct 
dma_chan *dchan) +{ + struct dw_edma_chan *chan = dchan2dw_edma_chan(dchan); + int err = 0; + LIST_HEAD(head); + + if (!chan->configured) { + /* Do nothing */ + } else if (chan->status == EDMA_ST_PAUSE) { + chan->status = EDMA_ST_IDLE; + chan->configured = false; + } else if (chan->status == EDMA_ST_IDLE) { + chan->configured = false; + } else if (dw_edma_v0_core_ch_status(chan) == DMA_COMPLETE) { + /* + * The channel is in a false BUSY state, probably didn't + * receive or lost an interrupt + */ + chan->status = EDMA_ST_IDLE; + chan->configured = false; + } else if (chan->request > EDMA_REQ_PAUSE) { + err = -EPERM; + } else { + chan->request = EDMA_REQ_STOP; + } + + return err; +} + +static void dw_edma_device_issue_pending(struct dma_chan *dchan) +{ + struct dw_edma_chan *chan = dchan2dw_edma_chan(dchan); + unsigned long flags; + + if (!chan->configured) + return; + + spin_lock_irqsave(&chan->vc.lock, flags); + if (vchan_issue_pending(&chan->vc) && chan->request == EDMA_REQ_NONE && + chan->status == EDMA_ST_IDLE) { + chan->status = EDMA_ST_BUSY; + dw_edma_start_transfer(chan); + } + spin_unlock_irqrestore(&chan->vc.lock, flags); +} + +static enum dma_status +dw_edma_device_tx_status(struct dma_chan *dchan, dma_cookie_t cookie, + struct dma_tx_state *txstate) +{ + struct dw_edma_chan *chan = dchan2dw_edma_chan(dchan); + struct dw_edma_desc *desc; + struct virt_dma_desc *vd; + unsigned long flags; + enum dma_status ret; + u32 residue = 0; + + ret = dma_cookie_status(dchan, cookie, txstate); + if (ret == DMA_COMPLETE) + return ret; + + if (ret == DMA_IN_PROGRESS && chan->status == EDMA_ST_PAUSE) + ret = DMA_PAUSED; + + if (!txstate) + goto ret_residue; + + spin_lock_irqsave(&chan->vc.lock, flags); + vd = vchan_find_desc(&chan->vc, cookie); + if (vd) { + desc = vd2dw_edma_desc(vd); + if (desc) + residue = desc->alloc_sz - desc->xfer_sz; + } + spin_unlock_irqrestore(&chan->vc.lock, flags); + +ret_residue: + dma_set_residue(txstate, residue); + + return ret; +} + +static struct dma_async_tx_descriptor * +dw_edma_device_transfer(struct dw_edma_transfer *xfer) +{ + struct dw_edma_chan *chan = dchan2dw_edma_chan(xfer->dchan); + enum dma_transfer_direction dir = xfer->direction; + phys_addr_t src_addr, dst_addr; + struct scatterlist *sg = NULL; + struct dw_edma_chunk *chunk; + struct dw_edma_burst *burst; + struct dw_edma_desc *desc; + u32 cnt; + int i; + + if (!chan->configured) + return NULL; + + switch (chan->config.direction) { + case DMA_DEV_TO_MEM: /* local dma */ + if (dir == DMA_DEV_TO_MEM && chan->dir == EDMA_DIR_READ) + break; + return NULL; + case DMA_MEM_TO_DEV: /* local dma */ + if (dir == DMA_MEM_TO_DEV && chan->dir == EDMA_DIR_WRITE) + break; + return NULL; + default: /* remote dma */ + if (dir == DMA_MEM_TO_DEV && chan->dir == EDMA_DIR_READ) + break; + if (dir == DMA_DEV_TO_MEM && chan->dir == EDMA_DIR_WRITE) + break; + return NULL; + } + + if (xfer->cyclic) { + if (!xfer->xfer.cyclic.len || !xfer->xfer.cyclic.cnt) + return NULL; + } else { + if (xfer->xfer.sg.len < 1) + return NULL; + } + + desc = dw_edma_alloc_desc(chan); + if (unlikely(!desc)) + goto err_alloc; + + chunk = dw_edma_alloc_chunk(desc); + if (unlikely(!chunk)) + goto err_alloc; + + src_addr = chan->config.src_addr; + dst_addr = chan->config.dst_addr; + + if (xfer->cyclic) { + cnt = xfer->xfer.cyclic.cnt; + } else { + cnt = xfer->xfer.sg.len; + sg = xfer->xfer.sg.sgl; + } + + for (i = 0; i < cnt; i++) { + if (!xfer->cyclic && !sg) + break; + + if (chunk->bursts_alloc == chan->ll_max) { + chunk = 
dw_edma_alloc_chunk(desc); + if (unlikely(!chunk)) + goto err_alloc; + } + + burst = dw_edma_alloc_burst(chunk); + if (unlikely(!burst)) + goto err_alloc; + + if (xfer->cyclic) + burst->sz = xfer->xfer.cyclic.len; + else + burst->sz = sg_dma_len(sg); + + chunk->ll_region.sz += burst->sz; + desc->alloc_sz += burst->sz; + + if (dir == DMA_DEV_TO_MEM) { + burst->sar = src_addr; + if (xfer->cyclic) { + burst->dar = xfer->xfer.cyclic.paddr; + } else { + burst->dar = dst_addr; + /* Unlike the typical assumption by other + * drivers/IPs the peripheral memory isn't + * a FIFO memory, in this case, it's a + * linear memory and that why the source + * and destination addresses are increased + * by the same portion (data length) + */ + } + } else { + burst->dar = dst_addr; + if (xfer->cyclic) { + burst->sar = xfer->xfer.cyclic.paddr; + } else { + burst->sar = src_addr; + /* Unlike the typical assumption by other + * drivers/IPs the peripheral memory isn't + * a FIFO memory, in this case, it's a + * linear memory and that why the source + * and destination addresses are increased + * by the same portion (data length) + */ + } + } + + if (!xfer->cyclic) { + src_addr += sg_dma_len(sg); + dst_addr += sg_dma_len(sg); + sg = sg_next(sg); + } + } + + return vchan_tx_prep(&chan->vc, &desc->vd, xfer->flags); + +err_alloc: + if (desc) + dw_edma_free_desc(desc); + + return NULL; +} + +static struct dma_async_tx_descriptor * +dw_edma_device_prep_slave_sg(struct dma_chan *dchan, struct scatterlist *sgl, + unsigned int len, + enum dma_transfer_direction direction, + unsigned long flags, void *context) +{ + struct dw_edma_transfer xfer; + + xfer.dchan = dchan; + xfer.direction = direction; + xfer.xfer.sg.sgl = sgl; + xfer.xfer.sg.len = len; + xfer.flags = flags; + xfer.cyclic = false; + + return dw_edma_device_transfer(&xfer); +} + +static struct dma_async_tx_descriptor * +dw_edma_device_prep_dma_cyclic(struct dma_chan *dchan, dma_addr_t paddr, + size_t len, size_t count, + enum dma_transfer_direction direction, + unsigned long flags) +{ + struct dw_edma_transfer xfer; + + xfer.dchan = dchan; + xfer.direction = direction; + xfer.xfer.cyclic.paddr = paddr; + xfer.xfer.cyclic.len = len; + xfer.xfer.cyclic.cnt = count; + xfer.flags = flags; + xfer.cyclic = true; + + return dw_edma_device_transfer(&xfer); +} + +static void dw_edma_done_interrupt(struct dw_edma_chan *chan) +{ + struct dw_edma_desc *desc; + struct virt_dma_desc *vd; + unsigned long flags; + + dw_edma_v0_core_clear_done_int(chan); + + spin_lock_irqsave(&chan->vc.lock, flags); + vd = vchan_next_desc(&chan->vc); + if (vd) { + switch (chan->request) { + case EDMA_REQ_NONE: + desc = vd2dw_edma_desc(vd); + if (!desc->chunks_alloc) { + list_del(&vd->node); + vchan_cookie_complete(vd); + } + + /* Continue transferring if there are remaining chunks or issued requests. + */ + chan->status = dw_edma_start_transfer(chan) ? 
EDMA_ST_BUSY : EDMA_ST_IDLE; + break; + + case EDMA_REQ_STOP: + list_del(&vd->node); + vchan_cookie_complete(vd); + chan->request = EDMA_REQ_NONE; + chan->status = EDMA_ST_IDLE; + break; + + case EDMA_REQ_PAUSE: + chan->request = EDMA_REQ_NONE; + chan->status = EDMA_ST_PAUSE; + break; + + default: + break; + } + } + spin_unlock_irqrestore(&chan->vc.lock, flags); +} + +static void dw_edma_abort_interrupt(struct dw_edma_chan *chan) +{ + struct virt_dma_desc *vd; + unsigned long flags; + + dw_edma_v0_core_clear_abort_int(chan); + + spin_lock_irqsave(&chan->vc.lock, flags); + vd = vchan_next_desc(&chan->vc); + if (vd) { + list_del(&vd->node); + vchan_cookie_complete(vd); + } + spin_unlock_irqrestore(&chan->vc.lock, flags); + chan->request = EDMA_REQ_NONE; + chan->status = EDMA_ST_IDLE; +} + +static irqreturn_t dw_edma_interrupt(int irq, void *data, bool write) +{ + struct dw_edma_irq *dw_irq = data; + struct dw_edma *dw = dw_irq->dw; + unsigned long total, pos, val; + unsigned long off; + u32 mask; + + if (write) { + total = dw->wr_ch_cnt; + off = 0; + mask = dw_irq->wr_mask; + } else { + total = dw->rd_ch_cnt; + off = dw->wr_ch_cnt; + mask = dw_irq->rd_mask; + } + + val = dw_edma_v0_core_status_done_int(dw, write ? + EDMA_DIR_WRITE : + EDMA_DIR_READ); + val &= mask; + for_each_set_bit(pos, &val, total) { + struct dw_edma_chan *chan = &dw->chan[pos + off]; + + dw_edma_done_interrupt(chan); + } + + val = dw_edma_v0_core_status_abort_int(dw, write ? + EDMA_DIR_WRITE : + EDMA_DIR_READ); + val &= mask; + for_each_set_bit(pos, &val, total) { + struct dw_edma_chan *chan = &dw->chan[pos + off]; + + dw_edma_abort_interrupt(chan); + } + + return IRQ_HANDLED; +} + +static inline irqreturn_t dw_edma_interrupt_write(int irq, void *data) +{ + return dw_edma_interrupt(irq, data, true); +} + +static inline irqreturn_t dw_edma_interrupt_read(int irq, void *data) +{ + return dw_edma_interrupt(irq, data, false); +} + +static irqreturn_t dw_edma_interrupt_common(int irq, void *data) +{ + dw_edma_interrupt(irq, data, true); + dw_edma_interrupt(irq, data, false); + + return IRQ_HANDLED; +} + +static int dw_edma_alloc_chan_resources(struct dma_chan *dchan) +{ + struct dw_edma_chan *chan = dchan2dw_edma_chan(dchan); + + if (chan->status != EDMA_ST_IDLE) + return -EBUSY; + + pm_runtime_get(chan->chip->dev); + + return 0; +} + +static void dw_edma_free_chan_resources(struct dma_chan *dchan) +{ + unsigned long timeout = jiffies + msecs_to_jiffies(5000); + struct dw_edma_chan *chan = dchan2dw_edma_chan(dchan); + int ret; + + while (time_before(jiffies, timeout)) { + ret = dw_edma_device_terminate_all(dchan); + if (!ret) + break; + + if (time_after_eq(jiffies, timeout)) + return; + + cpu_relax(); + } + + pm_runtime_put(chan->chip->dev); +} + +static int dw_edma_channel_setup(struct dw_edma_chip *chip, bool write, + u32 wr_alloc, u32 rd_alloc) +{ + struct dw_edma_region *dt_region; + struct device *dev = chip->dev; + struct dw_edma *dw = chip->dw; + struct dw_edma_chan *chan; + size_t ll_chunk, dt_chunk; + struct dw_edma_irq *irq; + struct dma_device *dma; + u32 i, j, cnt, ch_cnt; + u32 alloc, off_alloc; + int err = 0; + u32 pos; + + ch_cnt = dw->wr_ch_cnt + dw->rd_ch_cnt; + ll_chunk = dw->ll_region.sz; + dt_chunk = dw->dt_region.sz; + + /* Calculate linked list chunk for each channel */ + ll_chunk /= roundup_pow_of_two(ch_cnt); + + /* Calculate linked list chunk for each channel */ + dt_chunk /= roundup_pow_of_two(ch_cnt); + + if (write) { + i = 0; + cnt = dw->wr_ch_cnt; + dma = &dw->wr_edma; + alloc = wr_alloc; + 
off_alloc = 0; + } else { + i = dw->wr_ch_cnt; + cnt = dw->rd_ch_cnt; + dma = &dw->rd_edma; + alloc = rd_alloc; + off_alloc = wr_alloc; + } + + INIT_LIST_HEAD(&dma->channels); + for (j = 0; (alloc || dw->nr_irqs == 1) && j < cnt; j++, i++) { + chan = &dw->chan[i]; + + dt_region = devm_kzalloc(dev, sizeof(*dt_region), GFP_KERNEL); + if (!dt_region) + return -ENOMEM; + + chan->vc.chan.private = dt_region; + + chan->chip = chip; + chan->id = j; + chan->dir = write ? EDMA_DIR_WRITE : EDMA_DIR_READ; + chan->configured = false; + chan->request = EDMA_REQ_NONE; + chan->status = EDMA_ST_IDLE; + + chan->ll_off = (ll_chunk * i); + chan->ll_max = (ll_chunk / EDMA_LL_SZ) - 1; + + chan->dt_off = (dt_chunk * i); + + dev_vdbg(dev, "L. List:\tChannel %s[%u] off=0x%.8lx, max_cnt=%u\n", + write ? "write" : "read", j, + chan->ll_off, chan->ll_max); + + if (dw->nr_irqs == 1) + pos = 0; + else + pos = off_alloc + (j % alloc); + + irq = &dw->irq[pos]; + + if (write) + irq->wr_mask |= BIT(j); + else + irq->rd_mask |= BIT(j); + + irq->dw = dw; + memcpy(&chan->msi, &irq->msi, sizeof(chan->msi)); + + dev_vdbg(dev, "MSI:\t\tChannel %s[%u] addr=0x%.8x%.8x, data=0x%.8x\n", + write ? "write" : "read", j, + chan->msi.address_hi, chan->msi.address_lo, + chan->msi.data); + + chan->vc.desc_free = vchan_free_desc; + vchan_init(&chan->vc, dma); + + dt_region->paddr = dw->dt_region.paddr + chan->dt_off; + dt_region->vaddr = dw->dt_region.vaddr + chan->dt_off; + dt_region->sz = dt_chunk; + + dev_vdbg(dev, "Data:\tChannel %s[%u] off=0x%.8lx\n", + write ? "write" : "read", j, chan->dt_off); + + dw_edma_v0_core_device_config(chan); + } + + /* Set DMA channel capabilities */ + dma_cap_zero(dma->cap_mask); + dma_cap_set(DMA_SLAVE, dma->cap_mask); + dma_cap_set(DMA_CYCLIC, dma->cap_mask); + dma_cap_set(DMA_PRIVATE, dma->cap_mask); + dma->directions = BIT(write ? 
DMA_DEV_TO_MEM : DMA_MEM_TO_DEV); + dma->src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES); + dma->dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES); + dma->residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR; + dma->chancnt = cnt; + + /* Set DMA channel callbacks */ + dma->dev = chip->dev; + dma->device_alloc_chan_resources = dw_edma_alloc_chan_resources; + dma->device_free_chan_resources = dw_edma_free_chan_resources; + dma->device_config = dw_edma_device_config; + dma->device_pause = dw_edma_device_pause; + dma->device_resume = dw_edma_device_resume; + dma->device_terminate_all = dw_edma_device_terminate_all; + dma->device_issue_pending = dw_edma_device_issue_pending; + dma->device_tx_status = dw_edma_device_tx_status; + dma->device_prep_slave_sg = dw_edma_device_prep_slave_sg; + dma->device_prep_dma_cyclic = dw_edma_device_prep_dma_cyclic; + + dma_set_max_seg_size(dma->dev, U32_MAX); + + /* Register DMA device */ + err = dma_async_device_register(dma); + + return err; +} + +static inline void dw_edma_dec_irq_alloc(int *nr_irqs, u32 *alloc, u16 cnt) +{ + if (*nr_irqs && *alloc < cnt) { + (*alloc)++; + (*nr_irqs)--; + } +} + +static inline void dw_edma_add_irq_mask(u32 *mask, u32 alloc, u16 cnt) +{ + while (*mask * alloc < cnt) + (*mask)++; +} + +static int dw_edma_irq_request(struct dw_edma_chip *chip, + u32 *wr_alloc, u32 *rd_alloc) +{ + struct device *dev = chip->dev; + struct dw_edma *dw = chip->dw; + u32 wr_mask = 1; + u32 rd_mask = 1; + int i, err = 0; + u32 ch_cnt; + int irq; + + ch_cnt = dw->wr_ch_cnt + dw->rd_ch_cnt; + + if (dw->nr_irqs < 1) + return -EINVAL; + + if (dw->nr_irqs == 1) { + /* Common IRQ shared among all channels */ + irq = dw->ops->irq_vector(dev, 0); + err = request_irq(irq, dw_edma_interrupt_common, + IRQF_SHARED, dw->name, &dw->irq[0]); + if (err) { + dw->nr_irqs = 0; + return err; + } + + if (irq_get_msi_desc(irq)) + get_cached_msi_msg(irq, &dw->irq[0].msi); + } else { + /* Distribute IRQs equally among all channels */ + int tmp = dw->nr_irqs; + + while (tmp && (*wr_alloc + *rd_alloc) < ch_cnt) { + dw_edma_dec_irq_alloc(&tmp, wr_alloc, dw->wr_ch_cnt); + dw_edma_dec_irq_alloc(&tmp, rd_alloc, dw->rd_ch_cnt); + } + + dw_edma_add_irq_mask(&wr_mask, *wr_alloc, dw->wr_ch_cnt); + dw_edma_add_irq_mask(&rd_mask, *rd_alloc, dw->rd_ch_cnt); + + for (i = 0; i < (*wr_alloc + *rd_alloc); i++) { + irq = dw->ops->irq_vector(dev, i); + err = request_irq(irq, + i < *wr_alloc ? 
+ dw_edma_interrupt_write : + dw_edma_interrupt_read, + IRQF_SHARED, dw->name, + &dw->irq[i]); + if (err) { + dw->nr_irqs = i; + return err; + } + + if (irq_get_msi_desc(irq)) + get_cached_msi_msg(irq, &dw->irq[i].msi); + } + + dw->nr_irqs = i; + } + + return err; +} + +int dw_edma_probe(struct dw_edma_chip *chip) +{ + struct device *dev; + struct dw_edma *dw; + u32 wr_alloc = 0; + u32 rd_alloc = 0; + int i, err; + + if (!chip) + return -EINVAL; + + dev = chip->dev; + if (!dev) + return -EINVAL; + + dw = chip->dw; + if (!dw || !dw->irq || !dw->ops || !dw->ops->irq_vector) + return -EINVAL; + + raw_spin_lock_init(&dw->lock); + + /* Find out how many write channels are supported by hardware */ + dw->wr_ch_cnt = dw_edma_v0_core_ch_count(dw, EDMA_DIR_WRITE); + if (!dw->wr_ch_cnt) + return -EINVAL; + + /* Find out how many read channels are supported by hardware */ + dw->rd_ch_cnt = dw_edma_v0_core_ch_count(dw, EDMA_DIR_READ); + if (!dw->rd_ch_cnt) + return -EINVAL; + + dev_vdbg(dev, "Channels:\twrite=%d, read=%d\n", + dw->wr_ch_cnt, dw->rd_ch_cnt); + + /* Allocate channels */ + dw->chan = devm_kcalloc(dev, dw->wr_ch_cnt + dw->rd_ch_cnt, + sizeof(*dw->chan), GFP_KERNEL); + if (!dw->chan) + return -ENOMEM; + + snprintf(dw->name, sizeof(dw->name), "dw-edma-core:%d", chip->id); + + /* Disable eDMA, only to establish the ideal initial conditions */ + dw_edma_v0_core_off(dw); + + /* Request IRQs */ + err = dw_edma_irq_request(chip, &wr_alloc, &rd_alloc); + if (err) + return err; + + /* Setup write channels */ + err = dw_edma_channel_setup(chip, true, wr_alloc, rd_alloc); + if (err) + goto err_irq_free; + + /* Setup read channels */ + err = dw_edma_channel_setup(chip, false, wr_alloc, rd_alloc); + if (err) + goto err_irq_free; + + /* Power management */ + pm_runtime_enable(dev); + + /* Turn debugfs on */ + dw_edma_v0_core_debugfs_on(chip); + + return 0; + +err_irq_free: + for (i = (dw->nr_irqs - 1); i >= 0; i--) + free_irq(dw->ops->irq_vector(dev, i), &dw->irq[i]); + + dw->nr_irqs = 0; + + return err; +} +EXPORT_SYMBOL_GPL(dw_edma_probe); + +int dw_edma_remove(struct dw_edma_chip *chip) +{ + struct dw_edma_chan *chan, *_chan; + struct device *dev = chip->dev; + struct dw_edma *dw = chip->dw; + int i; + + /* Disable eDMA */ + dw_edma_v0_core_off(dw); + + /* Free irqs */ + for (i = (dw->nr_irqs - 1); i >= 0; i--) + free_irq(dw->ops->irq_vector(dev, i), &dw->irq[i]); + + /* Power management */ + pm_runtime_disable(dev); + + /* Deregister eDMA device */ + dma_async_device_unregister(&dw->wr_edma); + list_for_each_entry_safe(chan, _chan, &dw->wr_edma.channels, + vc.chan.device_node) { + tasklet_kill(&chan->vc.task); + list_del(&chan->vc.chan.device_node); + } + + dma_async_device_unregister(&dw->rd_edma); + list_for_each_entry_safe(chan, _chan, &dw->rd_edma.channels, + vc.chan.device_node) { + tasklet_kill(&chan->vc.task); + list_del(&chan->vc.chan.device_node); + } + + /* Turn debugfs off */ + dw_edma_v0_core_debugfs_off(); + + return 0; +} +EXPORT_SYMBOL_GPL(dw_edma_remove); + +MODULE_LICENSE("GPL v2"); +MODULE_DESCRIPTION("Synopsys DesignWare eDMA controller core driver"); +MODULE_AUTHOR("Gustavo Pimentel <gustavo.pimentel@synopsys.com>"); diff --git a/drivers/dma/dw-edma/dw-edma-core.h b/drivers/dma/dw-edma/dw-edma-core.h new file mode 100644 index 000000000..31fc50d31 --- /dev/null +++ b/drivers/dma/dw-edma/dw-edma-core.h @@ -0,0 +1,169 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2018-2019 Synopsys, Inc. and/or its affiliates. 
+ * Synopsys DesignWare eDMA core driver + * + * Author: Gustavo Pimentel <gustavo.pimentel@synopsys.com> + */ + +#ifndef _DW_EDMA_CORE_H +#define _DW_EDMA_CORE_H + +#include <linux/msi.h> +#include <linux/dma/edma.h> + +#include "../virt-dma.h" + +#define EDMA_LL_SZ 24 + +enum dw_edma_dir { + EDMA_DIR_WRITE = 0, + EDMA_DIR_READ +}; + +enum dw_edma_mode { + EDMA_MODE_LEGACY = 0, + EDMA_MODE_UNROLL +}; + +enum dw_edma_request { + EDMA_REQ_NONE = 0, + EDMA_REQ_STOP, + EDMA_REQ_PAUSE +}; + +enum dw_edma_status { + EDMA_ST_IDLE = 0, + EDMA_ST_PAUSE, + EDMA_ST_BUSY +}; + +struct dw_edma_chan; +struct dw_edma_chunk; + +struct dw_edma_burst { + struct list_head list; + u64 sar; + u64 dar; + u32 sz; +}; + +struct dw_edma_region { + phys_addr_t paddr; + void __iomem *vaddr; + size_t sz; +}; + +struct dw_edma_chunk { + struct list_head list; + struct dw_edma_chan *chan; + struct dw_edma_burst *burst; + + u32 bursts_alloc; + + u8 cb; + struct dw_edma_region ll_region; /* Linked list */ +}; + +struct dw_edma_desc { + struct virt_dma_desc vd; + struct dw_edma_chan *chan; + struct dw_edma_chunk *chunk; + + u32 chunks_alloc; + + u32 alloc_sz; + u32 xfer_sz; +}; + +struct dw_edma_chan { + struct virt_dma_chan vc; + struct dw_edma_chip *chip; + int id; + enum dw_edma_dir dir; + + off_t ll_off; + u32 ll_max; + + off_t dt_off; + + struct msi_msg msi; + + enum dw_edma_request request; + enum dw_edma_status status; + u8 configured; + + struct dma_slave_config config; +}; + +struct dw_edma_irq { + struct msi_msg msi; + u32 wr_mask; + u32 rd_mask; + struct dw_edma *dw; +}; + +struct dw_edma_core_ops { + int (*irq_vector)(struct device *dev, unsigned int nr); +}; + +struct dw_edma { + char name[20]; + + struct dma_device wr_edma; + u16 wr_ch_cnt; + + struct dma_device rd_edma; + u16 rd_ch_cnt; + + struct dw_edma_region rg_region; /* Registers */ + struct dw_edma_region ll_region; /* Linked list */ + struct dw_edma_region dt_region; /* Data */ + + struct dw_edma_irq *irq; + int nr_irqs; + + u32 version; + enum dw_edma_mode mode; + + struct dw_edma_chan *chan; + const struct dw_edma_core_ops *ops; + + raw_spinlock_t lock; /* Only for legacy */ +}; + +struct dw_edma_sg { + struct scatterlist *sgl; + unsigned int len; +}; + +struct dw_edma_cyclic { + dma_addr_t paddr; + size_t len; + size_t cnt; +}; + +struct dw_edma_transfer { + struct dma_chan *dchan; + union dw_edma_xfer { + struct dw_edma_sg sg; + struct dw_edma_cyclic cyclic; + } xfer; + enum dma_transfer_direction direction; + unsigned long flags; + bool cyclic; +}; + +static inline +struct dw_edma_chan *vc2dw_edma_chan(struct virt_dma_chan *vc) +{ + return container_of(vc, struct dw_edma_chan, vc); +} + +static inline +struct dw_edma_chan *dchan2dw_edma_chan(struct dma_chan *dchan) +{ + return vc2dw_edma_chan(to_virt_chan(dchan)); +} + +#endif /* _DW_EDMA_CORE_H */ diff --git a/drivers/dma/dw-edma/dw-edma-pcie.c b/drivers/dma/dw-edma/dw-edma-pcie.c new file mode 100644 index 000000000..1eafc602e --- /dev/null +++ b/drivers/dma/dw-edma/dw-edma-pcie.c @@ -0,0 +1,239 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2018-2019 Synopsys, Inc. and/or its affiliates. 
+ * Synopsys DesignWare eDMA PCIe driver + * + * Author: Gustavo Pimentel <gustavo.pimentel@synopsys.com> + */ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/pci.h> +#include <linux/device.h> +#include <linux/dma/edma.h> +#include <linux/pci-epf.h> +#include <linux/msi.h> + +#include "dw-edma-core.h" + +struct dw_edma_pcie_data { + /* eDMA registers location */ + enum pci_barno rg_bar; + off_t rg_off; + size_t rg_sz; + /* eDMA memory linked list location */ + enum pci_barno ll_bar; + off_t ll_off; + size_t ll_sz; + /* eDMA memory data location */ + enum pci_barno dt_bar; + off_t dt_off; + size_t dt_sz; + /* Other */ + u32 version; + enum dw_edma_mode mode; + u8 irqs; +}; + +static const struct dw_edma_pcie_data snps_edda_data = { + /* eDMA registers location */ + .rg_bar = BAR_0, + .rg_off = 0x00001000, /* 4 Kbytes */ + .rg_sz = 0x00002000, /* 8 Kbytes */ + /* eDMA memory linked list location */ + .ll_bar = BAR_2, + .ll_off = 0x00000000, /* 0 Kbytes */ + .ll_sz = 0x00800000, /* 8 Mbytes */ + /* eDMA memory data location */ + .dt_bar = BAR_2, + .dt_off = 0x00800000, /* 8 Mbytes */ + .dt_sz = 0x03800000, /* 56 Mbytes */ + /* Other */ + .version = 0, + .mode = EDMA_MODE_UNROLL, + .irqs = 1, +}; + +static int dw_edma_pcie_irq_vector(struct device *dev, unsigned int nr) +{ + return pci_irq_vector(to_pci_dev(dev), nr); +} + +static const struct dw_edma_core_ops dw_edma_pcie_core_ops = { + .irq_vector = dw_edma_pcie_irq_vector, +}; + +static int dw_edma_pcie_probe(struct pci_dev *pdev, + const struct pci_device_id *pid) +{ + const struct dw_edma_pcie_data *pdata = (void *)pid->driver_data; + struct device *dev = &pdev->dev; + struct dw_edma_chip *chip; + int err, nr_irqs; + struct dw_edma *dw; + + /* Enable PCI device */ + err = pcim_enable_device(pdev); + if (err) { + pci_err(pdev, "enabling device failed\n"); + return err; + } + + /* Mapping PCI BAR regions */ + err = pcim_iomap_regions(pdev, BIT(pdata->rg_bar) | + BIT(pdata->ll_bar) | + BIT(pdata->dt_bar), + pci_name(pdev)); + if (err) { + pci_err(pdev, "eDMA BAR I/O remapping failed\n"); + return err; + } + + pci_set_master(pdev); + + /* DMA configuration */ + err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64)); + if (!err) { + err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); + if (err) { + pci_err(pdev, "consistent DMA mask 64 set failed\n"); + return err; + } + } else { + pci_err(pdev, "DMA mask 64 set failed\n"); + + err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); + if (err) { + pci_err(pdev, "DMA mask 32 set failed\n"); + return err; + } + + err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); + if (err) { + pci_err(pdev, "consistent DMA mask 32 set failed\n"); + return err; + } + } + + /* Data structure allocation */ + chip = devm_kzalloc(dev, sizeof(*chip), GFP_KERNEL); + if (!chip) + return -ENOMEM; + + dw = devm_kzalloc(dev, sizeof(*dw), GFP_KERNEL); + if (!dw) + return -ENOMEM; + + /* IRQs allocation */ + nr_irqs = pci_alloc_irq_vectors(pdev, 1, pdata->irqs, + PCI_IRQ_MSI | PCI_IRQ_MSIX); + if (nr_irqs < 1) { + pci_err(pdev, "fail to alloc IRQ vector (number of IRQs=%u)\n", + nr_irqs); + return -EPERM; + } + + /* Data structure initialization */ + chip->dw = dw; + chip->dev = dev; + chip->id = pdev->devfn; + chip->irq = pdev->irq; + + dw->rg_region.vaddr = pcim_iomap_table(pdev)[pdata->rg_bar]; + dw->rg_region.vaddr += pdata->rg_off; + dw->rg_region.paddr = pdev->resource[pdata->rg_bar].start; + dw->rg_region.paddr += pdata->rg_off; + dw->rg_region.sz = pdata->rg_sz; + + dw->ll_region.vaddr = 
pcim_iomap_table(pdev)[pdata->ll_bar]; + dw->ll_region.vaddr += pdata->ll_off; + dw->ll_region.paddr = pdev->resource[pdata->ll_bar].start; + dw->ll_region.paddr += pdata->ll_off; + dw->ll_region.sz = pdata->ll_sz; + + dw->dt_region.vaddr = pcim_iomap_table(pdev)[pdata->dt_bar]; + dw->dt_region.vaddr += pdata->dt_off; + dw->dt_region.paddr = pdev->resource[pdata->dt_bar].start; + dw->dt_region.paddr += pdata->dt_off; + dw->dt_region.sz = pdata->dt_sz; + + dw->version = pdata->version; + dw->mode = pdata->mode; + dw->nr_irqs = nr_irqs; + dw->ops = &dw_edma_pcie_core_ops; + + /* Debug info */ + pci_dbg(pdev, "Version:\t%u\n", dw->version); + + pci_dbg(pdev, "Mode:\t%s\n", + dw->mode == EDMA_MODE_LEGACY ? "Legacy" : "Unroll"); + + pci_dbg(pdev, "Registers:\tBAR=%u, off=0x%.8lx, sz=0x%zx bytes, addr(v=%p, p=%pa)\n", + pdata->rg_bar, pdata->rg_off, pdata->rg_sz, + dw->rg_region.vaddr, &dw->rg_region.paddr); + + pci_dbg(pdev, "L. List:\tBAR=%u, off=0x%.8lx, sz=0x%zx bytes, addr(v=%p, p=%pa)\n", + pdata->ll_bar, pdata->ll_off, pdata->ll_sz, + dw->ll_region.vaddr, &dw->ll_region.paddr); + + pci_dbg(pdev, "Data:\tBAR=%u, off=0x%.8lx, sz=0x%zx bytes, addr(v=%p, p=%pa)\n", + pdata->dt_bar, pdata->dt_off, pdata->dt_sz, + dw->dt_region.vaddr, &dw->dt_region.paddr); + + pci_dbg(pdev, "Nr. IRQs:\t%u\n", dw->nr_irqs); + + /* Validating if PCI interrupts were enabled */ + if (!pci_dev_msi_enabled(pdev)) { + pci_err(pdev, "enable interrupt failed\n"); + return -EPERM; + } + + dw->irq = devm_kcalloc(dev, nr_irqs, sizeof(*dw->irq), GFP_KERNEL); + if (!dw->irq) + return -ENOMEM; + + /* Starting eDMA driver */ + err = dw_edma_probe(chip); + if (err) { + pci_err(pdev, "eDMA probe failed\n"); + return err; + } + + /* Saving data structure reference */ + pci_set_drvdata(pdev, chip); + + return 0; +} + +static void dw_edma_pcie_remove(struct pci_dev *pdev) +{ + struct dw_edma_chip *chip = pci_get_drvdata(pdev); + int err; + + /* Stopping eDMA driver */ + err = dw_edma_remove(chip); + if (err) + pci_warn(pdev, "can't remove device properly: %d\n", err); + + /* Freeing IRQs */ + pci_free_irq_vectors(pdev); +} + +static const struct pci_device_id dw_edma_pcie_id_table[] = { + { PCI_DEVICE_DATA(SYNOPSYS, EDDA, &snps_edda_data) }, + { } +}; +MODULE_DEVICE_TABLE(pci, dw_edma_pcie_id_table); + +static struct pci_driver dw_edma_pcie_driver = { + .name = "dw-edma-pcie", + .id_table = dw_edma_pcie_id_table, + .probe = dw_edma_pcie_probe, + .remove = dw_edma_pcie_remove, +}; + +module_pci_driver(dw_edma_pcie_driver); + +MODULE_LICENSE("GPL v2"); +MODULE_DESCRIPTION("Synopsys DesignWare eDMA PCIe driver"); +MODULE_AUTHOR("Gustavo Pimentel <gustavo.pimentel@synopsys.com>"); diff --git a/drivers/dma/dw-edma/dw-edma-v0-core.c b/drivers/dma/dw-edma/dw-edma-v0-core.c new file mode 100644 index 000000000..692de47b1 --- /dev/null +++ b/drivers/dma/dw-edma/dw-edma-v0-core.c @@ -0,0 +1,350 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2018-2019 Synopsys, Inc. and/or its affiliates. 
+ * Synopsys DesignWare eDMA v0 core + * + * Author: Gustavo Pimentel <gustavo.pimentel@synopsys.com> + */ + +#include <linux/bitfield.h> + +#include "dw-edma-core.h" +#include "dw-edma-v0-core.h" +#include "dw-edma-v0-regs.h" +#include "dw-edma-v0-debugfs.h" + +enum dw_edma_control { + DW_EDMA_V0_CB = BIT(0), + DW_EDMA_V0_TCB = BIT(1), + DW_EDMA_V0_LLP = BIT(2), + DW_EDMA_V0_LIE = BIT(3), + DW_EDMA_V0_RIE = BIT(4), + DW_EDMA_V0_CCS = BIT(8), + DW_EDMA_V0_LLE = BIT(9), +}; + +static inline struct dw_edma_v0_regs __iomem *__dw_regs(struct dw_edma *dw) +{ + return dw->rg_region.vaddr; +} + +#define SET(dw, name, value) \ + writel(value, &(__dw_regs(dw)->name)) + +#define GET(dw, name) \ + readl(&(__dw_regs(dw)->name)) + +#define SET_RW(dw, dir, name, value) \ + do { \ + if ((dir) == EDMA_DIR_WRITE) \ + SET(dw, wr_##name, value); \ + else \ + SET(dw, rd_##name, value); \ + } while (0) + +#define GET_RW(dw, dir, name) \ + ((dir) == EDMA_DIR_WRITE \ + ? GET(dw, wr_##name) \ + : GET(dw, rd_##name)) + +#define SET_BOTH(dw, name, value) \ + do { \ + SET(dw, wr_##name, value); \ + SET(dw, rd_##name, value); \ + } while (0) + +static inline struct dw_edma_v0_ch_regs __iomem * +__dw_ch_regs(struct dw_edma *dw, enum dw_edma_dir dir, u16 ch) +{ + if (dw->mode == EDMA_MODE_LEGACY) + return &(__dw_regs(dw)->type.legacy.ch); + + if (dir == EDMA_DIR_WRITE) + return &__dw_regs(dw)->type.unroll.ch[ch].wr; + + return &__dw_regs(dw)->type.unroll.ch[ch].rd; +} + +static inline void writel_ch(struct dw_edma *dw, enum dw_edma_dir dir, u16 ch, + u32 value, void __iomem *addr) +{ + if (dw->mode == EDMA_MODE_LEGACY) { + u32 viewport_sel; + unsigned long flags; + + raw_spin_lock_irqsave(&dw->lock, flags); + + viewport_sel = FIELD_PREP(EDMA_V0_VIEWPORT_MASK, ch); + if (dir == EDMA_DIR_READ) + viewport_sel |= BIT(31); + + writel(viewport_sel, + &(__dw_regs(dw)->type.legacy.viewport_sel)); + writel(value, addr); + + raw_spin_unlock_irqrestore(&dw->lock, flags); + } else { + writel(value, addr); + } +} + +static inline u32 readl_ch(struct dw_edma *dw, enum dw_edma_dir dir, u16 ch, + const void __iomem *addr) +{ + u32 value; + + if (dw->mode == EDMA_MODE_LEGACY) { + u32 viewport_sel; + unsigned long flags; + + raw_spin_lock_irqsave(&dw->lock, flags); + + viewport_sel = FIELD_PREP(EDMA_V0_VIEWPORT_MASK, ch); + if (dir == EDMA_DIR_READ) + viewport_sel |= BIT(31); + + writel(viewport_sel, + &(__dw_regs(dw)->type.legacy.viewport_sel)); + value = readl(addr); + + raw_spin_unlock_irqrestore(&dw->lock, flags); + } else { + value = readl(addr); + } + + return value; +} + +#define SET_CH(dw, dir, ch, name, value) \ + writel_ch(dw, dir, ch, value, &(__dw_ch_regs(dw, dir, ch)->name)) + +#define GET_CH(dw, dir, ch, name) \ + readl_ch(dw, dir, ch, &(__dw_ch_regs(dw, dir, ch)->name)) + +#define SET_LL(ll, value) \ + writel(value, ll) + +/* eDMA management callbacks */ +void dw_edma_v0_core_off(struct dw_edma *dw) +{ + SET_BOTH(dw, int_mask, EDMA_V0_DONE_INT_MASK | EDMA_V0_ABORT_INT_MASK); + SET_BOTH(dw, int_clear, EDMA_V0_DONE_INT_MASK | EDMA_V0_ABORT_INT_MASK); + SET_BOTH(dw, engine_en, 0); +} + +u16 dw_edma_v0_core_ch_count(struct dw_edma *dw, enum dw_edma_dir dir) +{ + u32 num_ch; + + if (dir == EDMA_DIR_WRITE) + num_ch = FIELD_GET(EDMA_V0_WRITE_CH_COUNT_MASK, GET(dw, ctrl)); + else + num_ch = FIELD_GET(EDMA_V0_READ_CH_COUNT_MASK, GET(dw, ctrl)); + + if (num_ch > EDMA_V0_MAX_NR_CH) + num_ch = EDMA_V0_MAX_NR_CH; + + return (u16)num_ch; +} + +enum dma_status dw_edma_v0_core_ch_status(struct dw_edma_chan *chan) +{ + struct dw_edma *dw 
= chan->chip->dw; + u32 tmp; + + tmp = FIELD_GET(EDMA_V0_CH_STATUS_MASK, + GET_CH(dw, chan->dir, chan->id, ch_control1)); + + if (tmp == 1) + return DMA_IN_PROGRESS; + else if (tmp == 3) + return DMA_COMPLETE; + else + return DMA_ERROR; +} + +void dw_edma_v0_core_clear_done_int(struct dw_edma_chan *chan) +{ + struct dw_edma *dw = chan->chip->dw; + + SET_RW(dw, chan->dir, int_clear, + FIELD_PREP(EDMA_V0_DONE_INT_MASK, BIT(chan->id))); +} + +void dw_edma_v0_core_clear_abort_int(struct dw_edma_chan *chan) +{ + struct dw_edma *dw = chan->chip->dw; + + SET_RW(dw, chan->dir, int_clear, + FIELD_PREP(EDMA_V0_ABORT_INT_MASK, BIT(chan->id))); +} + +u32 dw_edma_v0_core_status_done_int(struct dw_edma *dw, enum dw_edma_dir dir) +{ + return FIELD_GET(EDMA_V0_DONE_INT_MASK, GET_RW(dw, dir, int_status)); +} + +u32 dw_edma_v0_core_status_abort_int(struct dw_edma *dw, enum dw_edma_dir dir) +{ + return FIELD_GET(EDMA_V0_ABORT_INT_MASK, GET_RW(dw, dir, int_status)); +} + +static void dw_edma_v0_core_write_chunk(struct dw_edma_chunk *chunk) +{ + struct dw_edma_burst *child; + struct dw_edma_v0_lli __iomem *lli; + struct dw_edma_v0_llp __iomem *llp; + u32 control = 0, i = 0; + int j; + + lli = chunk->ll_region.vaddr; + + if (chunk->cb) + control = DW_EDMA_V0_CB; + + j = chunk->bursts_alloc; + list_for_each_entry(child, &chunk->burst->list, list) { + j--; + if (!j) + control |= (DW_EDMA_V0_LIE | DW_EDMA_V0_RIE); + + /* Channel control */ + SET_LL(&lli[i].control, control); + /* Transfer size */ + SET_LL(&lli[i].transfer_size, child->sz); + /* SAR - low, high */ + SET_LL(&lli[i].sar_low, lower_32_bits(child->sar)); + SET_LL(&lli[i].sar_high, upper_32_bits(child->sar)); + /* DAR - low, high */ + SET_LL(&lli[i].dar_low, lower_32_bits(child->dar)); + SET_LL(&lli[i].dar_high, upper_32_bits(child->dar)); + i++; + } + + llp = (void __iomem *)&lli[i]; + control = DW_EDMA_V0_LLP | DW_EDMA_V0_TCB; + if (!chunk->cb) + control |= DW_EDMA_V0_CB; + + /* Channel control */ + SET_LL(&llp->control, control); + /* Linked list - low, high */ + SET_LL(&llp->llp_low, lower_32_bits(chunk->ll_region.paddr)); + SET_LL(&llp->llp_high, upper_32_bits(chunk->ll_region.paddr)); +} + +void dw_edma_v0_core_start(struct dw_edma_chunk *chunk, bool first) +{ + struct dw_edma_chan *chan = chunk->chan; + struct dw_edma *dw = chan->chip->dw; + u32 tmp; + + dw_edma_v0_core_write_chunk(chunk); + + if (first) { + /* Enable engine */ + SET_RW(dw, chan->dir, engine_en, BIT(0)); + /* Interrupt unmask - done, abort */ + tmp = GET_RW(dw, chan->dir, int_mask); + tmp &= ~FIELD_PREP(EDMA_V0_DONE_INT_MASK, BIT(chan->id)); + tmp &= ~FIELD_PREP(EDMA_V0_ABORT_INT_MASK, BIT(chan->id)); + SET_RW(dw, chan->dir, int_mask, tmp); + /* Linked list error */ + tmp = GET_RW(dw, chan->dir, linked_list_err_en); + tmp |= FIELD_PREP(EDMA_V0_LINKED_LIST_ERR_MASK, BIT(chan->id)); + SET_RW(dw, chan->dir, linked_list_err_en, tmp); + /* Channel control */ + SET_CH(dw, chan->dir, chan->id, ch_control1, + (DW_EDMA_V0_CCS | DW_EDMA_V0_LLE)); + /* Linked list - low, high */ + SET_CH(dw, chan->dir, chan->id, llp_low, + lower_32_bits(chunk->ll_region.paddr)); + SET_CH(dw, chan->dir, chan->id, llp_high, + upper_32_bits(chunk->ll_region.paddr)); + } + /* Doorbell */ + SET_RW(dw, chan->dir, doorbell, + FIELD_PREP(EDMA_V0_DOORBELL_CH_MASK, chan->id)); +} + +int dw_edma_v0_core_device_config(struct dw_edma_chan *chan) +{ + struct dw_edma *dw = chan->chip->dw; + u32 tmp = 0; + + /* MSI done addr - low, high */ + SET_RW(dw, chan->dir, done_imwr_low, chan->msi.address_lo); + SET_RW(dw, 
chan->dir, done_imwr_high, chan->msi.address_hi); + /* MSI abort addr - low, high */ + SET_RW(dw, chan->dir, abort_imwr_low, chan->msi.address_lo); + SET_RW(dw, chan->dir, abort_imwr_high, chan->msi.address_hi); + /* MSI data - low, high */ + switch (chan->id) { + case 0: + case 1: + tmp = GET_RW(dw, chan->dir, ch01_imwr_data); + break; + + case 2: + case 3: + tmp = GET_RW(dw, chan->dir, ch23_imwr_data); + break; + + case 4: + case 5: + tmp = GET_RW(dw, chan->dir, ch45_imwr_data); + break; + + case 6: + case 7: + tmp = GET_RW(dw, chan->dir, ch67_imwr_data); + break; + } + + if (chan->id & BIT(0)) { + /* Channel odd {1, 3, 5, 7} */ + tmp &= EDMA_V0_CH_EVEN_MSI_DATA_MASK; + tmp |= FIELD_PREP(EDMA_V0_CH_ODD_MSI_DATA_MASK, + chan->msi.data); + } else { + /* Channel even {0, 2, 4, 6} */ + tmp &= EDMA_V0_CH_ODD_MSI_DATA_MASK; + tmp |= FIELD_PREP(EDMA_V0_CH_EVEN_MSI_DATA_MASK, + chan->msi.data); + } + + switch (chan->id) { + case 0: + case 1: + SET_RW(dw, chan->dir, ch01_imwr_data, tmp); + break; + + case 2: + case 3: + SET_RW(dw, chan->dir, ch23_imwr_data, tmp); + break; + + case 4: + case 5: + SET_RW(dw, chan->dir, ch45_imwr_data, tmp); + break; + + case 6: + case 7: + SET_RW(dw, chan->dir, ch67_imwr_data, tmp); + break; + } + + return 0; +} + +/* eDMA debugfs callbacks */ +void dw_edma_v0_core_debugfs_on(struct dw_edma_chip *chip) +{ + dw_edma_v0_debugfs_on(chip); +} + +void dw_edma_v0_core_debugfs_off(void) +{ + dw_edma_v0_debugfs_off(); +} diff --git a/drivers/dma/dw-edma/dw-edma-v0-core.h b/drivers/dma/dw-edma/dw-edma-v0-core.h new file mode 100644 index 000000000..abae1527f --- /dev/null +++ b/drivers/dma/dw-edma/dw-edma-v0-core.h @@ -0,0 +1,28 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2018-2019 Synopsys, Inc. and/or its affiliates. + * Synopsys DesignWare eDMA v0 core + * + * Author: Gustavo Pimentel <gustavo.pimentel@synopsys.com> + */ + +#ifndef _DW_EDMA_V0_CORE_H +#define _DW_EDMA_V0_CORE_H + +#include <linux/dma/edma.h> + +/* eDMA management callbacks */ +void dw_edma_v0_core_off(struct dw_edma *chan); +u16 dw_edma_v0_core_ch_count(struct dw_edma *chan, enum dw_edma_dir dir); +enum dma_status dw_edma_v0_core_ch_status(struct dw_edma_chan *chan); +void dw_edma_v0_core_clear_done_int(struct dw_edma_chan *chan); +void dw_edma_v0_core_clear_abort_int(struct dw_edma_chan *chan); +u32 dw_edma_v0_core_status_done_int(struct dw_edma *chan, enum dw_edma_dir dir); +u32 dw_edma_v0_core_status_abort_int(struct dw_edma *chan, enum dw_edma_dir dir); +void dw_edma_v0_core_start(struct dw_edma_chunk *chunk, bool first); +int dw_edma_v0_core_device_config(struct dw_edma_chan *chan); +/* eDMA debug fs callbacks */ +void dw_edma_v0_core_debugfs_on(struct dw_edma_chip *chip); +void dw_edma_v0_core_debugfs_off(void); + +#endif /* _DW_EDMA_V0_CORE_H */ diff --git a/drivers/dma/dw-edma/dw-edma-v0-debugfs.c b/drivers/dma/dw-edma/dw-edma-v0-debugfs.c new file mode 100644 index 000000000..6f62711a4 --- /dev/null +++ b/drivers/dma/dw-edma/dw-edma-v0-debugfs.c @@ -0,0 +1,311 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2018-2019 Synopsys, Inc. and/or its affiliates. 
+ * Synopsys DesignWare eDMA v0 core + * + * Author: Gustavo Pimentel <gustavo.pimentel@synopsys.com> + */ + +#include <linux/debugfs.h> +#include <linux/bitfield.h> + +#include "dw-edma-v0-debugfs.h" +#include "dw-edma-v0-regs.h" +#include "dw-edma-core.h" + +#define REGS_ADDR(name) \ + ((void __force *)®s->name) +#define REGISTER(name) \ + { #name, REGS_ADDR(name) } + +#define WR_REGISTER(name) \ + { #name, REGS_ADDR(wr_##name) } +#define RD_REGISTER(name) \ + { #name, REGS_ADDR(rd_##name) } + +#define WR_REGISTER_LEGACY(name) \ + { #name, REGS_ADDR(type.legacy.wr_##name) } +#define RD_REGISTER_LEGACY(name) \ + { #name, REGS_ADDR(type.legacy.rd_##name) } + +#define WR_REGISTER_UNROLL(name) \ + { #name, REGS_ADDR(type.unroll.wr_##name) } +#define RD_REGISTER_UNROLL(name) \ + { #name, REGS_ADDR(type.unroll.rd_##name) } + +#define WRITE_STR "write" +#define READ_STR "read" +#define CHANNEL_STR "channel" +#define REGISTERS_STR "registers" + +static struct dentry *base_dir; +static struct dw_edma *dw; +static struct dw_edma_v0_regs __iomem *regs; + +static struct { + void __iomem *start; + void __iomem *end; +} lim[2][EDMA_V0_MAX_NR_CH]; + +struct debugfs_entries { + const char *name; + dma_addr_t *reg; +}; + +static int dw_edma_debugfs_u32_get(void *data, u64 *val) +{ + void __iomem *reg = (void __force __iomem *)data; + if (dw->mode == EDMA_MODE_LEGACY && + reg >= (void __iomem *)®s->type.legacy.ch) { + void __iomem *ptr = ®s->type.legacy.ch; + u32 viewport_sel = 0; + unsigned long flags; + u16 ch; + + for (ch = 0; ch < dw->wr_ch_cnt; ch++) + if (lim[0][ch].start >= reg && reg < lim[0][ch].end) { + ptr += (reg - lim[0][ch].start); + goto legacy_sel_wr; + } + + for (ch = 0; ch < dw->rd_ch_cnt; ch++) + if (lim[1][ch].start >= reg && reg < lim[1][ch].end) { + ptr += (reg - lim[1][ch].start); + goto legacy_sel_rd; + } + + return 0; +legacy_sel_rd: + viewport_sel = BIT(31); +legacy_sel_wr: + viewport_sel |= FIELD_PREP(EDMA_V0_VIEWPORT_MASK, ch); + + raw_spin_lock_irqsave(&dw->lock, flags); + + writel(viewport_sel, ®s->type.legacy.viewport_sel); + *val = readl(ptr); + + raw_spin_unlock_irqrestore(&dw->lock, flags); + } else { + *val = readl(reg); + } + + return 0; +} +DEFINE_DEBUGFS_ATTRIBUTE(fops_x32, dw_edma_debugfs_u32_get, NULL, "0x%08llx\n"); + +static void dw_edma_debugfs_create_x32(const struct debugfs_entries entries[], + int nr_entries, struct dentry *dir) +{ + int i; + + for (i = 0; i < nr_entries; i++) { + if (!debugfs_create_file_unsafe(entries[i].name, 0444, dir, + entries[i].reg, &fops_x32)) + break; + } +} + +static void dw_edma_debugfs_regs_ch(struct dw_edma_v0_ch_regs __iomem *regs, + struct dentry *dir) +{ + int nr_entries; + const struct debugfs_entries debugfs_regs[] = { + REGISTER(ch_control1), + REGISTER(ch_control2), + REGISTER(transfer_size), + REGISTER(sar_low), + REGISTER(sar_high), + REGISTER(dar_low), + REGISTER(dar_high), + REGISTER(llp_low), + REGISTER(llp_high), + }; + + nr_entries = ARRAY_SIZE(debugfs_regs); + dw_edma_debugfs_create_x32(debugfs_regs, nr_entries, dir); +} + +static void dw_edma_debugfs_regs_wr(struct dentry *dir) +{ + const struct debugfs_entries debugfs_regs[] = { + /* eDMA global registers */ + WR_REGISTER(engine_en), + WR_REGISTER(doorbell), + WR_REGISTER(ch_arb_weight_low), + WR_REGISTER(ch_arb_weight_high), + /* eDMA interrupts registers */ + WR_REGISTER(int_status), + WR_REGISTER(int_mask), + WR_REGISTER(int_clear), + WR_REGISTER(err_status), + WR_REGISTER(done_imwr_low), + WR_REGISTER(done_imwr_high), + WR_REGISTER(abort_imwr_low), + 
WR_REGISTER(abort_imwr_high), + WR_REGISTER(ch01_imwr_data), + WR_REGISTER(ch23_imwr_data), + WR_REGISTER(ch45_imwr_data), + WR_REGISTER(ch67_imwr_data), + WR_REGISTER(linked_list_err_en), + }; + const struct debugfs_entries debugfs_unroll_regs[] = { + /* eDMA channel context grouping */ + WR_REGISTER_UNROLL(engine_chgroup), + WR_REGISTER_UNROLL(engine_hshake_cnt_low), + WR_REGISTER_UNROLL(engine_hshake_cnt_high), + WR_REGISTER_UNROLL(ch0_pwr_en), + WR_REGISTER_UNROLL(ch1_pwr_en), + WR_REGISTER_UNROLL(ch2_pwr_en), + WR_REGISTER_UNROLL(ch3_pwr_en), + WR_REGISTER_UNROLL(ch4_pwr_en), + WR_REGISTER_UNROLL(ch5_pwr_en), + WR_REGISTER_UNROLL(ch6_pwr_en), + WR_REGISTER_UNROLL(ch7_pwr_en), + }; + struct dentry *regs_dir, *ch_dir; + int nr_entries, i; + char name[16]; + + regs_dir = debugfs_create_dir(WRITE_STR, dir); + if (!regs_dir) + return; + + nr_entries = ARRAY_SIZE(debugfs_regs); + dw_edma_debugfs_create_x32(debugfs_regs, nr_entries, regs_dir); + + if (dw->mode == EDMA_MODE_UNROLL) { + nr_entries = ARRAY_SIZE(debugfs_unroll_regs); + dw_edma_debugfs_create_x32(debugfs_unroll_regs, nr_entries, + regs_dir); + } + + for (i = 0; i < dw->wr_ch_cnt; i++) { + snprintf(name, sizeof(name), "%s:%d", CHANNEL_STR, i); + + ch_dir = debugfs_create_dir(name, regs_dir); + if (!ch_dir) + return; + + dw_edma_debugfs_regs_ch(®s->type.unroll.ch[i].wr, ch_dir); + + lim[0][i].start = ®s->type.unroll.ch[i].wr; + lim[0][i].end = ®s->type.unroll.ch[i].padding_1[0]; + } +} + +static void dw_edma_debugfs_regs_rd(struct dentry *dir) +{ + const struct debugfs_entries debugfs_regs[] = { + /* eDMA global registers */ + RD_REGISTER(engine_en), + RD_REGISTER(doorbell), + RD_REGISTER(ch_arb_weight_low), + RD_REGISTER(ch_arb_weight_high), + /* eDMA interrupts registers */ + RD_REGISTER(int_status), + RD_REGISTER(int_mask), + RD_REGISTER(int_clear), + RD_REGISTER(err_status_low), + RD_REGISTER(err_status_high), + RD_REGISTER(linked_list_err_en), + RD_REGISTER(done_imwr_low), + RD_REGISTER(done_imwr_high), + RD_REGISTER(abort_imwr_low), + RD_REGISTER(abort_imwr_high), + RD_REGISTER(ch01_imwr_data), + RD_REGISTER(ch23_imwr_data), + RD_REGISTER(ch45_imwr_data), + RD_REGISTER(ch67_imwr_data), + }; + const struct debugfs_entries debugfs_unroll_regs[] = { + /* eDMA channel context grouping */ + RD_REGISTER_UNROLL(engine_chgroup), + RD_REGISTER_UNROLL(engine_hshake_cnt_low), + RD_REGISTER_UNROLL(engine_hshake_cnt_high), + RD_REGISTER_UNROLL(ch0_pwr_en), + RD_REGISTER_UNROLL(ch1_pwr_en), + RD_REGISTER_UNROLL(ch2_pwr_en), + RD_REGISTER_UNROLL(ch3_pwr_en), + RD_REGISTER_UNROLL(ch4_pwr_en), + RD_REGISTER_UNROLL(ch5_pwr_en), + RD_REGISTER_UNROLL(ch6_pwr_en), + RD_REGISTER_UNROLL(ch7_pwr_en), + }; + struct dentry *regs_dir, *ch_dir; + int nr_entries, i; + char name[16]; + + regs_dir = debugfs_create_dir(READ_STR, dir); + if (!regs_dir) + return; + + nr_entries = ARRAY_SIZE(debugfs_regs); + dw_edma_debugfs_create_x32(debugfs_regs, nr_entries, regs_dir); + + if (dw->mode == EDMA_MODE_UNROLL) { + nr_entries = ARRAY_SIZE(debugfs_unroll_regs); + dw_edma_debugfs_create_x32(debugfs_unroll_regs, nr_entries, + regs_dir); + } + + for (i = 0; i < dw->rd_ch_cnt; i++) { + snprintf(name, sizeof(name), "%s:%d", CHANNEL_STR, i); + + ch_dir = debugfs_create_dir(name, regs_dir); + if (!ch_dir) + return; + + dw_edma_debugfs_regs_ch(®s->type.unroll.ch[i].rd, ch_dir); + + lim[1][i].start = ®s->type.unroll.ch[i].rd; + lim[1][i].end = ®s->type.unroll.ch[i].padding_2[0]; + } +} + +static void dw_edma_debugfs_regs(void) +{ + const struct debugfs_entries 
debugfs_regs[] = { + REGISTER(ctrl_data_arb_prior), + REGISTER(ctrl), + }; + struct dentry *regs_dir; + int nr_entries; + + regs_dir = debugfs_create_dir(REGISTERS_STR, base_dir); + if (!regs_dir) + return; + + nr_entries = ARRAY_SIZE(debugfs_regs); + dw_edma_debugfs_create_x32(debugfs_regs, nr_entries, regs_dir); + + dw_edma_debugfs_regs_wr(regs_dir); + dw_edma_debugfs_regs_rd(regs_dir); +} + +void dw_edma_v0_debugfs_on(struct dw_edma_chip *chip) +{ + dw = chip->dw; + if (!dw) + return; + + regs = dw->rg_region.vaddr; + if (!regs) + return; + + base_dir = debugfs_create_dir(dw->name, NULL); + if (!base_dir) + return; + + debugfs_create_u32("version", 0444, base_dir, &dw->version); + debugfs_create_u32("mode", 0444, base_dir, &dw->mode); + debugfs_create_u16("wr_ch_cnt", 0444, base_dir, &dw->wr_ch_cnt); + debugfs_create_u16("rd_ch_cnt", 0444, base_dir, &dw->rd_ch_cnt); + + dw_edma_debugfs_regs(); +} + +void dw_edma_v0_debugfs_off(void) +{ + debugfs_remove_recursive(base_dir); +} diff --git a/drivers/dma/dw-edma/dw-edma-v0-debugfs.h b/drivers/dma/dw-edma/dw-edma-v0-debugfs.h new file mode 100644 index 000000000..5450a0a94 --- /dev/null +++ b/drivers/dma/dw-edma/dw-edma-v0-debugfs.h @@ -0,0 +1,27 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2018-2019 Synopsys, Inc. and/or its affiliates. + * Synopsys DesignWare eDMA v0 core + * + * Author: Gustavo Pimentel <gustavo.pimentel@synopsys.com> + */ + +#ifndef _DW_EDMA_V0_DEBUG_FS_H +#define _DW_EDMA_V0_DEBUG_FS_H + +#include <linux/dma/edma.h> + +#ifdef CONFIG_DEBUG_FS +void dw_edma_v0_debugfs_on(struct dw_edma_chip *chip); +void dw_edma_v0_debugfs_off(void); +#else +static inline void dw_edma_v0_debugfs_on(struct dw_edma_chip *chip) +{ +} + +static inline void dw_edma_v0_debugfs_off(void) +{ +} +#endif /* CONFIG_DEBUG_FS */ + +#endif /* _DW_EDMA_V0_DEBUG_FS_H */ diff --git a/drivers/dma/dw-edma/dw-edma-v0-regs.h b/drivers/dma/dw-edma/dw-edma-v0-regs.h new file mode 100644 index 000000000..dfd70e223 --- /dev/null +++ b/drivers/dma/dw-edma/dw-edma-v0-regs.h @@ -0,0 +1,158 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2018-2019 Synopsys, Inc. and/or its affiliates. 
+ * Synopsys DesignWare eDMA v0 core + * + * Author: Gustavo Pimentel <gustavo.pimentel@synopsys.com> + */ + +#ifndef _DW_EDMA_V0_REGS_H +#define _DW_EDMA_V0_REGS_H + +#include <linux/dmaengine.h> + +#define EDMA_V0_MAX_NR_CH 8 +#define EDMA_V0_VIEWPORT_MASK GENMASK(2, 0) +#define EDMA_V0_DONE_INT_MASK GENMASK(7, 0) +#define EDMA_V0_ABORT_INT_MASK GENMASK(23, 16) +#define EDMA_V0_WRITE_CH_COUNT_MASK GENMASK(3, 0) +#define EDMA_V0_READ_CH_COUNT_MASK GENMASK(19, 16) +#define EDMA_V0_CH_STATUS_MASK GENMASK(6, 5) +#define EDMA_V0_DOORBELL_CH_MASK GENMASK(2, 0) +#define EDMA_V0_LINKED_LIST_ERR_MASK GENMASK(7, 0) + +#define EDMA_V0_CH_ODD_MSI_DATA_MASK GENMASK(31, 16) +#define EDMA_V0_CH_EVEN_MSI_DATA_MASK GENMASK(15, 0) + +struct dw_edma_v0_ch_regs { + u32 ch_control1; /* 0x000 */ + u32 ch_control2; /* 0x004 */ + u32 transfer_size; /* 0x008 */ + u32 sar_low; /* 0x00c */ + u32 sar_high; /* 0x010 */ + u32 dar_low; /* 0x014 */ + u32 dar_high; /* 0x018 */ + u32 llp_low; /* 0x01c */ + u32 llp_high; /* 0x020 */ +}; + +struct dw_edma_v0_ch { + struct dw_edma_v0_ch_regs wr; /* 0x200 */ + u32 padding_1[55]; /* [0x224..0x2fc] */ + struct dw_edma_v0_ch_regs rd; /* 0x300 */ + u32 padding_2[55]; /* [0x324..0x3fc] */ +}; + +struct dw_edma_v0_unroll { + u32 padding_1; /* 0x0f8 */ + u32 wr_engine_chgroup; /* 0x100 */ + u32 rd_engine_chgroup; /* 0x104 */ + u32 wr_engine_hshake_cnt_low; /* 0x108 */ + u32 wr_engine_hshake_cnt_high; /* 0x10c */ + u32 padding_2[2]; /* [0x110..0x114] */ + u32 rd_engine_hshake_cnt_low; /* 0x118 */ + u32 rd_engine_hshake_cnt_high; /* 0x11c */ + u32 padding_3[2]; /* [0x120..0x124] */ + u32 wr_ch0_pwr_en; /* 0x128 */ + u32 wr_ch1_pwr_en; /* 0x12c */ + u32 wr_ch2_pwr_en; /* 0x130 */ + u32 wr_ch3_pwr_en; /* 0x134 */ + u32 wr_ch4_pwr_en; /* 0x138 */ + u32 wr_ch5_pwr_en; /* 0x13c */ + u32 wr_ch6_pwr_en; /* 0x140 */ + u32 wr_ch7_pwr_en; /* 0x144 */ + u32 padding_4[8]; /* [0x148..0x164] */ + u32 rd_ch0_pwr_en; /* 0x168 */ + u32 rd_ch1_pwr_en; /* 0x16c */ + u32 rd_ch2_pwr_en; /* 0x170 */ + u32 rd_ch3_pwr_en; /* 0x174 */ + u32 rd_ch4_pwr_en; /* 0x178 */ + u32 rd_ch5_pwr_en; /* 0x18c */ + u32 rd_ch6_pwr_en; /* 0x180 */ + u32 rd_ch7_pwr_en; /* 0x184 */ + u32 padding_5[30]; /* [0x188..0x1fc] */ + struct dw_edma_v0_ch ch[EDMA_V0_MAX_NR_CH]; /* [0x200..0x1120] */ +}; + +struct dw_edma_v0_legacy { + u32 viewport_sel; /* 0x0f8 */ + struct dw_edma_v0_ch_regs ch; /* [0x100..0x120] */ +}; + +struct dw_edma_v0_regs { + /* eDMA global registers */ + u32 ctrl_data_arb_prior; /* 0x000 */ + u32 padding_1; /* 0x004 */ + u32 ctrl; /* 0x008 */ + u32 wr_engine_en; /* 0x00c */ + u32 wr_doorbell; /* 0x010 */ + u32 padding_2; /* 0x014 */ + u32 wr_ch_arb_weight_low; /* 0x018 */ + u32 wr_ch_arb_weight_high; /* 0x01c */ + u32 padding_3[3]; /* [0x020..0x028] */ + u32 rd_engine_en; /* 0x02c */ + u32 rd_doorbell; /* 0x030 */ + u32 padding_4; /* 0x034 */ + u32 rd_ch_arb_weight_low; /* 0x038 */ + u32 rd_ch_arb_weight_high; /* 0x03c */ + u32 padding_5[3]; /* [0x040..0x048] */ + /* eDMA interrupts registers */ + u32 wr_int_status; /* 0x04c */ + u32 padding_6; /* 0x050 */ + u32 wr_int_mask; /* 0x054 */ + u32 wr_int_clear; /* 0x058 */ + u32 wr_err_status; /* 0x05c */ + u32 wr_done_imwr_low; /* 0x060 */ + u32 wr_done_imwr_high; /* 0x064 */ + u32 wr_abort_imwr_low; /* 0x068 */ + u32 wr_abort_imwr_high; /* 0x06c */ + u32 wr_ch01_imwr_data; /* 0x070 */ + u32 wr_ch23_imwr_data; /* 0x074 */ + u32 wr_ch45_imwr_data; /* 0x078 */ + u32 wr_ch67_imwr_data; /* 0x07c */ + u32 padding_7[4]; /* [0x080..0x08c] */ + u32 wr_linked_list_err_en; 
/* 0x090 */ + u32 padding_8[3]; /* [0x094..0x09c] */ + u32 rd_int_status; /* 0x0a0 */ + u32 padding_9; /* 0x0a4 */ + u32 rd_int_mask; /* 0x0a8 */ + u32 rd_int_clear; /* 0x0ac */ + u32 padding_10; /* 0x0b0 */ + u32 rd_err_status_low; /* 0x0b4 */ + u32 rd_err_status_high; /* 0x0b8 */ + u32 padding_11[2]; /* [0x0bc..0x0c0] */ + u32 rd_linked_list_err_en; /* 0x0c4 */ + u32 padding_12; /* 0x0c8 */ + u32 rd_done_imwr_low; /* 0x0cc */ + u32 rd_done_imwr_high; /* 0x0d0 */ + u32 rd_abort_imwr_low; /* 0x0d4 */ + u32 rd_abort_imwr_high; /* 0x0d8 */ + u32 rd_ch01_imwr_data; /* 0x0dc */ + u32 rd_ch23_imwr_data; /* 0x0e0 */ + u32 rd_ch45_imwr_data; /* 0x0e4 */ + u32 rd_ch67_imwr_data; /* 0x0e8 */ + u32 padding_13[4]; /* [0x0ec..0x0f8] */ + /* eDMA channel context grouping */ + union dw_edma_v0_type { + struct dw_edma_v0_legacy legacy; /* [0x0f8..0x120] */ + struct dw_edma_v0_unroll unroll; /* [0x0f8..0x1120] */ + } type; +}; + +struct dw_edma_v0_lli { + u32 control; + u32 transfer_size; + u32 sar_low; + u32 sar_high; + u32 dar_low; + u32 dar_high; +}; + +struct dw_edma_v0_llp { + u32 control; + u32 reserved; + u32 llp_low; + u32 llp_high; +}; + +#endif /* _DW_EDMA_V0_REGS_H */ |
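
Seen from a consumer, the channels this driver registers are ordinary dmaengine slave channels (DMA_SLAVE, DMA_CYCLIC, DMA_PRIVATE), so a client exercises dw_edma_device_prep_slave_sg() through the generic dmaengine API. The sketch below is illustrative only: the my_filter() callback, the device address and the scatterlist are assumptions. Note that dw_edma_device_transfer() in dw-edma-core.c above rejects requests whose dma_slave_config direction does not line up with the channel's read/write direction.

/*
 * Hypothetical dmaengine client (not part of this commit) driving one
 * eDMA channel through the standard slave API.
 */
#include <linux/dmaengine.h>
#include <linux/scatterlist.h>

static bool my_filter(struct dma_chan *chan, void *param)
{
	/* Hypothetical: accept the first channel dmaengine offers. */
	return true;
}

static int my_edma_dev_to_mem(struct scatterlist *sgl, unsigned int nents,
			      dma_addr_t dev_addr)
{
	struct dma_slave_config cfg = {
		.direction = DMA_DEV_TO_MEM,
		.src_addr = dev_addr,
	};
	struct dma_async_tx_descriptor *desc;
	struct dma_chan *chan;
	dma_cap_mask_t mask;
	int ret;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	chan = dma_request_channel(mask, my_filter, NULL);
	if (!chan)
		return -ENODEV;

	ret = dmaengine_slave_config(chan, &cfg);
	if (ret)
		goto out;

	desc = dmaengine_prep_slave_sg(chan, sgl, nents, DMA_DEV_TO_MEM,
				       DMA_PREP_INTERRUPT);
	if (!desc) {
		ret = -EINVAL;
		goto out;
	}

	dmaengine_submit(desc);
	dma_async_issue_pending(chan);
	/*
	 * A real client waits for the descriptor completion callback and
	 * only then calls dma_release_channel(chan).
	 */
	return 0;
out:
	dma_release_channel(chan);
	return ret;
}
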