From 2c3c1048746a4622d8c89a29670120dc8fab93c4 Mon Sep 17 00:00:00 2001
From: Daniel Baumann
Date: Sun, 7 Apr 2024 20:49:45 +0200
Subject: Adding upstream version 6.1.76.

Signed-off-by: Daniel Baumann
---
 drivers/dma/dw-edma/Kconfig              |   19 +
 drivers/dma/dw-edma/Makefile             |    7 +
 drivers/dma/dw-edma/dw-edma-core.c       | 1036 ++++++++++++++++++++++++++++++
 drivers/dma/dw-edma/dw-edma-core.h       |  155 +++++
 drivers/dma/dw-edma/dw-edma-pcie.c       |  364 +++++++++
 drivers/dma/dw-edma/dw-edma-v0-core.c    |  511 +++++++++++++++
 drivers/dma/dw-edma/dw-edma-v0-core.h    |   28 +
 drivers/dma/dw-edma/dw-edma-v0-debugfs.c |  314 +++++++++
 drivers/dma/dw-edma/dw-edma-v0-debugfs.h |   27 +
 drivers/dma/dw-edma/dw-edma-v0-regs.h    |  233 +++++++
 10 files changed, 2694 insertions(+)
 create mode 100644 drivers/dma/dw-edma/Kconfig
 create mode 100644 drivers/dma/dw-edma/Makefile
 create mode 100644 drivers/dma/dw-edma/dw-edma-core.c
 create mode 100644 drivers/dma/dw-edma/dw-edma-core.h
 create mode 100644 drivers/dma/dw-edma/dw-edma-pcie.c
 create mode 100644 drivers/dma/dw-edma/dw-edma-v0-core.c
 create mode 100644 drivers/dma/dw-edma/dw-edma-v0-core.h
 create mode 100644 drivers/dma/dw-edma/dw-edma-v0-debugfs.c
 create mode 100644 drivers/dma/dw-edma/dw-edma-v0-debugfs.h
 create mode 100644 drivers/dma/dw-edma/dw-edma-v0-regs.h

diff --git a/drivers/dma/dw-edma/Kconfig b/drivers/dma/dw-edma/Kconfig
new file mode 100644
index 000000000..7ff17b2db
--- /dev/null
+++ b/drivers/dma/dw-edma/Kconfig
@@ -0,0 +1,19 @@
+# SPDX-License-Identifier: GPL-2.0
+
+config DW_EDMA
+	tristate "Synopsys DesignWare eDMA controller driver"
+	depends on PCI && PCI_MSI
+	select DMA_ENGINE
+	select DMA_VIRTUAL_CHANNELS
+	help
+	  Support the Synopsys DesignWare eDMA controller, normally
+	  implemented on endpoint SoCs.
+
+config DW_EDMA_PCIE
+	tristate "Synopsys DesignWare eDMA PCIe driver"
+	depends on PCI && PCI_MSI
+	select DW_EDMA
+	help
+	  Provides the glue logic between the Synopsys DesignWare
+	  eDMA controller and an endpoint PCIe device. This also serves
+	  as a reference design for anyone who wants to use this IP.
diff --git a/drivers/dma/dw-edma/Makefile b/drivers/dma/dw-edma/Makefile
new file mode 100644
index 000000000..8d45c0d56
--- /dev/null
+++ b/drivers/dma/dw-edma/Makefile
@@ -0,0 +1,7 @@
+# SPDX-License-Identifier: GPL-2.0
+
+obj-$(CONFIG_DW_EDMA)		+= dw-edma.o
+dw-edma-$(CONFIG_DEBUG_FS)	:= dw-edma-v0-debugfs.o
+dw-edma-objs			:= dw-edma-core.o \
+				dw-edma-v0-core.o $(dw-edma-y)
+obj-$(CONFIG_DW_EDMA_PCIE)	+= dw-edma-pcie.o
diff --git a/drivers/dma/dw-edma/dw-edma-core.c b/drivers/dma/dw-edma/dw-edma-core.c
new file mode 100644
index 000000000..ef4cdcf6b
--- /dev/null
+++ b/drivers/dma/dw-edma/dw-edma-core.c
@@ -0,0 +1,1036 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2018-2019 Synopsys, Inc. and/or its affiliates.
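+ *
+ * A consumer reaches this engine through the generic dmaengine API. The
+ * sketch below is a hypothetical client (the filter function, its
+ * argument and the buffer/length names are made up; the dmaengine calls
+ * themselves are the standard ones this driver implements further down):
+ *
+ *	dma_cap_mask_t mask;
+ *	struct dma_slave_config cfg = {
+ *		.direction = DMA_DEV_TO_MEM,
+ *		.src_addr = remote_buf_phys,	/* device-side address */
+ *	};
+ *	struct dma_async_tx_descriptor *txd;
+ *	struct dma_chan *chan;
+ *
+ *	dma_cap_zero(mask);
+ *	dma_cap_set(DMA_SLAVE, mask);
+ *	chan = dma_request_channel(mask, my_edma_filter, &my_filter_arg);
+ *	dmaengine_slave_config(chan, &cfg);
+ *	txd = dmaengine_prep_slave_single(chan, local_buf_dma, len,
+ *					  DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT);
+ *	txd->callback = my_done_callback;
+ *	dmaengine_submit(txd);
+ *	dma_async_issue_pending(chan);
+ *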
+ * Synopsys DesignWare eDMA core driver + * + * Author: Gustavo Pimentel + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "dw-edma-core.h" +#include "dw-edma-v0-core.h" +#include "../dmaengine.h" +#include "../virt-dma.h" + +static inline +struct device *dchan2dev(struct dma_chan *dchan) +{ + return &dchan->dev->device; +} + +static inline +struct device *chan2dev(struct dw_edma_chan *chan) +{ + return &chan->vc.chan.dev->device; +} + +static inline +struct dw_edma_desc *vd2dw_edma_desc(struct virt_dma_desc *vd) +{ + return container_of(vd, struct dw_edma_desc, vd); +} + +static struct dw_edma_burst *dw_edma_alloc_burst(struct dw_edma_chunk *chunk) +{ + struct dw_edma_burst *burst; + + burst = kzalloc(sizeof(*burst), GFP_NOWAIT); + if (unlikely(!burst)) + return NULL; + + INIT_LIST_HEAD(&burst->list); + if (chunk->burst) { + /* Create and add new element into the linked list */ + chunk->bursts_alloc++; + list_add_tail(&burst->list, &chunk->burst->list); + } else { + /* List head */ + chunk->bursts_alloc = 0; + chunk->burst = burst; + } + + return burst; +} + +static struct dw_edma_chunk *dw_edma_alloc_chunk(struct dw_edma_desc *desc) +{ + struct dw_edma_chip *chip = desc->chan->dw->chip; + struct dw_edma_chan *chan = desc->chan; + struct dw_edma_chunk *chunk; + + chunk = kzalloc(sizeof(*chunk), GFP_NOWAIT); + if (unlikely(!chunk)) + return NULL; + + INIT_LIST_HEAD(&chunk->list); + chunk->chan = chan; + /* Toggling change bit (CB) in each chunk, this is a mechanism to + * inform the eDMA HW block that this is a new linked list ready + * to be consumed. + * - Odd chunks originate CB equal to 0 + * - Even chunks originate CB equal to 1 + */ + chunk->cb = !(desc->chunks_alloc % 2); + if (chan->dir == EDMA_DIR_WRITE) { + chunk->ll_region.paddr = chip->ll_region_wr[chan->id].paddr; + chunk->ll_region.vaddr = chip->ll_region_wr[chan->id].vaddr; + } else { + chunk->ll_region.paddr = chip->ll_region_rd[chan->id].paddr; + chunk->ll_region.vaddr = chip->ll_region_rd[chan->id].vaddr; + } + + if (desc->chunk) { + /* Create and add new element into the linked list */ + if (!dw_edma_alloc_burst(chunk)) { + kfree(chunk); + return NULL; + } + desc->chunks_alloc++; + list_add_tail(&chunk->list, &desc->chunk->list); + } else { + /* List head */ + chunk->burst = NULL; + desc->chunks_alloc = 0; + desc->chunk = chunk; + } + + return chunk; +} + +static struct dw_edma_desc *dw_edma_alloc_desc(struct dw_edma_chan *chan) +{ + struct dw_edma_desc *desc; + + desc = kzalloc(sizeof(*desc), GFP_NOWAIT); + if (unlikely(!desc)) + return NULL; + + desc->chan = chan; + if (!dw_edma_alloc_chunk(desc)) { + kfree(desc); + return NULL; + } + + return desc; +} + +static void dw_edma_free_burst(struct dw_edma_chunk *chunk) +{ + struct dw_edma_burst *child, *_next; + + /* Remove all the list elements */ + list_for_each_entry_safe(child, _next, &chunk->burst->list, list) { + list_del(&child->list); + kfree(child); + chunk->bursts_alloc--; + } + + /* Remove the list head */ + kfree(child); + chunk->burst = NULL; +} + +static void dw_edma_free_chunk(struct dw_edma_desc *desc) +{ + struct dw_edma_chunk *child, *_next; + + if (!desc->chunk) + return; + + /* Remove all the list elements */ + list_for_each_entry_safe(child, _next, &desc->chunk->list, list) { + dw_edma_free_burst(child); + list_del(&child->list); + kfree(child); + desc->chunks_alloc--; + } + + /* Remove the list head */ + kfree(child); + desc->chunk = NULL; +} + +static void dw_edma_free_desc(struct 
dw_edma_desc *desc) +{ + dw_edma_free_chunk(desc); + kfree(desc); +} + +static void vchan_free_desc(struct virt_dma_desc *vdesc) +{ + dw_edma_free_desc(vd2dw_edma_desc(vdesc)); +} + +static int dw_edma_start_transfer(struct dw_edma_chan *chan) +{ + struct dw_edma_chunk *child; + struct dw_edma_desc *desc; + struct virt_dma_desc *vd; + + vd = vchan_next_desc(&chan->vc); + if (!vd) + return 0; + + desc = vd2dw_edma_desc(vd); + if (!desc) + return 0; + + child = list_first_entry_or_null(&desc->chunk->list, + struct dw_edma_chunk, list); + if (!child) + return 0; + + dw_edma_v0_core_start(child, !desc->xfer_sz); + desc->xfer_sz += child->ll_region.sz; + dw_edma_free_burst(child); + list_del(&child->list); + kfree(child); + desc->chunks_alloc--; + + return 1; +} + +static int dw_edma_device_config(struct dma_chan *dchan, + struct dma_slave_config *config) +{ + struct dw_edma_chan *chan = dchan2dw_edma_chan(dchan); + + memcpy(&chan->config, config, sizeof(*config)); + chan->configured = true; + + return 0; +} + +static int dw_edma_device_pause(struct dma_chan *dchan) +{ + struct dw_edma_chan *chan = dchan2dw_edma_chan(dchan); + int err = 0; + + if (!chan->configured) + err = -EPERM; + else if (chan->status != EDMA_ST_BUSY) + err = -EPERM; + else if (chan->request != EDMA_REQ_NONE) + err = -EPERM; + else + chan->request = EDMA_REQ_PAUSE; + + return err; +} + +static int dw_edma_device_resume(struct dma_chan *dchan) +{ + struct dw_edma_chan *chan = dchan2dw_edma_chan(dchan); + int err = 0; + + if (!chan->configured) { + err = -EPERM; + } else if (chan->status != EDMA_ST_PAUSE) { + err = -EPERM; + } else if (chan->request != EDMA_REQ_NONE) { + err = -EPERM; + } else { + chan->status = EDMA_ST_BUSY; + dw_edma_start_transfer(chan); + } + + return err; +} + +static int dw_edma_device_terminate_all(struct dma_chan *dchan) +{ + struct dw_edma_chan *chan = dchan2dw_edma_chan(dchan); + int err = 0; + + if (!chan->configured) { + /* Do nothing */ + } else if (chan->status == EDMA_ST_PAUSE) { + chan->status = EDMA_ST_IDLE; + chan->configured = false; + } else if (chan->status == EDMA_ST_IDLE) { + chan->configured = false; + } else if (dw_edma_v0_core_ch_status(chan) == DMA_COMPLETE) { + /* + * The channel is in a false BUSY state, probably didn't + * receive or lost an interrupt + */ + chan->status = EDMA_ST_IDLE; + chan->configured = false; + } else if (chan->request > EDMA_REQ_PAUSE) { + err = -EPERM; + } else { + chan->request = EDMA_REQ_STOP; + } + + return err; +} + +static void dw_edma_device_issue_pending(struct dma_chan *dchan) +{ + struct dw_edma_chan *chan = dchan2dw_edma_chan(dchan); + unsigned long flags; + + if (!chan->configured) + return; + + spin_lock_irqsave(&chan->vc.lock, flags); + if (vchan_issue_pending(&chan->vc) && chan->request == EDMA_REQ_NONE && + chan->status == EDMA_ST_IDLE) { + chan->status = EDMA_ST_BUSY; + dw_edma_start_transfer(chan); + } + spin_unlock_irqrestore(&chan->vc.lock, flags); +} + +static enum dma_status +dw_edma_device_tx_status(struct dma_chan *dchan, dma_cookie_t cookie, + struct dma_tx_state *txstate) +{ + struct dw_edma_chan *chan = dchan2dw_edma_chan(dchan); + struct dw_edma_desc *desc; + struct virt_dma_desc *vd; + unsigned long flags; + enum dma_status ret; + u32 residue = 0; + + ret = dma_cookie_status(dchan, cookie, txstate); + if (ret == DMA_COMPLETE) + return ret; + + if (ret == DMA_IN_PROGRESS && chan->status == EDMA_ST_PAUSE) + ret = DMA_PAUSED; + + if (!txstate) + goto ret_residue; + + spin_lock_irqsave(&chan->vc.lock, flags); + vd = 
vchan_find_desc(&chan->vc, cookie); + if (vd) { + desc = vd2dw_edma_desc(vd); + if (desc) + residue = desc->alloc_sz - desc->xfer_sz; + } + spin_unlock_irqrestore(&chan->vc.lock, flags); + +ret_residue: + dma_set_residue(txstate, residue); + + return ret; +} + +static struct dma_async_tx_descriptor * +dw_edma_device_transfer(struct dw_edma_transfer *xfer) +{ + struct dw_edma_chan *chan = dchan2dw_edma_chan(xfer->dchan); + enum dma_transfer_direction dir = xfer->direction; + phys_addr_t src_addr, dst_addr; + struct scatterlist *sg = NULL; + struct dw_edma_chunk *chunk; + struct dw_edma_burst *burst; + struct dw_edma_desc *desc; + u32 cnt = 0; + int i; + + if (!chan->configured) + return NULL; + + /* + * Local Root Port/End-point Remote End-point + * +-----------------------+ PCIe bus +----------------------+ + * | | +-+ | | + * | DEV_TO_MEM Rx Ch <----+ +---+ Tx Ch DEV_TO_MEM | + * | | | | | | + * | MEM_TO_DEV Tx Ch +----+ +---> Rx Ch MEM_TO_DEV | + * | | +-+ | | + * +-----------------------+ +----------------------+ + * + * 1. Normal logic: + * If eDMA is embedded into the DW PCIe RP/EP and controlled from the + * CPU/Application side, the Rx channel (EDMA_DIR_READ) will be used + * for the device read operations (DEV_TO_MEM) and the Tx channel + * (EDMA_DIR_WRITE) - for the write operations (MEM_TO_DEV). + * + * 2. Inverted logic: + * If eDMA is embedded into a Remote PCIe EP and is controlled by the + * MWr/MRd TLPs sent from the CPU's PCIe host controller, the Tx + * channel (EDMA_DIR_WRITE) will be used for the device read operations + * (DEV_TO_MEM) and the Rx channel (EDMA_DIR_READ) - for the write + * operations (MEM_TO_DEV). + * + * It is the client driver responsibility to choose a proper channel + * for the DMA transfers. + */ + if (chan->dw->chip->flags & DW_EDMA_CHIP_LOCAL) { + if ((chan->dir == EDMA_DIR_READ && dir != DMA_DEV_TO_MEM) || + (chan->dir == EDMA_DIR_WRITE && dir != DMA_MEM_TO_DEV)) + return NULL; + } else { + if ((chan->dir == EDMA_DIR_WRITE && dir != DMA_DEV_TO_MEM) || + (chan->dir == EDMA_DIR_READ && dir != DMA_MEM_TO_DEV)) + return NULL; + } + + if (xfer->type == EDMA_XFER_CYCLIC) { + if (!xfer->xfer.cyclic.len || !xfer->xfer.cyclic.cnt) + return NULL; + } else if (xfer->type == EDMA_XFER_SCATTER_GATHER) { + if (xfer->xfer.sg.len < 1) + return NULL; + } else if (xfer->type == EDMA_XFER_INTERLEAVED) { + if (!xfer->xfer.il->numf) + return NULL; + if (xfer->xfer.il->numf > 0 && xfer->xfer.il->frame_size > 0) + return NULL; + } else { + return NULL; + } + + desc = dw_edma_alloc_desc(chan); + if (unlikely(!desc)) + goto err_alloc; + + chunk = dw_edma_alloc_chunk(desc); + if (unlikely(!chunk)) + goto err_alloc; + + if (xfer->type == EDMA_XFER_INTERLEAVED) { + src_addr = xfer->xfer.il->src_start; + dst_addr = xfer->xfer.il->dst_start; + } else { + src_addr = chan->config.src_addr; + dst_addr = chan->config.dst_addr; + } + + if (xfer->type == EDMA_XFER_CYCLIC) { + cnt = xfer->xfer.cyclic.cnt; + } else if (xfer->type == EDMA_XFER_SCATTER_GATHER) { + cnt = xfer->xfer.sg.len; + sg = xfer->xfer.sg.sgl; + } else if (xfer->type == EDMA_XFER_INTERLEAVED) { + if (xfer->xfer.il->numf > 0) + cnt = xfer->xfer.il->numf; + else + cnt = xfer->xfer.il->frame_size; + } + + for (i = 0; i < cnt; i++) { + if (xfer->type == EDMA_XFER_SCATTER_GATHER && !sg) + break; + + if (chunk->bursts_alloc == chan->ll_max) { + chunk = dw_edma_alloc_chunk(desc); + if (unlikely(!chunk)) + goto err_alloc; + } + + burst = dw_edma_alloc_burst(chunk); + if (unlikely(!burst)) + goto err_alloc; + + if 
(xfer->type == EDMA_XFER_CYCLIC) + burst->sz = xfer->xfer.cyclic.len; + else if (xfer->type == EDMA_XFER_SCATTER_GATHER) + burst->sz = sg_dma_len(sg); + else if (xfer->type == EDMA_XFER_INTERLEAVED) + burst->sz = xfer->xfer.il->sgl[i].size; + + chunk->ll_region.sz += burst->sz; + desc->alloc_sz += burst->sz; + + if (dir == DMA_DEV_TO_MEM) { + burst->sar = src_addr; + if (xfer->type == EDMA_XFER_CYCLIC) { + burst->dar = xfer->xfer.cyclic.paddr; + } else if (xfer->type == EDMA_XFER_SCATTER_GATHER) { + src_addr += sg_dma_len(sg); + burst->dar = sg_dma_address(sg); + /* Unlike the typical assumption by other + * drivers/IPs the peripheral memory isn't + * a FIFO memory, in this case, it's a + * linear memory and that why the source + * and destination addresses are increased + * by the same portion (data length) + */ + } else if (xfer->type == EDMA_XFER_INTERLEAVED) { + burst->dar = dst_addr; + } + } else { + burst->dar = dst_addr; + if (xfer->type == EDMA_XFER_CYCLIC) { + burst->sar = xfer->xfer.cyclic.paddr; + } else if (xfer->type == EDMA_XFER_SCATTER_GATHER) { + dst_addr += sg_dma_len(sg); + burst->sar = sg_dma_address(sg); + /* Unlike the typical assumption by other + * drivers/IPs the peripheral memory isn't + * a FIFO memory, in this case, it's a + * linear memory and that why the source + * and destination addresses are increased + * by the same portion (data length) + */ + } else if (xfer->type == EDMA_XFER_INTERLEAVED) { + burst->sar = src_addr; + } + } + + if (xfer->type == EDMA_XFER_SCATTER_GATHER) { + sg = sg_next(sg); + } else if (xfer->type == EDMA_XFER_INTERLEAVED && + xfer->xfer.il->frame_size > 0) { + struct dma_interleaved_template *il = xfer->xfer.il; + struct data_chunk *dc = &il->sgl[i]; + + if (il->src_sgl) { + src_addr += burst->sz; + src_addr += dmaengine_get_src_icg(il, dc); + } + + if (il->dst_sgl) { + dst_addr += burst->sz; + dst_addr += dmaengine_get_dst_icg(il, dc); + } + } + } + + return vchan_tx_prep(&chan->vc, &desc->vd, xfer->flags); + +err_alloc: + if (desc) + dw_edma_free_desc(desc); + + return NULL; +} + +static struct dma_async_tx_descriptor * +dw_edma_device_prep_slave_sg(struct dma_chan *dchan, struct scatterlist *sgl, + unsigned int len, + enum dma_transfer_direction direction, + unsigned long flags, void *context) +{ + struct dw_edma_transfer xfer; + + xfer.dchan = dchan; + xfer.direction = direction; + xfer.xfer.sg.sgl = sgl; + xfer.xfer.sg.len = len; + xfer.flags = flags; + xfer.type = EDMA_XFER_SCATTER_GATHER; + + return dw_edma_device_transfer(&xfer); +} + +static struct dma_async_tx_descriptor * +dw_edma_device_prep_dma_cyclic(struct dma_chan *dchan, dma_addr_t paddr, + size_t len, size_t count, + enum dma_transfer_direction direction, + unsigned long flags) +{ + struct dw_edma_transfer xfer; + + xfer.dchan = dchan; + xfer.direction = direction; + xfer.xfer.cyclic.paddr = paddr; + xfer.xfer.cyclic.len = len; + xfer.xfer.cyclic.cnt = count; + xfer.flags = flags; + xfer.type = EDMA_XFER_CYCLIC; + + return dw_edma_device_transfer(&xfer); +} + +static struct dma_async_tx_descriptor * +dw_edma_device_prep_interleaved_dma(struct dma_chan *dchan, + struct dma_interleaved_template *ilt, + unsigned long flags) +{ + struct dw_edma_transfer xfer; + + xfer.dchan = dchan; + xfer.direction = ilt->dir; + xfer.xfer.il = ilt; + xfer.flags = flags; + xfer.type = EDMA_XFER_INTERLEAVED; + + return dw_edma_device_transfer(&xfer); +} + +static void dw_edma_done_interrupt(struct dw_edma_chan *chan) +{ + struct dw_edma_desc *desc; + struct virt_dma_desc *vd; + unsigned 
long flags; + + dw_edma_v0_core_clear_done_int(chan); + + spin_lock_irqsave(&chan->vc.lock, flags); + vd = vchan_next_desc(&chan->vc); + if (vd) { + switch (chan->request) { + case EDMA_REQ_NONE: + desc = vd2dw_edma_desc(vd); + if (!desc->chunks_alloc) { + list_del(&vd->node); + vchan_cookie_complete(vd); + } + + /* Continue transferring if there are remaining chunks or issued requests. + */ + chan->status = dw_edma_start_transfer(chan) ? EDMA_ST_BUSY : EDMA_ST_IDLE; + break; + + case EDMA_REQ_STOP: + list_del(&vd->node); + vchan_cookie_complete(vd); + chan->request = EDMA_REQ_NONE; + chan->status = EDMA_ST_IDLE; + break; + + case EDMA_REQ_PAUSE: + chan->request = EDMA_REQ_NONE; + chan->status = EDMA_ST_PAUSE; + break; + + default: + break; + } + } + spin_unlock_irqrestore(&chan->vc.lock, flags); +} + +static void dw_edma_abort_interrupt(struct dw_edma_chan *chan) +{ + struct virt_dma_desc *vd; + unsigned long flags; + + dw_edma_v0_core_clear_abort_int(chan); + + spin_lock_irqsave(&chan->vc.lock, flags); + vd = vchan_next_desc(&chan->vc); + if (vd) { + list_del(&vd->node); + vchan_cookie_complete(vd); + } + spin_unlock_irqrestore(&chan->vc.lock, flags); + chan->request = EDMA_REQ_NONE; + chan->status = EDMA_ST_IDLE; +} + +static irqreturn_t dw_edma_interrupt(int irq, void *data, bool write) +{ + struct dw_edma_irq *dw_irq = data; + struct dw_edma *dw = dw_irq->dw; + unsigned long total, pos, val; + unsigned long off; + u32 mask; + + if (write) { + total = dw->wr_ch_cnt; + off = 0; + mask = dw_irq->wr_mask; + } else { + total = dw->rd_ch_cnt; + off = dw->wr_ch_cnt; + mask = dw_irq->rd_mask; + } + + val = dw_edma_v0_core_status_done_int(dw, write ? + EDMA_DIR_WRITE : + EDMA_DIR_READ); + val &= mask; + for_each_set_bit(pos, &val, total) { + struct dw_edma_chan *chan = &dw->chan[pos + off]; + + dw_edma_done_interrupt(chan); + } + + val = dw_edma_v0_core_status_abort_int(dw, write ? 
+ EDMA_DIR_WRITE : + EDMA_DIR_READ); + val &= mask; + for_each_set_bit(pos, &val, total) { + struct dw_edma_chan *chan = &dw->chan[pos + off]; + + dw_edma_abort_interrupt(chan); + } + + return IRQ_HANDLED; +} + +static inline irqreturn_t dw_edma_interrupt_write(int irq, void *data) +{ + return dw_edma_interrupt(irq, data, true); +} + +static inline irqreturn_t dw_edma_interrupt_read(int irq, void *data) +{ + return dw_edma_interrupt(irq, data, false); +} + +static irqreturn_t dw_edma_interrupt_common(int irq, void *data) +{ + dw_edma_interrupt(irq, data, true); + dw_edma_interrupt(irq, data, false); + + return IRQ_HANDLED; +} + +static int dw_edma_alloc_chan_resources(struct dma_chan *dchan) +{ + struct dw_edma_chan *chan = dchan2dw_edma_chan(dchan); + + if (chan->status != EDMA_ST_IDLE) + return -EBUSY; + + return 0; +} + +static void dw_edma_free_chan_resources(struct dma_chan *dchan) +{ + unsigned long timeout = jiffies + msecs_to_jiffies(5000); + int ret; + + while (time_before(jiffies, timeout)) { + ret = dw_edma_device_terminate_all(dchan); + if (!ret) + break; + + if (time_after_eq(jiffies, timeout)) + return; + + cpu_relax(); + } +} + +static int dw_edma_channel_setup(struct dw_edma *dw, bool write, + u32 wr_alloc, u32 rd_alloc) +{ + struct dw_edma_chip *chip = dw->chip; + struct dw_edma_region *dt_region; + struct device *dev = chip->dev; + struct dw_edma_chan *chan; + struct dw_edma_irq *irq; + struct dma_device *dma; + u32 alloc, off_alloc; + u32 i, j, cnt; + int err = 0; + u32 pos; + + if (write) { + i = 0; + cnt = dw->wr_ch_cnt; + dma = &dw->wr_edma; + alloc = wr_alloc; + off_alloc = 0; + } else { + i = dw->wr_ch_cnt; + cnt = dw->rd_ch_cnt; + dma = &dw->rd_edma; + alloc = rd_alloc; + off_alloc = wr_alloc; + } + + INIT_LIST_HEAD(&dma->channels); + for (j = 0; (alloc || dw->nr_irqs == 1) && j < cnt; j++, i++) { + chan = &dw->chan[i]; + + dt_region = devm_kzalloc(dev, sizeof(*dt_region), GFP_KERNEL); + if (!dt_region) + return -ENOMEM; + + chan->vc.chan.private = dt_region; + + chan->dw = dw; + chan->id = j; + chan->dir = write ? EDMA_DIR_WRITE : EDMA_DIR_READ; + chan->configured = false; + chan->request = EDMA_REQ_NONE; + chan->status = EDMA_ST_IDLE; + + if (write) + chan->ll_max = (chip->ll_region_wr[j].sz / EDMA_LL_SZ); + else + chan->ll_max = (chip->ll_region_rd[j].sz / EDMA_LL_SZ); + chan->ll_max -= 1; + + dev_vdbg(dev, "L. List:\tChannel %s[%u] max_cnt=%u\n", + write ? "write" : "read", j, chan->ll_max); + + if (dw->nr_irqs == 1) + pos = 0; + else + pos = off_alloc + (j % alloc); + + irq = &dw->irq[pos]; + + if (write) + irq->wr_mask |= BIT(j); + else + irq->rd_mask |= BIT(j); + + irq->dw = dw; + memcpy(&chan->msi, &irq->msi, sizeof(chan->msi)); + + dev_vdbg(dev, "MSI:\t\tChannel %s[%u] addr=0x%.8x%.8x, data=0x%.8x\n", + write ? 
"write" : "read", j, + chan->msi.address_hi, chan->msi.address_lo, + chan->msi.data); + + chan->vc.desc_free = vchan_free_desc; + vchan_init(&chan->vc, dma); + + if (write) { + dt_region->paddr = chip->dt_region_wr[j].paddr; + dt_region->vaddr = chip->dt_region_wr[j].vaddr; + dt_region->sz = chip->dt_region_wr[j].sz; + } else { + dt_region->paddr = chip->dt_region_rd[j].paddr; + dt_region->vaddr = chip->dt_region_rd[j].vaddr; + dt_region->sz = chip->dt_region_rd[j].sz; + } + + dw_edma_v0_core_device_config(chan); + } + + /* Set DMA channel capabilities */ + dma_cap_zero(dma->cap_mask); + dma_cap_set(DMA_SLAVE, dma->cap_mask); + dma_cap_set(DMA_CYCLIC, dma->cap_mask); + dma_cap_set(DMA_PRIVATE, dma->cap_mask); + dma_cap_set(DMA_INTERLEAVE, dma->cap_mask); + dma->directions = BIT(write ? DMA_DEV_TO_MEM : DMA_MEM_TO_DEV); + dma->src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES); + dma->dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES); + dma->residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR; + dma->chancnt = cnt; + + /* Set DMA channel callbacks */ + dma->dev = chip->dev; + dma->device_alloc_chan_resources = dw_edma_alloc_chan_resources; + dma->device_free_chan_resources = dw_edma_free_chan_resources; + dma->device_config = dw_edma_device_config; + dma->device_pause = dw_edma_device_pause; + dma->device_resume = dw_edma_device_resume; + dma->device_terminate_all = dw_edma_device_terminate_all; + dma->device_issue_pending = dw_edma_device_issue_pending; + dma->device_tx_status = dw_edma_device_tx_status; + dma->device_prep_slave_sg = dw_edma_device_prep_slave_sg; + dma->device_prep_dma_cyclic = dw_edma_device_prep_dma_cyclic; + dma->device_prep_interleaved_dma = dw_edma_device_prep_interleaved_dma; + + dma_set_max_seg_size(dma->dev, U32_MAX); + + /* Register DMA device */ + err = dma_async_device_register(dma); + + return err; +} + +static inline void dw_edma_dec_irq_alloc(int *nr_irqs, u32 *alloc, u16 cnt) +{ + if (*nr_irqs && *alloc < cnt) { + (*alloc)++; + (*nr_irqs)--; + } +} + +static inline void dw_edma_add_irq_mask(u32 *mask, u32 alloc, u16 cnt) +{ + while (*mask * alloc < cnt) + (*mask)++; +} + +static int dw_edma_irq_request(struct dw_edma *dw, + u32 *wr_alloc, u32 *rd_alloc) +{ + struct dw_edma_chip *chip = dw->chip; + struct device *dev = dw->chip->dev; + u32 wr_mask = 1; + u32 rd_mask = 1; + int i, err = 0; + u32 ch_cnt; + int irq; + + ch_cnt = dw->wr_ch_cnt + dw->rd_ch_cnt; + + if (chip->nr_irqs < 1 || !chip->ops->irq_vector) + return -EINVAL; + + dw->irq = devm_kcalloc(dev, chip->nr_irqs, sizeof(*dw->irq), GFP_KERNEL); + if (!dw->irq) + return -ENOMEM; + + if (chip->nr_irqs == 1) { + /* Common IRQ shared among all channels */ + irq = chip->ops->irq_vector(dev, 0); + err = request_irq(irq, dw_edma_interrupt_common, + IRQF_SHARED, dw->name, &dw->irq[0]); + if (err) { + dw->nr_irqs = 0; + return err; + } + + if (irq_get_msi_desc(irq)) + get_cached_msi_msg(irq, &dw->irq[0].msi); + + dw->nr_irqs = 1; + } else { + /* Distribute IRQs equally among all channels */ + int tmp = chip->nr_irqs; + + while (tmp && (*wr_alloc + *rd_alloc) < ch_cnt) { + dw_edma_dec_irq_alloc(&tmp, wr_alloc, dw->wr_ch_cnt); + dw_edma_dec_irq_alloc(&tmp, rd_alloc, dw->rd_ch_cnt); + } + + dw_edma_add_irq_mask(&wr_mask, *wr_alloc, dw->wr_ch_cnt); + dw_edma_add_irq_mask(&rd_mask, *rd_alloc, dw->rd_ch_cnt); + + for (i = 0; i < (*wr_alloc + *rd_alloc); i++) { + irq = chip->ops->irq_vector(dev, i); + err = request_irq(irq, + i < *wr_alloc ? 
+ dw_edma_interrupt_write : + dw_edma_interrupt_read, + IRQF_SHARED, dw->name, + &dw->irq[i]); + if (err) { + dw->nr_irqs = i; + return err; + } + + if (irq_get_msi_desc(irq)) + get_cached_msi_msg(irq, &dw->irq[i].msi); + } + + dw->nr_irqs = i; + } + + return err; +} + +int dw_edma_probe(struct dw_edma_chip *chip) +{ + struct device *dev; + struct dw_edma *dw; + u32 wr_alloc = 0; + u32 rd_alloc = 0; + int i, err; + + if (!chip) + return -EINVAL; + + dev = chip->dev; + if (!dev || !chip->ops) + return -EINVAL; + + dw = devm_kzalloc(dev, sizeof(*dw), GFP_KERNEL); + if (!dw) + return -ENOMEM; + + dw->chip = chip; + + raw_spin_lock_init(&dw->lock); + + dw->wr_ch_cnt = min_t(u16, chip->ll_wr_cnt, + dw_edma_v0_core_ch_count(dw, EDMA_DIR_WRITE)); + dw->wr_ch_cnt = min_t(u16, dw->wr_ch_cnt, EDMA_MAX_WR_CH); + + dw->rd_ch_cnt = min_t(u16, chip->ll_rd_cnt, + dw_edma_v0_core_ch_count(dw, EDMA_DIR_READ)); + dw->rd_ch_cnt = min_t(u16, dw->rd_ch_cnt, EDMA_MAX_RD_CH); + + if (!dw->wr_ch_cnt && !dw->rd_ch_cnt) + return -EINVAL; + + dev_vdbg(dev, "Channels:\twrite=%d, read=%d\n", + dw->wr_ch_cnt, dw->rd_ch_cnt); + + /* Allocate channels */ + dw->chan = devm_kcalloc(dev, dw->wr_ch_cnt + dw->rd_ch_cnt, + sizeof(*dw->chan), GFP_KERNEL); + if (!dw->chan) + return -ENOMEM; + + snprintf(dw->name, sizeof(dw->name), "dw-edma-core:%d", chip->id); + + /* Disable eDMA, only to establish the ideal initial conditions */ + dw_edma_v0_core_off(dw); + + /* Request IRQs */ + err = dw_edma_irq_request(dw, &wr_alloc, &rd_alloc); + if (err) + return err; + + /* Setup write channels */ + err = dw_edma_channel_setup(dw, true, wr_alloc, rd_alloc); + if (err) + goto err_irq_free; + + /* Setup read channels */ + err = dw_edma_channel_setup(dw, false, wr_alloc, rd_alloc); + if (err) + goto err_irq_free; + + /* Turn debugfs on */ + dw_edma_v0_core_debugfs_on(dw); + + chip->dw = dw; + + return 0; + +err_irq_free: + for (i = (dw->nr_irqs - 1); i >= 0; i--) + free_irq(chip->ops->irq_vector(dev, i), &dw->irq[i]); + + return err; +} +EXPORT_SYMBOL_GPL(dw_edma_probe); + +int dw_edma_remove(struct dw_edma_chip *chip) +{ + struct dw_edma_chan *chan, *_chan; + struct device *dev = chip->dev; + struct dw_edma *dw = chip->dw; + int i; + + /* Disable eDMA */ + dw_edma_v0_core_off(dw); + + /* Free irqs */ + for (i = (dw->nr_irqs - 1); i >= 0; i--) + free_irq(chip->ops->irq_vector(dev, i), &dw->irq[i]); + + /* Deregister eDMA device */ + dma_async_device_unregister(&dw->wr_edma); + list_for_each_entry_safe(chan, _chan, &dw->wr_edma.channels, + vc.chan.device_node) { + tasklet_kill(&chan->vc.task); + list_del(&chan->vc.chan.device_node); + } + + dma_async_device_unregister(&dw->rd_edma); + list_for_each_entry_safe(chan, _chan, &dw->rd_edma.channels, + vc.chan.device_node) { + tasklet_kill(&chan->vc.task); + list_del(&chan->vc.chan.device_node); + } + + /* Turn debugfs off */ + dw_edma_v0_core_debugfs_off(dw); + + return 0; +} +EXPORT_SYMBOL_GPL(dw_edma_remove); + +MODULE_LICENSE("GPL v2"); +MODULE_DESCRIPTION("Synopsys DesignWare eDMA controller core driver"); +MODULE_AUTHOR("Gustavo Pimentel "); diff --git a/drivers/dma/dw-edma/dw-edma-core.h b/drivers/dma/dw-edma/dw-edma-core.h new file mode 100644 index 000000000..85df2d511 --- /dev/null +++ b/drivers/dma/dw-edma/dw-edma-core.h @@ -0,0 +1,155 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2018-2019 Synopsys, Inc. and/or its affiliates. 
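+ *
+ * The structures in this header describe one prepared transfer as a
+ * three-level hierarchy (see the alloc/free helpers in dw-edma-core.c):
+ *
+ *	struct dw_edma_desc		one dmaengine transaction
+ *	    struct dw_edma_chunk	one hardware linked list; holds at
+ *					most ll_max bursts, leaving one
+ *					EDMA_LL_SZ slot for the closing LLP
+ *					entry, with the CB bit alternating
+ *					between consecutive chunks
+ *	        struct dw_edma_burst	one SAR/DAR/size element of the list
+ *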
+ * Synopsys DesignWare eDMA core driver + * + * Author: Gustavo Pimentel + */ + +#ifndef _DW_EDMA_CORE_H +#define _DW_EDMA_CORE_H + +#include +#include + +#include "../virt-dma.h" + +#define EDMA_LL_SZ 24 + +enum dw_edma_dir { + EDMA_DIR_WRITE = 0, + EDMA_DIR_READ +}; + +enum dw_edma_request { + EDMA_REQ_NONE = 0, + EDMA_REQ_STOP, + EDMA_REQ_PAUSE +}; + +enum dw_edma_status { + EDMA_ST_IDLE = 0, + EDMA_ST_PAUSE, + EDMA_ST_BUSY +}; + +enum dw_edma_xfer_type { + EDMA_XFER_SCATTER_GATHER = 0, + EDMA_XFER_CYCLIC, + EDMA_XFER_INTERLEAVED +}; + +struct dw_edma_chan; +struct dw_edma_chunk; + +struct dw_edma_burst { + struct list_head list; + u64 sar; + u64 dar; + u32 sz; +}; + +struct dw_edma_chunk { + struct list_head list; + struct dw_edma_chan *chan; + struct dw_edma_burst *burst; + + u32 bursts_alloc; + + u8 cb; + struct dw_edma_region ll_region; /* Linked list */ +}; + +struct dw_edma_desc { + struct virt_dma_desc vd; + struct dw_edma_chan *chan; + struct dw_edma_chunk *chunk; + + u32 chunks_alloc; + + u32 alloc_sz; + u32 xfer_sz; +}; + +struct dw_edma_chan { + struct virt_dma_chan vc; + struct dw_edma *dw; + int id; + enum dw_edma_dir dir; + + u32 ll_max; + + struct msi_msg msi; + + enum dw_edma_request request; + enum dw_edma_status status; + u8 configured; + + struct dma_slave_config config; +}; + +struct dw_edma_irq { + struct msi_msg msi; + u32 wr_mask; + u32 rd_mask; + struct dw_edma *dw; +}; + +struct dw_edma { + char name[20]; + + struct dma_device wr_edma; + u16 wr_ch_cnt; + + struct dma_device rd_edma; + u16 rd_ch_cnt; + + struct dw_edma_irq *irq; + int nr_irqs; + + struct dw_edma_chan *chan; + + raw_spinlock_t lock; /* Only for legacy */ + + struct dw_edma_chip *chip; +#ifdef CONFIG_DEBUG_FS + struct dentry *debugfs; +#endif /* CONFIG_DEBUG_FS */ +}; + +struct dw_edma_sg { + struct scatterlist *sgl; + unsigned int len; +}; + +struct dw_edma_cyclic { + dma_addr_t paddr; + size_t len; + size_t cnt; +}; + +struct dw_edma_transfer { + struct dma_chan *dchan; + union dw_edma_xfer { + struct dw_edma_sg sg; + struct dw_edma_cyclic cyclic; + struct dma_interleaved_template *il; + } xfer; + enum dma_transfer_direction direction; + unsigned long flags; + enum dw_edma_xfer_type type; +}; + +static inline +struct dw_edma_chan *vc2dw_edma_chan(struct virt_dma_chan *vc) +{ + return container_of(vc, struct dw_edma_chan, vc); +} + +static inline +struct dw_edma_chan *dchan2dw_edma_chan(struct dma_chan *dchan) +{ + return vc2dw_edma_chan(to_virt_chan(dchan)); +} + +#endif /* _DW_EDMA_CORE_H */ diff --git a/drivers/dma/dw-edma/dw-edma-pcie.c b/drivers/dma/dw-edma/dw-edma-pcie.c new file mode 100644 index 000000000..d6b5e2463 --- /dev/null +++ b/drivers/dma/dw-edma/dw-edma-pcie.c @@ -0,0 +1,364 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2018-2019 Synopsys, Inc. and/or its affiliates. 
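+ *
+ * This file is only glue: it maps the BARs, reads the optional vendor
+ * capability and fills in a struct dw_edma_chip for dw_edma_probe(). A
+ * different, hypothetical non-PCIe glue driver would hand over the same
+ * kind of description; the field names below are the real ones used in
+ * this file, the values are invented:
+ *
+ *	chip->dev = dev;
+ *	chip->id = 0;
+ *	chip->mf = EDMA_MF_EDMA_UNROLL;
+ *	chip->nr_irqs = 1;
+ *	chip->ops = &my_platform_ops;		/* provides ->irq_vector() */
+ *	chip->reg_base = my_csr_base;
+ *	chip->ll_wr_cnt = 1;
+ *	chip->ll_rd_cnt = 1;
+ *	chip->ll_region_wr[0] = my_ll_region;	/* paddr/vaddr/sz */
+ *	chip->dt_region_wr[0] = my_dt_region;
+ *	/* likewise for the rd regions */
+ *	err = dw_edma_probe(chip);
+ *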
+ * Synopsys DesignWare eDMA PCIe driver + * + * Author: Gustavo Pimentel + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "dw-edma-core.h" + +#define DW_PCIE_VSEC_DMA_ID 0x6 +#define DW_PCIE_VSEC_DMA_BAR GENMASK(10, 8) +#define DW_PCIE_VSEC_DMA_MAP GENMASK(2, 0) +#define DW_PCIE_VSEC_DMA_WR_CH GENMASK(9, 0) +#define DW_PCIE_VSEC_DMA_RD_CH GENMASK(25, 16) + +#define DW_BLOCK(a, b, c) \ + { \ + .bar = a, \ + .off = b, \ + .sz = c, \ + }, + +struct dw_edma_block { + enum pci_barno bar; + off_t off; + size_t sz; +}; + +struct dw_edma_pcie_data { + /* eDMA registers location */ + struct dw_edma_block rg; + /* eDMA memory linked list location */ + struct dw_edma_block ll_wr[EDMA_MAX_WR_CH]; + struct dw_edma_block ll_rd[EDMA_MAX_RD_CH]; + /* eDMA memory data location */ + struct dw_edma_block dt_wr[EDMA_MAX_WR_CH]; + struct dw_edma_block dt_rd[EDMA_MAX_RD_CH]; + /* Other */ + enum dw_edma_map_format mf; + u8 irqs; + u16 wr_ch_cnt; + u16 rd_ch_cnt; +}; + +static const struct dw_edma_pcie_data snps_edda_data = { + /* eDMA registers location */ + .rg.bar = BAR_0, + .rg.off = 0x00001000, /* 4 Kbytes */ + .rg.sz = 0x00002000, /* 8 Kbytes */ + /* eDMA memory linked list location */ + .ll_wr = { + /* Channel 0 - BAR 2, offset 0 Mbytes, size 2 Kbytes */ + DW_BLOCK(BAR_2, 0x00000000, 0x00000800) + /* Channel 1 - BAR 2, offset 2 Mbytes, size 2 Kbytes */ + DW_BLOCK(BAR_2, 0x00200000, 0x00000800) + }, + .ll_rd = { + /* Channel 0 - BAR 2, offset 4 Mbytes, size 2 Kbytes */ + DW_BLOCK(BAR_2, 0x00400000, 0x00000800) + /* Channel 1 - BAR 2, offset 6 Mbytes, size 2 Kbytes */ + DW_BLOCK(BAR_2, 0x00600000, 0x00000800) + }, + /* eDMA memory data location */ + .dt_wr = { + /* Channel 0 - BAR 2, offset 8 Mbytes, size 2 Kbytes */ + DW_BLOCK(BAR_2, 0x00800000, 0x00000800) + /* Channel 1 - BAR 2, offset 9 Mbytes, size 2 Kbytes */ + DW_BLOCK(BAR_2, 0x00900000, 0x00000800) + }, + .dt_rd = { + /* Channel 0 - BAR 2, offset 10 Mbytes, size 2 Kbytes */ + DW_BLOCK(BAR_2, 0x00a00000, 0x00000800) + /* Channel 1 - BAR 2, offset 11 Mbytes, size 2 Kbytes */ + DW_BLOCK(BAR_2, 0x00b00000, 0x00000800) + }, + /* Other */ + .mf = EDMA_MF_EDMA_UNROLL, + .irqs = 1, + .wr_ch_cnt = 2, + .rd_ch_cnt = 2, +}; + +static int dw_edma_pcie_irq_vector(struct device *dev, unsigned int nr) +{ + return pci_irq_vector(to_pci_dev(dev), nr); +} + +static const struct dw_edma_core_ops dw_edma_pcie_core_ops = { + .irq_vector = dw_edma_pcie_irq_vector, +}; + +static void dw_edma_pcie_get_vsec_dma_data(struct pci_dev *pdev, + struct dw_edma_pcie_data *pdata) +{ + u32 val, map; + u16 vsec; + u64 off; + + vsec = pci_find_vsec_capability(pdev, PCI_VENDOR_ID_SYNOPSYS, + DW_PCIE_VSEC_DMA_ID); + if (!vsec) + return; + + pci_read_config_dword(pdev, vsec + PCI_VNDR_HEADER, &val); + if (PCI_VNDR_HEADER_REV(val) != 0x00 || + PCI_VNDR_HEADER_LEN(val) != 0x18) + return; + + pci_dbg(pdev, "Detected PCIe Vendor-Specific Extended Capability DMA\n"); + pci_read_config_dword(pdev, vsec + 0x8, &val); + map = FIELD_GET(DW_PCIE_VSEC_DMA_MAP, val); + if (map != EDMA_MF_EDMA_LEGACY && + map != EDMA_MF_EDMA_UNROLL && + map != EDMA_MF_HDMA_COMPAT) + return; + + pdata->mf = map; + pdata->rg.bar = FIELD_GET(DW_PCIE_VSEC_DMA_BAR, val); + + pci_read_config_dword(pdev, vsec + 0xc, &val); + pdata->wr_ch_cnt = min_t(u16, pdata->wr_ch_cnt, + FIELD_GET(DW_PCIE_VSEC_DMA_WR_CH, val)); + pdata->rd_ch_cnt = min_t(u16, pdata->rd_ch_cnt, + FIELD_GET(DW_PCIE_VSEC_DMA_RD_CH, val)); + + pci_read_config_dword(pdev, vsec + 0x14, &val); + off = val; + 
pci_read_config_dword(pdev, vsec + 0x10, &val); + off <<= 32; + off |= val; + pdata->rg.off = off; +} + +static int dw_edma_pcie_probe(struct pci_dev *pdev, + const struct pci_device_id *pid) +{ + struct dw_edma_pcie_data *pdata = (void *)pid->driver_data; + struct dw_edma_pcie_data vsec_data; + struct device *dev = &pdev->dev; + struct dw_edma_chip *chip; + int err, nr_irqs; + int i, mask; + + /* Enable PCI device */ + err = pcim_enable_device(pdev); + if (err) { + pci_err(pdev, "enabling device failed\n"); + return err; + } + + memcpy(&vsec_data, pdata, sizeof(struct dw_edma_pcie_data)); + + /* + * Tries to find if exists a PCIe Vendor-Specific Extended Capability + * for the DMA, if one exists, then reconfigures it. + */ + dw_edma_pcie_get_vsec_dma_data(pdev, &vsec_data); + + /* Mapping PCI BAR regions */ + mask = BIT(vsec_data.rg.bar); + for (i = 0; i < vsec_data.wr_ch_cnt; i++) { + mask |= BIT(vsec_data.ll_wr[i].bar); + mask |= BIT(vsec_data.dt_wr[i].bar); + } + for (i = 0; i < vsec_data.rd_ch_cnt; i++) { + mask |= BIT(vsec_data.ll_rd[i].bar); + mask |= BIT(vsec_data.dt_rd[i].bar); + } + err = pcim_iomap_regions(pdev, mask, pci_name(pdev)); + if (err) { + pci_err(pdev, "eDMA BAR I/O remapping failed\n"); + return err; + } + + pci_set_master(pdev); + + /* DMA configuration */ + err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); + if (err) { + pci_err(pdev, "DMA mask 64 set failed\n"); + return err; + } + + /* Data structure allocation */ + chip = devm_kzalloc(dev, sizeof(*chip), GFP_KERNEL); + if (!chip) + return -ENOMEM; + + /* IRQs allocation */ + nr_irqs = pci_alloc_irq_vectors(pdev, 1, vsec_data.irqs, + PCI_IRQ_MSI | PCI_IRQ_MSIX); + if (nr_irqs < 1) { + pci_err(pdev, "fail to alloc IRQ vector (number of IRQs=%u)\n", + nr_irqs); + return -EPERM; + } + + /* Data structure initialization */ + chip->dev = dev; + chip->id = pdev->devfn; + + chip->mf = vsec_data.mf; + chip->nr_irqs = nr_irqs; + chip->ops = &dw_edma_pcie_core_ops; + + chip->ll_wr_cnt = vsec_data.wr_ch_cnt; + chip->ll_rd_cnt = vsec_data.rd_ch_cnt; + + chip->reg_base = pcim_iomap_table(pdev)[vsec_data.rg.bar]; + if (!chip->reg_base) + return -ENOMEM; + + for (i = 0; i < chip->ll_wr_cnt; i++) { + struct dw_edma_region *ll_region = &chip->ll_region_wr[i]; + struct dw_edma_region *dt_region = &chip->dt_region_wr[i]; + struct dw_edma_block *ll_block = &vsec_data.ll_wr[i]; + struct dw_edma_block *dt_block = &vsec_data.dt_wr[i]; + + ll_region->vaddr = pcim_iomap_table(pdev)[ll_block->bar]; + if (!ll_region->vaddr) + return -ENOMEM; + + ll_region->vaddr += ll_block->off; + ll_region->paddr = pdev->resource[ll_block->bar].start; + ll_region->paddr += ll_block->off; + ll_region->sz = ll_block->sz; + + dt_region->vaddr = pcim_iomap_table(pdev)[dt_block->bar]; + if (!dt_region->vaddr) + return -ENOMEM; + + dt_region->vaddr += dt_block->off; + dt_region->paddr = pdev->resource[dt_block->bar].start; + dt_region->paddr += dt_block->off; + dt_region->sz = dt_block->sz; + } + + for (i = 0; i < chip->ll_rd_cnt; i++) { + struct dw_edma_region *ll_region = &chip->ll_region_rd[i]; + struct dw_edma_region *dt_region = &chip->dt_region_rd[i]; + struct dw_edma_block *ll_block = &vsec_data.ll_rd[i]; + struct dw_edma_block *dt_block = &vsec_data.dt_rd[i]; + + ll_region->vaddr = pcim_iomap_table(pdev)[ll_block->bar]; + if (!ll_region->vaddr) + return -ENOMEM; + + ll_region->vaddr += ll_block->off; + ll_region->paddr = pdev->resource[ll_block->bar].start; + ll_region->paddr += ll_block->off; + ll_region->sz = ll_block->sz; + + 
dt_region->vaddr = pcim_iomap_table(pdev)[dt_block->bar]; + if (!dt_region->vaddr) + return -ENOMEM; + + dt_region->vaddr += dt_block->off; + dt_region->paddr = pdev->resource[dt_block->bar].start; + dt_region->paddr += dt_block->off; + dt_region->sz = dt_block->sz; + } + + /* Debug info */ + if (chip->mf == EDMA_MF_EDMA_LEGACY) + pci_dbg(pdev, "Version:\teDMA Port Logic (0x%x)\n", chip->mf); + else if (chip->mf == EDMA_MF_EDMA_UNROLL) + pci_dbg(pdev, "Version:\teDMA Unroll (0x%x)\n", chip->mf); + else if (chip->mf == EDMA_MF_HDMA_COMPAT) + pci_dbg(pdev, "Version:\tHDMA Compatible (0x%x)\n", chip->mf); + else + pci_dbg(pdev, "Version:\tUnknown (0x%x)\n", chip->mf); + + pci_dbg(pdev, "Registers:\tBAR=%u, off=0x%.8lx, sz=0x%zx bytes, addr(v=%p)\n", + vsec_data.rg.bar, vsec_data.rg.off, vsec_data.rg.sz, + chip->reg_base); + + + for (i = 0; i < chip->ll_wr_cnt; i++) { + pci_dbg(pdev, "L. List:\tWRITE CH%.2u, BAR=%u, off=0x%.8lx, sz=0x%zx bytes, addr(v=%p, p=%pa)\n", + i, vsec_data.ll_wr[i].bar, + vsec_data.ll_wr[i].off, chip->ll_region_wr[i].sz, + chip->ll_region_wr[i].vaddr, &chip->ll_region_wr[i].paddr); + + pci_dbg(pdev, "Data:\tWRITE CH%.2u, BAR=%u, off=0x%.8lx, sz=0x%zx bytes, addr(v=%p, p=%pa)\n", + i, vsec_data.dt_wr[i].bar, + vsec_data.dt_wr[i].off, chip->dt_region_wr[i].sz, + chip->dt_region_wr[i].vaddr, &chip->dt_region_wr[i].paddr); + } + + for (i = 0; i < chip->ll_rd_cnt; i++) { + pci_dbg(pdev, "L. List:\tREAD CH%.2u, BAR=%u, off=0x%.8lx, sz=0x%zx bytes, addr(v=%p, p=%pa)\n", + i, vsec_data.ll_rd[i].bar, + vsec_data.ll_rd[i].off, chip->ll_region_rd[i].sz, + chip->ll_region_rd[i].vaddr, &chip->ll_region_rd[i].paddr); + + pci_dbg(pdev, "Data:\tREAD CH%.2u, BAR=%u, off=0x%.8lx, sz=0x%zx bytes, addr(v=%p, p=%pa)\n", + i, vsec_data.dt_rd[i].bar, + vsec_data.dt_rd[i].off, chip->dt_region_rd[i].sz, + chip->dt_region_rd[i].vaddr, &chip->dt_region_rd[i].paddr); + } + + pci_dbg(pdev, "Nr. IRQs:\t%u\n", chip->nr_irqs); + + /* Validating if PCI interrupts were enabled */ + if (!pci_dev_msi_enabled(pdev)) { + pci_err(pdev, "enable interrupt failed\n"); + return -EPERM; + } + + /* Starting eDMA driver */ + err = dw_edma_probe(chip); + if (err) { + pci_err(pdev, "eDMA probe failed\n"); + return err; + } + + /* Saving data structure reference */ + pci_set_drvdata(pdev, chip); + + return 0; +} + +static void dw_edma_pcie_remove(struct pci_dev *pdev) +{ + struct dw_edma_chip *chip = pci_get_drvdata(pdev); + int err; + + /* Stopping eDMA driver */ + err = dw_edma_remove(chip); + if (err) + pci_warn(pdev, "can't remove device properly: %d\n", err); + + /* Freeing IRQs */ + pci_free_irq_vectors(pdev); +} + +static const struct pci_device_id dw_edma_pcie_id_table[] = { + { PCI_DEVICE_DATA(SYNOPSYS, EDDA, &snps_edda_data) }, + { } +}; +MODULE_DEVICE_TABLE(pci, dw_edma_pcie_id_table); + +static struct pci_driver dw_edma_pcie_driver = { + .name = "dw-edma-pcie", + .id_table = dw_edma_pcie_id_table, + .probe = dw_edma_pcie_probe, + .remove = dw_edma_pcie_remove, +}; + +module_pci_driver(dw_edma_pcie_driver); + +MODULE_LICENSE("GPL v2"); +MODULE_DESCRIPTION("Synopsys DesignWare eDMA PCIe driver"); +MODULE_AUTHOR("Gustavo Pimentel "); diff --git a/drivers/dma/dw-edma/dw-edma-v0-core.c b/drivers/dma/dw-edma/dw-edma-v0-core.c new file mode 100644 index 000000000..a3816ba63 --- /dev/null +++ b/drivers/dma/dw-edma/dw-edma-v0-core.c @@ -0,0 +1,511 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2018-2019 Synopsys, Inc. and/or its affiliates. 
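+ *
+ * In the legacy register map all channels share a single window of
+ * channel registers, so every channel access below first selects the
+ * target through the viewport. Simplified from writel_ch() further down
+ * (the unrolled map skips all of this and writes the register directly):
+ *
+ *	raw_spin_lock_irqsave(&dw->lock, flags);
+ *	viewport_sel = FIELD_PREP(EDMA_V0_VIEWPORT_MASK, ch);
+ *	if (dir == EDMA_DIR_READ)
+ *		viewport_sel |= BIT(31);	/* select the read bank */
+ *	writel(viewport_sel, &regs->type.legacy.viewport_sel);
+ *	writel(value, addr);			/* now lands on channel ch */
+ *	raw_spin_unlock_irqrestore(&dw->lock, flags);
+ *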
+ * Synopsys DesignWare eDMA v0 core + * + * Author: Gustavo Pimentel + */ + +#include + +#include "dw-edma-core.h" +#include "dw-edma-v0-core.h" +#include "dw-edma-v0-regs.h" +#include "dw-edma-v0-debugfs.h" + +enum dw_edma_control { + DW_EDMA_V0_CB = BIT(0), + DW_EDMA_V0_TCB = BIT(1), + DW_EDMA_V0_LLP = BIT(2), + DW_EDMA_V0_LIE = BIT(3), + DW_EDMA_V0_RIE = BIT(4), + DW_EDMA_V0_CCS = BIT(8), + DW_EDMA_V0_LLE = BIT(9), +}; + +static inline struct dw_edma_v0_regs __iomem *__dw_regs(struct dw_edma *dw) +{ + return dw->chip->reg_base; +} + +#define SET_32(dw, name, value) \ + writel(value, &(__dw_regs(dw)->name)) + +#define GET_32(dw, name) \ + readl(&(__dw_regs(dw)->name)) + +#define SET_RW_32(dw, dir, name, value) \ + do { \ + if ((dir) == EDMA_DIR_WRITE) \ + SET_32(dw, wr_##name, value); \ + else \ + SET_32(dw, rd_##name, value); \ + } while (0) + +#define GET_RW_32(dw, dir, name) \ + ((dir) == EDMA_DIR_WRITE \ + ? GET_32(dw, wr_##name) \ + : GET_32(dw, rd_##name)) + +#define SET_BOTH_32(dw, name, value) \ + do { \ + SET_32(dw, wr_##name, value); \ + SET_32(dw, rd_##name, value); \ + } while (0) + +#ifdef CONFIG_64BIT + +#define SET_64(dw, name, value) \ + writeq(value, &(__dw_regs(dw)->name)) + +#define GET_64(dw, name) \ + readq(&(__dw_regs(dw)->name)) + +#define SET_RW_64(dw, dir, name, value) \ + do { \ + if ((dir) == EDMA_DIR_WRITE) \ + SET_64(dw, wr_##name, value); \ + else \ + SET_64(dw, rd_##name, value); \ + } while (0) + +#define GET_RW_64(dw, dir, name) \ + ((dir) == EDMA_DIR_WRITE \ + ? GET_64(dw, wr_##name) \ + : GET_64(dw, rd_##name)) + +#define SET_BOTH_64(dw, name, value) \ + do { \ + SET_64(dw, wr_##name, value); \ + SET_64(dw, rd_##name, value); \ + } while (0) + +#endif /* CONFIG_64BIT */ + +#define SET_COMPAT(dw, name, value) \ + writel(value, &(__dw_regs(dw)->type.unroll.name)) + +#define SET_RW_COMPAT(dw, dir, name, value) \ + do { \ + if ((dir) == EDMA_DIR_WRITE) \ + SET_COMPAT(dw, wr_##name, value); \ + else \ + SET_COMPAT(dw, rd_##name, value); \ + } while (0) + +static inline struct dw_edma_v0_ch_regs __iomem * +__dw_ch_regs(struct dw_edma *dw, enum dw_edma_dir dir, u16 ch) +{ + if (dw->chip->mf == EDMA_MF_EDMA_LEGACY) + return &(__dw_regs(dw)->type.legacy.ch); + + if (dir == EDMA_DIR_WRITE) + return &__dw_regs(dw)->type.unroll.ch[ch].wr; + + return &__dw_regs(dw)->type.unroll.ch[ch].rd; +} + +static inline void writel_ch(struct dw_edma *dw, enum dw_edma_dir dir, u16 ch, + u32 value, void __iomem *addr) +{ + if (dw->chip->mf == EDMA_MF_EDMA_LEGACY) { + u32 viewport_sel; + unsigned long flags; + + raw_spin_lock_irqsave(&dw->lock, flags); + + viewport_sel = FIELD_PREP(EDMA_V0_VIEWPORT_MASK, ch); + if (dir == EDMA_DIR_READ) + viewport_sel |= BIT(31); + + writel(viewport_sel, + &(__dw_regs(dw)->type.legacy.viewport_sel)); + writel(value, addr); + + raw_spin_unlock_irqrestore(&dw->lock, flags); + } else { + writel(value, addr); + } +} + +static inline u32 readl_ch(struct dw_edma *dw, enum dw_edma_dir dir, u16 ch, + const void __iomem *addr) +{ + u32 value; + + if (dw->chip->mf == EDMA_MF_EDMA_LEGACY) { + u32 viewport_sel; + unsigned long flags; + + raw_spin_lock_irqsave(&dw->lock, flags); + + viewport_sel = FIELD_PREP(EDMA_V0_VIEWPORT_MASK, ch); + if (dir == EDMA_DIR_READ) + viewport_sel |= BIT(31); + + writel(viewport_sel, + &(__dw_regs(dw)->type.legacy.viewport_sel)); + value = readl(addr); + + raw_spin_unlock_irqrestore(&dw->lock, flags); + } else { + value = readl(addr); + } + + return value; +} + +#define SET_CH_32(dw, dir, ch, name, value) \ + writel_ch(dw, dir, 
ch, value, &(__dw_ch_regs(dw, dir, ch)->name)) + +#define GET_CH_32(dw, dir, ch, name) \ + readl_ch(dw, dir, ch, &(__dw_ch_regs(dw, dir, ch)->name)) + +#define SET_LL_32(ll, value) \ + writel(value, ll) + +#ifdef CONFIG_64BIT + +static inline void writeq_ch(struct dw_edma *dw, enum dw_edma_dir dir, u16 ch, + u64 value, void __iomem *addr) +{ + if (dw->chip->mf == EDMA_MF_EDMA_LEGACY) { + u32 viewport_sel; + unsigned long flags; + + raw_spin_lock_irqsave(&dw->lock, flags); + + viewport_sel = FIELD_PREP(EDMA_V0_VIEWPORT_MASK, ch); + if (dir == EDMA_DIR_READ) + viewport_sel |= BIT(31); + + writel(viewport_sel, + &(__dw_regs(dw)->type.legacy.viewport_sel)); + writeq(value, addr); + + raw_spin_unlock_irqrestore(&dw->lock, flags); + } else { + writeq(value, addr); + } +} + +static inline u64 readq_ch(struct dw_edma *dw, enum dw_edma_dir dir, u16 ch, + const void __iomem *addr) +{ + u64 value; + + if (dw->chip->mf == EDMA_MF_EDMA_LEGACY) { + u32 viewport_sel; + unsigned long flags; + + raw_spin_lock_irqsave(&dw->lock, flags); + + viewport_sel = FIELD_PREP(EDMA_V0_VIEWPORT_MASK, ch); + if (dir == EDMA_DIR_READ) + viewport_sel |= BIT(31); + + writel(viewport_sel, + &(__dw_regs(dw)->type.legacy.viewport_sel)); + value = readq(addr); + + raw_spin_unlock_irqrestore(&dw->lock, flags); + } else { + value = readq(addr); + } + + return value; +} + +#define SET_CH_64(dw, dir, ch, name, value) \ + writeq_ch(dw, dir, ch, value, &(__dw_ch_regs(dw, dir, ch)->name)) + +#define GET_CH_64(dw, dir, ch, name) \ + readq_ch(dw, dir, ch, &(__dw_ch_regs(dw, dir, ch)->name)) + +#define SET_LL_64(ll, value) \ + writeq(value, ll) + +#endif /* CONFIG_64BIT */ + +/* eDMA management callbacks */ +void dw_edma_v0_core_off(struct dw_edma *dw) +{ + SET_BOTH_32(dw, int_mask, + EDMA_V0_DONE_INT_MASK | EDMA_V0_ABORT_INT_MASK); + SET_BOTH_32(dw, int_clear, + EDMA_V0_DONE_INT_MASK | EDMA_V0_ABORT_INT_MASK); + SET_BOTH_32(dw, engine_en, 0); +} + +u16 dw_edma_v0_core_ch_count(struct dw_edma *dw, enum dw_edma_dir dir) +{ + u32 num_ch; + + if (dir == EDMA_DIR_WRITE) + num_ch = FIELD_GET(EDMA_V0_WRITE_CH_COUNT_MASK, + GET_32(dw, ctrl)); + else + num_ch = FIELD_GET(EDMA_V0_READ_CH_COUNT_MASK, + GET_32(dw, ctrl)); + + if (num_ch > EDMA_V0_MAX_NR_CH) + num_ch = EDMA_V0_MAX_NR_CH; + + return (u16)num_ch; +} + +enum dma_status dw_edma_v0_core_ch_status(struct dw_edma_chan *chan) +{ + struct dw_edma *dw = chan->dw; + u32 tmp; + + tmp = FIELD_GET(EDMA_V0_CH_STATUS_MASK, + GET_CH_32(dw, chan->dir, chan->id, ch_control1)); + + if (tmp == 1) + return DMA_IN_PROGRESS; + else if (tmp == 3) + return DMA_COMPLETE; + else + return DMA_ERROR; +} + +void dw_edma_v0_core_clear_done_int(struct dw_edma_chan *chan) +{ + struct dw_edma *dw = chan->dw; + + SET_RW_32(dw, chan->dir, int_clear, + FIELD_PREP(EDMA_V0_DONE_INT_MASK, BIT(chan->id))); +} + +void dw_edma_v0_core_clear_abort_int(struct dw_edma_chan *chan) +{ + struct dw_edma *dw = chan->dw; + + SET_RW_32(dw, chan->dir, int_clear, + FIELD_PREP(EDMA_V0_ABORT_INT_MASK, BIT(chan->id))); +} + +u32 dw_edma_v0_core_status_done_int(struct dw_edma *dw, enum dw_edma_dir dir) +{ + return FIELD_GET(EDMA_V0_DONE_INT_MASK, + GET_RW_32(dw, dir, int_status)); +} + +u32 dw_edma_v0_core_status_abort_int(struct dw_edma *dw, enum dw_edma_dir dir) +{ + return FIELD_GET(EDMA_V0_ABORT_INT_MASK, + GET_RW_32(dw, dir, int_status)); +} + +static void dw_edma_v0_core_write_chunk(struct dw_edma_chunk *chunk) +{ + struct dw_edma_burst *child; + struct dw_edma_chan *chan = chunk->chan; + struct dw_edma_v0_lli __iomem *lli; + struct 
dw_edma_v0_llp __iomem *llp; + u32 control = 0, i = 0; + int j; + + lli = chunk->ll_region.vaddr; + + if (chunk->cb) + control = DW_EDMA_V0_CB; + + j = chunk->bursts_alloc; + list_for_each_entry(child, &chunk->burst->list, list) { + j--; + if (!j) { + control |= DW_EDMA_V0_LIE; + if (!(chan->dw->chip->flags & DW_EDMA_CHIP_LOCAL)) + control |= DW_EDMA_V0_RIE; + } + /* Channel control */ + SET_LL_32(&lli[i].control, control); + /* Transfer size */ + SET_LL_32(&lli[i].transfer_size, child->sz); + /* SAR */ + #ifdef CONFIG_64BIT + SET_LL_64(&lli[i].sar.reg, child->sar); + #else /* CONFIG_64BIT */ + SET_LL_32(&lli[i].sar.lsb, lower_32_bits(child->sar)); + SET_LL_32(&lli[i].sar.msb, upper_32_bits(child->sar)); + #endif /* CONFIG_64BIT */ + /* DAR */ + #ifdef CONFIG_64BIT + SET_LL_64(&lli[i].dar.reg, child->dar); + #else /* CONFIG_64BIT */ + SET_LL_32(&lli[i].dar.lsb, lower_32_bits(child->dar)); + SET_LL_32(&lli[i].dar.msb, upper_32_bits(child->dar)); + #endif /* CONFIG_64BIT */ + i++; + } + + llp = (void __iomem *)&lli[i]; + control = DW_EDMA_V0_LLP | DW_EDMA_V0_TCB; + if (!chunk->cb) + control |= DW_EDMA_V0_CB; + + /* Channel control */ + SET_LL_32(&llp->control, control); + /* Linked list */ + #ifdef CONFIG_64BIT + SET_LL_64(&llp->llp.reg, chunk->ll_region.paddr); + #else /* CONFIG_64BIT */ + SET_LL_32(&llp->llp.lsb, lower_32_bits(chunk->ll_region.paddr)); + SET_LL_32(&llp->llp.msb, upper_32_bits(chunk->ll_region.paddr)); + #endif /* CONFIG_64BIT */ +} + +void dw_edma_v0_core_start(struct dw_edma_chunk *chunk, bool first) +{ + struct dw_edma_chan *chan = chunk->chan; + struct dw_edma *dw = chan->dw; + u32 tmp; + + dw_edma_v0_core_write_chunk(chunk); + + if (first) { + /* Enable engine */ + SET_RW_32(dw, chan->dir, engine_en, BIT(0)); + if (dw->chip->mf == EDMA_MF_HDMA_COMPAT) { + switch (chan->id) { + case 0: + SET_RW_COMPAT(dw, chan->dir, ch0_pwr_en, + BIT(0)); + break; + case 1: + SET_RW_COMPAT(dw, chan->dir, ch1_pwr_en, + BIT(0)); + break; + case 2: + SET_RW_COMPAT(dw, chan->dir, ch2_pwr_en, + BIT(0)); + break; + case 3: + SET_RW_COMPAT(dw, chan->dir, ch3_pwr_en, + BIT(0)); + break; + case 4: + SET_RW_COMPAT(dw, chan->dir, ch4_pwr_en, + BIT(0)); + break; + case 5: + SET_RW_COMPAT(dw, chan->dir, ch5_pwr_en, + BIT(0)); + break; + case 6: + SET_RW_COMPAT(dw, chan->dir, ch6_pwr_en, + BIT(0)); + break; + case 7: + SET_RW_COMPAT(dw, chan->dir, ch7_pwr_en, + BIT(0)); + break; + } + } + /* Interrupt unmask - done, abort */ + tmp = GET_RW_32(dw, chan->dir, int_mask); + tmp &= ~FIELD_PREP(EDMA_V0_DONE_INT_MASK, BIT(chan->id)); + tmp &= ~FIELD_PREP(EDMA_V0_ABORT_INT_MASK, BIT(chan->id)); + SET_RW_32(dw, chan->dir, int_mask, tmp); + /* Linked list error */ + tmp = GET_RW_32(dw, chan->dir, linked_list_err_en); + tmp |= FIELD_PREP(EDMA_V0_LINKED_LIST_ERR_MASK, BIT(chan->id)); + SET_RW_32(dw, chan->dir, linked_list_err_en, tmp); + /* Channel control */ + SET_CH_32(dw, chan->dir, chan->id, ch_control1, + (DW_EDMA_V0_CCS | DW_EDMA_V0_LLE)); + /* Linked list */ + /* llp is not aligned on 64bit -> keep 32bit accesses */ + SET_CH_32(dw, chan->dir, chan->id, llp.lsb, + lower_32_bits(chunk->ll_region.paddr)); + SET_CH_32(dw, chan->dir, chan->id, llp.msb, + upper_32_bits(chunk->ll_region.paddr)); + } + /* Doorbell */ + SET_RW_32(dw, chan->dir, doorbell, + FIELD_PREP(EDMA_V0_DOORBELL_CH_MASK, chan->id)); +} + +int dw_edma_v0_core_device_config(struct dw_edma_chan *chan) +{ + struct dw_edma *dw = chan->dw; + u32 tmp = 0; + + /* MSI done addr - low, high */ + SET_RW_32(dw, chan->dir, done_imwr.lsb, 
chan->msi.address_lo); + SET_RW_32(dw, chan->dir, done_imwr.msb, chan->msi.address_hi); + /* MSI abort addr - low, high */ + SET_RW_32(dw, chan->dir, abort_imwr.lsb, chan->msi.address_lo); + SET_RW_32(dw, chan->dir, abort_imwr.msb, chan->msi.address_hi); + /* MSI data - low, high */ + switch (chan->id) { + case 0: + case 1: + tmp = GET_RW_32(dw, chan->dir, ch01_imwr_data); + break; + + case 2: + case 3: + tmp = GET_RW_32(dw, chan->dir, ch23_imwr_data); + break; + + case 4: + case 5: + tmp = GET_RW_32(dw, chan->dir, ch45_imwr_data); + break; + + case 6: + case 7: + tmp = GET_RW_32(dw, chan->dir, ch67_imwr_data); + break; + } + + if (chan->id & BIT(0)) { + /* Channel odd {1, 3, 5, 7} */ + tmp &= EDMA_V0_CH_EVEN_MSI_DATA_MASK; + tmp |= FIELD_PREP(EDMA_V0_CH_ODD_MSI_DATA_MASK, + chan->msi.data); + } else { + /* Channel even {0, 2, 4, 6} */ + tmp &= EDMA_V0_CH_ODD_MSI_DATA_MASK; + tmp |= FIELD_PREP(EDMA_V0_CH_EVEN_MSI_DATA_MASK, + chan->msi.data); + } + + switch (chan->id) { + case 0: + case 1: + SET_RW_32(dw, chan->dir, ch01_imwr_data, tmp); + break; + + case 2: + case 3: + SET_RW_32(dw, chan->dir, ch23_imwr_data, tmp); + break; + + case 4: + case 5: + SET_RW_32(dw, chan->dir, ch45_imwr_data, tmp); + break; + + case 6: + case 7: + SET_RW_32(dw, chan->dir, ch67_imwr_data, tmp); + break; + } + + return 0; +} + +/* eDMA debugfs callbacks */ +void dw_edma_v0_core_debugfs_on(struct dw_edma *dw) +{ + dw_edma_v0_debugfs_on(dw); +} + +void dw_edma_v0_core_debugfs_off(struct dw_edma *dw) +{ + dw_edma_v0_debugfs_off(dw); +} diff --git a/drivers/dma/dw-edma/dw-edma-v0-core.h b/drivers/dma/dw-edma/dw-edma-v0-core.h new file mode 100644 index 000000000..75aec6d31 --- /dev/null +++ b/drivers/dma/dw-edma/dw-edma-v0-core.h @@ -0,0 +1,28 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2018-2019 Synopsys, Inc. and/or its affiliates. + * Synopsys DesignWare eDMA v0 core + * + * Author: Gustavo Pimentel + */ + +#ifndef _DW_EDMA_V0_CORE_H +#define _DW_EDMA_V0_CORE_H + +#include + +/* eDMA management callbacks */ +void dw_edma_v0_core_off(struct dw_edma *chan); +u16 dw_edma_v0_core_ch_count(struct dw_edma *chan, enum dw_edma_dir dir); +enum dma_status dw_edma_v0_core_ch_status(struct dw_edma_chan *chan); +void dw_edma_v0_core_clear_done_int(struct dw_edma_chan *chan); +void dw_edma_v0_core_clear_abort_int(struct dw_edma_chan *chan); +u32 dw_edma_v0_core_status_done_int(struct dw_edma *chan, enum dw_edma_dir dir); +u32 dw_edma_v0_core_status_abort_int(struct dw_edma *chan, enum dw_edma_dir dir); +void dw_edma_v0_core_start(struct dw_edma_chunk *chunk, bool first); +int dw_edma_v0_core_device_config(struct dw_edma_chan *chan); +/* eDMA debug fs callbacks */ +void dw_edma_v0_core_debugfs_on(struct dw_edma *dw); +void dw_edma_v0_core_debugfs_off(struct dw_edma *dw); + +#endif /* _DW_EDMA_V0_CORE_H */ diff --git a/drivers/dma/dw-edma/dw-edma-v0-debugfs.c b/drivers/dma/dw-edma/dw-edma-v0-debugfs.c new file mode 100644 index 000000000..5226c9014 --- /dev/null +++ b/drivers/dma/dw-edma/dw-edma-v0-debugfs.c @@ -0,0 +1,314 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2018-2019 Synopsys, Inc. and/or its affiliates. 
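+ *
+ * Aside on dw_edma_v0_core_device_config() in dw-edma-v0-core.c above:
+ * channel pairs {0,1}, {2,3}, ... share one chXY_imwr_data register, and
+ * each channel rewrites only its own half. Assuming the even/odd masks in
+ * dw-edma-v0-regs.h are the low/high 16-bit halves (as their names
+ * suggest), configuring channel 1 with MSI data 0xBEEF while channel 0
+ * already holds 0xCAFE works out to:
+ *
+ *	tmp = 0x0000CAFE;			/* current ch01_imwr_data */
+ *	tmp &= EDMA_V0_CH_EVEN_MSI_DATA_MASK;	/* keep ch0: 0x0000CAFE */
+ *	tmp |= FIELD_PREP(EDMA_V0_CH_ODD_MSI_DATA_MASK, 0xBEEF);
+ *	/* tmp == 0xBEEFCAFE is written back to ch01_imwr_data */
+ *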
+ * Synopsys DesignWare eDMA v0 core + * + * Author: Gustavo Pimentel + */ + +#include +#include + +#include "dw-edma-v0-debugfs.h" +#include "dw-edma-v0-regs.h" +#include "dw-edma-core.h" + +#define REGS_ADDR(name) \ + ((void __force *)®s->name) +#define REGISTER(name) \ + { #name, REGS_ADDR(name) } + +#define WR_REGISTER(name) \ + { #name, REGS_ADDR(wr_##name) } +#define RD_REGISTER(name) \ + { #name, REGS_ADDR(rd_##name) } + +#define WR_REGISTER_LEGACY(name) \ + { #name, REGS_ADDR(type.legacy.wr_##name) } +#define RD_REGISTER_LEGACY(name) \ + { #name, REGS_ADDR(type.legacy.rd_##name) } + +#define WR_REGISTER_UNROLL(name) \ + { #name, REGS_ADDR(type.unroll.wr_##name) } +#define RD_REGISTER_UNROLL(name) \ + { #name, REGS_ADDR(type.unroll.rd_##name) } + +#define WRITE_STR "write" +#define READ_STR "read" +#define CHANNEL_STR "channel" +#define REGISTERS_STR "registers" + +static struct dw_edma *dw; +static struct dw_edma_v0_regs __iomem *regs; + +static struct { + void __iomem *start; + void __iomem *end; +} lim[2][EDMA_V0_MAX_NR_CH]; + +struct debugfs_entries { + const char *name; + dma_addr_t *reg; +}; + +static int dw_edma_debugfs_u32_get(void *data, u64 *val) +{ + void __iomem *reg = (void __force __iomem *)data; + if (dw->chip->mf == EDMA_MF_EDMA_LEGACY && + reg >= (void __iomem *)®s->type.legacy.ch) { + void __iomem *ptr = ®s->type.legacy.ch; + u32 viewport_sel = 0; + unsigned long flags; + u16 ch; + + for (ch = 0; ch < dw->wr_ch_cnt; ch++) + if (lim[0][ch].start >= reg && reg < lim[0][ch].end) { + ptr += (reg - lim[0][ch].start); + goto legacy_sel_wr; + } + + for (ch = 0; ch < dw->rd_ch_cnt; ch++) + if (lim[1][ch].start >= reg && reg < lim[1][ch].end) { + ptr += (reg - lim[1][ch].start); + goto legacy_sel_rd; + } + + return 0; +legacy_sel_rd: + viewport_sel = BIT(31); +legacy_sel_wr: + viewport_sel |= FIELD_PREP(EDMA_V0_VIEWPORT_MASK, ch); + + raw_spin_lock_irqsave(&dw->lock, flags); + + writel(viewport_sel, ®s->type.legacy.viewport_sel); + *val = readl(ptr); + + raw_spin_unlock_irqrestore(&dw->lock, flags); + } else { + *val = readl(reg); + } + + return 0; +} +DEFINE_DEBUGFS_ATTRIBUTE(fops_x32, dw_edma_debugfs_u32_get, NULL, "0x%08llx\n"); + +static void dw_edma_debugfs_create_x32(const struct debugfs_entries entries[], + int nr_entries, struct dentry *dir) +{ + int i; + + for (i = 0; i < nr_entries; i++) { + if (!debugfs_create_file_unsafe(entries[i].name, 0444, dir, + entries[i].reg, &fops_x32)) + break; + } +} + +static void dw_edma_debugfs_regs_ch(struct dw_edma_v0_ch_regs __iomem *regs, + struct dentry *dir) +{ + int nr_entries; + const struct debugfs_entries debugfs_regs[] = { + REGISTER(ch_control1), + REGISTER(ch_control2), + REGISTER(transfer_size), + REGISTER(sar.lsb), + REGISTER(sar.msb), + REGISTER(dar.lsb), + REGISTER(dar.msb), + REGISTER(llp.lsb), + REGISTER(llp.msb), + }; + + nr_entries = ARRAY_SIZE(debugfs_regs); + dw_edma_debugfs_create_x32(debugfs_regs, nr_entries, dir); +} + +static void dw_edma_debugfs_regs_wr(struct dentry *dir) +{ + const struct debugfs_entries debugfs_regs[] = { + /* eDMA global registers */ + WR_REGISTER(engine_en), + WR_REGISTER(doorbell), + WR_REGISTER(ch_arb_weight.lsb), + WR_REGISTER(ch_arb_weight.msb), + /* eDMA interrupts registers */ + WR_REGISTER(int_status), + WR_REGISTER(int_mask), + WR_REGISTER(int_clear), + WR_REGISTER(err_status), + WR_REGISTER(done_imwr.lsb), + WR_REGISTER(done_imwr.msb), + WR_REGISTER(abort_imwr.lsb), + WR_REGISTER(abort_imwr.msb), + WR_REGISTER(ch01_imwr_data), + WR_REGISTER(ch23_imwr_data), + 
+static void dw_edma_debugfs_create_x32(const struct debugfs_entries entries[],
+				       int nr_entries, struct dentry *dir)
+{
+	int i;
+
+	for (i = 0; i < nr_entries; i++) {
+		if (!debugfs_create_file_unsafe(entries[i].name, 0444, dir,
+						entries[i].reg, &fops_x32))
+			break;
+	}
+}
+
+static void dw_edma_debugfs_regs_ch(struct dw_edma_v0_ch_regs __iomem *regs,
+				    struct dentry *dir)
+{
+	int nr_entries;
+	const struct debugfs_entries debugfs_regs[] = {
+		REGISTER(ch_control1),
+		REGISTER(ch_control2),
+		REGISTER(transfer_size),
+		REGISTER(sar.lsb),
+		REGISTER(sar.msb),
+		REGISTER(dar.lsb),
+		REGISTER(dar.msb),
+		REGISTER(llp.lsb),
+		REGISTER(llp.msb),
+	};
+
+	nr_entries = ARRAY_SIZE(debugfs_regs);
+	dw_edma_debugfs_create_x32(debugfs_regs, nr_entries, dir);
+}
+
+static void dw_edma_debugfs_regs_wr(struct dentry *dir)
+{
+	const struct debugfs_entries debugfs_regs[] = {
+		/* eDMA global registers */
+		WR_REGISTER(engine_en),
+		WR_REGISTER(doorbell),
+		WR_REGISTER(ch_arb_weight.lsb),
+		WR_REGISTER(ch_arb_weight.msb),
+		/* eDMA interrupts registers */
+		WR_REGISTER(int_status),
+		WR_REGISTER(int_mask),
+		WR_REGISTER(int_clear),
+		WR_REGISTER(err_status),
+		WR_REGISTER(done_imwr.lsb),
+		WR_REGISTER(done_imwr.msb),
+		WR_REGISTER(abort_imwr.lsb),
+		WR_REGISTER(abort_imwr.msb),
+		WR_REGISTER(ch01_imwr_data),
+		WR_REGISTER(ch23_imwr_data),
+		WR_REGISTER(ch45_imwr_data),
+		WR_REGISTER(ch67_imwr_data),
+		WR_REGISTER(linked_list_err_en),
+	};
+	const struct debugfs_entries debugfs_unroll_regs[] = {
+		/* eDMA channel context grouping */
+		WR_REGISTER_UNROLL(engine_chgroup),
+		WR_REGISTER_UNROLL(engine_hshake_cnt.lsb),
+		WR_REGISTER_UNROLL(engine_hshake_cnt.msb),
+		WR_REGISTER_UNROLL(ch0_pwr_en),
+		WR_REGISTER_UNROLL(ch1_pwr_en),
+		WR_REGISTER_UNROLL(ch2_pwr_en),
+		WR_REGISTER_UNROLL(ch3_pwr_en),
+		WR_REGISTER_UNROLL(ch4_pwr_en),
+		WR_REGISTER_UNROLL(ch5_pwr_en),
+		WR_REGISTER_UNROLL(ch6_pwr_en),
+		WR_REGISTER_UNROLL(ch7_pwr_en),
+	};
+	struct dentry *regs_dir, *ch_dir;
+	int nr_entries, i;
+	char name[16];
+
+	regs_dir = debugfs_create_dir(WRITE_STR, dir);
+	if (!regs_dir)
+		return;
+
+	nr_entries = ARRAY_SIZE(debugfs_regs);
+	dw_edma_debugfs_create_x32(debugfs_regs, nr_entries, regs_dir);
+
+	if (dw->chip->mf == EDMA_MF_HDMA_COMPAT) {
+		nr_entries = ARRAY_SIZE(debugfs_unroll_regs);
+		dw_edma_debugfs_create_x32(debugfs_unroll_regs, nr_entries,
+					   regs_dir);
+	}
+
+	for (i = 0; i < dw->wr_ch_cnt; i++) {
+		snprintf(name, sizeof(name), "%s:%d", CHANNEL_STR, i);
+
+		ch_dir = debugfs_create_dir(name, regs_dir);
+		if (!ch_dir)
+			return;
+
+		dw_edma_debugfs_regs_ch(&regs->type.unroll.ch[i].wr, ch_dir);
+
+		lim[0][i].start = &regs->type.unroll.ch[i].wr;
+		lim[0][i].end = &regs->type.unroll.ch[i].padding_1[0];
+	}
+}
+
+static void dw_edma_debugfs_regs_rd(struct dentry *dir)
+{
+	const struct debugfs_entries debugfs_regs[] = {
+		/* eDMA global registers */
+		RD_REGISTER(engine_en),
+		RD_REGISTER(doorbell),
+		RD_REGISTER(ch_arb_weight.lsb),
+		RD_REGISTER(ch_arb_weight.msb),
+		/* eDMA interrupts registers */
+		RD_REGISTER(int_status),
+		RD_REGISTER(int_mask),
+		RD_REGISTER(int_clear),
+		RD_REGISTER(err_status.lsb),
+		RD_REGISTER(err_status.msb),
+		RD_REGISTER(linked_list_err_en),
+		RD_REGISTER(done_imwr.lsb),
+		RD_REGISTER(done_imwr.msb),
+		RD_REGISTER(abort_imwr.lsb),
+		RD_REGISTER(abort_imwr.msb),
+		RD_REGISTER(ch01_imwr_data),
+		RD_REGISTER(ch23_imwr_data),
+		RD_REGISTER(ch45_imwr_data),
+		RD_REGISTER(ch67_imwr_data),
+	};
+	const struct debugfs_entries debugfs_unroll_regs[] = {
+		/* eDMA channel context grouping */
+		RD_REGISTER_UNROLL(engine_chgroup),
+		RD_REGISTER_UNROLL(engine_hshake_cnt.lsb),
+		RD_REGISTER_UNROLL(engine_hshake_cnt.msb),
+		RD_REGISTER_UNROLL(ch0_pwr_en),
+		RD_REGISTER_UNROLL(ch1_pwr_en),
+		RD_REGISTER_UNROLL(ch2_pwr_en),
+		RD_REGISTER_UNROLL(ch3_pwr_en),
+		RD_REGISTER_UNROLL(ch4_pwr_en),
+		RD_REGISTER_UNROLL(ch5_pwr_en),
+		RD_REGISTER_UNROLL(ch6_pwr_en),
+		RD_REGISTER_UNROLL(ch7_pwr_en),
+	};
+	struct dentry *regs_dir, *ch_dir;
+	int nr_entries, i;
+	char name[16];
+
+	regs_dir = debugfs_create_dir(READ_STR, dir);
+	if (!regs_dir)
+		return;
+
+	nr_entries = ARRAY_SIZE(debugfs_regs);
+	dw_edma_debugfs_create_x32(debugfs_regs, nr_entries, regs_dir);
+
+	if (dw->chip->mf == EDMA_MF_HDMA_COMPAT) {
+		nr_entries = ARRAY_SIZE(debugfs_unroll_regs);
+		dw_edma_debugfs_create_x32(debugfs_unroll_regs, nr_entries,
+					   regs_dir);
+	}
+
+	for (i = 0; i < dw->rd_ch_cnt; i++) {
+		snprintf(name, sizeof(name), "%s:%d", CHANNEL_STR, i);
+
+		ch_dir = debugfs_create_dir(name, regs_dir);
+		if (!ch_dir)
+			return;
+
+		dw_edma_debugfs_regs_ch(&regs->type.unroll.ch[i].rd, ch_dir);
+
+		lim[1][i].start = &regs->type.unroll.ch[i].rd;
+		lim[1][i].end = &regs->type.unroll.ch[i].padding_2[0];
+	}
+}
+
+static void dw_edma_debugfs_regs(void)
+{
+	const struct debugfs_entries debugfs_regs[] = {
+		REGISTER(ctrl_data_arb_prior),
+		REGISTER(ctrl),
+	};
+	struct dentry *regs_dir;
+	int nr_entries;
+
+	regs_dir = debugfs_create_dir(REGISTERS_STR, dw->debugfs);
+	if (!regs_dir)
+		return;
+
+	nr_entries = ARRAY_SIZE(debugfs_regs);
+	dw_edma_debugfs_create_x32(debugfs_regs, nr_entries, regs_dir);
+
+	dw_edma_debugfs_regs_wr(regs_dir);
+	dw_edma_debugfs_regs_rd(regs_dir);
+}
+
+void dw_edma_v0_debugfs_on(struct dw_edma *_dw)
+{
+	dw = _dw;
+	if (!dw)
+		return;
+
+	regs = dw->chip->reg_base;
+	if (!regs)
+		return;
+
+	dw->debugfs = debugfs_create_dir(dw->name, NULL);
+	if (!dw->debugfs)
+		return;
+
+	debugfs_create_u32("mf", 0444, dw->debugfs, &dw->chip->mf);
+	debugfs_create_u16("wr_ch_cnt", 0444, dw->debugfs, &dw->wr_ch_cnt);
+	debugfs_create_u16("rd_ch_cnt", 0444, dw->debugfs, &dw->rd_ch_cnt);
+
+	dw_edma_debugfs_regs();
+}
+
+void dw_edma_v0_debugfs_off(struct dw_edma *_dw)
+{
+	dw = _dw;
+	if (!dw)
+		return;
+
+	debugfs_remove_recursive(dw->debugfs);
+	dw->debugfs = NULL;
+}
diff --git a/drivers/dma/dw-edma/dw-edma-v0-debugfs.h b/drivers/dma/dw-edma/dw-edma-v0-debugfs.h
new file mode 100644
index 000000000..3391b86ed
--- /dev/null
+++ b/drivers/dma/dw-edma/dw-edma-v0-debugfs.h
@@ -0,0 +1,27 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2018-2019 Synopsys, Inc. and/or its affiliates.
+ * Synopsys DesignWare eDMA v0 core
+ *
+ * Author: Gustavo Pimentel
+ */
+
+#ifndef _DW_EDMA_V0_DEBUG_FS_H
+#define _DW_EDMA_V0_DEBUG_FS_H
+
+#include <linux/dma/edma.h>
+
+#ifdef CONFIG_DEBUG_FS
+void dw_edma_v0_debugfs_on(struct dw_edma *dw);
+void dw_edma_v0_debugfs_off(struct dw_edma *dw);
+#else
+static inline void dw_edma_v0_debugfs_on(struct dw_edma *dw)
+{
+}
+
+static inline void dw_edma_v0_debugfs_off(struct dw_edma *dw)
+{
+}
+#endif /* CONFIG_DEBUG_FS */
+
+#endif /* _DW_EDMA_V0_DEBUG_FS_H */
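For orientation, with debugfs mounted at /sys/kernel/debug and <dev> standing in for the device's dw->name, dw_edma_v0_debugfs_on() builds a tree along these lines (the channel directories depend on wr_ch_cnt and rd_ch_cnt):

	/sys/kernel/debug/<dev>/
		mf
		wr_ch_cnt
		rd_ch_cnt
		registers/
			ctrl_data_arb_prior
			ctrl
			write/
				engine_en, doorbell, int_status, done_imwr.lsb,
				... through linked_list_err_en (the WR_REGISTER table)
				channel:0/ through channel:<wr_ch_cnt - 1>/
					ch_control1, ch_control2, transfer_size,
					sar.lsb, sar.msb, dar.lsb, dar.msb,
					llp.lsb, llp.msb
			read/
				(same layout, from the RD_REGISTER tables)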
diff --git a/drivers/dma/dw-edma/dw-edma-v0-regs.h b/drivers/dma/dw-edma/dw-edma-v0-regs.h
new file mode 100644
index 000000000..e175f7b20
--- /dev/null
+++ b/drivers/dma/dw-edma/dw-edma-v0-regs.h
@@ -0,0 +1,233 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2018-2019 Synopsys, Inc. and/or its affiliates.
+ * Synopsys DesignWare eDMA v0 core
+ *
+ * Author: Gustavo Pimentel
+ */
+
+#ifndef _DW_EDMA_V0_REGS_H
+#define _DW_EDMA_V0_REGS_H
+
+#include <linux/dmaengine.h>
+
+#define EDMA_V0_MAX_NR_CH			8
+#define EDMA_V0_VIEWPORT_MASK			GENMASK(2, 0)
+#define EDMA_V0_DONE_INT_MASK			GENMASK(7, 0)
+#define EDMA_V0_ABORT_INT_MASK			GENMASK(23, 16)
+#define EDMA_V0_WRITE_CH_COUNT_MASK		GENMASK(3, 0)
+#define EDMA_V0_READ_CH_COUNT_MASK		GENMASK(19, 16)
+#define EDMA_V0_CH_STATUS_MASK			GENMASK(6, 5)
+#define EDMA_V0_DOORBELL_CH_MASK		GENMASK(2, 0)
+#define EDMA_V0_LINKED_LIST_ERR_MASK		GENMASK(7, 0)
+
+#define EDMA_V0_CH_ODD_MSI_DATA_MASK		GENMASK(31, 16)
+#define EDMA_V0_CH_EVEN_MSI_DATA_MASK		GENMASK(15, 0)
+
+struct dw_edma_v0_ch_regs {
+	u32 ch_control1;			/* 0x0000 */
+	u32 ch_control2;			/* 0x0004 */
+	u32 transfer_size;			/* 0x0008 */
+	union {
+		u64 reg;			/* 0x000c..0x0010 */
+		struct {
+			u32 lsb;		/* 0x000c */
+			u32 msb;		/* 0x0010 */
+		};
+	} sar;
+	union {
+		u64 reg;			/* 0x0014..0x0018 */
+		struct {
+			u32 lsb;		/* 0x0014 */
+			u32 msb;		/* 0x0018 */
+		};
+	} dar;
+	union {
+		u64 reg;			/* 0x001c..0x0020 */
+		struct {
+			u32 lsb;		/* 0x001c */
+			u32 msb;		/* 0x0020 */
+		};
+	} llp;
+} __packed;

+struct dw_edma_v0_ch {
+	struct dw_edma_v0_ch_regs wr;		/* 0x0200 */
+	u32 padding_1[55];			/* 0x0224..0x02fc */
+	struct dw_edma_v0_ch_regs rd;		/* 0x0300 */
+	u32 padding_2[55];			/* 0x0324..0x03fc */
+} __packed;
+
+struct dw_edma_v0_unroll {
+	u32 padding_1;				/* 0x00f8 */
+	u32 wr_engine_chgroup;			/* 0x0100 */
+	u32 rd_engine_chgroup;			/* 0x0104 */
+	union {
+		u64 reg;			/* 0x0108..0x010c */
+		struct {
+			u32 lsb;		/* 0x0108 */
+			u32 msb;		/* 0x010c */
+		};
+	} wr_engine_hshake_cnt;
+	u32 padding_2[2];			/* 0x0110..0x0114 */
+	union {
+		u64 reg;			/* 0x0118..0x011c */
+		struct {
+			u32 lsb;		/* 0x0118 */
+			u32 msb;		/* 0x011c */
+		};
+	} rd_engine_hshake_cnt;
+	u32 padding_3[2];			/* 0x0120..0x0124 */
+	u32 wr_ch0_pwr_en;			/* 0x0128 */
+	u32 wr_ch1_pwr_en;			/* 0x012c */
+	u32 wr_ch2_pwr_en;			/* 0x0130 */
+	u32 wr_ch3_pwr_en;			/* 0x0134 */
+	u32 wr_ch4_pwr_en;			/* 0x0138 */
+	u32 wr_ch5_pwr_en;			/* 0x013c */
+	u32 wr_ch6_pwr_en;			/* 0x0140 */
+	u32 wr_ch7_pwr_en;			/* 0x0144 */
+	u32 padding_4[8];			/* 0x0148..0x0164 */
+	u32 rd_ch0_pwr_en;			/* 0x0168 */
+	u32 rd_ch1_pwr_en;			/* 0x016c */
+	u32 rd_ch2_pwr_en;			/* 0x0170 */
+	u32 rd_ch3_pwr_en;			/* 0x0174 */
+	u32 rd_ch4_pwr_en;			/* 0x0178 */
+	u32 rd_ch5_pwr_en;			/* 0x017c */
+	u32 rd_ch6_pwr_en;			/* 0x0180 */
+	u32 rd_ch7_pwr_en;			/* 0x0184 */
+	u32 padding_5[30];			/* 0x0188..0x01fc */
+	struct dw_edma_v0_ch ch[EDMA_V0_MAX_NR_CH];	/* 0x0200..0x1120 */
+} __packed;
+
+struct dw_edma_v0_legacy {
+	u32 viewport_sel;			/* 0x00f8 */
+	struct dw_edma_v0_ch_regs ch;		/* 0x0100..0x0120 */
+} __packed;
+
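The unroll map places each channel in a fixed 0x200-byte slot starting at 0x200, with the write registers at the slot base and the read registers 0x100 further in, as the offset comments above indicate. A quick sanity check of that arithmetic (unroll_ch_base() is an illustrative helper, not part of the patch):

	#include <stdint.h>

	/* Unroll layout: channel ch occupies the slot at 0x200 + ch * 0x200 */
	static uint32_t unroll_ch_base(unsigned int ch, int is_read)
	{
		return 0x200 + ch * 0x200 + (is_read ? 0x100 : 0);
	}

	/* unroll_ch_base(0, 0) == 0x0200, unroll_ch_base(0, 1) == 0x0300,
	 * unroll_ch_base(7, 1) == 0x1100, whose last register (llp.msb,
	 * at +0x20) sits at 0x1120 -- matching the 0x0200..0x1120 comment.
	 */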
+struct dw_edma_v0_regs {
+	/* eDMA global registers */
+	u32 ctrl_data_arb_prior;		/* 0x0000 */
+	u32 padding_1;				/* 0x0004 */
+	u32 ctrl;				/* 0x0008 */
+	u32 wr_engine_en;			/* 0x000c */
+	u32 wr_doorbell;			/* 0x0010 */
+	u32 padding_2;				/* 0x0014 */
+	union {
+		u64 reg;			/* 0x0018..0x001c */
+		struct {
+			u32 lsb;		/* 0x0018 */
+			u32 msb;		/* 0x001c */
+		};
+	} wr_ch_arb_weight;
+	u32 padding_3[3];			/* 0x0020..0x0028 */
+	u32 rd_engine_en;			/* 0x002c */
+	u32 rd_doorbell;			/* 0x0030 */
+	u32 padding_4;				/* 0x0034 */
+	union {
+		u64 reg;			/* 0x0038..0x003c */
+		struct {
+			u32 lsb;		/* 0x0038 */
+			u32 msb;		/* 0x003c */
+		};
+	} rd_ch_arb_weight;
+	u32 padding_5[3];			/* 0x0040..0x0048 */
+	/* eDMA interrupts registers */
+	u32 wr_int_status;			/* 0x004c */
+	u32 padding_6;				/* 0x0050 */
+	u32 wr_int_mask;			/* 0x0054 */
+	u32 wr_int_clear;			/* 0x0058 */
+	u32 wr_err_status;			/* 0x005c */
+	union {
+		u64 reg;			/* 0x0060..0x0064 */
+		struct {
+			u32 lsb;		/* 0x0060 */
+			u32 msb;		/* 0x0064 */
+		};
+	} wr_done_imwr;
+	union {
+		u64 reg;			/* 0x0068..0x006c */
+		struct {
+			u32 lsb;		/* 0x0068 */
+			u32 msb;		/* 0x006c */
+		};
+	} wr_abort_imwr;
+	u32 wr_ch01_imwr_data;			/* 0x0070 */
+	u32 wr_ch23_imwr_data;			/* 0x0074 */
+	u32 wr_ch45_imwr_data;			/* 0x0078 */
+	u32 wr_ch67_imwr_data;			/* 0x007c */
+	u32 padding_7[4];			/* 0x0080..0x008c */
+	u32 wr_linked_list_err_en;		/* 0x0090 */
+	u32 padding_8[3];			/* 0x0094..0x009c */
+	u32 rd_int_status;			/* 0x00a0 */
+	u32 padding_9;				/* 0x00a4 */
+	u32 rd_int_mask;			/* 0x00a8 */
+	u32 rd_int_clear;			/* 0x00ac */
+	u32 padding_10;				/* 0x00b0 */
+	union {
+		u64 reg;			/* 0x00b4..0x00b8 */
+		struct {
+			u32 lsb;		/* 0x00b4 */
+			u32 msb;		/* 0x00b8 */
+		};
+	} rd_err_status;
+	u32 padding_11[2];			/* 0x00bc..0x00c0 */
+	u32 rd_linked_list_err_en;		/* 0x00c4 */
+	u32 padding_12;				/* 0x00c8 */
+	union {
+		u64 reg;			/* 0x00cc..0x00d0 */
+		struct {
+			u32 lsb;		/* 0x00cc */
+			u32 msb;		/* 0x00d0 */
+		};
+	} rd_done_imwr;
+	union {
+		u64 reg;			/* 0x00d4..0x00d8 */
+		struct {
+			u32 lsb;		/* 0x00d4 */
+			u32 msb;		/* 0x00d8 */
+		};
+	} rd_abort_imwr;
+	u32 rd_ch01_imwr_data;			/* 0x00dc */
+	u32 rd_ch23_imwr_data;			/* 0x00e0 */
+	u32 rd_ch45_imwr_data;			/* 0x00e4 */
+	u32 rd_ch67_imwr_data;			/* 0x00e8 */
+	u32 padding_13[4];			/* 0x00ec..0x00f8 */
+	/* eDMA channel context grouping */
+	union dw_edma_v0_type {
+		struct dw_edma_v0_legacy legacy;	/* 0x00f8..0x0120 */
+		struct dw_edma_v0_unroll unroll;	/* 0x00f8..0x1120 */
+	} type;
+} __packed;
+
+struct dw_edma_v0_lli {
+	u32 control;
+	u32 transfer_size;
+	union {
+		u64 reg;
+		struct {
+			u32 lsb;
+			u32 msb;
+		};
+	} sar;
+	union {
+		u64 reg;
+		struct {
+			u32 lsb;
+			u32 msb;
+		};
+	} dar;
+} __packed;
+
+struct dw_edma_v0_llp {
+	u32 control;
+	u32 reserved;
+	union {
+		u64 reg;
+		struct {
+			u32 lsb;
+			u32 msb;
+		};
+	} llp;
+} __packed;
+
+#endif /* _DW_EDMA_V0_REGS_H */
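dw_edma_v0_lli and dw_edma_v0_llp describe the in-memory linked list the engine walks: one lli element per burst (control word, transfer size, source and destination addresses) and a trailing llp element pointing at the next list. A hedged host-side sketch of laying out such a list; the structures are simplified to plain 64-bit fields (equivalent layout on little-endian hosts), and the control-bit values are placeholders for the CB/LIE/RIE flags the real driver encodes in dw-edma-v0-core.c:

	#include <stdint.h>
	#include <string.h>

	struct edma_v0_lli {
		uint32_t control;
		uint32_t transfer_size;
		uint64_t sar;		/* stands in for the lsb/msb union */
		uint64_t dar;
	} __attribute__((packed));

	struct edma_v0_llp {
		uint32_t control;
		uint32_t reserved;
		uint64_t llp;
	} __attribute__((packed));

	/* Fill one burst descriptor; control is a placeholder bit pattern */
	static void lli_fill(struct edma_v0_lli *lli, uint32_t control,
			     uint32_t size, uint64_t src, uint64_t dst)
	{
		lli->control = control;
		lli->transfer_size = size;
		lli->sar = src;
		lli->dar = dst;
	}

	/* Terminate the list: the llp element redirects the engine to the
	 * next chunk's list (or back to the start of this one).
	 */
	static void llp_fill(struct edma_v0_llp *llp, uint32_t control,
			     uint64_t next)
	{
		memset(llp, 0, sizeof(*llp));
		llp->control = control;
		llp->llp = next;
	}

-- 
cgit v1.2.3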