author | Daniel Baumann <daniel.baumann@progress-linux.org> | 2024-04-11 08:27:49 +0000
---|---|---
committer | Daniel Baumann <daniel.baumann@progress-linux.org> | 2024-04-11 08:27:49 +0000
commit | ace9429bb58fd418f0c81d4c2835699bddf6bde6 |
tree | b2d64bc10158fdd5497876388cd68142ca374ed3 /drivers/dma/dw-edma |
parent | Initial commit. |
Adding upstream version 6.6.15.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat
-rw-r--r-- | drivers/dma/dw-edma/Kconfig | 22
-rw-r--r-- | drivers/dma/dw-edma/Makefile | 9
-rw-r--r-- | drivers/dma/dw-edma/dw-edma-core.c | 1016
-rw-r--r-- | drivers/dma/dw-edma/dw-edma-core.h | 209
-rw-r--r-- | drivers/dma/dw-edma/dw-edma-pcie.c | 378
-rw-r--r-- | drivers/dma/dw-edma/dw-edma-v0-core.c | 508
-rw-r--r-- | drivers/dma/dw-edma/dw-edma-v0-core.h | 17
-rw-r--r-- | drivers/dma/dw-edma/dw-edma-v0-debugfs.c | 292
-rw-r--r-- | drivers/dma/dw-edma/dw-edma-v0-debugfs.h | 22
-rw-r--r-- | drivers/dma/dw-edma/dw-edma-v0-regs.h | 233
-rw-r--r-- | drivers/dma/dw-edma/dw-hdma-v0-core.c | 296
-rw-r--r-- | drivers/dma/dw-edma/dw-hdma-v0-core.h | 17
-rw-r--r-- | drivers/dma/dw-edma/dw-hdma-v0-debugfs.c | 170
-rw-r--r-- | drivers/dma/dw-edma/dw-hdma-v0-debugfs.h | 22
-rw-r--r-- | drivers/dma/dw-edma/dw-hdma-v0-regs.h | 129
15 files changed, 3340 insertions(+), 0 deletions(-)
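For context on how the code below is consumed: the channels registered by dw-edma-core.c are driven through the generic Linux dmaengine API rather than by calling into dw-edma directly. The following is a minimal, hypothetical consumer sketch and is not part of this patch; the "rx" channel name, the device handle and the device-side bus address are placeholders chosen only for illustration.

#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>

/*
 * Hypothetical dw-edma consumer: request a channel through the generic
 * dmaengine API, set the device-side source address, run one DEV_TO_MEM
 * transfer and release the channel.  "rx" and the address are placeholders.
 */
static int example_edma_read(struct device *dev, dma_addr_t dst, size_t len)
{
	struct dma_slave_config cfg = {
		.src_addr = 0x0,		/* placeholder device/bus address */
	};
	struct dma_async_tx_descriptor *desc;
	struct dma_chan *chan;
	dma_cookie_t cookie;
	int ret;

	chan = dma_request_chan(dev, "rx");	/* placeholder channel name */
	if (IS_ERR(chan))
		return PTR_ERR(chan);

	ret = dmaengine_slave_config(chan, &cfg);
	if (ret)
		goto out;

	desc = dmaengine_prep_slave_single(chan, dst, len, DMA_DEV_TO_MEM,
					   DMA_PREP_INTERRUPT);
	if (!desc) {
		ret = -EINVAL;
		goto out;
	}

	cookie = dmaengine_submit(desc);
	dma_async_issue_pending(chan);

	/* ... wait for completion (callback or polling) before reuse ... */
	ret = dma_submit_error(cookie);
out:
	dma_release_channel(chan);
	return ret;
}

Whether the write or the read channel should service a given DEV_TO_MEM or MEM_TO_DEV request depends on whether the eDMA is controlled locally or from the remote host; the comment in dw_edma_device_transfer() below spells out both cases.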
diff --git a/drivers/dma/dw-edma/Kconfig b/drivers/dma/dw-edma/Kconfig new file mode 100644 index 0000000000..2b6f267950 --- /dev/null +++ b/drivers/dma/dw-edma/Kconfig @@ -0,0 +1,22 @@ +# SPDX-License-Identifier: GPL-2.0 + +config DW_EDMA + tristate "Synopsys DesignWare eDMA controller driver" + depends on PCI && PCI_MSI + select DMA_ENGINE + select DMA_VIRTUAL_CHANNELS + help + Support the Synopsys DesignWare eDMA controller, normally + implemented on endpoints SoCs. + +if DW_EDMA + +config DW_EDMA_PCIE + tristate "Synopsys DesignWare eDMA PCIe driver" + depends on PCI && PCI_MSI + help + Provides a glue-logic between the Synopsys DesignWare + eDMA controller and an endpoint PCIe device. This also serves + as a reference design to whom desires to use this IP. + +endif # DW_EDMA diff --git a/drivers/dma/dw-edma/Makefile b/drivers/dma/dw-edma/Makefile new file mode 100644 index 0000000000..83ab58f877 --- /dev/null +++ b/drivers/dma/dw-edma/Makefile @@ -0,0 +1,9 @@ +# SPDX-License-Identifier: GPL-2.0 + +obj-$(CONFIG_DW_EDMA) += dw-edma.o +dw-edma-$(CONFIG_DEBUG_FS) := dw-edma-v0-debugfs.o \ + dw-hdma-v0-debugfs.o +dw-edma-objs := dw-edma-core.o \ + dw-edma-v0-core.o \ + dw-hdma-v0-core.o $(dw-edma-y) +obj-$(CONFIG_DW_EDMA_PCIE) += dw-edma-pcie.o diff --git a/drivers/dma/dw-edma/dw-edma-core.c b/drivers/dma/dw-edma/dw-edma-core.c new file mode 100644 index 0000000000..6823624705 --- /dev/null +++ b/drivers/dma/dw-edma/dw-edma-core.c @@ -0,0 +1,1016 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2018-2019 Synopsys, Inc. and/or its affiliates. + * Synopsys DesignWare eDMA core driver + * + * Author: Gustavo Pimentel <gustavo.pimentel@synopsys.com> + */ + +#include <linux/module.h> +#include <linux/device.h> +#include <linux/kernel.h> +#include <linux/dmaengine.h> +#include <linux/err.h> +#include <linux/interrupt.h> +#include <linux/irq.h> +#include <linux/dma/edma.h> +#include <linux/dma-mapping.h> + +#include "dw-edma-core.h" +#include "dw-edma-v0-core.h" +#include "dw-hdma-v0-core.h" +#include "../dmaengine.h" +#include "../virt-dma.h" + +static inline +struct device *dchan2dev(struct dma_chan *dchan) +{ + return &dchan->dev->device; +} + +static inline +struct device *chan2dev(struct dw_edma_chan *chan) +{ + return &chan->vc.chan.dev->device; +} + +static inline +struct dw_edma_desc *vd2dw_edma_desc(struct virt_dma_desc *vd) +{ + return container_of(vd, struct dw_edma_desc, vd); +} + +static inline +u64 dw_edma_get_pci_address(struct dw_edma_chan *chan, phys_addr_t cpu_addr) +{ + struct dw_edma_chip *chip = chan->dw->chip; + + if (chip->ops->pci_address) + return chip->ops->pci_address(chip->dev, cpu_addr); + + return cpu_addr; +} + +static struct dw_edma_burst *dw_edma_alloc_burst(struct dw_edma_chunk *chunk) +{ + struct dw_edma_burst *burst; + + burst = kzalloc(sizeof(*burst), GFP_NOWAIT); + if (unlikely(!burst)) + return NULL; + + INIT_LIST_HEAD(&burst->list); + if (chunk->burst) { + /* Create and add new element into the linked list */ + chunk->bursts_alloc++; + list_add_tail(&burst->list, &chunk->burst->list); + } else { + /* List head */ + chunk->bursts_alloc = 0; + chunk->burst = burst; + } + + return burst; +} + +static struct dw_edma_chunk *dw_edma_alloc_chunk(struct dw_edma_desc *desc) +{ + struct dw_edma_chip *chip = desc->chan->dw->chip; + struct dw_edma_chan *chan = desc->chan; + struct dw_edma_chunk *chunk; + + chunk = kzalloc(sizeof(*chunk), GFP_NOWAIT); + if (unlikely(!chunk)) + return NULL; + + INIT_LIST_HEAD(&chunk->list); + chunk->chan = chan; + /* 
Toggling change bit (CB) in each chunk, this is a mechanism to + * inform the eDMA HW block that this is a new linked list ready + * to be consumed. + * - Odd chunks originate CB equal to 0 + * - Even chunks originate CB equal to 1 + */ + chunk->cb = !(desc->chunks_alloc % 2); + if (chan->dir == EDMA_DIR_WRITE) { + chunk->ll_region.paddr = chip->ll_region_wr[chan->id].paddr; + chunk->ll_region.vaddr = chip->ll_region_wr[chan->id].vaddr; + } else { + chunk->ll_region.paddr = chip->ll_region_rd[chan->id].paddr; + chunk->ll_region.vaddr = chip->ll_region_rd[chan->id].vaddr; + } + + if (desc->chunk) { + /* Create and add new element into the linked list */ + if (!dw_edma_alloc_burst(chunk)) { + kfree(chunk); + return NULL; + } + desc->chunks_alloc++; + list_add_tail(&chunk->list, &desc->chunk->list); + } else { + /* List head */ + chunk->burst = NULL; + desc->chunks_alloc = 0; + desc->chunk = chunk; + } + + return chunk; +} + +static struct dw_edma_desc *dw_edma_alloc_desc(struct dw_edma_chan *chan) +{ + struct dw_edma_desc *desc; + + desc = kzalloc(sizeof(*desc), GFP_NOWAIT); + if (unlikely(!desc)) + return NULL; + + desc->chan = chan; + if (!dw_edma_alloc_chunk(desc)) { + kfree(desc); + return NULL; + } + + return desc; +} + +static void dw_edma_free_burst(struct dw_edma_chunk *chunk) +{ + struct dw_edma_burst *child, *_next; + + /* Remove all the list elements */ + list_for_each_entry_safe(child, _next, &chunk->burst->list, list) { + list_del(&child->list); + kfree(child); + chunk->bursts_alloc--; + } + + /* Remove the list head */ + kfree(child); + chunk->burst = NULL; +} + +static void dw_edma_free_chunk(struct dw_edma_desc *desc) +{ + struct dw_edma_chunk *child, *_next; + + if (!desc->chunk) + return; + + /* Remove all the list elements */ + list_for_each_entry_safe(child, _next, &desc->chunk->list, list) { + dw_edma_free_burst(child); + list_del(&child->list); + kfree(child); + desc->chunks_alloc--; + } + + /* Remove the list head */ + kfree(child); + desc->chunk = NULL; +} + +static void dw_edma_free_desc(struct dw_edma_desc *desc) +{ + dw_edma_free_chunk(desc); + kfree(desc); +} + +static void vchan_free_desc(struct virt_dma_desc *vdesc) +{ + dw_edma_free_desc(vd2dw_edma_desc(vdesc)); +} + +static int dw_edma_start_transfer(struct dw_edma_chan *chan) +{ + struct dw_edma *dw = chan->dw; + struct dw_edma_chunk *child; + struct dw_edma_desc *desc; + struct virt_dma_desc *vd; + + vd = vchan_next_desc(&chan->vc); + if (!vd) + return 0; + + desc = vd2dw_edma_desc(vd); + if (!desc) + return 0; + + child = list_first_entry_or_null(&desc->chunk->list, + struct dw_edma_chunk, list); + if (!child) + return 0; + + dw_edma_core_start(dw, child, !desc->xfer_sz); + desc->xfer_sz += child->ll_region.sz; + dw_edma_free_burst(child); + list_del(&child->list); + kfree(child); + desc->chunks_alloc--; + + return 1; +} + +static void dw_edma_device_caps(struct dma_chan *dchan, + struct dma_slave_caps *caps) +{ + struct dw_edma_chan *chan = dchan2dw_edma_chan(dchan); + + if (chan->dw->chip->flags & DW_EDMA_CHIP_LOCAL) { + if (chan->dir == EDMA_DIR_READ) + caps->directions = BIT(DMA_DEV_TO_MEM); + else + caps->directions = BIT(DMA_MEM_TO_DEV); + } else { + if (chan->dir == EDMA_DIR_WRITE) + caps->directions = BIT(DMA_DEV_TO_MEM); + else + caps->directions = BIT(DMA_MEM_TO_DEV); + } +} + +static int dw_edma_device_config(struct dma_chan *dchan, + struct dma_slave_config *config) +{ + struct dw_edma_chan *chan = dchan2dw_edma_chan(dchan); + + memcpy(&chan->config, config, sizeof(*config)); + chan->configured 
= true; + + return 0; +} + +static int dw_edma_device_pause(struct dma_chan *dchan) +{ + struct dw_edma_chan *chan = dchan2dw_edma_chan(dchan); + int err = 0; + + if (!chan->configured) + err = -EPERM; + else if (chan->status != EDMA_ST_BUSY) + err = -EPERM; + else if (chan->request != EDMA_REQ_NONE) + err = -EPERM; + else + chan->request = EDMA_REQ_PAUSE; + + return err; +} + +static int dw_edma_device_resume(struct dma_chan *dchan) +{ + struct dw_edma_chan *chan = dchan2dw_edma_chan(dchan); + int err = 0; + + if (!chan->configured) { + err = -EPERM; + } else if (chan->status != EDMA_ST_PAUSE) { + err = -EPERM; + } else if (chan->request != EDMA_REQ_NONE) { + err = -EPERM; + } else { + chan->status = EDMA_ST_BUSY; + dw_edma_start_transfer(chan); + } + + return err; +} + +static int dw_edma_device_terminate_all(struct dma_chan *dchan) +{ + struct dw_edma_chan *chan = dchan2dw_edma_chan(dchan); + int err = 0; + + if (!chan->configured) { + /* Do nothing */ + } else if (chan->status == EDMA_ST_PAUSE) { + chan->status = EDMA_ST_IDLE; + chan->configured = false; + } else if (chan->status == EDMA_ST_IDLE) { + chan->configured = false; + } else if (dw_edma_core_ch_status(chan) == DMA_COMPLETE) { + /* + * The channel is in a false BUSY state, probably didn't + * receive or lost an interrupt + */ + chan->status = EDMA_ST_IDLE; + chan->configured = false; + } else if (chan->request > EDMA_REQ_PAUSE) { + err = -EPERM; + } else { + chan->request = EDMA_REQ_STOP; + } + + return err; +} + +static void dw_edma_device_issue_pending(struct dma_chan *dchan) +{ + struct dw_edma_chan *chan = dchan2dw_edma_chan(dchan); + unsigned long flags; + + if (!chan->configured) + return; + + spin_lock_irqsave(&chan->vc.lock, flags); + if (vchan_issue_pending(&chan->vc) && chan->request == EDMA_REQ_NONE && + chan->status == EDMA_ST_IDLE) { + chan->status = EDMA_ST_BUSY; + dw_edma_start_transfer(chan); + } + spin_unlock_irqrestore(&chan->vc.lock, flags); +} + +static enum dma_status +dw_edma_device_tx_status(struct dma_chan *dchan, dma_cookie_t cookie, + struct dma_tx_state *txstate) +{ + struct dw_edma_chan *chan = dchan2dw_edma_chan(dchan); + struct dw_edma_desc *desc; + struct virt_dma_desc *vd; + unsigned long flags; + enum dma_status ret; + u32 residue = 0; + + ret = dma_cookie_status(dchan, cookie, txstate); + if (ret == DMA_COMPLETE) + return ret; + + if (ret == DMA_IN_PROGRESS && chan->status == EDMA_ST_PAUSE) + ret = DMA_PAUSED; + + if (!txstate) + goto ret_residue; + + spin_lock_irqsave(&chan->vc.lock, flags); + vd = vchan_find_desc(&chan->vc, cookie); + if (vd) { + desc = vd2dw_edma_desc(vd); + if (desc) + residue = desc->alloc_sz - desc->xfer_sz; + } + spin_unlock_irqrestore(&chan->vc.lock, flags); + +ret_residue: + dma_set_residue(txstate, residue); + + return ret; +} + +static struct dma_async_tx_descriptor * +dw_edma_device_transfer(struct dw_edma_transfer *xfer) +{ + struct dw_edma_chan *chan = dchan2dw_edma_chan(xfer->dchan); + enum dma_transfer_direction dir = xfer->direction; + struct scatterlist *sg = NULL; + struct dw_edma_chunk *chunk; + struct dw_edma_burst *burst; + struct dw_edma_desc *desc; + u64 src_addr, dst_addr; + size_t fsz = 0; + u32 cnt = 0; + int i; + + if (!chan->configured) + return NULL; + + /* + * Local Root Port/End-point Remote End-point + * +-----------------------+ PCIe bus +----------------------+ + * | | +-+ | | + * | DEV_TO_MEM Rx Ch <----+ +---+ Tx Ch DEV_TO_MEM | + * | | | | | | + * | MEM_TO_DEV Tx Ch +----+ +---> Rx Ch MEM_TO_DEV | + * | | +-+ | | + * 
+-----------------------+ +----------------------+ + * + * 1. Normal logic: + * If eDMA is embedded into the DW PCIe RP/EP and controlled from the + * CPU/Application side, the Rx channel (EDMA_DIR_READ) will be used + * for the device read operations (DEV_TO_MEM) and the Tx channel + * (EDMA_DIR_WRITE) - for the write operations (MEM_TO_DEV). + * + * 2. Inverted logic: + * If eDMA is embedded into a Remote PCIe EP and is controlled by the + * MWr/MRd TLPs sent from the CPU's PCIe host controller, the Tx + * channel (EDMA_DIR_WRITE) will be used for the device read operations + * (DEV_TO_MEM) and the Rx channel (EDMA_DIR_READ) - for the write + * operations (MEM_TO_DEV). + * + * It is the client driver responsibility to choose a proper channel + * for the DMA transfers. + */ + if (chan->dw->chip->flags & DW_EDMA_CHIP_LOCAL) { + if ((chan->dir == EDMA_DIR_READ && dir != DMA_DEV_TO_MEM) || + (chan->dir == EDMA_DIR_WRITE && dir != DMA_MEM_TO_DEV)) + return NULL; + } else { + if ((chan->dir == EDMA_DIR_WRITE && dir != DMA_DEV_TO_MEM) || + (chan->dir == EDMA_DIR_READ && dir != DMA_MEM_TO_DEV)) + return NULL; + } + + if (xfer->type == EDMA_XFER_CYCLIC) { + if (!xfer->xfer.cyclic.len || !xfer->xfer.cyclic.cnt) + return NULL; + } else if (xfer->type == EDMA_XFER_SCATTER_GATHER) { + if (xfer->xfer.sg.len < 1) + return NULL; + } else if (xfer->type == EDMA_XFER_INTERLEAVED) { + if (!xfer->xfer.il->numf || xfer->xfer.il->frame_size < 1) + return NULL; + if (!xfer->xfer.il->src_inc || !xfer->xfer.il->dst_inc) + return NULL; + } else { + return NULL; + } + + desc = dw_edma_alloc_desc(chan); + if (unlikely(!desc)) + goto err_alloc; + + chunk = dw_edma_alloc_chunk(desc); + if (unlikely(!chunk)) + goto err_alloc; + + if (xfer->type == EDMA_XFER_INTERLEAVED) { + src_addr = xfer->xfer.il->src_start; + dst_addr = xfer->xfer.il->dst_start; + } else { + src_addr = chan->config.src_addr; + dst_addr = chan->config.dst_addr; + } + + if (dir == DMA_DEV_TO_MEM) + src_addr = dw_edma_get_pci_address(chan, (phys_addr_t)src_addr); + else + dst_addr = dw_edma_get_pci_address(chan, (phys_addr_t)dst_addr); + + if (xfer->type == EDMA_XFER_CYCLIC) { + cnt = xfer->xfer.cyclic.cnt; + } else if (xfer->type == EDMA_XFER_SCATTER_GATHER) { + cnt = xfer->xfer.sg.len; + sg = xfer->xfer.sg.sgl; + } else if (xfer->type == EDMA_XFER_INTERLEAVED) { + cnt = xfer->xfer.il->numf * xfer->xfer.il->frame_size; + fsz = xfer->xfer.il->frame_size; + } + + for (i = 0; i < cnt; i++) { + if (xfer->type == EDMA_XFER_SCATTER_GATHER && !sg) + break; + + if (chunk->bursts_alloc == chan->ll_max) { + chunk = dw_edma_alloc_chunk(desc); + if (unlikely(!chunk)) + goto err_alloc; + } + + burst = dw_edma_alloc_burst(chunk); + if (unlikely(!burst)) + goto err_alloc; + + if (xfer->type == EDMA_XFER_CYCLIC) + burst->sz = xfer->xfer.cyclic.len; + else if (xfer->type == EDMA_XFER_SCATTER_GATHER) + burst->sz = sg_dma_len(sg); + else if (xfer->type == EDMA_XFER_INTERLEAVED) + burst->sz = xfer->xfer.il->sgl[i % fsz].size; + + chunk->ll_region.sz += burst->sz; + desc->alloc_sz += burst->sz; + + if (dir == DMA_DEV_TO_MEM) { + burst->sar = src_addr; + if (xfer->type == EDMA_XFER_CYCLIC) { + burst->dar = xfer->xfer.cyclic.paddr; + } else if (xfer->type == EDMA_XFER_SCATTER_GATHER) { + src_addr += sg_dma_len(sg); + burst->dar = sg_dma_address(sg); + /* Unlike the typical assumption by other + * drivers/IPs the peripheral memory isn't + * a FIFO memory, in this case, it's a + * linear memory and that why the source + * and destination addresses are increased + * by the 
same portion (data length) + */ + } else if (xfer->type == EDMA_XFER_INTERLEAVED) { + burst->dar = dst_addr; + } + } else { + burst->dar = dst_addr; + if (xfer->type == EDMA_XFER_CYCLIC) { + burst->sar = xfer->xfer.cyclic.paddr; + } else if (xfer->type == EDMA_XFER_SCATTER_GATHER) { + dst_addr += sg_dma_len(sg); + burst->sar = sg_dma_address(sg); + /* Unlike the typical assumption by other + * drivers/IPs the peripheral memory isn't + * a FIFO memory, in this case, it's a + * linear memory and that why the source + * and destination addresses are increased + * by the same portion (data length) + */ + } else if (xfer->type == EDMA_XFER_INTERLEAVED) { + burst->sar = src_addr; + } + } + + if (xfer->type == EDMA_XFER_SCATTER_GATHER) { + sg = sg_next(sg); + } else if (xfer->type == EDMA_XFER_INTERLEAVED) { + struct dma_interleaved_template *il = xfer->xfer.il; + struct data_chunk *dc = &il->sgl[i % fsz]; + + src_addr += burst->sz; + if (il->src_sgl) + src_addr += dmaengine_get_src_icg(il, dc); + + dst_addr += burst->sz; + if (il->dst_sgl) + dst_addr += dmaengine_get_dst_icg(il, dc); + } + } + + return vchan_tx_prep(&chan->vc, &desc->vd, xfer->flags); + +err_alloc: + if (desc) + dw_edma_free_desc(desc); + + return NULL; +} + +static struct dma_async_tx_descriptor * +dw_edma_device_prep_slave_sg(struct dma_chan *dchan, struct scatterlist *sgl, + unsigned int len, + enum dma_transfer_direction direction, + unsigned long flags, void *context) +{ + struct dw_edma_transfer xfer; + + xfer.dchan = dchan; + xfer.direction = direction; + xfer.xfer.sg.sgl = sgl; + xfer.xfer.sg.len = len; + xfer.flags = flags; + xfer.type = EDMA_XFER_SCATTER_GATHER; + + return dw_edma_device_transfer(&xfer); +} + +static struct dma_async_tx_descriptor * +dw_edma_device_prep_dma_cyclic(struct dma_chan *dchan, dma_addr_t paddr, + size_t len, size_t count, + enum dma_transfer_direction direction, + unsigned long flags) +{ + struct dw_edma_transfer xfer; + + xfer.dchan = dchan; + xfer.direction = direction; + xfer.xfer.cyclic.paddr = paddr; + xfer.xfer.cyclic.len = len; + xfer.xfer.cyclic.cnt = count; + xfer.flags = flags; + xfer.type = EDMA_XFER_CYCLIC; + + return dw_edma_device_transfer(&xfer); +} + +static struct dma_async_tx_descriptor * +dw_edma_device_prep_interleaved_dma(struct dma_chan *dchan, + struct dma_interleaved_template *ilt, + unsigned long flags) +{ + struct dw_edma_transfer xfer; + + xfer.dchan = dchan; + xfer.direction = ilt->dir; + xfer.xfer.il = ilt; + xfer.flags = flags; + xfer.type = EDMA_XFER_INTERLEAVED; + + return dw_edma_device_transfer(&xfer); +} + +static void dw_edma_done_interrupt(struct dw_edma_chan *chan) +{ + struct dw_edma_desc *desc; + struct virt_dma_desc *vd; + unsigned long flags; + + spin_lock_irqsave(&chan->vc.lock, flags); + vd = vchan_next_desc(&chan->vc); + if (vd) { + switch (chan->request) { + case EDMA_REQ_NONE: + desc = vd2dw_edma_desc(vd); + if (!desc->chunks_alloc) { + list_del(&vd->node); + vchan_cookie_complete(vd); + } + + /* Continue transferring if there are remaining chunks or issued requests. + */ + chan->status = dw_edma_start_transfer(chan) ? 
EDMA_ST_BUSY : EDMA_ST_IDLE; + break; + + case EDMA_REQ_STOP: + list_del(&vd->node); + vchan_cookie_complete(vd); + chan->request = EDMA_REQ_NONE; + chan->status = EDMA_ST_IDLE; + break; + + case EDMA_REQ_PAUSE: + chan->request = EDMA_REQ_NONE; + chan->status = EDMA_ST_PAUSE; + break; + + default: + break; + } + } + spin_unlock_irqrestore(&chan->vc.lock, flags); +} + +static void dw_edma_abort_interrupt(struct dw_edma_chan *chan) +{ + struct virt_dma_desc *vd; + unsigned long flags; + + spin_lock_irqsave(&chan->vc.lock, flags); + vd = vchan_next_desc(&chan->vc); + if (vd) { + list_del(&vd->node); + vchan_cookie_complete(vd); + } + spin_unlock_irqrestore(&chan->vc.lock, flags); + chan->request = EDMA_REQ_NONE; + chan->status = EDMA_ST_IDLE; +} + +static inline irqreturn_t dw_edma_interrupt_write(int irq, void *data) +{ + struct dw_edma_irq *dw_irq = data; + + return dw_edma_core_handle_int(dw_irq, EDMA_DIR_WRITE, + dw_edma_done_interrupt, + dw_edma_abort_interrupt); +} + +static inline irqreturn_t dw_edma_interrupt_read(int irq, void *data) +{ + struct dw_edma_irq *dw_irq = data; + + return dw_edma_core_handle_int(dw_irq, EDMA_DIR_READ, + dw_edma_done_interrupt, + dw_edma_abort_interrupt); +} + +static irqreturn_t dw_edma_interrupt_common(int irq, void *data) +{ + irqreturn_t ret = IRQ_NONE; + + ret |= dw_edma_interrupt_write(irq, data); + ret |= dw_edma_interrupt_read(irq, data); + + return ret; +} + +static int dw_edma_alloc_chan_resources(struct dma_chan *dchan) +{ + struct dw_edma_chan *chan = dchan2dw_edma_chan(dchan); + + if (chan->status != EDMA_ST_IDLE) + return -EBUSY; + + return 0; +} + +static void dw_edma_free_chan_resources(struct dma_chan *dchan) +{ + unsigned long timeout = jiffies + msecs_to_jiffies(5000); + int ret; + + while (time_before(jiffies, timeout)) { + ret = dw_edma_device_terminate_all(dchan); + if (!ret) + break; + + if (time_after_eq(jiffies, timeout)) + return; + + cpu_relax(); + } +} + +static int dw_edma_channel_setup(struct dw_edma *dw, u32 wr_alloc, u32 rd_alloc) +{ + struct dw_edma_chip *chip = dw->chip; + struct device *dev = chip->dev; + struct dw_edma_chan *chan; + struct dw_edma_irq *irq; + struct dma_device *dma; + u32 i, ch_cnt; + u32 pos; + + ch_cnt = dw->wr_ch_cnt + dw->rd_ch_cnt; + dma = &dw->dma; + + INIT_LIST_HEAD(&dma->channels); + + for (i = 0; i < ch_cnt; i++) { + chan = &dw->chan[i]; + + chan->dw = dw; + + if (i < dw->wr_ch_cnt) { + chan->id = i; + chan->dir = EDMA_DIR_WRITE; + } else { + chan->id = i - dw->wr_ch_cnt; + chan->dir = EDMA_DIR_READ; + } + + chan->configured = false; + chan->request = EDMA_REQ_NONE; + chan->status = EDMA_ST_IDLE; + + if (chan->dir == EDMA_DIR_WRITE) + chan->ll_max = (chip->ll_region_wr[chan->id].sz / EDMA_LL_SZ); + else + chan->ll_max = (chip->ll_region_rd[chan->id].sz / EDMA_LL_SZ); + chan->ll_max -= 1; + + dev_vdbg(dev, "L. List:\tChannel %s[%u] max_cnt=%u\n", + chan->dir == EDMA_DIR_WRITE ? "write" : "read", + chan->id, chan->ll_max); + + if (dw->nr_irqs == 1) + pos = 0; + else if (chan->dir == EDMA_DIR_WRITE) + pos = chan->id % wr_alloc; + else + pos = wr_alloc + chan->id % rd_alloc; + + irq = &dw->irq[pos]; + + if (chan->dir == EDMA_DIR_WRITE) + irq->wr_mask |= BIT(chan->id); + else + irq->rd_mask |= BIT(chan->id); + + irq->dw = dw; + memcpy(&chan->msi, &irq->msi, sizeof(chan->msi)); + + dev_vdbg(dev, "MSI:\t\tChannel %s[%u] addr=0x%.8x%.8x, data=0x%.8x\n", + chan->dir == EDMA_DIR_WRITE ? 
"write" : "read", chan->id, + chan->msi.address_hi, chan->msi.address_lo, + chan->msi.data); + + chan->vc.desc_free = vchan_free_desc; + chan->vc.chan.private = chan->dir == EDMA_DIR_WRITE ? + &dw->chip->dt_region_wr[chan->id] : + &dw->chip->dt_region_rd[chan->id]; + + vchan_init(&chan->vc, dma); + + dw_edma_core_ch_config(chan); + } + + /* Set DMA channel capabilities */ + dma_cap_zero(dma->cap_mask); + dma_cap_set(DMA_SLAVE, dma->cap_mask); + dma_cap_set(DMA_CYCLIC, dma->cap_mask); + dma_cap_set(DMA_PRIVATE, dma->cap_mask); + dma_cap_set(DMA_INTERLEAVE, dma->cap_mask); + dma->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV); + dma->src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES); + dma->dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES); + dma->residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR; + + /* Set DMA channel callbacks */ + dma->dev = chip->dev; + dma->device_alloc_chan_resources = dw_edma_alloc_chan_resources; + dma->device_free_chan_resources = dw_edma_free_chan_resources; + dma->device_caps = dw_edma_device_caps; + dma->device_config = dw_edma_device_config; + dma->device_pause = dw_edma_device_pause; + dma->device_resume = dw_edma_device_resume; + dma->device_terminate_all = dw_edma_device_terminate_all; + dma->device_issue_pending = dw_edma_device_issue_pending; + dma->device_tx_status = dw_edma_device_tx_status; + dma->device_prep_slave_sg = dw_edma_device_prep_slave_sg; + dma->device_prep_dma_cyclic = dw_edma_device_prep_dma_cyclic; + dma->device_prep_interleaved_dma = dw_edma_device_prep_interleaved_dma; + + dma_set_max_seg_size(dma->dev, U32_MAX); + + /* Register DMA device */ + return dma_async_device_register(dma); +} + +static inline void dw_edma_dec_irq_alloc(int *nr_irqs, u32 *alloc, u16 cnt) +{ + if (*nr_irqs && *alloc < cnt) { + (*alloc)++; + (*nr_irqs)--; + } +} + +static inline void dw_edma_add_irq_mask(u32 *mask, u32 alloc, u16 cnt) +{ + while (*mask * alloc < cnt) + (*mask)++; +} + +static int dw_edma_irq_request(struct dw_edma *dw, + u32 *wr_alloc, u32 *rd_alloc) +{ + struct dw_edma_chip *chip = dw->chip; + struct device *dev = dw->chip->dev; + u32 wr_mask = 1; + u32 rd_mask = 1; + int i, err = 0; + u32 ch_cnt; + int irq; + + ch_cnt = dw->wr_ch_cnt + dw->rd_ch_cnt; + + if (chip->nr_irqs < 1 || !chip->ops->irq_vector) + return -EINVAL; + + dw->irq = devm_kcalloc(dev, chip->nr_irqs, sizeof(*dw->irq), GFP_KERNEL); + if (!dw->irq) + return -ENOMEM; + + if (chip->nr_irqs == 1) { + /* Common IRQ shared among all channels */ + irq = chip->ops->irq_vector(dev, 0); + err = request_irq(irq, dw_edma_interrupt_common, + IRQF_SHARED, dw->name, &dw->irq[0]); + if (err) { + dw->nr_irqs = 0; + return err; + } + + if (irq_get_msi_desc(irq)) + get_cached_msi_msg(irq, &dw->irq[0].msi); + + dw->nr_irqs = 1; + } else { + /* Distribute IRQs equally among all channels */ + int tmp = chip->nr_irqs; + + while (tmp && (*wr_alloc + *rd_alloc) < ch_cnt) { + dw_edma_dec_irq_alloc(&tmp, wr_alloc, dw->wr_ch_cnt); + dw_edma_dec_irq_alloc(&tmp, rd_alloc, dw->rd_ch_cnt); + } + + dw_edma_add_irq_mask(&wr_mask, *wr_alloc, dw->wr_ch_cnt); + dw_edma_add_irq_mask(&rd_mask, *rd_alloc, dw->rd_ch_cnt); + + for (i = 0; i < (*wr_alloc + *rd_alloc); i++) { + irq = chip->ops->irq_vector(dev, i); + err = request_irq(irq, + i < *wr_alloc ? 
+ dw_edma_interrupt_write : + dw_edma_interrupt_read, + IRQF_SHARED, dw->name, + &dw->irq[i]); + if (err) + goto err_irq_free; + + if (irq_get_msi_desc(irq)) + get_cached_msi_msg(irq, &dw->irq[i].msi); + } + + dw->nr_irqs = i; + } + + return 0; + +err_irq_free: + for (i--; i >= 0; i--) { + irq = chip->ops->irq_vector(dev, i); + free_irq(irq, &dw->irq[i]); + } + + return err; +} + +int dw_edma_probe(struct dw_edma_chip *chip) +{ + struct device *dev; + struct dw_edma *dw; + u32 wr_alloc = 0; + u32 rd_alloc = 0; + int i, err; + + if (!chip) + return -EINVAL; + + dev = chip->dev; + if (!dev || !chip->ops) + return -EINVAL; + + dw = devm_kzalloc(dev, sizeof(*dw), GFP_KERNEL); + if (!dw) + return -ENOMEM; + + dw->chip = chip; + + if (dw->chip->mf == EDMA_MF_HDMA_NATIVE) + dw_hdma_v0_core_register(dw); + else + dw_edma_v0_core_register(dw); + + raw_spin_lock_init(&dw->lock); + + dw->wr_ch_cnt = min_t(u16, chip->ll_wr_cnt, + dw_edma_core_ch_count(dw, EDMA_DIR_WRITE)); + dw->wr_ch_cnt = min_t(u16, dw->wr_ch_cnt, EDMA_MAX_WR_CH); + + dw->rd_ch_cnt = min_t(u16, chip->ll_rd_cnt, + dw_edma_core_ch_count(dw, EDMA_DIR_READ)); + dw->rd_ch_cnt = min_t(u16, dw->rd_ch_cnt, EDMA_MAX_RD_CH); + + if (!dw->wr_ch_cnt && !dw->rd_ch_cnt) + return -EINVAL; + + dev_vdbg(dev, "Channels:\twrite=%d, read=%d\n", + dw->wr_ch_cnt, dw->rd_ch_cnt); + + /* Allocate channels */ + dw->chan = devm_kcalloc(dev, dw->wr_ch_cnt + dw->rd_ch_cnt, + sizeof(*dw->chan), GFP_KERNEL); + if (!dw->chan) + return -ENOMEM; + + snprintf(dw->name, sizeof(dw->name), "dw-edma-core:%s", + dev_name(chip->dev)); + + /* Disable eDMA, only to establish the ideal initial conditions */ + dw_edma_core_off(dw); + + /* Request IRQs */ + err = dw_edma_irq_request(dw, &wr_alloc, &rd_alloc); + if (err) + return err; + + /* Setup write/read channels */ + err = dw_edma_channel_setup(dw, wr_alloc, rd_alloc); + if (err) + goto err_irq_free; + + /* Turn debugfs on */ + dw_edma_core_debugfs_on(dw); + + chip->dw = dw; + + return 0; + +err_irq_free: + for (i = (dw->nr_irqs - 1); i >= 0; i--) + free_irq(chip->ops->irq_vector(dev, i), &dw->irq[i]); + + return err; +} +EXPORT_SYMBOL_GPL(dw_edma_probe); + +int dw_edma_remove(struct dw_edma_chip *chip) +{ + struct dw_edma_chan *chan, *_chan; + struct device *dev = chip->dev; + struct dw_edma *dw = chip->dw; + int i; + + /* Skip removal if no private data found */ + if (!dw) + return -ENODEV; + + /* Disable eDMA */ + dw_edma_core_off(dw); + + /* Free irqs */ + for (i = (dw->nr_irqs - 1); i >= 0; i--) + free_irq(chip->ops->irq_vector(dev, i), &dw->irq[i]); + + /* Deregister eDMA device */ + dma_async_device_unregister(&dw->dma); + list_for_each_entry_safe(chan, _chan, &dw->dma.channels, + vc.chan.device_node) { + tasklet_kill(&chan->vc.task); + list_del(&chan->vc.chan.device_node); + } + + return 0; +} +EXPORT_SYMBOL_GPL(dw_edma_remove); + +MODULE_LICENSE("GPL v2"); +MODULE_DESCRIPTION("Synopsys DesignWare eDMA controller core driver"); +MODULE_AUTHOR("Gustavo Pimentel <gustavo.pimentel@synopsys.com>"); diff --git a/drivers/dma/dw-edma/dw-edma-core.h b/drivers/dma/dw-edma/dw-edma-core.h new file mode 100644 index 0000000000..71894b9e0b --- /dev/null +++ b/drivers/dma/dw-edma/dw-edma-core.h @@ -0,0 +1,209 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2018-2019 Synopsys, Inc. and/or its affiliates. 
+ * Synopsys DesignWare eDMA core driver + * + * Author: Gustavo Pimentel <gustavo.pimentel@synopsys.com> + */ + +#ifndef _DW_EDMA_CORE_H +#define _DW_EDMA_CORE_H + +#include <linux/msi.h> +#include <linux/dma/edma.h> + +#include "../virt-dma.h" + +#define EDMA_LL_SZ 24 + +enum dw_edma_dir { + EDMA_DIR_WRITE = 0, + EDMA_DIR_READ +}; + +enum dw_edma_request { + EDMA_REQ_NONE = 0, + EDMA_REQ_STOP, + EDMA_REQ_PAUSE +}; + +enum dw_edma_status { + EDMA_ST_IDLE = 0, + EDMA_ST_PAUSE, + EDMA_ST_BUSY +}; + +enum dw_edma_xfer_type { + EDMA_XFER_SCATTER_GATHER = 0, + EDMA_XFER_CYCLIC, + EDMA_XFER_INTERLEAVED +}; + +struct dw_edma_chan; +struct dw_edma_chunk; + +struct dw_edma_burst { + struct list_head list; + u64 sar; + u64 dar; + u32 sz; +}; + +struct dw_edma_chunk { + struct list_head list; + struct dw_edma_chan *chan; + struct dw_edma_burst *burst; + + u32 bursts_alloc; + + u8 cb; + struct dw_edma_region ll_region; /* Linked list */ +}; + +struct dw_edma_desc { + struct virt_dma_desc vd; + struct dw_edma_chan *chan; + struct dw_edma_chunk *chunk; + + u32 chunks_alloc; + + u32 alloc_sz; + u32 xfer_sz; +}; + +struct dw_edma_chan { + struct virt_dma_chan vc; + struct dw_edma *dw; + int id; + enum dw_edma_dir dir; + + u32 ll_max; + + struct msi_msg msi; + + enum dw_edma_request request; + enum dw_edma_status status; + u8 configured; + + struct dma_slave_config config; +}; + +struct dw_edma_irq { + struct msi_msg msi; + u32 wr_mask; + u32 rd_mask; + struct dw_edma *dw; +}; + +struct dw_edma { + char name[32]; + + struct dma_device dma; + + u16 wr_ch_cnt; + u16 rd_ch_cnt; + + struct dw_edma_irq *irq; + int nr_irqs; + + struct dw_edma_chan *chan; + + raw_spinlock_t lock; /* Only for legacy */ + + struct dw_edma_chip *chip; + + const struct dw_edma_core_ops *core; +}; + +typedef void (*dw_edma_handler_t)(struct dw_edma_chan *); + +struct dw_edma_core_ops { + void (*off)(struct dw_edma *dw); + u16 (*ch_count)(struct dw_edma *dw, enum dw_edma_dir dir); + enum dma_status (*ch_status)(struct dw_edma_chan *chan); + irqreturn_t (*handle_int)(struct dw_edma_irq *dw_irq, enum dw_edma_dir dir, + dw_edma_handler_t done, dw_edma_handler_t abort); + void (*start)(struct dw_edma_chunk *chunk, bool first); + void (*ch_config)(struct dw_edma_chan *chan); + void (*debugfs_on)(struct dw_edma *dw); +}; + +struct dw_edma_sg { + struct scatterlist *sgl; + unsigned int len; +}; + +struct dw_edma_cyclic { + dma_addr_t paddr; + size_t len; + size_t cnt; +}; + +struct dw_edma_transfer { + struct dma_chan *dchan; + union dw_edma_xfer { + struct dw_edma_sg sg; + struct dw_edma_cyclic cyclic; + struct dma_interleaved_template *il; + } xfer; + enum dma_transfer_direction direction; + unsigned long flags; + enum dw_edma_xfer_type type; +}; + +static inline +struct dw_edma_chan *vc2dw_edma_chan(struct virt_dma_chan *vc) +{ + return container_of(vc, struct dw_edma_chan, vc); +} + +static inline +struct dw_edma_chan *dchan2dw_edma_chan(struct dma_chan *dchan) +{ + return vc2dw_edma_chan(to_virt_chan(dchan)); +} + +static inline +void dw_edma_core_off(struct dw_edma *dw) +{ + dw->core->off(dw); +} + +static inline +u16 dw_edma_core_ch_count(struct dw_edma *dw, enum dw_edma_dir dir) +{ + return dw->core->ch_count(dw, dir); +} + +static inline +enum dma_status dw_edma_core_ch_status(struct dw_edma_chan *chan) +{ + return chan->dw->core->ch_status(chan); +} + +static inline irqreturn_t +dw_edma_core_handle_int(struct dw_edma_irq *dw_irq, enum dw_edma_dir dir, + dw_edma_handler_t done, dw_edma_handler_t abort) +{ + return 
dw_irq->dw->core->handle_int(dw_irq, dir, done, abort); +} + +static inline +void dw_edma_core_start(struct dw_edma *dw, struct dw_edma_chunk *chunk, bool first) +{ + dw->core->start(chunk, first); +} + +static inline +void dw_edma_core_ch_config(struct dw_edma_chan *chan) +{ + chan->dw->core->ch_config(chan); +} + +static inline +void dw_edma_core_debugfs_on(struct dw_edma *dw) +{ + dw->core->debugfs_on(dw); +} + +#endif /* _DW_EDMA_CORE_H */ diff --git a/drivers/dma/dw-edma/dw-edma-pcie.c b/drivers/dma/dw-edma/dw-edma-pcie.c new file mode 100644 index 0000000000..1c6043751d --- /dev/null +++ b/drivers/dma/dw-edma/dw-edma-pcie.c @@ -0,0 +1,378 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2018-2019 Synopsys, Inc. and/or its affiliates. + * Synopsys DesignWare eDMA PCIe driver + * + * Author: Gustavo Pimentel <gustavo.pimentel@synopsys.com> + */ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/pci.h> +#include <linux/device.h> +#include <linux/dma/edma.h> +#include <linux/pci-epf.h> +#include <linux/msi.h> +#include <linux/bitfield.h> + +#include "dw-edma-core.h" + +#define DW_PCIE_VSEC_DMA_ID 0x6 +#define DW_PCIE_VSEC_DMA_BAR GENMASK(10, 8) +#define DW_PCIE_VSEC_DMA_MAP GENMASK(2, 0) +#define DW_PCIE_VSEC_DMA_WR_CH GENMASK(9, 0) +#define DW_PCIE_VSEC_DMA_RD_CH GENMASK(25, 16) + +#define DW_BLOCK(a, b, c) \ + { \ + .bar = a, \ + .off = b, \ + .sz = c, \ + }, + +struct dw_edma_block { + enum pci_barno bar; + off_t off; + size_t sz; +}; + +struct dw_edma_pcie_data { + /* eDMA registers location */ + struct dw_edma_block rg; + /* eDMA memory linked list location */ + struct dw_edma_block ll_wr[EDMA_MAX_WR_CH]; + struct dw_edma_block ll_rd[EDMA_MAX_RD_CH]; + /* eDMA memory data location */ + struct dw_edma_block dt_wr[EDMA_MAX_WR_CH]; + struct dw_edma_block dt_rd[EDMA_MAX_RD_CH]; + /* Other */ + enum dw_edma_map_format mf; + u8 irqs; + u16 wr_ch_cnt; + u16 rd_ch_cnt; +}; + +static const struct dw_edma_pcie_data snps_edda_data = { + /* eDMA registers location */ + .rg.bar = BAR_0, + .rg.off = 0x00001000, /* 4 Kbytes */ + .rg.sz = 0x00002000, /* 8 Kbytes */ + /* eDMA memory linked list location */ + .ll_wr = { + /* Channel 0 - BAR 2, offset 0 Mbytes, size 2 Kbytes */ + DW_BLOCK(BAR_2, 0x00000000, 0x00000800) + /* Channel 1 - BAR 2, offset 2 Mbytes, size 2 Kbytes */ + DW_BLOCK(BAR_2, 0x00200000, 0x00000800) + }, + .ll_rd = { + /* Channel 0 - BAR 2, offset 4 Mbytes, size 2 Kbytes */ + DW_BLOCK(BAR_2, 0x00400000, 0x00000800) + /* Channel 1 - BAR 2, offset 6 Mbytes, size 2 Kbytes */ + DW_BLOCK(BAR_2, 0x00600000, 0x00000800) + }, + /* eDMA memory data location */ + .dt_wr = { + /* Channel 0 - BAR 2, offset 8 Mbytes, size 2 Kbytes */ + DW_BLOCK(BAR_2, 0x00800000, 0x00000800) + /* Channel 1 - BAR 2, offset 9 Mbytes, size 2 Kbytes */ + DW_BLOCK(BAR_2, 0x00900000, 0x00000800) + }, + .dt_rd = { + /* Channel 0 - BAR 2, offset 10 Mbytes, size 2 Kbytes */ + DW_BLOCK(BAR_2, 0x00a00000, 0x00000800) + /* Channel 1 - BAR 2, offset 11 Mbytes, size 2 Kbytes */ + DW_BLOCK(BAR_2, 0x00b00000, 0x00000800) + }, + /* Other */ + .mf = EDMA_MF_EDMA_UNROLL, + .irqs = 1, + .wr_ch_cnt = 2, + .rd_ch_cnt = 2, +}; + +static int dw_edma_pcie_irq_vector(struct device *dev, unsigned int nr) +{ + return pci_irq_vector(to_pci_dev(dev), nr); +} + +static u64 dw_edma_pcie_address(struct device *dev, phys_addr_t cpu_addr) +{ + struct pci_dev *pdev = to_pci_dev(dev); + struct pci_bus_region region; + struct resource res = { + .flags = IORESOURCE_MEM, + .start = cpu_addr, + .end = cpu_addr, + }; + 
+ pcibios_resource_to_bus(pdev->bus, ®ion, &res); + return region.start; +} + +static const struct dw_edma_plat_ops dw_edma_pcie_plat_ops = { + .irq_vector = dw_edma_pcie_irq_vector, + .pci_address = dw_edma_pcie_address, +}; + +static void dw_edma_pcie_get_vsec_dma_data(struct pci_dev *pdev, + struct dw_edma_pcie_data *pdata) +{ + u32 val, map; + u16 vsec; + u64 off; + + vsec = pci_find_vsec_capability(pdev, PCI_VENDOR_ID_SYNOPSYS, + DW_PCIE_VSEC_DMA_ID); + if (!vsec) + return; + + pci_read_config_dword(pdev, vsec + PCI_VNDR_HEADER, &val); + if (PCI_VNDR_HEADER_REV(val) != 0x00 || + PCI_VNDR_HEADER_LEN(val) != 0x18) + return; + + pci_dbg(pdev, "Detected PCIe Vendor-Specific Extended Capability DMA\n"); + pci_read_config_dword(pdev, vsec + 0x8, &val); + map = FIELD_GET(DW_PCIE_VSEC_DMA_MAP, val); + if (map != EDMA_MF_EDMA_LEGACY && + map != EDMA_MF_EDMA_UNROLL && + map != EDMA_MF_HDMA_COMPAT) + return; + + pdata->mf = map; + pdata->rg.bar = FIELD_GET(DW_PCIE_VSEC_DMA_BAR, val); + + pci_read_config_dword(pdev, vsec + 0xc, &val); + pdata->wr_ch_cnt = min_t(u16, pdata->wr_ch_cnt, + FIELD_GET(DW_PCIE_VSEC_DMA_WR_CH, val)); + pdata->rd_ch_cnt = min_t(u16, pdata->rd_ch_cnt, + FIELD_GET(DW_PCIE_VSEC_DMA_RD_CH, val)); + + pci_read_config_dword(pdev, vsec + 0x14, &val); + off = val; + pci_read_config_dword(pdev, vsec + 0x10, &val); + off <<= 32; + off |= val; + pdata->rg.off = off; +} + +static int dw_edma_pcie_probe(struct pci_dev *pdev, + const struct pci_device_id *pid) +{ + struct dw_edma_pcie_data *pdata = (void *)pid->driver_data; + struct dw_edma_pcie_data vsec_data; + struct device *dev = &pdev->dev; + struct dw_edma_chip *chip; + int err, nr_irqs; + int i, mask; + + /* Enable PCI device */ + err = pcim_enable_device(pdev); + if (err) { + pci_err(pdev, "enabling device failed\n"); + return err; + } + + memcpy(&vsec_data, pdata, sizeof(struct dw_edma_pcie_data)); + + /* + * Tries to find if exists a PCIe Vendor-Specific Extended Capability + * for the DMA, if one exists, then reconfigures it. 
+ */ + dw_edma_pcie_get_vsec_dma_data(pdev, &vsec_data); + + /* Mapping PCI BAR regions */ + mask = BIT(vsec_data.rg.bar); + for (i = 0; i < vsec_data.wr_ch_cnt; i++) { + mask |= BIT(vsec_data.ll_wr[i].bar); + mask |= BIT(vsec_data.dt_wr[i].bar); + } + for (i = 0; i < vsec_data.rd_ch_cnt; i++) { + mask |= BIT(vsec_data.ll_rd[i].bar); + mask |= BIT(vsec_data.dt_rd[i].bar); + } + err = pcim_iomap_regions(pdev, mask, pci_name(pdev)); + if (err) { + pci_err(pdev, "eDMA BAR I/O remapping failed\n"); + return err; + } + + pci_set_master(pdev); + + /* DMA configuration */ + err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); + if (err) { + pci_err(pdev, "DMA mask 64 set failed\n"); + return err; + } + + /* Data structure allocation */ + chip = devm_kzalloc(dev, sizeof(*chip), GFP_KERNEL); + if (!chip) + return -ENOMEM; + + /* IRQs allocation */ + nr_irqs = pci_alloc_irq_vectors(pdev, 1, vsec_data.irqs, + PCI_IRQ_MSI | PCI_IRQ_MSIX); + if (nr_irqs < 1) { + pci_err(pdev, "fail to alloc IRQ vector (number of IRQs=%u)\n", + nr_irqs); + return -EPERM; + } + + /* Data structure initialization */ + chip->dev = dev; + + chip->mf = vsec_data.mf; + chip->nr_irqs = nr_irqs; + chip->ops = &dw_edma_pcie_plat_ops; + + chip->ll_wr_cnt = vsec_data.wr_ch_cnt; + chip->ll_rd_cnt = vsec_data.rd_ch_cnt; + + chip->reg_base = pcim_iomap_table(pdev)[vsec_data.rg.bar]; + if (!chip->reg_base) + return -ENOMEM; + + for (i = 0; i < chip->ll_wr_cnt; i++) { + struct dw_edma_region *ll_region = &chip->ll_region_wr[i]; + struct dw_edma_region *dt_region = &chip->dt_region_wr[i]; + struct dw_edma_block *ll_block = &vsec_data.ll_wr[i]; + struct dw_edma_block *dt_block = &vsec_data.dt_wr[i]; + + ll_region->vaddr.io = pcim_iomap_table(pdev)[ll_block->bar]; + if (!ll_region->vaddr.io) + return -ENOMEM; + + ll_region->vaddr.io += ll_block->off; + ll_region->paddr = pci_bus_address(pdev, ll_block->bar); + ll_region->paddr += ll_block->off; + ll_region->sz = ll_block->sz; + + dt_region->vaddr.io = pcim_iomap_table(pdev)[dt_block->bar]; + if (!dt_region->vaddr.io) + return -ENOMEM; + + dt_region->vaddr.io += dt_block->off; + dt_region->paddr = pci_bus_address(pdev, dt_block->bar); + dt_region->paddr += dt_block->off; + dt_region->sz = dt_block->sz; + } + + for (i = 0; i < chip->ll_rd_cnt; i++) { + struct dw_edma_region *ll_region = &chip->ll_region_rd[i]; + struct dw_edma_region *dt_region = &chip->dt_region_rd[i]; + struct dw_edma_block *ll_block = &vsec_data.ll_rd[i]; + struct dw_edma_block *dt_block = &vsec_data.dt_rd[i]; + + ll_region->vaddr.io = pcim_iomap_table(pdev)[ll_block->bar]; + if (!ll_region->vaddr.io) + return -ENOMEM; + + ll_region->vaddr.io += ll_block->off; + ll_region->paddr = pci_bus_address(pdev, ll_block->bar); + ll_region->paddr += ll_block->off; + ll_region->sz = ll_block->sz; + + dt_region->vaddr.io = pcim_iomap_table(pdev)[dt_block->bar]; + if (!dt_region->vaddr.io) + return -ENOMEM; + + dt_region->vaddr.io += dt_block->off; + dt_region->paddr = pci_bus_address(pdev, dt_block->bar); + dt_region->paddr += dt_block->off; + dt_region->sz = dt_block->sz; + } + + /* Debug info */ + if (chip->mf == EDMA_MF_EDMA_LEGACY) + pci_dbg(pdev, "Version:\teDMA Port Logic (0x%x)\n", chip->mf); + else if (chip->mf == EDMA_MF_EDMA_UNROLL) + pci_dbg(pdev, "Version:\teDMA Unroll (0x%x)\n", chip->mf); + else if (chip->mf == EDMA_MF_HDMA_COMPAT) + pci_dbg(pdev, "Version:\tHDMA Compatible (0x%x)\n", chip->mf); + else + pci_dbg(pdev, "Version:\tUnknown (0x%x)\n", chip->mf); + + pci_dbg(pdev, "Registers:\tBAR=%u, off=0x%.8lx, 
sz=0x%zx bytes, addr(v=%p)\n", + vsec_data.rg.bar, vsec_data.rg.off, vsec_data.rg.sz, + chip->reg_base); + + + for (i = 0; i < chip->ll_wr_cnt; i++) { + pci_dbg(pdev, "L. List:\tWRITE CH%.2u, BAR=%u, off=0x%.8lx, sz=0x%zx bytes, addr(v=%p, p=%pa)\n", + i, vsec_data.ll_wr[i].bar, + vsec_data.ll_wr[i].off, chip->ll_region_wr[i].sz, + chip->ll_region_wr[i].vaddr.io, &chip->ll_region_wr[i].paddr); + + pci_dbg(pdev, "Data:\tWRITE CH%.2u, BAR=%u, off=0x%.8lx, sz=0x%zx bytes, addr(v=%p, p=%pa)\n", + i, vsec_data.dt_wr[i].bar, + vsec_data.dt_wr[i].off, chip->dt_region_wr[i].sz, + chip->dt_region_wr[i].vaddr.io, &chip->dt_region_wr[i].paddr); + } + + for (i = 0; i < chip->ll_rd_cnt; i++) { + pci_dbg(pdev, "L. List:\tREAD CH%.2u, BAR=%u, off=0x%.8lx, sz=0x%zx bytes, addr(v=%p, p=%pa)\n", + i, vsec_data.ll_rd[i].bar, + vsec_data.ll_rd[i].off, chip->ll_region_rd[i].sz, + chip->ll_region_rd[i].vaddr.io, &chip->ll_region_rd[i].paddr); + + pci_dbg(pdev, "Data:\tREAD CH%.2u, BAR=%u, off=0x%.8lx, sz=0x%zx bytes, addr(v=%p, p=%pa)\n", + i, vsec_data.dt_rd[i].bar, + vsec_data.dt_rd[i].off, chip->dt_region_rd[i].sz, + chip->dt_region_rd[i].vaddr.io, &chip->dt_region_rd[i].paddr); + } + + pci_dbg(pdev, "Nr. IRQs:\t%u\n", chip->nr_irqs); + + /* Validating if PCI interrupts were enabled */ + if (!pci_dev_msi_enabled(pdev)) { + pci_err(pdev, "enable interrupt failed\n"); + return -EPERM; + } + + /* Starting eDMA driver */ + err = dw_edma_probe(chip); + if (err) { + pci_err(pdev, "eDMA probe failed\n"); + return err; + } + + /* Saving data structure reference */ + pci_set_drvdata(pdev, chip); + + return 0; +} + +static void dw_edma_pcie_remove(struct pci_dev *pdev) +{ + struct dw_edma_chip *chip = pci_get_drvdata(pdev); + int err; + + /* Stopping eDMA driver */ + err = dw_edma_remove(chip); + if (err) + pci_warn(pdev, "can't remove device properly: %d\n", err); + + /* Freeing IRQs */ + pci_free_irq_vectors(pdev); +} + +static const struct pci_device_id dw_edma_pcie_id_table[] = { + { PCI_DEVICE_DATA(SYNOPSYS, EDDA, &snps_edda_data) }, + { } +}; +MODULE_DEVICE_TABLE(pci, dw_edma_pcie_id_table); + +static struct pci_driver dw_edma_pcie_driver = { + .name = "dw-edma-pcie", + .id_table = dw_edma_pcie_id_table, + .probe = dw_edma_pcie_probe, + .remove = dw_edma_pcie_remove, +}; + +module_pci_driver(dw_edma_pcie_driver); + +MODULE_LICENSE("GPL v2"); +MODULE_DESCRIPTION("Synopsys DesignWare eDMA PCIe driver"); +MODULE_AUTHOR("Gustavo Pimentel <gustavo.pimentel@synopsys.com>"); diff --git a/drivers/dma/dw-edma/dw-edma-v0-core.c b/drivers/dma/dw-edma/dw-edma-v0-core.c new file mode 100644 index 0000000000..b38786f0ad --- /dev/null +++ b/drivers/dma/dw-edma/dw-edma-v0-core.c @@ -0,0 +1,508 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2018-2019 Synopsys, Inc. and/or its affiliates. 
+ * Synopsys DesignWare eDMA v0 core + * + * Author: Gustavo Pimentel <gustavo.pimentel@synopsys.com> + */ + +#include <linux/bitfield.h> +#include <linux/irqreturn.h> +#include <linux/io-64-nonatomic-lo-hi.h> + +#include "dw-edma-core.h" +#include "dw-edma-v0-core.h" +#include "dw-edma-v0-regs.h" +#include "dw-edma-v0-debugfs.h" + +enum dw_edma_control { + DW_EDMA_V0_CB = BIT(0), + DW_EDMA_V0_TCB = BIT(1), + DW_EDMA_V0_LLP = BIT(2), + DW_EDMA_V0_LIE = BIT(3), + DW_EDMA_V0_RIE = BIT(4), + DW_EDMA_V0_CCS = BIT(8), + DW_EDMA_V0_LLE = BIT(9), +}; + +static inline struct dw_edma_v0_regs __iomem *__dw_regs(struct dw_edma *dw) +{ + return dw->chip->reg_base; +} + +#define SET_32(dw, name, value) \ + writel(value, &(__dw_regs(dw)->name)) + +#define GET_32(dw, name) \ + readl(&(__dw_regs(dw)->name)) + +#define SET_RW_32(dw, dir, name, value) \ + do { \ + if ((dir) == EDMA_DIR_WRITE) \ + SET_32(dw, wr_##name, value); \ + else \ + SET_32(dw, rd_##name, value); \ + } while (0) + +#define GET_RW_32(dw, dir, name) \ + ((dir) == EDMA_DIR_WRITE \ + ? GET_32(dw, wr_##name) \ + : GET_32(dw, rd_##name)) + +#define SET_BOTH_32(dw, name, value) \ + do { \ + SET_32(dw, wr_##name, value); \ + SET_32(dw, rd_##name, value); \ + } while (0) + +#define SET_64(dw, name, value) \ + writeq(value, &(__dw_regs(dw)->name)) + +#define GET_64(dw, name) \ + readq(&(__dw_regs(dw)->name)) + +#define SET_RW_64(dw, dir, name, value) \ + do { \ + if ((dir) == EDMA_DIR_WRITE) \ + SET_64(dw, wr_##name, value); \ + else \ + SET_64(dw, rd_##name, value); \ + } while (0) + +#define GET_RW_64(dw, dir, name) \ + ((dir) == EDMA_DIR_WRITE \ + ? GET_64(dw, wr_##name) \ + : GET_64(dw, rd_##name)) + +#define SET_BOTH_64(dw, name, value) \ + do { \ + SET_64(dw, wr_##name, value); \ + SET_64(dw, rd_##name, value); \ + } while (0) + +#define SET_COMPAT(dw, name, value) \ + writel(value, &(__dw_regs(dw)->type.unroll.name)) + +#define SET_RW_COMPAT(dw, dir, name, value) \ + do { \ + if ((dir) == EDMA_DIR_WRITE) \ + SET_COMPAT(dw, wr_##name, value); \ + else \ + SET_COMPAT(dw, rd_##name, value); \ + } while (0) + +static inline struct dw_edma_v0_ch_regs __iomem * +__dw_ch_regs(struct dw_edma *dw, enum dw_edma_dir dir, u16 ch) +{ + if (dw->chip->mf == EDMA_MF_EDMA_LEGACY) + return &(__dw_regs(dw)->type.legacy.ch); + + if (dir == EDMA_DIR_WRITE) + return &__dw_regs(dw)->type.unroll.ch[ch].wr; + + return &__dw_regs(dw)->type.unroll.ch[ch].rd; +} + +static inline void writel_ch(struct dw_edma *dw, enum dw_edma_dir dir, u16 ch, + u32 value, void __iomem *addr) +{ + if (dw->chip->mf == EDMA_MF_EDMA_LEGACY) { + u32 viewport_sel; + unsigned long flags; + + raw_spin_lock_irqsave(&dw->lock, flags); + + viewport_sel = FIELD_PREP(EDMA_V0_VIEWPORT_MASK, ch); + if (dir == EDMA_DIR_READ) + viewport_sel |= BIT(31); + + writel(viewport_sel, + &(__dw_regs(dw)->type.legacy.viewport_sel)); + writel(value, addr); + + raw_spin_unlock_irqrestore(&dw->lock, flags); + } else { + writel(value, addr); + } +} + +static inline u32 readl_ch(struct dw_edma *dw, enum dw_edma_dir dir, u16 ch, + const void __iomem *addr) +{ + u32 value; + + if (dw->chip->mf == EDMA_MF_EDMA_LEGACY) { + u32 viewport_sel; + unsigned long flags; + + raw_spin_lock_irqsave(&dw->lock, flags); + + viewport_sel = FIELD_PREP(EDMA_V0_VIEWPORT_MASK, ch); + if (dir == EDMA_DIR_READ) + viewport_sel |= BIT(31); + + writel(viewport_sel, + &(__dw_regs(dw)->type.legacy.viewport_sel)); + value = readl(addr); + + raw_spin_unlock_irqrestore(&dw->lock, flags); + } else { + value = readl(addr); + } + + return value; +} 
+ +#define SET_CH_32(dw, dir, ch, name, value) \ + writel_ch(dw, dir, ch, value, &(__dw_ch_regs(dw, dir, ch)->name)) + +#define GET_CH_32(dw, dir, ch, name) \ + readl_ch(dw, dir, ch, &(__dw_ch_regs(dw, dir, ch)->name)) + +/* eDMA management callbacks */ +static void dw_edma_v0_core_off(struct dw_edma *dw) +{ + SET_BOTH_32(dw, int_mask, + EDMA_V0_DONE_INT_MASK | EDMA_V0_ABORT_INT_MASK); + SET_BOTH_32(dw, int_clear, + EDMA_V0_DONE_INT_MASK | EDMA_V0_ABORT_INT_MASK); + SET_BOTH_32(dw, engine_en, 0); +} + +static u16 dw_edma_v0_core_ch_count(struct dw_edma *dw, enum dw_edma_dir dir) +{ + u32 num_ch; + + if (dir == EDMA_DIR_WRITE) + num_ch = FIELD_GET(EDMA_V0_WRITE_CH_COUNT_MASK, + GET_32(dw, ctrl)); + else + num_ch = FIELD_GET(EDMA_V0_READ_CH_COUNT_MASK, + GET_32(dw, ctrl)); + + if (num_ch > EDMA_V0_MAX_NR_CH) + num_ch = EDMA_V0_MAX_NR_CH; + + return (u16)num_ch; +} + +static enum dma_status dw_edma_v0_core_ch_status(struct dw_edma_chan *chan) +{ + struct dw_edma *dw = chan->dw; + u32 tmp; + + tmp = FIELD_GET(EDMA_V0_CH_STATUS_MASK, + GET_CH_32(dw, chan->dir, chan->id, ch_control1)); + + if (tmp == 1) + return DMA_IN_PROGRESS; + else if (tmp == 3) + return DMA_COMPLETE; + else + return DMA_ERROR; +} + +static void dw_edma_v0_core_clear_done_int(struct dw_edma_chan *chan) +{ + struct dw_edma *dw = chan->dw; + + SET_RW_32(dw, chan->dir, int_clear, + FIELD_PREP(EDMA_V0_DONE_INT_MASK, BIT(chan->id))); +} + +static void dw_edma_v0_core_clear_abort_int(struct dw_edma_chan *chan) +{ + struct dw_edma *dw = chan->dw; + + SET_RW_32(dw, chan->dir, int_clear, + FIELD_PREP(EDMA_V0_ABORT_INT_MASK, BIT(chan->id))); +} + +static u32 dw_edma_v0_core_status_done_int(struct dw_edma *dw, enum dw_edma_dir dir) +{ + return FIELD_GET(EDMA_V0_DONE_INT_MASK, + GET_RW_32(dw, dir, int_status)); +} + +static u32 dw_edma_v0_core_status_abort_int(struct dw_edma *dw, enum dw_edma_dir dir) +{ + return FIELD_GET(EDMA_V0_ABORT_INT_MASK, + GET_RW_32(dw, dir, int_status)); +} + +static irqreturn_t +dw_edma_v0_core_handle_int(struct dw_edma_irq *dw_irq, enum dw_edma_dir dir, + dw_edma_handler_t done, dw_edma_handler_t abort) +{ + struct dw_edma *dw = dw_irq->dw; + unsigned long total, pos, val; + irqreturn_t ret = IRQ_NONE; + struct dw_edma_chan *chan; + unsigned long off; + u32 mask; + + if (dir == EDMA_DIR_WRITE) { + total = dw->wr_ch_cnt; + off = 0; + mask = dw_irq->wr_mask; + } else { + total = dw->rd_ch_cnt; + off = dw->wr_ch_cnt; + mask = dw_irq->rd_mask; + } + + val = dw_edma_v0_core_status_done_int(dw, dir); + val &= mask; + for_each_set_bit(pos, &val, total) { + chan = &dw->chan[pos + off]; + + dw_edma_v0_core_clear_done_int(chan); + done(chan); + + ret = IRQ_HANDLED; + } + + val = dw_edma_v0_core_status_abort_int(dw, dir); + val &= mask; + for_each_set_bit(pos, &val, total) { + chan = &dw->chan[pos + off]; + + dw_edma_v0_core_clear_abort_int(chan); + abort(chan); + + ret = IRQ_HANDLED; + } + + return ret; +} + +static void dw_edma_v0_write_ll_data(struct dw_edma_chunk *chunk, int i, + u32 control, u32 size, u64 sar, u64 dar) +{ + ptrdiff_t ofs = i * sizeof(struct dw_edma_v0_lli); + + if (chunk->chan->dw->chip->flags & DW_EDMA_CHIP_LOCAL) { + struct dw_edma_v0_lli *lli = chunk->ll_region.vaddr.mem + ofs; + + lli->control = control; + lli->transfer_size = size; + lli->sar.reg = sar; + lli->dar.reg = dar; + } else { + struct dw_edma_v0_lli __iomem *lli = chunk->ll_region.vaddr.io + ofs; + + writel(control, &lli->control); + writel(size, &lli->transfer_size); + writeq(sar, &lli->sar.reg); + writeq(dar, &lli->dar.reg); + } 
+} + +static void dw_edma_v0_write_ll_link(struct dw_edma_chunk *chunk, + int i, u32 control, u64 pointer) +{ + ptrdiff_t ofs = i * sizeof(struct dw_edma_v0_lli); + + if (chunk->chan->dw->chip->flags & DW_EDMA_CHIP_LOCAL) { + struct dw_edma_v0_llp *llp = chunk->ll_region.vaddr.mem + ofs; + + llp->control = control; + llp->llp.reg = pointer; + } else { + struct dw_edma_v0_llp __iomem *llp = chunk->ll_region.vaddr.io + ofs; + + writel(control, &llp->control); + writeq(pointer, &llp->llp.reg); + } +} + +static void dw_edma_v0_core_write_chunk(struct dw_edma_chunk *chunk) +{ + struct dw_edma_burst *child; + struct dw_edma_chan *chan = chunk->chan; + u32 control = 0, i = 0; + int j; + + if (chunk->cb) + control = DW_EDMA_V0_CB; + + j = chunk->bursts_alloc; + list_for_each_entry(child, &chunk->burst->list, list) { + j--; + if (!j) { + control |= DW_EDMA_V0_LIE; + if (!(chan->dw->chip->flags & DW_EDMA_CHIP_LOCAL)) + control |= DW_EDMA_V0_RIE; + } + + dw_edma_v0_write_ll_data(chunk, i++, control, child->sz, + child->sar, child->dar); + } + + control = DW_EDMA_V0_LLP | DW_EDMA_V0_TCB; + if (!chunk->cb) + control |= DW_EDMA_V0_CB; + + dw_edma_v0_write_ll_link(chunk, i, control, chunk->ll_region.paddr); +} + +static void dw_edma_v0_core_start(struct dw_edma_chunk *chunk, bool first) +{ + struct dw_edma_chan *chan = chunk->chan; + struct dw_edma *dw = chan->dw; + u32 tmp; + + dw_edma_v0_core_write_chunk(chunk); + + if (first) { + /* Enable engine */ + SET_RW_32(dw, chan->dir, engine_en, BIT(0)); + if (dw->chip->mf == EDMA_MF_HDMA_COMPAT) { + switch (chan->id) { + case 0: + SET_RW_COMPAT(dw, chan->dir, ch0_pwr_en, + BIT(0)); + break; + case 1: + SET_RW_COMPAT(dw, chan->dir, ch1_pwr_en, + BIT(0)); + break; + case 2: + SET_RW_COMPAT(dw, chan->dir, ch2_pwr_en, + BIT(0)); + break; + case 3: + SET_RW_COMPAT(dw, chan->dir, ch3_pwr_en, + BIT(0)); + break; + case 4: + SET_RW_COMPAT(dw, chan->dir, ch4_pwr_en, + BIT(0)); + break; + case 5: + SET_RW_COMPAT(dw, chan->dir, ch5_pwr_en, + BIT(0)); + break; + case 6: + SET_RW_COMPAT(dw, chan->dir, ch6_pwr_en, + BIT(0)); + break; + case 7: + SET_RW_COMPAT(dw, chan->dir, ch7_pwr_en, + BIT(0)); + break; + } + } + /* Interrupt unmask - done, abort */ + tmp = GET_RW_32(dw, chan->dir, int_mask); + tmp &= ~FIELD_PREP(EDMA_V0_DONE_INT_MASK, BIT(chan->id)); + tmp &= ~FIELD_PREP(EDMA_V0_ABORT_INT_MASK, BIT(chan->id)); + SET_RW_32(dw, chan->dir, int_mask, tmp); + /* Linked list error */ + tmp = GET_RW_32(dw, chan->dir, linked_list_err_en); + tmp |= FIELD_PREP(EDMA_V0_LINKED_LIST_ERR_MASK, BIT(chan->id)); + SET_RW_32(dw, chan->dir, linked_list_err_en, tmp); + /* Channel control */ + SET_CH_32(dw, chan->dir, chan->id, ch_control1, + (DW_EDMA_V0_CCS | DW_EDMA_V0_LLE)); + /* Linked list */ + /* llp is not aligned on 64bit -> keep 32bit accesses */ + SET_CH_32(dw, chan->dir, chan->id, llp.lsb, + lower_32_bits(chunk->ll_region.paddr)); + SET_CH_32(dw, chan->dir, chan->id, llp.msb, + upper_32_bits(chunk->ll_region.paddr)); + } + /* Doorbell */ + SET_RW_32(dw, chan->dir, doorbell, + FIELD_PREP(EDMA_V0_DOORBELL_CH_MASK, chan->id)); +} + +static void dw_edma_v0_core_ch_config(struct dw_edma_chan *chan) +{ + struct dw_edma *dw = chan->dw; + u32 tmp = 0; + + /* MSI done addr - low, high */ + SET_RW_32(dw, chan->dir, done_imwr.lsb, chan->msi.address_lo); + SET_RW_32(dw, chan->dir, done_imwr.msb, chan->msi.address_hi); + /* MSI abort addr - low, high */ + SET_RW_32(dw, chan->dir, abort_imwr.lsb, chan->msi.address_lo); + SET_RW_32(dw, chan->dir, abort_imwr.msb, chan->msi.address_hi); + /* MSI 
data - low, high */ + switch (chan->id) { + case 0: + case 1: + tmp = GET_RW_32(dw, chan->dir, ch01_imwr_data); + break; + + case 2: + case 3: + tmp = GET_RW_32(dw, chan->dir, ch23_imwr_data); + break; + + case 4: + case 5: + tmp = GET_RW_32(dw, chan->dir, ch45_imwr_data); + break; + + case 6: + case 7: + tmp = GET_RW_32(dw, chan->dir, ch67_imwr_data); + break; + } + + if (chan->id & BIT(0)) { + /* Channel odd {1, 3, 5, 7} */ + tmp &= EDMA_V0_CH_EVEN_MSI_DATA_MASK; + tmp |= FIELD_PREP(EDMA_V0_CH_ODD_MSI_DATA_MASK, + chan->msi.data); + } else { + /* Channel even {0, 2, 4, 6} */ + tmp &= EDMA_V0_CH_ODD_MSI_DATA_MASK; + tmp |= FIELD_PREP(EDMA_V0_CH_EVEN_MSI_DATA_MASK, + chan->msi.data); + } + + switch (chan->id) { + case 0: + case 1: + SET_RW_32(dw, chan->dir, ch01_imwr_data, tmp); + break; + + case 2: + case 3: + SET_RW_32(dw, chan->dir, ch23_imwr_data, tmp); + break; + + case 4: + case 5: + SET_RW_32(dw, chan->dir, ch45_imwr_data, tmp); + break; + + case 6: + case 7: + SET_RW_32(dw, chan->dir, ch67_imwr_data, tmp); + break; + } +} + +/* eDMA debugfs callbacks */ +static void dw_edma_v0_core_debugfs_on(struct dw_edma *dw) +{ + dw_edma_v0_debugfs_on(dw); +} + +static const struct dw_edma_core_ops dw_edma_v0_core = { + .off = dw_edma_v0_core_off, + .ch_count = dw_edma_v0_core_ch_count, + .ch_status = dw_edma_v0_core_ch_status, + .handle_int = dw_edma_v0_core_handle_int, + .start = dw_edma_v0_core_start, + .ch_config = dw_edma_v0_core_ch_config, + .debugfs_on = dw_edma_v0_core_debugfs_on, +}; + +void dw_edma_v0_core_register(struct dw_edma *dw) +{ + dw->core = &dw_edma_v0_core; +} diff --git a/drivers/dma/dw-edma/dw-edma-v0-core.h b/drivers/dma/dw-edma/dw-edma-v0-core.h new file mode 100644 index 0000000000..04a882222f --- /dev/null +++ b/drivers/dma/dw-edma/dw-edma-v0-core.h @@ -0,0 +1,17 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2018-2019 Synopsys, Inc. and/or its affiliates. + * Synopsys DesignWare eDMA v0 core + * + * Author: Gustavo Pimentel <gustavo.pimentel@synopsys.com> + */ + +#ifndef _DW_EDMA_V0_CORE_H +#define _DW_EDMA_V0_CORE_H + +#include <linux/dma/edma.h> + +/* eDMA core register */ +void dw_edma_v0_core_register(struct dw_edma *dw); + +#endif /* _DW_EDMA_V0_CORE_H */ diff --git a/drivers/dma/dw-edma/dw-edma-v0-debugfs.c b/drivers/dma/dw-edma/dw-edma-v0-debugfs.c new file mode 100644 index 0000000000..0745d9e7d2 --- /dev/null +++ b/drivers/dma/dw-edma/dw-edma-v0-debugfs.c @@ -0,0 +1,292 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2018-2019 Synopsys, Inc. and/or its affiliates. 
+ * Synopsys DesignWare eDMA v0 core + * + * Author: Gustavo Pimentel <gustavo.pimentel@synopsys.com> + */ + +#include <linux/debugfs.h> +#include <linux/bitfield.h> + +#include "dw-edma-v0-debugfs.h" +#include "dw-edma-v0-regs.h" +#include "dw-edma-core.h" + +#define REGS_ADDR(dw, name) \ + ({ \ + struct dw_edma_v0_regs __iomem *__regs = (dw)->chip->reg_base; \ + \ + (void __iomem *)&__regs->name; \ + }) + +#define REGS_CH_ADDR(dw, name, _dir, _ch) \ + ({ \ + struct dw_edma_v0_ch_regs __iomem *__ch_regs; \ + \ + if ((dw)->chip->mf == EDMA_MF_EDMA_LEGACY) \ + __ch_regs = REGS_ADDR(dw, type.legacy.ch); \ + else if (_dir == EDMA_DIR_READ) \ + __ch_regs = REGS_ADDR(dw, type.unroll.ch[_ch].rd); \ + else \ + __ch_regs = REGS_ADDR(dw, type.unroll.ch[_ch].wr); \ + \ + (void __iomem *)&__ch_regs->name; \ + }) + +#define REGISTER(dw, name) \ + { dw, #name, REGS_ADDR(dw, name) } + +#define CTX_REGISTER(dw, name, dir, ch) \ + { dw, #name, REGS_CH_ADDR(dw, name, dir, ch), dir, ch } + +#define WR_REGISTER(dw, name) \ + { dw, #name, REGS_ADDR(dw, wr_##name) } +#define RD_REGISTER(dw, name) \ + { dw, #name, REGS_ADDR(dw, rd_##name) } + +#define WR_REGISTER_LEGACY(dw, name) \ + { dw, #name, REGS_ADDR(dw, type.legacy.wr_##name) } +#define RD_REGISTER_LEGACY(name) \ + { dw, #name, REGS_ADDR(dw, type.legacy.rd_##name) } + +#define WR_REGISTER_UNROLL(dw, name) \ + { dw, #name, REGS_ADDR(dw, type.unroll.wr_##name) } +#define RD_REGISTER_UNROLL(dw, name) \ + { dw, #name, REGS_ADDR(dw, type.unroll.rd_##name) } + +#define WRITE_STR "write" +#define READ_STR "read" +#define CHANNEL_STR "channel" +#define REGISTERS_STR "registers" + +struct dw_edma_debugfs_entry { + struct dw_edma *dw; + const char *name; + void __iomem *reg; + enum dw_edma_dir dir; + u16 ch; +}; + +static int dw_edma_debugfs_u32_get(void *data, u64 *val) +{ + struct dw_edma_debugfs_entry *entry = data; + struct dw_edma *dw = entry->dw; + void __iomem *reg = entry->reg; + + if (dw->chip->mf == EDMA_MF_EDMA_LEGACY && + reg >= REGS_ADDR(dw, type.legacy.ch)) { + unsigned long flags; + u32 viewport_sel; + + viewport_sel = entry->dir == EDMA_DIR_READ ? 
BIT(31) : 0; + viewport_sel |= FIELD_PREP(EDMA_V0_VIEWPORT_MASK, entry->ch); + + raw_spin_lock_irqsave(&dw->lock, flags); + + writel(viewport_sel, REGS_ADDR(dw, type.legacy.viewport_sel)); + *val = readl(reg); + + raw_spin_unlock_irqrestore(&dw->lock, flags); + } else { + *val = readl(reg); + } + + return 0; +} +DEFINE_DEBUGFS_ATTRIBUTE(fops_x32, dw_edma_debugfs_u32_get, NULL, "0x%08llx\n"); + +static void dw_edma_debugfs_create_x32(struct dw_edma *dw, + const struct dw_edma_debugfs_entry ini[], + int nr_entries, struct dentry *dent) +{ + struct dw_edma_debugfs_entry *entries; + int i; + + entries = devm_kcalloc(dw->chip->dev, nr_entries, sizeof(*entries), + GFP_KERNEL); + if (!entries) + return; + + for (i = 0; i < nr_entries; i++) { + entries[i] = ini[i]; + + debugfs_create_file_unsafe(entries[i].name, 0444, dent, + &entries[i], &fops_x32); + } +} + +static void dw_edma_debugfs_regs_ch(struct dw_edma *dw, enum dw_edma_dir dir, + u16 ch, struct dentry *dent) +{ + struct dw_edma_debugfs_entry debugfs_regs[] = { + CTX_REGISTER(dw, ch_control1, dir, ch), + CTX_REGISTER(dw, ch_control2, dir, ch), + CTX_REGISTER(dw, transfer_size, dir, ch), + CTX_REGISTER(dw, sar.lsb, dir, ch), + CTX_REGISTER(dw, sar.msb, dir, ch), + CTX_REGISTER(dw, dar.lsb, dir, ch), + CTX_REGISTER(dw, dar.msb, dir, ch), + CTX_REGISTER(dw, llp.lsb, dir, ch), + CTX_REGISTER(dw, llp.msb, dir, ch), + }; + int nr_entries; + + nr_entries = ARRAY_SIZE(debugfs_regs); + dw_edma_debugfs_create_x32(dw, debugfs_regs, nr_entries, dent); +} + +static noinline_for_stack void +dw_edma_debugfs_regs_wr(struct dw_edma *dw, struct dentry *dent) +{ + const struct dw_edma_debugfs_entry debugfs_regs[] = { + /* eDMA global registers */ + WR_REGISTER(dw, engine_en), + WR_REGISTER(dw, doorbell), + WR_REGISTER(dw, ch_arb_weight.lsb), + WR_REGISTER(dw, ch_arb_weight.msb), + /* eDMA interrupts registers */ + WR_REGISTER(dw, int_status), + WR_REGISTER(dw, int_mask), + WR_REGISTER(dw, int_clear), + WR_REGISTER(dw, err_status), + WR_REGISTER(dw, done_imwr.lsb), + WR_REGISTER(dw, done_imwr.msb), + WR_REGISTER(dw, abort_imwr.lsb), + WR_REGISTER(dw, abort_imwr.msb), + WR_REGISTER(dw, ch01_imwr_data), + WR_REGISTER(dw, ch23_imwr_data), + WR_REGISTER(dw, ch45_imwr_data), + WR_REGISTER(dw, ch67_imwr_data), + WR_REGISTER(dw, linked_list_err_en), + }; + const struct dw_edma_debugfs_entry debugfs_unroll_regs[] = { + /* eDMA channel context grouping */ + WR_REGISTER_UNROLL(dw, engine_chgroup), + WR_REGISTER_UNROLL(dw, engine_hshake_cnt.lsb), + WR_REGISTER_UNROLL(dw, engine_hshake_cnt.msb), + WR_REGISTER_UNROLL(dw, ch0_pwr_en), + WR_REGISTER_UNROLL(dw, ch1_pwr_en), + WR_REGISTER_UNROLL(dw, ch2_pwr_en), + WR_REGISTER_UNROLL(dw, ch3_pwr_en), + WR_REGISTER_UNROLL(dw, ch4_pwr_en), + WR_REGISTER_UNROLL(dw, ch5_pwr_en), + WR_REGISTER_UNROLL(dw, ch6_pwr_en), + WR_REGISTER_UNROLL(dw, ch7_pwr_en), + }; + struct dentry *regs_dent, *ch_dent; + int nr_entries, i; + char name[16]; + + regs_dent = debugfs_create_dir(WRITE_STR, dent); + + nr_entries = ARRAY_SIZE(debugfs_regs); + dw_edma_debugfs_create_x32(dw, debugfs_regs, nr_entries, regs_dent); + + if (dw->chip->mf == EDMA_MF_HDMA_COMPAT) { + nr_entries = ARRAY_SIZE(debugfs_unroll_regs); + dw_edma_debugfs_create_x32(dw, debugfs_unroll_regs, nr_entries, + regs_dent); + } + + for (i = 0; i < dw->wr_ch_cnt; i++) { + snprintf(name, sizeof(name), "%s:%d", CHANNEL_STR, i); + + ch_dent = debugfs_create_dir(name, regs_dent); + + dw_edma_debugfs_regs_ch(dw, EDMA_DIR_WRITE, i, ch_dent); + } +} + +static noinline_for_stack void 
dw_edma_debugfs_regs_rd(struct dw_edma *dw, + struct dentry *dent) +{ + const struct dw_edma_debugfs_entry debugfs_regs[] = { + /* eDMA global registers */ + RD_REGISTER(dw, engine_en), + RD_REGISTER(dw, doorbell), + RD_REGISTER(dw, ch_arb_weight.lsb), + RD_REGISTER(dw, ch_arb_weight.msb), + /* eDMA interrupts registers */ + RD_REGISTER(dw, int_status), + RD_REGISTER(dw, int_mask), + RD_REGISTER(dw, int_clear), + RD_REGISTER(dw, err_status.lsb), + RD_REGISTER(dw, err_status.msb), + RD_REGISTER(dw, linked_list_err_en), + RD_REGISTER(dw, done_imwr.lsb), + RD_REGISTER(dw, done_imwr.msb), + RD_REGISTER(dw, abort_imwr.lsb), + RD_REGISTER(dw, abort_imwr.msb), + RD_REGISTER(dw, ch01_imwr_data), + RD_REGISTER(dw, ch23_imwr_data), + RD_REGISTER(dw, ch45_imwr_data), + RD_REGISTER(dw, ch67_imwr_data), + }; + const struct dw_edma_debugfs_entry debugfs_unroll_regs[] = { + /* eDMA channel context grouping */ + RD_REGISTER_UNROLL(dw, engine_chgroup), + RD_REGISTER_UNROLL(dw, engine_hshake_cnt.lsb), + RD_REGISTER_UNROLL(dw, engine_hshake_cnt.msb), + RD_REGISTER_UNROLL(dw, ch0_pwr_en), + RD_REGISTER_UNROLL(dw, ch1_pwr_en), + RD_REGISTER_UNROLL(dw, ch2_pwr_en), + RD_REGISTER_UNROLL(dw, ch3_pwr_en), + RD_REGISTER_UNROLL(dw, ch4_pwr_en), + RD_REGISTER_UNROLL(dw, ch5_pwr_en), + RD_REGISTER_UNROLL(dw, ch6_pwr_en), + RD_REGISTER_UNROLL(dw, ch7_pwr_en), + }; + struct dentry *regs_dent, *ch_dent; + int nr_entries, i; + char name[16]; + + regs_dent = debugfs_create_dir(READ_STR, dent); + + nr_entries = ARRAY_SIZE(debugfs_regs); + dw_edma_debugfs_create_x32(dw, debugfs_regs, nr_entries, regs_dent); + + if (dw->chip->mf == EDMA_MF_HDMA_COMPAT) { + nr_entries = ARRAY_SIZE(debugfs_unroll_regs); + dw_edma_debugfs_create_x32(dw, debugfs_unroll_regs, nr_entries, + regs_dent); + } + + for (i = 0; i < dw->rd_ch_cnt; i++) { + snprintf(name, sizeof(name), "%s:%d", CHANNEL_STR, i); + + ch_dent = debugfs_create_dir(name, regs_dent); + + dw_edma_debugfs_regs_ch(dw, EDMA_DIR_READ, i, ch_dent); + } +} + +static void dw_edma_debugfs_regs(struct dw_edma *dw) +{ + const struct dw_edma_debugfs_entry debugfs_regs[] = { + REGISTER(dw, ctrl_data_arb_prior), + REGISTER(dw, ctrl), + }; + struct dentry *regs_dent; + int nr_entries; + + regs_dent = debugfs_create_dir(REGISTERS_STR, dw->dma.dbg_dev_root); + + nr_entries = ARRAY_SIZE(debugfs_regs); + dw_edma_debugfs_create_x32(dw, debugfs_regs, nr_entries, regs_dent); + + dw_edma_debugfs_regs_wr(dw, regs_dent); + dw_edma_debugfs_regs_rd(dw, regs_dent); +} + +void dw_edma_v0_debugfs_on(struct dw_edma *dw) +{ + if (!debugfs_initialized()) + return; + + debugfs_create_u32("mf", 0444, dw->dma.dbg_dev_root, &dw->chip->mf); + debugfs_create_u16("wr_ch_cnt", 0444, dw->dma.dbg_dev_root, &dw->wr_ch_cnt); + debugfs_create_u16("rd_ch_cnt", 0444, dw->dma.dbg_dev_root, &dw->rd_ch_cnt); + + dw_edma_debugfs_regs(dw); +} diff --git a/drivers/dma/dw-edma/dw-edma-v0-debugfs.h b/drivers/dma/dw-edma/dw-edma-v0-debugfs.h new file mode 100644 index 0000000000..fb3342d97d --- /dev/null +++ b/drivers/dma/dw-edma/dw-edma-v0-debugfs.h @@ -0,0 +1,22 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2018-2019 Synopsys, Inc. and/or its affiliates. 
+ * Synopsys DesignWare eDMA v0 core + * + * Author: Gustavo Pimentel <gustavo.pimentel@synopsys.com> + */ + +#ifndef _DW_EDMA_V0_DEBUG_FS_H +#define _DW_EDMA_V0_DEBUG_FS_H + +#include <linux/dma/edma.h> + +#ifdef CONFIG_DEBUG_FS +void dw_edma_v0_debugfs_on(struct dw_edma *dw); +#else +static inline void dw_edma_v0_debugfs_on(struct dw_edma *dw) +{ +} +#endif /* CONFIG_DEBUG_FS */ + +#endif /* _DW_EDMA_V0_DEBUG_FS_H */ diff --git a/drivers/dma/dw-edma/dw-edma-v0-regs.h b/drivers/dma/dw-edma/dw-edma-v0-regs.h new file mode 100644 index 0000000000..e175f7b204 --- /dev/null +++ b/drivers/dma/dw-edma/dw-edma-v0-regs.h @@ -0,0 +1,233 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2018-2019 Synopsys, Inc. and/or its affiliates. + * Synopsys DesignWare eDMA v0 core + * + * Author: Gustavo Pimentel <gustavo.pimentel@synopsys.com> + */ + +#ifndef _DW_EDMA_V0_REGS_H +#define _DW_EDMA_V0_REGS_H + +#include <linux/dmaengine.h> + +#define EDMA_V0_MAX_NR_CH 8 +#define EDMA_V0_VIEWPORT_MASK GENMASK(2, 0) +#define EDMA_V0_DONE_INT_MASK GENMASK(7, 0) +#define EDMA_V0_ABORT_INT_MASK GENMASK(23, 16) +#define EDMA_V0_WRITE_CH_COUNT_MASK GENMASK(3, 0) +#define EDMA_V0_READ_CH_COUNT_MASK GENMASK(19, 16) +#define EDMA_V0_CH_STATUS_MASK GENMASK(6, 5) +#define EDMA_V0_DOORBELL_CH_MASK GENMASK(2, 0) +#define EDMA_V0_LINKED_LIST_ERR_MASK GENMASK(7, 0) + +#define EDMA_V0_CH_ODD_MSI_DATA_MASK GENMASK(31, 16) +#define EDMA_V0_CH_EVEN_MSI_DATA_MASK GENMASK(15, 0) + +struct dw_edma_v0_ch_regs { + u32 ch_control1; /* 0x0000 */ + u32 ch_control2; /* 0x0004 */ + u32 transfer_size; /* 0x0008 */ + union { + u64 reg; /* 0x000c..0x0010 */ + struct { + u32 lsb; /* 0x000c */ + u32 msb; /* 0x0010 */ + }; + } sar; + union { + u64 reg; /* 0x0014..0x0018 */ + struct { + u32 lsb; /* 0x0014 */ + u32 msb; /* 0x0018 */ + }; + } dar; + union { + u64 reg; /* 0x001c..0x0020 */ + struct { + u32 lsb; /* 0x001c */ + u32 msb; /* 0x0020 */ + }; + } llp; +} __packed; + +struct dw_edma_v0_ch { + struct dw_edma_v0_ch_regs wr; /* 0x0200 */ + u32 padding_1[55]; /* 0x0224..0x02fc */ + struct dw_edma_v0_ch_regs rd; /* 0x0300 */ + u32 padding_2[55]; /* 0x0324..0x03fc */ +} __packed; + +struct dw_edma_v0_unroll { + u32 padding_1; /* 0x00f8 */ + u32 wr_engine_chgroup; /* 0x0100 */ + u32 rd_engine_chgroup; /* 0x0104 */ + union { + u64 reg; /* 0x0108..0x010c */ + struct { + u32 lsb; /* 0x0108 */ + u32 msb; /* 0x010c */ + }; + } wr_engine_hshake_cnt; + u32 padding_2[2]; /* 0x0110..0x0114 */ + union { + u64 reg; /* 0x0120..0x0124 */ + struct { + u32 lsb; /* 0x0120 */ + u32 msb; /* 0x0124 */ + }; + } rd_engine_hshake_cnt; + u32 padding_3[2]; /* 0x0120..0x0124 */ + u32 wr_ch0_pwr_en; /* 0x0128 */ + u32 wr_ch1_pwr_en; /* 0x012c */ + u32 wr_ch2_pwr_en; /* 0x0130 */ + u32 wr_ch3_pwr_en; /* 0x0134 */ + u32 wr_ch4_pwr_en; /* 0x0138 */ + u32 wr_ch5_pwr_en; /* 0x013c */ + u32 wr_ch6_pwr_en; /* 0x0140 */ + u32 wr_ch7_pwr_en; /* 0x0144 */ + u32 padding_4[8]; /* 0x0148..0x0164 */ + u32 rd_ch0_pwr_en; /* 0x0168 */ + u32 rd_ch1_pwr_en; /* 0x016c */ + u32 rd_ch2_pwr_en; /* 0x0170 */ + u32 rd_ch3_pwr_en; /* 0x0174 */ + u32 rd_ch4_pwr_en; /* 0x0178 */ + u32 rd_ch5_pwr_en; /* 0x018c */ + u32 rd_ch6_pwr_en; /* 0x0180 */ + u32 rd_ch7_pwr_en; /* 0x0184 */ + u32 padding_5[30]; /* 0x0188..0x01fc */ + struct dw_edma_v0_ch ch[EDMA_V0_MAX_NR_CH]; /* 0x0200..0x1120 */ +} __packed; + +struct dw_edma_v0_legacy { + u32 viewport_sel; /* 0x00f8 */ + struct dw_edma_v0_ch_regs ch; /* 0x0100..0x0120 */ +} __packed; + +struct dw_edma_v0_regs { + /* eDMA global registers */ + 
u32 ctrl_data_arb_prior; /* 0x0000 */ + u32 padding_1; /* 0x0004 */ + u32 ctrl; /* 0x0008 */ + u32 wr_engine_en; /* 0x000c */ + u32 wr_doorbell; /* 0x0010 */ + u32 padding_2; /* 0x0014 */ + union { + u64 reg; /* 0x0018..0x001c */ + struct { + u32 lsb; /* 0x0018 */ + u32 msb; /* 0x001c */ + }; + } wr_ch_arb_weight; + u32 padding_3[3]; /* 0x0020..0x0028 */ + u32 rd_engine_en; /* 0x002c */ + u32 rd_doorbell; /* 0x0030 */ + u32 padding_4; /* 0x0034 */ + union { + u64 reg; /* 0x0038..0x003c */ + struct { + u32 lsb; /* 0x0038 */ + u32 msb; /* 0x003c */ + }; + } rd_ch_arb_weight; + u32 padding_5[3]; /* 0x0040..0x0048 */ + /* eDMA interrupts registers */ + u32 wr_int_status; /* 0x004c */ + u32 padding_6; /* 0x0050 */ + u32 wr_int_mask; /* 0x0054 */ + u32 wr_int_clear; /* 0x0058 */ + u32 wr_err_status; /* 0x005c */ + union { + u64 reg; /* 0x0060..0x0064 */ + struct { + u32 lsb; /* 0x0060 */ + u32 msb; /* 0x0064 */ + }; + } wr_done_imwr; + union { + u64 reg; /* 0x0068..0x006c */ + struct { + u32 lsb; /* 0x0068 */ + u32 msb; /* 0x006c */ + }; + } wr_abort_imwr; + u32 wr_ch01_imwr_data; /* 0x0070 */ + u32 wr_ch23_imwr_data; /* 0x0074 */ + u32 wr_ch45_imwr_data; /* 0x0078 */ + u32 wr_ch67_imwr_data; /* 0x007c */ + u32 padding_7[4]; /* 0x0080..0x008c */ + u32 wr_linked_list_err_en; /* 0x0090 */ + u32 padding_8[3]; /* 0x0094..0x009c */ + u32 rd_int_status; /* 0x00a0 */ + u32 padding_9; /* 0x00a4 */ + u32 rd_int_mask; /* 0x00a8 */ + u32 rd_int_clear; /* 0x00ac */ + u32 padding_10; /* 0x00b0 */ + union { + u64 reg; /* 0x00b4..0x00b8 */ + struct { + u32 lsb; /* 0x00b4 */ + u32 msb; /* 0x00b8 */ + }; + } rd_err_status; + u32 padding_11[2]; /* 0x00bc..0x00c0 */ + u32 rd_linked_list_err_en; /* 0x00c4 */ + u32 padding_12; /* 0x00c8 */ + union { + u64 reg; /* 0x00cc..0x00d0 */ + struct { + u32 lsb; /* 0x00cc */ + u32 msb; /* 0x00d0 */ + }; + } rd_done_imwr; + union { + u64 reg; /* 0x00d4..0x00d8 */ + struct { + u32 lsb; /* 0x00d4 */ + u32 msb; /* 0x00d8 */ + }; + } rd_abort_imwr; + u32 rd_ch01_imwr_data; /* 0x00dc */ + u32 rd_ch23_imwr_data; /* 0x00e0 */ + u32 rd_ch45_imwr_data; /* 0x00e4 */ + u32 rd_ch67_imwr_data; /* 0x00e8 */ + u32 padding_13[4]; /* 0x00ec..0x00f8 */ + /* eDMA channel context grouping */ + union dw_edma_v0_type { + struct dw_edma_v0_legacy legacy; /* 0x00f8..0x0120 */ + struct dw_edma_v0_unroll unroll; /* 0x00f8..0x1120 */ + } type; +} __packed; + +struct dw_edma_v0_lli { + u32 control; + u32 transfer_size; + union { + u64 reg; + struct { + u32 lsb; + u32 msb; + }; + } sar; + union { + u64 reg; + struct { + u32 lsb; + u32 msb; + }; + } dar; +} __packed; + +struct dw_edma_v0_llp { + u32 control; + u32 reserved; + union { + u64 reg; + struct { + u32 lsb; + u32 msb; + }; + } llp; +} __packed; + +#endif /* _DW_EDMA_V0_REGS_H */ diff --git a/drivers/dma/dw-edma/dw-hdma-v0-core.c b/drivers/dma/dw-edma/dw-hdma-v0-core.c new file mode 100644 index 0000000000..00b735a020 --- /dev/null +++ b/drivers/dma/dw-edma/dw-hdma-v0-core.c @@ -0,0 +1,296 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2023 Cai Huoqing + * Synopsys DesignWare HDMA v0 core + */ + +#include <linux/bitfield.h> +#include <linux/irqreturn.h> +#include <linux/io-64-nonatomic-lo-hi.h> + +#include "dw-edma-core.h" +#include "dw-hdma-v0-core.h" +#include "dw-hdma-v0-regs.h" +#include "dw-hdma-v0-debugfs.h" + +enum dw_hdma_control { + DW_HDMA_V0_CB = BIT(0), + DW_HDMA_V0_TCB = BIT(1), + DW_HDMA_V0_LLP = BIT(2), + DW_HDMA_V0_LIE = BIT(3), + DW_HDMA_V0_RIE = BIT(4), + DW_HDMA_V0_CCS = BIT(8), + DW_HDMA_V0_LLE = BIT(9), +}; + 
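+/*
+ * Readability note (added comment): these control bits are consumed by
+ * dw_hdma_v0_core_write_chunk() below. Every data element carries the
+ * chunk's cycle bit (CB); the last data element additionally sets LIE,
+ * plus RIE when the controller is remotely configured (DW_EDMA_CHIP_LOCAL
+ * not set); the trailing link element is written with LLP | TCB and the
+ * inverted cycle bit. CCS and LLE mirror the eDMA v0 channel-control
+ * flags and are not referenced elsewhere in this file.
+ */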
+static inline struct dw_hdma_v0_regs __iomem *__dw_regs(struct dw_edma *dw) +{ + return dw->chip->reg_base; +} + +static inline struct dw_hdma_v0_ch_regs __iomem * +__dw_ch_regs(struct dw_edma *dw, enum dw_edma_dir dir, u16 ch) +{ + if (dir == EDMA_DIR_WRITE) + return &(__dw_regs(dw)->ch[ch].wr); + else + return &(__dw_regs(dw)->ch[ch].rd); +} + +#define SET_CH_32(dw, dir, ch, name, value) \ + writel(value, &(__dw_ch_regs(dw, dir, ch)->name)) + +#define GET_CH_32(dw, dir, ch, name) \ + readl(&(__dw_ch_regs(dw, dir, ch)->name)) + +#define SET_BOTH_CH_32(dw, ch, name, value) \ + do { \ + writel(value, &(__dw_ch_regs(dw, EDMA_DIR_WRITE, ch)->name)); \ + writel(value, &(__dw_ch_regs(dw, EDMA_DIR_READ, ch)->name)); \ + } while (0) + +/* HDMA management callbacks */ +static void dw_hdma_v0_core_off(struct dw_edma *dw) +{ + int id; + + for (id = 0; id < HDMA_V0_MAX_NR_CH; id++) { + SET_BOTH_CH_32(dw, id, int_setup, + HDMA_V0_STOP_INT_MASK | HDMA_V0_ABORT_INT_MASK); + SET_BOTH_CH_32(dw, id, int_clear, + HDMA_V0_STOP_INT_MASK | HDMA_V0_ABORT_INT_MASK); + SET_BOTH_CH_32(dw, id, ch_en, 0); + } +} + +static u16 dw_hdma_v0_core_ch_count(struct dw_edma *dw, enum dw_edma_dir dir) +{ + u32 num_ch = 0; + int id; + + for (id = 0; id < HDMA_V0_MAX_NR_CH; id++) { + if (GET_CH_32(dw, id, dir, ch_en) & BIT(0)) + num_ch++; + } + + if (num_ch > HDMA_V0_MAX_NR_CH) + num_ch = HDMA_V0_MAX_NR_CH; + + return (u16)num_ch; +} + +static enum dma_status dw_hdma_v0_core_ch_status(struct dw_edma_chan *chan) +{ + struct dw_edma *dw = chan->dw; + u32 tmp; + + tmp = FIELD_GET(HDMA_V0_CH_STATUS_MASK, + GET_CH_32(dw, chan->id, chan->dir, ch_stat)); + + if (tmp == 1) + return DMA_IN_PROGRESS; + else if (tmp == 3) + return DMA_COMPLETE; + else + return DMA_ERROR; +} + +static void dw_hdma_v0_core_clear_done_int(struct dw_edma_chan *chan) +{ + struct dw_edma *dw = chan->dw; + + SET_CH_32(dw, chan->dir, chan->id, int_clear, HDMA_V0_STOP_INT_MASK); +} + +static void dw_hdma_v0_core_clear_abort_int(struct dw_edma_chan *chan) +{ + struct dw_edma *dw = chan->dw; + + SET_CH_32(dw, chan->dir, chan->id, int_clear, HDMA_V0_ABORT_INT_MASK); +} + +static u32 dw_hdma_v0_core_status_int(struct dw_edma_chan *chan) +{ + struct dw_edma *dw = chan->dw; + + return GET_CH_32(dw, chan->dir, chan->id, int_stat); +} + +static irqreturn_t +dw_hdma_v0_core_handle_int(struct dw_edma_irq *dw_irq, enum dw_edma_dir dir, + dw_edma_handler_t done, dw_edma_handler_t abort) +{ + struct dw_edma *dw = dw_irq->dw; + unsigned long total, pos, val; + irqreturn_t ret = IRQ_NONE; + struct dw_edma_chan *chan; + unsigned long off, mask; + + if (dir == EDMA_DIR_WRITE) { + total = dw->wr_ch_cnt; + off = 0; + mask = dw_irq->wr_mask; + } else { + total = dw->rd_ch_cnt; + off = dw->wr_ch_cnt; + mask = dw_irq->rd_mask; + } + + for_each_set_bit(pos, &mask, total) { + chan = &dw->chan[pos + off]; + + val = dw_hdma_v0_core_status_int(chan); + if (FIELD_GET(HDMA_V0_STOP_INT_MASK, val)) { + dw_hdma_v0_core_clear_done_int(chan); + done(chan); + + ret = IRQ_HANDLED; + } + + if (FIELD_GET(HDMA_V0_ABORT_INT_MASK, val)) { + dw_hdma_v0_core_clear_abort_int(chan); + abort(chan); + + ret = IRQ_HANDLED; + } + } + + return ret; +} + +static void dw_hdma_v0_write_ll_data(struct dw_edma_chunk *chunk, int i, + u32 control, u32 size, u64 sar, u64 dar) +{ + ptrdiff_t ofs = i * sizeof(struct dw_hdma_v0_lli); + + if (chunk->chan->dw->chip->flags & DW_EDMA_CHIP_LOCAL) { + struct dw_hdma_v0_lli *lli = chunk->ll_region.vaddr.mem + ofs; + + lli->control = control; + lli->transfer_size = size; + 
lli->sar.reg = sar; + lli->dar.reg = dar; + } else { + struct dw_hdma_v0_lli __iomem *lli = chunk->ll_region.vaddr.io + ofs; + + writel(control, &lli->control); + writel(size, &lli->transfer_size); + writeq(sar, &lli->sar.reg); + writeq(dar, &lli->dar.reg); + } +} + +static void dw_hdma_v0_write_ll_link(struct dw_edma_chunk *chunk, + int i, u32 control, u64 pointer) +{ + ptrdiff_t ofs = i * sizeof(struct dw_hdma_v0_lli); + + if (chunk->chan->dw->chip->flags & DW_EDMA_CHIP_LOCAL) { + struct dw_hdma_v0_llp *llp = chunk->ll_region.vaddr.mem + ofs; + + llp->control = control; + llp->llp.reg = pointer; + } else { + struct dw_hdma_v0_llp __iomem *llp = chunk->ll_region.vaddr.io + ofs; + + writel(control, &llp->control); + writeq(pointer, &llp->llp.reg); + } +} + +static void dw_hdma_v0_core_write_chunk(struct dw_edma_chunk *chunk) +{ + struct dw_edma_burst *child; + struct dw_edma_chan *chan = chunk->chan; + u32 control = 0, i = 0; + int j; + + if (chunk->cb) + control = DW_HDMA_V0_CB; + + j = chunk->bursts_alloc; + list_for_each_entry(child, &chunk->burst->list, list) { + j--; + if (!j) { + control |= DW_HDMA_V0_LIE; + if (!(chan->dw->chip->flags & DW_EDMA_CHIP_LOCAL)) + control |= DW_HDMA_V0_RIE; + } + + dw_hdma_v0_write_ll_data(chunk, i++, control, child->sz, + child->sar, child->dar); + } + + control = DW_HDMA_V0_LLP | DW_HDMA_V0_TCB; + if (!chunk->cb) + control |= DW_HDMA_V0_CB; + + dw_hdma_v0_write_ll_link(chunk, i, control, chunk->ll_region.paddr); +} + +static void dw_hdma_v0_core_start(struct dw_edma_chunk *chunk, bool first) +{ + struct dw_edma_chan *chan = chunk->chan; + struct dw_edma *dw = chan->dw; + u32 tmp; + + dw_hdma_v0_core_write_chunk(chunk); + + if (first) { + /* Enable engine */ + SET_CH_32(dw, chan->dir, chan->id, ch_en, BIT(0)); + /* Interrupt enable&unmask - done, abort */ + tmp = GET_CH_32(dw, chan->dir, chan->id, int_setup) | + HDMA_V0_STOP_INT_MASK | HDMA_V0_ABORT_INT_MASK | + HDMA_V0_LOCAL_STOP_INT_EN | HDMA_V0_LOCAL_STOP_INT_EN; + SET_CH_32(dw, chan->dir, chan->id, int_setup, tmp); + /* Channel control */ + SET_CH_32(dw, chan->dir, chan->id, control1, HDMA_V0_LINKLIST_EN); + /* Linked list */ + /* llp is not aligned on 64bit -> keep 32bit accesses */ + SET_CH_32(dw, chan->dir, chan->id, llp.lsb, + lower_32_bits(chunk->ll_region.paddr)); + SET_CH_32(dw, chan->dir, chan->id, llp.msb, + upper_32_bits(chunk->ll_region.paddr)); + } + /* Set consumer cycle */ + SET_CH_32(dw, chan->dir, chan->id, cycle_sync, + HDMA_V0_CONSUMER_CYCLE_STAT | HDMA_V0_CONSUMER_CYCLE_BIT); + /* Doorbell */ + SET_CH_32(dw, chan->dir, chan->id, doorbell, HDMA_V0_DOORBELL_START); +} + +static void dw_hdma_v0_core_ch_config(struct dw_edma_chan *chan) +{ + struct dw_edma *dw = chan->dw; + + /* MSI done addr - low, high */ + SET_CH_32(dw, chan->dir, chan->id, msi_stop.lsb, chan->msi.address_lo); + SET_CH_32(dw, chan->dir, chan->id, msi_stop.msb, chan->msi.address_hi); + /* MSI abort addr - low, high */ + SET_CH_32(dw, chan->dir, chan->id, msi_abort.lsb, chan->msi.address_lo); + SET_CH_32(dw, chan->dir, chan->id, msi_abort.msb, chan->msi.address_hi); + /* config MSI data */ + SET_CH_32(dw, chan->dir, chan->id, msi_msgdata, chan->msi.data); +} + +/* HDMA debugfs callbacks */ +static void dw_hdma_v0_core_debugfs_on(struct dw_edma *dw) +{ + dw_hdma_v0_debugfs_on(dw); +} + +static const struct dw_edma_core_ops dw_hdma_v0_core = { + .off = dw_hdma_v0_core_off, + .ch_count = dw_hdma_v0_core_ch_count, + .ch_status = dw_hdma_v0_core_ch_status, + .handle_int = dw_hdma_v0_core_handle_int, + .start = 
dw_hdma_v0_core_start, + .ch_config = dw_hdma_v0_core_ch_config, + .debugfs_on = dw_hdma_v0_core_debugfs_on, +}; + +void dw_hdma_v0_core_register(struct dw_edma *dw) +{ + dw->core = &dw_hdma_v0_core; +} diff --git a/drivers/dma/dw-edma/dw-hdma-v0-core.h b/drivers/dma/dw-edma/dw-hdma-v0-core.h new file mode 100644 index 0000000000..c373b4f0bd --- /dev/null +++ b/drivers/dma/dw-edma/dw-hdma-v0-core.h @@ -0,0 +1,17 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2023 Cai Huoqing + * Synopsys DesignWare HDMA v0 core + * + * Author: Cai Huoqing <cai.huoqing@linux.dev> + */ + +#ifndef _DW_HDMA_V0_CORE_H +#define _DW_HDMA_V0_CORE_H + +#include <linux/dma/edma.h> + +/* HDMA core register */ +void dw_hdma_v0_core_register(struct dw_edma *dw); + +#endif /* _DW_HDMA_V0_CORE_H */ diff --git a/drivers/dma/dw-edma/dw-hdma-v0-debugfs.c b/drivers/dma/dw-edma/dw-hdma-v0-debugfs.c new file mode 100644 index 0000000000..520c81978b --- /dev/null +++ b/drivers/dma/dw-edma/dw-hdma-v0-debugfs.c @@ -0,0 +1,170 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2023 Cai Huoqing + * Synopsys DesignWare HDMA v0 debugfs + * + * Author: Cai Huoqing <cai.huoqing@linux.dev> + */ + +#include <linux/debugfs.h> +#include <linux/bitfield.h> + +#include "dw-hdma-v0-debugfs.h" +#include "dw-hdma-v0-regs.h" +#include "dw-edma-core.h" + +#define REGS_ADDR(dw, name) \ + ({ \ + struct dw_hdma_v0_regs __iomem *__regs = (dw)->chip->reg_base; \ + \ + (void __iomem *)&__regs->name; \ + }) + +#define REGS_CH_ADDR(dw, name, _dir, _ch) \ + ({ \ + struct dw_hdma_v0_ch_regs __iomem *__ch_regs; \ + \ + if (_dir == EDMA_DIR_READ) \ + __ch_regs = REGS_ADDR(dw, ch[_ch].rd); \ + else \ + __ch_regs = REGS_ADDR(dw, ch[_ch].wr); \ + \ + (void __iomem *)&__ch_regs->name; \ + }) + +#define CTX_REGISTER(dw, name, dir, ch) \ + {#name, REGS_CH_ADDR(dw, name, dir, ch)} + +#define WRITE_STR "write" +#define READ_STR "read" +#define CHANNEL_STR "channel" +#define REGISTERS_STR "registers" + +struct dw_hdma_debugfs_entry { + const char *name; + void __iomem *reg; +}; + +static int dw_hdma_debugfs_u32_get(void *data, u64 *val) +{ + struct dw_hdma_debugfs_entry *entry = data; + void __iomem *reg = entry->reg; + + *val = readl(reg); + + return 0; +} +DEFINE_DEBUGFS_ATTRIBUTE(fops_x32, dw_hdma_debugfs_u32_get, NULL, "0x%08llx\n"); + +static void dw_hdma_debugfs_create_x32(struct dw_edma *dw, + const struct dw_hdma_debugfs_entry ini[], + int nr_entries, struct dentry *dent) +{ + struct dw_hdma_debugfs_entry *entries; + int i; + + entries = devm_kcalloc(dw->chip->dev, nr_entries, sizeof(*entries), + GFP_KERNEL); + if (!entries) + return; + + for (i = 0; i < nr_entries; i++) { + entries[i] = ini[i]; + + debugfs_create_file_unsafe(entries[i].name, 0444, dent, + &entries[i], &fops_x32); + } +} + +static void dw_hdma_debugfs_regs_ch(struct dw_edma *dw, enum dw_edma_dir dir, + u16 ch, struct dentry *dent) +{ + const struct dw_hdma_debugfs_entry debugfs_regs[] = { + CTX_REGISTER(dw, ch_en, dir, ch), + CTX_REGISTER(dw, doorbell, dir, ch), + CTX_REGISTER(dw, prefetch, dir, ch), + CTX_REGISTER(dw, handshake, dir, ch), + CTX_REGISTER(dw, llp.lsb, dir, ch), + CTX_REGISTER(dw, llp.msb, dir, ch), + CTX_REGISTER(dw, cycle_sync, dir, ch), + CTX_REGISTER(dw, transfer_size, dir, ch), + CTX_REGISTER(dw, sar.lsb, dir, ch), + CTX_REGISTER(dw, sar.msb, dir, ch), + CTX_REGISTER(dw, dar.lsb, dir, ch), + CTX_REGISTER(dw, dar.msb, dir, ch), + CTX_REGISTER(dw, watermark_en, dir, ch), + CTX_REGISTER(dw, control1, dir, ch), + CTX_REGISTER(dw, func_num, dir, 
ch), + CTX_REGISTER(dw, qos, dir, ch), + CTX_REGISTER(dw, ch_stat, dir, ch), + CTX_REGISTER(dw, int_stat, dir, ch), + CTX_REGISTER(dw, int_setup, dir, ch), + CTX_REGISTER(dw, int_clear, dir, ch), + CTX_REGISTER(dw, msi_stop.lsb, dir, ch), + CTX_REGISTER(dw, msi_stop.msb, dir, ch), + CTX_REGISTER(dw, msi_watermark.lsb, dir, ch), + CTX_REGISTER(dw, msi_watermark.msb, dir, ch), + CTX_REGISTER(dw, msi_abort.lsb, dir, ch), + CTX_REGISTER(dw, msi_abort.msb, dir, ch), + CTX_REGISTER(dw, msi_msgdata, dir, ch), + }; + int nr_entries = ARRAY_SIZE(debugfs_regs); + + dw_hdma_debugfs_create_x32(dw, debugfs_regs, nr_entries, dent); +} + +static void dw_hdma_debugfs_regs_wr(struct dw_edma *dw, struct dentry *dent) +{ + struct dentry *regs_dent, *ch_dent; + char name[16]; + int i; + + regs_dent = debugfs_create_dir(WRITE_STR, dent); + + for (i = 0; i < dw->wr_ch_cnt; i++) { + snprintf(name, sizeof(name), "%s:%d", CHANNEL_STR, i); + + ch_dent = debugfs_create_dir(name, regs_dent); + + dw_hdma_debugfs_regs_ch(dw, EDMA_DIR_WRITE, i, ch_dent); + } +} + +static void dw_hdma_debugfs_regs_rd(struct dw_edma *dw, struct dentry *dent) +{ + struct dentry *regs_dent, *ch_dent; + char name[16]; + int i; + + regs_dent = debugfs_create_dir(READ_STR, dent); + + for (i = 0; i < dw->rd_ch_cnt; i++) { + snprintf(name, sizeof(name), "%s:%d", CHANNEL_STR, i); + + ch_dent = debugfs_create_dir(name, regs_dent); + + dw_hdma_debugfs_regs_ch(dw, EDMA_DIR_READ, i, ch_dent); + } +} + +static void dw_hdma_debugfs_regs(struct dw_edma *dw) +{ + struct dentry *regs_dent; + + regs_dent = debugfs_create_dir(REGISTERS_STR, dw->dma.dbg_dev_root); + + dw_hdma_debugfs_regs_wr(dw, regs_dent); + dw_hdma_debugfs_regs_rd(dw, regs_dent); +} + +void dw_hdma_v0_debugfs_on(struct dw_edma *dw) +{ + if (!debugfs_initialized()) + return; + + debugfs_create_u32("mf", 0444, dw->dma.dbg_dev_root, &dw->chip->mf); + debugfs_create_u16("wr_ch_cnt", 0444, dw->dma.dbg_dev_root, &dw->wr_ch_cnt); + debugfs_create_u16("rd_ch_cnt", 0444, dw->dma.dbg_dev_root, &dw->rd_ch_cnt); + + dw_hdma_debugfs_regs(dw); +} diff --git a/drivers/dma/dw-edma/dw-hdma-v0-debugfs.h b/drivers/dma/dw-edma/dw-hdma-v0-debugfs.h new file mode 100644 index 0000000000..e6842c8377 --- /dev/null +++ b/drivers/dma/dw-edma/dw-hdma-v0-debugfs.h @@ -0,0 +1,22 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2023 Cai Huoqing + * Synopsys DesignWare HDMA v0 debugfs + * + * Author: Cai Huoqing <cai.huoqing@linux.dev> + */ + +#ifndef _DW_HDMA_V0_DEBUG_FS_H +#define _DW_HDMA_V0_DEBUG_FS_H + +#include <linux/dma/edma.h> + +#ifdef CONFIG_DEBUG_FS +void dw_hdma_v0_debugfs_on(struct dw_edma *dw); +#else +static inline void dw_hdma_v0_debugfs_on(struct dw_edma *dw) +{ +} +#endif /* CONFIG_DEBUG_FS */ + +#endif /* _DW_HDMA_V0_DEBUG_FS_H */ diff --git a/drivers/dma/dw-edma/dw-hdma-v0-regs.h b/drivers/dma/dw-edma/dw-hdma-v0-regs.h new file mode 100644 index 0000000000..a974abdf8a --- /dev/null +++ b/drivers/dma/dw-edma/dw-hdma-v0-regs.h @@ -0,0 +1,129 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2023 Cai Huoqing + * Synopsys DesignWare HDMA v0 reg + * + * Author: Cai Huoqing <cai.huoqing@linux.dev> + */ + +#ifndef _DW_HDMA_V0_REGS_H +#define _DW_HDMA_V0_REGS_H + +#include <linux/dmaengine.h> + +#define HDMA_V0_MAX_NR_CH 8 +#define HDMA_V0_LOCAL_ABORT_INT_EN BIT(6) +#define HDMA_V0_REMOTE_ABORT_INT_EN BIT(5) +#define HDMA_V0_LOCAL_STOP_INT_EN BIT(4) +#define HDMA_V0_REMOTEL_STOP_INT_EN BIT(3) +#define HDMA_V0_ABORT_INT_MASK BIT(2) +#define HDMA_V0_STOP_INT_MASK BIT(0) +#define 
HDMA_V0_LINKLIST_EN BIT(0) +#define HDMA_V0_CONSUMER_CYCLE_STAT BIT(1) +#define HDMA_V0_CONSUMER_CYCLE_BIT BIT(0) +#define HDMA_V0_DOORBELL_START BIT(0) +#define HDMA_V0_CH_STATUS_MASK GENMASK(1, 0) + +struct dw_hdma_v0_ch_regs { + u32 ch_en; /* 0x0000 */ + u32 doorbell; /* 0x0004 */ + u32 prefetch; /* 0x0008 */ + u32 handshake; /* 0x000c */ + union { + u64 reg; /* 0x0010..0x0014 */ + struct { + u32 lsb; /* 0x0010 */ + u32 msb; /* 0x0014 */ + }; + } llp; + u32 cycle_sync; /* 0x0018 */ + u32 transfer_size; /* 0x001c */ + union { + u64 reg; /* 0x0020..0x0024 */ + struct { + u32 lsb; /* 0x0020 */ + u32 msb; /* 0x0024 */ + }; + } sar; + union { + u64 reg; /* 0x0028..0x002c */ + struct { + u32 lsb; /* 0x0028 */ + u32 msb; /* 0x002c */ + }; + } dar; + u32 watermark_en; /* 0x0030 */ + u32 control1; /* 0x0034 */ + u32 func_num; /* 0x0038 */ + u32 qos; /* 0x003c */ + u32 padding_1[16]; /* 0x0040..0x007c */ + u32 ch_stat; /* 0x0080 */ + u32 int_stat; /* 0x0084 */ + u32 int_setup; /* 0x0088 */ + u32 int_clear; /* 0x008c */ + union { + u64 reg; /* 0x0090..0x0094 */ + struct { + u32 lsb; /* 0x0090 */ + u32 msb; /* 0x0094 */ + }; + } msi_stop; + union { + u64 reg; /* 0x0098..0x009c */ + struct { + u32 lsb; /* 0x0098 */ + u32 msb; /* 0x009c */ + }; + } msi_watermark; + union { + u64 reg; /* 0x00a0..0x00a4 */ + struct { + u32 lsb; /* 0x00a0 */ + u32 msb; /* 0x00a4 */ + }; + } msi_abort; + u32 msi_msgdata; /* 0x00a8 */ + u32 padding_2[21]; /* 0x00ac..0x00fc */ +} __packed; + +struct dw_hdma_v0_ch { + struct dw_hdma_v0_ch_regs wr; /* 0x0000 */ + struct dw_hdma_v0_ch_regs rd; /* 0x0100 */ +} __packed; + +struct dw_hdma_v0_regs { + struct dw_hdma_v0_ch ch[HDMA_V0_MAX_NR_CH]; /* 0x0000..0x0fa8 */ +} __packed; + +struct dw_hdma_v0_lli { + u32 control; + u32 transfer_size; + union { + u64 reg; + struct { + u32 lsb; + u32 msb; + }; + } sar; + union { + u64 reg; + struct { + u32 lsb; + u32 msb; + }; + } dar; +} __packed; + +struct dw_hdma_v0_llp { + u32 control; + u32 reserved; + union { + u64 reg; + struct { + u32 lsb; + u32 msb; + }; + } llp; +} __packed; + +#endif /* _DW_HDMA_V0_REGS_H */ |