author		Daniel Baumann <daniel.baumann@progress-linux.org>	2024-05-18 17:39:57 +0000
committer	Daniel Baumann <daniel.baumann@progress-linux.org>	2024-05-18 17:39:57 +0000
commit		dc50eab76b709d68175a358d6e23a5a3890764d3 (patch)
tree		c754d0390db060af0213ff994f0ac310e4cfd6e9 /drivers/dma/xilinx
parent		Adding debian version 6.6.15-2. (diff)
Merging upstream version 6.7.7.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'drivers/dma/xilinx')
-rw-r--r--	drivers/dma/xilinx/xdma-regs.h		2
-rw-r--r--	drivers/dma/xilinx/xdma.c		189
-rw-r--r--	drivers/dma/xilinx/xilinx_dma.c		8
-rw-r--r--	drivers/dma/xilinx/xilinx_dpdma.c	6
-rw-r--r--	drivers/dma/xilinx/zynqmp_dma.c		6
5 files changed, 184 insertions(+), 27 deletions(-)
diff --git a/drivers/dma/xilinx/xdma-regs.h b/drivers/dma/xilinx/xdma-regs.h
index dd98b4526b..e641a5083e 100644
--- a/drivers/dma/xilinx/xdma-regs.h
+++ b/drivers/dma/xilinx/xdma-regs.h
@@ -44,6 +44,8 @@
FIELD_PREP(XDMA_DESC_FLAGS_BITS, (flag)))
#define XDMA_DESC_CONTROL_LAST \
XDMA_DESC_CONTROL(1, XDMA_DESC_STOPPED | XDMA_DESC_COMPLETED)
+#define XDMA_DESC_CONTROL_CYCLIC \
+ XDMA_DESC_CONTROL(1, XDMA_DESC_COMPLETED)
/*
* Descriptor for a single contiguous memory block transfer.
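
The new XDMA_DESC_CONTROL_CYCLIC word differs from XDMA_DESC_CONTROL_LAST only in dropping XDMA_DESC_STOPPED: a cyclic descriptor still requests a completion interrupt, but does not halt descriptor fetching, so the engine keeps looping over the ring. A minimal sketch of the distinction, assuming the usual XDMA semantics for these two flag bits:

/* Sketch, not driver code: what each control word asks of the engine. */
u32 last   = XDMA_DESC_CONTROL(1, XDMA_DESC_STOPPED | XDMA_DESC_COMPLETED);
u32 cyclic = XDMA_DESC_CONTROL(1, XDMA_DESC_COMPLETED);
/* last:   raise a completion interrupt, then stop (end of an SG chain) */
/* cyclic: raise a completion interrupt, keep fetching next_desc (ring) */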
diff --git a/drivers/dma/xilinx/xdma.c b/drivers/dma/xilinx/xdma.c
index e0bfd129d5..2c9c72d4b5 100644
--- a/drivers/dma/xilinx/xdma.c
+++ b/drivers/dma/xilinx/xdma.c
@@ -83,6 +83,9 @@ struct xdma_chan {
* @dblk_num: Number of hardware descriptor blocks
* @desc_num: Number of hardware descriptors
* @completed_desc_num: Completed hardware descriptors
+ * @cyclic: Cyclic transfer vs. scatter-gather
+ * @periods: Number of periods in the cyclic transfer
+ * @period_size: Size of a period in bytes in cyclic transfers
*/
struct xdma_desc {
struct virt_dma_desc vdesc;
@@ -93,6 +96,9 @@ struct xdma_desc {
u32 dblk_num;
u32 desc_num;
u32 completed_desc_num;
+ bool cyclic;
+ u32 periods;
+ u32 period_size;
};
#define XDMA_DEV_STATUS_REG_DMA BIT(0)
@@ -137,10 +143,10 @@ static inline void *xdma_blk_last_desc(struct xdma_desc_block *block)
}
/**
- * xdma_link_desc_blocks - Link descriptor blocks for DMA transfer
+ * xdma_link_sg_desc_blocks - Link SG descriptor blocks for DMA transfer
* @sw_desc: Tx descriptor pointer
*/
-static void xdma_link_desc_blocks(struct xdma_desc *sw_desc)
+static void xdma_link_sg_desc_blocks(struct xdma_desc *sw_desc)
{
struct xdma_desc_block *block;
u32 last_blk_desc, desc_control;
@@ -174,6 +180,25 @@ static void xdma_link_desc_blocks(struct xdma_desc *sw_desc)
desc->control = cpu_to_le32(XDMA_DESC_CONTROL_LAST);
}
+/**
+ * xdma_link_cyclic_desc_blocks - Link cyclic descriptor blocks for DMA transfer
+ * @sw_desc: Tx descriptor pointer
+ */
+static void xdma_link_cyclic_desc_blocks(struct xdma_desc *sw_desc)
+{
+ struct xdma_desc_block *block;
+ struct xdma_hw_desc *desc;
+ int i;
+
+ block = sw_desc->desc_blocks;
+ for (i = 0; i < sw_desc->desc_num - 1; i++) {
+ desc = block->virt_addr + i * XDMA_DESC_SIZE;
+ desc->next_desc = cpu_to_le64(block->dma_addr + ((i + 1) * XDMA_DESC_SIZE));
+ }
+ desc = block->virt_addr + i * XDMA_DESC_SIZE;
+ desc->next_desc = cpu_to_le64(block->dma_addr);
+}
+
static inline struct xdma_chan *to_xdma_chan(struct dma_chan *chan)
{
return container_of(chan, struct xdma_chan, vchan.chan);
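
Unlike the SG linker above, the cyclic linker walks a single descriptor block and closes the chain: each hardware descriptor's next_desc points at its successor, and the last one wraps back to the block's base DMA address. One block suffices because xdma_prep_dma_cyclic() below caps the period count at XDMA_DESC_ADJACENT. A hypothetical self-check (not in the driver) illustrating the ring invariant, using the same driver types:

/* Hypothetical: follow next_desc for desc_num hops and confirm the
 * chain built by xdma_link_cyclic_desc_blocks() returns to its start. */
static bool xdma_cyclic_ring_is_closed(struct xdma_desc *sw_desc)
{
	struct xdma_desc_block *block = sw_desc->desc_blocks;
	struct xdma_hw_desc *desc;
	int i;

	for (i = 0; i < sw_desc->desc_num - 1; i++) {
		desc = block->virt_addr + i * XDMA_DESC_SIZE;
		if (le64_to_cpu(desc->next_desc) !=
		    block->dma_addr + (i + 1) * XDMA_DESC_SIZE)
			return false;
	}
	desc = block->virt_addr + i * XDMA_DESC_SIZE;
	return le64_to_cpu(desc->next_desc) == block->dma_addr;
}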
@@ -231,14 +256,16 @@ static void xdma_free_desc(struct virt_dma_desc *vdesc)
* xdma_alloc_desc - Allocate descriptor
* @chan: DMA channel pointer
* @desc_num: Number of hardware descriptors
+ * @cyclic: Whether this is a cyclic transfer
*/
static struct xdma_desc *
-xdma_alloc_desc(struct xdma_chan *chan, u32 desc_num)
+xdma_alloc_desc(struct xdma_chan *chan, u32 desc_num, bool cyclic)
{
struct xdma_desc *sw_desc;
struct xdma_hw_desc *desc;
dma_addr_t dma_addr;
u32 dblk_num;
+ u32 control;
void *addr;
int i, j;
@@ -248,12 +275,18 @@ xdma_alloc_desc(struct xdma_chan *chan, u32 desc_num)
sw_desc->chan = chan;
sw_desc->desc_num = desc_num;
+ sw_desc->cyclic = cyclic;
dblk_num = DIV_ROUND_UP(desc_num, XDMA_DESC_ADJACENT);
sw_desc->desc_blocks = kcalloc(dblk_num, sizeof(*sw_desc->desc_blocks),
GFP_NOWAIT);
if (!sw_desc->desc_blocks)
goto failed;
+ if (cyclic)
+ control = XDMA_DESC_CONTROL_CYCLIC;
+ else
+ control = XDMA_DESC_CONTROL(1, 0);
+
sw_desc->dblk_num = dblk_num;
for (i = 0; i < sw_desc->dblk_num; i++) {
addr = dma_pool_alloc(chan->desc_pool, GFP_NOWAIT, &dma_addr);
@@ -263,10 +296,13 @@ xdma_alloc_desc(struct xdma_chan *chan, u32 desc_num)
sw_desc->desc_blocks[i].virt_addr = addr;
sw_desc->desc_blocks[i].dma_addr = dma_addr;
for (j = 0, desc = addr; j < XDMA_DESC_ADJACENT; j++)
- desc[j].control = cpu_to_le32(XDMA_DESC_CONTROL(1, 0));
+ desc[j].control = cpu_to_le32(control);
}
- xdma_link_desc_blocks(sw_desc);
+ if (cyclic)
+ xdma_link_cyclic_desc_blocks(sw_desc);
+ else
+ xdma_link_sg_desc_blocks(sw_desc);
return sw_desc;
@@ -466,7 +502,7 @@ xdma_prep_device_sg(struct dma_chan *chan, struct scatterlist *sgl,
for_each_sg(sgl, sg, sg_len, i)
desc_num += DIV_ROUND_UP(sg_dma_len(sg), XDMA_DESC_BLEN_MAX);
- sw_desc = xdma_alloc_desc(xdma_chan, desc_num);
+ sw_desc = xdma_alloc_desc(xdma_chan, desc_num, false);
if (!sw_desc)
return NULL;
sw_desc->dir = dir;
@@ -522,6 +558,81 @@ failed:
}
/**
+ * xdma_prep_dma_cyclic - prepare for cyclic DMA transactions
+ * @chan: DMA channel pointer
+ * @address: Device DMA address to access
+ * @size: Total length to transfer
+ * @period_size: Period size to use for each transfer
+ * @dir: Transfer direction
+ * @flags: Transfer ack flags
+ */
+static struct dma_async_tx_descriptor *
+xdma_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t address,
+ size_t size, size_t period_size,
+ enum dma_transfer_direction dir,
+ unsigned long flags)
+{
+ struct xdma_chan *xdma_chan = to_xdma_chan(chan);
+ struct xdma_device *xdev = xdma_chan->xdev_hdl;
+ unsigned int periods = size / period_size;
+ struct dma_async_tx_descriptor *tx_desc;
+ struct xdma_desc_block *dblk;
+ struct xdma_hw_desc *desc;
+ struct xdma_desc *sw_desc;
+ unsigned int i;
+
+ /*
+ * Simplify the whole logic by preventing an abnormally high number of
+ * periods and an oversized period size.
+ */
+ if (period_size > XDMA_DESC_BLEN_MAX) {
+ xdma_err(xdev, "period size limited to %lu bytes\n", XDMA_DESC_BLEN_MAX);
+ return NULL;
+ }
+
+ if (periods > XDMA_DESC_ADJACENT) {
+ xdma_err(xdev, "number of periods limited to %u\n", XDMA_DESC_ADJACENT);
+ return NULL;
+ }
+
+ sw_desc = xdma_alloc_desc(xdma_chan, periods, true);
+ if (!sw_desc)
+ return NULL;
+
+ sw_desc->periods = periods;
+ sw_desc->period_size = period_size;
+ sw_desc->dir = dir;
+
+ dblk = sw_desc->desc_blocks;
+ desc = dblk->virt_addr;
+
+ /* fill hardware descriptor */
+ for (i = 0; i < periods; i++) {
+ desc->bytes = cpu_to_le32(period_size);
+ if (dir == DMA_MEM_TO_DEV) {
+ desc->src_addr = cpu_to_le64(address + i * period_size);
+ desc->dst_addr = cpu_to_le64(xdma_chan->cfg.dst_addr);
+ } else {
+ desc->src_addr = cpu_to_le64(xdma_chan->cfg.src_addr);
+ desc->dst_addr = cpu_to_le64(address + i * period_size);
+ }
+
+ desc++;
+ }
+
+ tx_desc = vchan_tx_prep(&xdma_chan->vchan, &sw_desc->vdesc, flags);
+ if (!tx_desc)
+ goto failed;
+
+ return tx_desc;
+
+failed:
+ xdma_free_desc(&sw_desc->vdesc);
+
+ return NULL;
+}
+
+/**
* xdma_device_config - Configure the DMA channel
* @chan: DMA channel
* @cfg: channel configuration
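
A consumer reaches this new callback through the generic dmaengine wrapper dmaengine_prep_dma_cyclic(). A minimal client-side sketch (function and names hypothetical; the driver rejects a period_size above XDMA_DESC_BLEN_MAX and more than XDMA_DESC_ADJACENT periods):

#include <linux/dmaengine.h>

/* Hypothetical consumer: set up a cyclic DEV_TO_MEM transfer whose
 * callback fires once per completed period. */
static int start_cyclic_rx(struct dma_chan *chan, dma_addr_t buf,
			   size_t buf_len, size_t period_len,
			   dma_async_tx_callback cb, void *cb_arg)
{
	struct dma_async_tx_descriptor *txd;
	dma_cookie_t cookie;

	txd = dmaengine_prep_dma_cyclic(chan, buf, buf_len, period_len,
					DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT);
	if (!txd)
		return -ENOMEM;

	txd->callback = cb;		/* invoked once per period */
	txd->callback_param = cb_arg;

	cookie = dmaengine_submit(txd);
	if (dma_submit_error(cookie))
		return -EIO;

	dma_async_issue_pending(chan);
	return 0;
}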
@@ -577,6 +688,41 @@ static int xdma_alloc_chan_resources(struct dma_chan *chan)
return 0;
}
+static enum dma_status xdma_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
+ struct dma_tx_state *state)
+{
+ struct xdma_chan *xdma_chan = to_xdma_chan(chan);
+ struct xdma_desc *desc = NULL;
+ struct virt_dma_desc *vd;
+ enum dma_status ret;
+ unsigned long flags;
+ unsigned int period_idx;
+ u32 residue = 0;
+
+ ret = dma_cookie_status(chan, cookie, state);
+ if (ret == DMA_COMPLETE)
+ return ret;
+
+ spin_lock_irqsave(&xdma_chan->vchan.lock, flags);
+
+ vd = vchan_find_desc(&xdma_chan->vchan, cookie);
+ if (vd)
+ desc = to_xdma_desc(vd);
+ if (!desc || !desc->cyclic) {
+ spin_unlock_irqrestore(&xdma_chan->vchan.lock, flags);
+ return ret;
+ }
+
+ period_idx = desc->completed_desc_num % desc->periods;
+ residue = (desc->periods - period_idx) * desc->period_size;
+
+ spin_unlock_irqrestore(&xdma_chan->vchan.lock, flags);
+
+ dma_set_residue(state, residue);
+
+ return ret;
+}
+
/**
* xdma_channel_isr - XDMA channel interrupt handler
* @irq: IRQ number
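
The residue reported above counts whole periods: period_idx = completed_desc_num % periods locates the engine within the ring, and (periods - period_idx) * period_size is what remains of the current lap. For example, with periods = 4 and period_size = 4096, a completed_desc_num of 6 gives period_idx = 2 and a residue of 8192 bytes. A client reads it through the standard helper; a sketch, assuming a cookie from a previously submitted cyclic descriptor:

#include <linux/dmaengine.h>

/* Hypothetical helper: return the bytes left in the current cycle,
 * at SEGMENT (one period) granularity as advertised in probe below. */
static u32 query_cyclic_residue(struct dma_chan *chan, dma_cookie_t cookie)
{
	struct dma_tx_state state;

	dmaengine_tx_status(chan, cookie, &state);
	return state.residue;
}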
@@ -590,6 +736,7 @@ static irqreturn_t xdma_channel_isr(int irq, void *dev_id)
struct virt_dma_desc *vd;
struct xdma_desc *desc;
int ret;
+ u32 st;
spin_lock(&xchan->vchan.lock);
@@ -607,7 +754,22 @@ static irqreturn_t xdma_channel_isr(int irq, void *dev_id)
if (ret)
goto out;
+ if (desc->cyclic) {
+ desc->completed_desc_num = complete_desc_num;
+
+ ret = regmap_read(xdev->rmap, xchan->base + XDMA_CHAN_STATUS,
+ &st);
+ if (ret)
+ goto out;
+
+ regmap_write(xdev->rmap, xchan->base + XDMA_CHAN_STATUS, st);
+
+ vchan_cyclic_callback(vd);
+ goto out;
+ }
+
desc->completed_desc_num += complete_desc_num;
+
/*
* if all data blocks are transferred, remove and complete the request
*/
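
In the cyclic branch above, the ISR latches the period count, reads the channel status register and writes the value back to acknowledge it (the status bits are evidently write-one-to-clear), then lets vchan_cyclic_callback() schedule the descriptor's callback, once per completed period and in tasklet context; a cyclic descriptor is never completed or removed. A typical consumer callback just advances a ring pointer, as in this hypothetical sketch:

#include <linux/wait.h>

struct my_stream {			/* hypothetical client state */
	size_t hw_ptr, period_size, buf_size;
	wait_queue_head_t wait;
};

/* Runs once per period via vchan_cyclic_callback(): bump the hardware
 * pointer around the ring and wake any reader. */
static void my_period_elapsed(void *arg)
{
	struct my_stream *s = arg;

	s->hw_ptr = (s->hw_ptr + s->period_size) % s->buf_size;
	wake_up(&s->wait);
}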
@@ -621,7 +783,7 @@ static irqreturn_t xdma_channel_isr(int irq, void *dev_id)
complete_desc_num != XDMA_DESC_BLOCK_NUM * XDMA_DESC_ADJACENT)
goto out;
- /* transfer the rest of data */
+ /* transfer the rest of data (SG only) */
xdma_xfer_start(xchan);
out:
@@ -841,7 +1003,7 @@ EXPORT_SYMBOL(xdma_get_user_irq);
* xdma_remove - Driver remove function
* @pdev: Pointer to the platform_device structure
*/
-static int xdma_remove(struct platform_device *pdev)
+static void xdma_remove(struct platform_device *pdev)
{
struct xdma_device *xdev = platform_get_drvdata(pdev);
@@ -850,8 +1012,6 @@ static int xdma_remove(struct platform_device *pdev)
if (xdev->status & XDMA_DEV_STATUS_REG_DMA)
dma_async_device_unregister(&xdev->dma_dev);
-
- return 0;
}
/**
@@ -885,7 +1045,7 @@ static int xdma_probe(struct platform_device *pdev)
goto failed;
}
xdev->irq_start = res->start;
- xdev->irq_num = res->end - res->start + 1;
+ xdev->irq_num = resource_size(res);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res) {
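
resource_size() is the canonical helper for this calculation; as defined in <linux/ioport.h>, it is exactly the off-by-one-prone expression it replaces:

static inline resource_size_t resource_size(const struct resource *res)
{
	return res->end - res->start + 1;
}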
@@ -921,17 +1081,20 @@ static int xdma_probe(struct platform_device *pdev)
dma_cap_set(DMA_SLAVE, xdev->dma_dev.cap_mask);
dma_cap_set(DMA_PRIVATE, xdev->dma_dev.cap_mask);
+ dma_cap_set(DMA_CYCLIC, xdev->dma_dev.cap_mask);
xdev->dma_dev.dev = &pdev->dev;
+ xdev->dma_dev.residue_granularity = DMA_RESIDUE_GRANULARITY_SEGMENT;
xdev->dma_dev.device_free_chan_resources = xdma_free_chan_resources;
xdev->dma_dev.device_alloc_chan_resources = xdma_alloc_chan_resources;
- xdev->dma_dev.device_tx_status = dma_cookie_status;
+ xdev->dma_dev.device_tx_status = xdma_tx_status;
xdev->dma_dev.device_prep_slave_sg = xdma_prep_device_sg;
xdev->dma_dev.device_config = xdma_device_config;
xdev->dma_dev.device_issue_pending = xdma_issue_pending;
xdev->dma_dev.filter.map = pdata->device_map;
xdev->dma_dev.filter.mapcnt = pdata->device_map_cnt;
xdev->dma_dev.filter.fn = xdma_filter_fn;
+ xdev->dma_dev.device_prep_dma_cyclic = xdma_prep_dma_cyclic;
ret = dma_async_device_register(&xdev->dma_dev);
if (ret) {
@@ -966,7 +1129,7 @@ static struct platform_driver xdma_driver = {
},
.id_table = xdma_id_table,
.probe = xdma_probe,
- .remove = xdma_remove,
+ .remove_new = xdma_remove,
};
module_platform_driver(xdma_driver);
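
The .remove to .remove_new switch here, repeated in the three drivers below, is the tree-wide conversion to the void-returning platform remove callback: the int the old callback returned was ignored by the platform core, so an error return could only mislead. The shape of the conversion, as a minimal hypothetical driver:

#include <linux/platform_device.h>

/* Hypothetical skeleton: .remove_new returns void, so teardown must
 * release everything unconditionally instead of reporting failure. */
static void foo_remove(struct platform_device *pdev)
{
	/* free resources; nothing useful can be returned */
}

static struct platform_driver foo_driver = {
	.driver		= { .name = "foo" },
	.remove_new	= foo_remove,
};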
diff --git a/drivers/dma/xilinx/xilinx_dma.c b/drivers/dma/xilinx/xilinx_dma.c
index 0a3b2e22f2..e40696f6f8 100644
--- a/drivers/dma/xilinx/xilinx_dma.c
+++ b/drivers/dma/xilinx/xilinx_dma.c
@@ -3242,10 +3242,8 @@ disable_clks:
/**
* xilinx_dma_remove - Driver remove function
* @pdev: Pointer to the platform_device structure
- *
- * Return: Always '0'
*/
-static int xilinx_dma_remove(struct platform_device *pdev)
+static void xilinx_dma_remove(struct platform_device *pdev)
{
struct xilinx_dma_device *xdev = platform_get_drvdata(pdev);
int i;
@@ -3259,8 +3257,6 @@ static int xilinx_dma_remove(struct platform_device *pdev)
xilinx_dma_chan_remove(xdev->chan[i]);
xdma_disable_allclks(xdev);
-
- return 0;
}
static struct platform_driver xilinx_vdma_driver = {
@@ -3269,7 +3265,7 @@ static struct platform_driver xilinx_vdma_driver = {
.of_match_table = xilinx_dma_of_ids,
},
.probe = xilinx_dma_probe,
- .remove = xilinx_dma_remove,
+ .remove_new = xilinx_dma_remove,
};
module_platform_driver(xilinx_vdma_driver);
diff --git a/drivers/dma/xilinx/xilinx_dpdma.c b/drivers/dma/xilinx/xilinx_dpdma.c
index 84dc5240a8..69587d85a7 100644
--- a/drivers/dma/xilinx/xilinx_dpdma.c
+++ b/drivers/dma/xilinx/xilinx_dpdma.c
@@ -1736,7 +1736,7 @@ error:
return ret;
}
-static int xilinx_dpdma_remove(struct platform_device *pdev)
+static void xilinx_dpdma_remove(struct platform_device *pdev)
{
struct xilinx_dpdma_device *xdev = platform_get_drvdata(pdev);
unsigned int i;
@@ -1751,8 +1751,6 @@ static int xilinx_dpdma_remove(struct platform_device *pdev)
for (i = 0; i < ARRAY_SIZE(xdev->chan); i++)
xilinx_dpdma_chan_remove(xdev->chan[i]);
-
- return 0;
}
static const struct of_device_id xilinx_dpdma_of_match[] = {
@@ -1763,7 +1761,7 @@ MODULE_DEVICE_TABLE(of, xilinx_dpdma_of_match);
static struct platform_driver xilinx_dpdma_driver = {
.probe = xilinx_dpdma_probe,
- .remove = xilinx_dpdma_remove,
+ .remove_new = xilinx_dpdma_remove,
.driver = {
.name = "xilinx-zynqmp-dpdma",
.of_match_table = xilinx_dpdma_of_match,
diff --git a/drivers/dma/xilinx/zynqmp_dma.c b/drivers/dma/xilinx/zynqmp_dma.c
index bd8c3cc2ea..f31631bef9 100644
--- a/drivers/dma/xilinx/zynqmp_dma.c
+++ b/drivers/dma/xilinx/zynqmp_dma.c
@@ -1147,7 +1147,7 @@ err_disable_pm:
*
* Return: Always '0'
*/
-static int zynqmp_dma_remove(struct platform_device *pdev)
+static void zynqmp_dma_remove(struct platform_device *pdev)
{
struct zynqmp_dma_device *zdev = platform_get_drvdata(pdev);
@@ -1158,8 +1158,6 @@ static int zynqmp_dma_remove(struct platform_device *pdev)
pm_runtime_disable(zdev->dev);
if (!pm_runtime_enabled(zdev->dev))
zynqmp_dma_runtime_suspend(zdev->dev);
-
- return 0;
}
static const struct of_device_id zynqmp_dma_of_match[] = {
@@ -1175,7 +1173,7 @@ static struct platform_driver zynqmp_dma_driver = {
.pm = &zynqmp_dma_dev_pm_ops,
},
.probe = zynqmp_dma_probe,
- .remove = zynqmp_dma_remove,
+ .remove_new = zynqmp_dma_remove,
};
module_platform_driver(zynqmp_dma_driver);