From 01a69402cf9d38ff180345d55c2ee51c7e89fbc7 Mon Sep 17 00:00:00 2001 From: Daniel Baumann Date: Sat, 18 May 2024 20:50:03 +0200 Subject: Adding upstream version 6.8.9. Signed-off-by: Daniel Baumann --- drivers/dma/xilinx/xdma-regs.h | 33 ++-- drivers/dma/xilinx/xdma.c | 361 ++++++++++++++++++++++++++++---------- drivers/dma/xilinx/xilinx_dpdma.c | 15 +- 3 files changed, 301 insertions(+), 108 deletions(-) (limited to 'drivers/dma/xilinx') diff --git a/drivers/dma/xilinx/xdma-regs.h b/drivers/dma/xilinx/xdma-regs.h index e641a5083e..6ad08878e9 100644 --- a/drivers/dma/xilinx/xdma-regs.h +++ b/drivers/dma/xilinx/xdma-regs.h @@ -64,9 +64,10 @@ struct xdma_hw_desc { __le64 next_desc; }; -#define XDMA_DESC_SIZE sizeof(struct xdma_hw_desc) -#define XDMA_DESC_BLOCK_SIZE (XDMA_DESC_SIZE * XDMA_DESC_ADJACENT) -#define XDMA_DESC_BLOCK_ALIGN 4096 +#define XDMA_DESC_SIZE sizeof(struct xdma_hw_desc) +#define XDMA_DESC_BLOCK_SIZE (XDMA_DESC_SIZE * XDMA_DESC_ADJACENT) +#define XDMA_DESC_BLOCK_ALIGN 32 +#define XDMA_DESC_BLOCK_BOUNDARY 4096 /* * Channel registers @@ -76,6 +77,7 @@ struct xdma_hw_desc { #define XDMA_CHAN_CONTROL_W1S 0x8 #define XDMA_CHAN_CONTROL_W1C 0xc #define XDMA_CHAN_STATUS 0x40 +#define XDMA_CHAN_STATUS_RC 0x44 #define XDMA_CHAN_COMPLETED_DESC 0x48 #define XDMA_CHAN_ALIGNMENTS 0x4c #define XDMA_CHAN_INTR_ENABLE 0x90 @@ -101,6 +103,7 @@ struct xdma_hw_desc { #define CHAN_CTRL_IE_MAGIC_STOPPED BIT(4) #define CHAN_CTRL_IE_IDLE_STOPPED BIT(6) #define CHAN_CTRL_IE_READ_ERROR GENMASK(13, 9) +#define CHAN_CTRL_IE_WRITE_ERROR GENMASK(18, 14) #define CHAN_CTRL_IE_DESC_ERROR GENMASK(23, 19) #define CHAN_CTRL_NON_INCR_ADDR BIT(25) #define CHAN_CTRL_POLL_MODE_WB BIT(26) @@ -111,8 +114,20 @@ struct xdma_hw_desc { CHAN_CTRL_IE_DESC_ALIGN_MISMATCH | \ CHAN_CTRL_IE_MAGIC_STOPPED | \ CHAN_CTRL_IE_READ_ERROR | \ + CHAN_CTRL_IE_WRITE_ERROR | \ CHAN_CTRL_IE_DESC_ERROR) +/* bits of the channel status register */ +#define XDMA_CHAN_STATUS_BUSY BIT(0) + +#define XDMA_CHAN_STATUS_MASK CHAN_CTRL_START + +#define XDMA_CHAN_ERROR_MASK (CHAN_CTRL_IE_DESC_ALIGN_MISMATCH | \ + CHAN_CTRL_IE_MAGIC_STOPPED | \ + CHAN_CTRL_IE_READ_ERROR | \ + CHAN_CTRL_IE_WRITE_ERROR | \ + CHAN_CTRL_IE_DESC_ERROR) + /* bits of the channel interrupt enable mask */ #define CHAN_IM_DESC_ERROR BIT(19) #define CHAN_IM_READ_ERROR BIT(9) @@ -134,18 +149,6 @@ struct xdma_hw_desc { #define XDMA_SGDMA_DESC_ADJ 0x4088 #define XDMA_SGDMA_DESC_CREDIT 0x408c -/* bits of the SG DMA control register */ -#define XDMA_CTRL_RUN_STOP BIT(0) -#define XDMA_CTRL_IE_DESC_STOPPED BIT(1) -#define XDMA_CTRL_IE_DESC_COMPLETED BIT(2) -#define XDMA_CTRL_IE_DESC_ALIGN_MISMATCH BIT(3) -#define XDMA_CTRL_IE_MAGIC_STOPPED BIT(4) -#define XDMA_CTRL_IE_IDLE_STOPPED BIT(6) -#define XDMA_CTRL_IE_READ_ERROR GENMASK(13, 9) -#define XDMA_CTRL_IE_DESC_ERROR GENMASK(23, 19) -#define XDMA_CTRL_NON_INCR_ADDR BIT(25) -#define XDMA_CTRL_POLL_MODE_WB BIT(26) - /* * interrupt registers */ diff --git a/drivers/dma/xilinx/xdma.c b/drivers/dma/xilinx/xdma.c index 2c9c72d4b5..5a3a3293b2 100644 --- a/drivers/dma/xilinx/xdma.c +++ b/drivers/dma/xilinx/xdma.c @@ -71,6 +71,8 @@ struct xdma_chan { enum dma_transfer_direction dir; struct dma_slave_config cfg; u32 irq; + struct completion last_interrupt; + bool stop_requested; }; /** @@ -78,27 +80,31 @@ struct xdma_chan { * @vdesc: Virtual DMA descriptor * @chan: DMA channel pointer * @dir: Transferring direction of the request - * @dev_addr: Physical address on DMA device side * @desc_blocks: Hardware descriptor blocks * @dblk_num: Number 
of hardware descriptor blocks * @desc_num: Number of hardware descriptors * @completed_desc_num: Completed hardware descriptors * @cyclic: Cyclic transfer vs. scatter-gather + * @interleaved_dma: Interleaved DMA transfer * @periods: Number of periods in the cyclic transfer * @period_size: Size of a period in bytes in cyclic transfers + * @frames_left: Number of frames left in interleaved DMA transfer + * @error: tx error flag */ struct xdma_desc { struct virt_dma_desc vdesc; struct xdma_chan *chan; enum dma_transfer_direction dir; - u64 dev_addr; struct xdma_desc_block *desc_blocks; u32 dblk_num; u32 desc_num; u32 completed_desc_num; bool cyclic; + bool interleaved_dma; u32 periods; u32 period_size; + u32 frames_left; + bool error; }; #define XDMA_DEV_STATUS_REG_DMA BIT(0) @@ -276,6 +282,7 @@ xdma_alloc_desc(struct xdma_chan *chan, u32 desc_num, bool cyclic) sw_desc->chan = chan; sw_desc->desc_num = desc_num; sw_desc->cyclic = cyclic; + sw_desc->error = false; dblk_num = DIV_ROUND_UP(desc_num, XDMA_DESC_ADJACENT); sw_desc->desc_blocks = kcalloc(dblk_num, sizeof(*sw_desc->desc_blocks), GFP_NOWAIT); @@ -371,9 +378,29 @@ static int xdma_xfer_start(struct xdma_chan *xchan) return ret; xchan->busy = true; + xchan->stop_requested = false; + reinit_completion(&xchan->last_interrupt); + return 0; } +/** + * xdma_xfer_stop - Stop DMA transfer + * @xchan: DMA channel pointer + */ +static int xdma_xfer_stop(struct xdma_chan *xchan) +{ + int ret; + struct xdma_device *xdev = xchan->xdev_hdl; + + /* clear run stop bit to prevent any further auto-triggering */ + ret = regmap_write(xdev->rmap, xchan->base + XDMA_CHAN_CONTROL_W1C, + CHAN_CTRL_RUN_STOP); + if (ret) + return ret; + return ret; +} + /** * xdma_alloc_channels - Detect and allocate DMA channels * @xdev: DMA device pointer @@ -444,6 +471,8 @@ static int xdma_alloc_channels(struct xdma_device *xdev, xchan->xdev_hdl = xdev; xchan->base = base + i * XDMA_CHAN_STRIDE; xchan->dir = dir; + xchan->stop_requested = false; + init_completion(&xchan->last_interrupt); ret = xdma_channel_init(xchan); if (ret) @@ -475,6 +504,92 @@ static void xdma_issue_pending(struct dma_chan *chan) spin_unlock_irqrestore(&xdma_chan->vchan.lock, flags); } +/** + * xdma_terminate_all - Terminate all transactions + * @chan: DMA channel pointer + */ +static int xdma_terminate_all(struct dma_chan *chan) +{ + struct xdma_chan *xdma_chan = to_xdma_chan(chan); + struct virt_dma_desc *vd; + unsigned long flags; + LIST_HEAD(head); + + xdma_xfer_stop(xdma_chan); + + spin_lock_irqsave(&xdma_chan->vchan.lock, flags); + + xdma_chan->busy = false; + xdma_chan->stop_requested = true; + vd = vchan_next_desc(&xdma_chan->vchan); + if (vd) { + list_del(&vd->node); + dma_cookie_complete(&vd->tx); + vchan_terminate_vdesc(vd); + } + vchan_get_all_descriptors(&xdma_chan->vchan, &head); + list_splice_tail(&head, &xdma_chan->vchan.desc_terminated); + + spin_unlock_irqrestore(&xdma_chan->vchan.lock, flags); + + return 0; +} + +/** + * xdma_synchronize - Synchronize terminated transactions + * @chan: DMA channel pointer + */ +static void xdma_synchronize(struct dma_chan *chan) +{ + struct xdma_chan *xdma_chan = to_xdma_chan(chan); + struct xdma_device *xdev = xdma_chan->xdev_hdl; + int st = 0; + + /* If the engine continues running, wait for the last interrupt */ + regmap_read(xdev->rmap, xdma_chan->base + XDMA_CHAN_STATUS, &st); + if (st & XDMA_CHAN_STATUS_BUSY) + wait_for_completion_timeout(&xdma_chan->last_interrupt, msecs_to_jiffies(1000)); + + vchan_synchronize(&xdma_chan->vchan); +} + +/** + * 
xdma_fill_descs - Fill hardware descriptors with contiguous memory block addresses + * @sw_desc: tx descriptor state container + * @src_addr: Value for a ->src_addr field of a first descriptor + * @dst_addr: Value for a ->dst_addr field of a first descriptor + * @size: Total size of a contiguous memory block + * @filled_descs_num: Number of filled hardware descriptors for corresponding sw_desc + */ +static inline u32 xdma_fill_descs(struct xdma_desc *sw_desc, u64 src_addr, + u64 dst_addr, u32 size, u32 filled_descs_num) +{ + u32 left = size, len, desc_num = filled_descs_num; + struct xdma_desc_block *dblk; + struct xdma_hw_desc *desc; + + dblk = sw_desc->desc_blocks + (desc_num / XDMA_DESC_ADJACENT); + desc = dblk->virt_addr; + desc += desc_num & XDMA_DESC_ADJACENT_MASK; + do { + len = min_t(u32, left, XDMA_DESC_BLEN_MAX); + /* set hardware descriptor */ + desc->bytes = cpu_to_le32(len); + desc->src_addr = cpu_to_le64(src_addr); + desc->dst_addr = cpu_to_le64(dst_addr); + if (!(++desc_num & XDMA_DESC_ADJACENT_MASK)) + desc = (++dblk)->virt_addr; + else + desc++; + + src_addr += len; + dst_addr += len; + left -= len; + } while (left); + + return desc_num - filled_descs_num; +} + /** * xdma_prep_device_sg - prepare a descriptor for a DMA transaction * @chan: DMA channel pointer @@ -491,13 +606,10 @@ xdma_prep_device_sg(struct dma_chan *chan, struct scatterlist *sgl, { struct xdma_chan *xdma_chan = to_xdma_chan(chan); struct dma_async_tx_descriptor *tx_desc; - u32 desc_num = 0, i, len, rest; - struct xdma_desc_block *dblk; - struct xdma_hw_desc *desc; struct xdma_desc *sw_desc; - u64 dev_addr, *src, *dst; + u32 desc_num = 0, i; + u64 addr, dev_addr, *src, *dst; struct scatterlist *sg; - u64 addr; for_each_sg(sgl, sg, sg_len, i) desc_num += DIV_ROUND_UP(sg_dma_len(sg), XDMA_DESC_BLEN_MAX); @@ -506,6 +618,8 @@ xdma_prep_device_sg(struct dma_chan *chan, struct scatterlist *sgl, if (!sw_desc) return NULL; sw_desc->dir = dir; + sw_desc->cyclic = false; + sw_desc->interleaved_dma = false; if (dir == DMA_MEM_TO_DEV) { dev_addr = xdma_chan->cfg.dst_addr; @@ -517,32 +631,11 @@ xdma_prep_device_sg(struct dma_chan *chan, struct scatterlist *sgl, dst = &addr; } - dblk = sw_desc->desc_blocks; - desc = dblk->virt_addr; - desc_num = 1; + desc_num = 0; for_each_sg(sgl, sg, sg_len, i) { addr = sg_dma_address(sg); - rest = sg_dma_len(sg); - - do { - len = min_t(u32, rest, XDMA_DESC_BLEN_MAX); - /* set hardware descriptor */ - desc->bytes = cpu_to_le32(len); - desc->src_addr = cpu_to_le64(*src); - desc->dst_addr = cpu_to_le64(*dst); - - if (!(desc_num & XDMA_DESC_ADJACENT_MASK)) { - dblk++; - desc = dblk->virt_addr; - } else { - desc++; - } - - desc_num++; - dev_addr += len; - addr += len; - rest -= len; - } while (rest); + desc_num += xdma_fill_descs(sw_desc, *src, *dst, sg_dma_len(sg), desc_num); + dev_addr += sg_dma_len(sg); } tx_desc = vchan_tx_prep(&xdma_chan->vchan, &sw_desc->vdesc, flags); @@ -576,9 +669,9 @@ xdma_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t address, struct xdma_device *xdev = xdma_chan->xdev_hdl; unsigned int periods = size / period_size; struct dma_async_tx_descriptor *tx_desc; - struct xdma_desc_block *dblk; - struct xdma_hw_desc *desc; struct xdma_desc *sw_desc; + u64 addr, dev_addr, *src, *dst; + u32 desc_num; unsigned int i; /* @@ -602,22 +695,23 @@ xdma_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t address, sw_desc->periods = periods; sw_desc->period_size = period_size; sw_desc->dir = dir; + sw_desc->interleaved_dma = false; - dblk = sw_desc->desc_blocks; - desc = 
dblk->virt_addr; + addr = address; + if (dir == DMA_MEM_TO_DEV) { + dev_addr = xdma_chan->cfg.dst_addr; + src = &addr; + dst = &dev_addr; + } else { + dev_addr = xdma_chan->cfg.src_addr; + src = &dev_addr; + dst = &addr; + } - /* fill hardware descriptor */ + desc_num = 0; for (i = 0; i < periods; i++) { - desc->bytes = cpu_to_le32(period_size); - if (dir == DMA_MEM_TO_DEV) { - desc->src_addr = cpu_to_le64(address + i * period_size); - desc->dst_addr = cpu_to_le64(xdma_chan->cfg.dst_addr); - } else { - desc->src_addr = cpu_to_le64(xdma_chan->cfg.src_addr); - desc->dst_addr = cpu_to_le64(address + i * period_size); - } - - desc++; + desc_num += xdma_fill_descs(sw_desc, *src, *dst, period_size, desc_num); + addr += period_size; } tx_desc = vchan_tx_prep(&xdma_chan->vchan, &sw_desc->vdesc, flags); @@ -632,6 +726,57 @@ failed: return NULL; } +/** + * xdma_prep_interleaved_dma - Prepare virtual descriptor for interleaved DMA transfers + * @chan: DMA channel + * @xt: DMA transfer template + * @flags: tx flags + */ +static struct dma_async_tx_descriptor * +xdma_prep_interleaved_dma(struct dma_chan *chan, + struct dma_interleaved_template *xt, + unsigned long flags) +{ + int i; + u32 desc_num = 0, period_size = 0; + struct dma_async_tx_descriptor *tx_desc; + struct xdma_chan *xchan = to_xdma_chan(chan); + struct xdma_desc *sw_desc; + u64 src_addr, dst_addr; + + for (i = 0; i < xt->frame_size; ++i) + desc_num += DIV_ROUND_UP(xt->sgl[i].size, XDMA_DESC_BLEN_MAX); + + sw_desc = xdma_alloc_desc(xchan, desc_num, false); + if (!sw_desc) + return NULL; + sw_desc->dir = xt->dir; + sw_desc->interleaved_dma = true; + sw_desc->cyclic = flags & DMA_PREP_REPEAT; + sw_desc->frames_left = xt->numf; + sw_desc->periods = xt->numf; + + desc_num = 0; + src_addr = xt->src_start; + dst_addr = xt->dst_start; + for (i = 0; i < xt->frame_size; ++i) { + desc_num += xdma_fill_descs(sw_desc, src_addr, dst_addr, xt->sgl[i].size, desc_num); + src_addr += dmaengine_get_src_icg(xt, &xt->sgl[i]) + (xt->src_inc ? + xt->sgl[i].size : 0); + dst_addr += dmaengine_get_dst_icg(xt, &xt->sgl[i]) + (xt->dst_inc ? 
+ xt->sgl[i].size : 0); + period_size += xt->sgl[i].size; + } + sw_desc->period_size = period_size; + + tx_desc = vchan_tx_prep(&xchan->vchan, &sw_desc->vdesc, flags); + if (tx_desc) + return tx_desc; + + xdma_free_desc(&sw_desc->vdesc); + return NULL; +} + /** * xdma_device_config - Configure the DMA channel * @chan: DMA channel @@ -677,9 +822,8 @@ static int xdma_alloc_chan_resources(struct dma_chan *chan) return -EINVAL; } - xdma_chan->desc_pool = dma_pool_create(dma_chan_name(chan), - dev, XDMA_DESC_BLOCK_SIZE, - XDMA_DESC_BLOCK_ALIGN, 0); + xdma_chan->desc_pool = dma_pool_create(dma_chan_name(chan), dev, XDMA_DESC_BLOCK_SIZE, + XDMA_DESC_BLOCK_ALIGN, XDMA_DESC_BLOCK_BOUNDARY); if (!xdma_chan->desc_pool) { xdma_err(xdev, "unable to allocate descriptor pool"); return -ENOMEM; @@ -706,20 +850,20 @@ static enum dma_status xdma_tx_status(struct dma_chan *chan, dma_cookie_t cookie spin_lock_irqsave(&xdma_chan->vchan.lock, flags); vd = vchan_find_desc(&xdma_chan->vchan, cookie); - if (vd) - desc = to_xdma_desc(vd); - if (!desc || !desc->cyclic) { - spin_unlock_irqrestore(&xdma_chan->vchan.lock, flags); - return ret; - } - - period_idx = desc->completed_desc_num % desc->periods; - residue = (desc->periods - period_idx) * desc->period_size; + if (!vd) + goto out; + desc = to_xdma_desc(vd); + if (desc->error) { + ret = DMA_ERROR; + } else if (desc->cyclic) { + period_idx = desc->completed_desc_num % desc->periods; + residue = (desc->periods - period_idx) * desc->period_size; + dma_set_residue(state, residue); + } +out: spin_unlock_irqrestore(&xdma_chan->vchan.lock, flags); - dma_set_residue(state, residue); - return ret; } @@ -732,11 +876,15 @@ static irqreturn_t xdma_channel_isr(int irq, void *dev_id) { struct xdma_chan *xchan = dev_id; u32 complete_desc_num = 0; - struct xdma_device *xdev; - struct virt_dma_desc *vd; + struct xdma_device *xdev = xchan->xdev_hdl; + struct virt_dma_desc *vd, *next_vd; struct xdma_desc *desc; int ret; u32 st; + bool repeat_tx; + + if (xchan->stop_requested) + complete(&xchan->last_interrupt); spin_lock(&xchan->vchan.lock); @@ -745,47 +893,76 @@ static irqreturn_t xdma_channel_isr(int irq, void *dev_id) if (!vd) goto out; - xchan->busy = false; + /* Clear-on-read the status register */ + ret = regmap_read(xdev->rmap, xchan->base + XDMA_CHAN_STATUS_RC, &st); + if (ret) + goto out; + desc = to_xdma_desc(vd); - xdev = xchan->xdev_hdl; + + st &= XDMA_CHAN_STATUS_MASK; + if ((st & XDMA_CHAN_ERROR_MASK) || + !(st & (CHAN_CTRL_IE_DESC_COMPLETED | CHAN_CTRL_IE_DESC_STOPPED))) { + desc->error = true; + xdma_err(xdev, "channel error, status register value: 0x%x", st); + goto out; + } ret = regmap_read(xdev->rmap, xchan->base + XDMA_CHAN_COMPLETED_DESC, &complete_desc_num); if (ret) goto out; - if (desc->cyclic) { - desc->completed_desc_num = complete_desc_num; - - ret = regmap_read(xdev->rmap, xchan->base + XDMA_CHAN_STATUS, - &st); - if (ret) + if (desc->interleaved_dma) { + xchan->busy = false; + desc->completed_desc_num += complete_desc_num; + if (complete_desc_num == XDMA_DESC_BLOCK_NUM * XDMA_DESC_ADJACENT) { + xdma_xfer_start(xchan); goto out; + } - regmap_write(xdev->rmap, xchan->base + XDMA_CHAN_STATUS, st); + /* last desc of any frame */ + desc->frames_left--; + if (desc->frames_left) + goto out; - vchan_cyclic_callback(vd); - goto out; - } + /* last desc of the last frame */ + repeat_tx = vd->tx.flags & DMA_PREP_REPEAT; + next_vd = list_first_entry_or_null(&vd->node, struct virt_dma_desc, node); + if (next_vd) + repeat_tx = repeat_tx && !(next_vd->tx.flags & 
DMA_PREP_LOAD_EOT); + if (repeat_tx) { + desc->frames_left = desc->periods; + desc->completed_desc_num = 0; + vchan_cyclic_callback(vd); + } else { + list_del(&vd->node); + vchan_cookie_complete(vd); + } + /* start (or continue) the tx of a first desc on the vc.desc_issued list, if any */ + xdma_xfer_start(xchan); + } else if (!desc->cyclic) { + xchan->busy = false; + desc->completed_desc_num += complete_desc_num; + + /* if all data blocks are transferred, remove and complete the request */ + if (desc->completed_desc_num == desc->desc_num) { + list_del(&vd->node); + vchan_cookie_complete(vd); + goto out; + } - desc->completed_desc_num += complete_desc_num; + if (desc->completed_desc_num > desc->desc_num || + complete_desc_num != XDMA_DESC_BLOCK_NUM * XDMA_DESC_ADJACENT) + goto out; - /* - * if all data blocks are transferred, remove and complete the request - */ - if (desc->completed_desc_num == desc->desc_num) { - list_del(&vd->node); - vchan_cookie_complete(vd); - goto out; + /* transfer the rest of data */ + xdma_xfer_start(xchan); + } else { + desc->completed_desc_num = complete_desc_num; + vchan_cyclic_callback(vd); } - if (desc->completed_desc_num > desc->desc_num || - complete_desc_num != XDMA_DESC_BLOCK_NUM * XDMA_DESC_ADJACENT) - goto out; - - /* transfer the rest of data (SG only) */ - xdma_xfer_start(xchan); - out: spin_unlock(&xchan->vchan.lock); return IRQ_HANDLED; @@ -1082,6 +1259,9 @@ static int xdma_probe(struct platform_device *pdev) dma_cap_set(DMA_SLAVE, xdev->dma_dev.cap_mask); dma_cap_set(DMA_PRIVATE, xdev->dma_dev.cap_mask); dma_cap_set(DMA_CYCLIC, xdev->dma_dev.cap_mask); + dma_cap_set(DMA_INTERLEAVE, xdev->dma_dev.cap_mask); + dma_cap_set(DMA_REPEAT, xdev->dma_dev.cap_mask); + dma_cap_set(DMA_LOAD_EOT, xdev->dma_dev.cap_mask); xdev->dma_dev.dev = &pdev->dev; xdev->dma_dev.residue_granularity = DMA_RESIDUE_GRANULARITY_SEGMENT; @@ -1091,10 +1271,13 @@ static int xdma_probe(struct platform_device *pdev) xdev->dma_dev.device_prep_slave_sg = xdma_prep_device_sg; xdev->dma_dev.device_config = xdma_device_config; xdev->dma_dev.device_issue_pending = xdma_issue_pending; + xdev->dma_dev.device_terminate_all = xdma_terminate_all; + xdev->dma_dev.device_synchronize = xdma_synchronize; xdev->dma_dev.filter.map = pdata->device_map; xdev->dma_dev.filter.mapcnt = pdata->device_map_cnt; xdev->dma_dev.filter.fn = xdma_filter_fn; xdev->dma_dev.device_prep_dma_cyclic = xdma_prep_dma_cyclic; + xdev->dma_dev.device_prep_interleaved_dma = xdma_prep_interleaved_dma; ret = dma_async_device_register(&xdev->dma_dev); if (ret) { diff --git a/drivers/dma/xilinx/xilinx_dpdma.c b/drivers/dma/xilinx/xilinx_dpdma.c index 69587d85a7..eb0637d903 100644 --- a/drivers/dma/xilinx/xilinx_dpdma.c +++ b/drivers/dma/xilinx/xilinx_dpdma.c @@ -214,7 +214,8 @@ struct xilinx_dpdma_tx_desc { * @running: true if the channel is running * @first_frame: flag for the first frame of stream * @video_group: flag if multi-channel operation is needed for video channels - * @lock: lock to access struct xilinx_dpdma_chan + * @lock: lock to access struct xilinx_dpdma_chan. Must be taken before + * @vchan.lock, if both are to be held. 
* @desc_pool: descriptor allocation pool * @err_task: error IRQ bottom half handler * @desc: References to descriptors being processed @@ -309,7 +310,7 @@ static ssize_t xilinx_dpdma_debugfs_desc_done_irq_read(char *buf) out_str_len = strlen(XILINX_DPDMA_DEBUGFS_UINT16_MAX_STR); out_str_len = min_t(size_t, XILINX_DPDMA_DEBUGFS_READ_MAX_SIZE, - out_str_len); + out_str_len + 1); snprintf(buf, out_str_len, "%d", dpdma_debugfs.xilinx_dpdma_irq_done_count); @@ -1097,12 +1098,14 @@ static void xilinx_dpdma_chan_vsync_irq(struct xilinx_dpdma_chan *chan) * Complete the active descriptor, if any, promote the pending * descriptor to active, and queue the next transfer, if any. */ + spin_lock(&chan->vchan.lock); if (chan->desc.active) vchan_cookie_complete(&chan->desc.active->vdesc); chan->desc.active = pending; chan->desc.pending = NULL; xilinx_dpdma_chan_queue_transfer(chan); + spin_unlock(&chan->vchan.lock); out: spin_unlock_irqrestore(&chan->lock, flags); @@ -1264,10 +1267,12 @@ static void xilinx_dpdma_issue_pending(struct dma_chan *dchan) struct xilinx_dpdma_chan *chan = to_xilinx_chan(dchan); unsigned long flags; - spin_lock_irqsave(&chan->vchan.lock, flags); + spin_lock_irqsave(&chan->lock, flags); + spin_lock(&chan->vchan.lock); if (vchan_issue_pending(&chan->vchan)) xilinx_dpdma_chan_queue_transfer(chan); - spin_unlock_irqrestore(&chan->vchan.lock, flags); + spin_unlock(&chan->vchan.lock); + spin_unlock_irqrestore(&chan->lock, flags); } static int xilinx_dpdma_config(struct dma_chan *dchan, @@ -1495,7 +1500,9 @@ static void xilinx_dpdma_chan_err_task(struct tasklet_struct *t) XILINX_DPDMA_EINTR_CHAN_ERR_MASK << chan->id); spin_lock_irqsave(&chan->lock, flags); + spin_lock(&chan->vchan.lock); xilinx_dpdma_chan_queue_transfer(chan); + spin_unlock(&chan->vchan.lock); spin_unlock_irqrestore(&chan->lock, flags); } -- cgit v1.2.3
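
Illustrative usage sketch (not part of the patch above): the xdma.c hunks wire up device_prep_interleaved_dma together with the DMA_INTERLEAVE, DMA_REPEAT and DMA_LOAD_EOT capabilities. The sketch below shows how a generic dmaengine client could drive that path; it is an assumption-laden example, not code from this series. The channel name "tx-chan", the device pointer, the DMA addresses, the direction and the frame geometry are placeholders, while the client calls themselves (dma_request_chan(), dmaengine_prep_interleaved_dma(), dmaengine_submit(), dma_async_issue_pending(), dmaengine_terminate_sync()) are the standard API from <linux/dmaengine.h>.

/*
 * Minimal, illustrative dmaengine client for the interleaved path added by
 * this patch. Channel name, addresses, direction and sizes are placeholders;
 * error handling is reduced to the essentials.
 */
#include <linux/dmaengine.h>
#include <linux/err.h>
#include <linux/slab.h>

static int xdma_example_start_repeated_frames(struct device *dev,
					      dma_addr_t src, dma_addr_t dst,
					      size_t frame_len,
					      unsigned int nframes)
{
	struct dma_interleaved_template *xt;
	struct dma_async_tx_descriptor *tx;
	struct dma_chan *chan;
	dma_cookie_t cookie;
	int ret;

	chan = dma_request_chan(dev, "tx-chan");	/* placeholder channel name */
	if (IS_ERR(chan))
		return PTR_ERR(chan);

	xt = kzalloc(struct_size(xt, sgl, 1), GFP_KERNEL);
	if (!xt) {
		ret = -ENOMEM;
		goto release;
	}

	/* nframes frames, each a single contiguous chunk of frame_len bytes */
	xt->dir = DMA_MEM_TO_DEV;	/* placeholder direction */
	xt->src_start = src;
	xt->dst_start = dst;
	xt->src_inc = true;
	xt->dst_inc = true;
	xt->numf = nframes;
	xt->frame_size = 1;
	xt->sgl[0].size = frame_len;
	xt->sgl[0].icg = 0;

	/* DMA_PREP_REPEAT keeps the transfer cycling until a later EOT descriptor */
	tx = dmaengine_prep_interleaved_dma(chan, xt,
					    DMA_PREP_REPEAT | DMA_PREP_LOAD_EOT);
	kfree(xt);		/* the driver consumes the template at prep time */
	if (!tx) {
		ret = -EIO;
		goto release;
	}

	cookie = dmaengine_submit(tx);
	ret = dma_submit_error(cookie);
	if (ret)
		goto release;

	dma_async_issue_pending(chan);
	return 0;		/* stop later with dmaengine_terminate_sync(chan) */

release:
	dma_release_channel(chan);
	return ret;
}

With DMA_PREP_REPEAT set, xdma_channel_isr() in the patch re-arms the same descriptor after the last frame and invokes vchan_cyclic_callback(), until the client either terminates the channel or issues a new descriptor flagged DMA_PREP_LOAD_EOT.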