author | Daniel Baumann <daniel.baumann@progress-linux.org> | 2024-05-08 04:15:15 +0000 |
---|---|---|
committer | Daniel Baumann <daniel.baumann@progress-linux.org> | 2024-05-08 04:15:15 +0000 |
commit | 68c1b0995e963349e50f8a8b00ebc140908f7882 (patch) | |
tree | ae3788542761985ee15e3d1ca439706040d1eb0c | /drivers/dma |
parent | Releasing progress-linux version 4.19.269-1progress5u1. (diff) | |
download | linux-68c1b0995e963349e50f8a8b00ebc140908f7882.tar.xz linux-68c1b0995e963349e50f8a8b00ebc140908f7882.zip | |
Merging upstream version 4.19.282.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'drivers/dma')
-rw-r--r-- | drivers/dma/dmaengine.c | 7 |
-rw-r--r-- | drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c | 6 |
-rw-r--r-- | drivers/dma/imx-sdma.c | 4 |
-rw-r--r-- | drivers/dma/sh/rcar-dmac.c | 5 |
-rw-r--r-- | drivers/dma/xilinx/xilinx_dma.c | 78 |
5 files changed, 77 insertions, 23 deletions
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index 8a52a5efe..e1cf7c250 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -223,7 +223,8 @@ static int dma_chan_get(struct dma_chan *chan)
 	/* The channel is already in use, update client count */
 	if (chan->client_count) {
 		__module_get(owner);
-		goto out;
+		chan->client_count++;
+		return 0;
 	}
 
 	if (!try_module_get(owner))
@@ -236,11 +237,11 @@ static int dma_chan_get(struct dma_chan *chan)
 			goto err_out;
 	}
 
+	chan->client_count++;
+
 	if (!dma_has_cap(DMA_PRIVATE, chan->device->cap_mask))
 		balance_ref_count(chan);
 
-out:
-	chan->client_count++;
 	return 0;
 
 err_out:
diff --git a/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c b/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c
index c05ef7f1d..99a403852 100644
--- a/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c
+++ b/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c
@@ -551,6 +551,11 @@ static noinline void axi_chan_handle_err(struct axi_dma_chan *chan, u32 status)
 
 	/* The bad descriptor currently is in the head of vc list */
 	vd = vchan_next_desc(&chan->vc);
+	if (!vd) {
+		dev_err(chan2dev(chan), "BUG: %s, IRQ with no descriptors\n",
+			axi_chan_name(chan));
+		goto out;
+	}
 	/* Remove the completed descriptor from issued list */
 	list_del(&vd->node);
 
@@ -565,6 +570,7 @@ static noinline void axi_chan_handle_err(struct axi_dma_chan *chan, u32 status)
 
 	/* Try to restart the controller */
 	axi_chan_start_first_queued(chan);
 
+out:
 	spin_unlock_irqrestore(&chan->vc.lock, flags);
 }
diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c
index 709ead443..5794d3120 100644
--- a/drivers/dma/imx-sdma.c
+++ b/drivers/dma/imx-sdma.c
@@ -1347,10 +1347,12 @@ static struct sdma_desc *sdma_transfer_init(struct sdma_channel *sdmac,
 	sdma_config_ownership(sdmac, false, true, false);
 
 	if (sdma_load_context(sdmac))
-		goto err_desc_out;
+		goto err_bd_out;
 
 	return desc;
 
+err_bd_out:
+	sdma_free_bd(desc);
 err_desc_out:
 	kfree(desc);
 err_out:
diff --git a/drivers/dma/sh/rcar-dmac.c b/drivers/dma/sh/rcar-dmac.c
index 29c517623..e18d4116d 100644
--- a/drivers/dma/sh/rcar-dmac.c
+++ b/drivers/dma/sh/rcar-dmac.c
@@ -1816,7 +1816,10 @@ static int rcar_dmac_probe(struct platform_device *pdev)
 	dmac->dev = &pdev->dev;
 	platform_set_drvdata(pdev, dmac);
 	dmac->dev->dma_parms = &dmac->parms;
-	dma_set_max_seg_size(dmac->dev, RCAR_DMATCR_MASK);
+	ret = dma_set_max_seg_size(dmac->dev, RCAR_DMATCR_MASK);
+	if (ret)
+		return ret;
+
 	ret = dma_set_mask_and_coherent(dmac->dev, DMA_BIT_MASK(40));
 	if (ret)
 		return ret;
diff --git a/drivers/dma/xilinx/xilinx_dma.c b/drivers/dma/xilinx/xilinx_dma.c
index 0ba70be4e..5f9945651 100644
--- a/drivers/dma/xilinx/xilinx_dma.c
+++ b/drivers/dma/xilinx/xilinx_dma.c
@@ -164,7 +164,9 @@
 #define XILINX_DMA_REG_BTT		0x28
 
 /* AXI DMA Specific Masks/Bit fields */
-#define XILINX_DMA_MAX_TRANS_LEN	GENMASK(22, 0)
+#define XILINX_DMA_MAX_TRANS_LEN_MIN	8
+#define XILINX_DMA_MAX_TRANS_LEN_MAX	23
+#define XILINX_DMA_V2_MAX_TRANS_LEN_MAX	26
 #define XILINX_DMA_CR_COALESCE_MAX	GENMASK(23, 16)
 #define XILINX_DMA_CR_CYCLIC_BD_EN_MASK	BIT(4)
 #define XILINX_DMA_CR_COALESCE_SHIFT	16
@@ -428,6 +430,7 @@ struct xilinx_dma_config {
  * @rxs_clk: DMA s2mm stream clock
  * @nr_channels: Number of channels DMA device supports
  * @chan_id: DMA channel identifier
+ * @max_buffer_len: Max buffer length
  */
 struct xilinx_dma_device {
 	void __iomem *regs;
@@ -447,6 +450,7 @@ struct xilinx_dma_device {
 	struct clk *rxs_clk;
 	u32 nr_channels;
 	u32 chan_id;
+	u32 max_buffer_len;
 };
 
 /* Macros */
@@ -970,6 +974,25 @@ static int xilinx_dma_alloc_chan_resources(struct dma_chan *dchan)
 }
 
 /**
+ * xilinx_dma_calc_copysize - Calculate the amount of data to copy
+ * @chan: Driver specific DMA channel
+ * @size: Total data that needs to be copied
+ * @done: Amount of data that has been already copied
+ *
+ * Return: Amount of data that has to be copied
+ */
+static int xilinx_dma_calc_copysize(struct xilinx_dma_chan *chan,
+				    int size, int done)
+{
+	size_t copy;
+
+	copy = min_t(size_t, size - done,
+		     chan->xdev->max_buffer_len);
+
+	return copy;
+}
+
+/**
  * xilinx_dma_tx_status - Get DMA transaction status
  * @dchan: DMA channel
  * @cookie: Transaction identifier
@@ -1002,7 +1025,7 @@ static enum dma_status xilinx_dma_tx_status(struct dma_chan *dchan,
 		list_for_each_entry(segment, &desc->segments, node) {
 			hw = &segment->hw;
 			residue += (hw->control - hw->status) &
-				   XILINX_DMA_MAX_TRANS_LEN;
+				   chan->xdev->max_buffer_len;
 		}
 	}
 	spin_unlock_irqrestore(&chan->lock, flags);
@@ -1262,7 +1285,7 @@ static void xilinx_cdma_start_transfer(struct xilinx_dma_chan *chan)
 
 		/* Start the transfer */
 		dma_ctrl_write(chan, XILINX_DMA_REG_BTT,
-				hw->control & XILINX_DMA_MAX_TRANS_LEN);
+				hw->control & chan->xdev->max_buffer_len);
 	}
 
 	list_splice_tail_init(&chan->pending_list, &chan->active_list);
@@ -1365,7 +1388,7 @@ static void xilinx_dma_start_transfer(struct xilinx_dma_chan *chan)
 
 		/* Start the transfer */
 		dma_ctrl_write(chan, XILINX_DMA_REG_BTT,
-			       hw->control & XILINX_DMA_MAX_TRANS_LEN);
+			       hw->control & chan->xdev->max_buffer_len);
 	}
 
 	list_splice_tail_init(&chan->pending_list, &chan->active_list);
@@ -1729,7 +1752,7 @@ xilinx_cdma_prep_memcpy(struct dma_chan *dchan, dma_addr_t dma_dst,
 	struct xilinx_cdma_tx_segment *segment;
 	struct xilinx_cdma_desc_hw *hw;
 
-	if (!len || len > XILINX_DMA_MAX_TRANS_LEN)
+	if (!len || len > chan->xdev->max_buffer_len)
 		return NULL;
 
 	desc = xilinx_dma_alloc_tx_descriptor(chan);
@@ -1819,8 +1842,8 @@ static struct dma_async_tx_descriptor *xilinx_dma_prep_slave_sg(
 			 * Calculate the maximum number of bytes to transfer,
 			 * making sure it is less than the hw limit
 			 */
-			copy = min_t(size_t, sg_dma_len(sg) - sg_used,
-				     XILINX_DMA_MAX_TRANS_LEN);
+			copy = xilinx_dma_calc_copysize(chan, sg_dma_len(sg),
+							sg_used);
 			hw = &segment->hw;
 
 			/* Fill in the descriptor */
@@ -1924,8 +1947,8 @@ static struct dma_async_tx_descriptor *xilinx_dma_prep_dma_cyclic(
 		 * Calculate the maximum number of bytes to transfer,
 		 * making sure it is less than the hw limit
 		 */
-		copy = min_t(size_t, period_len - sg_used,
-			     XILINX_DMA_MAX_TRANS_LEN);
+		copy = xilinx_dma_calc_copysize(chan, period_len,
+						sg_used);
 		hw = &segment->hw;
 		xilinx_axidma_buf(chan, hw, buf_addr, sg_used,
 				  period_len * i);
@@ -2613,7 +2636,7 @@ static int xilinx_dma_probe(struct platform_device *pdev)
 	struct xilinx_dma_device *xdev;
 	struct device_node *child, *np = pdev->dev.of_node;
 	struct resource *io;
-	u32 num_frames, addr_width;
+	u32 num_frames, addr_width, len_width;
 	int i, err;
 
 	/* Allocate and initialize the DMA engine structure */
@@ -2640,13 +2663,30 @@ static int xilinx_dma_probe(struct platform_device *pdev)
 	/* Request and map I/O memory */
 	io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 	xdev->regs = devm_ioremap_resource(&pdev->dev, io);
-	if (IS_ERR(xdev->regs))
-		return PTR_ERR(xdev->regs);
-
+	if (IS_ERR(xdev->regs)) {
+		err = PTR_ERR(xdev->regs);
+		goto disable_clks;
+	}
 	/* Retrieve the DMA engine properties from the device tree */
 	xdev->has_sg = of_property_read_bool(node, "xlnx,include-sg");
-	if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA)
+	xdev->max_buffer_len = GENMASK(XILINX_DMA_MAX_TRANS_LEN_MAX - 1, 0);
+
+	if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
 		xdev->mcdma = of_property_read_bool(node, "xlnx,mcdma");
+		if (!of_property_read_u32(node, "xlnx,sg-length-width",
+					  &len_width)) {
+			if (len_width < XILINX_DMA_MAX_TRANS_LEN_MIN ||
+			    len_width > XILINX_DMA_V2_MAX_TRANS_LEN_MAX) {
+				dev_warn(xdev->dev,
+					 "invalid xlnx,sg-length-width property value. Using default width\n");
+			} else {
+				if (len_width > XILINX_DMA_MAX_TRANS_LEN_MAX)
+					dev_warn(xdev->dev, "Please ensure that IP supports buffer length > 23 bits\n");
+				xdev->max_buffer_len =
+					GENMASK(len_width - 1, 0);
+			}
+		}
+	}
 
 	if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
 		err = of_property_read_u32(node, "xlnx,num-fstores",
@@ -2719,8 +2759,10 @@ static int xilinx_dma_probe(struct platform_device *pdev)
 	/* Initialize the channels */
 	for_each_child_of_node(node, child) {
 		err = xilinx_dma_child_probe(xdev, child);
-		if (err < 0)
-			goto disable_clks;
+		if (err < 0) {
+			of_node_put(child);
+			goto error;
+		}
 	}
 
 	if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
@@ -2753,12 +2795,12 @@ static int xilinx_dma_probe(struct platform_device *pdev)
 
 	return 0;
 
-disable_clks:
-	xdma_disable_allclks(xdev);
 error:
 	for (i = 0; i < xdev->nr_channels; i++)
 		if (xdev->chan[i])
 			xilinx_dma_chan_remove(xdev->chan[i]);
 
+disable_clks:
+	xdma_disable_allclks(xdev);
 	return err;
 }
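Note: the largest change above replaces the hard-coded 23-bit AXI DMA transfer-length mask (GENMASK(22, 0)) with a per-device max_buffer_len derived from the optional "xlnx,sg-length-width" device-tree property. The snippet below is a minimal, self-contained userspace sketch of that mask selection and clamping logic, not driver code: GENMASK() is re-implemented locally, len_width = 26 stands in for a hypothetical device-tree value, and pick_max_buffer_len()/calc_copysize() only mirror the shape of the new probe logic and xilinx_dma_calc_copysize() helper.

```c
#include <stdio.h>
#include <stddef.h>

/* Local stand-in for the kernel's GENMASK(h, l): bits l..h set. */
#define GENMASK(h, l) \
	(((~0UL) << (l)) & (~0UL >> (sizeof(unsigned long) * 8 - 1 - (h))))

/* Mirror of the new driver constants: 23-bit length register by default,
 * widths of 8..26 bits may be advertised via "xlnx,sg-length-width". */
#define XILINX_DMA_MAX_TRANS_LEN_MIN	8
#define XILINX_DMA_MAX_TRANS_LEN_MAX	23
#define XILINX_DMA_V2_MAX_TRANS_LEN_MAX	26

/* Same decision xilinx_dma_probe() now makes: fall back to the 23-bit
 * mask unless a valid wider length register width is advertised. */
static unsigned long pick_max_buffer_len(unsigned int len_width)
{
	if (len_width < XILINX_DMA_MAX_TRANS_LEN_MIN ||
	    len_width > XILINX_DMA_V2_MAX_TRANS_LEN_MAX)
		return GENMASK(XILINX_DMA_MAX_TRANS_LEN_MAX - 1, 0);
	return GENMASK(len_width - 1, 0);
}

/* Shape of xilinx_dma_calc_copysize(): one descriptor never carries
 * more than max_buffer_len bytes of the remaining request. */
static size_t calc_copysize(size_t size, size_t done, unsigned long max_buffer_len)
{
	size_t remaining = size - done;

	return remaining < max_buffer_len ? remaining : max_buffer_len;
}

int main(void)
{
	unsigned long dflt = pick_max_buffer_len(0);   /* property absent/invalid */
	unsigned long wide = pick_max_buffer_len(26);  /* hypothetical DT value */

	printf("default mask: 0x%lx, 26-bit mask: 0x%lx\n", dflt, wide);
	printf("first chunk of a 100 MiB request: %zu bytes\n",
	       calc_copysize(100UL << 20, 0, wide));
	return 0;
}
```

Running the sketch prints the 23-bit mask for the fallback case and the 26-bit mask for the widened one, which is the same clamp the prep_slave_sg and prep_dma_cyclic paths now apply per segment via xilinx_dma_calc_copysize().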