Diffstat (limited to 'drivers/dma')
-rw-r--r--  drivers/dma/Kconfig                        14
-rw-r--r--  drivers/dma/dmaengine.c                     3
-rw-r--r--  drivers/dma/fsl-dpaa2-qdma/dpaa2-qdma.c    10
-rw-r--r--  drivers/dma/fsl-qdma.c                     50
-rw-r--r--  drivers/dma/idma64.c                        4
-rw-r--r--  drivers/dma/owl-dma.c                       4
-rw-r--r--  drivers/dma/sh/shdma.h                      2
-rw-r--r--  drivers/dma/ti/edma.c                      10
-rw-r--r--  drivers/dma/ti/k3-udma.c                   10
-rw-r--r--  drivers/dma/xilinx/xilinx_dpdma.c          13
10 files changed, 73 insertions(+), 47 deletions(-)
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index 7e1bd79fb..02b98f979 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -614,16 +614,16 @@ config TEGRA20_APB_DMA
config TEGRA210_ADMA
tristate "NVIDIA Tegra210 ADMA support"
- depends on (ARCH_TEGRA_210_SOC || COMPILE_TEST)
+ depends on (ARCH_TEGRA || COMPILE_TEST)
select DMA_ENGINE
select DMA_VIRTUAL_CHANNELS
help
- Support for the NVIDIA Tegra210 ADMA controller driver. The
- DMA controller has multiple DMA channels and is used to service
- various audio clients in the Tegra210 audio processing engine
- (APE). This DMA controller transfers data from memory to
- peripheral and vice versa. It does not support memory to
- memory data transfer.
+ Support for the NVIDIA Tegra210/Tegra186/Tegra194/Tegra234 ADMA
+ controller driver. The DMA controller has multiple DMA channels
+ and is used to service various audio clients in the Tegra210
+ audio processing engine (APE). This DMA controller transfers
+ data from memory to peripheral and vice versa. It does not
+ support memory to memory data transfer.
config TIMB_DMA
tristate "Timberdale FPGA DMA support"
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index 4ec7bb58c..9559ebd61 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -1108,6 +1108,9 @@ EXPORT_SYMBOL_GPL(dma_async_device_channel_register);
static void __dma_async_device_channel_unregister(struct dma_device *device,
struct dma_chan *chan)
{
+ if (chan->local == NULL)
+ return;
+
WARN_ONCE(!device->device_release && chan->client_count,
"%s called while %d clients hold a reference\n",
__func__, chan->client_count);
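
The early return makes __dma_async_device_channel_unregister() safe against channels that were never fully registered or have already been torn down: chan->local is the per-CPU reference count allocated at registration, so a NULL there means there is nothing to undo. A minimal sketch of the pattern, with hypothetical names:

/* Sketch: key idempotent teardown off the field that marks "registered". */
struct example_chan {
	int __percpu *local;		/* non-NULL only while registered */
};

static void example_chan_teardown(struct example_chan *c)
{
	if (c->local == NULL)		/* never registered, or already gone */
		return;

	free_percpu(c->local);
	c->local = NULL;		/* a repeated call is now a no-op */
}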
diff --git a/drivers/dma/fsl-dpaa2-qdma/dpaa2-qdma.c b/drivers/dma/fsl-dpaa2-qdma/dpaa2-qdma.c
index 4ae057922..2d905f063 100644
--- a/drivers/dma/fsl-dpaa2-qdma/dpaa2-qdma.c
+++ b/drivers/dma/fsl-dpaa2-qdma/dpaa2-qdma.c
@@ -38,15 +38,17 @@ static int dpaa2_qdma_alloc_chan_resources(struct dma_chan *chan)
if (!dpaa2_chan->fd_pool)
goto err;
- dpaa2_chan->fl_pool = dma_pool_create("fl_pool", dev,
- sizeof(struct dpaa2_fl_entry),
- sizeof(struct dpaa2_fl_entry), 0);
+ dpaa2_chan->fl_pool =
+ dma_pool_create("fl_pool", dev,
+ sizeof(struct dpaa2_fl_entry) * 3,
+ sizeof(struct dpaa2_fl_entry), 0);
+
if (!dpaa2_chan->fl_pool)
goto err_fd;
dpaa2_chan->sdd_pool =
dma_pool_create("sdd_pool", dev,
- sizeof(struct dpaa2_qdma_sd_d),
+ sizeof(struct dpaa2_qdma_sd_d) * 2,
sizeof(struct dpaa2_qdma_sd_d), 0);
if (!dpaa2_chan->sdd_pool)
goto err_fl;
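
The new multipliers size each pool element for everything the driver writes per transfer. Assuming, as the factors imply, three consecutive frame-list entries and a source/destination descriptor pair per frame descriptor, pools sized for a single entry would let writes to entry[1] and entry[2] land past the end of the allocation. A hedged sketch of what one element now holds:

/*
 * Sketch (assumed layout): one fl_pool element covers the three
 * frame-list entries filled per transfer, so indexing stays in bounds.
 */
struct dpaa2_fl_entry *fle;
dma_addr_t fle_iova;

fle = dma_pool_alloc(dpaa2_chan->fl_pool, GFP_NOWAIT, &fle_iova);
if (fle) {
	/* fle[0], fle[1] and fle[2] all lie inside the allocation */
}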
diff --git a/drivers/dma/fsl-qdma.c b/drivers/dma/fsl-qdma.c
index 045ead46e..7082a5a68 100644
--- a/drivers/dma/fsl-qdma.c
+++ b/drivers/dma/fsl-qdma.c
@@ -109,6 +109,7 @@
#define FSL_QDMA_CMD_WTHROTL_OFFSET 20
#define FSL_QDMA_CMD_DSEN_OFFSET 19
#define FSL_QDMA_CMD_LWC_OFFSET 16
+#define FSL_QDMA_CMD_PF BIT(17)
/* Field definition for Descriptor status */
#define QDMA_CCDF_STATUS_RTE BIT(5)
@@ -384,7 +385,8 @@ static void fsl_qdma_comp_fill_memcpy(struct fsl_qdma_comp *fsl_comp,
qdma_csgf_set_f(csgf_dest, len);
/* Descriptor Buffer */
cmd = cpu_to_le32(FSL_QDMA_CMD_RWTTYPE <<
- FSL_QDMA_CMD_RWTTYPE_OFFSET);
+ FSL_QDMA_CMD_RWTTYPE_OFFSET) |
+ FSL_QDMA_CMD_PF;
sdf->data = QDMA_SDDF_CMD(cmd);
cmd = cpu_to_le32(FSL_QDMA_CMD_RWTTYPE <<
@@ -514,11 +516,11 @@ static struct fsl_qdma_queue
queue_temp = queue_head + i + (j * queue_num);
queue_temp->cq =
- dma_alloc_coherent(&pdev->dev,
- sizeof(struct fsl_qdma_format) *
- queue_size[i],
- &queue_temp->bus_addr,
- GFP_KERNEL);
+ dmam_alloc_coherent(&pdev->dev,
+ sizeof(struct fsl_qdma_format) *
+ queue_size[i],
+ &queue_temp->bus_addr,
+ GFP_KERNEL);
if (!queue_temp->cq)
return NULL;
queue_temp->block_base = fsl_qdma->block_base +
@@ -563,11 +565,11 @@ static struct fsl_qdma_queue
/*
* Buffer for queue command
*/
- status_head->cq = dma_alloc_coherent(&pdev->dev,
- sizeof(struct fsl_qdma_format) *
- status_size,
- &status_head->bus_addr,
- GFP_KERNEL);
+ status_head->cq = dmam_alloc_coherent(&pdev->dev,
+ sizeof(struct fsl_qdma_format) *
+ status_size,
+ &status_head->bus_addr,
+ GFP_KERNEL);
if (!status_head->cq) {
devm_kfree(&pdev->dev, status_head);
return NULL;
@@ -805,7 +807,7 @@ fsl_qdma_irq_init(struct platform_device *pdev,
int i;
int cpu;
int ret;
- char irq_name[20];
+ char irq_name[32];
fsl_qdma->error_irq =
platform_get_irq_byname(pdev, "qdma-error");
@@ -1201,10 +1203,6 @@ static int fsl_qdma_probe(struct platform_device *pdev)
if (!fsl_qdma->queue)
return -ENOMEM;
- ret = fsl_qdma_irq_init(pdev, fsl_qdma);
- if (ret)
- return ret;
-
fsl_qdma->irq_base = platform_get_irq_byname(pdev, "qdma-queue0");
if (fsl_qdma->irq_base < 0)
return fsl_qdma->irq_base;
@@ -1243,16 +1241,19 @@ static int fsl_qdma_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, fsl_qdma);
- ret = dma_async_device_register(&fsl_qdma->dma_dev);
+ ret = fsl_qdma_reg_init(fsl_qdma);
if (ret) {
- dev_err(&pdev->dev,
- "Can't register NXP Layerscape qDMA engine.\n");
+ dev_err(&pdev->dev, "Can't Initialize the qDMA engine.\n");
return ret;
}
- ret = fsl_qdma_reg_init(fsl_qdma);
+ ret = fsl_qdma_irq_init(pdev, fsl_qdma);
+ if (ret)
+ return ret;
+
+ ret = dma_async_device_register(&fsl_qdma->dma_dev);
if (ret) {
- dev_err(&pdev->dev, "Can't Initialize the qDMA engine.\n");
+ dev_err(&pdev->dev, "Can't register NXP Layerscape qDMA engine.\n");
return ret;
}
@@ -1272,8 +1273,6 @@ static void fsl_qdma_cleanup_vchan(struct dma_device *dmadev)
static int fsl_qdma_remove(struct platform_device *pdev)
{
- int i;
- struct fsl_qdma_queue *status;
struct device_node *np = pdev->dev.of_node;
struct fsl_qdma_engine *fsl_qdma = platform_get_drvdata(pdev);
@@ -1282,11 +1281,6 @@ static int fsl_qdma_remove(struct platform_device *pdev)
of_dma_controller_free(np);
dma_async_device_unregister(&fsl_qdma->dma_dev);
- for (i = 0; i < fsl_qdma->block_number; i++) {
- status = fsl_qdma->status[i];
- dma_free_coherent(&pdev->dev, sizeof(struct fsl_qdma_format) *
- status->n_cq, status->cq, status->bus_addr);
- }
return 0;
}
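
The fsl-qdma changes hang together. fsl_qdma_reg_init() now runs before fsl_qdma_irq_init() and dma_async_device_register(), so no interrupt can fire against an uninitialized controller; the queue and status rings move to dmam_alloc_coherent(), whose devres management is why the manual dma_free_coherent() loop disappears from fsl_qdma_remove(); FSL_QDMA_CMD_PF sets an additional command-descriptor bit (prefetch, going by the name) on memcpy transfers; and irq_name grows to 32 bytes so generated names cannot truncate. A minimal sketch of the managed-allocation pattern, with hypothetical names:

/* Sketch: devres-managed coherent memory needs no explicit free;
 * it is released when probe fails or the device detaches. */
static int example_probe(struct platform_device *pdev)
{
	dma_addr_t bus_addr;
	void *cq;

	cq = dmam_alloc_coherent(&pdev->dev, SZ_4K, &bus_addr, GFP_KERNEL);
	if (!cq)
		return -ENOMEM;

	/* ... hand cq/bus_addr to the hardware; nothing to free in remove() ... */
	return 0;
}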
diff --git a/drivers/dma/idma64.c b/drivers/dma/idma64.c
index f5a84c846..db506e1f7 100644
--- a/drivers/dma/idma64.c
+++ b/drivers/dma/idma64.c
@@ -167,6 +167,10 @@ static irqreturn_t idma64_irq(int irq, void *dev)
u32 status_err;
unsigned short i;
+ /* Since IRQ may be shared, check if DMA controller is powered on */
+ if (status == GENMASK(31, 0))
+ return IRQ_NONE;
+
dev_vdbg(idma64->dma.dev, "%s: status=%#x\n", __func__, status);
/* Check if we have any interrupt from the DMA controller */
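
The GENMASK(31, 0) test relies on a powered-down MMIO region reading back as all ones, a value a real status word never takes. Because the interrupt line may be shared, returning IRQ_NONE in that case lets the other handlers on the line run. A sketch of the shared-IRQ pattern, with hypothetical device and register names:

static irqreturn_t example_irq(int irq, void *dev_id)
{
	struct example_dev *d = dev_id;
	u32 status = readl(d->regs + EXAMPLE_STATUS);	/* hypothetical */

	if (status == GENMASK(31, 0))	/* device powered off: not ours */
		return IRQ_NONE;
	if (!status)			/* shared line, nothing pending */
		return IRQ_NONE;

	/* ... acknowledge and handle the interrupt ... */
	return IRQ_HANDLED;
}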
diff --git a/drivers/dma/owl-dma.c b/drivers/dma/owl-dma.c
index 04202d75f..695feb344 100644
--- a/drivers/dma/owl-dma.c
+++ b/drivers/dma/owl-dma.c
@@ -249,7 +249,7 @@ static void pchan_update(struct owl_dma_pchan *pchan, u32 reg,
else
regval &= ~val;
- writel(val, pchan->base + reg);
+ writel(regval, pchan->base + reg);
}
static void pchan_writel(struct owl_dma_pchan *pchan, u32 reg, u32 data)
@@ -273,7 +273,7 @@ static void dma_update(struct owl_dma *od, u32 reg, u32 val, bool state)
else
regval &= ~val;
- writel(val, od->base + reg);
+ writel(regval, od->base + reg);
}
static void dma_writel(struct owl_dma *od, u32 reg, u32 data)
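
Both owl-dma hunks fix the same slip in a read-modify-write helper: the code read the register into regval and set or cleared the mask bits there, but then wrote back the original mask (val), so a "clear" call actually wrote the mask into the register. The intended shape:

static void example_rmw(void __iomem *addr, u32 mask, bool set)
{
	u32 regval = readl(addr);	/* read */

	if (set)
		regval |= mask;		/* modify: set bits */
	else
		regval &= ~mask;	/* modify: clear bits */

	writel(regval, addr);		/* write the updated value, not the mask */
}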
diff --git a/drivers/dma/sh/shdma.h b/drivers/dma/sh/shdma.h
index 9c121a4b3..f97d80343 100644
--- a/drivers/dma/sh/shdma.h
+++ b/drivers/dma/sh/shdma.h
@@ -25,7 +25,7 @@ struct sh_dmae_chan {
const struct sh_dmae_slave_config *config; /* Slave DMA configuration */
int xmit_shift; /* log_2(bytes_per_xfer) */
void __iomem *base;
- char dev_id[16]; /* unique name per DMAC of channel */
+ char dev_id[32]; /* unique name per DMAC of channel */
int pm_error;
dma_addr_t slave_addr;
};
diff --git a/drivers/dma/ti/edma.c b/drivers/dma/ti/edma.c
index a1adc8d91..69292d4a0 100644
--- a/drivers/dma/ti/edma.c
+++ b/drivers/dma/ti/edma.c
@@ -2462,6 +2462,11 @@ static int edma_probe(struct platform_device *pdev)
if (irq > 0) {
irq_name = devm_kasprintf(dev, GFP_KERNEL, "%s_ccint",
dev_name(dev));
+ if (!irq_name) {
+ ret = -ENOMEM;
+ goto err_disable_pm;
+ }
+
ret = devm_request_irq(dev, irq, dma_irq_handler, 0, irq_name,
ecc);
if (ret) {
@@ -2478,6 +2483,11 @@ static int edma_probe(struct platform_device *pdev)
if (irq > 0) {
irq_name = devm_kasprintf(dev, GFP_KERNEL, "%s_ccerrint",
dev_name(dev));
+ if (!irq_name) {
+ ret = -ENOMEM;
+ goto err_disable_pm;
+ }
+
ret = devm_request_irq(dev, irq, dma_ccerr_handler, 0, irq_name,
ecc);
if (ret) {
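
devm_kasprintf() returns NULL on allocation failure, and both edma call sites previously passed that straight through as the IRQ name. Each site now bails out with -ENOMEM first. The checked pattern, sketched with a hypothetical handler:

irq_name = devm_kasprintf(dev, GFP_KERNEL, "%s_ccint", dev_name(dev));
if (!irq_name)
	return -ENOMEM;	/* never hand a NULL name to devm_request_irq() */

ret = devm_request_irq(dev, irq, example_handler, 0, irq_name, ecc);
if (ret)
	return ret;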
diff --git a/drivers/dma/ti/k3-udma.c b/drivers/dma/ti/k3-udma.c
index d3902784c..15eecb757 100644
--- a/drivers/dma/ti/k3-udma.c
+++ b/drivers/dma/ti/k3-udma.c
@@ -2877,6 +2877,7 @@ static void udma_desc_pre_callback(struct virt_dma_chan *vc,
{
struct udma_chan *uc = to_udma_chan(&vc->chan);
struct udma_desc *d;
+ u8 status;
if (!vd)
return;
@@ -2886,12 +2887,12 @@ static void udma_desc_pre_callback(struct virt_dma_chan *vc,
if (d->metadata_size)
udma_fetch_epib(uc, d);
- /* Provide residue information for the client */
if (result) {
void *desc_vaddr = udma_curr_cppi5_desc_vaddr(d, d->desc_idx);
if (cppi5_desc_get_type(desc_vaddr) ==
CPPI5_INFO0_DESC_TYPE_VAL_HOST) {
+ /* Provide residue information for the client */
result->residue = d->residue -
cppi5_hdesc_get_pktlen(desc_vaddr);
if (result->residue)
@@ -2900,7 +2901,12 @@ static void udma_desc_pre_callback(struct virt_dma_chan *vc,
result->result = DMA_TRANS_NOERROR;
} else {
result->residue = 0;
- result->result = DMA_TRANS_NOERROR;
+ /* Propagate TR Response errors to the client */
+ status = d->hwdesc[0].tr_resp_base->status;
+ if (status)
+ result->result = DMA_TRANS_ABORTED;
+ else
+ result->result = DMA_TRANS_NOERROR;
}
}
}
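
For TR-mode descriptors the k3-udma completion path used to report DMA_TRANS_NOERROR unconditionally; it now reads the status field of the first TR response and reports DMA_TRANS_ABORTED when the hardware flagged an error. A client using the callback_result interface can then see the failure, sketched below:

/* Sketch: a dmaengine client callback observing the propagated result. */
static void example_callback(void *param, const struct dmaengine_result *res)
{
	if (res->result != DMA_TRANS_NOERROR)
		pr_err("DMA transfer failed (result %d, residue %u)\n",
		       res->result, res->residue);
}

/* installed on the descriptor via: desc->callback_result = example_callback; */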
diff --git a/drivers/dma/xilinx/xilinx_dpdma.c b/drivers/dma/xilinx/xilinx_dpdma.c
index 6c7098032..058c3a6ed 100644
--- a/drivers/dma/xilinx/xilinx_dpdma.c
+++ b/drivers/dma/xilinx/xilinx_dpdma.c
@@ -213,7 +213,8 @@ struct xilinx_dpdma_tx_desc {
* @running: true if the channel is running
* @first_frame: flag for the first frame of stream
* @video_group: flag if multi-channel operation is needed for video channels
- * @lock: lock to access struct xilinx_dpdma_chan
+ * @lock: lock to access struct xilinx_dpdma_chan. Must be taken before
+ * @vchan.lock, if both are to be held.
* @desc_pool: descriptor allocation pool
* @err_task: error IRQ bottom half handler
* @desc: References to descriptors being processed
@@ -1101,12 +1102,14 @@ static void xilinx_dpdma_chan_vsync_irq(struct xilinx_dpdma_chan *chan)
* Complete the active descriptor, if any, promote the pending
* descriptor to active, and queue the next transfer, if any.
*/
+ spin_lock(&chan->vchan.lock);
if (chan->desc.active)
vchan_cookie_complete(&chan->desc.active->vdesc);
chan->desc.active = pending;
chan->desc.pending = NULL;
xilinx_dpdma_chan_queue_transfer(chan);
+ spin_unlock(&chan->vchan.lock);
out:
spin_unlock_irqrestore(&chan->lock, flags);
@@ -1264,10 +1267,12 @@ static void xilinx_dpdma_issue_pending(struct dma_chan *dchan)
struct xilinx_dpdma_chan *chan = to_xilinx_chan(dchan);
unsigned long flags;
- spin_lock_irqsave(&chan->vchan.lock, flags);
+ spin_lock_irqsave(&chan->lock, flags);
+ spin_lock(&chan->vchan.lock);
if (vchan_issue_pending(&chan->vchan))
xilinx_dpdma_chan_queue_transfer(chan);
- spin_unlock_irqrestore(&chan->vchan.lock, flags);
+ spin_unlock(&chan->vchan.lock);
+ spin_unlock_irqrestore(&chan->lock, flags);
}
static int xilinx_dpdma_config(struct dma_chan *dchan,
@@ -1491,7 +1496,9 @@ static void xilinx_dpdma_chan_err_task(struct tasklet_struct *t)
XILINX_DPDMA_EINTR_CHAN_ERR_MASK << chan->id);
spin_lock_irqsave(&chan->lock, flags);
+ spin_lock(&chan->vchan.lock);
xilinx_dpdma_chan_queue_transfer(chan);
+ spin_unlock(&chan->vchan.lock);
spin_unlock_irqrestore(&chan->lock, flags);
}
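
All three xilinx_dpdma hunks enforce the lock order the updated kerneldoc spells out: chan->lock is the outer lock, chan->vchan.lock the inner one, and xilinx_dpdma_chan_queue_transfer() is only called with both held. A single documented order avoids an AB-BA deadlock between the IRQ paths, which already held chan->lock, and xilinx_dpdma_issue_pending(), which previously took only vchan.lock. The resulting pattern:

spin_lock_irqsave(&chan->lock, flags);		/* outer: channel state */
spin_lock(&chan->vchan.lock);			/* inner: virt-chan lists */

xilinx_dpdma_chan_queue_transfer(chan);		/* needs both locks */

spin_unlock(&chan->vchan.lock);
spin_unlock_irqrestore(&chan->lock, flags);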