path: root/drivers/dma
author    Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-08 16:58:07 +0000
committer Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-08 16:58:07 +0000
commit    5a6d13c86f2fe6304450e907fc1d8d72da82efef (patch)
tree      1bd4e8854203c575eabaa99e2c509f8886065733 /drivers/dma
parent    Adding upstream version 6.1.76. (diff)
Adding upstream version 6.1.82. (tag: upstream/6.1.82)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'drivers/dma')
-rw-r--r--  drivers/dma/apple-admac.c                |  5
-rw-r--r--  drivers/dma/fsl-dpaa2-qdma/dpaa2-qdma.c  | 10
-rw-r--r--  drivers/dma/fsl-qdma.c                   | 50
-rw-r--r--  drivers/dma/ioat/dma.c                   | 12
-rw-r--r--  drivers/dma/ptdma/ptdma-dmaengine.c      |  2
-rw-r--r--  drivers/dma/sh/shdma.h                   |  2
-rw-r--r--  drivers/dma/ti/edma.c                    | 10
-rw-r--r--  drivers/dma/ti/k3-udma.c                 | 10
8 files changed, 57 insertions(+), 44 deletions(-)
diff --git a/drivers/dma/apple-admac.c b/drivers/dma/apple-admac.c
index 4cf8da77b..cac4532fe 100644
--- a/drivers/dma/apple-admac.c
+++ b/drivers/dma/apple-admac.c
@@ -56,6 +56,8 @@
#define REG_BUS_WIDTH(ch) (0x8040 + (ch) * 0x200)
+#define BUS_WIDTH_WORD_SIZE GENMASK(3, 0)
+#define BUS_WIDTH_FRAME_SIZE GENMASK(7, 4)
#define BUS_WIDTH_8BIT 0x00
#define BUS_WIDTH_16BIT 0x01
#define BUS_WIDTH_32BIT 0x02
@@ -739,7 +741,8 @@ static int admac_device_config(struct dma_chan *chan,
struct admac_data *ad = adchan->host;
bool is_tx = admac_chan_direction(adchan->no) == DMA_MEM_TO_DEV;
int wordsize = 0;
- u32 bus_width = 0;
+ u32 bus_width = readl_relaxed(ad->base + REG_BUS_WIDTH(adchan->no)) &
+ ~(BUS_WIDTH_WORD_SIZE | BUS_WIDTH_FRAME_SIZE);
switch (is_tx ? config->dst_addr_width : config->src_addr_width) {
case DMA_SLAVE_BUSWIDTH_1_BYTE:
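Note: the apple-admac hunk switches admac_device_config() from zero-initializing bus_width to a read-modify-write of REG_BUS_WIDTH, clearing only the word-size and frame-size fields and preserving whatever else the register holds. A minimal userspace sketch of that mask-and-preserve pattern, with GENMASK() reimplemented locally and a made-up register value standing in for the hardware read:

    #include <stdint.h>
    #include <stdio.h>

    /* local stand-in for the kernel's GENMASK() */
    #define GENMASK(h, l) (((~0u) << (l)) & (~0u >> (31 - (h))))

    #define BUS_WIDTH_WORD_SIZE  GENMASK(3, 0)
    #define BUS_WIDTH_FRAME_SIZE GENMASK(7, 4)

    int main(void)
    {
        uint32_t reg = 0xdeadbe31;   /* pretend current register content */
        /* clear only the two fields we are about to rewrite */
        uint32_t bus_width = reg & ~(BUS_WIDTH_WORD_SIZE | BUS_WIDTH_FRAME_SIZE);
        bus_width |= 0x02;           /* e.g. a 32-bit word size */
        printf("old 0x%08x -> new 0x%08x\n", reg, bus_width);
        return 0;
    }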
diff --git a/drivers/dma/fsl-dpaa2-qdma/dpaa2-qdma.c b/drivers/dma/fsl-dpaa2-qdma/dpaa2-qdma.c
index 8dd40d00a..6b829d347 100644
--- a/drivers/dma/fsl-dpaa2-qdma/dpaa2-qdma.c
+++ b/drivers/dma/fsl-dpaa2-qdma/dpaa2-qdma.c
@@ -38,15 +38,17 @@ static int dpaa2_qdma_alloc_chan_resources(struct dma_chan *chan)
if (!dpaa2_chan->fd_pool)
goto err;
- dpaa2_chan->fl_pool = dma_pool_create("fl_pool", dev,
- sizeof(struct dpaa2_fl_entry),
- sizeof(struct dpaa2_fl_entry), 0);
+ dpaa2_chan->fl_pool =
+ dma_pool_create("fl_pool", dev,
+ sizeof(struct dpaa2_fl_entry) * 3,
+ sizeof(struct dpaa2_fl_entry), 0);
+
if (!dpaa2_chan->fl_pool)
goto err_fd;
dpaa2_chan->sdd_pool =
dma_pool_create("sdd_pool", dev,
- sizeof(struct dpaa2_qdma_sd_d),
+ sizeof(struct dpaa2_qdma_sd_d) * 2,
sizeof(struct dpaa2_qdma_sd_d), 0);
if (!dpaa2_chan->sdd_pool)
goto err_fl;
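Note: the dpaa2-qdma hunk enlarges the element size of the fl_pool (three struct dpaa2_fl_entry per element) and the sdd_pool (two struct dpaa2_qdma_sd_d per element), so one pool element covers all the entries the driver fills back to back; previously each element was only one entry wide. A minimal sketch of why the element size matters, using a hypothetical entry type and plain malloc() in place of dma_pool_create():

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    struct fl_entry { uint64_t addr; uint32_t len; uint32_t ctrl; };  /* hypothetical */

    int main(void)
    {
        /* one "pool element" must cover all three entries written contiguously */
        size_t elem_size = sizeof(struct fl_entry) * 3;
        struct fl_entry *fle = malloc(elem_size);

        if (!fle)
            return 1;
        memset(fle, 0, elem_size);   /* touches fle[0]..fle[2] without overflow */
        printf("element size: %zu bytes\n", elem_size);
        free(fle);
        return 0;
    }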
diff --git a/drivers/dma/fsl-qdma.c b/drivers/dma/fsl-qdma.c
index 045ead46e..7082a5a68 100644
--- a/drivers/dma/fsl-qdma.c
+++ b/drivers/dma/fsl-qdma.c
@@ -109,6 +109,7 @@
#define FSL_QDMA_CMD_WTHROTL_OFFSET 20
#define FSL_QDMA_CMD_DSEN_OFFSET 19
#define FSL_QDMA_CMD_LWC_OFFSET 16
+#define FSL_QDMA_CMD_PF BIT(17)
/* Field definition for Descriptor status */
#define QDMA_CCDF_STATUS_RTE BIT(5)
@@ -384,7 +385,8 @@ static void fsl_qdma_comp_fill_memcpy(struct fsl_qdma_comp *fsl_comp,
qdma_csgf_set_f(csgf_dest, len);
/* Descriptor Buffer */
cmd = cpu_to_le32(FSL_QDMA_CMD_RWTTYPE <<
- FSL_QDMA_CMD_RWTTYPE_OFFSET);
+ FSL_QDMA_CMD_RWTTYPE_OFFSET) |
+ FSL_QDMA_CMD_PF;
sdf->data = QDMA_SDDF_CMD(cmd);
cmd = cpu_to_le32(FSL_QDMA_CMD_RWTTYPE <<
@@ -514,11 +516,11 @@ static struct fsl_qdma_queue
queue_temp = queue_head + i + (j * queue_num);
queue_temp->cq =
- dma_alloc_coherent(&pdev->dev,
- sizeof(struct fsl_qdma_format) *
- queue_size[i],
- &queue_temp->bus_addr,
- GFP_KERNEL);
+ dmam_alloc_coherent(&pdev->dev,
+ sizeof(struct fsl_qdma_format) *
+ queue_size[i],
+ &queue_temp->bus_addr,
+ GFP_KERNEL);
if (!queue_temp->cq)
return NULL;
queue_temp->block_base = fsl_qdma->block_base +
@@ -563,11 +565,11 @@ static struct fsl_qdma_queue
/*
* Buffer for queue command
*/
- status_head->cq = dma_alloc_coherent(&pdev->dev,
- sizeof(struct fsl_qdma_format) *
- status_size,
- &status_head->bus_addr,
- GFP_KERNEL);
+ status_head->cq = dmam_alloc_coherent(&pdev->dev,
+ sizeof(struct fsl_qdma_format) *
+ status_size,
+ &status_head->bus_addr,
+ GFP_KERNEL);
if (!status_head->cq) {
devm_kfree(&pdev->dev, status_head);
return NULL;
@@ -805,7 +807,7 @@ fsl_qdma_irq_init(struct platform_device *pdev,
int i;
int cpu;
int ret;
- char irq_name[20];
+ char irq_name[32];
fsl_qdma->error_irq =
platform_get_irq_byname(pdev, "qdma-error");
@@ -1201,10 +1203,6 @@ static int fsl_qdma_probe(struct platform_device *pdev)
if (!fsl_qdma->queue)
return -ENOMEM;
- ret = fsl_qdma_irq_init(pdev, fsl_qdma);
- if (ret)
- return ret;
-
fsl_qdma->irq_base = platform_get_irq_byname(pdev, "qdma-queue0");
if (fsl_qdma->irq_base < 0)
return fsl_qdma->irq_base;
@@ -1243,16 +1241,19 @@ static int fsl_qdma_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, fsl_qdma);
- ret = dma_async_device_register(&fsl_qdma->dma_dev);
+ ret = fsl_qdma_reg_init(fsl_qdma);
if (ret) {
- dev_err(&pdev->dev,
- "Can't register NXP Layerscape qDMA engine.\n");
+ dev_err(&pdev->dev, "Can't Initialize the qDMA engine.\n");
return ret;
}
- ret = fsl_qdma_reg_init(fsl_qdma);
+ ret = fsl_qdma_irq_init(pdev, fsl_qdma);
+ if (ret)
+ return ret;
+
+ ret = dma_async_device_register(&fsl_qdma->dma_dev);
if (ret) {
- dev_err(&pdev->dev, "Can't Initialize the qDMA engine.\n");
+ dev_err(&pdev->dev, "Can't register NXP Layerscape qDMA engine.\n");
return ret;
}
@@ -1272,8 +1273,6 @@ static void fsl_qdma_cleanup_vchan(struct dma_device *dmadev)
static int fsl_qdma_remove(struct platform_device *pdev)
{
- int i;
- struct fsl_qdma_queue *status;
struct device_node *np = pdev->dev.of_node;
struct fsl_qdma_engine *fsl_qdma = platform_get_drvdata(pdev);
@@ -1282,11 +1281,6 @@ static int fsl_qdma_remove(struct platform_device *pdev)
of_dma_controller_free(np);
dma_async_device_unregister(&fsl_qdma->dma_dev);
- for (i = 0; i < fsl_qdma->block_number; i++) {
- status = fsl_qdma->status[i];
- dma_free_coherent(&pdev->dev, sizeof(struct fsl_qdma_format) *
- status->n_cq, status->cq, status->bus_addr);
- }
return 0;
}
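Note: the fsl-qdma changes bundle several fixes: the queue and status rings move to dmam_alloc_coherent() so the memory is device-managed and the open-coded free loop in fsl_qdma_remove() can be dropped, irq_name grows from 20 to 32 bytes, and fsl_qdma_probe() is reordered so fsl_qdma_reg_init() and fsl_qdma_irq_init() run before dma_async_device_register() exposes the channels. A compilable outline of that probe ordering, with stubbed-out hypothetical helpers rather than the real driver functions:

    #include <stdio.h>

    /* stand-ins for the real init steps; each returns 0 on success */
    static int reg_init(void)       { return 0; }  /* program controller registers */
    static int irq_init(void)       { return 0; }  /* request error/queue IRQs */
    static int async_register(void) { return 0; }  /* make channels visible to users */

    static int probe(void)
    {
        int ret;

        ret = reg_init();        /* hardware must be initialized first */
        if (ret)
            return ret;
        ret = irq_init();        /* IRQs only after the block is sane */
        if (ret)
            return ret;
        return async_register(); /* expose the device last */
    }

    int main(void)
    {
        printf("probe: %d\n", probe());
        return 0;
    }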
diff --git a/drivers/dma/ioat/dma.c b/drivers/dma/ioat/dma.c
index e2070df6c..0b846c605 100644
--- a/drivers/dma/ioat/dma.c
+++ b/drivers/dma/ioat/dma.c
@@ -584,11 +584,11 @@ desc_get_errstat(struct ioatdma_chan *ioat_chan, struct ioat_ring_ent *desc)
}
/**
- * __cleanup - reclaim used descriptors
+ * __ioat_cleanup - reclaim used descriptors
* @ioat_chan: channel (ring) to clean
* @phys_complete: zeroed (or not) completion address (from status)
*/
-static void __cleanup(struct ioatdma_chan *ioat_chan, dma_addr_t phys_complete)
+static void __ioat_cleanup(struct ioatdma_chan *ioat_chan, dma_addr_t phys_complete)
{
struct ioatdma_device *ioat_dma = ioat_chan->ioat_dma;
struct ioat_ring_ent *desc;
@@ -675,7 +675,7 @@ static void ioat_cleanup(struct ioatdma_chan *ioat_chan)
spin_lock_bh(&ioat_chan->cleanup_lock);
if (ioat_cleanup_preamble(ioat_chan, &phys_complete))
- __cleanup(ioat_chan, phys_complete);
+ __ioat_cleanup(ioat_chan, phys_complete);
if (is_ioat_halted(*ioat_chan->completion)) {
u32 chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
@@ -712,7 +712,7 @@ static void ioat_restart_channel(struct ioatdma_chan *ioat_chan)
ioat_quiesce(ioat_chan, 0);
if (ioat_cleanup_preamble(ioat_chan, &phys_complete))
- __cleanup(ioat_chan, phys_complete);
+ __ioat_cleanup(ioat_chan, phys_complete);
__ioat_restart_chan(ioat_chan);
}
@@ -786,7 +786,7 @@ static void ioat_eh(struct ioatdma_chan *ioat_chan)
/* cleanup so tail points to descriptor that caused the error */
if (ioat_cleanup_preamble(ioat_chan, &phys_complete))
- __cleanup(ioat_chan, phys_complete);
+ __ioat_cleanup(ioat_chan, phys_complete);
chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
pci_read_config_dword(pdev, IOAT_PCI_CHANERR_INT_OFFSET, &chanerr_int);
@@ -943,7 +943,7 @@ void ioat_timer_event(struct timer_list *t)
/* timer restarted in ioat_cleanup_preamble
* and IOAT_COMPLETION_ACK cleared
*/
- __cleanup(ioat_chan, phys_complete);
+ __ioat_cleanup(ioat_chan, phys_complete);
goto unlock_out;
}
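Note: the ioat hunks only rename the static helper __cleanup() to __ioat_cleanup(); behavior is unchanged. The likely motivation (an inference, not stated in the diff) is to free up the __cleanup name used by the compiler's scope-exit attribute and the kernel's generic cleanup helpers. For reference, a standalone GCC/Clang example of that scope-exit attribute, unrelated to the driver itself:

    #include <stdio.h>
    #include <stdlib.h>

    /* scope-exit hook: called with a pointer to the annotated variable */
    static void free_buf(char **p)
    {
        free(*p);
        puts("buffer released on scope exit");
    }

    int main(void)
    {
        __attribute__((cleanup(free_buf))) char *buf = malloc(64);

        if (!buf)
            return 1;
        snprintf(buf, 64, "hello");
        printf("%s\n", buf);
        return 0;   /* free_buf(&buf) runs automatically here */
    }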
diff --git a/drivers/dma/ptdma/ptdma-dmaengine.c b/drivers/dma/ptdma/ptdma-dmaengine.c
index 1aa65e5de..f79240734 100644
--- a/drivers/dma/ptdma/ptdma-dmaengine.c
+++ b/drivers/dma/ptdma/ptdma-dmaengine.c
@@ -385,8 +385,6 @@ int pt_dmaengine_register(struct pt_device *pt)
chan->vc.desc_free = pt_do_cleanup;
vchan_init(&chan->vc, dma_dev);
- dma_set_mask_and_coherent(pt->dev, DMA_BIT_MASK(64));
-
ret = dma_async_device_register(dma_dev);
if (ret)
goto err_reg;
diff --git a/drivers/dma/sh/shdma.h b/drivers/dma/sh/shdma.h
index 9c121a4b3..f97d80343 100644
--- a/drivers/dma/sh/shdma.h
+++ b/drivers/dma/sh/shdma.h
@@ -25,7 +25,7 @@ struct sh_dmae_chan {
const struct sh_dmae_slave_config *config; /* Slave DMA configuration */
int xmit_shift; /* log_2(bytes_per_xfer) */
void __iomem *base;
- char dev_id[16]; /* unique name per DMAC of channel */
+ char dev_id[32]; /* unique name per DMAC of channel */
int pm_error;
dma_addr_t slave_addr;
};
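Note: this dev_id[16] -> dev_id[32] change and the earlier irq_name[20] -> irq_name[32] change in fsl-qdma follow the same pattern: both buffers receive snprintf()-formatted names, and the old sizes could truncate (and trigger -Wformat-truncation) with longer device names. A small standalone illustration, using a made-up format string and device name:

    #include <stdio.h>

    int main(void)
    {
        char dev_id[32];
        const char *dmac = "e7010020.dma-controller";   /* plausible long OF name */
        int n = snprintf(dev_id, sizeof(dev_id), "%s.%d", dmac, 11);

        if (n < 0 || (size_t)n >= sizeof(dev_id))
            puts("name would have been truncated");
        else
            printf("dev_id = %s\n", dev_id);   /* fits in 32 bytes, not in 16 */
        return 0;
    }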
diff --git a/drivers/dma/ti/edma.c b/drivers/dma/ti/edma.c
index 7ec6e5d72..9212ac9f9 100644
--- a/drivers/dma/ti/edma.c
+++ b/drivers/dma/ti/edma.c
@@ -2413,6 +2413,11 @@ static int edma_probe(struct platform_device *pdev)
if (irq > 0) {
irq_name = devm_kasprintf(dev, GFP_KERNEL, "%s_ccint",
dev_name(dev));
+ if (!irq_name) {
+ ret = -ENOMEM;
+ goto err_disable_pm;
+ }
+
ret = devm_request_irq(dev, irq, dma_irq_handler, 0, irq_name,
ecc);
if (ret) {
@@ -2429,6 +2434,11 @@ static int edma_probe(struct platform_device *pdev)
if (irq > 0) {
irq_name = devm_kasprintf(dev, GFP_KERNEL, "%s_ccerrint",
dev_name(dev));
+ if (!irq_name) {
+ ret = -ENOMEM;
+ goto err_disable_pm;
+ }
+
ret = devm_request_irq(dev, irq, dma_ccerr_handler, 0, irq_name,
ecc);
if (ret) {
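Note: the two edma hunks add the previously missing NULL check after devm_kasprintf() before the formatted name is handed to devm_request_irq(); the allocation can fail. A userspace analogue of the same check-before-use pattern, using asprintf() (a GNU extension) and a made-up device name:

    #define _GNU_SOURCE
    #include <stdio.h>
    #include <stdlib.h>

    int main(void)
    {
        char *irq_name = NULL;

        /* formatted allocation can fail: check before passing the name on */
        if (asprintf(&irq_name, "%s_ccint", "49000000.edma") < 0)
            return 1;   /* -ENOMEM equivalent */

        printf("requesting irq with name %s\n", irq_name);
        free(irq_name);
        return 0;
    }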
diff --git a/drivers/dma/ti/k3-udma.c b/drivers/dma/ti/k3-udma.c
index b86b809eb..82e7acfda 100644
--- a/drivers/dma/ti/k3-udma.c
+++ b/drivers/dma/ti/k3-udma.c
@@ -3963,6 +3963,7 @@ static void udma_desc_pre_callback(struct virt_dma_chan *vc,
{
struct udma_chan *uc = to_udma_chan(&vc->chan);
struct udma_desc *d;
+ u8 status;
if (!vd)
return;
@@ -3972,12 +3973,12 @@ static void udma_desc_pre_callback(struct virt_dma_chan *vc,
if (d->metadata_size)
udma_fetch_epib(uc, d);
- /* Provide residue information for the client */
if (result) {
void *desc_vaddr = udma_curr_cppi5_desc_vaddr(d, d->desc_idx);
if (cppi5_desc_get_type(desc_vaddr) ==
CPPI5_INFO0_DESC_TYPE_VAL_HOST) {
+ /* Provide residue information for the client */
result->residue = d->residue -
cppi5_hdesc_get_pktlen(desc_vaddr);
if (result->residue)
@@ -3986,7 +3987,12 @@ static void udma_desc_pre_callback(struct virt_dma_chan *vc,
result->result = DMA_TRANS_NOERROR;
} else {
result->residue = 0;
- result->result = DMA_TRANS_NOERROR;
+ /* Propagate TR Response errors to the client */
+ status = d->hwdesc[0].tr_resp_base->status;
+ if (status)
+ result->result = DMA_TRANS_ABORTED;
+ else
+ result->result = DMA_TRANS_NOERROR;
}
}
}
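Note: the k3-udma hunk makes udma_desc_pre_callback() read the status byte from the first TR response and report DMA_TRANS_ABORTED to the client when it is non-zero, instead of unconditionally claiming DMA_TRANS_NOERROR. A minimal sketch of that status-to-result mapping, with local stand-in types rather than the dmaengine headers:

    #include <stdint.h>
    #include <stdio.h>

    /* local stand-ins for the dmaengine result codes */
    enum dma_result { DMA_TRANS_NOERROR, DMA_TRANS_ABORTED };

    static enum dma_result tr_status_to_result(uint8_t status)
    {
        /* any non-zero TR response status means the transfer did not complete */
        return status ? DMA_TRANS_ABORTED : DMA_TRANS_NOERROR;
    }

    int main(void)
    {
        printf("status 0x00 -> %d\n", tr_status_to_result(0x00));
        printf("status 0x04 -> %d\n", tr_status_to_result(0x04));
        return 0;
    }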