author     Daniel Baumann <daniel.baumann@progress-linux.org>  2024-08-07 13:11:22 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2024-08-07 13:11:22 +0000
commit     b20732900e4636a467c0183a47f7396700f5f743 (patch)
tree       42f079ff82e701ebcb76829974b4caca3e5b6798 /drivers/dma
parent     Adding upstream version 6.8.12. (diff)
Adding upstream version 6.9.7.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat:
-rw-r--r--  drivers/dma-buf/dma-fence.c                        8
-rw-r--r--  drivers/dma-buf/dma-resv.c                         4
-rw-r--r--  drivers/dma-buf/st-dma-fence.c                     6
-rw-r--r--  drivers/dma-buf/sync_debug.c                       4
-rw-r--r--  drivers/dma/Kconfig                                2
-rw-r--r--  drivers/dma/amba-pl08x.c                           2
-rw-r--r--  drivers/dma/bestcomm/sram.c                        5
-rw-r--r--  drivers/dma/dma-axi-dmac.c                         2
-rw-r--r--  drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c     6
-rw-r--r--  drivers/dma/dw-axi-dmac/dw-axi-dmac.h              1
-rw-r--r--  drivers/dma/fsl-edma-common.c                    101
-rw-r--r--  drivers/dma/fsl-edma-common.h                    161
-rw-r--r--  drivers/dma/fsl-edma-main.c                       19
-rw-r--r--  drivers/dma/idma64.c                               4
-rw-r--r--  drivers/dma/idxd/bus.c                             2
-rw-r--r--  drivers/dma/idxd/cdev.c                            5
-rw-r--r--  drivers/dma/idxd/idxd.h                           14
-rw-r--r--  drivers/dma/idxd/irq.c                             4
-rw-r--r--  drivers/dma/idxd/sysfs.c                          10
-rw-r--r--  drivers/dma/ioat/init.c                           55
-rw-r--r--  drivers/dma/mcf-edma-main.c                        2
-rw-r--r--  drivers/dma/mv_xor_v2.c                            8
-rw-r--r--  drivers/dma/of-dma.c                               2
-rw-r--r--  drivers/dma/pl330.c                                1
-rw-r--r--  drivers/dma/qcom/hidma.c                           6
-rw-r--r--  drivers/dma/ti/k3-psil-j721s2.c                   73
-rw-r--r--  drivers/dma/ti/k3-udma-glue.c                    297
-rw-r--r--  drivers/dma/xilinx/xdma.c                          4
-rw-r--r--  drivers/dma/xilinx/xilinx_dma.c                    6
29 files changed, 595 insertions, 219 deletions
diff --git a/drivers/dma-buf/dma-fence.c b/drivers/dma-buf/dma-fence.c
index e0fd99e61a..0393a9bba3 100644
--- a/drivers/dma-buf/dma-fence.c
+++ b/drivers/dma-buf/dma-fence.c
@@ -102,7 +102,7 @@ static atomic64_t dma_fence_context_counter = ATOMIC64_INIT(1);
*
* * Drivers are allowed to call dma_fence_wait() from their &mmu_notifier
* respectively &mmu_interval_notifier callbacks. This means any code required
- * for fence completeion cannot allocate memory with GFP_NOFS or GFP_NOIO.
+ * for fence completion cannot allocate memory with GFP_NOFS or GFP_NOIO.
* Only GFP_ATOMIC is permissible, which might fail.
*
* Note that only GPU drivers have a reasonable excuse for both requiring
@@ -522,7 +522,7 @@ dma_fence_wait_timeout(struct dma_fence *fence, bool intr, signed long timeout)
EXPORT_SYMBOL(dma_fence_wait_timeout);
/**
- * dma_fence_release - default relese function for fences
+ * dma_fence_release - default release function for fences
* @kref: &dma_fence.recfount
*
* This is the default release functions for &dma_fence. Drivers shouldn't call
@@ -974,8 +974,8 @@ void dma_fence_set_deadline(struct dma_fence *fence, ktime_t deadline)
EXPORT_SYMBOL(dma_fence_set_deadline);
/**
- * dma_fence_describe - Dump fence describtion into seq_file
- * @fence: the 6fence to describe
+ * dma_fence_describe - Dump fence description into seq_file
+ * @fence: the fence to describe
* @seq: the seq_file to put the textual description into
*
* Dump a textual description of the fence and it's state into the seq_file.
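
The fence-signalling rules in the first hunk above forbid blocking allocations on the completion path. A minimal sketch of the only allocation pattern they permit (hypothetical helper and event structure, not part of this patch):

	/* Code running under the fence-signalling rules may only use
	 * GFP_ATOMIC and must cope with failure; it must never fall back
	 * to GFP_NOFS/GFP_NOIO, which can block on reclaim that may
	 * itself wait on fences. */
	static int queue_completion_event(struct dma_fence *fence)
	{
		struct completion_event *ev;	/* illustrative type */

		ev = kmalloc(sizeof(*ev), GFP_ATOMIC);
		if (!ev)
			return -ENOMEM;		/* no blocking retry here */
		ev->context = fence->context;
		ev->seqno = fence->seqno;
		/* ... hand off 'ev' without sleeping ... */
		return 0;
	}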
diff --git a/drivers/dma-buf/dma-resv.c b/drivers/dma-buf/dma-resv.c
index eb8b733065..e2869fb311 100644
--- a/drivers/dma-buf/dma-resv.c
+++ b/drivers/dma-buf/dma-resv.c
@@ -405,7 +405,7 @@ static void dma_resv_iter_walk_unlocked(struct dma_resv_iter *cursor)
*
* Beware that the iterator can be restarted. Code which accumulates statistics
* or similar needs to check for this with dma_resv_iter_is_restarted(). For
- * this reason prefer the locked dma_resv_iter_first() whenver possible.
+ * this reason prefer the locked dma_resv_iter_first() whenever possible.
*
* Returns the first fence from an unlocked dma_resv obj.
*/
@@ -428,7 +428,7 @@ EXPORT_SYMBOL(dma_resv_iter_first_unlocked);
*
* Beware that the iterator can be restarted. Code which accumulates statistics
* or similar needs to check for this with dma_resv_iter_is_restarted(). For
- * this reason prefer the locked dma_resv_iter_next() whenver possible.
+ * this reason prefer the locked dma_resv_iter_next() whenever possible.
*
* Returns the next fence from an unlocked dma_resv obj.
*/
diff --git a/drivers/dma-buf/st-dma-fence.c b/drivers/dma-buf/st-dma-fence.c
index b7c6f7ea9e..6a1bfcd0cc 100644
--- a/drivers/dma-buf/st-dma-fence.c
+++ b/drivers/dma-buf/st-dma-fence.c
@@ -540,6 +540,12 @@ static int race_signal_callback(void *arg)
t[i].before = pass;
t[i].task = kthread_run(thread_signal_callback, &t[i],
"dma-fence:%d", i);
+ if (IS_ERR(t[i].task)) {
+ ret = PTR_ERR(t[i].task);
+ while (--i >= 0)
+ kthread_stop_put(t[i].task);
+ return ret;
+ }
get_task_struct(t[i].task);
}
diff --git a/drivers/dma-buf/sync_debug.c b/drivers/dma-buf/sync_debug.c
index 101394f169..237bce21d1 100644
--- a/drivers/dma-buf/sync_debug.c
+++ b/drivers/dma-buf/sync_debug.c
@@ -110,12 +110,12 @@ static void sync_print_obj(struct seq_file *s, struct sync_timeline *obj)
seq_printf(s, "%s: %d\n", obj->name, obj->value);
- spin_lock_irq(&obj->lock);
+ spin_lock(&obj->lock); /* Caller already disabled IRQ. */
list_for_each(pos, &obj->pt_list) {
struct sync_pt *pt = container_of(pos, struct sync_pt, link);
sync_print_fence(s, &pt->base, false);
}
- spin_unlock_irq(&obj->lock);
+ spin_unlock(&obj->lock);
}
static void sync_print_sync_file(struct seq_file *s,
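
The spin_lock_irq() -> spin_lock() change above is safe because sync_print_obj() is only reached from the debugfs show path, which already holds the timeline list lock with interrupts disabled; a nested spin_unlock_irq() would have re-enabled interrupts too early. The hazard, sketched with generic lock names:

	spin_lock_irq(&timeline_list_lock);	/* IRQs now disabled */
	...
	spin_lock_irq(&obj->lock);		/* harmless, IRQs already off */
	sync_print_fence(...);
	spin_unlock_irq(&obj->lock);		/* BUG: re-enables IRQs while
						 * timeline_list_lock is held */
	...
	spin_unlock_irq(&timeline_list_lock);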
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index 002a5ec806..9fc99cfbef 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -394,7 +394,7 @@ config LS2X_APB_DMA
config MCF_EDMA
tristate "Freescale eDMA engine support, ColdFire mcf5441x SoCs"
- depends on M5441x || COMPILE_TEST
+ depends on M5441x || (COMPILE_TEST && FSL_EDMA=n)
select DMA_ENGINE
select DMA_VIRTUAL_CHANNELS
help
diff --git a/drivers/dma/amba-pl08x.c b/drivers/dma/amba-pl08x.c
index eea8bd33b4..fbf048f432 100644
--- a/drivers/dma/amba-pl08x.c
+++ b/drivers/dma/amba-pl08x.c
@@ -2239,7 +2239,7 @@ static int pl08x_resume(struct dma_chan *chan)
bool pl08x_filter_id(struct dma_chan *chan, void *chan_id)
{
struct pl08x_dma_chan *plchan;
- char *name = chan_id;
+ const char *name = chan_id;
/* Reject channels for devices not bound to this driver */
if (chan->device->dev->driver != &pl08x_amba_driver.drv)
diff --git a/drivers/dma/bestcomm/sram.c b/drivers/dma/bestcomm/sram.c
index 0553956f74..ad74d57cc3 100644
--- a/drivers/dma/bestcomm/sram.c
+++ b/drivers/dma/bestcomm/sram.c
@@ -90,13 +90,8 @@ int bcom_sram_init(struct device_node *sram_node, char *owner)
bcom_sram->rh = rh_create(4);
/* Attach the free zones */
-#if 0
- /* Currently disabled ... for future use only */
- reg_addr_p = of_get_property(sram_node, "available", &psize);
-#else
regaddr_p = NULL;
psize = 0;
-#endif
if (!regaddr_p || !psize) {
/* Attach the whole zone */
diff --git a/drivers/dma/dma-axi-dmac.c b/drivers/dma/dma-axi-dmac.c
index 4e339c04fc..d5a33e4a91 100644
--- a/drivers/dma/dma-axi-dmac.c
+++ b/drivers/dma/dma-axi-dmac.c
@@ -1134,8 +1134,8 @@ static void axi_dmac_remove(struct platform_device *pdev)
{
struct axi_dmac *dmac = platform_get_drvdata(pdev);
- of_dma_controller_free(pdev->dev.of_node);
free_irq(dmac->irq, dmac);
+ of_dma_controller_free(pdev->dev.of_node);
tasklet_kill(&dmac->chan.vchan.task);
dma_async_device_unregister(&dmac->dma_dev);
clk_disable_unprepare(dmac->clk);
diff --git a/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c b/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c
index a86a81ff0c..321446fddd 100644
--- a/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c
+++ b/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c
@@ -302,6 +302,7 @@ static struct axi_dma_desc *axi_desc_alloc(u32 num)
kfree(desc);
return NULL;
}
+ desc->nr_hw_descs = num;
return desc;
}
@@ -328,7 +329,7 @@ static struct axi_dma_lli *axi_desc_get(struct axi_dma_chan *chan,
static void axi_desc_put(struct axi_dma_desc *desc)
{
struct axi_dma_chan *chan = desc->chan;
- int count = atomic_read(&chan->descs_allocated);
+ int count = desc->nr_hw_descs;
struct axi_dma_hw_desc *hw_desc;
int descs_put;
@@ -1139,9 +1140,6 @@ static void axi_chan_block_xfer_complete(struct axi_dma_chan *chan)
/* Remove the completed descriptor from issued list before completing */
list_del(&vd->node);
vchan_cookie_complete(vd);
-
- /* Submit queued descriptors after processing the completed ones */
- axi_chan_start_first_queued(chan);
}
out:
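
The axi_desc_put() change in this file is needed because chan->descs_allocated is a live, channel-wide counter: by the time a descriptor is freed it may already count hw descriptors belonging to newer transfers, so bounding the free loop with it can walk past (or stop short of) what this descriptor owns. Recording the count in the descriptor at allocation time pins the bound. Simplified shape of the fixed put path:

	/* alloc: axi_desc_alloc() now stores the per-descriptor count */
	desc->nr_hw_descs = num;

	/* free: iterate exactly those, independent of channel state */
	for (descs_put = 0; descs_put < desc->nr_hw_descs; descs_put++) {
		hw_desc = &desc->hw_desc[descs_put];
		dma_pool_free(chan->desc_pool, hw_desc->lli, hw_desc->llp);
	}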
diff --git a/drivers/dma/dw-axi-dmac/dw-axi-dmac.h b/drivers/dma/dw-axi-dmac/dw-axi-dmac.h
index 454904d996..ac571b413b 100644
--- a/drivers/dma/dw-axi-dmac/dw-axi-dmac.h
+++ b/drivers/dma/dw-axi-dmac/dw-axi-dmac.h
@@ -104,6 +104,7 @@ struct axi_dma_desc {
u32 completed_blocks;
u32 length;
u32 period_len;
+ u32 nr_hw_descs;
};
struct axi_dma_chan_config {
diff --git a/drivers/dma/fsl-edma-common.c b/drivers/dma/fsl-edma-common.c
index 793f1a7ad5..b18faa7cfe 100644
--- a/drivers/dma/fsl-edma-common.c
+++ b/drivers/dma/fsl-edma-common.c
@@ -97,8 +97,8 @@ static void fsl_edma3_enable_request(struct fsl_edma_chan *fsl_chan)
* ch_mux: With the exception of 0, attempts to write a value
* already in use will be forced to 0.
*/
- if (!edma_readl_chreg(fsl_chan, ch_mux))
- edma_writel_chreg(fsl_chan, fsl_chan->srcid, ch_mux);
+ if (!edma_readl(fsl_chan->edma, fsl_chan->mux_addr))
+ edma_writel(fsl_chan->edma, fsl_chan->srcid, fsl_chan->mux_addr);
}
val = edma_readl_chreg(fsl_chan, ch_csr);
@@ -134,7 +134,7 @@ static void fsl_edma3_disable_request(struct fsl_edma_chan *fsl_chan)
flags = fsl_edma_drvflags(fsl_chan);
if (flags & FSL_EDMA_DRV_HAS_CHMUX)
- edma_writel_chreg(fsl_chan, 0, ch_mux);
+ edma_writel(fsl_chan->edma, 0, fsl_chan->mux_addr);
val &= ~EDMA_V3_CH_CSR_ERQ;
edma_writel_chreg(fsl_chan, val, ch_csr);
@@ -351,39 +351,45 @@ static size_t fsl_edma_desc_residue(struct fsl_edma_chan *fsl_chan,
{
struct fsl_edma_desc *edesc = fsl_chan->edesc;
enum dma_transfer_direction dir = edesc->dirn;
- dma_addr_t cur_addr, dma_addr;
+ dma_addr_t cur_addr, dma_addr, old_addr;
size_t len, size;
u32 nbytes = 0;
int i;
/* calculate the total size in this desc */
for (len = i = 0; i < fsl_chan->edesc->n_tcds; i++) {
- nbytes = le32_to_cpu(edesc->tcd[i].vtcd->nbytes);
+ nbytes = fsl_edma_get_tcd_to_cpu(fsl_chan, edesc->tcd[i].vtcd, nbytes);
if (nbytes & (EDMA_V3_TCD_NBYTES_DMLOE | EDMA_V3_TCD_NBYTES_SMLOE))
nbytes = EDMA_V3_TCD_NBYTES_MLOFF_NBYTES(nbytes);
- len += nbytes * le16_to_cpu(edesc->tcd[i].vtcd->biter);
+ len += nbytes * fsl_edma_get_tcd_to_cpu(fsl_chan, edesc->tcd[i].vtcd, biter);
}
if (!in_progress)
return len;
- if (dir == DMA_MEM_TO_DEV)
- cur_addr = edma_read_tcdreg(fsl_chan, saddr);
- else
- cur_addr = edma_read_tcdreg(fsl_chan, daddr);
+ /* 64bit read is not atomic, need read retry when high 32bit changed */
+ do {
+ if (dir == DMA_MEM_TO_DEV) {
+ old_addr = edma_read_tcdreg(fsl_chan, saddr);
+ cur_addr = edma_read_tcdreg(fsl_chan, saddr);
+ } else {
+ old_addr = edma_read_tcdreg(fsl_chan, daddr);
+ cur_addr = edma_read_tcdreg(fsl_chan, daddr);
+ }
+ } while (upper_32_bits(cur_addr) != upper_32_bits(old_addr));
/* figure out the finished and calculate the residue */
for (i = 0; i < fsl_chan->edesc->n_tcds; i++) {
- nbytes = le32_to_cpu(edesc->tcd[i].vtcd->nbytes);
+ nbytes = fsl_edma_get_tcd_to_cpu(fsl_chan, edesc->tcd[i].vtcd, nbytes);
if (nbytes & (EDMA_V3_TCD_NBYTES_DMLOE | EDMA_V3_TCD_NBYTES_SMLOE))
nbytes = EDMA_V3_TCD_NBYTES_MLOFF_NBYTES(nbytes);
- size = nbytes * le16_to_cpu(edesc->tcd[i].vtcd->biter);
+ size = nbytes * fsl_edma_get_tcd_to_cpu(fsl_chan, edesc->tcd[i].vtcd, biter);
if (dir == DMA_MEM_TO_DEV)
- dma_addr = le32_to_cpu(edesc->tcd[i].vtcd->saddr);
+ dma_addr = fsl_edma_get_tcd_to_cpu(fsl_chan, edesc->tcd[i].vtcd, saddr);
else
- dma_addr = le32_to_cpu(edesc->tcd[i].vtcd->daddr);
+ dma_addr = fsl_edma_get_tcd_to_cpu(fsl_chan, edesc->tcd[i].vtcd, daddr);
len -= size;
if (cur_addr >= dma_addr && cur_addr < dma_addr + size) {
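
The new do/while above guards against torn reads: on TCD64 hardware the address registers are 64 bits wide but edma_readq() (added in fsl-edma-common.h below) issues two 32-bit MMIO reads, and the engine can carry into the high word between them. Sampling twice and retrying until consecutive samples agree on the upper 32 bits is the usual software workaround; in generic form (read_hw_addr() is a placeholder for the paired 32-bit reads):

	do {
		old = read_hw_addr();
		cur = read_hw_addr();
	} while (upper_32_bits(cur) != upper_32_bits(old));
	/* 'cur' is now treated as stable across the high-word carry */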
@@ -426,8 +432,7 @@ enum dma_status fsl_edma_tx_status(struct dma_chan *chan,
return fsl_chan->status;
}
-static void fsl_edma_set_tcd_regs(struct fsl_edma_chan *fsl_chan,
- struct fsl_edma_hw_tcd *tcd)
+static void fsl_edma_set_tcd_regs(struct fsl_edma_chan *fsl_chan, void *tcd)
{
u16 csr = 0;
@@ -439,26 +444,26 @@ static void fsl_edma_set_tcd_regs(struct fsl_edma_chan *fsl_chan,
*/
edma_write_tcdreg(fsl_chan, 0, csr);
- edma_write_tcdreg(fsl_chan, tcd->saddr, saddr);
- edma_write_tcdreg(fsl_chan, tcd->daddr, daddr);
+ edma_cp_tcd_to_reg(fsl_chan, tcd, saddr);
+ edma_cp_tcd_to_reg(fsl_chan, tcd, daddr);
- edma_write_tcdreg(fsl_chan, tcd->attr, attr);
- edma_write_tcdreg(fsl_chan, tcd->soff, soff);
+ edma_cp_tcd_to_reg(fsl_chan, tcd, attr);
+ edma_cp_tcd_to_reg(fsl_chan, tcd, soff);
- edma_write_tcdreg(fsl_chan, tcd->nbytes, nbytes);
- edma_write_tcdreg(fsl_chan, tcd->slast, slast);
+ edma_cp_tcd_to_reg(fsl_chan, tcd, nbytes);
+ edma_cp_tcd_to_reg(fsl_chan, tcd, slast);
- edma_write_tcdreg(fsl_chan, tcd->citer, citer);
- edma_write_tcdreg(fsl_chan, tcd->biter, biter);
- edma_write_tcdreg(fsl_chan, tcd->doff, doff);
+ edma_cp_tcd_to_reg(fsl_chan, tcd, citer);
+ edma_cp_tcd_to_reg(fsl_chan, tcd, biter);
+ edma_cp_tcd_to_reg(fsl_chan, tcd, doff);
- edma_write_tcdreg(fsl_chan, tcd->dlast_sga, dlast_sga);
+ edma_cp_tcd_to_reg(fsl_chan, tcd, dlast_sga);
- csr = le16_to_cpu(tcd->csr);
+ csr = fsl_edma_get_tcd_to_cpu(fsl_chan, tcd, csr);
if (fsl_chan->is_sw) {
csr |= EDMA_TCD_CSR_START;
- tcd->csr = cpu_to_le16(csr);
+ fsl_edma_set_tcd_to_le(fsl_chan, tcd, csr, csr);
}
/*
@@ -473,14 +478,14 @@ static void fsl_edma_set_tcd_regs(struct fsl_edma_chan *fsl_chan,
edma_writel_chreg(fsl_chan, edma_readl_chreg(fsl_chan, ch_csr), ch_csr);
- edma_write_tcdreg(fsl_chan, tcd->csr, csr);
+ edma_cp_tcd_to_reg(fsl_chan, tcd, csr);
}
static inline
void fsl_edma_fill_tcd(struct fsl_edma_chan *fsl_chan,
- struct fsl_edma_hw_tcd *tcd, u32 src, u32 dst,
- u16 attr, u16 soff, u32 nbytes, u32 slast, u16 citer,
- u16 biter, u16 doff, u32 dlast_sga, bool major_int,
+ struct fsl_edma_hw_tcd *tcd, dma_addr_t src, dma_addr_t dst,
+ u16 attr, u16 soff, u32 nbytes, dma_addr_t slast, u16 citer,
+ u16 biter, u16 doff, dma_addr_t dlast_sga, bool major_int,
bool disable_req, bool enable_sg)
{
struct dma_slave_config *cfg = &fsl_chan->cfg;
@@ -493,12 +498,12 @@ void fsl_edma_fill_tcd(struct fsl_edma_chan *fsl_chan,
* So we put the value in little endian in memory, waiting
* for fsl_edma_set_tcd_regs doing the swap.
*/
- tcd->saddr = cpu_to_le32(src);
- tcd->daddr = cpu_to_le32(dst);
+ fsl_edma_set_tcd_to_le(fsl_chan, tcd, src, saddr);
+ fsl_edma_set_tcd_to_le(fsl_chan, tcd, dst, daddr);
- tcd->attr = cpu_to_le16(attr);
+ fsl_edma_set_tcd_to_le(fsl_chan, tcd, attr, attr);
- tcd->soff = cpu_to_le16(soff);
+ fsl_edma_set_tcd_to_le(fsl_chan, tcd, soff, soff);
if (fsl_chan->is_multi_fifo) {
/* set mloff to support multiple fifo */
@@ -515,15 +520,16 @@ void fsl_edma_fill_tcd(struct fsl_edma_chan *fsl_chan,
}
}
- tcd->nbytes = cpu_to_le32(nbytes);
- tcd->slast = cpu_to_le32(slast);
+ fsl_edma_set_tcd_to_le(fsl_chan, tcd, nbytes, nbytes);
+ fsl_edma_set_tcd_to_le(fsl_chan, tcd, slast, slast);
+
+ fsl_edma_set_tcd_to_le(fsl_chan, tcd, EDMA_TCD_CITER_CITER(citer), citer);
+ fsl_edma_set_tcd_to_le(fsl_chan, tcd, doff, doff);
- tcd->citer = cpu_to_le16(EDMA_TCD_CITER_CITER(citer));
- tcd->doff = cpu_to_le16(doff);
+ fsl_edma_set_tcd_to_le(fsl_chan, tcd, dlast_sga, dlast_sga);
- tcd->dlast_sga = cpu_to_le32(dlast_sga);
+ fsl_edma_set_tcd_to_le(fsl_chan, tcd, EDMA_TCD_BITER_BITER(biter), biter);
- tcd->biter = cpu_to_le16(EDMA_TCD_BITER_BITER(biter));
if (major_int)
csr |= EDMA_TCD_CSR_INT_MAJOR;
@@ -539,7 +545,7 @@ void fsl_edma_fill_tcd(struct fsl_edma_chan *fsl_chan,
if (fsl_chan->is_sw)
csr |= EDMA_TCD_CSR_START;
- tcd->csr = cpu_to_le16(csr);
+ fsl_edma_set_tcd_to_le(fsl_chan, tcd, csr, csr);
}
static struct fsl_edma_desc *fsl_edma_alloc_desc(struct fsl_edma_chan *fsl_chan,
@@ -580,8 +586,9 @@ struct dma_async_tx_descriptor *fsl_edma_prep_dma_cyclic(
dma_addr_t dma_buf_next;
bool major_int = true;
int sg_len, i;
- u32 src_addr, dst_addr, last_sg, nbytes;
+ dma_addr_t src_addr, dst_addr, last_sg;
u16 soff, doff, iter;
+ u32 nbytes;
if (!is_slave_direction(direction))
return NULL;
@@ -653,8 +660,9 @@ struct dma_async_tx_descriptor *fsl_edma_prep_slave_sg(
struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
struct fsl_edma_desc *fsl_desc;
struct scatterlist *sg;
- u32 src_addr, dst_addr, last_sg, nbytes;
+ dma_addr_t src_addr, dst_addr, last_sg;
u16 soff, doff, iter;
+ u32 nbytes;
int i;
if (!is_slave_direction(direction))
@@ -803,7 +811,8 @@ int fsl_edma_alloc_chan_resources(struct dma_chan *chan)
struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
fsl_chan->tcd_pool = dma_pool_create("tcd_pool", chan->device->dev,
- sizeof(struct fsl_edma_hw_tcd),
+ fsl_edma_drvflags(fsl_chan) & FSL_EDMA_DRV_TCD64 ?
+ sizeof(struct fsl_edma_hw_tcd64) : sizeof(struct fsl_edma_hw_tcd),
32, 0);
return 0;
}
diff --git a/drivers/dma/fsl-edma-common.h b/drivers/dma/fsl-edma-common.h
index f5e216b157..7bf0aba471 100644
--- a/drivers/dma/fsl-edma-common.h
+++ b/drivers/dma/fsl-edma-common.h
@@ -88,6 +88,20 @@ struct fsl_edma_hw_tcd {
__le16 biter;
};
+struct fsl_edma_hw_tcd64 {
+ __le64 saddr;
+ __le16 soff;
+ __le16 attr;
+ __le32 nbytes;
+ __le64 slast;
+ __le64 daddr;
+ __le64 dlast_sga;
+ __le16 doff;
+ __le16 citer;
+ __le16 csr;
+ __le16 biter;
+} __packed;
+
struct fsl_edma3_ch_reg {
__le32 ch_csr;
__le32 ch_es;
@@ -97,7 +111,10 @@ struct fsl_edma3_ch_reg {
__le32 ch_mux;
__le32 ch_mattr; /* edma4, reserved for edma3 */
__le32 ch_reserved;
- struct fsl_edma_hw_tcd tcd;
+ union {
+ struct fsl_edma_hw_tcd tcd;
+ struct fsl_edma_hw_tcd64 tcd64;
+ };
} __packed;
/*
@@ -126,7 +143,7 @@ struct edma_regs {
struct fsl_edma_sw_tcd {
dma_addr_t ptcd;
- struct fsl_edma_hw_tcd *vtcd;
+ void *vtcd;
};
struct fsl_edma_chan {
@@ -145,7 +162,8 @@ struct fsl_edma_chan {
u32 dma_dev_size;
enum dma_data_direction dma_dir;
char chan_name[32];
- struct fsl_edma_hw_tcd __iomem *tcd;
+ void __iomem *tcd;
+ void __iomem *mux_addr;
u32 real_count;
struct work_struct issue_worker;
struct platform_device *pdev;
@@ -188,6 +206,7 @@ struct fsl_edma_desc {
#define FSL_EDMA_DRV_CLEAR_DONE_E_SG BIT(13)
/* Need clean CHn_CSR DONE before enable TCD's MAJORELINK */
#define FSL_EDMA_DRV_CLEAR_DONE_E_LINK BIT(14)
+#define FSL_EDMA_DRV_TCD64 BIT(15)
#define FSL_EDMA_DRV_EDMA3 (FSL_EDMA_DRV_SPLIT_REG | \
FSL_EDMA_DRV_BUS_8BYTE | \
@@ -207,6 +226,8 @@ struct fsl_edma_drvdata {
u32 chreg_off;
u32 chreg_space_sz;
u32 flags;
+ u32 mux_off; /* channel mux register offset */
+ u32 mux_skip; /* how much skip for each channel */
int (*setup_irq)(struct platform_device *pdev,
struct fsl_edma_engine *fsl_edma);
};
@@ -229,23 +250,108 @@ struct fsl_edma_engine {
struct fsl_edma_chan chans[] __counted_by(n_chans);
};
-#define edma_read_tcdreg(chan, __name) \
-(sizeof(chan->tcd->__name) == sizeof(u32) ? \
- edma_readl(chan->edma, &chan->tcd->__name) : \
- edma_readw(chan->edma, &chan->tcd->__name))
-
-#define edma_write_tcdreg(chan, val, __name) \
-(sizeof(chan->tcd->__name) == sizeof(u32) ? \
- edma_writel(chan->edma, (u32 __force)val, &chan->tcd->__name) : \
- edma_writew(chan->edma, (u16 __force)val, &chan->tcd->__name))
+#define edma_read_tcdreg_c(chan, _tcd, __name) \
+(sizeof((_tcd)->__name) == sizeof(u64) ? \
+ edma_readq(chan->edma, &(_tcd)->__name) : \
+ ((sizeof((_tcd)->__name) == sizeof(u32)) ? \
+ edma_readl(chan->edma, &(_tcd)->__name) : \
+ edma_readw(chan->edma, &(_tcd)->__name) \
+ ))
+
+#define edma_read_tcdreg(chan, __name) \
+((fsl_edma_drvflags(chan) & FSL_EDMA_DRV_TCD64) ? \
+ edma_read_tcdreg_c(chan, ((struct fsl_edma_hw_tcd64 __iomem *)chan->tcd), __name) : \
+ edma_read_tcdreg_c(chan, ((struct fsl_edma_hw_tcd __iomem *)chan->tcd), __name) \
+)
+
+#define edma_write_tcdreg_c(chan, _tcd, _val, __name) \
+do { \
+ switch (sizeof(_tcd->__name)) { \
+ case sizeof(u64): \
+ edma_writeq(chan->edma, (u64 __force)_val, &_tcd->__name); \
+ break; \
+ case sizeof(u32): \
+ edma_writel(chan->edma, (u32 __force)_val, &_tcd->__name); \
+ break; \
+ case sizeof(u16): \
+ edma_writew(chan->edma, (u16 __force)_val, &_tcd->__name); \
+ break; \
+ case sizeof(u8): \
+ edma_writeb(chan->edma, (u8 __force)_val, &_tcd->__name); \
+ break; \
+ } \
+} while (0)
+
+#define edma_write_tcdreg(chan, val, __name) \
+do { \
+ struct fsl_edma_hw_tcd64 __iomem *tcd64_r = (struct fsl_edma_hw_tcd64 __iomem *)chan->tcd; \
+ struct fsl_edma_hw_tcd __iomem *tcd_r = (struct fsl_edma_hw_tcd __iomem *)chan->tcd; \
+ \
+ if (fsl_edma_drvflags(chan) & FSL_EDMA_DRV_TCD64) \
+ edma_write_tcdreg_c(chan, tcd64_r, val, __name); \
+ else \
+ edma_write_tcdreg_c(chan, tcd_r, val, __name); \
+} while (0)
+
+#define edma_cp_tcd_to_reg(chan, __tcd, __name) \
+do { \
+ struct fsl_edma_hw_tcd64 __iomem *tcd64_r = (struct fsl_edma_hw_tcd64 __iomem *)chan->tcd; \
+ struct fsl_edma_hw_tcd __iomem *tcd_r = (struct fsl_edma_hw_tcd __iomem *)chan->tcd; \
+ struct fsl_edma_hw_tcd64 *tcd64_m = (struct fsl_edma_hw_tcd64 *)__tcd; \
+ struct fsl_edma_hw_tcd *tcd_m = (struct fsl_edma_hw_tcd *)__tcd; \
+ \
+ if (fsl_edma_drvflags(chan) & FSL_EDMA_DRV_TCD64) \
+ edma_write_tcdreg_c(chan, tcd64_r, tcd64_m->__name, __name); \
+ else \
+ edma_write_tcdreg_c(chan, tcd_r, tcd_m->__name, __name); \
+} while (0)
#define edma_readl_chreg(chan, __name) \
edma_readl(chan->edma, \
- (void __iomem *)&(container_of(chan->tcd, struct fsl_edma3_ch_reg, tcd)->__name))
+ (void __iomem *)&(container_of(((__force void *)chan->tcd),\
+ struct fsl_edma3_ch_reg, tcd)->__name))
#define edma_writel_chreg(chan, val, __name) \
edma_writel(chan->edma, val, \
- (void __iomem *)&(container_of(chan->tcd, struct fsl_edma3_ch_reg, tcd)->__name))
+ (void __iomem *)&(container_of(((__force void *)chan->tcd),\
+ struct fsl_edma3_ch_reg, tcd)->__name))
+
+#define fsl_edma_get_tcd(_chan, _tcd, _field) \
+(fsl_edma_drvflags(_chan) & FSL_EDMA_DRV_TCD64 ? (((struct fsl_edma_hw_tcd64 *)_tcd)->_field) : \
+ (((struct fsl_edma_hw_tcd *)_tcd)->_field))
+
+#define fsl_edma_le_to_cpu(x) \
+(sizeof(x) == sizeof(u64) ? le64_to_cpu((__force __le64)(x)) : \
+ (sizeof(x) == sizeof(u32) ? le32_to_cpu((__force __le32)(x)) : \
+ le16_to_cpu((__force __le16)(x))))
+
+#define fsl_edma_get_tcd_to_cpu(_chan, _tcd, _field) \
+(fsl_edma_drvflags(_chan) & FSL_EDMA_DRV_TCD64 ? \
+ fsl_edma_le_to_cpu(((struct fsl_edma_hw_tcd64 *)_tcd)->_field) : \
+ fsl_edma_le_to_cpu(((struct fsl_edma_hw_tcd *)_tcd)->_field))
+
+#define fsl_edma_set_tcd_to_le_c(_tcd, _val, _field) \
+do { \
+ switch (sizeof((_tcd)->_field)) { \
+ case sizeof(u64): \
+ *(__force __le64 *)(&((_tcd)->_field)) = cpu_to_le64(_val); \
+ break; \
+ case sizeof(u32): \
+ *(__force __le32 *)(&((_tcd)->_field)) = cpu_to_le32(_val); \
+ break; \
+ case sizeof(u16): \
+ *(__force __le16 *)(&((_tcd)->_field)) = cpu_to_le16(_val); \
+ break; \
+ } \
+} while (0)
+
+#define fsl_edma_set_tcd_to_le(_chan, _tcd, _val, _field) \
+do { \
+ if (fsl_edma_drvflags(_chan) & FSL_EDMA_DRV_TCD64) \
+ fsl_edma_set_tcd_to_le_c((struct fsl_edma_hw_tcd64 *)_tcd, _val, _field); \
+ else \
+ fsl_edma_set_tcd_to_le_c((struct fsl_edma_hw_tcd *)_tcd, _val, _field); \
+} while (0)
/*
* R/W functions for big- or little-endian registers:
@@ -253,6 +359,21 @@ struct fsl_edma_engine {
* For the big-endian IP module, the offset for 8-bit or 16-bit registers
* should also be swapped opposite to that in little-endian IP.
*/
+static inline u64 edma_readq(struct fsl_edma_engine *edma, void __iomem *addr)
+{
+ u64 l, h;
+
+ if (edma->big_endian) {
+ l = ioread32be(addr);
+ h = ioread32be(addr + 4);
+ } else {
+ l = ioread32(addr);
+ h = ioread32(addr + 4);
+ }
+
+ return (h << 32) | l;
+}
+
static inline u32 edma_readl(struct fsl_edma_engine *edma, void __iomem *addr)
{
if (edma->big_endian)
@@ -298,6 +419,18 @@ static inline void edma_writel(struct fsl_edma_engine *edma,
iowrite32(val, addr);
}
+static inline void edma_writeq(struct fsl_edma_engine *edma,
+ u64 val, void __iomem *addr)
+{
+ if (edma->big_endian) {
+ iowrite32be(val & 0xFFFFFFFF, addr);
+ iowrite32be(val >> 32, addr + 4);
+ } else {
+ iowrite32(val & 0xFFFFFFFF, addr);
+ iowrite32(val >> 32, addr + 4);
+ }
+}
+
static inline struct fsl_edma_chan *to_fsl_edma_chan(struct dma_chan *chan)
{
return container_of(chan, struct fsl_edma_chan, vchan.chan);
diff --git a/drivers/dma/fsl-edma-main.c b/drivers/dma/fsl-edma-main.c
index d36e28b9c7..402f0058a1 100644
--- a/drivers/dma/fsl-edma-main.c
+++ b/drivers/dma/fsl-edma-main.c
@@ -360,6 +360,18 @@ static struct fsl_edma_drvdata imx93_data4 = {
.flags = FSL_EDMA_DRV_HAS_CHMUX | FSL_EDMA_DRV_HAS_DMACLK | FSL_EDMA_DRV_EDMA4,
.chreg_space_sz = 0x8000,
.chreg_off = 0x10000,
+ .mux_off = 0x10000 + offsetof(struct fsl_edma3_ch_reg, ch_mux),
+ .mux_skip = 0x8000,
+ .setup_irq = fsl_edma3_irq_init,
+};
+
+static struct fsl_edma_drvdata imx95_data5 = {
+ .flags = FSL_EDMA_DRV_HAS_CHMUX | FSL_EDMA_DRV_HAS_DMACLK | FSL_EDMA_DRV_EDMA4 |
+ FSL_EDMA_DRV_TCD64,
+ .chreg_space_sz = 0x8000,
+ .chreg_off = 0x10000,
+ .mux_off = 0x200,
+ .mux_skip = sizeof(u32),
.setup_irq = fsl_edma3_irq_init,
};
@@ -371,6 +383,7 @@ static const struct of_device_id fsl_edma_dt_ids[] = {
{ .compatible = "fsl,imx8qm-adma", .data = &imx8qm_audio_data},
{ .compatible = "fsl,imx93-edma3", .data = &imx93_data3},
{ .compatible = "fsl,imx93-edma4", .data = &imx93_data4},
+ { .compatible = "fsl,imx95-edma5", .data = &imx95_data5},
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, fsl_edma_dt_ids);
@@ -511,6 +524,9 @@ static int fsl_edma_probe(struct platform_device *pdev)
return ret;
}
+ if (drvdata->flags & FSL_EDMA_DRV_TCD64)
+ dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+
INIT_LIST_HEAD(&fsl_edma->dma_dev.channels);
for (i = 0; i < fsl_edma->n_chans; i++) {
struct fsl_edma_chan *fsl_chan = &fsl_edma->chans[i];
@@ -533,11 +549,12 @@ static int fsl_edma_probe(struct platform_device *pdev)
offsetof(struct fsl_edma3_ch_reg, tcd) : 0;
fsl_chan->tcd = fsl_edma->membase
+ i * drvdata->chreg_space_sz + drvdata->chreg_off + len;
+ fsl_chan->mux_addr = fsl_edma->membase + drvdata->mux_off + i * drvdata->mux_skip;
fsl_chan->pdev = pdev;
vchan_init(&fsl_chan->vchan, &fsl_edma->dma_dev);
- edma_write_tcdreg(fsl_chan, 0, csr);
+ edma_write_tcdreg(fsl_chan, cpu_to_le32(0), csr);
fsl_edma_chan_mux(fsl_chan, 0, false);
}
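
With the new mux_off/mux_skip drvdata fields, the probe path above locates each channel's mux register as membase + mux_off + i * mux_skip, which covers both layouts in this patch:

	/* imx93 eDMA4: the mux sits inside each 0x8000-byte channel block:
	 *   chan 2 -> membase + 0x10000
	 *           + offsetof(struct fsl_edma3_ch_reg, ch_mux) + 2 * 0x8000
	 * imx95 eDMA5: the muxes form a flat u32 array at offset 0x200:
	 *   chan 2 -> membase + 0x200 + 2 * sizeof(u32) == membase + 0x208
	 */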
diff --git a/drivers/dma/idma64.c b/drivers/dma/idma64.c
index 1398814d8f..e3505e5678 100644
--- a/drivers/dma/idma64.c
+++ b/drivers/dma/idma64.c
@@ -598,7 +598,9 @@ static int idma64_probe(struct idma64_chip *chip)
idma64->dma.dev = chip->sysdev;
- dma_set_max_seg_size(idma64->dma.dev, IDMA64C_CTLH_BLOCK_TS_MASK);
+ ret = dma_set_max_seg_size(idma64->dma.dev, IDMA64C_CTLH_BLOCK_TS_MASK);
+ if (ret)
+ return ret;
ret = dma_async_device_register(&idma64->dma);
if (ret)
diff --git a/drivers/dma/idxd/bus.c b/drivers/dma/idxd/bus.c
index 0c9e689a2e..b83b27e04f 100644
--- a/drivers/dma/idxd/bus.c
+++ b/drivers/dma/idxd/bus.c
@@ -72,7 +72,7 @@ static int idxd_bus_uevent(const struct device *dev, struct kobj_uevent_env *env
return add_uevent_var(env, "MODALIAS=" IDXD_DEVICES_MODALIAS_FMT, 0);
}
-struct bus_type dsa_bus_type = {
+const struct bus_type dsa_bus_type = {
.name = "dsa",
.match = idxd_config_bus_match,
.probe = idxd_config_bus_probe,
diff --git a/drivers/dma/idxd/cdev.c b/drivers/dma/idxd/cdev.c
index dcefb838b8..fd9bbee4cc 100644
--- a/drivers/dma/idxd/cdev.c
+++ b/drivers/dma/idxd/cdev.c
@@ -152,7 +152,7 @@ static void idxd_file_dev_release(struct device *dev)
mutex_unlock(&wq->wq_lock);
}
-static struct device_type idxd_cdev_file_type = {
+static const struct device_type idxd_cdev_file_type = {
.name = "idxd_file",
.release = idxd_file_dev_release,
.groups = cdev_file_attribute_groups,
@@ -169,7 +169,7 @@ static void idxd_cdev_dev_release(struct device *dev)
kfree(idxd_cdev);
}
-static struct device_type idxd_cdev_device_type = {
+static const struct device_type idxd_cdev_device_type = {
.name = "idxd_cdev",
.release = idxd_cdev_dev_release,
};
@@ -577,7 +577,6 @@ void idxd_wq_del_cdev(struct idxd_wq *wq)
struct idxd_cdev *idxd_cdev;
idxd_cdev = wq->idxd_cdev;
- ida_destroy(&file_ida);
wq->idxd_cdev = NULL;
cdev_device_del(&idxd_cdev->cdev, cdev_dev(idxd_cdev));
put_device(cdev_dev(idxd_cdev));
diff --git a/drivers/dma/idxd/idxd.h b/drivers/dma/idxd/idxd.h
index eb73cabb4a..868b724a3b 100644
--- a/drivers/dma/idxd/idxd.h
+++ b/drivers/dma/idxd/idxd.h
@@ -282,7 +282,7 @@ typedef int (*load_device_defaults_fn_t) (struct idxd_device *idxd);
struct idxd_driver_data {
const char *name_prefix;
enum idxd_type type;
- struct device_type *dev_type;
+ const struct device_type *dev_type;
int compl_size;
int align;
int evl_cr_off;
@@ -518,15 +518,15 @@ static inline void idxd_set_user_intr(struct idxd_device *idxd, bool enable)
iowrite32(reg.bits, idxd->reg_base + IDXD_GENCFG_OFFSET);
}
-extern struct bus_type dsa_bus_type;
+extern const struct bus_type dsa_bus_type;
extern bool support_enqcmd;
extern struct ida idxd_ida;
-extern struct device_type dsa_device_type;
-extern struct device_type iax_device_type;
-extern struct device_type idxd_wq_device_type;
-extern struct device_type idxd_engine_device_type;
-extern struct device_type idxd_group_device_type;
+extern const struct device_type dsa_device_type;
+extern const struct device_type iax_device_type;
+extern const struct device_type idxd_wq_device_type;
+extern const struct device_type idxd_engine_device_type;
+extern const struct device_type idxd_group_device_type;
static inline bool is_dsa_dev(struct idxd_dev *idxd_dev)
{
diff --git a/drivers/dma/idxd/irq.c b/drivers/dma/idxd/irq.c
index 8dc029c865..fc049c9c98 100644
--- a/drivers/dma/idxd/irq.c
+++ b/drivers/dma/idxd/irq.c
@@ -611,11 +611,13 @@ static void irq_process_work_list(struct idxd_irq_entry *irq_entry)
spin_unlock(&irq_entry->list_lock);
- list_for_each_entry(desc, &flist, list) {
+ list_for_each_entry_safe(desc, n, &flist, list) {
/*
* Check against the original status as ABORT is software defined
* and 0xff, which DSA_COMP_STATUS_MASK can mask out.
*/
+ list_del(&desc->list);
+
if (unlikely(desc->completion->status == IDXD_COMP_DESC_ABORT)) {
idxd_desc_complete(desc, IDXD_COMPLETE_ABORT, true);
continue;
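
The switch to list_for_each_entry_safe() above matters because idxd_desc_complete() can free the descriptor, after which the plain iterator would read freed memory to find the next node; the _safe variant caches the next pointer before the body runs. Generic shape of the pattern:

	struct idxd_desc *desc, *n;

	list_for_each_entry_safe(desc, n, &flist, list) {
		list_del(&desc->list);
		idxd_desc_complete(desc, IDXD_COMPLETE_NORMAL, true);
		/* safe: 'n' was loaded before 'desc' was freed */
	}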
diff --git a/drivers/dma/idxd/sysfs.c b/drivers/dma/idxd/sysfs.c
index 3a5ce477a8..f706eae0e7 100644
--- a/drivers/dma/idxd/sysfs.c
+++ b/drivers/dma/idxd/sysfs.c
@@ -91,7 +91,7 @@ static void idxd_conf_engine_release(struct device *dev)
kfree(engine);
}
-struct device_type idxd_engine_device_type = {
+const struct device_type idxd_engine_device_type = {
.name = "engine",
.release = idxd_conf_engine_release,
.groups = idxd_engine_attribute_groups,
@@ -577,7 +577,7 @@ static void idxd_conf_group_release(struct device *dev)
kfree(group);
}
-struct device_type idxd_group_device_type = {
+const struct device_type idxd_group_device_type = {
.name = "group",
.release = idxd_conf_group_release,
.groups = idxd_group_attribute_groups,
@@ -1392,7 +1392,7 @@ static void idxd_conf_wq_release(struct device *dev)
kfree(wq);
}
-struct device_type idxd_wq_device_type = {
+const struct device_type idxd_wq_device_type = {
.name = "wq",
.release = idxd_conf_wq_release,
.groups = idxd_wq_attribute_groups,
@@ -1821,13 +1821,13 @@ static void idxd_conf_device_release(struct device *dev)
kfree(idxd);
}
-struct device_type dsa_device_type = {
+const struct device_type dsa_device_type = {
.name = "dsa",
.release = idxd_conf_device_release,
.groups = idxd_attribute_groups,
};
-struct device_type iax_device_type = {
+const struct device_type iax_device_type = {
.name = "iax",
.release = idxd_conf_device_release,
.groups = idxd_attribute_groups,
diff --git a/drivers/dma/ioat/init.c b/drivers/dma/ioat/init.c
index 9c364e92cb..e8f45a7fde 100644
--- a/drivers/dma/ioat/init.c
+++ b/drivers/dma/ioat/init.c
@@ -534,18 +534,6 @@ err_out:
return err;
}
-static int ioat_register(struct ioatdma_device *ioat_dma)
-{
- int err = dma_async_device_register(&ioat_dma->dma_dev);
-
- if (err) {
- ioat_disable_interrupts(ioat_dma);
- dma_pool_destroy(ioat_dma->completion_pool);
- }
-
- return err;
-}
-
static void ioat_dma_remove(struct ioatdma_device *ioat_dma)
{
struct dma_device *dma = &ioat_dma->dma_dev;
@@ -1181,9 +1169,9 @@ static int ioat3_dma_probe(struct ioatdma_device *ioat_dma, int dca)
ioat_chan->reg_base + IOAT_DCACTRL_OFFSET);
}
- err = ioat_register(ioat_dma);
+ err = dma_async_device_register(&ioat_dma->dma_dev);
if (err)
- return err;
+ goto err_disable_interrupts;
ioat_kobject_add(ioat_dma, &ioat_ktype);
@@ -1192,20 +1180,29 @@ static int ioat3_dma_probe(struct ioatdma_device *ioat_dma, int dca)
/* disable relaxed ordering */
err = pcie_capability_read_word(pdev, PCI_EXP_DEVCTL, &val16);
- if (err)
- return pcibios_err_to_errno(err);
+ if (err) {
+ err = pcibios_err_to_errno(err);
+ goto err_disable_interrupts;
+ }
/* clear relaxed ordering enable */
val16 &= ~PCI_EXP_DEVCTL_RELAX_EN;
err = pcie_capability_write_word(pdev, PCI_EXP_DEVCTL, val16);
- if (err)
- return pcibios_err_to_errno(err);
+ if (err) {
+ err = pcibios_err_to_errno(err);
+ goto err_disable_interrupts;
+ }
if (ioat_dma->cap & IOAT_CAP_DPS)
writeb(ioat_pending_level + 1,
ioat_dma->reg_base + IOAT_PREFETCH_LIMIT_OFFSET);
return 0;
+
+err_disable_interrupts:
+ ioat_disable_interrupts(ioat_dma);
+ dma_pool_destroy(ioat_dma->completion_pool);
+ return err;
}
static void ioat_shutdown(struct pci_dev *pdev)
@@ -1350,6 +1347,8 @@ static int ioat_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
void __iomem * const *iomap;
struct device *dev = &pdev->dev;
struct ioatdma_device *device;
+ unsigned int i;
+ u8 version;
int err;
err = pcim_enable_device(pdev);
@@ -1363,6 +1362,10 @@ static int ioat_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if (!iomap)
return -ENOMEM;
+ version = readb(iomap[IOAT_MMIO_BAR] + IOAT_VER_OFFSET);
+ if (version < IOAT_VER_3_0)
+ return -ENODEV;
+
err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
if (err)
return err;
@@ -1373,17 +1376,18 @@ static int ioat_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
pci_set_master(pdev);
pci_set_drvdata(pdev, device);
- device->version = readb(device->reg_base + IOAT_VER_OFFSET);
+ device->version = version;
if (device->version >= IOAT_VER_3_4)
ioat_dca_enabled = 0;
- if (device->version >= IOAT_VER_3_0) {
- if (is_skx_ioat(pdev))
- device->version = IOAT_VER_3_2;
- err = ioat3_dma_probe(device, ioat_dca_enabled);
- } else
- return -ENODEV;
+ if (is_skx_ioat(pdev))
+ device->version = IOAT_VER_3_2;
+
+ err = ioat3_dma_probe(device, ioat_dca_enabled);
if (err) {
+ for (i = 0; i < IOAT_MAX_CHANS; i++)
+ kfree(device->idx[i]);
+ kfree(device);
dev_err(dev, "Intel(R) I/OAT DMA Engine init failed\n");
return -ENODEV;
}
@@ -1445,6 +1449,7 @@ module_init(ioat_init_module);
static void __exit ioat_exit_module(void)
{
pci_unregister_driver(&ioat_pci_driver);
+ kmem_cache_destroy(ioat_sed_cache);
kmem_cache_destroy(ioat_cache);
}
module_exit(ioat_exit_module);
diff --git a/drivers/dma/mcf-edma-main.c b/drivers/dma/mcf-edma-main.c
index ab21455d9c..dba6317838 100644
--- a/drivers/dma/mcf-edma-main.c
+++ b/drivers/dma/mcf-edma-main.c
@@ -202,7 +202,7 @@ static int mcf_edma_probe(struct platform_device *pdev)
vchan_init(&mcf_chan->vchan, &mcf_edma->dma_dev);
mcf_chan->tcd = mcf_edma->membase + EDMA_TCD
+ i * sizeof(struct fsl_edma_hw_tcd);
- iowrite32(0x0, &mcf_chan->tcd->csr);
+ edma_write_tcdreg(mcf_chan, cpu_to_le32(0), csr);
}
iowrite32(~0, regs->inth);
diff --git a/drivers/dma/mv_xor_v2.c b/drivers/dma/mv_xor_v2.c
index 1ebfbe88e7..97ebc791a3 100644
--- a/drivers/dma/mv_xor_v2.c
+++ b/drivers/dma/mv_xor_v2.c
@@ -747,8 +747,8 @@ static int mv_xor_v2_probe(struct platform_device *pdev)
if (IS_ERR(xor_dev->clk))
return PTR_ERR(xor_dev->clk);
- ret = platform_msi_domain_alloc_irqs(&pdev->dev, 1,
- mv_xor_v2_set_msi_msg);
+ ret = platform_device_msi_init_and_alloc_irqs(&pdev->dev, 1,
+ mv_xor_v2_set_msi_msg);
if (ret)
return ret;
@@ -851,7 +851,7 @@ free_hw_desq:
xor_dev->desc_size * MV_XOR_V2_DESC_NUM,
xor_dev->hw_desq_virt, xor_dev->hw_desq);
free_msi_irqs:
- platform_msi_domain_free_irqs(&pdev->dev);
+ platform_device_msi_free_irqs_all(&pdev->dev);
return ret;
}
@@ -867,7 +867,7 @@ static void mv_xor_v2_remove(struct platform_device *pdev)
devm_free_irq(&pdev->dev, xor_dev->irq, xor_dev);
- platform_msi_domain_free_irqs(&pdev->dev);
+ platform_device_msi_free_irqs_all(&pdev->dev);
tasklet_kill(&xor_dev->irq_tasklet);
}
diff --git a/drivers/dma/of-dma.c b/drivers/dma/of-dma.c
index 775a7f408b..e588fff9f2 100644
--- a/drivers/dma/of-dma.c
+++ b/drivers/dma/of-dma.c
@@ -29,7 +29,7 @@ static DEFINE_MUTEX(of_dma_lock);
* to the DMA data stored is retuned. A NULL pointer is returned if no match is
* found.
*/
-static struct of_dma *of_dma_find_controller(struct of_phandle_args *dma_spec)
+static struct of_dma *of_dma_find_controller(const struct of_phandle_args *dma_spec)
{
struct of_dma *ofdma;
diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c
index 3cf0b38387..ad8e3da1b2 100644
--- a/drivers/dma/pl330.c
+++ b/drivers/dma/pl330.c
@@ -2585,6 +2585,7 @@ static struct dma_pl330_desc *pluck_desc(struct list_head *pool,
desc->status = PREP;
desc->txd.callback = NULL;
+ desc->txd.callback_result = NULL;
}
spin_unlock_irqrestore(lock, flags);
diff --git a/drivers/dma/qcom/hidma.c b/drivers/dma/qcom/hidma.c
index d63b93dc70..202ac95227 100644
--- a/drivers/dma/qcom/hidma.c
+++ b/drivers/dma/qcom/hidma.c
@@ -696,7 +696,7 @@ static void hidma_free_msis(struct hidma_dev *dmadev)
devm_free_irq(dev, virq, &dmadev->lldev);
}
- platform_msi_domain_free_irqs(dev);
+ platform_device_msi_free_irqs_all(dev);
#endif
}
@@ -706,8 +706,8 @@ static int hidma_request_msi(struct hidma_dev *dmadev,
#ifdef CONFIG_GENERIC_MSI_IRQ
int rc, i, virq;
- rc = platform_msi_domain_alloc_irqs(&pdev->dev, HIDMA_MSI_INTS,
- hidma_write_msi_msg);
+ rc = platform_device_msi_init_and_alloc_irqs(&pdev->dev, HIDMA_MSI_INTS,
+ hidma_write_msi_msg);
if (rc)
return rc;
diff --git a/drivers/dma/ti/k3-psil-j721s2.c b/drivers/dma/ti/k3-psil-j721s2.c
index 1d5430fc57..ba08bdcdcd 100644
--- a/drivers/dma/ti/k3-psil-j721s2.c
+++ b/drivers/dma/ti/k3-psil-j721s2.c
@@ -57,6 +57,14 @@
}, \
}
+#define PSIL_CSI2RX(x) \
+ { \
+ .thread_id = x, \
+ .ep_config = { \
+ .ep_type = PSIL_EP_NATIVE, \
+ }, \
+ }
+
/* PSI-L source thread IDs, used for RX (DMA_DEV_TO_MEM) */
static struct psil_ep j721s2_src_ep_map[] = {
/* PDMA_MCASP - McASP0-4 */
@@ -114,6 +122,71 @@ static struct psil_ep j721s2_src_ep_map[] = {
PSIL_PDMA_XY_PKT(0x4707),
PSIL_PDMA_XY_PKT(0x4708),
PSIL_PDMA_XY_PKT(0x4709),
+ /* CSI2RX */
+ PSIL_CSI2RX(0x4940),
+ PSIL_CSI2RX(0x4941),
+ PSIL_CSI2RX(0x4942),
+ PSIL_CSI2RX(0x4943),
+ PSIL_CSI2RX(0x4944),
+ PSIL_CSI2RX(0x4945),
+ PSIL_CSI2RX(0x4946),
+ PSIL_CSI2RX(0x4947),
+ PSIL_CSI2RX(0x4948),
+ PSIL_CSI2RX(0x4949),
+ PSIL_CSI2RX(0x494a),
+ PSIL_CSI2RX(0x494b),
+ PSIL_CSI2RX(0x494c),
+ PSIL_CSI2RX(0x494d),
+ PSIL_CSI2RX(0x494e),
+ PSIL_CSI2RX(0x494f),
+ PSIL_CSI2RX(0x4950),
+ PSIL_CSI2RX(0x4951),
+ PSIL_CSI2RX(0x4952),
+ PSIL_CSI2RX(0x4953),
+ PSIL_CSI2RX(0x4954),
+ PSIL_CSI2RX(0x4955),
+ PSIL_CSI2RX(0x4956),
+ PSIL_CSI2RX(0x4957),
+ PSIL_CSI2RX(0x4958),
+ PSIL_CSI2RX(0x4959),
+ PSIL_CSI2RX(0x495a),
+ PSIL_CSI2RX(0x495b),
+ PSIL_CSI2RX(0x495c),
+ PSIL_CSI2RX(0x495d),
+ PSIL_CSI2RX(0x495e),
+ PSIL_CSI2RX(0x495f),
+ PSIL_CSI2RX(0x4960),
+ PSIL_CSI2RX(0x4961),
+ PSIL_CSI2RX(0x4962),
+ PSIL_CSI2RX(0x4963),
+ PSIL_CSI2RX(0x4964),
+ PSIL_CSI2RX(0x4965),
+ PSIL_CSI2RX(0x4966),
+ PSIL_CSI2RX(0x4967),
+ PSIL_CSI2RX(0x4968),
+ PSIL_CSI2RX(0x4969),
+ PSIL_CSI2RX(0x496a),
+ PSIL_CSI2RX(0x496b),
+ PSIL_CSI2RX(0x496c),
+ PSIL_CSI2RX(0x496d),
+ PSIL_CSI2RX(0x496e),
+ PSIL_CSI2RX(0x496f),
+ PSIL_CSI2RX(0x4970),
+ PSIL_CSI2RX(0x4971),
+ PSIL_CSI2RX(0x4972),
+ PSIL_CSI2RX(0x4973),
+ PSIL_CSI2RX(0x4974),
+ PSIL_CSI2RX(0x4975),
+ PSIL_CSI2RX(0x4976),
+ PSIL_CSI2RX(0x4977),
+ PSIL_CSI2RX(0x4978),
+ PSIL_CSI2RX(0x4979),
+ PSIL_CSI2RX(0x497a),
+ PSIL_CSI2RX(0x497b),
+ PSIL_CSI2RX(0x497c),
+ PSIL_CSI2RX(0x497d),
+ PSIL_CSI2RX(0x497e),
+ PSIL_CSI2RX(0x497f),
/* MAIN SA2UL */
PSIL_SA2UL(0x4a40, 0),
PSIL_SA2UL(0x4a41, 0),
diff --git a/drivers/dma/ti/k3-udma-glue.c b/drivers/dma/ti/k3-udma-glue.c
index c278d5facf..f0a399cf45 100644
--- a/drivers/dma/ti/k3-udma-glue.c
+++ b/drivers/dma/ti/k3-udma-glue.c
@@ -111,6 +111,35 @@ static int of_k3_udma_glue_parse(struct device_node *udmax_np,
return 0;
}
+static int of_k3_udma_glue_parse_chn_common(struct k3_udma_glue_common *common, u32 thread_id,
+ bool tx_chn)
+{
+ if (tx_chn && !(thread_id & K3_PSIL_DST_THREAD_ID_OFFSET))
+ return -EINVAL;
+
+ if (!tx_chn && (thread_id & K3_PSIL_DST_THREAD_ID_OFFSET))
+ return -EINVAL;
+
+ /* get psil endpoint config */
+ common->ep_config = psil_get_ep_config(thread_id);
+ if (IS_ERR(common->ep_config)) {
+ dev_err(common->dev,
+ "No configuration for psi-l thread 0x%04x\n",
+ thread_id);
+ return PTR_ERR(common->ep_config);
+ }
+
+ common->epib = common->ep_config->needs_epib;
+ common->psdata_size = common->ep_config->psd_size;
+
+ if (tx_chn)
+ common->dst_thread = thread_id;
+ else
+ common->src_thread = thread_id;
+
+ return 0;
+}
+
static int of_k3_udma_glue_parse_chn(struct device_node *chn_np,
const char *name, struct k3_udma_glue_common *common,
bool tx_chn)
@@ -153,38 +182,29 @@ static int of_k3_udma_glue_parse_chn(struct device_node *chn_np,
common->atype_asel = dma_spec.args[1];
}
- if (tx_chn && !(thread_id & K3_PSIL_DST_THREAD_ID_OFFSET)) {
- ret = -EINVAL;
- goto out_put_spec;
- }
+ ret = of_k3_udma_glue_parse_chn_common(common, thread_id, tx_chn);
- if (!tx_chn && (thread_id & K3_PSIL_DST_THREAD_ID_OFFSET)) {
- ret = -EINVAL;
- goto out_put_spec;
- }
+out_put_spec:
+ of_node_put(dma_spec.np);
+ return ret;
+}
- /* get psil endpoint config */
- common->ep_config = psil_get_ep_config(thread_id);
- if (IS_ERR(common->ep_config)) {
- dev_err(common->dev,
- "No configuration for psi-l thread 0x%04x\n",
- thread_id);
- ret = PTR_ERR(common->ep_config);
- goto out_put_spec;
- }
+static int
+of_k3_udma_glue_parse_chn_by_id(struct device_node *udmax_np, struct k3_udma_glue_common *common,
+ bool tx_chn, u32 thread_id)
+{
+ int ret = 0;
- common->epib = common->ep_config->needs_epib;
- common->psdata_size = common->ep_config->psd_size;
+ if (unlikely(!udmax_np))
+ return -EINVAL;
- if (tx_chn)
- common->dst_thread = thread_id;
- else
- common->src_thread = thread_id;
+ ret = of_k3_udma_glue_parse(udmax_np, common);
+ if (ret)
+ return ret;
-out_put_spec:
- of_node_put(dma_spec.np);
+ ret = of_k3_udma_glue_parse_chn_common(common, thread_id, tx_chn);
return ret;
-};
+}
static void k3_udma_glue_dump_tx_chn(struct k3_udma_glue_tx_channel *tx_chn)
{
@@ -251,29 +271,13 @@ static int k3_udma_glue_cfg_tx_chn(struct k3_udma_glue_tx_channel *tx_chn)
return tisci_rm->tisci_udmap_ops->tx_ch_cfg(tisci_rm->tisci, &req);
}
-struct k3_udma_glue_tx_channel *k3_udma_glue_request_tx_chn(struct device *dev,
- const char *name, struct k3_udma_glue_tx_channel_cfg *cfg)
+static int
+k3_udma_glue_request_tx_chn_common(struct device *dev,
+ struct k3_udma_glue_tx_channel *tx_chn,
+ struct k3_udma_glue_tx_channel_cfg *cfg)
{
- struct k3_udma_glue_tx_channel *tx_chn;
int ret;
- tx_chn = devm_kzalloc(dev, sizeof(*tx_chn), GFP_KERNEL);
- if (!tx_chn)
- return ERR_PTR(-ENOMEM);
-
- tx_chn->common.dev = dev;
- tx_chn->common.swdata_size = cfg->swdata_size;
- tx_chn->tx_pause_on_err = cfg->tx_pause_on_err;
- tx_chn->tx_filt_einfo = cfg->tx_filt_einfo;
- tx_chn->tx_filt_pswords = cfg->tx_filt_pswords;
- tx_chn->tx_supr_tdpkt = cfg->tx_supr_tdpkt;
-
- /* parse of udmap channel */
- ret = of_k3_udma_glue_parse_chn(dev->of_node, name,
- &tx_chn->common, true);
- if (ret)
- goto err;
-
tx_chn->common.hdesc_size = cppi5_hdesc_calc_size(tx_chn->common.epib,
tx_chn->common.psdata_size,
tx_chn->common.swdata_size);
@@ -289,7 +293,7 @@ struct k3_udma_glue_tx_channel *k3_udma_glue_request_tx_chn(struct device *dev,
if (IS_ERR(tx_chn->udma_tchanx)) {
ret = PTR_ERR(tx_chn->udma_tchanx);
dev_err(dev, "UDMAX tchanx get err %d\n", ret);
- goto err;
+ return ret;
}
tx_chn->udma_tchan_id = xudma_tchan_get_id(tx_chn->udma_tchanx);
@@ -302,7 +306,7 @@ struct k3_udma_glue_tx_channel *k3_udma_glue_request_tx_chn(struct device *dev,
dev_err(dev, "Channel Device registration failed %d\n", ret);
put_device(&tx_chn->common.chan_dev);
tx_chn->common.chan_dev.parent = NULL;
- goto err;
+ return ret;
}
if (xudma_is_pktdma(tx_chn->common.udmax)) {
@@ -326,7 +330,7 @@ struct k3_udma_glue_tx_channel *k3_udma_glue_request_tx_chn(struct device *dev,
&tx_chn->ringtxcq);
if (ret) {
dev_err(dev, "Failed to get TX/TXCQ rings %d\n", ret);
- goto err;
+ return ret;
}
/* Set the dma_dev for the rings to be configured */
@@ -342,13 +346,13 @@ struct k3_udma_glue_tx_channel *k3_udma_glue_request_tx_chn(struct device *dev,
ret = k3_ringacc_ring_cfg(tx_chn->ringtx, &cfg->tx_cfg);
if (ret) {
dev_err(dev, "Failed to cfg ringtx %d\n", ret);
- goto err;
+ return ret;
}
ret = k3_ringacc_ring_cfg(tx_chn->ringtxcq, &cfg->txcq_cfg);
if (ret) {
dev_err(dev, "Failed to cfg ringtx %d\n", ret);
- goto err;
+ return ret;
}
/* request and cfg psi-l */
@@ -359,11 +363,42 @@ struct k3_udma_glue_tx_channel *k3_udma_glue_request_tx_chn(struct device *dev,
ret = k3_udma_glue_cfg_tx_chn(tx_chn);
if (ret) {
dev_err(dev, "Failed to cfg tchan %d\n", ret);
- goto err;
+ return ret;
}
k3_udma_glue_dump_tx_chn(tx_chn);
+ return 0;
+}
+
+struct k3_udma_glue_tx_channel *
+k3_udma_glue_request_tx_chn(struct device *dev, const char *name,
+ struct k3_udma_glue_tx_channel_cfg *cfg)
+{
+ struct k3_udma_glue_tx_channel *tx_chn;
+ int ret;
+
+ tx_chn = devm_kzalloc(dev, sizeof(*tx_chn), GFP_KERNEL);
+ if (!tx_chn)
+ return ERR_PTR(-ENOMEM);
+
+ tx_chn->common.dev = dev;
+ tx_chn->common.swdata_size = cfg->swdata_size;
+ tx_chn->tx_pause_on_err = cfg->tx_pause_on_err;
+ tx_chn->tx_filt_einfo = cfg->tx_filt_einfo;
+ tx_chn->tx_filt_pswords = cfg->tx_filt_pswords;
+ tx_chn->tx_supr_tdpkt = cfg->tx_supr_tdpkt;
+
+ /* parse of udmap channel */
+ ret = of_k3_udma_glue_parse_chn(dev->of_node, name,
+ &tx_chn->common, true);
+ if (ret)
+ goto err;
+
+ ret = k3_udma_glue_request_tx_chn_common(dev, tx_chn, cfg);
+ if (ret)
+ goto err;
+
return tx_chn;
err:
@@ -372,6 +407,41 @@ err:
}
EXPORT_SYMBOL_GPL(k3_udma_glue_request_tx_chn);
+struct k3_udma_glue_tx_channel *
+k3_udma_glue_request_tx_chn_for_thread_id(struct device *dev,
+ struct k3_udma_glue_tx_channel_cfg *cfg,
+ struct device_node *udmax_np, u32 thread_id)
+{
+ struct k3_udma_glue_tx_channel *tx_chn;
+ int ret;
+
+ tx_chn = devm_kzalloc(dev, sizeof(*tx_chn), GFP_KERNEL);
+ if (!tx_chn)
+ return ERR_PTR(-ENOMEM);
+
+ tx_chn->common.dev = dev;
+ tx_chn->common.swdata_size = cfg->swdata_size;
+ tx_chn->tx_pause_on_err = cfg->tx_pause_on_err;
+ tx_chn->tx_filt_einfo = cfg->tx_filt_einfo;
+ tx_chn->tx_filt_pswords = cfg->tx_filt_pswords;
+ tx_chn->tx_supr_tdpkt = cfg->tx_supr_tdpkt;
+
+ ret = of_k3_udma_glue_parse_chn_by_id(udmax_np, &tx_chn->common, true, thread_id);
+ if (ret)
+ goto err;
+
+ ret = k3_udma_glue_request_tx_chn_common(dev, tx_chn, cfg);
+ if (ret)
+ goto err;
+
+ return tx_chn;
+
+err:
+ k3_udma_glue_release_tx_chn(tx_chn);
+ return ERR_PTR(ret);
+}
+EXPORT_SYMBOL_GPL(k3_udma_glue_request_tx_chn_for_thread_id);
+
void k3_udma_glue_release_tx_chn(struct k3_udma_glue_tx_channel *tx_chn)
{
if (tx_chn->psil_paired) {
@@ -1000,12 +1070,59 @@ err:
return ERR_PTR(ret);
}
+static int
+k3_udma_glue_request_remote_rx_chn_common(struct k3_udma_glue_rx_channel *rx_chn,
+ struct k3_udma_glue_rx_channel_cfg *cfg,
+ struct device *dev)
+{
+ int ret, i;
+
+ rx_chn->common.hdesc_size = cppi5_hdesc_calc_size(rx_chn->common.epib,
+ rx_chn->common.psdata_size,
+ rx_chn->common.swdata_size);
+
+ rx_chn->flows = devm_kcalloc(dev, rx_chn->flow_num,
+ sizeof(*rx_chn->flows), GFP_KERNEL);
+ if (!rx_chn->flows)
+ return -ENOMEM;
+
+ rx_chn->common.chan_dev.class = &k3_udma_glue_devclass;
+ rx_chn->common.chan_dev.parent = xudma_get_device(rx_chn->common.udmax);
+ dev_set_name(&rx_chn->common.chan_dev, "rchan_remote-0x%04x-0x%02x",
+ rx_chn->common.src_thread, rx_chn->flow_id_base);
+ ret = device_register(&rx_chn->common.chan_dev);
+ if (ret) {
+ dev_err(dev, "Channel Device registration failed %d\n", ret);
+ put_device(&rx_chn->common.chan_dev);
+ rx_chn->common.chan_dev.parent = NULL;
+ return ret;
+ }
+
+ if (xudma_is_pktdma(rx_chn->common.udmax)) {
+ /* prepare the channel device as coherent */
+ rx_chn->common.chan_dev.dma_coherent = true;
+ dma_coerce_mask_and_coherent(&rx_chn->common.chan_dev,
+ DMA_BIT_MASK(48));
+ }
+
+ ret = k3_udma_glue_allocate_rx_flows(rx_chn, cfg);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < rx_chn->flow_num; i++)
+ rx_chn->flows[i].udma_rflow_id = rx_chn->flow_id_base + i;
+
+ k3_udma_glue_dump_rx_chn(rx_chn);
+
+ return 0;
+}
+
static struct k3_udma_glue_rx_channel *
k3_udma_glue_request_remote_rx_chn(struct device *dev, const char *name,
struct k3_udma_glue_rx_channel_cfg *cfg)
{
struct k3_udma_glue_rx_channel *rx_chn;
- int ret, i;
+ int ret;
if (cfg->flow_id_num <= 0 ||
cfg->flow_id_use_rxchan_id ||
@@ -1036,44 +1153,55 @@ k3_udma_glue_request_remote_rx_chn(struct device *dev, const char *name,
if (ret)
goto err;
- rx_chn->common.hdesc_size = cppi5_hdesc_calc_size(rx_chn->common.epib,
- rx_chn->common.psdata_size,
- rx_chn->common.swdata_size);
-
- rx_chn->flows = devm_kcalloc(dev, rx_chn->flow_num,
- sizeof(*rx_chn->flows), GFP_KERNEL);
- if (!rx_chn->flows) {
- ret = -ENOMEM;
+ ret = k3_udma_glue_request_remote_rx_chn_common(rx_chn, cfg, dev);
+ if (ret)
goto err;
- }
- rx_chn->common.chan_dev.class = &k3_udma_glue_devclass;
- rx_chn->common.chan_dev.parent = xudma_get_device(rx_chn->common.udmax);
- dev_set_name(&rx_chn->common.chan_dev, "rchan_remote-0x%04x",
- rx_chn->common.src_thread);
- ret = device_register(&rx_chn->common.chan_dev);
- if (ret) {
- dev_err(dev, "Channel Device registration failed %d\n", ret);
- put_device(&rx_chn->common.chan_dev);
- rx_chn->common.chan_dev.parent = NULL;
- goto err;
- }
+ return rx_chn;
- if (xudma_is_pktdma(rx_chn->common.udmax)) {
- /* prepare the channel device as coherent */
- rx_chn->common.chan_dev.dma_coherent = true;
- dma_coerce_mask_and_coherent(&rx_chn->common.chan_dev,
- DMA_BIT_MASK(48));
- }
+err:
+ k3_udma_glue_release_rx_chn(rx_chn);
+ return ERR_PTR(ret);
+}
- ret = k3_udma_glue_allocate_rx_flows(rx_chn, cfg);
+struct k3_udma_glue_rx_channel *
+k3_udma_glue_request_remote_rx_chn_for_thread_id(struct device *dev,
+ struct k3_udma_glue_rx_channel_cfg *cfg,
+ struct device_node *udmax_np, u32 thread_id)
+{
+ struct k3_udma_glue_rx_channel *rx_chn;
+ int ret;
+
+ if (cfg->flow_id_num <= 0 ||
+ cfg->flow_id_use_rxchan_id ||
+ cfg->def_flow_cfg ||
+ cfg->flow_id_base < 0)
+ return ERR_PTR(-EINVAL);
+
+ /*
+ * Remote RX channel is under control of Remote CPU core, so
+ * Linux can only request and manipulate by dedicated RX flows
+ */
+
+ rx_chn = devm_kzalloc(dev, sizeof(*rx_chn), GFP_KERNEL);
+ if (!rx_chn)
+ return ERR_PTR(-ENOMEM);
+
+ rx_chn->common.dev = dev;
+ rx_chn->common.swdata_size = cfg->swdata_size;
+ rx_chn->remote = true;
+ rx_chn->udma_rchan_id = -1;
+ rx_chn->flow_num = cfg->flow_id_num;
+ rx_chn->flow_id_base = cfg->flow_id_base;
+ rx_chn->psil_paired = false;
+
+ ret = of_k3_udma_glue_parse_chn_by_id(udmax_np, &rx_chn->common, false, thread_id);
if (ret)
goto err;
- for (i = 0; i < rx_chn->flow_num; i++)
- rx_chn->flows[i].udma_rflow_id = rx_chn->flow_id_base + i;
-
- k3_udma_glue_dump_rx_chn(rx_chn);
+ ret = k3_udma_glue_request_remote_rx_chn_common(rx_chn, cfg, dev);
+ if (ret)
+ goto err;
return rx_chn;
@@ -1081,6 +1209,7 @@ err:
k3_udma_glue_release_rx_chn(rx_chn);
return ERR_PTR(ret);
}
+EXPORT_SYMBOL_GPL(k3_udma_glue_request_remote_rx_chn_for_thread_id);
struct k3_udma_glue_rx_channel *
k3_udma_glue_request_rx_chn(struct device *dev, const char *name,
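
The two new *_for_thread_id exports let a client bind a channel directly to a PSI-L thread ID (for instance the CSI2RX source threads added in k3-psil-j721s2.c above) instead of resolving a named dmas entry. A hypothetical call site, assuming udmax_np already points at the DMA controller node and cfg is a populated struct k3_udma_glue_rx_channel_cfg:

	rx_chn = k3_udma_glue_request_remote_rx_chn_for_thread_id(dev, &cfg,
								  udmax_np,
								  0x4940);
	if (IS_ERR(rx_chn))
		return PTR_ERR(rx_chn);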
diff --git a/drivers/dma/xilinx/xdma.c b/drivers/dma/xilinx/xdma.c
index 313b217388..ae6e06057a 100644
--- a/drivers/dma/xilinx/xdma.c
+++ b/drivers/dma/xilinx/xdma.c
@@ -885,11 +885,11 @@ static irqreturn_t xdma_channel_isr(int irq, void *dev_id)
u32 st;
bool repeat_tx;
+ spin_lock(&xchan->vchan.lock);
+
if (xchan->stop_requested)
complete(&xchan->last_interrupt);
- spin_lock(&xchan->vchan.lock);
-
/* get submitted request */
vd = vchan_next_desc(&xchan->vchan);
if (!vd)
diff --git a/drivers/dma/xilinx/xilinx_dma.c b/drivers/dma/xilinx/xilinx_dma.c
index e40696f6f8..5eb51ae93e 100644
--- a/drivers/dma/xilinx/xilinx_dma.c
+++ b/drivers/dma/xilinx/xilinx_dma.c
@@ -112,7 +112,9 @@
/* Register Direct Mode Registers */
#define XILINX_DMA_REG_VSIZE 0x0000
+#define XILINX_DMA_VSIZE_MASK GENMASK(12, 0)
#define XILINX_DMA_REG_HSIZE 0x0004
+#define XILINX_DMA_HSIZE_MASK GENMASK(15, 0)
#define XILINX_DMA_REG_FRMDLY_STRIDE 0x0008
#define XILINX_DMA_FRMDLY_STRIDE_FRMDLY_SHIFT 24
@@ -2050,6 +2052,10 @@ xilinx_vdma_dma_prep_interleaved(struct dma_chan *dchan,
if (!xt->numf || !xt->sgl[0].size)
return NULL;
+ if (xt->numf & ~XILINX_DMA_VSIZE_MASK ||
+ xt->sgl[0].size & ~XILINX_DMA_HSIZE_MASK)
+ return NULL;
+
if (xt->frame_size != 1)
return NULL;
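
The new masks bound what the hardware registers can hold: VSIZE is a 13-bit field (at most 8191 lines per frame) and HSIZE a 16-bit field (at most 65535 bytes per line), so oversized interleaved templates are now rejected at prep time rather than being programmed truncated. A worked check:

	/* xt->numf = 10000 = 0x2710:
	 *   0x2710 & ~GENMASK(12, 0) == 0x2000 != 0 -> prep returns NULL
	 * xt->numf = 8191 = 0x1fff:
	 *   0x1fff & ~GENMASK(12, 0) == 0           -> accepted
	 */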