Diffstat (limited to 'drivers/dma')
-rw-r--r--  drivers/dma/Kconfig               |  14
-rw-r--r--  drivers/dma/Makefile              |   1
-rw-r--r--  drivers/dma/at_hdmac.c            |  32
-rw-r--r--  drivers/dma/dma-axi-dmac.c        | 280
-rw-r--r--  drivers/dma/dmatest.c             |  17
-rw-r--r--  drivers/dma/fsl-qdma.c            |  26
-rw-r--r--  drivers/dma/idma64.c              |   4
-rw-r--r--  drivers/dma/idxd/Makefile         |   2
-rw-r--r--  drivers/dma/idxd/bus.c            |   6
-rw-r--r--  drivers/dma/idxd/cdev.c           |  15
-rw-r--r--  drivers/dma/idxd/debugfs.c        |   4
-rw-r--r--  drivers/dma/idxd/defaults.c       |  53
-rw-r--r--  drivers/dma/idxd/device.c         |  21
-rw-r--r--  drivers/dma/idxd/dma.c            |   9
-rw-r--r--  drivers/dma/idxd/idxd.h           |  85
-rw-r--r--  drivers/dma/idxd/init.c           |   9
-rw-r--r--  drivers/dma/idxd/irq.c            |  16
-rw-r--r--  drivers/dma/idxd/perfmon.c        |   9
-rw-r--r--  drivers/dma/idxd/submit.c         |   9
-rw-r--r--  drivers/dma/imx-sdma.c            |   4
-rw-r--r--  drivers/dma/ls2x-apb-dma.c        | 705
-rw-r--r--  drivers/dma/milbeaut-hdmac.c      |  17
-rw-r--r--  drivers/dma/milbeaut-xdmac.c      |  17
-rw-r--r--  drivers/dma/owl-dma.c             |   4
-rw-r--r--  drivers/dma/sf-pdma/sf-pdma.c     |  44
-rw-r--r--  drivers/dma/sf-pdma/sf-pdma.h     |   8
-rw-r--r--  drivers/dma/sh/rz-dmac.c          |   8
-rw-r--r--  drivers/dma/sh/usb-dmac.c         |  10
-rw-r--r--  drivers/dma/ste_dma40.c           |  12
-rw-r--r--  drivers/dma/tegra186-gpc-dma.c    |  11
-rw-r--r--  drivers/dma/tegra210-adma.c       |  35
-rw-r--r--  drivers/dma/ti/Makefile           |   3
-rw-r--r--  drivers/dma/ti/k3-psil-am62p.c    | 325
-rw-r--r--  drivers/dma/ti/k3-psil-priv.h     |   1
-rw-r--r--  drivers/dma/ti/k3-psil.c          |   2
-rw-r--r--  drivers/dma/ti/k3-udma.c          |   2
-rw-r--r--  drivers/dma/uniphier-mdmac.c      |  17
-rw-r--r--  drivers/dma/uniphier-xdmac.c      |  17
-rw-r--r--  drivers/dma/xilinx/xdma-regs.h    |  33
-rw-r--r--  drivers/dma/xilinx/xdma.c         | 361
-rw-r--r--  drivers/dma/xilinx/xilinx_dpdma.c |  15
41 files changed, 1930 insertions(+), 333 deletions(-)
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index de6eb370d4..002a5ec806 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -378,6 +378,20 @@ config LPC18XX_DMAMUX
Enable support for DMA on NXP LPC18xx/43xx platforms
with PL080 and multiplexed DMA request lines.
+config LS2X_APB_DMA
+ tristate "Loongson LS2X APB DMA support"
+ depends on LOONGARCH || COMPILE_TEST
+ select DMA_ENGINE
+ select DMA_VIRTUAL_CHANNELS
+ help
+ Support for the Loongson LS2X APB DMA controller driver. The
+ DMA controller has a single DMA channel which can be
+ configured for different peripherals on the APB bus, such as
+ audio, NAND and SDIO.
+
+ This DMA controller transfers data from memory to a peripheral FIFO.
+ It does not support memory-to-memory transfers.
+
config MCF_EDMA
tristate "Freescale eDMA engine support, ColdFire mcf5441x SoCs"
depends on M5441x || COMPILE_TEST
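For context on the new Kconfig entry: a peripheral driver sitting on the APB bus would drive this single LS2X channel through the generic dmaengine slave API. A minimal, hypothetical client sketch follows; the device, the request-line name "tx" and the FIFO address are illustrative, not part of this series.

#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>

/* Hypothetical client: push one mapped buffer from memory to a device FIFO. */
static int my_dev_start_tx(struct device *dev, dma_addr_t buf, size_t len,
			   dma_addr_t fifo_addr)
{
	struct dma_slave_config cfg = {
		.direction	= DMA_MEM_TO_DEV,
		.dst_addr	= fifo_addr,
		.dst_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
		.dst_maxburst	= 4,
	};
	struct dma_async_tx_descriptor *desc;
	struct dma_chan *chan;

	chan = dma_request_chan(dev, "tx");	/* request line named in DT */
	if (IS_ERR(chan))
		return PTR_ERR(chan);

	dmaengine_slave_config(chan, &cfg);
	desc = dmaengine_prep_slave_single(chan, buf, len, DMA_MEM_TO_DEV,
					   DMA_PREP_INTERRUPT);
	if (!desc) {
		dma_release_channel(chan);
		return -ENOMEM;
	}

	dmaengine_submit(desc);
	dma_async_issue_pending(chan);
	return 0;
}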
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
index 83553a97a0..dfd40d14e4 100644
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
@@ -48,6 +48,7 @@ obj-$(CONFIG_INTEL_IOATDMA) += ioat/
obj-y += idxd/
obj-$(CONFIG_K3_DMA) += k3dma.o
obj-$(CONFIG_LPC18XX_DMAMUX) += lpc18xx-dmamux.o
+obj-$(CONFIG_LS2X_APB_DMA) += ls2x-apb-dma.o
obj-$(CONFIG_MILBEAUT_HDMAC) += milbeaut-hdmac.o
obj-$(CONFIG_MILBEAUT_XDMAC) += milbeaut-xdmac.o
obj-$(CONFIG_MMP_PDMA) += mmp_pdma.o
diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c
index fb89ecbf0c..40052d1bd0 100644
--- a/drivers/dma/at_hdmac.c
+++ b/drivers/dma/at_hdmac.c
@@ -222,8 +222,14 @@ struct atdma_sg {
* @vd: pointer to the virtual dma descriptor.
* @atchan: pointer to the atmel dma channel.
* @total_len: total transaction byte count
- * @sg_len: number of sg entries.
+ * @sglen: number of sg entries.
* @sg: array of sgs.
+ * @boundary: number of transfers to perform before the automatic address increment operation
+ * @dst_hole: value to add to the destination address when the boundary has been reached
+ * @src_hole: value to add to the source address when the boundary has been reached
+ * @memset_buffer: buffer used for the memset operation
+ * @memset_paddr: physical address of the buffer used for the memset operation
+ * @memset_vaddr: virtual address of the buffer used for the memset operation
*/
struct at_desc {
struct virt_dma_desc vd;
@@ -245,7 +251,10 @@ struct at_desc {
/*-- Channels --------------------------------------------------------*/
/**
- * atc_status - information bits stored in channel status flag
+ * enum atc_status - information bits stored in channel status flag
+ *
+ * @ATC_IS_PAUSED: If channel is paused
+ * @ATC_IS_CYCLIC: If channel is cyclic
*
* Manipulated with atomic operations.
*/
@@ -282,7 +291,6 @@ struct at_dma_chan {
u32 save_cfg;
u32 save_dscr;
struct dma_slave_config dma_sconfig;
- bool cyclic;
struct at_desc *desc;
};
@@ -328,12 +336,12 @@ static inline u8 convert_buswidth(enum dma_slave_buswidth addr_width)
/**
* struct at_dma - internal representation of an Atmel HDMA Controller
* @dma_device: dmaengine dma_device object members
- * @atdma_devtype: identifier of DMA controller compatibility
- * @ch_regs: memory mapped register base
+ * @regs: memory mapped register base
* @clk: dma controller clock
* @save_imr: interrupt mask register that is saved on suspend/resume cycle
* @all_chan_mask: all channels available in a mask
* @lli_pool: hw lli table
+ * @memset_pool: hw memset pool
* @chan: channels table to store at_dma_chan structures
*/
struct at_dma {
@@ -626,6 +634,9 @@ static inline u32 atc_calc_bytes_left(u32 current_len, u32 ctrla)
/**
* atc_get_llis_residue - Get residue for a hardware linked list transfer
+ * @atchan: pointer to an atmel hdmac channel.
+ * @desc: pointer to the descriptor for which the residue is calculated.
+ * @residue: residue to be set to dma_tx_state.
*
* Calculate the residue by removing the length of the Linked List Item (LLI)
* already transferred from the total length. To get the current LLI we can use
@@ -661,10 +672,8 @@ static inline u32 atc_calc_bytes_left(u32 current_len, u32 ctrla)
* two DSCR values are different, we read again the CTRLA then the DSCR till two
* consecutive read values from DSCR are equal or till the maximum trials is
reached. This algorithm is very unlikely not to find a stable value for DSCR.
- * @atchan: pointer to an atmel hdmac channel.
- * @desc: pointer to the descriptor for which the residue is calculated.
- * @residue: residue to be set to dma_tx_state.
- * Returns 0 on success, -errno otherwise.
+ *
+ * Returns: %0 on success, -errno otherwise.
*/
static int atc_get_llis_residue(struct at_dma_chan *atchan,
struct at_desc *desc, u32 *residue)
@@ -731,7 +740,8 @@ static int atc_get_llis_residue(struct at_dma_chan *atchan,
* @chan: DMA channel
* @cookie: transaction identifier to check status of
* @residue: residue to be updated.
- * Return 0 on success, -errono otherwise.
+ *
+ * Return: %0 on success, -errno otherwise.
*/
static int atc_get_residue(struct dma_chan *chan, dma_cookie_t cookie,
u32 *residue)
@@ -1710,7 +1720,7 @@ static void atc_issue_pending(struct dma_chan *chan)
* atc_alloc_chan_resources - allocate resources for DMA channel
* @chan: allocate descriptor resources for this channel
*
- * return - the number of allocated descriptors
+ * Return: the number of allocated descriptors
*/
static int atc_alloc_chan_resources(struct dma_chan *chan)
{
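The atc_get_llis_residue() kernel-doc above describes a read-until-stable scheme: CTRLA and DSCR are only trusted as a pair once two consecutive DSCR reads match, bounded by a maximum number of trials. A rough sketch of that shape (a sketch only, using the driver's channel_readl() helper; it is not the function body added by this patch):

static int read_stable_ctrla_dscr(struct at_dma_chan *atchan,
				  u32 *ctrla, u32 *dscr)
{
	u32 new_dscr;
	int i;

	*dscr = channel_readl(atchan, DSCR);
	for (i = 0; i < ATC_MAX_DSCR_TRIALS; i++) {
		*ctrla = channel_readl(atchan, CTRLA);	/* may race with an LLI switch */
		new_dscr = channel_readl(atchan, DSCR);
		if (new_dscr == *dscr)			/* stable: the pair belongs together */
			return 0;
		*dscr = new_dscr;			/* LLI moved under us, retry */
	}
	return -ETIMEDOUT;
}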
diff --git a/drivers/dma/dma-axi-dmac.c b/drivers/dma/dma-axi-dmac.c
index 2457a420c1..4e339c04fc 100644
--- a/drivers/dma/dma-axi-dmac.c
+++ b/drivers/dma/dma-axi-dmac.c
@@ -81,9 +81,13 @@
#define AXI_DMAC_REG_CURRENT_DEST_ADDR 0x438
#define AXI_DMAC_REG_PARTIAL_XFER_LEN 0x44c
#define AXI_DMAC_REG_PARTIAL_XFER_ID 0x450
+#define AXI_DMAC_REG_CURRENT_SG_ID 0x454
+#define AXI_DMAC_REG_SG_ADDRESS 0x47c
+#define AXI_DMAC_REG_SG_ADDRESS_HIGH 0x4bc
#define AXI_DMAC_CTRL_ENABLE BIT(0)
#define AXI_DMAC_CTRL_PAUSE BIT(1)
+#define AXI_DMAC_CTRL_ENABLE_SG BIT(2)
#define AXI_DMAC_IRQ_SOT BIT(0)
#define AXI_DMAC_IRQ_EOT BIT(1)
@@ -97,20 +101,35 @@
/* The maximum ID allocated by the hardware is 31 */
#define AXI_DMAC_SG_UNUSED 32U
+/* Flags for axi_dmac_hw_desc.flags */
+#define AXI_DMAC_HW_FLAG_LAST BIT(0)
+#define AXI_DMAC_HW_FLAG_IRQ BIT(1)
+
+struct axi_dmac_hw_desc {
+ u32 flags;
+ u32 id;
+ u64 dest_addr;
+ u64 src_addr;
+ u64 next_sg_addr;
+ u32 y_len;
+ u32 x_len;
+ u32 src_stride;
+ u32 dst_stride;
+ u64 __pad[2];
+};
+
struct axi_dmac_sg {
- dma_addr_t src_addr;
- dma_addr_t dest_addr;
- unsigned int x_len;
- unsigned int y_len;
- unsigned int dest_stride;
- unsigned int src_stride;
- unsigned int id;
unsigned int partial_len;
bool schedule_when_free;
+
+ struct axi_dmac_hw_desc *hw;
+ dma_addr_t hw_phys;
};
struct axi_dmac_desc {
struct virt_dma_desc vdesc;
+ struct axi_dmac_chan *chan;
+
bool cyclic;
bool have_partial_xfer;
@@ -139,6 +158,7 @@ struct axi_dmac_chan {
bool hw_partial_xfer;
bool hw_cyclic;
bool hw_2d;
+ bool hw_sg;
};
struct axi_dmac {
@@ -213,9 +233,11 @@ static void axi_dmac_start_transfer(struct axi_dmac_chan *chan)
unsigned int flags = 0;
unsigned int val;
- val = axi_dmac_read(dmac, AXI_DMAC_REG_START_TRANSFER);
- if (val) /* Queue is full, wait for the next SOT IRQ */
- return;
+ if (!chan->hw_sg) {
+ val = axi_dmac_read(dmac, AXI_DMAC_REG_START_TRANSFER);
+ if (val) /* Queue is full, wait for the next SOT IRQ */
+ return;
+ }
desc = chan->next_desc;
@@ -229,14 +251,15 @@ static void axi_dmac_start_transfer(struct axi_dmac_chan *chan)
sg = &desc->sg[desc->num_submitted];
/* Already queued in cyclic mode. Wait for it to finish */
- if (sg->id != AXI_DMAC_SG_UNUSED) {
+ if (sg->hw->id != AXI_DMAC_SG_UNUSED) {
sg->schedule_when_free = true;
return;
}
- desc->num_submitted++;
- if (desc->num_submitted == desc->num_sgs ||
- desc->have_partial_xfer) {
+ if (chan->hw_sg) {
+ chan->next_desc = NULL;
+ } else if (++desc->num_submitted == desc->num_sgs ||
+ desc->have_partial_xfer) {
if (desc->cyclic)
desc->num_submitted = 0; /* Start again */
else
@@ -246,32 +269,42 @@ static void axi_dmac_start_transfer(struct axi_dmac_chan *chan)
chan->next_desc = desc;
}
- sg->id = axi_dmac_read(dmac, AXI_DMAC_REG_TRANSFER_ID);
+ sg->hw->id = axi_dmac_read(dmac, AXI_DMAC_REG_TRANSFER_ID);
- if (axi_dmac_dest_is_mem(chan)) {
- axi_dmac_write(dmac, AXI_DMAC_REG_DEST_ADDRESS, sg->dest_addr);
- axi_dmac_write(dmac, AXI_DMAC_REG_DEST_STRIDE, sg->dest_stride);
- }
+ if (!chan->hw_sg) {
+ if (axi_dmac_dest_is_mem(chan)) {
+ axi_dmac_write(dmac, AXI_DMAC_REG_DEST_ADDRESS, sg->hw->dest_addr);
+ axi_dmac_write(dmac, AXI_DMAC_REG_DEST_STRIDE, sg->hw->dst_stride);
+ }
- if (axi_dmac_src_is_mem(chan)) {
- axi_dmac_write(dmac, AXI_DMAC_REG_SRC_ADDRESS, sg->src_addr);
- axi_dmac_write(dmac, AXI_DMAC_REG_SRC_STRIDE, sg->src_stride);
+ if (axi_dmac_src_is_mem(chan)) {
+ axi_dmac_write(dmac, AXI_DMAC_REG_SRC_ADDRESS, sg->hw->src_addr);
+ axi_dmac_write(dmac, AXI_DMAC_REG_SRC_STRIDE, sg->hw->src_stride);
+ }
}
/*
* If the hardware supports cyclic transfers and there is no callback to
- * call and only a single segment, enable hw cyclic mode to avoid
- * unnecessary interrupts.
+ * call, enable hw cyclic mode to avoid unnecessary interrupts.
*/
- if (chan->hw_cyclic && desc->cyclic && !desc->vdesc.tx.callback &&
- desc->num_sgs == 1)
- flags |= AXI_DMAC_FLAG_CYCLIC;
+ if (chan->hw_cyclic && desc->cyclic && !desc->vdesc.tx.callback) {
+ if (chan->hw_sg)
+ desc->sg[desc->num_sgs - 1].hw->flags &= ~AXI_DMAC_HW_FLAG_IRQ;
+ else if (desc->num_sgs == 1)
+ flags |= AXI_DMAC_FLAG_CYCLIC;
+ }
if (chan->hw_partial_xfer)
flags |= AXI_DMAC_FLAG_PARTIAL_REPORT;
- axi_dmac_write(dmac, AXI_DMAC_REG_X_LENGTH, sg->x_len - 1);
- axi_dmac_write(dmac, AXI_DMAC_REG_Y_LENGTH, sg->y_len - 1);
+ if (chan->hw_sg) {
+ axi_dmac_write(dmac, AXI_DMAC_REG_SG_ADDRESS, (u32)sg->hw_phys);
+ axi_dmac_write(dmac, AXI_DMAC_REG_SG_ADDRESS_HIGH,
+ (u64)sg->hw_phys >> 32);
+ } else {
+ axi_dmac_write(dmac, AXI_DMAC_REG_X_LENGTH, sg->hw->x_len);
+ axi_dmac_write(dmac, AXI_DMAC_REG_Y_LENGTH, sg->hw->y_len);
+ }
axi_dmac_write(dmac, AXI_DMAC_REG_FLAGS, flags);
axi_dmac_write(dmac, AXI_DMAC_REG_START_TRANSFER, 1);
}
@@ -286,9 +319,9 @@ static inline unsigned int axi_dmac_total_sg_bytes(struct axi_dmac_chan *chan,
struct axi_dmac_sg *sg)
{
if (chan->hw_2d)
- return sg->x_len * sg->y_len;
+ return (sg->hw->x_len + 1) * (sg->hw->y_len + 1);
else
- return sg->x_len;
+ return (sg->hw->x_len + 1);
}
static void axi_dmac_dequeue_partial_xfers(struct axi_dmac_chan *chan)
@@ -307,9 +340,9 @@ static void axi_dmac_dequeue_partial_xfers(struct axi_dmac_chan *chan)
list_for_each_entry(desc, &chan->active_descs, vdesc.node) {
for (i = 0; i < desc->num_sgs; i++) {
sg = &desc->sg[i];
- if (sg->id == AXI_DMAC_SG_UNUSED)
+ if (sg->hw->id == AXI_DMAC_SG_UNUSED)
continue;
- if (sg->id == id) {
+ if (sg->hw->id == id) {
desc->have_partial_xfer = true;
sg->partial_len = len;
found_sg = true;
@@ -348,6 +381,9 @@ static void axi_dmac_compute_residue(struct axi_dmac_chan *chan,
rslt->result = DMA_TRANS_NOERROR;
rslt->residue = 0;
+ if (chan->hw_sg)
+ return;
+
/*
* We get here if the last completed segment is partial, which
* means we can compute the residue from that segment onwards
@@ -374,36 +410,47 @@ static bool axi_dmac_transfer_done(struct axi_dmac_chan *chan,
(completed_transfers & AXI_DMAC_FLAG_PARTIAL_XFER_DONE))
axi_dmac_dequeue_partial_xfers(chan);
- do {
- sg = &active->sg[active->num_completed];
- if (sg->id == AXI_DMAC_SG_UNUSED) /* Not yet submitted */
- break;
- if (!(BIT(sg->id) & completed_transfers))
- break;
- active->num_completed++;
- sg->id = AXI_DMAC_SG_UNUSED;
- if (sg->schedule_when_free) {
- sg->schedule_when_free = false;
- start_next = true;
+ if (chan->hw_sg) {
+ if (active->cyclic) {
+ vchan_cyclic_callback(&active->vdesc);
+ } else {
+ list_del(&active->vdesc.node);
+ vchan_cookie_complete(&active->vdesc);
+ active = axi_dmac_active_desc(chan);
+ start_next = !!active;
}
+ } else {
+ do {
+ sg = &active->sg[active->num_completed];
+ if (sg->hw->id == AXI_DMAC_SG_UNUSED) /* Not yet submitted */
+ break;
+ if (!(BIT(sg->hw->id) & completed_transfers))
+ break;
+ active->num_completed++;
+ sg->hw->id = AXI_DMAC_SG_UNUSED;
+ if (sg->schedule_when_free) {
+ sg->schedule_when_free = false;
+ start_next = true;
+ }
- if (sg->partial_len)
- axi_dmac_compute_residue(chan, active);
+ if (sg->partial_len)
+ axi_dmac_compute_residue(chan, active);
- if (active->cyclic)
- vchan_cyclic_callback(&active->vdesc);
+ if (active->cyclic)
+ vchan_cyclic_callback(&active->vdesc);
- if (active->num_completed == active->num_sgs ||
- sg->partial_len) {
- if (active->cyclic) {
- active->num_completed = 0; /* wrap around */
- } else {
- list_del(&active->vdesc.node);
- vchan_cookie_complete(&active->vdesc);
- active = axi_dmac_active_desc(chan);
+ if (active->num_completed == active->num_sgs ||
+ sg->partial_len) {
+ if (active->cyclic) {
+ active->num_completed = 0; /* wrap around */
+ } else {
+ list_del(&active->vdesc.node);
+ vchan_cookie_complete(&active->vdesc);
+ active = axi_dmac_active_desc(chan);
+ }
}
- }
- } while (active);
+ } while (active);
+ }
return start_next;
}
@@ -467,8 +514,12 @@ static void axi_dmac_issue_pending(struct dma_chan *c)
struct axi_dmac_chan *chan = to_axi_dmac_chan(c);
struct axi_dmac *dmac = chan_to_axi_dmac(chan);
unsigned long flags;
+ u32 ctrl = AXI_DMAC_CTRL_ENABLE;
- axi_dmac_write(dmac, AXI_DMAC_REG_CTRL, AXI_DMAC_CTRL_ENABLE);
+ if (chan->hw_sg)
+ ctrl |= AXI_DMAC_CTRL_ENABLE_SG;
+
+ axi_dmac_write(dmac, AXI_DMAC_REG_CTRL, ctrl);
spin_lock_irqsave(&chan->vchan.lock, flags);
if (vchan_issue_pending(&chan->vchan))
@@ -476,22 +527,58 @@ static void axi_dmac_issue_pending(struct dma_chan *c)
spin_unlock_irqrestore(&chan->vchan.lock, flags);
}
-static struct axi_dmac_desc *axi_dmac_alloc_desc(unsigned int num_sgs)
+static struct axi_dmac_desc *
+axi_dmac_alloc_desc(struct axi_dmac_chan *chan, unsigned int num_sgs)
{
+ struct axi_dmac *dmac = chan_to_axi_dmac(chan);
+ struct device *dev = dmac->dma_dev.dev;
+ struct axi_dmac_hw_desc *hws;
struct axi_dmac_desc *desc;
+ dma_addr_t hw_phys;
unsigned int i;
desc = kzalloc(struct_size(desc, sg, num_sgs), GFP_NOWAIT);
if (!desc)
return NULL;
desc->num_sgs = num_sgs;
+ desc->chan = chan;
+
+ hws = dma_alloc_coherent(dev, PAGE_ALIGN(num_sgs * sizeof(*hws)),
+ &hw_phys, GFP_ATOMIC);
+ if (!hws) {
+ kfree(desc);
+ return NULL;
+ }
- for (i = 0; i < num_sgs; i++)
- desc->sg[i].id = AXI_DMAC_SG_UNUSED;
+ for (i = 0; i < num_sgs; i++) {
+ desc->sg[i].hw = &hws[i];
+ desc->sg[i].hw_phys = hw_phys + i * sizeof(*hws);
+
+ hws[i].id = AXI_DMAC_SG_UNUSED;
+ hws[i].flags = 0;
+
+ /* Link hardware descriptors */
+ hws[i].next_sg_addr = hw_phys + (i + 1) * sizeof(*hws);
+ }
+
+ /* The last hardware descriptor will trigger an interrupt */
+ desc->sg[num_sgs - 1].hw->flags = AXI_DMAC_HW_FLAG_LAST | AXI_DMAC_HW_FLAG_IRQ;
return desc;
}
+static void axi_dmac_free_desc(struct axi_dmac_desc *desc)
+{
+ struct axi_dmac *dmac = chan_to_axi_dmac(desc->chan);
+ struct device *dev = dmac->dma_dev.dev;
+ struct axi_dmac_hw_desc *hw = desc->sg[0].hw;
+ dma_addr_t hw_phys = desc->sg[0].hw_phys;
+
+ dma_free_coherent(dev, PAGE_ALIGN(desc->num_sgs * sizeof(*hw)),
+ hw, hw_phys);
+ kfree(desc);
+}
+
static struct axi_dmac_sg *axi_dmac_fill_linear_sg(struct axi_dmac_chan *chan,
enum dma_transfer_direction direction, dma_addr_t addr,
unsigned int num_periods, unsigned int period_len,
@@ -508,26 +595,24 @@ static struct axi_dmac_sg *axi_dmac_fill_linear_sg(struct axi_dmac_chan *chan,
segment_size = ((segment_size - 1) | chan->length_align_mask) + 1;
for (i = 0; i < num_periods; i++) {
- len = period_len;
-
- while (len > segment_size) {
+ for (len = period_len; len > segment_size; sg++) {
if (direction == DMA_DEV_TO_MEM)
- sg->dest_addr = addr;
+ sg->hw->dest_addr = addr;
else
- sg->src_addr = addr;
- sg->x_len = segment_size;
- sg->y_len = 1;
- sg++;
+ sg->hw->src_addr = addr;
+ sg->hw->x_len = segment_size - 1;
+ sg->hw->y_len = 0;
+ sg->hw->flags = 0;
addr += segment_size;
len -= segment_size;
}
if (direction == DMA_DEV_TO_MEM)
- sg->dest_addr = addr;
+ sg->hw->dest_addr = addr;
else
- sg->src_addr = addr;
- sg->x_len = len;
- sg->y_len = 1;
+ sg->hw->src_addr = addr;
+ sg->hw->x_len = len - 1;
+ sg->hw->y_len = 0;
sg++;
addr += len;
}
@@ -554,7 +639,7 @@ static struct dma_async_tx_descriptor *axi_dmac_prep_slave_sg(
for_each_sg(sgl, sg, sg_len, i)
num_sgs += DIV_ROUND_UP(sg_dma_len(sg), chan->max_length);
- desc = axi_dmac_alloc_desc(num_sgs);
+ desc = axi_dmac_alloc_desc(chan, num_sgs);
if (!desc)
return NULL;
@@ -563,7 +648,7 @@ static struct dma_async_tx_descriptor *axi_dmac_prep_slave_sg(
for_each_sg(sgl, sg, sg_len, i) {
if (!axi_dmac_check_addr(chan, sg_dma_address(sg)) ||
!axi_dmac_check_len(chan, sg_dma_len(sg))) {
- kfree(desc);
+ axi_dmac_free_desc(desc);
return NULL;
}
@@ -583,7 +668,7 @@ static struct dma_async_tx_descriptor *axi_dmac_prep_dma_cyclic(
{
struct axi_dmac_chan *chan = to_axi_dmac_chan(c);
struct axi_dmac_desc *desc;
- unsigned int num_periods, num_segments;
+ unsigned int num_periods, num_segments, num_sgs;
if (direction != chan->direction)
return NULL;
@@ -597,11 +682,16 @@ static struct dma_async_tx_descriptor *axi_dmac_prep_dma_cyclic(
num_periods = buf_len / period_len;
num_segments = DIV_ROUND_UP(period_len, chan->max_length);
+ num_sgs = num_periods * num_segments;
- desc = axi_dmac_alloc_desc(num_periods * num_segments);
+ desc = axi_dmac_alloc_desc(chan, num_sgs);
if (!desc)
return NULL;
+ /* Chain the last descriptor to the first, and remove its "last" flag */
+ desc->sg[num_sgs - 1].hw->next_sg_addr = desc->sg[0].hw_phys;
+ desc->sg[num_sgs - 1].hw->flags &= ~AXI_DMAC_HW_FLAG_LAST;
+
axi_dmac_fill_linear_sg(chan, direction, buf_addr, num_periods,
period_len, desc->sg);
@@ -653,26 +743,26 @@ static struct dma_async_tx_descriptor *axi_dmac_prep_interleaved(
return NULL;
}
- desc = axi_dmac_alloc_desc(1);
+ desc = axi_dmac_alloc_desc(chan, 1);
if (!desc)
return NULL;
if (axi_dmac_src_is_mem(chan)) {
- desc->sg[0].src_addr = xt->src_start;
- desc->sg[0].src_stride = xt->sgl[0].size + src_icg;
+ desc->sg[0].hw->src_addr = xt->src_start;
+ desc->sg[0].hw->src_stride = xt->sgl[0].size + src_icg;
}
if (axi_dmac_dest_is_mem(chan)) {
- desc->sg[0].dest_addr = xt->dst_start;
- desc->sg[0].dest_stride = xt->sgl[0].size + dst_icg;
+ desc->sg[0].hw->dest_addr = xt->dst_start;
+ desc->sg[0].hw->dst_stride = xt->sgl[0].size + dst_icg;
}
if (chan->hw_2d) {
- desc->sg[0].x_len = xt->sgl[0].size;
- desc->sg[0].y_len = xt->numf;
+ desc->sg[0].hw->x_len = xt->sgl[0].size - 1;
+ desc->sg[0].hw->y_len = xt->numf - 1;
} else {
- desc->sg[0].x_len = xt->sgl[0].size * xt->numf;
- desc->sg[0].y_len = 1;
+ desc->sg[0].hw->x_len = xt->sgl[0].size * xt->numf - 1;
+ desc->sg[0].hw->y_len = 0;
}
if (flags & DMA_CYCLIC)
@@ -688,7 +778,7 @@ static void axi_dmac_free_chan_resources(struct dma_chan *c)
static void axi_dmac_desc_free(struct virt_dma_desc *vdesc)
{
- kfree(container_of(vdesc, struct axi_dmac_desc, vdesc));
+ axi_dmac_free_desc(to_axi_dmac_desc(vdesc));
}
static bool axi_dmac_regmap_rdwr(struct device *dev, unsigned int reg)
@@ -714,6 +804,9 @@ static bool axi_dmac_regmap_rdwr(struct device *dev, unsigned int reg)
case AXI_DMAC_REG_CURRENT_DEST_ADDR:
case AXI_DMAC_REG_PARTIAL_XFER_LEN:
case AXI_DMAC_REG_PARTIAL_XFER_ID:
+ case AXI_DMAC_REG_CURRENT_SG_ID:
+ case AXI_DMAC_REG_SG_ADDRESS:
+ case AXI_DMAC_REG_SG_ADDRESS_HIGH:
return true;
default:
return false;
@@ -866,6 +959,10 @@ static int axi_dmac_detect_caps(struct axi_dmac *dmac, unsigned int version)
if (axi_dmac_read(dmac, AXI_DMAC_REG_FLAGS) == AXI_DMAC_FLAG_CYCLIC)
chan->hw_cyclic = true;
+ axi_dmac_write(dmac, AXI_DMAC_REG_SG_ADDRESS, 0xffffffff);
+ if (axi_dmac_read(dmac, AXI_DMAC_REG_SG_ADDRESS))
+ chan->hw_sg = true;
+
axi_dmac_write(dmac, AXI_DMAC_REG_Y_LENGTH, 1);
if (axi_dmac_read(dmac, AXI_DMAC_REG_Y_LENGTH) == 1)
chan->hw_2d = true;
@@ -911,6 +1008,7 @@ static int axi_dmac_probe(struct platform_device *pdev)
struct axi_dmac *dmac;
struct regmap *regmap;
unsigned int version;
+ u32 irq_mask = 0;
int ret;
dmac = devm_kzalloc(&pdev->dev, sizeof(*dmac), GFP_KERNEL);
@@ -966,6 +1064,7 @@ static int axi_dmac_probe(struct platform_device *pdev)
dma_dev->dst_addr_widths = BIT(dmac->chan.dest_width);
dma_dev->directions = BIT(dmac->chan.direction);
dma_dev->residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;
+ dma_dev->max_sg_burst = 31; /* 31 SGs maximum in one burst */
INIT_LIST_HEAD(&dma_dev->channels);
dmac->chan.vchan.desc_free = axi_dmac_desc_free;
@@ -977,7 +1076,10 @@ static int axi_dmac_probe(struct platform_device *pdev)
dma_dev->copy_align = (dmac->chan.address_align_mask + 1);
- axi_dmac_write(dmac, AXI_DMAC_REG_IRQ_MASK, 0x00);
+ if (dmac->chan.hw_sg)
+ irq_mask |= AXI_DMAC_IRQ_SOT;
+
+ axi_dmac_write(dmac, AXI_DMAC_REG_IRQ_MASK, irq_mask);
if (of_dma_is_coherent(pdev->dev.of_node)) {
ret = axi_dmac_read(dmac, AXI_DMAC_REG_COHERENCY_DESC);
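The scatter-gather support added above splits each segment into a software part (struct axi_dmac_sg) and a hardware part (struct axi_dmac_hw_desc) kept in coherent memory and chained through next_sg_addr. A condensed, slightly simplified view of how axi_dmac_alloc_desc() and the cyclic prep path build that chain (the real code also keeps the per-period IRQ flag unless the transfer has no callback):

static struct axi_dmac_hw_desc *build_hw_chain(struct device *dev,
					       unsigned int num_sgs,
					       dma_addr_t *hw_phys, bool cyclic)
{
	struct axi_dmac_hw_desc *hws;
	unsigned int i;

	hws = dma_alloc_coherent(dev, PAGE_ALIGN(num_sgs * sizeof(*hws)),
				 hw_phys, GFP_ATOMIC);
	if (!hws)
		return NULL;

	for (i = 0; i < num_sgs; i++) {
		hws[i].id = AXI_DMAC_SG_UNUSED;
		hws[i].flags = 0;
		/* each descriptor points at the next one in the chain */
		hws[i].next_sg_addr = *hw_phys + (i + 1) * sizeof(*hws);
	}

	if (cyclic)		/* wrap the tail back to the head, no terminator */
		hws[num_sgs - 1].next_sg_addr = *hw_phys;
	else			/* terminate the chain and raise an interrupt */
		hws[num_sgs - 1].flags = AXI_DMAC_HW_FLAG_LAST | AXI_DMAC_HW_FLAG_IRQ;

	return hws;
}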
diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c
index ffe621695e..a4f6088378 100644
--- a/drivers/dma/dmatest.c
+++ b/drivers/dma/dmatest.c
@@ -21,6 +21,10 @@
#include <linux/slab.h>
#include <linux/wait.h>
+static bool nobounce;
+module_param(nobounce, bool, 0644);
+MODULE_PARM_DESC(nobounce, "Prevent using swiotlb buffer (default: use swiotlb buffer)");
+
static unsigned int test_buf_size = 16384;
module_param(test_buf_size, uint, 0644);
MODULE_PARM_DESC(test_buf_size, "Size of the memcpy test buffer");
@@ -90,6 +94,7 @@ MODULE_PARM_DESC(polled, "Use polling for completion instead of interrupts");
/**
* struct dmatest_params - test parameters.
+ * @nobounce: prevent using swiotlb buffer
* @buf_size: size of the memcpy test buffer
* @channel: bus ID of the channel to test
* @device: bus ID of the DMA Engine to test
@@ -106,6 +111,7 @@ MODULE_PARM_DESC(polled, "Use polling for completion instead of interrupts");
* @polled: use polling for completion instead of interrupts
*/
struct dmatest_params {
+ bool nobounce;
unsigned int buf_size;
char channel[20];
char device[32];
@@ -215,6 +221,7 @@ struct dmatest_done {
struct dmatest_data {
u8 **raw;
u8 **aligned;
+ gfp_t gfp_flags;
unsigned int cnt;
unsigned int off;
};
@@ -533,7 +540,7 @@ static int dmatest_alloc_test_data(struct dmatest_data *d,
goto err;
for (i = 0; i < d->cnt; i++) {
- d->raw[i] = kmalloc(buf_size + align, GFP_KERNEL);
+ d->raw[i] = kmalloc(buf_size + align, d->gfp_flags);
if (!d->raw[i])
goto err;
@@ -655,6 +662,13 @@ static int dmatest_func(void *data)
goto err_free_coefs;
}
+ src->gfp_flags = GFP_KERNEL;
+ dst->gfp_flags = GFP_KERNEL;
+ if (params->nobounce) {
+ src->gfp_flags = GFP_DMA;
+ dst->gfp_flags = GFP_DMA;
+ }
+
if (dmatest_alloc_test_data(src, buf_size, align) < 0)
goto err_free_coefs;
@@ -1093,6 +1107,7 @@ static void add_threaded_test(struct dmatest_info *info)
struct dmatest_params *params = &info->params;
/* Copy test parameters */
+ params->nobounce = nobounce;
params->buf_size = test_buf_size;
strscpy(params->channel, strim(test_channel), sizeof(params->channel));
strscpy(params->device, strim(test_device), sizeof(params->device));
diff --git a/drivers/dma/fsl-qdma.c b/drivers/dma/fsl-qdma.c
index 7a24279318..5005e138fc 100644
--- a/drivers/dma/fsl-qdma.c
+++ b/drivers/dma/fsl-qdma.c
@@ -161,6 +161,10 @@ struct fsl_qdma_format {
u8 __reserved1[2];
u8 cfg8b_w1;
} __packed;
+ struct {
+ __le32 __reserved2;
+ __le32 cmd;
+ } __packed;
__le64 data;
};
} __packed;
@@ -355,7 +359,6 @@ static void fsl_qdma_free_chan_resources(struct dma_chan *chan)
static void fsl_qdma_comp_fill_memcpy(struct fsl_qdma_comp *fsl_comp,
dma_addr_t dst, dma_addr_t src, u32 len)
{
- u32 cmd;
struct fsl_qdma_format *sdf, *ddf;
struct fsl_qdma_format *ccdf, *csgf_desc, *csgf_src, *csgf_dest;
@@ -384,15 +387,11 @@ static void fsl_qdma_comp_fill_memcpy(struct fsl_qdma_comp *fsl_comp,
/* This entry is the last entry. */
qdma_csgf_set_f(csgf_dest, len);
/* Descriptor Buffer */
- cmd = cpu_to_le32(FSL_QDMA_CMD_RWTTYPE <<
- FSL_QDMA_CMD_RWTTYPE_OFFSET) |
- FSL_QDMA_CMD_PF;
- sdf->data = QDMA_SDDF_CMD(cmd);
-
- cmd = cpu_to_le32(FSL_QDMA_CMD_RWTTYPE <<
- FSL_QDMA_CMD_RWTTYPE_OFFSET);
- cmd |= cpu_to_le32(FSL_QDMA_CMD_LWC << FSL_QDMA_CMD_LWC_OFFSET);
- ddf->data = QDMA_SDDF_CMD(cmd);
+ sdf->cmd = cpu_to_le32((FSL_QDMA_CMD_RWTTYPE << FSL_QDMA_CMD_RWTTYPE_OFFSET) |
+ FSL_QDMA_CMD_PF);
+
+ ddf->cmd = cpu_to_le32((FSL_QDMA_CMD_RWTTYPE << FSL_QDMA_CMD_RWTTYPE_OFFSET) |
+ (FSL_QDMA_CMD_LWC << FSL_QDMA_CMD_LWC_OFFSET));
}
/*
@@ -570,10 +569,9 @@ static struct fsl_qdma_queue
status_size,
&status_head->bus_addr,
GFP_KERNEL);
- if (!status_head->cq) {
- devm_kfree(&pdev->dev, status_head);
+ if (!status_head->cq)
return NULL;
- }
+
status_head->n_cq = status_size;
status_head->virt_head = status_head->cq;
status_head->virt_tail = status_head->cq;
@@ -627,7 +625,7 @@ static int fsl_qdma_halt(struct fsl_qdma_engine *fsl_qdma)
static int
fsl_qdma_queue_transfer_complete(struct fsl_qdma_engine *fsl_qdma,
- void *block,
+ __iomem void *block,
int id)
{
bool duplicate;
diff --git a/drivers/dma/idma64.c b/drivers/dma/idma64.c
index 78a938969d..1398814d8f 100644
--- a/drivers/dma/idma64.c
+++ b/drivers/dma/idma64.c
@@ -171,6 +171,10 @@ static irqreturn_t idma64_irq(int irq, void *dev)
u32 status_err;
unsigned short i;
+ /* Since IRQ may be shared, check if DMA controller is powered on */
+ if (status == GENMASK(31, 0))
+ return IRQ_NONE;
+
dev_vdbg(idma64->dma.dev, "%s: status=%#x\n", __func__, status);
/* Check if we have any interrupt from the DMA controller */
diff --git a/drivers/dma/idxd/Makefile b/drivers/dma/idxd/Makefile
index c5e679070e..2b4a0d406e 100644
--- a/drivers/dma/idxd/Makefile
+++ b/drivers/dma/idxd/Makefile
@@ -4,7 +4,7 @@ obj-$(CONFIG_INTEL_IDXD_BUS) += idxd_bus.o
idxd_bus-y := bus.o
obj-$(CONFIG_INTEL_IDXD) += idxd.o
-idxd-y := init.o irq.o device.o sysfs.o submit.o dma.o cdev.o debugfs.o
+idxd-y := init.o irq.o device.o sysfs.o submit.o dma.o cdev.o debugfs.o defaults.o
idxd-$(CONFIG_INTEL_IDXD_PERFMON) += perfmon.o
diff --git a/drivers/dma/idxd/bus.c b/drivers/dma/idxd/bus.c
index 6f84621053..0c9e689a2e 100644
--- a/drivers/dma/idxd/bus.c
+++ b/drivers/dma/idxd/bus.c
@@ -67,11 +67,17 @@ static void idxd_config_bus_remove(struct device *dev)
idxd_drv->remove(idxd_dev);
}
+static int idxd_bus_uevent(const struct device *dev, struct kobj_uevent_env *env)
+{
+ return add_uevent_var(env, "MODALIAS=" IDXD_DEVICES_MODALIAS_FMT, 0);
+}
+
struct bus_type dsa_bus_type = {
.name = "dsa",
.match = idxd_config_bus_match,
.probe = idxd_config_bus_probe,
.remove = idxd_config_bus_remove,
+ .uevent = idxd_bus_uevent,
};
EXPORT_SYMBOL_GPL(dsa_bus_type);
diff --git a/drivers/dma/idxd/cdev.c b/drivers/dma/idxd/cdev.c
index 3dd25a9a04..59456f2177 100644
--- a/drivers/dma/idxd/cdev.c
+++ b/drivers/dma/idxd/cdev.c
@@ -165,7 +165,7 @@ static void idxd_cdev_dev_release(struct device *dev)
struct idxd_wq *wq = idxd_cdev->wq;
cdev_ctx = &ictx[wq->idxd->data->type];
- ida_simple_remove(&cdev_ctx->minor_ida, idxd_cdev->minor);
+ ida_free(&cdev_ctx->minor_ida, idxd_cdev->minor);
kfree(idxd_cdev);
}
@@ -342,7 +342,7 @@ static void idxd_cdev_evl_drain_pasid(struct idxd_wq *wq, u32 pasid)
if (!evl)
return;
- spin_lock(&evl->lock);
+ mutex_lock(&evl->lock);
status.bits = ioread64(idxd->reg_base + IDXD_EVLSTATUS_OFFSET);
t = status.tail;
h = status.head;
@@ -354,9 +354,8 @@ static void idxd_cdev_evl_drain_pasid(struct idxd_wq *wq, u32 pasid)
set_bit(h, evl->bmap);
h = (h + 1) % size;
}
- spin_unlock(&evl->lock);
-
drain_workqueue(wq->wq);
+ mutex_unlock(&evl->lock);
}
static int idxd_cdev_release(struct inode *node, struct file *filep)
@@ -463,7 +462,7 @@ int idxd_wq_add_cdev(struct idxd_wq *wq)
cdev = &idxd_cdev->cdev;
dev = cdev_dev(idxd_cdev);
cdev_ctx = &ictx[wq->idxd->data->type];
- minor = ida_simple_get(&cdev_ctx->minor_ida, 0, MINORMASK, GFP_KERNEL);
+ minor = ida_alloc_max(&cdev_ctx->minor_ida, MINORMASK, GFP_KERNEL);
if (minor < 0) {
kfree(idxd_cdev);
return minor;
@@ -550,7 +549,7 @@ static int idxd_user_drv_probe(struct idxd_dev *idxd_dev)
}
wq->type = IDXD_WQT_USER;
- rc = drv_enable_wq(wq);
+ rc = idxd_drv_enable_wq(wq);
if (rc < 0)
goto err;
@@ -565,7 +564,7 @@ static int idxd_user_drv_probe(struct idxd_dev *idxd_dev)
return 0;
err_cdev:
- drv_disable_wq(wq);
+ idxd_drv_disable_wq(wq);
err:
destroy_workqueue(wq->wq);
wq->type = IDXD_WQT_NONE;
@@ -580,7 +579,7 @@ static void idxd_user_drv_remove(struct idxd_dev *idxd_dev)
mutex_lock(&wq->wq_lock);
idxd_wq_del_cdev(wq);
- drv_disable_wq(wq);
+ idxd_drv_disable_wq(wq);
wq->type = IDXD_WQT_NONE;
destroy_workqueue(wq->wq);
wq->wq = NULL;
diff --git a/drivers/dma/idxd/debugfs.c b/drivers/dma/idxd/debugfs.c
index f3f25ee676..ad4245cb30 100644
--- a/drivers/dma/idxd/debugfs.c
+++ b/drivers/dma/idxd/debugfs.c
@@ -66,7 +66,7 @@ static int debugfs_evl_show(struct seq_file *s, void *d)
if (!evl || !evl->log)
return 0;
- spin_lock(&evl->lock);
+ mutex_lock(&evl->lock);
evl_status.bits = ioread64(idxd->reg_base + IDXD_EVLSTATUS_OFFSET);
t = evl_status.tail;
@@ -87,7 +87,7 @@ static int debugfs_evl_show(struct seq_file *s, void *d)
dump_event_entry(idxd, s, i, &count, processed);
}
- spin_unlock(&evl->lock);
+ mutex_unlock(&evl->lock);
return 0;
}
diff --git a/drivers/dma/idxd/defaults.c b/drivers/dma/idxd/defaults.c
new file mode 100644
index 0000000000..c607ae8dd1
--- /dev/null
+++ b/drivers/dma/idxd/defaults.c
@@ -0,0 +1,53 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2023 Intel Corporation. All rights rsvd. */
+#include <linux/kernel.h>
+#include "idxd.h"
+
+int idxd_load_iaa_device_defaults(struct idxd_device *idxd)
+{
+ struct idxd_engine *engine;
+ struct idxd_group *group;
+ struct idxd_wq *wq;
+
+ if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
+ return 0;
+
+ wq = idxd->wqs[0];
+
+ if (wq->state != IDXD_WQ_DISABLED)
+ return -EPERM;
+
+ /* set mode to "dedicated" */
+ set_bit(WQ_FLAG_DEDICATED, &wq->flags);
+ wq->threshold = 0;
+
+ /* only setting up 1 wq, so give it all the wq space */
+ wq->size = idxd->max_wq_size;
+
+ /* set priority to 10 */
+ wq->priority = 10;
+
+ /* set type to "kernel" */
+ wq->type = IDXD_WQT_KERNEL;
+
+ /* set wq group to 0 */
+ group = idxd->groups[0];
+ wq->group = group;
+ group->num_wqs++;
+
+ /* set name to "iaa_crypto" */
+ memset(wq->name, 0, WQ_NAME_SIZE + 1);
+ strscpy(wq->name, "iaa_crypto", WQ_NAME_SIZE + 1);
+
+ /* set driver_name to "crypto" */
+ memset(wq->driver_name, 0, DRIVER_NAME_SIZE + 1);
+ strscpy(wq->driver_name, "crypto", DRIVER_NAME_SIZE + 1);
+
+ engine = idxd->engines[0];
+
+ /* set engine group to 0 */
+ engine->group = idxd->groups[0];
+ engine->group->num_engines++;
+
+ return 0;
+}
diff --git a/drivers/dma/idxd/device.c b/drivers/dma/idxd/device.c
index fa0f880bea..c41ef195ee 100644
--- a/drivers/dma/idxd/device.c
+++ b/drivers/dma/idxd/device.c
@@ -161,6 +161,7 @@ int idxd_wq_alloc_resources(struct idxd_wq *wq)
free_hw_descs(wq);
return rc;
}
+EXPORT_SYMBOL_NS_GPL(idxd_wq_alloc_resources, IDXD);
void idxd_wq_free_resources(struct idxd_wq *wq)
{
@@ -174,6 +175,7 @@ void idxd_wq_free_resources(struct idxd_wq *wq)
dma_free_coherent(dev, wq->compls_size, wq->compls, wq->compls_addr);
sbitmap_queue_free(&wq->sbq);
}
+EXPORT_SYMBOL_NS_GPL(idxd_wq_free_resources, IDXD);
int idxd_wq_enable(struct idxd_wq *wq)
{
@@ -405,6 +407,7 @@ int idxd_wq_init_percpu_ref(struct idxd_wq *wq)
reinit_completion(&wq->wq_resurrect);
return 0;
}
+EXPORT_SYMBOL_NS_GPL(idxd_wq_init_percpu_ref, IDXD);
void __idxd_wq_quiesce(struct idxd_wq *wq)
{
@@ -414,6 +417,7 @@ void __idxd_wq_quiesce(struct idxd_wq *wq)
complete_all(&wq->wq_resurrect);
wait_for_completion(&wq->wq_dead);
}
+EXPORT_SYMBOL_NS_GPL(__idxd_wq_quiesce, IDXD);
void idxd_wq_quiesce(struct idxd_wq *wq)
{
@@ -421,6 +425,7 @@ void idxd_wq_quiesce(struct idxd_wq *wq)
__idxd_wq_quiesce(wq);
mutex_unlock(&wq->wq_lock);
}
+EXPORT_SYMBOL_NS_GPL(idxd_wq_quiesce, IDXD);
/* Device control bits */
static inline bool idxd_is_enabled(struct idxd_device *idxd)
@@ -770,7 +775,7 @@ static int idxd_device_evl_setup(struct idxd_device *idxd)
goto err_alloc;
}
- spin_lock(&evl->lock);
+ mutex_lock(&evl->lock);
evl->log = addr;
evl->dma = dma_addr;
evl->log_size = size;
@@ -791,7 +796,7 @@ static int idxd_device_evl_setup(struct idxd_device *idxd)
gencfg.evl_en = 1;
iowrite32(gencfg.bits, idxd->reg_base + IDXD_GENCFG_OFFSET);
- spin_unlock(&evl->lock);
+ mutex_unlock(&evl->lock);
return 0;
err_alloc:
@@ -814,7 +819,7 @@ static void idxd_device_evl_free(struct idxd_device *idxd)
if (!gencfg.evl_en)
return;
- spin_lock(&evl->lock);
+ mutex_lock(&evl->lock);
gencfg.evl_en = 0;
iowrite32(gencfg.bits, idxd->reg_base + IDXD_GENCFG_OFFSET);
@@ -831,7 +836,7 @@ static void idxd_device_evl_free(struct idxd_device *idxd)
evl_dma = evl->dma;
evl->log = NULL;
evl->size = IDXD_EVL_SIZE_MIN;
- spin_unlock(&evl->lock);
+ mutex_unlock(&evl->lock);
dma_free_coherent(dev, evl_log_size, evl_log, evl_dma);
}
@@ -1273,7 +1278,7 @@ static void idxd_flush_pending_descs(struct idxd_irq_entry *ie)
tx = &desc->txd;
tx->callback = NULL;
tx->callback_result = NULL;
- idxd_dma_complete_txd(desc, ctype, true);
+ idxd_dma_complete_txd(desc, ctype, true, NULL, NULL);
}
}
@@ -1357,7 +1362,7 @@ err_irq:
return rc;
}
-int drv_enable_wq(struct idxd_wq *wq)
+int idxd_drv_enable_wq(struct idxd_wq *wq)
{
struct idxd_device *idxd = wq->idxd;
struct device *dev = &idxd->pdev->dev;
@@ -1489,8 +1494,9 @@ err_map_portal:
err:
return rc;
}
+EXPORT_SYMBOL_NS_GPL(idxd_drv_enable_wq, IDXD);
-void drv_disable_wq(struct idxd_wq *wq)
+void idxd_drv_disable_wq(struct idxd_wq *wq)
{
struct idxd_device *idxd = wq->idxd;
struct device *dev = &idxd->pdev->dev;
@@ -1510,6 +1516,7 @@ void drv_disable_wq(struct idxd_wq *wq)
wq->type = IDXD_WQT_NONE;
wq->client_count = 0;
}
+EXPORT_SYMBOL_NS_GPL(idxd_drv_disable_wq, IDXD);
int idxd_device_drv_probe(struct idxd_dev *idxd_dev)
{
diff --git a/drivers/dma/idxd/dma.c b/drivers/dma/idxd/dma.c
index 47a01893cf..cd835eabd3 100644
--- a/drivers/dma/idxd/dma.c
+++ b/drivers/dma/idxd/dma.c
@@ -22,7 +22,7 @@ static inline struct idxd_wq *to_idxd_wq(struct dma_chan *c)
void idxd_dma_complete_txd(struct idxd_desc *desc,
enum idxd_complete_type comp_type,
- bool free_desc)
+ bool free_desc, void *ctx, u32 *status)
{
struct idxd_device *idxd = desc->wq->idxd;
struct dma_async_tx_descriptor *tx;
@@ -314,7 +314,7 @@ static int idxd_dmaengine_drv_probe(struct idxd_dev *idxd_dev)
wq->type = IDXD_WQT_KERNEL;
- rc = drv_enable_wq(wq);
+ rc = idxd_drv_enable_wq(wq);
if (rc < 0) {
dev_dbg(dev, "Enable wq %d failed: %d\n", wq->id, rc);
rc = -ENXIO;
@@ -333,7 +333,7 @@ static int idxd_dmaengine_drv_probe(struct idxd_dev *idxd_dev)
return 0;
err_dma:
- drv_disable_wq(wq);
+ idxd_drv_disable_wq(wq);
err:
wq->type = IDXD_WQT_NONE;
mutex_unlock(&wq->wq_lock);
@@ -347,7 +347,7 @@ static void idxd_dmaengine_drv_remove(struct idxd_dev *idxd_dev)
mutex_lock(&wq->wq_lock);
__idxd_wq_quiesce(wq);
idxd_unregister_dma_channel(wq);
- drv_disable_wq(wq);
+ idxd_drv_disable_wq(wq);
mutex_unlock(&wq->wq_lock);
}
@@ -359,6 +359,7 @@ static enum idxd_dev_type dev_types[] = {
struct idxd_device_driver idxd_dmaengine_drv = {
.probe = idxd_dmaengine_drv_probe,
.remove = idxd_dmaengine_drv_remove,
+ .desc_complete = idxd_dma_complete_txd,
.name = "dmaengine",
.type = dev_types,
};
diff --git a/drivers/dma/idxd/idxd.h b/drivers/dma/idxd/idxd.h
index 96062ae39f..df91472f0f 100644
--- a/drivers/dma/idxd/idxd.h
+++ b/drivers/dma/idxd/idxd.h
@@ -13,6 +13,7 @@
#include <linux/bitmap.h>
#include <linux/perf_event.h>
#include <linux/iommu.h>
+#include <linux/crypto.h>
#include <uapi/linux/idxd.h>
#include "registers.h"
@@ -57,11 +58,23 @@ enum idxd_type {
#define IDXD_ENQCMDS_RETRIES 32
#define IDXD_ENQCMDS_MAX_RETRIES 64
+enum idxd_complete_type {
+ IDXD_COMPLETE_NORMAL = 0,
+ IDXD_COMPLETE_ABORT,
+ IDXD_COMPLETE_DEV_FAIL,
+};
+
+struct idxd_desc;
+
struct idxd_device_driver {
const char *name;
enum idxd_dev_type *type;
int (*probe)(struct idxd_dev *idxd_dev);
void (*remove)(struct idxd_dev *idxd_dev);
+ void (*desc_complete)(struct idxd_desc *desc,
+ enum idxd_complete_type comp_type,
+ bool free_desc,
+ void *ctx, u32 *status);
struct device_driver drv;
};
@@ -174,12 +187,6 @@ enum idxd_op_type {
IDXD_OP_NONBLOCK = 1,
};
-enum idxd_complete_type {
- IDXD_COMPLETE_NORMAL = 0,
- IDXD_COMPLETE_ABORT,
- IDXD_COMPLETE_DEV_FAIL,
-};
-
struct idxd_dma_chan {
struct dma_chan chan;
struct idxd_wq *wq;
@@ -270,6 +277,8 @@ struct idxd_dma_dev {
struct dma_device dma;
};
+typedef int (*load_device_defaults_fn_t) (struct idxd_device *idxd);
+
struct idxd_driver_data {
const char *name_prefix;
enum idxd_type type;
@@ -279,11 +288,12 @@ struct idxd_driver_data {
int evl_cr_off;
int cr_status_off;
int cr_result_off;
+ load_device_defaults_fn_t load_device_defaults;
};
struct idxd_evl {
/* Lock to protect event log access. */
- spinlock_t lock;
+ struct mutex lock;
void *log;
dma_addr_t dma;
/* Total size of event log = number of entries * entry size. */
@@ -377,6 +387,14 @@ static inline unsigned int evl_size(struct idxd_device *idxd)
return idxd->evl->size * evl_ent_size(idxd);
}
+struct crypto_ctx {
+ struct acomp_req *req;
+ struct crypto_tfm *tfm;
+ dma_addr_t src_addr;
+ dma_addr_t dst_addr;
+ bool compress;
+};
+
/* IDXD software descriptor */
struct idxd_desc {
union {
@@ -389,7 +407,10 @@ struct idxd_desc {
struct iax_completion_record *iax_completion;
};
dma_addr_t compl_dma;
- struct dma_async_tx_descriptor txd;
+ union {
+ struct dma_async_tx_descriptor txd;
+ struct crypto_ctx crypto;
+ };
struct llist_node llnode;
struct list_head list;
int id;
@@ -416,6 +437,15 @@ enum idxd_completion_status {
#define idxd_dev_to_idxd(idxd_dev) container_of(idxd_dev, struct idxd_device, idxd_dev)
#define idxd_dev_to_wq(idxd_dev) container_of(idxd_dev, struct idxd_wq, idxd_dev)
+static inline struct idxd_device_driver *wq_to_idxd_drv(struct idxd_wq *wq)
+{
+ struct device *dev = wq_confdev(wq);
+ struct idxd_device_driver *idxd_drv =
+ container_of(dev->driver, struct idxd_device_driver, drv);
+
+ return idxd_drv;
+}
+
static inline struct idxd_device *confdev_to_idxd(struct device *dev)
{
struct idxd_dev *idxd_dev = confdev_to_idxd_dev(dev);
@@ -617,6 +647,16 @@ static inline int idxd_wq_refcount(struct idxd_wq *wq)
return wq->client_count;
};
+static inline void idxd_wq_set_private(struct idxd_wq *wq, void *private)
+{
+ dev_set_drvdata(wq_confdev(wq), private);
+}
+
+static inline void *idxd_wq_get_private(struct idxd_wq *wq)
+{
+ return dev_get_drvdata(wq_confdev(wq));
+}
+
/*
* Intel IAA does not support batch processing.
* The max batch size of device, max batch size of wq and
@@ -654,6 +694,9 @@ static inline int idxd_wq_driver_name_match(struct idxd_wq *wq, struct device *d
return (strncmp(wq->driver_name, dev->driver->name, strlen(dev->driver->name)) == 0);
}
+#define MODULE_ALIAS_IDXD_DEVICE(type) MODULE_ALIAS("idxd:t" __stringify(type) "*")
+#define IDXD_DEVICES_MODALIAS_FMT "idxd:t%d"
+
int __must_check __idxd_driver_register(struct idxd_device_driver *idxd_drv,
struct module *module, const char *mod_name);
#define idxd_driver_register(driver) \
@@ -664,6 +707,24 @@ void idxd_driver_unregister(struct idxd_device_driver *idxd_drv);
#define module_idxd_driver(__idxd_driver) \
module_driver(__idxd_driver, idxd_driver_register, idxd_driver_unregister)
+void idxd_free_desc(struct idxd_wq *wq, struct idxd_desc *desc);
+void idxd_dma_complete_txd(struct idxd_desc *desc,
+ enum idxd_complete_type comp_type,
+ bool free_desc, void *ctx, u32 *status);
+
+static inline void idxd_desc_complete(struct idxd_desc *desc,
+ enum idxd_complete_type comp_type,
+ bool free_desc)
+{
+ struct idxd_device_driver *drv;
+ u32 status;
+
+ drv = wq_to_idxd_drv(desc->wq);
+ if (drv->desc_complete)
+ drv->desc_complete(desc, comp_type, free_desc,
+ &desc->txd, &status);
+}
+
int idxd_register_bus_type(void);
void idxd_unregister_bus_type(void);
int idxd_register_devices(struct idxd_device *idxd);
@@ -671,6 +732,7 @@ void idxd_unregister_devices(struct idxd_device *idxd);
void idxd_wqs_quiesce(struct idxd_device *idxd);
bool idxd_queue_int_handle_resubmit(struct idxd_desc *desc);
void multi_u64_to_bmap(unsigned long *bmap, u64 *val, int count);
+int idxd_load_iaa_device_defaults(struct idxd_device *idxd);
/* device interrupt control */
irqreturn_t idxd_misc_thread(int vec, void *data);
@@ -681,8 +743,8 @@ void idxd_unmask_error_interrupts(struct idxd_device *idxd);
/* device control */
int idxd_device_drv_probe(struct idxd_dev *idxd_dev);
void idxd_device_drv_remove(struct idxd_dev *idxd_dev);
-int drv_enable_wq(struct idxd_wq *wq);
-void drv_disable_wq(struct idxd_wq *wq);
+int idxd_drv_enable_wq(struct idxd_wq *wq);
+void idxd_drv_disable_wq(struct idxd_wq *wq);
int idxd_device_init_reset(struct idxd_device *idxd);
int idxd_device_enable(struct idxd_device *idxd);
int idxd_device_disable(struct idxd_device *idxd);
@@ -717,14 +779,11 @@ int idxd_wq_request_irq(struct idxd_wq *wq);
/* submission */
int idxd_submit_desc(struct idxd_wq *wq, struct idxd_desc *desc);
struct idxd_desc *idxd_alloc_desc(struct idxd_wq *wq, enum idxd_op_type optype);
-void idxd_free_desc(struct idxd_wq *wq, struct idxd_desc *desc);
int idxd_enqcmds(struct idxd_wq *wq, void __iomem *portal, const void *desc);
/* dmaengine */
int idxd_register_dma_device(struct idxd_device *idxd);
void idxd_unregister_dma_device(struct idxd_device *idxd);
-void idxd_dma_complete_txd(struct idxd_desc *desc,
- enum idxd_complete_type comp_type, bool free_desc);
/* cdev */
int idxd_cdev_register(void);
diff --git a/drivers/dma/idxd/init.c b/drivers/dma/idxd/init.c
index d09a8553ea..264c4e47d7 100644
--- a/drivers/dma/idxd/init.c
+++ b/drivers/dma/idxd/init.c
@@ -59,6 +59,7 @@ static struct idxd_driver_data idxd_driver_data[] = {
.evl_cr_off = offsetof(struct iax_evl_entry, cr),
.cr_status_off = offsetof(struct iax_completion_record, status),
.cr_result_off = offsetof(struct iax_completion_record, error_code),
+ .load_device_defaults = idxd_load_iaa_device_defaults,
},
};
@@ -353,7 +354,7 @@ static int idxd_init_evl(struct idxd_device *idxd)
if (!evl)
return -ENOMEM;
- spin_lock_init(&evl->lock);
+ mutex_init(&evl->lock);
evl->size = IDXD_EVL_SIZE_MIN;
idxd_name = dev_name(idxd_confdev(idxd));
@@ -754,6 +755,12 @@ static int idxd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
goto err;
}
+ if (data->load_device_defaults) {
+ rc = data->load_device_defaults(idxd);
+ if (rc)
+ dev_warn(dev, "IDXD loading device defaults failed\n");
+ }
+
rc = idxd_register_devices(idxd);
if (rc) {
dev_err(dev, "IDXD sysfs setup failed\n");
diff --git a/drivers/dma/idxd/irq.c b/drivers/dma/idxd/irq.c
index 3bdfc1797f..8dc029c865 100644
--- a/drivers/dma/idxd/irq.c
+++ b/drivers/dma/idxd/irq.c
@@ -123,7 +123,7 @@ static void idxd_abort_invalid_int_handle_descs(struct idxd_irq_entry *ie)
list_for_each_entry_safe(d, t, &flist, list) {
list_del(&d->list);
- idxd_dma_complete_txd(d, IDXD_COMPLETE_ABORT, true);
+ idxd_desc_complete(d, IDXD_COMPLETE_ABORT, true);
}
}
@@ -363,7 +363,7 @@ static void process_evl_entries(struct idxd_device *idxd)
evl_status.bits = 0;
evl_status.int_pending = 1;
- spin_lock(&evl->lock);
+ mutex_lock(&evl->lock);
/* Clear interrupt pending bit */
iowrite32(evl_status.bits_upper32,
idxd->reg_base + IDXD_EVLSTATUS_OFFSET + sizeof(u32));
@@ -380,7 +380,7 @@ static void process_evl_entries(struct idxd_device *idxd)
evl_status.head = h;
iowrite32(evl_status.bits_lower32, idxd->reg_base + IDXD_EVLSTATUS_OFFSET);
- spin_unlock(&evl->lock);
+ mutex_unlock(&evl->lock);
}
irqreturn_t idxd_misc_thread(int vec, void *data)
@@ -533,7 +533,7 @@ static void idxd_int_handle_resubmit_work(struct work_struct *work)
*/
if (rc != -EAGAIN) {
desc->completion->status = IDXD_COMP_DESC_ABORT;
- idxd_dma_complete_txd(desc, IDXD_COMPLETE_ABORT, false);
+ idxd_desc_complete(desc, IDXD_COMPLETE_ABORT, false);
}
idxd_free_desc(wq, desc);
}
@@ -574,11 +574,11 @@ static void irq_process_pending_llist(struct idxd_irq_entry *irq_entry)
* and 0xff, which DSA_COMP_STATUS_MASK can mask out.
*/
if (unlikely(desc->completion->status == IDXD_COMP_DESC_ABORT)) {
- idxd_dma_complete_txd(desc, IDXD_COMPLETE_ABORT, true);
+ idxd_desc_complete(desc, IDXD_COMPLETE_ABORT, true);
continue;
}
- idxd_dma_complete_txd(desc, IDXD_COMPLETE_NORMAL, true);
+ idxd_desc_complete(desc, IDXD_COMPLETE_NORMAL, true);
} else {
spin_lock(&irq_entry->list_lock);
list_add_tail(&desc->list,
@@ -617,11 +617,11 @@ static void irq_process_work_list(struct idxd_irq_entry *irq_entry)
* and 0xff, which DSA_COMP_STATUS_MASK can mask out.
*/
if (unlikely(desc->completion->status == IDXD_COMP_DESC_ABORT)) {
- idxd_dma_complete_txd(desc, IDXD_COMPLETE_ABORT, true);
+ idxd_desc_complete(desc, IDXD_COMPLETE_ABORT, true);
continue;
}
- idxd_dma_complete_txd(desc, IDXD_COMPLETE_NORMAL, true);
+ idxd_desc_complete(desc, IDXD_COMPLETE_NORMAL, true);
}
}
diff --git a/drivers/dma/idxd/perfmon.c b/drivers/dma/idxd/perfmon.c
index fdda6d6042..5e94247e1e 100644
--- a/drivers/dma/idxd/perfmon.c
+++ b/drivers/dma/idxd/perfmon.c
@@ -528,14 +528,11 @@ static int perf_event_cpu_offline(unsigned int cpu, struct hlist_node *node)
return 0;
target = cpumask_any_but(cpu_online_mask, cpu);
-
/* migrate events if there is a valid target */
- if (target < nr_cpu_ids)
+ if (target < nr_cpu_ids) {
cpumask_set_cpu(target, &perfmon_dsa_cpu_mask);
- else
- target = -1;
-
- perf_pmu_migrate_context(&idxd_pmu->pmu, cpu, target);
+ perf_pmu_migrate_context(&idxd_pmu->pmu, cpu, target);
+ }
return 0;
}
diff --git a/drivers/dma/idxd/submit.c b/drivers/dma/idxd/submit.c
index 3f922518e3..817a564413 100644
--- a/drivers/dma/idxd/submit.c
+++ b/drivers/dma/idxd/submit.c
@@ -61,6 +61,7 @@ struct idxd_desc *idxd_alloc_desc(struct idxd_wq *wq, enum idxd_op_type optype)
return __get_desc(wq, idx, cpu);
}
+EXPORT_SYMBOL_NS_GPL(idxd_alloc_desc, IDXD);
void idxd_free_desc(struct idxd_wq *wq, struct idxd_desc *desc)
{
@@ -69,6 +70,7 @@ void idxd_free_desc(struct idxd_wq *wq, struct idxd_desc *desc)
desc->cpu = -1;
sbitmap_queue_clear(&wq->sbq, desc->id, cpu);
}
+EXPORT_SYMBOL_NS_GPL(idxd_free_desc, IDXD);
static struct idxd_desc *list_abort_desc(struct idxd_wq *wq, struct idxd_irq_entry *ie,
struct idxd_desc *desc)
@@ -125,7 +127,8 @@ static void llist_abort_desc(struct idxd_wq *wq, struct idxd_irq_entry *ie,
spin_unlock(&ie->list_lock);
if (found)
- idxd_dma_complete_txd(found, IDXD_COMPLETE_ABORT, false);
+ idxd_dma_complete_txd(found, IDXD_COMPLETE_ABORT, false,
+ NULL, NULL);
/*
* completing the descriptor will return desc to allocator and
@@ -135,7 +138,8 @@ static void llist_abort_desc(struct idxd_wq *wq, struct idxd_irq_entry *ie,
*/
list_for_each_entry_safe(d, t, &flist, list) {
list_del_init(&d->list);
- idxd_dma_complete_txd(found, IDXD_COMPLETE_ABORT, true);
+ idxd_dma_complete_txd(found, IDXD_COMPLETE_ABORT, true,
+ NULL, NULL);
}
}
@@ -215,3 +219,4 @@ int idxd_submit_desc(struct idxd_wq *wq, struct idxd_desc *desc)
percpu_ref_put(&wq->wq_active);
return 0;
}
+EXPORT_SYMBOL_NS_GPL(idxd_submit_desc, IDXD);
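The exports and the new desc_complete() callback above exist so that a non-dmaengine idxd sub-driver (the in-tree user is the IAA crypto driver) can own a kernel work queue and have completions routed back to it. A hypothetical, heavily trimmed skeleton of such a sub-driver, using only interfaces visible in this series (module boilerplate, the dev-type table, resource allocation and error handling are omitted):

#include "idxd.h"	/* include path is illustrative */

static void my_desc_complete(struct idxd_desc *desc,
			     enum idxd_complete_type comp_type,
			     bool free_desc, void *ctx, u32 *status)
{
	/* consume the completion record / desc->crypto context here */
	if (free_desc)
		idxd_free_desc(desc->wq, desc);
}

static int my_wq_probe(struct idxd_dev *idxd_dev)
{
	struct idxd_wq *wq = idxd_dev_to_wq(idxd_dev);
	int rc;

	mutex_lock(&wq->wq_lock);
	wq->type = IDXD_WQT_KERNEL;
	rc = idxd_drv_enable_wq(wq);	/* formerly drv_enable_wq() */
	mutex_unlock(&wq->wq_lock);
	return rc;
}

static struct idxd_device_driver my_idxd_drv = {
	.name		= "my_sub_driver",
	.probe		= my_wq_probe,
	.desc_complete	= my_desc_complete,	/* invoked via idxd_desc_complete() */
};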
diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c
index f81ecf5863..9b42f5e96b 100644
--- a/drivers/dma/imx-sdma.c
+++ b/drivers/dma/imx-sdma.c
@@ -421,9 +421,7 @@ struct sdma_desc {
* @shp_addr: value for gReg[6]
* @per_addr: value for gReg[2]
* @status: status of dma channel
- * @context_loaded: ensure context is only loaded once
* @data: specific sdma interface structure
- * @bd_pool: dma_pool for bd
* @terminate_worker: used to call back into terminate work function
* @terminated: terminated list
* @is_ram_script: flag for script in ram
@@ -486,8 +484,6 @@ struct sdma_channel {
* @num_script_addrs: Number of script addresses in this image
* @ram_code_start: offset of SDMA ram image in this firmware image
* @ram_code_size: size of SDMA ram image
- * @script_addrs: Stores the start address of the SDMA scripts
- * (in SDMA memory space)
*/
struct sdma_firmware_header {
u32 magic;
diff --git a/drivers/dma/ls2x-apb-dma.c b/drivers/dma/ls2x-apb-dma.c
new file mode 100644
index 0000000000..a49913f3ed
--- /dev/null
+++ b/drivers/dma/ls2x-apb-dma.c
@@ -0,0 +1,705 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Driver for the Loongson LS2X APB DMA Controller
+ *
+ * Copyright (C) 2017-2023 Loongson Corporation
+ */
+
+#include <linux/clk.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmapool.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/io-64-nonatomic-lo-hi.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_dma.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#include "dmaengine.h"
+#include "virt-dma.h"
+
+/* Global Configuration Register */
+#define LDMA_ORDER_ERG 0x0
+
+/* Bitfield definitions */
+
+/* Bitfields in Global Configuration Register */
+#define LDMA_64BIT_EN BIT(0) /* 1: 64 bit support */
+#define LDMA_UNCOHERENT_EN BIT(1) /* 0: cache, 1: uncache */
+#define LDMA_ASK_VALID BIT(2)
+#define LDMA_START BIT(3) /* DMA start operation */
+#define LDMA_STOP BIT(4) /* DMA stop operation */
+#define LDMA_CONFIG_MASK GENMASK(4, 0) /* DMA controller config bits mask */
+
+/* Bitfields in ndesc_addr field of HW descriptor */
+#define LDMA_DESC_EN BIT(0) /* 1: The next descriptor is valid */
+#define LDMA_DESC_ADDR_LOW GENMASK(31, 1)
+
+/* Bitfields in cmd field of HW descriptor */
+#define LDMA_INT BIT(1) /* Enable DMA interrupts */
+#define LDMA_DATA_DIRECTION BIT(12) /* 1: write to device, 0: read from device */
+
+#define LDMA_SLAVE_BUSWIDTHS (BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) | \
+ BIT(DMA_SLAVE_BUSWIDTH_8_BYTES))
+
+#define LDMA_MAX_TRANS_LEN U32_MAX
+
+/*-- descriptors -----------------------------------------------------*/
+
+/*
+ * struct ls2x_dma_hw_desc - DMA HW descriptor
+ * @ndesc_addr: the next descriptor low address.
+ * @mem_addr: memory low address.
+ * @apb_addr: device buffer address.
+ * @len: length of a piece of carried content, in words.
+ * @step_len: length between two moved memory data blocks.
+ * @step_times: number of blocks to be carried in a single DMA operation.
+ * @cmd: descriptor command or state.
+ * @stats: DMA status.
+ * @high_ndesc_addr: the next descriptor high address.
+ * @high_mem_addr: memory high address.
+ * @reserved: reserved
+ */
+struct ls2x_dma_hw_desc {
+ u32 ndesc_addr;
+ u32 mem_addr;
+ u32 apb_addr;
+ u32 len;
+ u32 step_len;
+ u32 step_times;
+ u32 cmd;
+ u32 stats;
+ u32 high_ndesc_addr;
+ u32 high_mem_addr;
+ u32 reserved[2];
+} __packed;
+
+/*
+ * struct ls2x_dma_sg - ls2x dma scatter gather entry
+ * @hw: the pointer to DMA HW descriptor.
+ * @llp: physical address of the DMA HW descriptor.
+ * @phys: destination or source address(mem).
+ * @len: number of Bytes to read.
+ */
+struct ls2x_dma_sg {
+ struct ls2x_dma_hw_desc *hw;
+ dma_addr_t llp;
+ dma_addr_t phys;
+ u32 len;
+};
+
+/*
+ * struct ls2x_dma_desc - software descriptor
+ * @vdesc: pointer to the virtual dma descriptor.
+ * @cyclic: flag set for cyclic transfers
+ * @burst_size: burst size of transaction, in words.
+ * @desc_num: number of sg entries.
+ * @direction: transfer direction, to or from device.
+ * @status: dma controller status.
+ * @sg: array of sgs.
+ */
+struct ls2x_dma_desc {
+ struct virt_dma_desc vdesc;
+ bool cyclic;
+ size_t burst_size;
+ u32 desc_num;
+ enum dma_transfer_direction direction;
+ enum dma_status status;
+ struct ls2x_dma_sg sg[] __counted_by(desc_num);
+};
+
+/*-- Channels --------------------------------------------------------*/
+
+/*
+ * struct ls2x_dma_chan - internal representation of an LS2X APB DMA channel
+ * @vchan: virtual dma channel entry.
+ * @desc: pointer to the ls2x sw dma descriptor.
+ * @pool: hw desc table
+ * @irq: irq line
+ * @sconfig: configuration for slave transfers, passed via .device_config
+ */
+struct ls2x_dma_chan {
+ struct virt_dma_chan vchan;
+ struct ls2x_dma_desc *desc;
+ void *pool;
+ int irq;
+ struct dma_slave_config sconfig;
+};
+
+/*-- Controller ------------------------------------------------------*/
+
+/*
+ * struct ls2x_dma_priv - LS2X APB DMAC specific information
+ * @ddev: dmaengine dma_device object members
+ * @dma_clk: DMAC clock source
+ * @regs: memory mapped register base
+ * @lchan: channel to store ls2x_dma_chan structures
+ */
+struct ls2x_dma_priv {
+ struct dma_device ddev;
+ struct clk *dma_clk;
+ void __iomem *regs;
+ struct ls2x_dma_chan lchan;
+};
+
+/*-- Helper functions ------------------------------------------------*/
+
+static inline struct ls2x_dma_desc *to_ldma_desc(struct virt_dma_desc *vdesc)
+{
+ return container_of(vdesc, struct ls2x_dma_desc, vdesc);
+}
+
+static inline struct ls2x_dma_chan *to_ldma_chan(struct dma_chan *chan)
+{
+ return container_of(chan, struct ls2x_dma_chan, vchan.chan);
+}
+
+static inline struct ls2x_dma_priv *to_ldma_priv(struct dma_device *ddev)
+{
+ return container_of(ddev, struct ls2x_dma_priv, ddev);
+}
+
+static struct device *chan2dev(struct dma_chan *chan)
+{
+ return &chan->dev->device;
+}
+
+static void ls2x_dma_desc_free(struct virt_dma_desc *vdesc)
+{
+ struct ls2x_dma_chan *lchan = to_ldma_chan(vdesc->tx.chan);
+ struct ls2x_dma_desc *desc = to_ldma_desc(vdesc);
+ int i;
+
+ for (i = 0; i < desc->desc_num; i++) {
+ if (desc->sg[i].hw)
+ dma_pool_free(lchan->pool, desc->sg[i].hw,
+ desc->sg[i].llp);
+ }
+
+ kfree(desc);
+}
+
+static void ls2x_dma_write_cmd(struct ls2x_dma_chan *lchan, bool cmd)
+{
+ struct ls2x_dma_priv *priv = to_ldma_priv(lchan->vchan.chan.device);
+ u64 val;
+
+ val = lo_hi_readq(priv->regs + LDMA_ORDER_ERG) & ~LDMA_CONFIG_MASK;
+ val |= LDMA_64BIT_EN | cmd;
+ lo_hi_writeq(val, priv->regs + LDMA_ORDER_ERG);
+}
+
+static void ls2x_dma_start_transfer(struct ls2x_dma_chan *lchan)
+{
+ struct ls2x_dma_priv *priv = to_ldma_priv(lchan->vchan.chan.device);
+ struct ls2x_dma_sg *ldma_sg;
+ struct virt_dma_desc *vdesc;
+ u64 val;
+
+ /* Get the next descriptor */
+ vdesc = vchan_next_desc(&lchan->vchan);
+ if (!vdesc) {
+ lchan->desc = NULL;
+ return;
+ }
+
+ list_del(&vdesc->node);
+ lchan->desc = to_ldma_desc(vdesc);
+ ldma_sg = &lchan->desc->sg[0];
+
+ /* Start DMA */
+ lo_hi_writeq(0, priv->regs + LDMA_ORDER_ERG);
+ val = (ldma_sg->llp & ~LDMA_CONFIG_MASK) | LDMA_64BIT_EN | LDMA_START;
+ lo_hi_writeq(val, priv->regs + LDMA_ORDER_ERG);
+}
+
+static size_t ls2x_dmac_detect_burst(struct ls2x_dma_chan *lchan)
+{
+ u32 maxburst, buswidth;
+
+ /* Reject definitely invalid configurations */
+ if ((lchan->sconfig.src_addr_width & LDMA_SLAVE_BUSWIDTHS) &&
+ (lchan->sconfig.dst_addr_width & LDMA_SLAVE_BUSWIDTHS))
+ return 0;
+
+ if (lchan->sconfig.direction == DMA_MEM_TO_DEV) {
+ maxburst = lchan->sconfig.dst_maxburst;
+ buswidth = lchan->sconfig.dst_addr_width;
+ } else {
+ maxburst = lchan->sconfig.src_maxburst;
+ buswidth = lchan->sconfig.src_addr_width;
+ }
+
+ /* If maxburst is zero, fallback to LDMA_MAX_TRANS_LEN */
+ return maxburst ? (maxburst * buswidth) >> 2 : LDMA_MAX_TRANS_LEN;
+}
+
+static void ls2x_dma_fill_desc(struct ls2x_dma_chan *lchan, u32 sg_index,
+ struct ls2x_dma_desc *desc)
+{
+ struct ls2x_dma_sg *ldma_sg = &desc->sg[sg_index];
+ u32 num_segments, segment_size;
+
+ if (desc->direction == DMA_MEM_TO_DEV) {
+ ldma_sg->hw->cmd = LDMA_INT | LDMA_DATA_DIRECTION;
+ ldma_sg->hw->apb_addr = lchan->sconfig.dst_addr;
+ } else {
+ ldma_sg->hw->cmd = LDMA_INT;
+ ldma_sg->hw->apb_addr = lchan->sconfig.src_addr;
+ }
+
+ ldma_sg->hw->mem_addr = lower_32_bits(ldma_sg->phys);
+ ldma_sg->hw->high_mem_addr = upper_32_bits(ldma_sg->phys);
+
+ /* Split into multiple equally sized segments if necessary */
+ num_segments = DIV_ROUND_UP((ldma_sg->len + 3) >> 2, desc->burst_size);
+ segment_size = DIV_ROUND_UP((ldma_sg->len + 3) >> 2, num_segments);
+
+ /* Word count register takes input in words */
+ ldma_sg->hw->len = segment_size;
+ ldma_sg->hw->step_times = num_segments;
+ ldma_sg->hw->step_len = 0;
+
+	/* Chain this descriptor after the previous one to build the linked list */
+ if (sg_index) {
+ desc->sg[sg_index - 1].hw->ndesc_addr = ldma_sg->llp | LDMA_DESC_EN;
+ desc->sg[sg_index - 1].hw->high_ndesc_addr = upper_32_bits(ldma_sg->llp);
+ }
+}
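
The split above works in whole 32-bit words: the byte length is rounded up to words, divided by the negotiated burst size to get the number of steps, then divided again so every step moves the same number of words. A minimal stand-alone sketch of that arithmetic, assuming a 4096-byte segment and a slave config of maxburst 8 on a 4-byte bus (values chosen for illustration, not taken from this patch):

	#include <stdio.h>

	#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

	int main(void)
	{
		unsigned int len = 4096;			/* assumed sg_dma_len() in bytes */
		unsigned int burst_size = (8 * 4) >> 2;		/* ls2x_dmac_detect_burst(): 8 words */
		unsigned int words = (len + 3) >> 2;		/* 1024 words */
		unsigned int step_times = DIV_ROUND_UP(words, burst_size);	/* hw->step_times = 128 */
		unsigned int words_per_step = DIV_ROUND_UP(words, step_times);	/* hw->len = 8 */

		printf("%u steps of %u words each\n", step_times, words_per_step);
		return 0;
	}
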
+
+/*-- DMA Engine API --------------------------------------------------*/
+
+/*
+ * ls2x_dma_alloc_chan_resources - allocate resources for DMA channel
+ * @chan: allocate descriptor resources for this channel
+ *
+ * Return: the number of allocated descriptors
+ */
+static int ls2x_dma_alloc_chan_resources(struct dma_chan *chan)
+{
+ struct ls2x_dma_chan *lchan = to_ldma_chan(chan);
+
+ /* Create a pool of consistent memory blocks for hardware descriptors */
+ lchan->pool = dma_pool_create(dev_name(chan2dev(chan)),
+ chan->device->dev, PAGE_SIZE,
+ __alignof__(struct ls2x_dma_hw_desc), 0);
+ if (!lchan->pool) {
+ dev_err(chan2dev(chan), "No memory for descriptors\n");
+ return -ENOMEM;
+ }
+
+ return 1;
+}
+
+/*
+ * ls2x_dma_free_chan_resources - free all channel resources
+ * @chan: DMA channel
+ */
+static void ls2x_dma_free_chan_resources(struct dma_chan *chan)
+{
+ struct ls2x_dma_chan *lchan = to_ldma_chan(chan);
+
+ vchan_free_chan_resources(to_virt_chan(chan));
+ dma_pool_destroy(lchan->pool);
+ lchan->pool = NULL;
+}
+
+/*
+ * ls2x_dma_prep_slave_sg - prepare descriptors for a DMA_SLAVE transaction
+ * @chan: DMA channel
+ * @sgl: scatterlist to transfer to/from
+ * @sg_len: number of entries in @scatterlist
+ * @direction: DMA direction
+ * @flags: tx descriptor status flags
+ * @context: transaction context (ignored)
+ *
+ * Return: Async transaction descriptor on success and NULL on failure
+ */
+static struct dma_async_tx_descriptor *
+ls2x_dma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
+ u32 sg_len, enum dma_transfer_direction direction,
+ unsigned long flags, void *context)
+{
+ struct ls2x_dma_chan *lchan = to_ldma_chan(chan);
+ struct ls2x_dma_desc *desc;
+ struct scatterlist *sg;
+ size_t burst_size;
+ int i;
+
+ if (unlikely(!sg_len || !is_slave_direction(direction)))
+ return NULL;
+
+ burst_size = ls2x_dmac_detect_burst(lchan);
+ if (!burst_size)
+ return NULL;
+
+ desc = kzalloc(struct_size(desc, sg, sg_len), GFP_NOWAIT);
+ if (!desc)
+ return NULL;
+
+ desc->desc_num = sg_len;
+ desc->direction = direction;
+ desc->burst_size = burst_size;
+
+ for_each_sg(sgl, sg, sg_len, i) {
+ struct ls2x_dma_sg *ldma_sg = &desc->sg[i];
+
+ /* Allocate DMA capable memory for hardware descriptor */
+ ldma_sg->hw = dma_pool_alloc(lchan->pool, GFP_NOWAIT, &ldma_sg->llp);
+ if (!ldma_sg->hw) {
+ desc->desc_num = i;
+ ls2x_dma_desc_free(&desc->vdesc);
+ return NULL;
+ }
+
+ ldma_sg->phys = sg_dma_address(sg);
+ ldma_sg->len = sg_dma_len(sg);
+
+ ls2x_dma_fill_desc(lchan, i, desc);
+ }
+
+	/* Clear the link-enable bit of the last descriptor to end the chain */
+ desc->sg[sg_len - 1].hw->ndesc_addr &= ~LDMA_DESC_EN;
+ desc->status = DMA_IN_PROGRESS;
+
+ return vchan_tx_prep(&lchan->vchan, &desc->vdesc, flags);
+}
+
+/*
+ * ls2x_dma_prep_dma_cyclic - prepare the cyclic DMA transfer
+ * @chan: the DMA channel to prepare
+ * @buf_addr: physical DMA address where the buffer starts
+ * @buf_len: total number of bytes for the entire buffer
+ * @period_len: number of bytes for each period
+ * @direction: transfer direction, to or from device
+ * @flags: tx descriptor status flags
+ *
+ * Return: Async transaction descriptor on success and NULL on failure
+ */
+static struct dma_async_tx_descriptor *
+ls2x_dma_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
+ size_t period_len, enum dma_transfer_direction direction,
+ unsigned long flags)
+{
+ struct ls2x_dma_chan *lchan = to_ldma_chan(chan);
+ struct ls2x_dma_desc *desc;
+ size_t burst_size;
+ u32 num_periods;
+ int i;
+
+ if (unlikely(!buf_len || !period_len))
+ return NULL;
+
+ if (unlikely(!is_slave_direction(direction)))
+ return NULL;
+
+ burst_size = ls2x_dmac_detect_burst(lchan);
+ if (!burst_size)
+ return NULL;
+
+ num_periods = buf_len / period_len;
+ desc = kzalloc(struct_size(desc, sg, num_periods), GFP_NOWAIT);
+ if (!desc)
+ return NULL;
+
+ desc->desc_num = num_periods;
+ desc->direction = direction;
+ desc->burst_size = burst_size;
+
+ /* Build cyclic linked list */
+ for (i = 0; i < num_periods; i++) {
+ struct ls2x_dma_sg *ldma_sg = &desc->sg[i];
+
+ /* Allocate DMA capable memory for hardware descriptor */
+ ldma_sg->hw = dma_pool_alloc(lchan->pool, GFP_NOWAIT, &ldma_sg->llp);
+ if (!ldma_sg->hw) {
+ desc->desc_num = i;
+ ls2x_dma_desc_free(&desc->vdesc);
+ return NULL;
+ }
+
+ ldma_sg->phys = buf_addr + period_len * i;
+ ldma_sg->len = period_len;
+
+ ls2x_dma_fill_desc(lchan, i, desc);
+ }
+
+	/* Link the last descriptor back to the first to close the cyclic list */
+ desc->sg[num_periods - 1].hw->ndesc_addr = desc->sg[0].llp | LDMA_DESC_EN;
+ desc->sg[num_periods - 1].hw->high_ndesc_addr = upper_32_bits(desc->sg[0].llp);
+ desc->cyclic = true;
+ desc->status = DMA_IN_PROGRESS;
+
+ return vchan_tx_prep(&lchan->vchan, &desc->vdesc, flags);
+}
+
+/*
+ * ls2x_dma_slave_config - set slave configuration for channel
+ * @chan: dma channel
+ * @config: slave configuration
+ *
+ * Stores the slave configuration for use when preparing transfers.
+ */
+static int ls2x_dma_slave_config(struct dma_chan *chan,
+ struct dma_slave_config *config)
+{
+ struct ls2x_dma_chan *lchan = to_ldma_chan(chan);
+
+ memcpy(&lchan->sconfig, config, sizeof(*config));
+ return 0;
+}
+
+/*
+ * ls2x_dma_issue_pending - push pending transactions to the hardware
+ * @chan: channel
+ *
+ * When this function is called, all pending transactions are pushed to the
+ * hardware and executed.
+ */
+static void ls2x_dma_issue_pending(struct dma_chan *chan)
+{
+ struct ls2x_dma_chan *lchan = to_ldma_chan(chan);
+ unsigned long flags;
+
+ spin_lock_irqsave(&lchan->vchan.lock, flags);
+ if (vchan_issue_pending(&lchan->vchan) && !lchan->desc)
+ ls2x_dma_start_transfer(lchan);
+ spin_unlock_irqrestore(&lchan->vchan.lock, flags);
+}
+
+/*
+ * ls2x_dma_terminate_all - terminate all transactions
+ * @chan: channel
+ *
+ * Stops all DMA transactions.
+ */
+static int ls2x_dma_terminate_all(struct dma_chan *chan)
+{
+ struct ls2x_dma_chan *lchan = to_ldma_chan(chan);
+ unsigned long flags;
+ LIST_HEAD(head);
+
+ spin_lock_irqsave(&lchan->vchan.lock, flags);
+	/* Issue the stop command */
+ ls2x_dma_write_cmd(lchan, LDMA_STOP);
+ if (lchan->desc) {
+ vchan_terminate_vdesc(&lchan->desc->vdesc);
+ lchan->desc = NULL;
+ }
+
+ vchan_get_all_descriptors(&lchan->vchan, &head);
+ spin_unlock_irqrestore(&lchan->vchan.lock, flags);
+
+ vchan_dma_desc_free_list(&lchan->vchan, &head);
+ return 0;
+}
+
+/*
+ * ls2x_dma_synchronize - Synchronizes the termination of transfers to the
+ * current context.
+ * @chan: channel
+ */
+static void ls2x_dma_synchronize(struct dma_chan *chan)
+{
+ struct ls2x_dma_chan *lchan = to_ldma_chan(chan);
+
+ vchan_synchronize(&lchan->vchan);
+}
+
+static int ls2x_dma_pause(struct dma_chan *chan)
+{
+ struct ls2x_dma_chan *lchan = to_ldma_chan(chan);
+ unsigned long flags;
+
+ spin_lock_irqsave(&lchan->vchan.lock, flags);
+ if (lchan->desc && lchan->desc->status == DMA_IN_PROGRESS) {
+ ls2x_dma_write_cmd(lchan, LDMA_STOP);
+ lchan->desc->status = DMA_PAUSED;
+ }
+ spin_unlock_irqrestore(&lchan->vchan.lock, flags);
+
+ return 0;
+}
+
+static int ls2x_dma_resume(struct dma_chan *chan)
+{
+ struct ls2x_dma_chan *lchan = to_ldma_chan(chan);
+ unsigned long flags;
+
+ spin_lock_irqsave(&lchan->vchan.lock, flags);
+ if (lchan->desc && lchan->desc->status == DMA_PAUSED) {
+ lchan->desc->status = DMA_IN_PROGRESS;
+ ls2x_dma_write_cmd(lchan, LDMA_START);
+ }
+ spin_unlock_irqrestore(&lchan->vchan.lock, flags);
+
+ return 0;
+}
+
+/*
+ * ls2x_dma_isr - LS2X DMA Interrupt handler
+ * @irq: IRQ number
+ * @dev_id: Pointer to ls2x_dma_chan
+ *
+ * Return: IRQ_HANDLED/IRQ_NONE
+ */
+static irqreturn_t ls2x_dma_isr(int irq, void *dev_id)
+{
+ struct ls2x_dma_chan *lchan = dev_id;
+ struct ls2x_dma_desc *desc;
+
+ spin_lock(&lchan->vchan.lock);
+ desc = lchan->desc;
+ if (desc) {
+ if (desc->cyclic) {
+ vchan_cyclic_callback(&desc->vdesc);
+ } else {
+ desc->status = DMA_COMPLETE;
+ vchan_cookie_complete(&desc->vdesc);
+ ls2x_dma_start_transfer(lchan);
+ }
+
+ /* ls2x_dma_start_transfer() updates lchan->desc */
+ if (!lchan->desc)
+ ls2x_dma_write_cmd(lchan, LDMA_STOP);
+ }
+ spin_unlock(&lchan->vchan.lock);
+
+ return IRQ_HANDLED;
+}
+
+static int ls2x_dma_chan_init(struct platform_device *pdev,
+ struct ls2x_dma_priv *priv)
+{
+ struct ls2x_dma_chan *lchan = &priv->lchan;
+ struct device *dev = &pdev->dev;
+ int ret;
+
+ lchan->irq = platform_get_irq(pdev, 0);
+ if (lchan->irq < 0)
+ return lchan->irq;
+
+ ret = devm_request_irq(dev, lchan->irq, ls2x_dma_isr, IRQF_TRIGGER_RISING,
+ dev_name(&pdev->dev), lchan);
+ if (ret)
+ return ret;
+
+	/* Initialize channel-related values */
+ INIT_LIST_HEAD(&priv->ddev.channels);
+ lchan->vchan.desc_free = ls2x_dma_desc_free;
+ vchan_init(&lchan->vchan, &priv->ddev);
+
+ return 0;
+}
+
+/*
+ * ls2x_dma_probe - Driver probe function
+ * @pdev: Pointer to the platform_device structure
+ *
+ * Return: '0' on success and failure value on error
+ */
+static int ls2x_dma_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct ls2x_dma_priv *priv;
+ struct dma_device *ddev;
+ int ret;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->regs = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(priv->regs))
+ return dev_err_probe(dev, PTR_ERR(priv->regs),
+ "devm_platform_ioremap_resource failed.\n");
+
+ priv->dma_clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(priv->dma_clk))
+ return dev_err_probe(dev, PTR_ERR(priv->dma_clk), "devm_clk_get failed.\n");
+
+ ret = clk_prepare_enable(priv->dma_clk);
+ if (ret)
+ return dev_err_probe(dev, ret, "clk_prepare_enable failed.\n");
+
+ ret = ls2x_dma_chan_init(pdev, priv);
+ if (ret)
+ goto disable_clk;
+
+ ddev = &priv->ddev;
+ ddev->dev = dev;
+ dma_cap_zero(ddev->cap_mask);
+ dma_cap_set(DMA_SLAVE, ddev->cap_mask);
+ dma_cap_set(DMA_CYCLIC, ddev->cap_mask);
+
+ ddev->device_alloc_chan_resources = ls2x_dma_alloc_chan_resources;
+ ddev->device_free_chan_resources = ls2x_dma_free_chan_resources;
+ ddev->device_tx_status = dma_cookie_status;
+ ddev->device_issue_pending = ls2x_dma_issue_pending;
+ ddev->device_prep_slave_sg = ls2x_dma_prep_slave_sg;
+ ddev->device_prep_dma_cyclic = ls2x_dma_prep_dma_cyclic;
+ ddev->device_config = ls2x_dma_slave_config;
+ ddev->device_terminate_all = ls2x_dma_terminate_all;
+ ddev->device_synchronize = ls2x_dma_synchronize;
+ ddev->device_pause = ls2x_dma_pause;
+ ddev->device_resume = ls2x_dma_resume;
+
+ ddev->src_addr_widths = LDMA_SLAVE_BUSWIDTHS;
+ ddev->dst_addr_widths = LDMA_SLAVE_BUSWIDTHS;
+ ddev->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
+
+ ret = dma_async_device_register(&priv->ddev);
+ if (ret < 0)
+ goto disable_clk;
+
+ ret = of_dma_controller_register(dev->of_node, of_dma_xlate_by_chan_id, priv);
+ if (ret < 0)
+ goto unregister_dmac;
+
+ platform_set_drvdata(pdev, priv);
+
+ dev_info(dev, "Loongson LS2X APB DMA driver registered successfully.\n");
+ return 0;
+
+unregister_dmac:
+ dma_async_device_unregister(&priv->ddev);
+disable_clk:
+ clk_disable_unprepare(priv->dma_clk);
+
+ return ret;
+}
+
+/*
+ * ls2x_dma_remove - Driver remove function
+ * @pdev: Pointer to the platform_device structure
+ */
+static void ls2x_dma_remove(struct platform_device *pdev)
+{
+ struct ls2x_dma_priv *priv = platform_get_drvdata(pdev);
+
+ of_dma_controller_free(pdev->dev.of_node);
+ dma_async_device_unregister(&priv->ddev);
+ clk_disable_unprepare(priv->dma_clk);
+}
+
+static const struct of_device_id ls2x_dma_of_match_table[] = {
+ { .compatible = "loongson,ls2k1000-apbdma" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, ls2x_dma_of_match_table);
+
+static struct platform_driver ls2x_dmac_driver = {
+ .probe = ls2x_dma_probe,
+ .remove_new = ls2x_dma_remove,
+ .driver = {
+ .name = "ls2x-apbdma",
+ .of_match_table = ls2x_dma_of_match_table,
+ },
+};
+module_platform_driver(ls2x_dmac_driver);
+
+MODULE_DESCRIPTION("Loongson LS2X APB DMA Controller driver");
+MODULE_AUTHOR("Loongson Technology Corporation Limited");
+MODULE_LICENSE("GPL");
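
The controller is driven entirely through the generic dmaengine slave interface registered above (device_config, prep_slave_sg, prep_dma_cyclic, issue_pending). A hedged client-side sketch of how a peripheral driver on the same APB bus might start a memory-to-device transfer; the channel name, FIFO address, buffer, length and callback below are placeholders, not taken from this patch:

	struct dma_slave_config cfg = {
		.direction      = DMA_MEM_TO_DEV,
		.dst_addr       = fifo_phys_addr,		/* peripheral FIFO (assumed) */
		.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
		.dst_maxburst   = 8,
	};
	struct dma_async_tx_descriptor *tx;
	struct dma_chan *chan;

	chan = dma_request_chan(dev, "tx");			/* name from the client's DT node */
	if (IS_ERR(chan))
		return PTR_ERR(chan);

	dmaengine_slave_config(chan, &cfg);

	tx = dmaengine_prep_slave_single(chan, buf_dma, buf_len, DMA_MEM_TO_DEV,
					 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!tx) {
		dma_release_channel(chan);
		return -EIO;
	}

	tx->callback = xfer_done;				/* hypothetical completion handler */
	tx->callback_param = priv;
	dmaengine_submit(tx);
	dma_async_issue_pending(chan);

Because the controller registers of_dma_xlate_by_chan_id() and exposes a single channel, a client's dmas specifier simply selects channel 0.
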
diff --git a/drivers/dma/milbeaut-hdmac.c b/drivers/dma/milbeaut-hdmac.c
index 1b0a958926..7b41c67097 100644
--- a/drivers/dma/milbeaut-hdmac.c
+++ b/drivers/dma/milbeaut-hdmac.c
@@ -531,7 +531,7 @@ disable_clk:
return ret;
}
-static int milbeaut_hdmac_remove(struct platform_device *pdev)
+static void milbeaut_hdmac_remove(struct platform_device *pdev)
{
struct milbeaut_hdmac_device *mdev = platform_get_drvdata(pdev);
struct dma_chan *chan;
@@ -546,16 +546,21 @@ static int milbeaut_hdmac_remove(struct platform_device *pdev)
*/
list_for_each_entry(chan, &mdev->ddev.channels, device_node) {
ret = dmaengine_terminate_sync(chan);
- if (ret)
- return ret;
+ if (ret) {
+ /*
+ * This results in resource leakage and maybe also
+ * use-after-free errors as e.g. *mdev is kfreed.
+ */
+ dev_alert(&pdev->dev, "Failed to terminate channel %d (%pe)\n",
+ chan->chan_id, ERR_PTR(ret));
+ return;
+ }
milbeaut_hdmac_free_chan_resources(chan);
}
of_dma_controller_free(pdev->dev.of_node);
dma_async_device_unregister(&mdev->ddev);
clk_disable_unprepare(mdev->clk);
-
- return 0;
}
static const struct of_device_id milbeaut_hdmac_match[] = {
@@ -566,7 +571,7 @@ MODULE_DEVICE_TABLE(of, milbeaut_hdmac_match);
static struct platform_driver milbeaut_hdmac_driver = {
.probe = milbeaut_hdmac_probe,
- .remove = milbeaut_hdmac_remove,
+ .remove_new = milbeaut_hdmac_remove,
.driver = {
.name = "milbeaut-m10v-hdmac",
.of_match_table = milbeaut_hdmac_match,
diff --git a/drivers/dma/milbeaut-xdmac.c b/drivers/dma/milbeaut-xdmac.c
index d29d01e730..2cce529b44 100644
--- a/drivers/dma/milbeaut-xdmac.c
+++ b/drivers/dma/milbeaut-xdmac.c
@@ -368,7 +368,7 @@ disable_xdmac:
return ret;
}
-static int milbeaut_xdmac_remove(struct platform_device *pdev)
+static void milbeaut_xdmac_remove(struct platform_device *pdev)
{
struct milbeaut_xdmac_device *mdev = platform_get_drvdata(pdev);
struct dma_chan *chan;
@@ -383,8 +383,15 @@ static int milbeaut_xdmac_remove(struct platform_device *pdev)
*/
list_for_each_entry(chan, &mdev->ddev.channels, device_node) {
ret = dmaengine_terminate_sync(chan);
- if (ret)
- return ret;
+ if (ret) {
+ /*
+ * This results in resource leakage and maybe also
+ * use-after-free errors as e.g. *mdev is kfreed.
+ */
+ dev_alert(&pdev->dev, "Failed to terminate channel %d (%pe)\n",
+ chan->chan_id, ERR_PTR(ret));
+ return;
+ }
milbeaut_xdmac_free_chan_resources(chan);
}
@@ -392,8 +399,6 @@ static int milbeaut_xdmac_remove(struct platform_device *pdev)
dma_async_device_unregister(&mdev->ddev);
disable_xdmac(mdev);
-
- return 0;
}
static const struct of_device_id milbeaut_xdmac_match[] = {
@@ -404,7 +409,7 @@ MODULE_DEVICE_TABLE(of, milbeaut_xdmac_match);
static struct platform_driver milbeaut_xdmac_driver = {
.probe = milbeaut_xdmac_probe,
- .remove = milbeaut_xdmac_remove,
+ .remove_new = milbeaut_xdmac_remove,
.driver = {
.name = "milbeaut-m10v-xdmac",
.of_match_table = milbeaut_xdmac_match,
diff --git a/drivers/dma/owl-dma.c b/drivers/dma/owl-dma.c
index 4e76c4ec2d..e001f4f7aa 100644
--- a/drivers/dma/owl-dma.c
+++ b/drivers/dma/owl-dma.c
@@ -250,7 +250,7 @@ static void pchan_update(struct owl_dma_pchan *pchan, u32 reg,
else
regval &= ~val;
- writel(val, pchan->base + reg);
+ writel(regval, pchan->base + reg);
}
static void pchan_writel(struct owl_dma_pchan *pchan, u32 reg, u32 data)
@@ -274,7 +274,7 @@ static void dma_update(struct owl_dma *od, u32 reg, u32 val, bool state)
else
regval &= ~val;
- writel(val, od->base + reg);
+ writel(regval, od->base + reg);
}
static void dma_writel(struct owl_dma *od, u32 reg, u32 data)
diff --git a/drivers/dma/sf-pdma/sf-pdma.c b/drivers/dma/sf-pdma/sf-pdma.c
index 3125a2f162..4284736111 100644
--- a/drivers/dma/sf-pdma/sf-pdma.c
+++ b/drivers/dma/sf-pdma/sf-pdma.c
@@ -20,10 +20,13 @@
#include <linux/mod_devicetable.h>
#include <linux/dma-mapping.h>
#include <linux/of.h>
+#include <linux/of_dma.h>
#include <linux/slab.h>
#include "sf-pdma.h"
+#define PDMA_QUIRK_NO_STRICT_ORDERING BIT(0)
+
#ifndef readq
static inline unsigned long long readq(void __iomem *addr)
{
@@ -65,7 +68,7 @@ static struct sf_pdma_desc *sf_pdma_alloc_desc(struct sf_pdma_chan *chan)
static void sf_pdma_fill_desc(struct sf_pdma_desc *desc,
u64 dst, u64 src, u64 size)
{
- desc->xfer_type = PDMA_FULL_SPEED;
+ desc->xfer_type = desc->chan->pdma->transfer_type;
desc->xfer_size = size;
desc->dst_addr = dst;
desc->src_addr = src;
@@ -492,6 +495,7 @@ static void sf_pdma_setup_chans(struct sf_pdma *pdma)
static int sf_pdma_probe(struct platform_device *pdev)
{
+ const struct sf_pdma_driver_platdata *ddata;
struct sf_pdma *pdma;
int ret, n_chans;
const enum dma_slave_buswidth widths =
@@ -517,6 +521,14 @@ static int sf_pdma_probe(struct platform_device *pdev)
pdma->n_chans = n_chans;
+ pdma->transfer_type = PDMA_FULL_SPEED | PDMA_STRICT_ORDERING;
+
+ ddata = device_get_match_data(&pdev->dev);
+ if (ddata) {
+ if (ddata->quirks & PDMA_QUIRK_NO_STRICT_ORDERING)
+ pdma->transfer_type &= ~PDMA_STRICT_ORDERING;
+ }
+
pdma->membase = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(pdma->membase))
return PTR_ERR(pdma->membase);
@@ -563,7 +575,20 @@ static int sf_pdma_probe(struct platform_device *pdev)
return ret;
}
+ ret = of_dma_controller_register(pdev->dev.of_node,
+ of_dma_xlate_by_chan_id, pdma);
+ if (ret < 0) {
+ dev_err(&pdev->dev,
+ "Can't register SiFive Platform OF_DMA. (%d)\n", ret);
+ goto err_unregister;
+ }
+
return 0;
+
+err_unregister:
+ dma_async_device_unregister(&pdma->dma_dev);
+
+ return ret;
}
static void sf_pdma_remove(struct platform_device *pdev)
@@ -583,12 +608,25 @@ static void sf_pdma_remove(struct platform_device *pdev)
tasklet_kill(&ch->err_tasklet);
}
+ if (pdev->dev.of_node)
+ of_dma_controller_free(pdev->dev.of_node);
+
dma_async_device_unregister(&pdma->dma_dev);
}
+static const struct sf_pdma_driver_platdata mpfs_pdma = {
+ .quirks = PDMA_QUIRK_NO_STRICT_ORDERING,
+};
+
static const struct of_device_id sf_pdma_dt_ids[] = {
- { .compatible = "sifive,fu540-c000-pdma" },
- { .compatible = "sifive,pdma0" },
+ {
+ .compatible = "sifive,fu540-c000-pdma",
+ }, {
+ .compatible = "sifive,pdma0",
+ }, {
+ .compatible = "microchip,mpfs-pdma",
+ .data = &mpfs_pdma,
+ },
{},
};
MODULE_DEVICE_TABLE(of, sf_pdma_dt_ids);
diff --git a/drivers/dma/sf-pdma/sf-pdma.h b/drivers/dma/sf-pdma/sf-pdma.h
index d05772b5d8..215e07183d 100644
--- a/drivers/dma/sf-pdma/sf-pdma.h
+++ b/drivers/dma/sf-pdma/sf-pdma.h
@@ -48,7 +48,8 @@
#define PDMA_ERR_STATUS_MASK GENMASK(31, 31)
/* Transfer Type */
-#define PDMA_FULL_SPEED 0xFF000008
+#define PDMA_FULL_SPEED 0xFF000000
+#define PDMA_STRICT_ORDERING BIT(3)
/* Error Recovery */
#define MAX_RETRY 1
@@ -112,8 +113,13 @@ struct sf_pdma {
struct dma_device dma_dev;
void __iomem *membase;
void __iomem *mappedbase;
+ u32 transfer_type;
u32 n_chans;
struct sf_pdma_chan chans[] __counted_by(n_chans);
};
+struct sf_pdma_driver_platdata {
+ u32 quirks;
+};
+
#endif /* _SF_PDMA_H */
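
The transfer-type split is a pure recomposition of the old magic constant: 0xFF000000 | BIT(3) gives back the previous 0xFF000008, so existing SiFive parts keep the exact register value they used before, while the microchip,mpfs-pdma match data only clears bit 3. In other words:

	u32 xfer_default = PDMA_FULL_SPEED | PDMA_STRICT_ORDERING;	/* 0xFF000008, as before */
	u32 xfer_mpfs    = xfer_default & ~PDMA_STRICT_ORDERING;	/* 0xFF000000 with the mpfs quirk */
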
diff --git a/drivers/dma/sh/rz-dmac.c b/drivers/dma/sh/rz-dmac.c
index fea5bda34b..1f1e86ba5c 100644
--- a/drivers/dma/sh/rz-dmac.c
+++ b/drivers/dma/sh/rz-dmac.c
@@ -755,11 +755,11 @@ static struct dma_chan *rz_dmac_of_xlate(struct of_phandle_args *dma_spec,
static int rz_dmac_chan_probe(struct rz_dmac *dmac,
struct rz_dmac_chan *channel,
- unsigned int index)
+ u8 index)
{
struct platform_device *pdev = to_platform_device(dmac->dev);
struct rz_lmdesc *lmdesc;
- char pdev_irqname[5];
+ char pdev_irqname[6];
char *irqname;
int ret;
@@ -767,7 +767,7 @@ static int rz_dmac_chan_probe(struct rz_dmac *dmac,
channel->mid_rid = -EINVAL;
/* Request the channel interrupt. */
- sprintf(pdev_irqname, "ch%u", index);
+ scnprintf(pdev_irqname, sizeof(pdev_irqname), "ch%u", index);
channel->irq = platform_get_irq_byname(pdev, pdev_irqname);
if (channel->irq < 0)
return channel->irq;
@@ -845,9 +845,9 @@ static int rz_dmac_probe(struct platform_device *pdev)
struct dma_device *engine;
struct rz_dmac *dmac;
int channel_num;
- unsigned int i;
int ret;
int irq;
+ u8 i;
dmac = devm_kzalloc(&pdev->dev, sizeof(*dmac), GFP_KERNEL);
if (!dmac)
diff --git a/drivers/dma/sh/usb-dmac.c b/drivers/dma/sh/usb-dmac.c
index a9b4302f60..f7cd0cad05 100644
--- a/drivers/dma/sh/usb-dmac.c
+++ b/drivers/dma/sh/usb-dmac.c
@@ -706,10 +706,10 @@ static const struct dev_pm_ops usb_dmac_pm = {
static int usb_dmac_chan_probe(struct usb_dmac *dmac,
struct usb_dmac_chan *uchan,
- unsigned int index)
+ u8 index)
{
struct platform_device *pdev = to_platform_device(dmac->dev);
- char pdev_irqname[5];
+ char pdev_irqname[6];
char *irqname;
int ret;
@@ -717,7 +717,7 @@ static int usb_dmac_chan_probe(struct usb_dmac *dmac,
uchan->iomem = dmac->iomem + USB_DMAC_CHAN_OFFSET(index);
/* Request the channel interrupt. */
- sprintf(pdev_irqname, "ch%u", index);
+ scnprintf(pdev_irqname, sizeof(pdev_irqname), "ch%u", index);
uchan->irq = platform_get_irq_byname(pdev, pdev_irqname);
if (uchan->irq < 0)
return -ENODEV;
@@ -768,8 +768,8 @@ static int usb_dmac_probe(struct platform_device *pdev)
const enum dma_slave_buswidth widths = USB_DMAC_SLAVE_BUSWIDTH;
struct dma_device *engine;
struct usb_dmac *dmac;
- unsigned int i;
int ret;
+ u8 i;
dmac = devm_kzalloc(&pdev->dev, sizeof(*dmac), GFP_KERNEL);
if (!dmac)
@@ -869,7 +869,7 @@ static void usb_dmac_chan_remove(struct usb_dmac *dmac,
static void usb_dmac_remove(struct platform_device *pdev)
{
struct usb_dmac *dmac = platform_get_drvdata(pdev);
- int i;
+ u8 i;
for (i = 0; i < dmac->n_channels; ++i)
usb_dmac_chan_remove(dmac, &dmac->channels[i]);
diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c
index 002833fb1f..2c48929914 100644
--- a/drivers/dma/ste_dma40.c
+++ b/drivers/dma/ste_dma40.c
@@ -31,13 +31,11 @@
/**
* struct stedma40_platform_data - Configuration struct for the dma device.
*
- * @dev_tx: mapping between destination event line and io address
- * @dev_rx: mapping between source event line and io address
* @disabled_channels: A vector, ending with -1, that marks physical channels
* that are for different reasons not available for the driver.
* @soft_lli_chans: A vector, that marks physical channels will use LLI by SW
* which avoids HW bug that exists in some versions of the controller.
- * SoftLLI introduces relink overhead that could impact performace for
+ * SoftLLI introduces relink overhead that could impact performance for
* certain use cases.
* @num_of_soft_lli_chans: The number of channels that needs to be configured
* to use SoftLLI.
@@ -184,7 +182,7 @@ static __maybe_unused u32 d40_backup_regs[] = {
/*
* since 9540 and 8540 has the same HW revision
- * use v4a for 9540 or ealier
+ * use v4a for 9540 or earlier
* use v4b for 8540 or later
* HW revision:
* DB8500ed has revision 0
@@ -411,7 +409,7 @@ struct d40_desc {
*
* @base: The virtual address of LCLA. 18 bit aligned.
* @dma_addr: DMA address, if mapped
- * @base_unaligned: The orignal kmalloc pointer, if kmalloc is used.
+ * @base_unaligned: The original kmalloc pointer, if kmalloc is used.
* This pointer is only there for clean-up on error.
* @pages: The number of pages needed for all physical channels.
* Only used later for clean-up on error
@@ -1655,7 +1653,7 @@ static void dma_tasklet(struct tasklet_struct *t)
return;
check_pending_tx:
- /* Rescue manouver if receiving double interrupts */
+ /* Rescue maneuver if receiving double interrupts */
if (d40c->pending_tx > 0)
d40c->pending_tx--;
spin_unlock_irqrestore(&d40c->lock, flags);
@@ -3412,7 +3410,7 @@ static int __init d40_lcla_allocate(struct d40_base *base)
base->lcla_pool.base = (void *)page_list[i];
} else {
/*
- * After many attempts and no succees with finding the correct
+ * After many attempts and no success with finding the correct
* alignment, try with allocating a big buffer.
*/
dev_warn(base->dev,
diff --git a/drivers/dma/tegra186-gpc-dma.c b/drivers/dma/tegra186-gpc-dma.c
index fa4d4142a6..3642508e88 100644
--- a/drivers/dma/tegra186-gpc-dma.c
+++ b/drivers/dma/tegra186-gpc-dma.c
@@ -746,6 +746,9 @@ static int tegra_dma_get_residual(struct tegra_dma_channel *tdc)
bytes_xfer = dma_desc->bytes_xfer +
sg_req[dma_desc->sg_idx].len - (wcount * 4);
+ if (dma_desc->bytes_req == bytes_xfer)
+ return 0;
+
residual = dma_desc->bytes_req - (bytes_xfer % dma_desc->bytes_req);
return residual;
@@ -1348,8 +1351,8 @@ static int tegra_dma_program_sid(struct tegra_dma_channel *tdc, int stream_id)
static int tegra_dma_probe(struct platform_device *pdev)
{
const struct tegra_dma_chip_data *cdata = NULL;
- struct iommu_fwspec *iommu_spec;
- unsigned int stream_id, i;
+ unsigned int i;
+ u32 stream_id;
struct tegra_dma *tdma;
int ret;
@@ -1378,12 +1381,10 @@ static int tegra_dma_probe(struct platform_device *pdev)
tdma->dma_dev.dev = &pdev->dev;
- iommu_spec = dev_iommu_fwspec_get(&pdev->dev);
- if (!iommu_spec) {
+ if (!tegra_dev_iommu_get_stream_id(&pdev->dev, &stream_id)) {
dev_err(&pdev->dev, "Missing iommu stream-id\n");
return -EINVAL;
}
- stream_id = iommu_spec->ids[0] & 0xffff;
ret = device_property_read_u32(&pdev->dev, "dma-channel-mask",
&tdma->chan_mask);
diff --git a/drivers/dma/tegra210-adma.c b/drivers/dma/tegra210-adma.c
index 7a0586633b..24ad7077c5 100644
--- a/drivers/dma/tegra210-adma.c
+++ b/drivers/dma/tegra210-adma.c
@@ -153,6 +153,7 @@ struct tegra_adma {
void __iomem *base_addr;
struct clk *ahub_clk;
unsigned int nr_channels;
+ unsigned long *dma_chan_mask;
unsigned long rx_requests_reserved;
unsigned long tx_requests_reserved;
@@ -741,6 +742,10 @@ static int __maybe_unused tegra_adma_runtime_suspend(struct device *dev)
for (i = 0; i < tdma->nr_channels; i++) {
tdc = &tdma->channels[i];
+		/* skip reserved channels */
+ if (!tdc->tdma)
+ continue;
+
ch_reg = &tdc->ch_regs;
ch_reg->cmd = tdma_ch_read(tdc, ADMA_CH_CMD);
/* skip if channel is not active */
@@ -779,6 +784,9 @@ static int __maybe_unused tegra_adma_runtime_resume(struct device *dev)
for (i = 0; i < tdma->nr_channels; i++) {
tdc = &tdma->channels[i];
+		/* skip reserved channels */
+ if (!tdc->tdma)
+ continue;
ch_reg = &tdc->ch_regs;
/* skip if channel was not active earlier */
if (!ch_reg->cmd)
@@ -867,10 +875,31 @@ static int tegra_adma_probe(struct platform_device *pdev)
return PTR_ERR(tdma->ahub_clk);
}
+ tdma->dma_chan_mask = devm_kzalloc(&pdev->dev,
+ BITS_TO_LONGS(tdma->nr_channels) * sizeof(unsigned long),
+ GFP_KERNEL);
+ if (!tdma->dma_chan_mask)
+ return -ENOMEM;
+
+ /* Enable all channels by default */
+ bitmap_fill(tdma->dma_chan_mask, tdma->nr_channels);
+
+ ret = of_property_read_u32_array(pdev->dev.of_node, "dma-channel-mask",
+ (u32 *)tdma->dma_chan_mask,
+ BITS_TO_U32(tdma->nr_channels));
+ if (ret < 0 && (ret != -EINVAL)) {
+ dev_err(&pdev->dev, "dma-channel-mask is not complete.\n");
+ return ret;
+ }
+
INIT_LIST_HEAD(&tdma->dma_dev.channels);
for (i = 0; i < tdma->nr_channels; i++) {
struct tegra_adma_chan *tdc = &tdma->channels[i];
+		/* skip reserved channels */
+ if (!test_bit(i, tdma->dma_chan_mask))
+ continue;
+
tdc->chan_addr = tdma->base_addr + cdata->ch_base_offset
+ (cdata->ch_reg_size * i);
@@ -957,8 +986,10 @@ static void tegra_adma_remove(struct platform_device *pdev)
of_dma_controller_free(pdev->dev.of_node);
dma_async_device_unregister(&tdma->dma_dev);
- for (i = 0; i < tdma->nr_channels; ++i)
- irq_dispose_mapping(tdma->channels[i].irq);
+ for (i = 0; i < tdma->nr_channels; ++i) {
+ if (tdma->channels[i].irq)
+ irq_dispose_mapping(tdma->channels[i].irq);
+ }
pm_runtime_disable(&pdev->dev);
}
diff --git a/drivers/dma/ti/Makefile b/drivers/dma/ti/Makefile
index acc950bf60..d376c117ce 100644
--- a/drivers/dma/ti/Makefile
+++ b/drivers/dma/ti/Makefile
@@ -12,6 +12,7 @@ k3-psil-lib-objs := k3-psil.o \
k3-psil-j721s2.o \
k3-psil-am62.o \
k3-psil-am62a.o \
- k3-psil-j784s4.o
+ k3-psil-j784s4.o \
+ k3-psil-am62p.o
obj-$(CONFIG_TI_K3_PSIL) += k3-psil-lib.o
obj-$(CONFIG_TI_DMA_CROSSBAR) += dma-crossbar.o
diff --git a/drivers/dma/ti/k3-psil-am62p.c b/drivers/dma/ti/k3-psil-am62p.c
new file mode 100644
index 0000000000..0f338e16d9
--- /dev/null
+++ b/drivers/dma/ti/k3-psil-am62p.c
@@ -0,0 +1,325 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2023 Texas Instruments Incorporated - https://www.ti.com
+ */
+
+#include <linux/kernel.h>
+
+#include "k3-psil-priv.h"
+
+#define PSIL_PDMA_XY_TR(x) \
+ { \
+ .thread_id = x, \
+ .ep_config = { \
+ .ep_type = PSIL_EP_PDMA_XY, \
+ .mapped_channel_id = -1, \
+ .default_flow_id = -1, \
+ }, \
+ }
+
+#define PSIL_PDMA_XY_PKT(x) \
+ { \
+ .thread_id = x, \
+ .ep_config = { \
+ .ep_type = PSIL_EP_PDMA_XY, \
+ .mapped_channel_id = -1, \
+ .default_flow_id = -1, \
+ .pkt_mode = 1, \
+ }, \
+ }
+
+#define PSIL_ETHERNET(x, ch, flow_base, flow_cnt) \
+ { \
+ .thread_id = x, \
+ .ep_config = { \
+ .ep_type = PSIL_EP_NATIVE, \
+ .pkt_mode = 1, \
+ .needs_epib = 1, \
+ .psd_size = 16, \
+ .mapped_channel_id = ch, \
+ .flow_start = flow_base, \
+ .flow_num = flow_cnt, \
+ .default_flow_id = flow_base, \
+ }, \
+ }
+
+#define PSIL_SAUL(x, ch, flow_base, flow_cnt, default_flow, tx) \
+ { \
+ .thread_id = x, \
+ .ep_config = { \
+ .ep_type = PSIL_EP_NATIVE, \
+ .pkt_mode = 1, \
+ .needs_epib = 1, \
+ .psd_size = 64, \
+ .mapped_channel_id = ch, \
+ .flow_start = flow_base, \
+ .flow_num = flow_cnt, \
+ .default_flow_id = default_flow, \
+ .notdpkt = tx, \
+ }, \
+ }
+
+#define PSIL_PDMA_MCASP(x) \
+ { \
+ .thread_id = x, \
+ .ep_config = { \
+ .ep_type = PSIL_EP_PDMA_XY, \
+ .pdma_acc32 = 1, \
+ .pdma_burst = 1, \
+ }, \
+ }
+
+#define PSIL_CSI2RX(x) \
+ { \
+ .thread_id = x, \
+ .ep_config = { \
+ .ep_type = PSIL_EP_NATIVE, \
+ }, \
+ }
+
+/* PSI-L source thread IDs, used for RX (DMA_DEV_TO_MEM) */
+static struct psil_ep am62p_src_ep_map[] = {
+ /* SAUL */
+ PSIL_SAUL(0x7504, 20, 35, 8, 35, 0),
+ PSIL_SAUL(0x7505, 21, 35, 8, 36, 0),
+ PSIL_SAUL(0x7506, 22, 43, 8, 43, 0),
+ PSIL_SAUL(0x7507, 23, 43, 8, 44, 0),
+ /* PDMA_MAIN0 - SPI0-2 */
+ PSIL_PDMA_XY_PKT(0x4300),
+ PSIL_PDMA_XY_PKT(0x4301),
+ PSIL_PDMA_XY_PKT(0x4302),
+ PSIL_PDMA_XY_PKT(0x4303),
+ PSIL_PDMA_XY_PKT(0x4304),
+ PSIL_PDMA_XY_PKT(0x4305),
+ PSIL_PDMA_XY_PKT(0x4306),
+ PSIL_PDMA_XY_PKT(0x4307),
+ PSIL_PDMA_XY_PKT(0x4308),
+ PSIL_PDMA_XY_PKT(0x4309),
+ PSIL_PDMA_XY_PKT(0x430a),
+ PSIL_PDMA_XY_PKT(0x430b),
+ /* PDMA_MAIN1 - UART0-6 */
+ PSIL_PDMA_XY_PKT(0x4400),
+ PSIL_PDMA_XY_PKT(0x4401),
+ PSIL_PDMA_XY_PKT(0x4402),
+ PSIL_PDMA_XY_PKT(0x4403),
+ PSIL_PDMA_XY_PKT(0x4404),
+ PSIL_PDMA_XY_PKT(0x4405),
+ PSIL_PDMA_XY_PKT(0x4406),
+ /* PDMA_MAIN2 - MCASP0-2 */
+ PSIL_PDMA_MCASP(0x4500),
+ PSIL_PDMA_MCASP(0x4501),
+ PSIL_PDMA_MCASP(0x4502),
+ /* CPSW3G */
+ PSIL_ETHERNET(0x4600, 19, 19, 16),
+ /* CSI2RX */
+ PSIL_CSI2RX(0x5000),
+ PSIL_CSI2RX(0x5001),
+ PSIL_CSI2RX(0x5002),
+ PSIL_CSI2RX(0x5003),
+ PSIL_CSI2RX(0x5004),
+ PSIL_CSI2RX(0x5005),
+ PSIL_CSI2RX(0x5006),
+ PSIL_CSI2RX(0x5007),
+ PSIL_CSI2RX(0x5008),
+ PSIL_CSI2RX(0x5009),
+ PSIL_CSI2RX(0x500a),
+ PSIL_CSI2RX(0x500b),
+ PSIL_CSI2RX(0x500c),
+ PSIL_CSI2RX(0x500d),
+ PSIL_CSI2RX(0x500e),
+ PSIL_CSI2RX(0x500f),
+ PSIL_CSI2RX(0x5010),
+ PSIL_CSI2RX(0x5011),
+ PSIL_CSI2RX(0x5012),
+ PSIL_CSI2RX(0x5013),
+ PSIL_CSI2RX(0x5014),
+ PSIL_CSI2RX(0x5015),
+ PSIL_CSI2RX(0x5016),
+ PSIL_CSI2RX(0x5017),
+ PSIL_CSI2RX(0x5018),
+ PSIL_CSI2RX(0x5019),
+ PSIL_CSI2RX(0x501a),
+ PSIL_CSI2RX(0x501b),
+ PSIL_CSI2RX(0x501c),
+ PSIL_CSI2RX(0x501d),
+ PSIL_CSI2RX(0x501e),
+ PSIL_CSI2RX(0x501f),
+ PSIL_CSI2RX(0x5000),
+ PSIL_CSI2RX(0x5001),
+ PSIL_CSI2RX(0x5002),
+ PSIL_CSI2RX(0x5003),
+ PSIL_CSI2RX(0x5004),
+ PSIL_CSI2RX(0x5005),
+ PSIL_CSI2RX(0x5006),
+ PSIL_CSI2RX(0x5007),
+ PSIL_CSI2RX(0x5008),
+ PSIL_CSI2RX(0x5009),
+ PSIL_CSI2RX(0x500a),
+ PSIL_CSI2RX(0x500b),
+ PSIL_CSI2RX(0x500c),
+ PSIL_CSI2RX(0x500d),
+ PSIL_CSI2RX(0x500e),
+ PSIL_CSI2RX(0x500f),
+ PSIL_CSI2RX(0x5010),
+ PSIL_CSI2RX(0x5011),
+ PSIL_CSI2RX(0x5012),
+ PSIL_CSI2RX(0x5013),
+ PSIL_CSI2RX(0x5014),
+ PSIL_CSI2RX(0x5015),
+ PSIL_CSI2RX(0x5016),
+ PSIL_CSI2RX(0x5017),
+ PSIL_CSI2RX(0x5018),
+ PSIL_CSI2RX(0x5019),
+ PSIL_CSI2RX(0x501a),
+ PSIL_CSI2RX(0x501b),
+ PSIL_CSI2RX(0x501c),
+ PSIL_CSI2RX(0x501d),
+ PSIL_CSI2RX(0x501e),
+ PSIL_CSI2RX(0x501f),
+ /* CSIRX 1-3 (only for J722S) */
+ PSIL_CSI2RX(0x5100),
+ PSIL_CSI2RX(0x5101),
+ PSIL_CSI2RX(0x5102),
+ PSIL_CSI2RX(0x5103),
+ PSIL_CSI2RX(0x5104),
+ PSIL_CSI2RX(0x5105),
+ PSIL_CSI2RX(0x5106),
+ PSIL_CSI2RX(0x5107),
+ PSIL_CSI2RX(0x5108),
+ PSIL_CSI2RX(0x5109),
+ PSIL_CSI2RX(0x510a),
+ PSIL_CSI2RX(0x510b),
+ PSIL_CSI2RX(0x510c),
+ PSIL_CSI2RX(0x510d),
+ PSIL_CSI2RX(0x510e),
+ PSIL_CSI2RX(0x510f),
+ PSIL_CSI2RX(0x5110),
+ PSIL_CSI2RX(0x5111),
+ PSIL_CSI2RX(0x5112),
+ PSIL_CSI2RX(0x5113),
+ PSIL_CSI2RX(0x5114),
+ PSIL_CSI2RX(0x5115),
+ PSIL_CSI2RX(0x5116),
+ PSIL_CSI2RX(0x5117),
+ PSIL_CSI2RX(0x5118),
+ PSIL_CSI2RX(0x5119),
+ PSIL_CSI2RX(0x511a),
+ PSIL_CSI2RX(0x511b),
+ PSIL_CSI2RX(0x511c),
+ PSIL_CSI2RX(0x511d),
+ PSIL_CSI2RX(0x511e),
+ PSIL_CSI2RX(0x511f),
+ PSIL_CSI2RX(0x5200),
+ PSIL_CSI2RX(0x5201),
+ PSIL_CSI2RX(0x5202),
+ PSIL_CSI2RX(0x5203),
+ PSIL_CSI2RX(0x5204),
+ PSIL_CSI2RX(0x5205),
+ PSIL_CSI2RX(0x5206),
+ PSIL_CSI2RX(0x5207),
+ PSIL_CSI2RX(0x5208),
+ PSIL_CSI2RX(0x5209),
+ PSIL_CSI2RX(0x520a),
+ PSIL_CSI2RX(0x520b),
+ PSIL_CSI2RX(0x520c),
+ PSIL_CSI2RX(0x520d),
+ PSIL_CSI2RX(0x520e),
+ PSIL_CSI2RX(0x520f),
+ PSIL_CSI2RX(0x5210),
+ PSIL_CSI2RX(0x5211),
+ PSIL_CSI2RX(0x5212),
+ PSIL_CSI2RX(0x5213),
+ PSIL_CSI2RX(0x5214),
+ PSIL_CSI2RX(0x5215),
+ PSIL_CSI2RX(0x5216),
+ PSIL_CSI2RX(0x5217),
+ PSIL_CSI2RX(0x5218),
+ PSIL_CSI2RX(0x5219),
+ PSIL_CSI2RX(0x521a),
+ PSIL_CSI2RX(0x521b),
+ PSIL_CSI2RX(0x521c),
+ PSIL_CSI2RX(0x521d),
+ PSIL_CSI2RX(0x521e),
+ PSIL_CSI2RX(0x521f),
+ PSIL_CSI2RX(0x5300),
+ PSIL_CSI2RX(0x5301),
+ PSIL_CSI2RX(0x5302),
+ PSIL_CSI2RX(0x5303),
+ PSIL_CSI2RX(0x5304),
+ PSIL_CSI2RX(0x5305),
+ PSIL_CSI2RX(0x5306),
+ PSIL_CSI2RX(0x5307),
+ PSIL_CSI2RX(0x5308),
+ PSIL_CSI2RX(0x5309),
+ PSIL_CSI2RX(0x530a),
+ PSIL_CSI2RX(0x530b),
+ PSIL_CSI2RX(0x530c),
+ PSIL_CSI2RX(0x530d),
+ PSIL_CSI2RX(0x530e),
+ PSIL_CSI2RX(0x530f),
+ PSIL_CSI2RX(0x5310),
+ PSIL_CSI2RX(0x5311),
+ PSIL_CSI2RX(0x5312),
+ PSIL_CSI2RX(0x5313),
+ PSIL_CSI2RX(0x5314),
+ PSIL_CSI2RX(0x5315),
+ PSIL_CSI2RX(0x5316),
+ PSIL_CSI2RX(0x5317),
+ PSIL_CSI2RX(0x5318),
+ PSIL_CSI2RX(0x5319),
+ PSIL_CSI2RX(0x531a),
+ PSIL_CSI2RX(0x531b),
+ PSIL_CSI2RX(0x531c),
+ PSIL_CSI2RX(0x531d),
+ PSIL_CSI2RX(0x531e),
+ PSIL_CSI2RX(0x531f),
+};
+
+/* PSI-L destination thread IDs, used for TX (DMA_MEM_TO_DEV) */
+static struct psil_ep am62p_dst_ep_map[] = {
+ /* SAUL */
+ PSIL_SAUL(0xf500, 27, 83, 8, 83, 1),
+ PSIL_SAUL(0xf501, 28, 91, 8, 91, 1),
+ /* PDMA_MAIN0 - SPI0-2 */
+ PSIL_PDMA_XY_PKT(0xc300),
+ PSIL_PDMA_XY_PKT(0xc301),
+ PSIL_PDMA_XY_PKT(0xc302),
+ PSIL_PDMA_XY_PKT(0xc303),
+ PSIL_PDMA_XY_PKT(0xc304),
+ PSIL_PDMA_XY_PKT(0xc305),
+ PSIL_PDMA_XY_PKT(0xc306),
+ PSIL_PDMA_XY_PKT(0xc307),
+ PSIL_PDMA_XY_PKT(0xc308),
+ PSIL_PDMA_XY_PKT(0xc309),
+ PSIL_PDMA_XY_PKT(0xc30a),
+ PSIL_PDMA_XY_PKT(0xc30b),
+ /* PDMA_MAIN1 - UART0-6 */
+ PSIL_PDMA_XY_PKT(0xc400),
+ PSIL_PDMA_XY_PKT(0xc401),
+ PSIL_PDMA_XY_PKT(0xc402),
+ PSIL_PDMA_XY_PKT(0xc403),
+ PSIL_PDMA_XY_PKT(0xc404),
+ PSIL_PDMA_XY_PKT(0xc405),
+ PSIL_PDMA_XY_PKT(0xc406),
+ /* PDMA_MAIN2 - MCASP0-2 */
+ PSIL_PDMA_MCASP(0xc500),
+ PSIL_PDMA_MCASP(0xc501),
+ PSIL_PDMA_MCASP(0xc502),
+ /* CPSW3G */
+ PSIL_ETHERNET(0xc600, 19, 19, 8),
+ PSIL_ETHERNET(0xc601, 20, 27, 8),
+ PSIL_ETHERNET(0xc602, 21, 35, 8),
+ PSIL_ETHERNET(0xc603, 22, 43, 8),
+ PSIL_ETHERNET(0xc604, 23, 51, 8),
+ PSIL_ETHERNET(0xc605, 24, 59, 8),
+ PSIL_ETHERNET(0xc606, 25, 67, 8),
+ PSIL_ETHERNET(0xc607, 26, 75, 8),
+};
+
+struct psil_ep_map am62p_ep_map = {
+ .name = "am62p",
+ .src = am62p_src_ep_map,
+ .src_count = ARRAY_SIZE(am62p_src_ep_map),
+ .dst = am62p_dst_ep_map,
+ .dst_count = ARRAY_SIZE(am62p_dst_ep_map),
+};
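
For reference, each row above is generated by one of the macros at the top of the file; the CPSW3G RX entry PSIL_ETHERNET(0x4600, 19, 19, 16), for example, expands to:

	{
		.thread_id = 0x4600,
		.ep_config = {
			.ep_type = PSIL_EP_NATIVE,
			.pkt_mode = 1,
			.needs_epib = 1,
			.psd_size = 16,
			.mapped_channel_id = 19,
			.flow_start = 19,
			.flow_num = 16,
			.default_flow_id = 19,
		},
	},
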
diff --git a/drivers/dma/ti/k3-psil-priv.h b/drivers/dma/ti/k3-psil-priv.h
index c383723d1c..a577be97e3 100644
--- a/drivers/dma/ti/k3-psil-priv.h
+++ b/drivers/dma/ti/k3-psil-priv.h
@@ -45,5 +45,6 @@ extern struct psil_ep_map j721s2_ep_map;
extern struct psil_ep_map am62_ep_map;
extern struct psil_ep_map am62a_ep_map;
extern struct psil_ep_map j784s4_ep_map;
+extern struct psil_ep_map am62p_ep_map;
#endif /* K3_PSIL_PRIV_H_ */
diff --git a/drivers/dma/ti/k3-psil.c b/drivers/dma/ti/k3-psil.c
index c11389d67a..25148d9524 100644
--- a/drivers/dma/ti/k3-psil.c
+++ b/drivers/dma/ti/k3-psil.c
@@ -26,6 +26,8 @@ static const struct soc_device_attribute k3_soc_devices[] = {
{ .family = "AM62X", .data = &am62_ep_map },
{ .family = "AM62AX", .data = &am62a_ep_map },
{ .family = "J784S4", .data = &j784s4_ep_map },
+ { .family = "AM62PX", .data = &am62p_ep_map },
+ { .family = "J722S", .data = &am62p_ep_map },
{ /* sentinel */ }
};
diff --git a/drivers/dma/ti/k3-udma.c b/drivers/dma/ti/k3-udma.c
index 037f1408e7..6400d06588 100644
--- a/drivers/dma/ti/k3-udma.c
+++ b/drivers/dma/ti/k3-udma.c
@@ -4447,6 +4447,8 @@ static const struct soc_device_attribute k3_soc_devices[] = {
{ .family = "AM62X", .data = &am64_soc_data },
{ .family = "AM62AX", .data = &am64_soc_data },
{ .family = "J784S4", .data = &j721e_soc_data },
+ { .family = "AM62PX", .data = &am64_soc_data },
+ { .family = "J722S", .data = &am64_soc_data },
{ /* sentinel */ }
};
diff --git a/drivers/dma/uniphier-mdmac.c b/drivers/dma/uniphier-mdmac.c
index 618839df07..ad7125f6e2 100644
--- a/drivers/dma/uniphier-mdmac.c
+++ b/drivers/dma/uniphier-mdmac.c
@@ -453,7 +453,7 @@ disable_clk:
return ret;
}
-static int uniphier_mdmac_remove(struct platform_device *pdev)
+static void uniphier_mdmac_remove(struct platform_device *pdev)
{
struct uniphier_mdmac_device *mdev = platform_get_drvdata(pdev);
struct dma_chan *chan;
@@ -468,16 +468,21 @@ static int uniphier_mdmac_remove(struct platform_device *pdev)
*/
list_for_each_entry(chan, &mdev->ddev.channels, device_node) {
ret = dmaengine_terminate_sync(chan);
- if (ret)
- return ret;
+ if (ret) {
+ /*
+ * This results in resource leakage and maybe also
+ * use-after-free errors as e.g. *mdev is kfreed.
+ */
+ dev_alert(&pdev->dev, "Failed to terminate channel %d (%pe)\n",
+ chan->chan_id, ERR_PTR(ret));
+ return;
+ }
uniphier_mdmac_free_chan_resources(chan);
}
of_dma_controller_free(pdev->dev.of_node);
dma_async_device_unregister(&mdev->ddev);
clk_disable_unprepare(mdev->clk);
-
- return 0;
}
static const struct of_device_id uniphier_mdmac_match[] = {
@@ -488,7 +493,7 @@ MODULE_DEVICE_TABLE(of, uniphier_mdmac_match);
static struct platform_driver uniphier_mdmac_driver = {
.probe = uniphier_mdmac_probe,
- .remove = uniphier_mdmac_remove,
+ .remove_new = uniphier_mdmac_remove,
.driver = {
.name = "uniphier-mio-dmac",
.of_match_table = uniphier_mdmac_match,
diff --git a/drivers/dma/uniphier-xdmac.c b/drivers/dma/uniphier-xdmac.c
index 3a8ee2b173..3ce2dc2ad9 100644
--- a/drivers/dma/uniphier-xdmac.c
+++ b/drivers/dma/uniphier-xdmac.c
@@ -563,7 +563,7 @@ out_unregister_dmac:
return ret;
}
-static int uniphier_xdmac_remove(struct platform_device *pdev)
+static void uniphier_xdmac_remove(struct platform_device *pdev)
{
struct uniphier_xdmac_device *xdev = platform_get_drvdata(pdev);
struct dma_device *ddev = &xdev->ddev;
@@ -579,15 +579,20 @@ static int uniphier_xdmac_remove(struct platform_device *pdev)
*/
list_for_each_entry(chan, &ddev->channels, device_node) {
ret = dmaengine_terminate_sync(chan);
- if (ret)
- return ret;
+ if (ret) {
+ /*
+ * This results in resource leakage and maybe also
+ * use-after-free errors as e.g. *xdev is kfreed.
+ */
+ dev_alert(&pdev->dev, "Failed to terminate channel %d (%pe)\n",
+ chan->chan_id, ERR_PTR(ret));
+ return;
+ }
uniphier_xdmac_free_chan_resources(chan);
}
of_dma_controller_free(pdev->dev.of_node);
dma_async_device_unregister(ddev);
-
- return 0;
}
static const struct of_device_id uniphier_xdmac_match[] = {
@@ -598,7 +603,7 @@ MODULE_DEVICE_TABLE(of, uniphier_xdmac_match);
static struct platform_driver uniphier_xdmac_driver = {
.probe = uniphier_xdmac_probe,
- .remove = uniphier_xdmac_remove,
+ .remove_new = uniphier_xdmac_remove,
.driver = {
.name = "uniphier-xdmac",
.of_match_table = uniphier_xdmac_match,
diff --git a/drivers/dma/xilinx/xdma-regs.h b/drivers/dma/xilinx/xdma-regs.h
index e641a5083e..6ad08878e9 100644
--- a/drivers/dma/xilinx/xdma-regs.h
+++ b/drivers/dma/xilinx/xdma-regs.h
@@ -64,9 +64,10 @@ struct xdma_hw_desc {
__le64 next_desc;
};
-#define XDMA_DESC_SIZE sizeof(struct xdma_hw_desc)
-#define XDMA_DESC_BLOCK_SIZE (XDMA_DESC_SIZE * XDMA_DESC_ADJACENT)
-#define XDMA_DESC_BLOCK_ALIGN 4096
+#define XDMA_DESC_SIZE sizeof(struct xdma_hw_desc)
+#define XDMA_DESC_BLOCK_SIZE (XDMA_DESC_SIZE * XDMA_DESC_ADJACENT)
+#define XDMA_DESC_BLOCK_ALIGN 32
+#define XDMA_DESC_BLOCK_BOUNDARY 4096
/*
* Channel registers
@@ -76,6 +77,7 @@ struct xdma_hw_desc {
#define XDMA_CHAN_CONTROL_W1S 0x8
#define XDMA_CHAN_CONTROL_W1C 0xc
#define XDMA_CHAN_STATUS 0x40
+#define XDMA_CHAN_STATUS_RC 0x44
#define XDMA_CHAN_COMPLETED_DESC 0x48
#define XDMA_CHAN_ALIGNMENTS 0x4c
#define XDMA_CHAN_INTR_ENABLE 0x90
@@ -101,6 +103,7 @@ struct xdma_hw_desc {
#define CHAN_CTRL_IE_MAGIC_STOPPED BIT(4)
#define CHAN_CTRL_IE_IDLE_STOPPED BIT(6)
#define CHAN_CTRL_IE_READ_ERROR GENMASK(13, 9)
+#define CHAN_CTRL_IE_WRITE_ERROR GENMASK(18, 14)
#define CHAN_CTRL_IE_DESC_ERROR GENMASK(23, 19)
#define CHAN_CTRL_NON_INCR_ADDR BIT(25)
#define CHAN_CTRL_POLL_MODE_WB BIT(26)
@@ -111,8 +114,20 @@ struct xdma_hw_desc {
CHAN_CTRL_IE_DESC_ALIGN_MISMATCH | \
CHAN_CTRL_IE_MAGIC_STOPPED | \
CHAN_CTRL_IE_READ_ERROR | \
+ CHAN_CTRL_IE_WRITE_ERROR | \
CHAN_CTRL_IE_DESC_ERROR)
+/* bits of the channel status register */
+#define XDMA_CHAN_STATUS_BUSY BIT(0)
+
+#define XDMA_CHAN_STATUS_MASK CHAN_CTRL_START
+
+#define XDMA_CHAN_ERROR_MASK (CHAN_CTRL_IE_DESC_ALIGN_MISMATCH | \
+ CHAN_CTRL_IE_MAGIC_STOPPED | \
+ CHAN_CTRL_IE_READ_ERROR | \
+ CHAN_CTRL_IE_WRITE_ERROR | \
+ CHAN_CTRL_IE_DESC_ERROR)
+
/* bits of the channel interrupt enable mask */
#define CHAN_IM_DESC_ERROR BIT(19)
#define CHAN_IM_READ_ERROR BIT(9)
@@ -134,18 +149,6 @@ struct xdma_hw_desc {
#define XDMA_SGDMA_DESC_ADJ 0x4088
#define XDMA_SGDMA_DESC_CREDIT 0x408c
-/* bits of the SG DMA control register */
-#define XDMA_CTRL_RUN_STOP BIT(0)
-#define XDMA_CTRL_IE_DESC_STOPPED BIT(1)
-#define XDMA_CTRL_IE_DESC_COMPLETED BIT(2)
-#define XDMA_CTRL_IE_DESC_ALIGN_MISMATCH BIT(3)
-#define XDMA_CTRL_IE_MAGIC_STOPPED BIT(4)
-#define XDMA_CTRL_IE_IDLE_STOPPED BIT(6)
-#define XDMA_CTRL_IE_READ_ERROR GENMASK(13, 9)
-#define XDMA_CTRL_IE_DESC_ERROR GENMASK(23, 19)
-#define XDMA_CTRL_NON_INCR_ADDR BIT(25)
-#define XDMA_CTRL_POLL_MODE_WB BIT(26)
-
/*
* interrupt registers
*/
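
The new ALIGN/BOUNDARY pair maps onto the align and boundary arguments of dma_pool_create() in xdma.c below: descriptor blocks only need 32-byte alignment, but no allocation may cross a 4 KiB boundary. Spelled out, the pool is now created as:

	pool = dma_pool_create(dma_chan_name(chan), dev,
			       XDMA_DESC_BLOCK_SIZE,	/* block size */
			       XDMA_DESC_BLOCK_ALIGN,	/* 32-byte alignment */
			       XDMA_DESC_BLOCK_BOUNDARY); /* never cross 4096 bytes */
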
diff --git a/drivers/dma/xilinx/xdma.c b/drivers/dma/xilinx/xdma.c
index 2c9c72d4b5..5a3a3293b2 100644
--- a/drivers/dma/xilinx/xdma.c
+++ b/drivers/dma/xilinx/xdma.c
@@ -71,6 +71,8 @@ struct xdma_chan {
enum dma_transfer_direction dir;
struct dma_slave_config cfg;
u32 irq;
+ struct completion last_interrupt;
+ bool stop_requested;
};
/**
@@ -78,27 +80,31 @@ struct xdma_chan {
* @vdesc: Virtual DMA descriptor
* @chan: DMA channel pointer
* @dir: Transferring direction of the request
- * @dev_addr: Physical address on DMA device side
* @desc_blocks: Hardware descriptor blocks
* @dblk_num: Number of hardware descriptor blocks
* @desc_num: Number of hardware descriptors
* @completed_desc_num: Completed hardware descriptors
* @cyclic: Cyclic transfer vs. scatter-gather
+ * @interleaved_dma: Interleaved DMA transfer
* @periods: Number of periods in the cyclic transfer
* @period_size: Size of a period in bytes in cyclic transfers
+ * @frames_left: Number of frames left in interleaved DMA transfer
+ * @error: tx error flag
*/
struct xdma_desc {
struct virt_dma_desc vdesc;
struct xdma_chan *chan;
enum dma_transfer_direction dir;
- u64 dev_addr;
struct xdma_desc_block *desc_blocks;
u32 dblk_num;
u32 desc_num;
u32 completed_desc_num;
bool cyclic;
+ bool interleaved_dma;
u32 periods;
u32 period_size;
+ u32 frames_left;
+ bool error;
};
#define XDMA_DEV_STATUS_REG_DMA BIT(0)
@@ -276,6 +282,7 @@ xdma_alloc_desc(struct xdma_chan *chan, u32 desc_num, bool cyclic)
sw_desc->chan = chan;
sw_desc->desc_num = desc_num;
sw_desc->cyclic = cyclic;
+ sw_desc->error = false;
dblk_num = DIV_ROUND_UP(desc_num, XDMA_DESC_ADJACENT);
sw_desc->desc_blocks = kcalloc(dblk_num, sizeof(*sw_desc->desc_blocks),
GFP_NOWAIT);
@@ -371,10 +378,30 @@ static int xdma_xfer_start(struct xdma_chan *xchan)
return ret;
xchan->busy = true;
+ xchan->stop_requested = false;
+ reinit_completion(&xchan->last_interrupt);
+
return 0;
}
/**
+ * xdma_xfer_stop - Stop DMA transfer
+ * @xchan: DMA channel pointer
+ */
+static int xdma_xfer_stop(struct xdma_chan *xchan)
+{
+ int ret;
+ struct xdma_device *xdev = xchan->xdev_hdl;
+
+ /* clear run stop bit to prevent any further auto-triggering */
+ ret = regmap_write(xdev->rmap, xchan->base + XDMA_CHAN_CONTROL_W1C,
+ CHAN_CTRL_RUN_STOP);
+ if (ret)
+ return ret;
+ return ret;
+}
+
+/**
* xdma_alloc_channels - Detect and allocate DMA channels
* @xdev: DMA device pointer
* @dir: Channel direction
@@ -444,6 +471,8 @@ static int xdma_alloc_channels(struct xdma_device *xdev,
xchan->xdev_hdl = xdev;
xchan->base = base + i * XDMA_CHAN_STRIDE;
xchan->dir = dir;
+ xchan->stop_requested = false;
+ init_completion(&xchan->last_interrupt);
ret = xdma_channel_init(xchan);
if (ret)
@@ -476,6 +505,92 @@ static void xdma_issue_pending(struct dma_chan *chan)
}
/**
+ * xdma_terminate_all - Terminate all transactions
+ * @chan: DMA channel pointer
+ */
+static int xdma_terminate_all(struct dma_chan *chan)
+{
+ struct xdma_chan *xdma_chan = to_xdma_chan(chan);
+ struct virt_dma_desc *vd;
+ unsigned long flags;
+ LIST_HEAD(head);
+
+ xdma_xfer_stop(xdma_chan);
+
+ spin_lock_irqsave(&xdma_chan->vchan.lock, flags);
+
+ xdma_chan->busy = false;
+ xdma_chan->stop_requested = true;
+ vd = vchan_next_desc(&xdma_chan->vchan);
+ if (vd) {
+ list_del(&vd->node);
+ dma_cookie_complete(&vd->tx);
+ vchan_terminate_vdesc(vd);
+ }
+ vchan_get_all_descriptors(&xdma_chan->vchan, &head);
+ list_splice_tail(&head, &xdma_chan->vchan.desc_terminated);
+
+ spin_unlock_irqrestore(&xdma_chan->vchan.lock, flags);
+
+ return 0;
+}
+
+/**
+ * xdma_synchronize - Synchronize terminated transactions
+ * @chan: DMA channel pointer
+ */
+static void xdma_synchronize(struct dma_chan *chan)
+{
+ struct xdma_chan *xdma_chan = to_xdma_chan(chan);
+ struct xdma_device *xdev = xdma_chan->xdev_hdl;
+ int st = 0;
+
+ /* If the engine continues running, wait for the last interrupt */
+ regmap_read(xdev->rmap, xdma_chan->base + XDMA_CHAN_STATUS, &st);
+ if (st & XDMA_CHAN_STATUS_BUSY)
+ wait_for_completion_timeout(&xdma_chan->last_interrupt, msecs_to_jiffies(1000));
+
+ vchan_synchronize(&xdma_chan->vchan);
+}
+
+/**
+ * xdma_fill_descs - Fill hardware descriptors with contiguous memory block addresses
+ * @sw_desc: tx descriptor state container
+ * @src_addr: Value for the ->src_addr field of the first descriptor
+ * @dst_addr: Value for the ->dst_addr field of the first descriptor
+ * @size: Total size of a contiguous memory block
+ * @filled_descs_num: Number of hardware descriptors already filled for @sw_desc
+ */
+static inline u32 xdma_fill_descs(struct xdma_desc *sw_desc, u64 src_addr,
+ u64 dst_addr, u32 size, u32 filled_descs_num)
+{
+ u32 left = size, len, desc_num = filled_descs_num;
+ struct xdma_desc_block *dblk;
+ struct xdma_hw_desc *desc;
+
+ dblk = sw_desc->desc_blocks + (desc_num / XDMA_DESC_ADJACENT);
+ desc = dblk->virt_addr;
+ desc += desc_num & XDMA_DESC_ADJACENT_MASK;
+ do {
+ len = min_t(u32, left, XDMA_DESC_BLEN_MAX);
+ /* set hardware descriptor */
+ desc->bytes = cpu_to_le32(len);
+ desc->src_addr = cpu_to_le64(src_addr);
+ desc->dst_addr = cpu_to_le64(dst_addr);
+ if (!(++desc_num & XDMA_DESC_ADJACENT_MASK))
+ desc = (++dblk)->virt_addr;
+ else
+ desc++;
+
+ src_addr += len;
+ dst_addr += len;
+ left -= len;
+ } while (left);
+
+ return desc_num - filled_descs_num;
+}
+
+/**
* xdma_prep_device_sg - prepare a descriptor for a DMA transaction
* @chan: DMA channel pointer
* @sgl: Transfer scatter gather list
@@ -491,13 +606,10 @@ xdma_prep_device_sg(struct dma_chan *chan, struct scatterlist *sgl,
{
struct xdma_chan *xdma_chan = to_xdma_chan(chan);
struct dma_async_tx_descriptor *tx_desc;
- u32 desc_num = 0, i, len, rest;
- struct xdma_desc_block *dblk;
- struct xdma_hw_desc *desc;
struct xdma_desc *sw_desc;
- u64 dev_addr, *src, *dst;
+ u32 desc_num = 0, i;
+ u64 addr, dev_addr, *src, *dst;
struct scatterlist *sg;
- u64 addr;
for_each_sg(sgl, sg, sg_len, i)
desc_num += DIV_ROUND_UP(sg_dma_len(sg), XDMA_DESC_BLEN_MAX);
@@ -506,6 +618,8 @@ xdma_prep_device_sg(struct dma_chan *chan, struct scatterlist *sgl,
if (!sw_desc)
return NULL;
sw_desc->dir = dir;
+ sw_desc->cyclic = false;
+ sw_desc->interleaved_dma = false;
if (dir == DMA_MEM_TO_DEV) {
dev_addr = xdma_chan->cfg.dst_addr;
@@ -517,32 +631,11 @@ xdma_prep_device_sg(struct dma_chan *chan, struct scatterlist *sgl,
dst = &addr;
}
- dblk = sw_desc->desc_blocks;
- desc = dblk->virt_addr;
- desc_num = 1;
+ desc_num = 0;
for_each_sg(sgl, sg, sg_len, i) {
addr = sg_dma_address(sg);
- rest = sg_dma_len(sg);
-
- do {
- len = min_t(u32, rest, XDMA_DESC_BLEN_MAX);
- /* set hardware descriptor */
- desc->bytes = cpu_to_le32(len);
- desc->src_addr = cpu_to_le64(*src);
- desc->dst_addr = cpu_to_le64(*dst);
-
- if (!(desc_num & XDMA_DESC_ADJACENT_MASK)) {
- dblk++;
- desc = dblk->virt_addr;
- } else {
- desc++;
- }
-
- desc_num++;
- dev_addr += len;
- addr += len;
- rest -= len;
- } while (rest);
+ desc_num += xdma_fill_descs(sw_desc, *src, *dst, sg_dma_len(sg), desc_num);
+ dev_addr += sg_dma_len(sg);
}
tx_desc = vchan_tx_prep(&xdma_chan->vchan, &sw_desc->vdesc, flags);
@@ -576,9 +669,9 @@ xdma_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t address,
struct xdma_device *xdev = xdma_chan->xdev_hdl;
unsigned int periods = size / period_size;
struct dma_async_tx_descriptor *tx_desc;
- struct xdma_desc_block *dblk;
- struct xdma_hw_desc *desc;
struct xdma_desc *sw_desc;
+ u64 addr, dev_addr, *src, *dst;
+ u32 desc_num;
unsigned int i;
/*
@@ -602,22 +695,23 @@ xdma_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t address,
sw_desc->periods = periods;
sw_desc->period_size = period_size;
sw_desc->dir = dir;
+ sw_desc->interleaved_dma = false;
- dblk = sw_desc->desc_blocks;
- desc = dblk->virt_addr;
+ addr = address;
+ if (dir == DMA_MEM_TO_DEV) {
+ dev_addr = xdma_chan->cfg.dst_addr;
+ src = &addr;
+ dst = &dev_addr;
+ } else {
+ dev_addr = xdma_chan->cfg.src_addr;
+ src = &dev_addr;
+ dst = &addr;
+ }
- /* fill hardware descriptor */
+ desc_num = 0;
for (i = 0; i < periods; i++) {
- desc->bytes = cpu_to_le32(period_size);
- if (dir == DMA_MEM_TO_DEV) {
- desc->src_addr = cpu_to_le64(address + i * period_size);
- desc->dst_addr = cpu_to_le64(xdma_chan->cfg.dst_addr);
- } else {
- desc->src_addr = cpu_to_le64(xdma_chan->cfg.src_addr);
- desc->dst_addr = cpu_to_le64(address + i * period_size);
- }
-
- desc++;
+ desc_num += xdma_fill_descs(sw_desc, *src, *dst, period_size, desc_num);
+ addr += period_size;
}
tx_desc = vchan_tx_prep(&xdma_chan->vchan, &sw_desc->vdesc, flags);
@@ -633,6 +727,57 @@ failed:
}
/**
+ * xdma_prep_interleaved_dma - Prepare virtual descriptor for interleaved DMA transfers
+ * @chan: DMA channel
+ * @xt: DMA transfer template
+ * @flags: tx flags
+ */
+static struct dma_async_tx_descriptor *
+xdma_prep_interleaved_dma(struct dma_chan *chan,
+ struct dma_interleaved_template *xt,
+ unsigned long flags)
+{
+ int i;
+ u32 desc_num = 0, period_size = 0;
+ struct dma_async_tx_descriptor *tx_desc;
+ struct xdma_chan *xchan = to_xdma_chan(chan);
+ struct xdma_desc *sw_desc;
+ u64 src_addr, dst_addr;
+
+ for (i = 0; i < xt->frame_size; ++i)
+ desc_num += DIV_ROUND_UP(xt->sgl[i].size, XDMA_DESC_BLEN_MAX);
+
+ sw_desc = xdma_alloc_desc(xchan, desc_num, false);
+ if (!sw_desc)
+ return NULL;
+ sw_desc->dir = xt->dir;
+ sw_desc->interleaved_dma = true;
+ sw_desc->cyclic = flags & DMA_PREP_REPEAT;
+ sw_desc->frames_left = xt->numf;
+ sw_desc->periods = xt->numf;
+
+ desc_num = 0;
+ src_addr = xt->src_start;
+ dst_addr = xt->dst_start;
+ for (i = 0; i < xt->frame_size; ++i) {
+ desc_num += xdma_fill_descs(sw_desc, src_addr, dst_addr, xt->sgl[i].size, desc_num);
+ src_addr += dmaengine_get_src_icg(xt, &xt->sgl[i]) + (xt->src_inc ?
+ xt->sgl[i].size : 0);
+ dst_addr += dmaengine_get_dst_icg(xt, &xt->sgl[i]) + (xt->dst_inc ?
+ xt->sgl[i].size : 0);
+ period_size += xt->sgl[i].size;
+ }
+ sw_desc->period_size = period_size;
+
+ tx_desc = vchan_tx_prep(&xchan->vchan, &sw_desc->vdesc, flags);
+ if (tx_desc)
+ return tx_desc;
+
+ xdma_free_desc(&sw_desc->vdesc);
+ return NULL;
+}
+
+/**
* xdma_device_config - Configure the DMA channel
* @chan: DMA channel
* @cfg: channel configuration
@@ -677,9 +822,8 @@ static int xdma_alloc_chan_resources(struct dma_chan *chan)
return -EINVAL;
}
- xdma_chan->desc_pool = dma_pool_create(dma_chan_name(chan),
- dev, XDMA_DESC_BLOCK_SIZE,
- XDMA_DESC_BLOCK_ALIGN, 0);
+ xdma_chan->desc_pool = dma_pool_create(dma_chan_name(chan), dev, XDMA_DESC_BLOCK_SIZE,
+ XDMA_DESC_BLOCK_ALIGN, XDMA_DESC_BLOCK_BOUNDARY);
if (!xdma_chan->desc_pool) {
xdma_err(xdev, "unable to allocate descriptor pool");
return -ENOMEM;
@@ -706,20 +850,20 @@ static enum dma_status xdma_tx_status(struct dma_chan *chan, dma_cookie_t cookie
spin_lock_irqsave(&xdma_chan->vchan.lock, flags);
vd = vchan_find_desc(&xdma_chan->vchan, cookie);
- if (vd)
- desc = to_xdma_desc(vd);
- if (!desc || !desc->cyclic) {
- spin_unlock_irqrestore(&xdma_chan->vchan.lock, flags);
- return ret;
- }
-
- period_idx = desc->completed_desc_num % desc->periods;
- residue = (desc->periods - period_idx) * desc->period_size;
+ if (!vd)
+ goto out;
+ desc = to_xdma_desc(vd);
+ if (desc->error) {
+ ret = DMA_ERROR;
+ } else if (desc->cyclic) {
+ period_idx = desc->completed_desc_num % desc->periods;
+ residue = (desc->periods - period_idx) * desc->period_size;
+ dma_set_residue(state, residue);
+ }
+out:
spin_unlock_irqrestore(&xdma_chan->vchan.lock, flags);
- dma_set_residue(state, residue);
-
return ret;
}
@@ -732,11 +876,15 @@ static irqreturn_t xdma_channel_isr(int irq, void *dev_id)
{
struct xdma_chan *xchan = dev_id;
u32 complete_desc_num = 0;
- struct xdma_device *xdev;
- struct virt_dma_desc *vd;
+ struct xdma_device *xdev = xchan->xdev_hdl;
+ struct virt_dma_desc *vd, *next_vd;
struct xdma_desc *desc;
int ret;
u32 st;
+ bool repeat_tx;
+
+ if (xchan->stop_requested)
+ complete(&xchan->last_interrupt);
spin_lock(&xchan->vchan.lock);
@@ -745,47 +893,76 @@ static irqreturn_t xdma_channel_isr(int irq, void *dev_id)
if (!vd)
goto out;
- xchan->busy = false;
+ /* Clear-on-read the status register */
+ ret = regmap_read(xdev->rmap, xchan->base + XDMA_CHAN_STATUS_RC, &st);
+ if (ret)
+ goto out;
+
desc = to_xdma_desc(vd);
- xdev = xchan->xdev_hdl;
+
+ st &= XDMA_CHAN_STATUS_MASK;
+ if ((st & XDMA_CHAN_ERROR_MASK) ||
+ !(st & (CHAN_CTRL_IE_DESC_COMPLETED | CHAN_CTRL_IE_DESC_STOPPED))) {
+ desc->error = true;
+ xdma_err(xdev, "channel error, status register value: 0x%x", st);
+ goto out;
+ }
ret = regmap_read(xdev->rmap, xchan->base + XDMA_CHAN_COMPLETED_DESC,
&complete_desc_num);
if (ret)
goto out;
- if (desc->cyclic) {
- desc->completed_desc_num = complete_desc_num;
-
- ret = regmap_read(xdev->rmap, xchan->base + XDMA_CHAN_STATUS,
- &st);
- if (ret)
+ if (desc->interleaved_dma) {
+ xchan->busy = false;
+ desc->completed_desc_num += complete_desc_num;
+ if (complete_desc_num == XDMA_DESC_BLOCK_NUM * XDMA_DESC_ADJACENT) {
+ xdma_xfer_start(xchan);
goto out;
+ }
- regmap_write(xdev->rmap, xchan->base + XDMA_CHAN_STATUS, st);
+ /* last desc of any frame */
+ desc->frames_left--;
+ if (desc->frames_left)
+ goto out;
- vchan_cyclic_callback(vd);
- goto out;
- }
+ /* last desc of the last frame */
+ repeat_tx = vd->tx.flags & DMA_PREP_REPEAT;
+ next_vd = list_first_entry_or_null(&vd->node, struct virt_dma_desc, node);
+ if (next_vd)
+ repeat_tx = repeat_tx && !(next_vd->tx.flags & DMA_PREP_LOAD_EOT);
+ if (repeat_tx) {
+ desc->frames_left = desc->periods;
+ desc->completed_desc_num = 0;
+ vchan_cyclic_callback(vd);
+ } else {
+ list_del(&vd->node);
+ vchan_cookie_complete(vd);
+ }
+ /* start (or continue) the tx of a first desc on the vc.desc_issued list, if any */
+ xdma_xfer_start(xchan);
+ } else if (!desc->cyclic) {
+ xchan->busy = false;
+ desc->completed_desc_num += complete_desc_num;
+
+ /* if all data blocks are transferred, remove and complete the request */
+ if (desc->completed_desc_num == desc->desc_num) {
+ list_del(&vd->node);
+ vchan_cookie_complete(vd);
+ goto out;
+ }
- desc->completed_desc_num += complete_desc_num;
+ if (desc->completed_desc_num > desc->desc_num ||
+ complete_desc_num != XDMA_DESC_BLOCK_NUM * XDMA_DESC_ADJACENT)
+ goto out;
- /*
- * if all data blocks are transferred, remove and complete the request
- */
- if (desc->completed_desc_num == desc->desc_num) {
- list_del(&vd->node);
- vchan_cookie_complete(vd);
- goto out;
+ /* transfer the rest of data */
+ xdma_xfer_start(xchan);
+ } else {
+ desc->completed_desc_num = complete_desc_num;
+ vchan_cyclic_callback(vd);
}
- if (desc->completed_desc_num > desc->desc_num ||
- complete_desc_num != XDMA_DESC_BLOCK_NUM * XDMA_DESC_ADJACENT)
- goto out;
-
- /* transfer the rest of data (SG only) */
- xdma_xfer_start(xchan);
-
out:
spin_unlock(&xchan->vchan.lock);
return IRQ_HANDLED;
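
For interleaved transfers the handler above counts down frames_left and, once the last frame of the last descriptor block completes, either reloads the same descriptor (it was prepared with DMA_PREP_REPEAT and no queued descriptor carries DMA_PREP_LOAD_EOT) or completes it and starts the next issued one. A hypothetical client sketch of that repeat semantics (chan, xt and frame_done_cb are placeholders):

	tx = dmaengine_prep_interleaved_dma(chan, xt,
					    DMA_PREP_REPEAT | DMA_PREP_LOAD_EOT);
	if (!tx)
		return -EINVAL;

	/* invoked through vchan_cyclic_callback() after each full pass */
	tx->callback = frame_done_cb;
	tx->callback_param = chan;

	dmaengine_submit(tx);
	dma_async_issue_pending(chan);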
@@ -1082,6 +1259,9 @@ static int xdma_probe(struct platform_device *pdev)
dma_cap_set(DMA_SLAVE, xdev->dma_dev.cap_mask);
dma_cap_set(DMA_PRIVATE, xdev->dma_dev.cap_mask);
dma_cap_set(DMA_CYCLIC, xdev->dma_dev.cap_mask);
+ dma_cap_set(DMA_INTERLEAVE, xdev->dma_dev.cap_mask);
+ dma_cap_set(DMA_REPEAT, xdev->dma_dev.cap_mask);
+ dma_cap_set(DMA_LOAD_EOT, xdev->dma_dev.cap_mask);
xdev->dma_dev.dev = &pdev->dev;
xdev->dma_dev.residue_granularity = DMA_RESIDUE_GRANULARITY_SEGMENT;
@@ -1091,10 +1271,13 @@ static int xdma_probe(struct platform_device *pdev)
xdev->dma_dev.device_prep_slave_sg = xdma_prep_device_sg;
xdev->dma_dev.device_config = xdma_device_config;
xdev->dma_dev.device_issue_pending = xdma_issue_pending;
+ xdev->dma_dev.device_terminate_all = xdma_terminate_all;
+ xdev->dma_dev.device_synchronize = xdma_synchronize;
xdev->dma_dev.filter.map = pdata->device_map;
xdev->dma_dev.filter.mapcnt = pdata->device_map_cnt;
xdev->dma_dev.filter.fn = xdma_filter_fn;
xdev->dma_dev.device_prep_dma_cyclic = xdma_prep_dma_cyclic;
+ xdev->dma_dev.device_prep_interleaved_dma = xdma_prep_interleaved_dma;
ret = dma_async_device_register(&xdev->dma_dev);
if (ret) {
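
Besides the new capability bits, the probe hunk above wires up device_terminate_all and device_synchronize. From the client side, the usual tear-down sequence then lands in those callbacks; a brief sketch (chan is a placeholder):

	dmaengine_terminate_async(chan);	/* ends up in xdma_terminate_all()  */
	dmaengine_synchronize(chan);		/* ends up in xdma_synchronize()    */
	/* descriptors are reaped; buffers and callbacks may be freed from here */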
diff --git a/drivers/dma/xilinx/xilinx_dpdma.c b/drivers/dma/xilinx/xilinx_dpdma.c
index 69587d85a7..eb0637d903 100644
--- a/drivers/dma/xilinx/xilinx_dpdma.c
+++ b/drivers/dma/xilinx/xilinx_dpdma.c
@@ -214,7 +214,8 @@ struct xilinx_dpdma_tx_desc {
* @running: true if the channel is running
* @first_frame: flag for the first frame of stream
* @video_group: flag if multi-channel operation is needed for video channels
- * @lock: lock to access struct xilinx_dpdma_chan
+ * @lock: lock to access struct xilinx_dpdma_chan. Must be taken before
+ * @vchan.lock, if both are to be held.
* @desc_pool: descriptor allocation pool
* @err_task: error IRQ bottom half handler
* @desc: References to descriptors being processed
@@ -309,7 +310,7 @@ static ssize_t xilinx_dpdma_debugfs_desc_done_irq_read(char *buf)
out_str_len = strlen(XILINX_DPDMA_DEBUGFS_UINT16_MAX_STR);
out_str_len = min_t(size_t, XILINX_DPDMA_DEBUGFS_READ_MAX_SIZE,
- out_str_len);
+ out_str_len + 1);
snprintf(buf, out_str_len, "%d",
dpdma_debugfs.xilinx_dpdma_irq_done_count);
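
The debugfs change above fixes an off-by-one: snprintf()'s size argument counts the terminating NUL, so capping it at strlen() of the widest value ("65535") dropped the last digit. A minimal stand-alone illustration (plain C, not driver code):

	char buf[8];

	snprintf(buf, 5, "%d", 65535);	/* buf = "6553"  - last digit lost  */
	snprintf(buf, 6, "%d", 65535);	/* buf = "65535" - fits with NUL    */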
@@ -1097,12 +1098,14 @@ static void xilinx_dpdma_chan_vsync_irq(struct xilinx_dpdma_chan *chan)
* Complete the active descriptor, if any, promote the pending
* descriptor to active, and queue the next transfer, if any.
*/
+ spin_lock(&chan->vchan.lock);
if (chan->desc.active)
vchan_cookie_complete(&chan->desc.active->vdesc);
chan->desc.active = pending;
chan->desc.pending = NULL;
xilinx_dpdma_chan_queue_transfer(chan);
+ spin_unlock(&chan->vchan.lock);
out:
spin_unlock_irqrestore(&chan->lock, flags);
@@ -1264,10 +1267,12 @@ static void xilinx_dpdma_issue_pending(struct dma_chan *dchan)
struct xilinx_dpdma_chan *chan = to_xilinx_chan(dchan);
unsigned long flags;
- spin_lock_irqsave(&chan->vchan.lock, flags);
+ spin_lock_irqsave(&chan->lock, flags);
+ spin_lock(&chan->vchan.lock);
if (vchan_issue_pending(&chan->vchan))
xilinx_dpdma_chan_queue_transfer(chan);
- spin_unlock_irqrestore(&chan->vchan.lock, flags);
+ spin_unlock(&chan->vchan.lock);
+ spin_unlock_irqrestore(&chan->lock, flags);
}
static int xilinx_dpdma_config(struct dma_chan *dchan,
@@ -1495,7 +1500,9 @@ static void xilinx_dpdma_chan_err_task(struct tasklet_struct *t)
XILINX_DPDMA_EINTR_CHAN_ERR_MASK << chan->id);
spin_lock_irqsave(&chan->lock, flags);
+ spin_lock(&chan->vchan.lock);
xilinx_dpdma_chan_queue_transfer(chan);
+ spin_unlock(&chan->vchan.lock);
spin_unlock_irqrestore(&chan->lock, flags);
}
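
The dpdma hunks above all follow the ordering documented in the updated kerneldoc: chan->lock is the outer lock and chan->vchan.lock the inner one, so xilinx_dpdma_chan_queue_transfer() is always entered with both held. An illustrative sketch of the pattern, not an additional change:

	spin_lock_irqsave(&chan->lock, flags);
	spin_lock(&chan->vchan.lock);
	/* ... manipulate chan->desc and the virt-dma descriptor lists ... */
	spin_unlock(&chan->vchan.lock);
	spin_unlock_irqrestore(&chan->lock, flags);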