author     Daniel Baumann <daniel.baumann@progress-linux.org> 2024-04-07 18:49:45 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org> 2024-04-07 18:49:45 +0000
commit     2c3c1048746a4622d8c89a29670120dc8fab93c4 (patch)
tree       848558de17fb3008cdf4d861b01ac7781903ce39 /drivers/dma/sh
parent     Initial commit. (diff)

Adding upstream version 6.1.76. (tag: upstream/6.1.76)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'drivers/dma/sh')
-rw-r--r--  drivers/dma/sh/Kconfig      |   58
-rw-r--r--  drivers/dma/sh/Makefile     |   18
-rw-r--r--  drivers/dma/sh/rcar-dmac.c  | 2052
-rw-r--r--  drivers/dma/sh/rz-dmac.c    | 1003
-rw-r--r--  drivers/dma/sh/shdma-arm.h  |   48
-rw-r--r--  drivers/dma/sh/shdma-base.c | 1052
-rw-r--r--  drivers/dma/sh/shdma.h      |   61
-rw-r--r--  drivers/dma/sh/shdmac.c     |  938
-rw-r--r--  drivers/dma/sh/usb-dmac.c   |  914
9 files changed, 6144 insertions, 0 deletions
diff --git a/drivers/dma/sh/Kconfig b/drivers/dma/sh/Kconfig
new file mode 100644
index 000000000..c0b2997ab
--- /dev/null
+++ b/drivers/dma/sh/Kconfig
@@ -0,0 +1,58 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# DMA engine configuration for sh
+#
+
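+# Hidden helper symbol, selected by the individual controller drivers below.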
+config RENESAS_DMA
+ bool
+ select DMA_ENGINE
+
+#
+# DMA Engine Helpers
+#
+
+config SH_DMAE_BASE
+ bool "Renesas SuperH DMA Engine support"
+ depends on SUPERH || COMPILE_TEST
+ depends on !SUPERH || SH_DMA
+ depends on !SH_DMA_API
+ default y
+ select RENESAS_DMA
+ help
+ Enable support for the Renesas SuperH DMA controllers.
+
+#
+# DMA Controllers
+#
+
+config SH_DMAE
+ tristate "Renesas SuperH DMAC support"
+ depends on SH_DMAE_BASE
+ help
+ Enable support for the Renesas SuperH DMA controllers.
+
+config RCAR_DMAC
+ tristate "Renesas R-Car Gen{2,3} and RZ/G{1,2} DMA Controller"
+ depends on ARCH_RENESAS || COMPILE_TEST
+ select RENESAS_DMA
+ help
+ This driver supports the general purpose DMA controller found in the
+ Renesas R-Car Gen{2,3} and RZ/G{1,2} SoCs.
+
+config RENESAS_USB_DMAC
+ tristate "Renesas USB-DMA Controller"
+ depends on ARCH_RENESAS || COMPILE_TEST
+ select RENESAS_DMA
+ select DMA_VIRTUAL_CHANNELS
+ help
+ This driver supports the USB-DMA controller found in the Renesas
+ SoCs.
+
+config RZ_DMAC
+ tristate "Renesas RZ/{G2L,V2L} DMA Controller"
+ depends on ARCH_RZG2L || COMPILE_TEST
+ select RENESAS_DMA
+ select DMA_VIRTUAL_CHANNELS
+ help
+ This driver supports the general purpose DMA controller found in the
+ Renesas RZ/{G2L,V2L} SoC variants.
diff --git a/drivers/dma/sh/Makefile b/drivers/dma/sh/Makefile
new file mode 100644
index 000000000..360ab6d25
--- /dev/null
+++ b/drivers/dma/sh/Makefile
@@ -0,0 +1,18 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# DMA Engine Helpers
+#
+
+obj-$(CONFIG_SH_DMAE_BASE) += shdma-base.o
+
+#
+# DMA Controllers
+#
+
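+# The shdma module object for CONFIG_SH_DMAE is assembled from shdmac.o.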
+shdma-y := shdmac.o
+shdma-objs := $(shdma-y)
+obj-$(CONFIG_SH_DMAE) += shdma.o
+
+obj-$(CONFIG_RCAR_DMAC) += rcar-dmac.o
+obj-$(CONFIG_RENESAS_USB_DMAC) += usb-dmac.o
+obj-$(CONFIG_RZ_DMAC) += rz-dmac.o
diff --git a/drivers/dma/sh/rcar-dmac.c b/drivers/dma/sh/rcar-dmac.c
new file mode 100644
index 000000000..641d689d1
--- /dev/null
+++ b/drivers/dma/sh/rcar-dmac.c
@@ -0,0 +1,2052 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Renesas R-Car Gen2/Gen3 DMA Controller Driver
+ *
+ * Copyright (C) 2014-2019 Renesas Electronics Inc.
+ *
+ * Author: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+ */
+
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmaengine.h>
+#include <linux/interrupt.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/of.h>
+#include <linux/of_dma.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+
+#include "../dmaengine.h"
+
+/*
+ * struct rcar_dmac_xfer_chunk - Descriptor for a hardware transfer
+ * @node: entry in the parent's chunks list
+ * @src_addr: device source address
+ * @dst_addr: device destination address
+ * @size: transfer size in bytes
+ */
+struct rcar_dmac_xfer_chunk {
+ struct list_head node;
+
+ dma_addr_t src_addr;
+ dma_addr_t dst_addr;
+ u32 size;
+};
+
+/*
+ * struct rcar_dmac_hw_desc - Hardware descriptor for a transfer chunk
+ * @sar: value of the SAR register (source address)
+ * @dar: value of the DAR register (destination address)
+ * @tcr: value of the TCR register (transfer count)
+ */
+struct rcar_dmac_hw_desc {
+ u32 sar;
+ u32 dar;
+ u32 tcr;
+ u32 reserved;
+} __attribute__((__packed__));
+
+/*
+ * struct rcar_dmac_desc - R-Car Gen2 DMA Transfer Descriptor
+ * @async_tx: base DMA asynchronous transaction descriptor
+ * @direction: direction of the DMA transfer
+ * @xfer_shift: log2 of the transfer size
+ * @chcr: value of the channel configuration register for this transfer
+ * @node: entry in the channel's descriptors lists
+ * @chunks: list of transfer chunks for this transfer
+ * @running: the transfer chunk being currently processed
+ * @nchunks: number of transfer chunks for this transfer
+ * @hwdescs.use: whether the transfer descriptor uses hardware descriptors
+ * @hwdescs.mem: hardware descriptors memory for the transfer
+ * @hwdescs.dma: device address of the hardware descriptors memory
+ * @hwdescs.size: size of the hardware descriptors in bytes
+ * @size: transfer size in bytes
+ * @cyclic: when set indicates that the DMA transfer is cyclic
+ */
+struct rcar_dmac_desc {
+ struct dma_async_tx_descriptor async_tx;
+ enum dma_transfer_direction direction;
+ unsigned int xfer_shift;
+ u32 chcr;
+
+ struct list_head node;
+ struct list_head chunks;
+ struct rcar_dmac_xfer_chunk *running;
+ unsigned int nchunks;
+
+ struct {
+ bool use;
+ struct rcar_dmac_hw_desc *mem;
+ dma_addr_t dma;
+ size_t size;
+ } hwdescs;
+
+ unsigned int size;
+ bool cyclic;
+};
+
+#define to_rcar_dmac_desc(d) container_of(d, struct rcar_dmac_desc, async_tx)
+
+/*
+ * struct rcar_dmac_desc_page - One page worth of descriptors
+ * @node: entry in the channel's pages list
+ * @descs: array of DMA descriptors
+ * @chunks: array of transfer chunk descriptors
+ */
+struct rcar_dmac_desc_page {
+ struct list_head node;
+
+ union {
+ DECLARE_FLEX_ARRAY(struct rcar_dmac_desc, descs);
+ DECLARE_FLEX_ARRAY(struct rcar_dmac_xfer_chunk, chunks);
+ };
+};
+
+#define RCAR_DMAC_DESCS_PER_PAGE \
+ ((PAGE_SIZE - offsetof(struct rcar_dmac_desc_page, descs)) / \
+ sizeof(struct rcar_dmac_desc))
+#define RCAR_DMAC_XFER_CHUNKS_PER_PAGE \
+ ((PAGE_SIZE - offsetof(struct rcar_dmac_desc_page, chunks)) / \
+ sizeof(struct rcar_dmac_xfer_chunk))
+
+/*
+ * struct rcar_dmac_chan_slave - Slave configuration
+ * @slave_addr: slave memory address
+ * @xfer_size: size (in bytes) of hardware transfers
+ */
+struct rcar_dmac_chan_slave {
+ phys_addr_t slave_addr;
+ unsigned int xfer_size;
+};
+
+/*
+ * struct rcar_dmac_chan_map - Map of slave device physical address to DMA address
+ * @addr: slave dma address
+ * @dir: direction of mapping
+ * @slave: slave configuration that is mapped
+ */
+struct rcar_dmac_chan_map {
+ dma_addr_t addr;
+ enum dma_data_direction dir;
+ struct rcar_dmac_chan_slave slave;
+};
+
+/*
+ * struct rcar_dmac_chan - R-Car Gen2 DMA Controller Channel
+ * @chan: base DMA channel object
+ * @iomem: channel I/O memory base
+ * @index: index of this channel in the controller
+ * @irq: channel IRQ
+ * @src: slave memory address and size on the source side
+ * @dst: slave memory address and size on the destination side
+ * @mid_rid: hardware MID/RID for the DMA client using this channel
+ * @lock: protects the channel CHCR register and the desc members
+ * @desc.free: list of free descriptors
+ * @desc.pending: list of pending descriptors (submitted with tx_submit)
+ * @desc.active: list of active descriptors (activated with issue_pending)
+ * @desc.done: list of completed descriptors
+ * @desc.wait: list of descriptors waiting for an ack
+ * @desc.running: the descriptor being processed (a member of the active list)
+ * @desc.chunks_free: list of free transfer chunk descriptors
+ * @desc.pages: list of pages used by allocated descriptors
+ */
+struct rcar_dmac_chan {
+ struct dma_chan chan;
+ void __iomem *iomem;
+ unsigned int index;
+ int irq;
+
+ struct rcar_dmac_chan_slave src;
+ struct rcar_dmac_chan_slave dst;
+ struct rcar_dmac_chan_map map;
+ int mid_rid;
+
+ spinlock_t lock;
+
+ struct {
+ struct list_head free;
+ struct list_head pending;
+ struct list_head active;
+ struct list_head done;
+ struct list_head wait;
+ struct rcar_dmac_desc *running;
+
+ struct list_head chunks_free;
+
+ struct list_head pages;
+ } desc;
+};
+
+#define to_rcar_dmac_chan(c) container_of(c, struct rcar_dmac_chan, chan)
+
+/*
+ * struct rcar_dmac - R-Car Gen2 DMA Controller
+ * @engine: base DMA engine object
+ * @dev: the hardware device
+ * @dmac_base: remapped base register block
+ * @chan_base: remapped channel register block (optional)
+ * @n_channels: number of available channels
+ * @channels: array of DMAC channels
+ * @channels_mask: bitfield of which DMA channels are managed by this driver
+ * @modules: bitmask of client modules in use
+ */
+struct rcar_dmac {
+ struct dma_device engine;
+ struct device *dev;
+ void __iomem *dmac_base;
+ void __iomem *chan_base;
+
+ unsigned int n_channels;
+ struct rcar_dmac_chan *channels;
+ u32 channels_mask;
+
+ DECLARE_BITMAP(modules, 256);
+};
+
+#define to_rcar_dmac(d) container_of(d, struct rcar_dmac, engine)
+
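+/*
+ * Iterate over all channels of a DMAC instance, skipping channels that are
+ * not enabled in channels_mask. The if/else at the end of the macro makes
+ * the filtering transparent to the loop body.
+ */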
+#define for_each_rcar_dmac_chan(i, dmac, chan) \
+ for (i = 0, chan = &(dmac)->channels[0]; i < (dmac)->n_channels; i++, chan++) \
+ if (!((dmac)->channels_mask & BIT(i))) continue; else
+
+/*
+ * struct rcar_dmac_of_data - This driver's OF data
+ * @chan_offset_base: DMAC channels base offset
+ * @chan_offset_stride: DMAC channels offset stride
+ */
+struct rcar_dmac_of_data {
+ u32 chan_offset_base;
+ u32 chan_offset_stride;
+};
+
+/* -----------------------------------------------------------------------------
+ * Registers
+ */
+
+#define RCAR_DMAISTA 0x0020
+#define RCAR_DMASEC 0x0030
+#define RCAR_DMAOR 0x0060
+#define RCAR_DMAOR_PRI_FIXED (0 << 8)
+#define RCAR_DMAOR_PRI_ROUND_ROBIN (3 << 8)
+#define RCAR_DMAOR_AE (1 << 2)
+#define RCAR_DMAOR_DME (1 << 0)
+#define RCAR_DMACHCLR 0x0080 /* Not on R-Car Gen4 */
+#define RCAR_DMADPSEC 0x00a0
+
+#define RCAR_DMASAR 0x0000
+#define RCAR_DMADAR 0x0004
+#define RCAR_DMATCR 0x0008
+#define RCAR_DMATCR_MASK 0x00ffffff
+#define RCAR_DMATSR 0x0028
+#define RCAR_DMACHCR 0x000c
+#define RCAR_DMACHCR_CAE (1 << 31)
+#define RCAR_DMACHCR_CAIE (1 << 30)
+#define RCAR_DMACHCR_DPM_DISABLED (0 << 28)
+#define RCAR_DMACHCR_DPM_ENABLED (1 << 28)
+#define RCAR_DMACHCR_DPM_REPEAT (2 << 28)
+#define RCAR_DMACHCR_DPM_INFINITE (3 << 28)
+#define RCAR_DMACHCR_RPT_SAR (1 << 27)
+#define RCAR_DMACHCR_RPT_DAR (1 << 26)
+#define RCAR_DMACHCR_RPT_TCR (1 << 25)
+#define RCAR_DMACHCR_DPB (1 << 22)
+#define RCAR_DMACHCR_DSE (1 << 19)
+#define RCAR_DMACHCR_DSIE (1 << 18)
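+/*
+ * The 4-bit transfer size code is split across CHCR: TS[1:0] sits at bits
+ * 4:3 and TS[3:2] at bits 21:20, which is why the TS_* encodings below do
+ * not follow simple size order.
+ */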
+#define RCAR_DMACHCR_TS_1B ((0 << 20) | (0 << 3))
+#define RCAR_DMACHCR_TS_2B ((0 << 20) | (1 << 3))
+#define RCAR_DMACHCR_TS_4B ((0 << 20) | (2 << 3))
+#define RCAR_DMACHCR_TS_16B ((0 << 20) | (3 << 3))
+#define RCAR_DMACHCR_TS_32B ((1 << 20) | (0 << 3))
+#define RCAR_DMACHCR_TS_64B ((1 << 20) | (1 << 3))
+#define RCAR_DMACHCR_TS_8B ((1 << 20) | (3 << 3))
+#define RCAR_DMACHCR_DM_FIXED (0 << 14)
+#define RCAR_DMACHCR_DM_INC (1 << 14)
+#define RCAR_DMACHCR_DM_DEC (2 << 14)
+#define RCAR_DMACHCR_SM_FIXED (0 << 12)
+#define RCAR_DMACHCR_SM_INC (1 << 12)
+#define RCAR_DMACHCR_SM_DEC (2 << 12)
+#define RCAR_DMACHCR_RS_AUTO (4 << 8)
+#define RCAR_DMACHCR_RS_DMARS (8 << 8)
+#define RCAR_DMACHCR_IE (1 << 2)
+#define RCAR_DMACHCR_TE (1 << 1)
+#define RCAR_DMACHCR_DE (1 << 0)
+#define RCAR_DMATCRB 0x0018
+#define RCAR_DMATSRB 0x0038
+#define RCAR_DMACHCRB 0x001c
+#define RCAR_DMACHCRB_DCNT(n) ((n) << 24)
+#define RCAR_DMACHCRB_DPTR_MASK (0xff << 16)
+#define RCAR_DMACHCRB_DPTR_SHIFT 16
+#define RCAR_DMACHCRB_DRST (1 << 15)
+#define RCAR_DMACHCRB_DTS (1 << 8)
+#define RCAR_DMACHCRB_SLM_NORMAL (0 << 4)
+#define RCAR_DMACHCRB_SLM_CLK(n) ((8 | (n)) << 4)
+#define RCAR_DMACHCRB_PRI(n) ((n) << 0)
+#define RCAR_DMARS 0x0040
+#define RCAR_DMABUFCR 0x0048
+#define RCAR_DMABUFCR_MBU(n) ((n) << 16)
+#define RCAR_DMABUFCR_ULB(n) ((n) << 0)
+#define RCAR_DMADPBASE 0x0050
+#define RCAR_DMADPBASE_MASK 0xfffffff0
+#define RCAR_DMADPBASE_SEL (1 << 0)
+#define RCAR_DMADPCR 0x0054
+#define RCAR_DMADPCR_DIPT(n) ((n) << 24)
+#define RCAR_DMAFIXSAR 0x0010
+#define RCAR_DMAFIXDAR 0x0014
+#define RCAR_DMAFIXDPBASE 0x0060
+
+/* For R-Car Gen4 */
+#define RCAR_GEN4_DMACHCLR 0x0100
+
+/* Hardcode the MEMCPY transfer size to 4 bytes. */
+#define RCAR_DMAC_MEMCPY_XFER_SIZE 4
+
+/* -----------------------------------------------------------------------------
+ * Device access
+ */
+
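+/* DMAOR and DMARS are 16-bit registers; everything else is accessed as 32-bit. */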
+static void rcar_dmac_write(struct rcar_dmac *dmac, u32 reg, u32 data)
+{
+ if (reg == RCAR_DMAOR)
+ writew(data, dmac->dmac_base + reg);
+ else
+ writel(data, dmac->dmac_base + reg);
+}
+
+static u32 rcar_dmac_read(struct rcar_dmac *dmac, u32 reg)
+{
+ if (reg == RCAR_DMAOR)
+ return readw(dmac->dmac_base + reg);
+ else
+ return readl(dmac->dmac_base + reg);
+}
+
+static u32 rcar_dmac_chan_read(struct rcar_dmac_chan *chan, u32 reg)
+{
+ if (reg == RCAR_DMARS)
+ return readw(chan->iomem + reg);
+ else
+ return readl(chan->iomem + reg);
+}
+
+static void rcar_dmac_chan_write(struct rcar_dmac_chan *chan, u32 reg, u32 data)
+{
+ if (reg == RCAR_DMARS)
+ writew(data, chan->iomem + reg);
+ else
+ writel(data, chan->iomem + reg);
+}
+
+static void rcar_dmac_chan_clear(struct rcar_dmac *dmac,
+ struct rcar_dmac_chan *chan)
+{
+ if (dmac->chan_base)
+ rcar_dmac_chan_write(chan, RCAR_GEN4_DMACHCLR, 1);
+ else
+ rcar_dmac_write(dmac, RCAR_DMACHCLR, BIT(chan->index));
+}
+
+static void rcar_dmac_chan_clear_all(struct rcar_dmac *dmac)
+{
+ struct rcar_dmac_chan *chan;
+ unsigned int i;
+
+ if (dmac->chan_base) {
+ for_each_rcar_dmac_chan(i, dmac, chan)
+ rcar_dmac_chan_write(chan, RCAR_GEN4_DMACHCLR, 1);
+ } else {
+ rcar_dmac_write(dmac, RCAR_DMACHCLR, dmac->channels_mask);
+ }
+}
+
+/* -----------------------------------------------------------------------------
+ * Initialization and configuration
+ */
+
+static bool rcar_dmac_chan_is_busy(struct rcar_dmac_chan *chan)
+{
+ u32 chcr = rcar_dmac_chan_read(chan, RCAR_DMACHCR);
+
+ return !!(chcr & (RCAR_DMACHCR_DE | RCAR_DMACHCR_TE));
+}
+
+static void rcar_dmac_chan_start_xfer(struct rcar_dmac_chan *chan)
+{
+ struct rcar_dmac_desc *desc = chan->desc.running;
+ u32 chcr = desc->chcr;
+
+ WARN_ON_ONCE(rcar_dmac_chan_is_busy(chan));
+
+ if (chan->mid_rid >= 0)
+ rcar_dmac_chan_write(chan, RCAR_DMARS, chan->mid_rid);
+
+ if (desc->hwdescs.use) {
+ struct rcar_dmac_xfer_chunk *chunk =
+ list_first_entry(&desc->chunks,
+ struct rcar_dmac_xfer_chunk, node);
+
+ dev_dbg(chan->chan.device->dev,
+ "chan%u: queue desc %p: %u@%pad\n",
+ chan->index, desc, desc->nchunks, &desc->hwdescs.dma);
+
+#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
+ rcar_dmac_chan_write(chan, RCAR_DMAFIXSAR,
+ chunk->src_addr >> 32);
+ rcar_dmac_chan_write(chan, RCAR_DMAFIXDAR,
+ chunk->dst_addr >> 32);
+ rcar_dmac_chan_write(chan, RCAR_DMAFIXDPBASE,
+ desc->hwdescs.dma >> 32);
+#endif
+ rcar_dmac_chan_write(chan, RCAR_DMADPBASE,
+ (desc->hwdescs.dma & 0xfffffff0) |
+ RCAR_DMADPBASE_SEL);
+ rcar_dmac_chan_write(chan, RCAR_DMACHCRB,
+ RCAR_DMACHCRB_DCNT(desc->nchunks - 1) |
+ RCAR_DMACHCRB_DRST);
+
+ /*
+ * Errata: When descriptor memory is accessed through an IOMMU
+ * the DMADAR register isn't automatically initialized by the DMAC
+ * from the first descriptor at the beginning of the transfer like
+ * it should be. Initialize it manually with the destination address
+ * of the first chunk.
+ */
+ rcar_dmac_chan_write(chan, RCAR_DMADAR,
+ chunk->dst_addr & 0xffffffff);
+
+ /*
+ * Program the descriptor stage interrupt to occur after the end
+ * of the first stage.
+ */
+ rcar_dmac_chan_write(chan, RCAR_DMADPCR, RCAR_DMADPCR_DIPT(1));
+
+ chcr |= RCAR_DMACHCR_RPT_SAR | RCAR_DMACHCR_RPT_DAR
+ | RCAR_DMACHCR_RPT_TCR | RCAR_DMACHCR_DPB;
+
+ /*
+ * If the descriptor isn't cyclic, enable normal descriptor mode
+ * and the transfer completion interrupt.
+ */
+ if (!desc->cyclic)
+ chcr |= RCAR_DMACHCR_DPM_ENABLED | RCAR_DMACHCR_IE;
+ /*
+ * If the descriptor is cyclic and has a callback, enable the
+ * descriptor stage interrupt in infinite repeat mode.
+ */
+ else if (desc->async_tx.callback)
+ chcr |= RCAR_DMACHCR_DPM_INFINITE | RCAR_DMACHCR_DSIE;
+ /*
+ * Otherwise just select infinite repeat mode without any
+ * interrupt.
+ */
+ else
+ chcr |= RCAR_DMACHCR_DPM_INFINITE;
+ } else {
+ struct rcar_dmac_xfer_chunk *chunk = desc->running;
+
+ dev_dbg(chan->chan.device->dev,
+ "chan%u: queue chunk %p: %u@%pad -> %pad\n",
+ chan->index, chunk, chunk->size, &chunk->src_addr,
+ &chunk->dst_addr);
+
+#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
+ rcar_dmac_chan_write(chan, RCAR_DMAFIXSAR,
+ chunk->src_addr >> 32);
+ rcar_dmac_chan_write(chan, RCAR_DMAFIXDAR,
+ chunk->dst_addr >> 32);
+#endif
+ rcar_dmac_chan_write(chan, RCAR_DMASAR,
+ chunk->src_addr & 0xffffffff);
+ rcar_dmac_chan_write(chan, RCAR_DMADAR,
+ chunk->dst_addr & 0xffffffff);
+ rcar_dmac_chan_write(chan, RCAR_DMATCR,
+ chunk->size >> desc->xfer_shift);
+
+ chcr |= RCAR_DMACHCR_DPM_DISABLED | RCAR_DMACHCR_IE;
+ }
+
+ rcar_dmac_chan_write(chan, RCAR_DMACHCR,
+ chcr | RCAR_DMACHCR_DE | RCAR_DMACHCR_CAIE);
+}
+
+static int rcar_dmac_init(struct rcar_dmac *dmac)
+{
+ u16 dmaor;
+
+ /* Clear all channels and enable the DMAC globally. */
+ rcar_dmac_chan_clear_all(dmac);
+ rcar_dmac_write(dmac, RCAR_DMAOR,
+ RCAR_DMAOR_PRI_FIXED | RCAR_DMAOR_DME);
+
+ dmaor = rcar_dmac_read(dmac, RCAR_DMAOR);
+ if ((dmaor & (RCAR_DMAOR_AE | RCAR_DMAOR_DME)) != RCAR_DMAOR_DME) {
+ dev_warn(dmac->dev, "DMAOR initialization failed.\n");
+ return -EIO;
+ }
+
+ return 0;
+}
+
+/* -----------------------------------------------------------------------------
+ * Descriptors submission
+ */
+
+static dma_cookie_t rcar_dmac_tx_submit(struct dma_async_tx_descriptor *tx)
+{
+ struct rcar_dmac_chan *chan = to_rcar_dmac_chan(tx->chan);
+ struct rcar_dmac_desc *desc = to_rcar_dmac_desc(tx);
+ unsigned long flags;
+ dma_cookie_t cookie;
+
+ spin_lock_irqsave(&chan->lock, flags);
+
+ cookie = dma_cookie_assign(tx);
+
+ dev_dbg(chan->chan.device->dev, "chan%u: submit #%d@%p\n",
+ chan->index, tx->cookie, desc);
+
+ list_add_tail(&desc->node, &chan->desc.pending);
+ desc->running = list_first_entry(&desc->chunks,
+ struct rcar_dmac_xfer_chunk, node);
+
+ spin_unlock_irqrestore(&chan->lock, flags);
+
+ return cookie;
+}
+
+/* -----------------------------------------------------------------------------
+ * Descriptors allocation and free
+ */
+
+/*
+ * rcar_dmac_desc_alloc - Allocate a page worth of DMA descriptors
+ * @chan: the DMA channel
+ * @gfp: allocation flags
+ */
+static int rcar_dmac_desc_alloc(struct rcar_dmac_chan *chan, gfp_t gfp)
+{
+ struct rcar_dmac_desc_page *page;
+ unsigned long flags;
+ LIST_HEAD(list);
+ unsigned int i;
+
+ page = (void *)get_zeroed_page(gfp);
+ if (!page)
+ return -ENOMEM;
+
+ for (i = 0; i < RCAR_DMAC_DESCS_PER_PAGE; ++i) {
+ struct rcar_dmac_desc *desc = &page->descs[i];
+
+ dma_async_tx_descriptor_init(&desc->async_tx, &chan->chan);
+ desc->async_tx.tx_submit = rcar_dmac_tx_submit;
+ INIT_LIST_HEAD(&desc->chunks);
+
+ list_add_tail(&desc->node, &list);
+ }
+
+ spin_lock_irqsave(&chan->lock, flags);
+ list_splice_tail(&list, &chan->desc.free);
+ list_add_tail(&page->node, &chan->desc.pages);
+ spin_unlock_irqrestore(&chan->lock, flags);
+
+ return 0;
+}
+
+/*
+ * rcar_dmac_desc_put - Release a DMA transfer descriptor
+ * @chan: the DMA channel
+ * @desc: the descriptor
+ *
+ * Put the descriptor and its transfer chunk descriptors back in the channel's
+ * free descriptor lists. The descriptor's chunks list will be reinitialized to
+ * an empty list as a result.
+ *
+ * The descriptor must have been removed from the channel's lists before calling
+ * this function.
+ */
+static void rcar_dmac_desc_put(struct rcar_dmac_chan *chan,
+ struct rcar_dmac_desc *desc)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&chan->lock, flags);
+ list_splice_tail_init(&desc->chunks, &chan->desc.chunks_free);
+ list_add(&desc->node, &chan->desc.free);
+ spin_unlock_irqrestore(&chan->lock, flags);
+}
+
+static void rcar_dmac_desc_recycle_acked(struct rcar_dmac_chan *chan)
+{
+ struct rcar_dmac_desc *desc, *_desc;
+ unsigned long flags;
+ LIST_HEAD(list);
+
+ /*
+ * We have to temporarily move all descriptors from the wait list to a
+ * local list as iterating over the wait list, even with
+ * list_for_each_entry_safe, isn't safe if we release the channel lock
+ * around the rcar_dmac_desc_put() call.
+ */
+ spin_lock_irqsave(&chan->lock, flags);
+ list_splice_init(&chan->desc.wait, &list);
+ spin_unlock_irqrestore(&chan->lock, flags);
+
+ list_for_each_entry_safe(desc, _desc, &list, node) {
+ if (async_tx_test_ack(&desc->async_tx)) {
+ list_del(&desc->node);
+ rcar_dmac_desc_put(chan, desc);
+ }
+ }
+
+ if (list_empty(&list))
+ return;
+
+ /* Put the remaining descriptors back in the wait list. */
+ spin_lock_irqsave(&chan->lock, flags);
+ list_splice(&list, &chan->desc.wait);
+ spin_unlock_irqrestore(&chan->lock, flags);
+}
+
+/*
+ * rcar_dmac_desc_get - Allocate a descriptor for a DMA transfer
+ * @chan: the DMA channel
+ *
+ * Locking: This function must be called in a non-atomic context.
+ *
+ * Return: A pointer to the allocated descriptor or NULL if no descriptor can
+ * be allocated.
+ */
+static struct rcar_dmac_desc *rcar_dmac_desc_get(struct rcar_dmac_chan *chan)
+{
+ struct rcar_dmac_desc *desc;
+ unsigned long flags;
+ int ret;
+
+ /* Recycle acked descriptors before attempting allocation. */
+ rcar_dmac_desc_recycle_acked(chan);
+
+ spin_lock_irqsave(&chan->lock, flags);
+
+ while (list_empty(&chan->desc.free)) {
+ /*
+ * No free descriptors, allocate a page worth of them and try
+ * again, as someone else could race us to get the newly
+ * allocated descriptors. If the allocation fails return an
+ * error.
+ */
+ spin_unlock_irqrestore(&chan->lock, flags);
+ ret = rcar_dmac_desc_alloc(chan, GFP_NOWAIT);
+ if (ret < 0)
+ return NULL;
+ spin_lock_irqsave(&chan->lock, flags);
+ }
+
+ desc = list_first_entry(&chan->desc.free, struct rcar_dmac_desc, node);
+ list_del(&desc->node);
+
+ spin_unlock_irqrestore(&chan->lock, flags);
+
+ return desc;
+}
+
+/*
+ * rcar_dmac_xfer_chunk_alloc - Allocate a page worth of transfer chunks
+ * @chan: the DMA channel
+ * @gfp: allocation flags
+ */
+static int rcar_dmac_xfer_chunk_alloc(struct rcar_dmac_chan *chan, gfp_t gfp)
+{
+ struct rcar_dmac_desc_page *page;
+ unsigned long flags;
+ LIST_HEAD(list);
+ unsigned int i;
+
+ page = (void *)get_zeroed_page(gfp);
+ if (!page)
+ return -ENOMEM;
+
+ for (i = 0; i < RCAR_DMAC_XFER_CHUNKS_PER_PAGE; ++i) {
+ struct rcar_dmac_xfer_chunk *chunk = &page->chunks[i];
+
+ list_add_tail(&chunk->node, &list);
+ }
+
+ spin_lock_irqsave(&chan->lock, flags);
+ list_splice_tail(&list, &chan->desc.chunks_free);
+ list_add_tail(&page->node, &chan->desc.pages);
+ spin_unlock_irqrestore(&chan->lock, flags);
+
+ return 0;
+}
+
+/*
+ * rcar_dmac_xfer_chunk_get - Allocate a transfer chunk for a DMA transfer
+ * @chan: the DMA channel
+ *
+ * Locking: This function must be called in a non-atomic context.
+ *
+ * Return: A pointer to the allocated transfer chunk descriptor or NULL if no
+ * descriptor can be allocated.
+ */
+static struct rcar_dmac_xfer_chunk *
+rcar_dmac_xfer_chunk_get(struct rcar_dmac_chan *chan)
+{
+ struct rcar_dmac_xfer_chunk *chunk;
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&chan->lock, flags);
+
+ while (list_empty(&chan->desc.chunks_free)) {
+ /*
+ * No free chunk descriptors, allocate a page worth of them and try
+ * again, as someone else could race us to get the newly
+ * allocated descriptors. If the allocation fails return an
+ * error.
+ */
+ spin_unlock_irqrestore(&chan->lock, flags);
+ ret = rcar_dmac_xfer_chunk_alloc(chan, GFP_NOWAIT);
+ if (ret < 0)
+ return NULL;
+ spin_lock_irqsave(&chan->lock, flags);
+ }
+
+ chunk = list_first_entry(&chan->desc.chunks_free,
+ struct rcar_dmac_xfer_chunk, node);
+ list_del(&chunk->node);
+
+ spin_unlock_irqrestore(&chan->lock, flags);
+
+ return chunk;
+}
+
+static void rcar_dmac_realloc_hwdesc(struct rcar_dmac_chan *chan,
+ struct rcar_dmac_desc *desc, size_t size)
+{
+ /*
+ * dma_alloc_coherent() allocates memory in page size increments. To
+ * avoid reallocating the hardware descriptors when the allocated size
+ * wouldn't change, align the requested size to a multiple of the page
+ * size.
+ */
+ size = PAGE_ALIGN(size);
+
+ if (desc->hwdescs.size == size)
+ return;
+
+ if (desc->hwdescs.mem) {
+ dma_free_coherent(chan->chan.device->dev, desc->hwdescs.size,
+ desc->hwdescs.mem, desc->hwdescs.dma);
+ desc->hwdescs.mem = NULL;
+ desc->hwdescs.size = 0;
+ }
+
+ if (!size)
+ return;
+
+ desc->hwdescs.mem = dma_alloc_coherent(chan->chan.device->dev, size,
+ &desc->hwdescs.dma, GFP_NOWAIT);
+ if (!desc->hwdescs.mem)
+ return;
+
+ desc->hwdescs.size = size;
+}
+
+static int rcar_dmac_fill_hwdesc(struct rcar_dmac_chan *chan,
+ struct rcar_dmac_desc *desc)
+{
+ struct rcar_dmac_xfer_chunk *chunk;
+ struct rcar_dmac_hw_desc *hwdesc;
+
+ rcar_dmac_realloc_hwdesc(chan, desc, desc->nchunks * sizeof(*hwdesc));
+
+ hwdesc = desc->hwdescs.mem;
+ if (!hwdesc)
+ return -ENOMEM;
+
+ list_for_each_entry(chunk, &desc->chunks, node) {
+ hwdesc->sar = chunk->src_addr;
+ hwdesc->dar = chunk->dst_addr;
+ hwdesc->tcr = chunk->size >> desc->xfer_shift;
+ hwdesc++;
+ }
+
+ return 0;
+}
+
+/* -----------------------------------------------------------------------------
+ * Stop and reset
+ */
+static void rcar_dmac_chcr_de_barrier(struct rcar_dmac_chan *chan)
+{
+ u32 chcr;
+ unsigned int i;
+
+ /*
+ * Ensure that the DE bit actually reads back as 0 after it has
+ * been cleared.
+ */
+ for (i = 0; i < 1024; i++) {
+ chcr = rcar_dmac_chan_read(chan, RCAR_DMACHCR);
+ if (!(chcr & RCAR_DMACHCR_DE))
+ return;
+ udelay(1);
+ }
+
+ dev_err(chan->chan.device->dev, "CHCR DE check error\n");
+}
+
+static void rcar_dmac_clear_chcr_de(struct rcar_dmac_chan *chan)
+{
+ u32 chcr = rcar_dmac_chan_read(chan, RCAR_DMACHCR);
+
+ /* set DE=0 and flush remaining data */
+ rcar_dmac_chan_write(chan, RCAR_DMACHCR, (chcr & ~RCAR_DMACHCR_DE));
+
+ /* make sure all remaining data was flushed */
+ rcar_dmac_chcr_de_barrier(chan);
+}
+
+static void rcar_dmac_chan_halt(struct rcar_dmac_chan *chan)
+{
+ u32 chcr = rcar_dmac_chan_read(chan, RCAR_DMACHCR);
+
+ chcr &= ~(RCAR_DMACHCR_DSE | RCAR_DMACHCR_DSIE | RCAR_DMACHCR_IE |
+ RCAR_DMACHCR_TE | RCAR_DMACHCR_DE |
+ RCAR_DMACHCR_CAE | RCAR_DMACHCR_CAIE);
+ rcar_dmac_chan_write(chan, RCAR_DMACHCR, chcr);
+ rcar_dmac_chcr_de_barrier(chan);
+}
+
+static void rcar_dmac_chan_reinit(struct rcar_dmac_chan *chan)
+{
+ struct rcar_dmac_desc *desc, *_desc;
+ unsigned long flags;
+ LIST_HEAD(descs);
+
+ spin_lock_irqsave(&chan->lock, flags);
+
+ /* Move all non-free descriptors to the local lists. */
+ list_splice_init(&chan->desc.pending, &descs);
+ list_splice_init(&chan->desc.active, &descs);
+ list_splice_init(&chan->desc.done, &descs);
+ list_splice_init(&chan->desc.wait, &descs);
+
+ chan->desc.running = NULL;
+
+ spin_unlock_irqrestore(&chan->lock, flags);
+
+ list_for_each_entry_safe(desc, _desc, &descs, node) {
+ list_del(&desc->node);
+ rcar_dmac_desc_put(chan, desc);
+ }
+}
+
+static void rcar_dmac_stop_all_chan(struct rcar_dmac *dmac)
+{
+ struct rcar_dmac_chan *chan;
+ unsigned int i;
+
+ /* Stop all channels. */
+ for_each_rcar_dmac_chan(i, dmac, chan) {
+ /* Stop and reinitialize the channel. */
+ spin_lock_irq(&chan->lock);
+ rcar_dmac_chan_halt(chan);
+ spin_unlock_irq(&chan->lock);
+ }
+}
+
+static int rcar_dmac_chan_pause(struct dma_chan *chan)
+{
+ unsigned long flags;
+ struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan);
+
+ spin_lock_irqsave(&rchan->lock, flags);
+ rcar_dmac_clear_chcr_de(rchan);
+ spin_unlock_irqrestore(&rchan->lock, flags);
+
+ return 0;
+}
+
+/* -----------------------------------------------------------------------------
+ * Descriptors preparation
+ */
+
+static void rcar_dmac_chan_configure_desc(struct rcar_dmac_chan *chan,
+ struct rcar_dmac_desc *desc)
+{
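+ /* chcr_ts[] is indexed by ilog2() of the transfer size in bytes. */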
+ static const u32 chcr_ts[] = {
+ RCAR_DMACHCR_TS_1B, RCAR_DMACHCR_TS_2B,
+ RCAR_DMACHCR_TS_4B, RCAR_DMACHCR_TS_8B,
+ RCAR_DMACHCR_TS_16B, RCAR_DMACHCR_TS_32B,
+ RCAR_DMACHCR_TS_64B,
+ };
+
+ unsigned int xfer_size;
+ u32 chcr;
+
+ switch (desc->direction) {
+ case DMA_DEV_TO_MEM:
+ chcr = RCAR_DMACHCR_DM_INC | RCAR_DMACHCR_SM_FIXED
+ | RCAR_DMACHCR_RS_DMARS;
+ xfer_size = chan->src.xfer_size;
+ break;
+
+ case DMA_MEM_TO_DEV:
+ chcr = RCAR_DMACHCR_DM_FIXED | RCAR_DMACHCR_SM_INC
+ | RCAR_DMACHCR_RS_DMARS;
+ xfer_size = chan->dst.xfer_size;
+ break;
+
+ case DMA_MEM_TO_MEM:
+ default:
+ chcr = RCAR_DMACHCR_DM_INC | RCAR_DMACHCR_SM_INC
+ | RCAR_DMACHCR_RS_AUTO;
+ xfer_size = RCAR_DMAC_MEMCPY_XFER_SIZE;
+ break;
+ }
+
+ desc->xfer_shift = ilog2(xfer_size);
+ desc->chcr = chcr | chcr_ts[desc->xfer_shift];
+}
+
+/*
+ * rcar_dmac_chan_prep_sg - prepare transfer descriptors from an SG list
+ *
+ * Common routine for public (MEMCPY) and slave DMA. The MEMCPY case is also
+ * converted to scatter-gather to guarantee consistent locking and correct
+ * list manipulation. For slave DMA, direction carries the usual meaning and,
+ * logically, the SG list is RAM while the addr variable contains the slave
+ * address, e.g. the FIFO I/O register. For MEMCPY, direction equals
+ * DMA_MEM_TO_MEM and the SG list contains a single element pointing at the
+ * source buffer.
+ */
+static struct dma_async_tx_descriptor *
+rcar_dmac_chan_prep_sg(struct rcar_dmac_chan *chan, struct scatterlist *sgl,
+ unsigned int sg_len, dma_addr_t dev_addr,
+ enum dma_transfer_direction dir, unsigned long dma_flags,
+ bool cyclic)
+{
+ struct rcar_dmac_xfer_chunk *chunk;
+ struct rcar_dmac_desc *desc;
+ struct scatterlist *sg;
+ unsigned int nchunks = 0;
+ unsigned int max_chunk_size;
+ unsigned int full_size = 0;
+ bool cross_boundary = false;
+ unsigned int i;
+#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
+ u32 high_dev_addr;
+ u32 high_mem_addr;
+#endif
+
+ desc = rcar_dmac_desc_get(chan);
+ if (!desc)
+ return NULL;
+
+ desc->async_tx.flags = dma_flags;
+ desc->async_tx.cookie = -EBUSY;
+
+ desc->cyclic = cyclic;
+ desc->direction = dir;
+
+ rcar_dmac_chan_configure_desc(chan, desc);
+
+ max_chunk_size = RCAR_DMATCR_MASK << desc->xfer_shift;
+
+ /*
+ * Allocate and fill the transfer chunk descriptors. We own the only
+ * reference to the DMA descriptor, there's no need for locking.
+ */
+ for_each_sg(sgl, sg, sg_len, i) {
+ dma_addr_t mem_addr = sg_dma_address(sg);
+ unsigned int len = sg_dma_len(sg);
+
+ full_size += len;
+
+#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
+ if (i == 0) {
+ high_dev_addr = dev_addr >> 32;
+ high_mem_addr = mem_addr >> 32;
+ }
+
+ if ((dev_addr >> 32 != high_dev_addr) ||
+ (mem_addr >> 32 != high_mem_addr))
+ cross_boundary = true;
+#endif
+ while (len) {
+ unsigned int size = min(len, max_chunk_size);
+
+#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
+ /*
+ * Prevent individual transfers from crossing 4GB
+ * boundaries.
+ */
+ if (dev_addr >> 32 != (dev_addr + size - 1) >> 32) {
+ size = ALIGN(dev_addr, 1ULL << 32) - dev_addr;
+ cross_boundary = true;
+ }
+ if (mem_addr >> 32 != (mem_addr + size - 1) >> 32) {
+ size = ALIGN(mem_addr, 1ULL << 32) - mem_addr;
+ cross_boundary = true;
+ }
+#endif
+
+ chunk = rcar_dmac_xfer_chunk_get(chan);
+ if (!chunk) {
+ rcar_dmac_desc_put(chan, desc);
+ return NULL;
+ }
+
+ if (dir == DMA_DEV_TO_MEM) {
+ chunk->src_addr = dev_addr;
+ chunk->dst_addr = mem_addr;
+ } else {
+ chunk->src_addr = mem_addr;
+ chunk->dst_addr = dev_addr;
+ }
+
+ chunk->size = size;
+
+ dev_dbg(chan->chan.device->dev,
+ "chan%u: chunk %p/%p sgl %u@%p, %u/%u %pad -> %pad\n",
+ chan->index, chunk, desc, i, sg, size, len,
+ &chunk->src_addr, &chunk->dst_addr);
+
+ mem_addr += size;
+ if (dir == DMA_MEM_TO_MEM)
+ dev_addr += size;
+
+ len -= size;
+
+ list_add_tail(&chunk->node, &desc->chunks);
+ nchunks++;
+ }
+ }
+
+ desc->nchunks = nchunks;
+ desc->size = full_size;
+
+ /*
+ * Use hardware descriptor lists if possible when more than one chunk
+ * needs to be transferred (otherwise they don't make much sense).
+ *
+ * When hardware descriptors are used, the source and destination
+ * addresses must be located in the same 4 GiB region of the 40-bit
+ * address space; cross_boundary tracks whether that constraint holds.
+ */
+ desc->hwdescs.use = !cross_boundary && nchunks > 1;
+ if (desc->hwdescs.use) {
+ if (rcar_dmac_fill_hwdesc(chan, desc) < 0)
+ desc->hwdescs.use = false;
+ }
+
+ return &desc->async_tx;
+}
+
+/* -----------------------------------------------------------------------------
+ * DMA engine operations
+ */
+
+static int rcar_dmac_alloc_chan_resources(struct dma_chan *chan)
+{
+ struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan);
+ int ret;
+
+ INIT_LIST_HEAD(&rchan->desc.chunks_free);
+ INIT_LIST_HEAD(&rchan->desc.pages);
+
+ /* Preallocate descriptors. */
+ ret = rcar_dmac_xfer_chunk_alloc(rchan, GFP_KERNEL);
+ if (ret < 0)
+ return -ENOMEM;
+
+ ret = rcar_dmac_desc_alloc(rchan, GFP_KERNEL);
+ if (ret < 0)
+ return -ENOMEM;
+
+ return pm_runtime_get_sync(chan->device->dev);
+}
+
+static void rcar_dmac_free_chan_resources(struct dma_chan *chan)
+{
+ struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan);
+ struct rcar_dmac *dmac = to_rcar_dmac(chan->device);
+ struct rcar_dmac_chan_map *map = &rchan->map;
+ struct rcar_dmac_desc_page *page, *_page;
+ struct rcar_dmac_desc *desc;
+ LIST_HEAD(list);
+
+ /* Protect against ISR */
+ spin_lock_irq(&rchan->lock);
+ rcar_dmac_chan_halt(rchan);
+ spin_unlock_irq(&rchan->lock);
+
+ /*
+ * Now no new interrupts will occur, but one might already be
+ * running. Wait for it to finish before freeing resources.
+ */
+ synchronize_irq(rchan->irq);
+
+ if (rchan->mid_rid >= 0) {
+ /* The caller is holding dma_list_mutex */
+ clear_bit(rchan->mid_rid, dmac->modules);
+ rchan->mid_rid = -EINVAL;
+ }
+
+ list_splice_init(&rchan->desc.free, &list);
+ list_splice_init(&rchan->desc.pending, &list);
+ list_splice_init(&rchan->desc.active, &list);
+ list_splice_init(&rchan->desc.done, &list);
+ list_splice_init(&rchan->desc.wait, &list);
+
+ rchan->desc.running = NULL;
+
+ list_for_each_entry(desc, &list, node)
+ rcar_dmac_realloc_hwdesc(rchan, desc, 0);
+
+ list_for_each_entry_safe(page, _page, &rchan->desc.pages, node) {
+ list_del(&page->node);
+ free_page((unsigned long)page);
+ }
+
+ /* Remove slave mapping if present. */
+ if (map->slave.xfer_size) {
+ dma_unmap_resource(chan->device->dev, map->addr,
+ map->slave.xfer_size, map->dir, 0);
+ map->slave.xfer_size = 0;
+ }
+
+ pm_runtime_put(chan->device->dev);
+}
+
+static struct dma_async_tx_descriptor *
+rcar_dmac_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dma_dest,
+ dma_addr_t dma_src, size_t len, unsigned long flags)
+{
+ struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan);
+ struct scatterlist sgl;
+
+ if (!len)
+ return NULL;
+
+ sg_init_table(&sgl, 1);
+ sg_set_page(&sgl, pfn_to_page(PFN_DOWN(dma_src)), len,
+ offset_in_page(dma_src));
+ sg_dma_address(&sgl) = dma_src;
+ sg_dma_len(&sgl) = len;
+
+ return rcar_dmac_chan_prep_sg(rchan, &sgl, 1, dma_dest,
+ DMA_MEM_TO_MEM, flags, false);
+}
+
+static int rcar_dmac_map_slave_addr(struct dma_chan *chan,
+ enum dma_transfer_direction dir)
+{
+ struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan);
+ struct rcar_dmac_chan_map *map = &rchan->map;
+ phys_addr_t dev_addr;
+ size_t dev_size;
+ enum dma_data_direction dev_dir;
+
+ if (dir == DMA_DEV_TO_MEM) {
+ dev_addr = rchan->src.slave_addr;
+ dev_size = rchan->src.xfer_size;
+ dev_dir = DMA_TO_DEVICE;
+ } else {
+ dev_addr = rchan->dst.slave_addr;
+ dev_size = rchan->dst.xfer_size;
+ dev_dir = DMA_FROM_DEVICE;
+ }
+
+ /* Reuse current map if possible. */
+ if (dev_addr == map->slave.slave_addr &&
+ dev_size == map->slave.xfer_size &&
+ dev_dir == map->dir)
+ return 0;
+
+ /* Remove old mapping if present. */
+ if (map->slave.xfer_size)
+ dma_unmap_resource(chan->device->dev, map->addr,
+ map->slave.xfer_size, map->dir, 0);
+ map->slave.xfer_size = 0;
+
+ /* Create new slave address map. */
+ map->addr = dma_map_resource(chan->device->dev, dev_addr, dev_size,
+ dev_dir, 0);
+
+ if (dma_mapping_error(chan->device->dev, map->addr)) {
+ dev_err(chan->device->dev,
+ "chan%u: failed to map %zx@%pap", rchan->index,
+ dev_size, &dev_addr);
+ return -EIO;
+ }
+
+ dev_dbg(chan->device->dev, "chan%u: map %zx@%pap to %pad dir: %s\n",
+ rchan->index, dev_size, &dev_addr, &map->addr,
+ dev_dir == DMA_TO_DEVICE ? "DMA_TO_DEVICE" : "DMA_FROM_DEVICE");
+
+ map->slave.slave_addr = dev_addr;
+ map->slave.xfer_size = dev_size;
+ map->dir = dev_dir;
+
+ return 0;
+}
+
+static struct dma_async_tx_descriptor *
+rcar_dmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
+ unsigned int sg_len, enum dma_transfer_direction dir,
+ unsigned long flags, void *context)
+{
+ struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan);
+
+ /* Someone calling slave DMA on a generic channel? */
+ if (rchan->mid_rid < 0 || !sg_len || !sg_dma_len(sgl)) {
+ dev_warn(chan->device->dev,
+ "%s: bad parameter: len=%d, id=%d\n",
+ __func__, sg_len, rchan->mid_rid);
+ return NULL;
+ }
+
+ if (rcar_dmac_map_slave_addr(chan, dir))
+ return NULL;
+
+ return rcar_dmac_chan_prep_sg(rchan, sgl, sg_len, rchan->map.addr,
+ dir, flags, false);
+}
+
+#define RCAR_DMAC_MAX_SG_LEN 32
+
+static struct dma_async_tx_descriptor *
+rcar_dmac_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr,
+ size_t buf_len, size_t period_len,
+ enum dma_transfer_direction dir, unsigned long flags)
+{
+ struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan);
+ struct dma_async_tx_descriptor *desc;
+ struct scatterlist *sgl;
+ unsigned int sg_len;
+ unsigned int i;
+
+ /* Someone calling slave DMA on a generic channel? */
+ if (rchan->mid_rid < 0 || buf_len < period_len) {
+ dev_warn(chan->device->dev,
+ "%s: bad parameter: buf_len=%zu, period_len=%zu, id=%d\n",
+ __func__, buf_len, period_len, rchan->mid_rid);
+ return NULL;
+ }
+
+ if (rcar_dmac_map_slave_addr(chan, dir))
+ return NULL;
+
+ sg_len = buf_len / period_len;
+ if (sg_len > RCAR_DMAC_MAX_SG_LEN) {
+ dev_err(chan->device->dev,
+ "chan%u: sg length %d exceeds limit %d",
+ rchan->index, sg_len, RCAR_DMAC_MAX_SG_LEN);
+ return NULL;
+ }
+
+ /*
+ * Allocate the sg list dynamically as it would consume too much stack
+ * space.
+ */
+ sgl = kmalloc_array(sg_len, sizeof(*sgl), GFP_NOWAIT);
+ if (!sgl)
+ return NULL;
+
+ sg_init_table(sgl, sg_len);
+
+ for (i = 0; i < sg_len; ++i) {
+ dma_addr_t src = buf_addr + (period_len * i);
+
+ sg_set_page(&sgl[i], pfn_to_page(PFN_DOWN(src)), period_len,
+ offset_in_page(src));
+ sg_dma_address(&sgl[i]) = src;
+ sg_dma_len(&sgl[i]) = period_len;
+ }
+
+ desc = rcar_dmac_chan_prep_sg(rchan, sgl, sg_len, rchan->map.addr,
+ dir, flags, true);
+
+ kfree(sgl);
+ return desc;
+}
+
+static int rcar_dmac_device_config(struct dma_chan *chan,
+ struct dma_slave_config *cfg)
+{
+ struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan);
+
+ /*
+ * We could lock this, but you shouldn't be configuring the
+ * channel while using it...
+ */
+ rchan->src.slave_addr = cfg->src_addr;
+ rchan->dst.slave_addr = cfg->dst_addr;
+ rchan->src.xfer_size = cfg->src_addr_width;
+ rchan->dst.xfer_size = cfg->dst_addr_width;
+
+ return 0;
+}
+
+static int rcar_dmac_chan_terminate_all(struct dma_chan *chan)
+{
+ struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan);
+ unsigned long flags;
+
+ spin_lock_irqsave(&rchan->lock, flags);
+ rcar_dmac_chan_halt(rchan);
+ spin_unlock_irqrestore(&rchan->lock, flags);
+
+ /*
+ * FIXME: No new interrupt can occur now, but the IRQ thread might still
+ * be running.
+ */
+
+ rcar_dmac_chan_reinit(rchan);
+
+ return 0;
+}
+
+static unsigned int rcar_dmac_chan_get_residue(struct rcar_dmac_chan *chan,
+ dma_cookie_t cookie)
+{
+ struct rcar_dmac_desc *desc = chan->desc.running;
+ struct rcar_dmac_xfer_chunk *running = NULL;
+ struct rcar_dmac_xfer_chunk *chunk;
+ enum dma_status status;
+ unsigned int residue = 0;
+ unsigned int dptr = 0;
+ unsigned int chcrb;
+ unsigned int tcrb;
+ unsigned int i;
+
+ if (!desc)
+ return 0;
+
+ /*
+ * If the cookie corresponds to a descriptor that has been completed
+ * there is no residue. The same check has already been performed by the
+ * caller but without holding the channel lock, so the descriptor could
+ * now be complete.
+ */
+ status = dma_cookie_status(&chan->chan, cookie, NULL);
+ if (status == DMA_COMPLETE)
+ return 0;
+
+ /*
+ * If the cookie doesn't correspond to the currently running transfer
+ * then the descriptor hasn't been processed yet, and the residue is
+ * equal to the full descriptor size.
+ * Also, a client driver may call this function before
+ * rcar_dmac_isr_channel_thread() runs. In that case, "desc.running"
+ * already points at the next descriptor and the completed one sits
+ * on the done list. So, if the argument cookie matches the cookie of
+ * a descriptor on the done list, the residue is zero.
+ */
+ if (cookie != desc->async_tx.cookie) {
+ list_for_each_entry(desc, &chan->desc.done, node) {
+ if (cookie == desc->async_tx.cookie)
+ return 0;
+ }
+ list_for_each_entry(desc, &chan->desc.pending, node) {
+ if (cookie == desc->async_tx.cookie)
+ return desc->size;
+ }
+ list_for_each_entry(desc, &chan->desc.active, node) {
+ if (cookie == desc->async_tx.cookie)
+ return desc->size;
+ }
+
+ /*
+ * No descriptor found for the cookie, there's thus no residue.
+ * This shouldn't happen if the calling driver passes a correct
+ * cookie value.
+ */
+ WARN(1, "No descriptor for cookie!");
+ return 0;
+ }
+
+ /*
+ * We need to read two registers.
+ * Make sure the control register does not skip to the next chunk
+ * while reading the counter.
+ * Trying 3 times should be enough: initial read, retry, retry
+ * for the paranoid.
+ */
+ for (i = 0; i < 3; i++) {
+ chcrb = rcar_dmac_chan_read(chan, RCAR_DMACHCRB) &
+ RCAR_DMACHCRB_DPTR_MASK;
+ tcrb = rcar_dmac_chan_read(chan, RCAR_DMATCRB);
+ /* Still the same? */
+ if (chcrb == (rcar_dmac_chan_read(chan, RCAR_DMACHCRB) &
+ RCAR_DMACHCRB_DPTR_MASK))
+ break;
+ }
+ WARN_ONCE(i >= 3, "residue might not be continuous!");
+
+ /*
+ * In descriptor mode the descriptor running pointer is not maintained
+ * by the interrupt handler, find the running descriptor from the
+ * descriptor pointer field in the CHCRB register. In non-descriptor
+ * mode just use the running descriptor pointer.
+ */
+ if (desc->hwdescs.use) {
+ dptr = chcrb >> RCAR_DMACHCRB_DPTR_SHIFT;
+ if (dptr == 0)
+ dptr = desc->nchunks;
+ dptr--;
+ WARN_ON(dptr >= desc->nchunks);
+ } else {
+ running = desc->running;
+ }
+
+ /* Compute the size of all chunks still to be transferred. */
+ list_for_each_entry_reverse(chunk, &desc->chunks, node) {
+ if (chunk == running || ++dptr == desc->nchunks)
+ break;
+
+ residue += chunk->size;
+ }
+
+ /* Add the residue for the current chunk. */
+ residue += tcrb << desc->xfer_shift;
+
+ return residue;
+}
+
+static enum dma_status rcar_dmac_tx_status(struct dma_chan *chan,
+ dma_cookie_t cookie,
+ struct dma_tx_state *txstate)
+{
+ struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan);
+ enum dma_status status;
+ unsigned long flags;
+ unsigned int residue;
+ bool cyclic;
+
+ status = dma_cookie_status(chan, cookie, txstate);
+ if (status == DMA_COMPLETE || !txstate)
+ return status;
+
+ spin_lock_irqsave(&rchan->lock, flags);
+ residue = rcar_dmac_chan_get_residue(rchan, cookie);
+ cyclic = rchan->desc.running ? rchan->desc.running->cyclic : false;
+ spin_unlock_irqrestore(&rchan->lock, flags);
+
+ /* if there's no residue, the cookie is complete */
+ if (!residue && !cyclic)
+ return DMA_COMPLETE;
+
+ dma_set_residue(txstate, residue);
+
+ return status;
+}
+
+static void rcar_dmac_issue_pending(struct dma_chan *chan)
+{
+ struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan);
+ unsigned long flags;
+
+ spin_lock_irqsave(&rchan->lock, flags);
+
+ if (list_empty(&rchan->desc.pending))
+ goto done;
+
+ /* Append the pending list to the active list. */
+ list_splice_tail_init(&rchan->desc.pending, &rchan->desc.active);
+
+ /*
+ * If no transfer is running pick the first descriptor from the active
+ * list and start the transfer.
+ */
+ if (!rchan->desc.running) {
+ struct rcar_dmac_desc *desc;
+
+ desc = list_first_entry(&rchan->desc.active,
+ struct rcar_dmac_desc, node);
+ rchan->desc.running = desc;
+
+ rcar_dmac_chan_start_xfer(rchan);
+ }
+
+done:
+ spin_unlock_irqrestore(&rchan->lock, flags);
+}
+
+static void rcar_dmac_device_synchronize(struct dma_chan *chan)
+{
+ struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan);
+
+ synchronize_irq(rchan->irq);
+}
+
+/* -----------------------------------------------------------------------------
+ * IRQ handling
+ */
+
+static irqreturn_t rcar_dmac_isr_desc_stage_end(struct rcar_dmac_chan *chan)
+{
+ struct rcar_dmac_desc *desc = chan->desc.running;
+ unsigned int stage;
+
+ if (WARN_ON(!desc || !desc->cyclic)) {
+ /*
+ * This should never happen; there should always be a running
+ * cyclic descriptor when a descriptor stage end interrupt is
+ * triggered. Warn and return.
+ */
+ return IRQ_NONE;
+ }
+
+ /* Program the interrupt pointer to the next stage. */
+ stage = (rcar_dmac_chan_read(chan, RCAR_DMACHCRB) &
+ RCAR_DMACHCRB_DPTR_MASK) >> RCAR_DMACHCRB_DPTR_SHIFT;
+ rcar_dmac_chan_write(chan, RCAR_DMADPCR, RCAR_DMADPCR_DIPT(stage));
+
+ return IRQ_WAKE_THREAD;
+}
+
+static irqreturn_t rcar_dmac_isr_transfer_end(struct rcar_dmac_chan *chan)
+{
+ struct rcar_dmac_desc *desc = chan->desc.running;
+ irqreturn_t ret = IRQ_WAKE_THREAD;
+
+ if (WARN_ON_ONCE(!desc)) {
+ /*
+ * This should never happen; there should always be a running
+ * descriptor when a transfer end interrupt is triggered. Warn
+ * and return.
+ */
+ return IRQ_NONE;
+ }
+
+ /*
+ * The transfer end interrupt isn't generated for each chunk when using
+ * descriptor mode. Only update the running chunk pointer in
+ * non-descriptor mode.
+ */
+ if (!desc->hwdescs.use) {
+ /*
+ * If we haven't completed the last transfer chunk simply move
+ * to the next one. Only wake the IRQ thread if the transfer is
+ * cyclic.
+ */
+ if (!list_is_last(&desc->running->node, &desc->chunks)) {
+ desc->running = list_next_entry(desc->running, node);
+ if (!desc->cyclic)
+ ret = IRQ_HANDLED;
+ goto done;
+ }
+
+ /*
+ * We've completed the last transfer chunk. If the transfer is
+ * cyclic, move back to the first one.
+ */
+ if (desc->cyclic) {
+ desc->running =
+ list_first_entry(&desc->chunks,
+ struct rcar_dmac_xfer_chunk,
+ node);
+ goto done;
+ }
+ }
+
+ /* The descriptor is complete, move it to the done list. */
+ list_move_tail(&desc->node, &chan->desc.done);
+
+ /* Queue the next descriptor, if any. */
+ if (!list_empty(&chan->desc.active))
+ chan->desc.running = list_first_entry(&chan->desc.active,
+ struct rcar_dmac_desc,
+ node);
+ else
+ chan->desc.running = NULL;
+
+done:
+ if (chan->desc.running)
+ rcar_dmac_chan_start_xfer(chan);
+
+ return ret;
+}
+
+static irqreturn_t rcar_dmac_isr_channel(int irq, void *dev)
+{
+ u32 mask = RCAR_DMACHCR_DSE | RCAR_DMACHCR_TE;
+ struct rcar_dmac_chan *chan = dev;
+ irqreturn_t ret = IRQ_NONE;
+ bool reinit = false;
+ u32 chcr;
+
+ spin_lock(&chan->lock);
+
+ chcr = rcar_dmac_chan_read(chan, RCAR_DMACHCR);
+ if (chcr & RCAR_DMACHCR_CAE) {
+ struct rcar_dmac *dmac = to_rcar_dmac(chan->chan.device);
+
+ /*
+ * We don't need to call rcar_dmac_chan_halt() because the channel
+ * is already stopped in the error case. We only need to clear the
+ * register and check the DE bit as recovery.
+ */
+ rcar_dmac_chan_clear(dmac, chan);
+ rcar_dmac_chcr_de_barrier(chan);
+ reinit = true;
+ goto spin_lock_end;
+ }
+
+ if (chcr & RCAR_DMACHCR_TE)
+ mask |= RCAR_DMACHCR_DE;
+ rcar_dmac_chan_write(chan, RCAR_DMACHCR, chcr & ~mask);
+ if (mask & RCAR_DMACHCR_DE)
+ rcar_dmac_chcr_de_barrier(chan);
+
+ if (chcr & RCAR_DMACHCR_DSE)
+ ret |= rcar_dmac_isr_desc_stage_end(chan);
+
+ if (chcr & RCAR_DMACHCR_TE)
+ ret |= rcar_dmac_isr_transfer_end(chan);
+
+spin_lock_end:
+ spin_unlock(&chan->lock);
+
+ if (reinit) {
+ dev_err(chan->chan.device->dev, "Channel Address Error\n");
+
+ rcar_dmac_chan_reinit(chan);
+ ret = IRQ_HANDLED;
+ }
+
+ return ret;
+}
+
+static irqreturn_t rcar_dmac_isr_channel_thread(int irq, void *dev)
+{
+ struct rcar_dmac_chan *chan = dev;
+ struct rcar_dmac_desc *desc;
+ struct dmaengine_desc_callback cb;
+
+ spin_lock_irq(&chan->lock);
+
+ /* For cyclic transfers, notify the user after every chunk. */
+ if (chan->desc.running && chan->desc.running->cyclic) {
+ desc = chan->desc.running;
+ dmaengine_desc_get_callback(&desc->async_tx, &cb);
+
+ if (dmaengine_desc_callback_valid(&cb)) {
+ spin_unlock_irq(&chan->lock);
+ dmaengine_desc_callback_invoke(&cb, NULL);
+ spin_lock_irq(&chan->lock);
+ }
+ }
+
+ /*
+ * Call the callback function for all descriptors on the done list and
+ * move them to the ack wait list.
+ */
+ while (!list_empty(&chan->desc.done)) {
+ desc = list_first_entry(&chan->desc.done, struct rcar_dmac_desc,
+ node);
+ dma_cookie_complete(&desc->async_tx);
+ list_del(&desc->node);
+
+ dmaengine_desc_get_callback(&desc->async_tx, &cb);
+ if (dmaengine_desc_callback_valid(&cb)) {
+ spin_unlock_irq(&chan->lock);
+ /*
+ * We own the only reference to this descriptor, we can
+ * safely dereference it without holding the channel
+ * lock.
+ */
+ dmaengine_desc_callback_invoke(&cb, NULL);
+ spin_lock_irq(&chan->lock);
+ }
+
+ list_add_tail(&desc->node, &chan->desc.wait);
+ }
+
+ spin_unlock_irq(&chan->lock);
+
+ /* Recycle all acked descriptors. */
+ rcar_dmac_desc_recycle_acked(chan);
+
+ return IRQ_HANDLED;
+}
+
+/* -----------------------------------------------------------------------------
+ * OF xlate and channel filter
+ */
+
+static bool rcar_dmac_chan_filter(struct dma_chan *chan, void *arg)
+{
+ struct rcar_dmac *dmac = to_rcar_dmac(chan->device);
+ struct of_phandle_args *dma_spec = arg;
+
+ /*
+ * FIXME: Using a filter on OF platforms is nonsense. The OF xlate
+ * function knows which device it wants to allocate a channel from,
+ * and would be perfectly capable of selecting the channel it wants.
+ * Forcing it to call dma_request_channel() and iterate through all
+ * channels from all controllers is just pointless.
+ */
+ if (chan->device->device_config != rcar_dmac_device_config)
+ return false;
+
+ return !test_and_set_bit(dma_spec->args[0], dmac->modules);
+}
+
+static struct dma_chan *rcar_dmac_of_xlate(struct of_phandle_args *dma_spec,
+ struct of_dma *ofdma)
+{
+ struct rcar_dmac_chan *rchan;
+ struct dma_chan *chan;
+ dma_cap_mask_t mask;
+
+ if (dma_spec->args_count != 1)
+ return NULL;
+
+ /* Only slave DMA channels can be allocated via DT */
+ dma_cap_zero(mask);
+ dma_cap_set(DMA_SLAVE, mask);
+
+ chan = __dma_request_channel(&mask, rcar_dmac_chan_filter, dma_spec,
+ ofdma->of_node);
+ if (!chan)
+ return NULL;
+
+ rchan = to_rcar_dmac_chan(chan);
+ rchan->mid_rid = dma_spec->args[0];
+
+ return chan;
+}
+
+/* -----------------------------------------------------------------------------
+ * Power management
+ */
+
+#ifdef CONFIG_PM
+static int rcar_dmac_runtime_suspend(struct device *dev)
+{
+ return 0;
+}
+
+static int rcar_dmac_runtime_resume(struct device *dev)
+{
+ struct rcar_dmac *dmac = dev_get_drvdata(dev);
+
+ return rcar_dmac_init(dmac);
+}
+#endif
+
+static const struct dev_pm_ops rcar_dmac_pm = {
+ /*
+ * TODO for system sleep/resume:
+ * - Wait for the current transfer to complete and stop the device,
+ * - Resume transfers, if any.
+ */
+ SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+ pm_runtime_force_resume)
+ SET_RUNTIME_PM_OPS(rcar_dmac_runtime_suspend, rcar_dmac_runtime_resume,
+ NULL)
+};
+
+/* -----------------------------------------------------------------------------
+ * Probe and remove
+ */
+
+static int rcar_dmac_chan_probe(struct rcar_dmac *dmac,
+ struct rcar_dmac_chan *rchan)
+{
+ struct platform_device *pdev = to_platform_device(dmac->dev);
+ struct dma_chan *chan = &rchan->chan;
+ char pdev_irqname[5];
+ char *irqname;
+ int ret;
+
+ rchan->mid_rid = -EINVAL;
+
+ spin_lock_init(&rchan->lock);
+
+ INIT_LIST_HEAD(&rchan->desc.free);
+ INIT_LIST_HEAD(&rchan->desc.pending);
+ INIT_LIST_HEAD(&rchan->desc.active);
+ INIT_LIST_HEAD(&rchan->desc.done);
+ INIT_LIST_HEAD(&rchan->desc.wait);
+
+ /* Request the channel interrupt. */
+ sprintf(pdev_irqname, "ch%u", rchan->index);
+ rchan->irq = platform_get_irq_byname(pdev, pdev_irqname);
+ if (rchan->irq < 0)
+ return -ENODEV;
+
+ irqname = devm_kasprintf(dmac->dev, GFP_KERNEL, "%s:%u",
+ dev_name(dmac->dev), rchan->index);
+ if (!irqname)
+ return -ENOMEM;
+
+ /*
+ * Initialize the DMA engine channel and add it to the DMA engine
+ * channels list.
+ */
+ chan->device = &dmac->engine;
+ dma_cookie_init(chan);
+
+ list_add_tail(&chan->device_node, &dmac->engine.channels);
+
+ ret = devm_request_threaded_irq(dmac->dev, rchan->irq,
+ rcar_dmac_isr_channel,
+ rcar_dmac_isr_channel_thread, 0,
+ irqname, rchan);
+ if (ret) {
+ dev_err(dmac->dev, "failed to request IRQ %u (%d)\n",
+ rchan->irq, ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+#define RCAR_DMAC_MAX_CHANNELS 32
+
+static int rcar_dmac_parse_of(struct device *dev, struct rcar_dmac *dmac)
+{
+ struct device_node *np = dev->of_node;
+ int ret;
+
+ ret = of_property_read_u32(np, "dma-channels", &dmac->n_channels);
+ if (ret < 0) {
+ dev_err(dev, "unable to read dma-channels property\n");
+ return ret;
+ }
+
+ /* The hardware and driver don't support more than 32 bits in CHCLR */
+ if (dmac->n_channels <= 0 ||
+ dmac->n_channels >= RCAR_DMAC_MAX_CHANNELS) {
+ dev_err(dev, "invalid number of channels %u\n",
+ dmac->n_channels);
+ return -EINVAL;
+ }
+
+ /*
+ * If the dma-channel-mask property cannot be read, assume that all
+ * channels can be used.
+ */
+ dmac->channels_mask = GENMASK(dmac->n_channels - 1, 0);
+ of_property_read_u32(np, "dma-channel-mask", &dmac->channels_mask);
+
+ /* Clear any mask bits that refer to non-existent channels. */
+ dmac->channels_mask &= GENMASK(dmac->n_channels - 1, 0);
+
+ return 0;
+}
+
+static int rcar_dmac_probe(struct platform_device *pdev)
+{
+ const enum dma_slave_buswidth widths = DMA_SLAVE_BUSWIDTH_1_BYTE |
+ DMA_SLAVE_BUSWIDTH_2_BYTES | DMA_SLAVE_BUSWIDTH_4_BYTES |
+ DMA_SLAVE_BUSWIDTH_8_BYTES | DMA_SLAVE_BUSWIDTH_16_BYTES |
+ DMA_SLAVE_BUSWIDTH_32_BYTES | DMA_SLAVE_BUSWIDTH_64_BYTES;
+ const struct rcar_dmac_of_data *data;
+ struct rcar_dmac_chan *chan;
+ struct dma_device *engine;
+ void __iomem *chan_base;
+ struct rcar_dmac *dmac;
+ unsigned int i;
+ int ret;
+
+ data = of_device_get_match_data(&pdev->dev);
+ if (!data)
+ return -EINVAL;
+
+ dmac = devm_kzalloc(&pdev->dev, sizeof(*dmac), GFP_KERNEL);
+ if (!dmac)
+ return -ENOMEM;
+
+ dmac->dev = &pdev->dev;
+ platform_set_drvdata(pdev, dmac);
+ ret = dma_set_max_seg_size(dmac->dev, RCAR_DMATCR_MASK);
+ if (ret)
+ return ret;
+
+ ret = dma_set_mask_and_coherent(dmac->dev, DMA_BIT_MASK(40));
+ if (ret)
+ return ret;
+
+ ret = rcar_dmac_parse_of(&pdev->dev, dmac);
+ if (ret < 0)
+ return ret;
+
+ /*
+ * A still unconfirmed hardware bug prevents the IPMMU microTLB 0 from
+ * being flushed correctly, resulting in memory corruption. DMAC 0 channel 0
+ * is connected to microTLB 0 on currently supported platforms, so we
+ * can't use it with the IPMMU. As the IOMMU API operates at the device
+ * level we can't disable it selectively, so ignore channel 0 for now if
+ * the device is part of an IOMMU group.
+ */
+ if (device_iommu_mapped(&pdev->dev))
+ dmac->channels_mask &= ~BIT(0);
+
+ dmac->channels = devm_kcalloc(&pdev->dev, dmac->n_channels,
+ sizeof(*dmac->channels), GFP_KERNEL);
+ if (!dmac->channels)
+ return -ENOMEM;
+
+ /* Request resources. */
+ dmac->dmac_base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(dmac->dmac_base))
+ return PTR_ERR(dmac->dmac_base);
+
+ if (!data->chan_offset_base) {
+ dmac->chan_base = devm_platform_ioremap_resource(pdev, 1);
+ if (IS_ERR(dmac->chan_base))
+ return PTR_ERR(dmac->chan_base);
+
+ chan_base = dmac->chan_base;
+ } else {
+ chan_base = dmac->dmac_base + data->chan_offset_base;
+ }
+
+ for_each_rcar_dmac_chan(i, dmac, chan) {
+ chan->index = i;
+ chan->iomem = chan_base + i * data->chan_offset_stride;
+ }
+
+ /* Enable runtime PM and initialize the device. */
+ pm_runtime_enable(&pdev->dev);
+ ret = pm_runtime_resume_and_get(&pdev->dev);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "runtime PM get sync failed (%d)\n", ret);
+ goto err_pm_disable;
+ }
+
+ ret = rcar_dmac_init(dmac);
+ pm_runtime_put(&pdev->dev);
+
+ if (ret) {
+ dev_err(&pdev->dev, "failed to reset device\n");
+ goto err_pm_disable;
+ }
+
+ /* Initialize engine */
+ engine = &dmac->engine;
+
+ dma_cap_set(DMA_MEMCPY, engine->cap_mask);
+ dma_cap_set(DMA_SLAVE, engine->cap_mask);
+
+ engine->dev = &pdev->dev;
+ engine->copy_align = ilog2(RCAR_DMAC_MEMCPY_XFER_SIZE);
+
+ engine->src_addr_widths = widths;
+ engine->dst_addr_widths = widths;
+ engine->directions = BIT(DMA_MEM_TO_DEV) | BIT(DMA_DEV_TO_MEM);
+ engine->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
+
+ engine->device_alloc_chan_resources = rcar_dmac_alloc_chan_resources;
+ engine->device_free_chan_resources = rcar_dmac_free_chan_resources;
+ engine->device_prep_dma_memcpy = rcar_dmac_prep_dma_memcpy;
+ engine->device_prep_slave_sg = rcar_dmac_prep_slave_sg;
+ engine->device_prep_dma_cyclic = rcar_dmac_prep_dma_cyclic;
+ engine->device_config = rcar_dmac_device_config;
+ engine->device_pause = rcar_dmac_chan_pause;
+ engine->device_terminate_all = rcar_dmac_chan_terminate_all;
+ engine->device_tx_status = rcar_dmac_tx_status;
+ engine->device_issue_pending = rcar_dmac_issue_pending;
+ engine->device_synchronize = rcar_dmac_device_synchronize;
+
+ INIT_LIST_HEAD(&engine->channels);
+
+ for_each_rcar_dmac_chan(i, dmac, chan) {
+ ret = rcar_dmac_chan_probe(dmac, chan);
+ if (ret < 0)
+ goto err_pm_disable;
+ }
+
+ /* Register the DMAC as a DMA provider for DT. */
+ ret = of_dma_controller_register(pdev->dev.of_node, rcar_dmac_of_xlate,
+ NULL);
+ if (ret < 0)
+ goto err_pm_disable;
+
+ /*
+ * Register the DMA engine device.
+ *
+ * Default transfer size of 32 bytes requires 32-byte alignment.
+ */
+ ret = dma_async_device_register(engine);
+ if (ret < 0)
+ goto err_dma_free;
+
+ return 0;
+
+err_dma_free:
+ of_dma_controller_free(pdev->dev.of_node);
+err_pm_disable:
+ pm_runtime_disable(&pdev->dev);
+ return ret;
+}
+
+static int rcar_dmac_remove(struct platform_device *pdev)
+{
+ struct rcar_dmac *dmac = platform_get_drvdata(pdev);
+
+ of_dma_controller_free(pdev->dev.of_node);
+ dma_async_device_unregister(&dmac->engine);
+
+ pm_runtime_disable(&pdev->dev);
+
+ return 0;
+}
+
+static void rcar_dmac_shutdown(struct platform_device *pdev)
+{
+ struct rcar_dmac *dmac = platform_get_drvdata(pdev);
+
+ rcar_dmac_stop_all_chan(dmac);
+}
+
+static const struct rcar_dmac_of_data rcar_dmac_data = {
+ .chan_offset_base = 0x8000,
+ .chan_offset_stride = 0x80,
+};
+
+static const struct rcar_dmac_of_data rcar_gen4_dmac_data = {
+ .chan_offset_base = 0x0,
+ .chan_offset_stride = 0x1000,
+};
+
+static const struct of_device_id rcar_dmac_of_ids[] = {
+ {
+ .compatible = "renesas,rcar-dmac",
+ .data = &rcar_dmac_data,
+ }, {
+ .compatible = "renesas,rcar-gen4-dmac",
+ .data = &rcar_gen4_dmac_data,
+ }, {
+ .compatible = "renesas,dmac-r8a779a0",
+ .data = &rcar_gen4_dmac_data,
+ },
+ { /* Sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, rcar_dmac_of_ids);
+
+static struct platform_driver rcar_dmac_driver = {
+ .driver = {
+ .pm = &rcar_dmac_pm,
+ .name = "rcar-dmac",
+ .of_match_table = rcar_dmac_of_ids,
+ },
+ .probe = rcar_dmac_probe,
+ .remove = rcar_dmac_remove,
+ .shutdown = rcar_dmac_shutdown,
+};
+
+module_platform_driver(rcar_dmac_driver);
+
+MODULE_DESCRIPTION("R-Car Gen2 DMA Controller Driver");
+MODULE_AUTHOR("Laurent Pinchart <laurent.pinchart@ideasonboard.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/dma/sh/rz-dmac.c b/drivers/dma/sh/rz-dmac.c
new file mode 100644
index 000000000..2967141f4
--- /dev/null
+++ b/drivers/dma/sh/rz-dmac.c
@@ -0,0 +1,1003 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Renesas RZ/G2L DMA Controller Driver
+ *
+ * Based on imx-dma.c
+ *
+ * Copyright (C) 2021 Renesas Electronics Corp.
+ * Copyright 2010 Sascha Hauer, Pengutronix <s.hauer@pengutronix.de>
+ * Copyright 2012 Javier Martin, Vista Silicon <javier.martin@vista-silicon.com>
+ */
+
+#include <linux/bitfield.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmaengine.h>
+#include <linux/interrupt.h>
+#include <linux/iopoll.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_dma.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+
+#include "../dmaengine.h"
+#include "../virt-dma.h"
+
+enum rz_dmac_prep_type {
+ RZ_DMAC_DESC_MEMCPY,
+ RZ_DMAC_DESC_SLAVE_SG,
+};
+
+struct rz_lmdesc {
+ u32 header;
+ u32 sa;
+ u32 da;
+ u32 tb;
+ u32 chcfg;
+ u32 chitvl;
+ u32 chext;
+ u32 nxla;
+};
+
+struct rz_dmac_desc {
+ struct virt_dma_desc vd;
+ dma_addr_t src;
+ dma_addr_t dest;
+ size_t len;
+ struct list_head node;
+ enum dma_transfer_direction direction;
+ enum rz_dmac_prep_type type;
+ /* For slave sg */
+ struct scatterlist *sg;
+ unsigned int sgcount;
+};
+
+#define to_rz_dmac_desc(d) container_of(d, struct rz_dmac_desc, vd)
+
+struct rz_dmac_chan {
+ struct virt_dma_chan vc;
+ void __iomem *ch_base;
+ void __iomem *ch_cmn_base;
+ unsigned int index;
+ int irq;
+ struct rz_dmac_desc *desc;
+ int descs_allocated;
+
+ enum dma_slave_buswidth src_word_size;
+ enum dma_slave_buswidth dst_word_size;
+ dma_addr_t src_per_address;
+ dma_addr_t dst_per_address;
+
+ u32 chcfg;
+ u32 chctrl;
+ int mid_rid;
+
+ struct list_head ld_free;
+ struct list_head ld_queue;
+ struct list_head ld_active;
+
+ struct {
+ struct rz_lmdesc *base;
+ struct rz_lmdesc *head;
+ struct rz_lmdesc *tail;
+ dma_addr_t base_dma;
+ } lmdesc;
+};
+
+#define to_rz_dmac_chan(c) container_of(c, struct rz_dmac_chan, vc.chan)
+
+struct rz_dmac {
+ struct dma_device engine;
+ struct device *dev;
+ void __iomem *base;
+ void __iomem *ext_base;
+
+ unsigned int n_channels;
+ struct rz_dmac_chan *channels;
+
+ DECLARE_BITMAP(modules, 1024);
+};
+
+#define to_rz_dmac(d) container_of(d, struct rz_dmac, engine)
+
+/*
+ * -----------------------------------------------------------------------------
+ * Registers
+ */
+
+#define CHSTAT 0x0024
+#define CHCTRL 0x0028
+#define CHCFG 0x002c
+#define NXLA 0x0038
+
+#define DCTRL 0x0000
+
+#define EACH_CHANNEL_OFFSET 0x0040
+#define CHANNEL_0_7_OFFSET 0x0000
+#define CHANNEL_0_7_COMMON_BASE 0x0300
+#define CHANNEL_8_15_OFFSET 0x0400
+#define CHANNEL_8_15_COMMON_BASE 0x0700
+
+#define CHSTAT_ER BIT(4)
+#define CHSTAT_EN BIT(0)
+
+#define CHCTRL_CLRINTMSK BIT(17)
+#define CHCTRL_CLRSUS BIT(9)
+#define CHCTRL_CLRTC BIT(6)
+#define CHCTRL_CLREND BIT(5)
+#define CHCTRL_CLRRQ BIT(4)
+#define CHCTRL_SWRST BIT(3)
+#define CHCTRL_STG BIT(2)
+#define CHCTRL_CLREN BIT(1)
+#define CHCTRL_SETEN BIT(0)
+#define CHCTRL_DEFAULT (CHCTRL_CLRINTMSK | CHCTRL_CLRSUS | \
+ CHCTRL_CLRTC | CHCTRL_CLREND | \
+ CHCTRL_CLRRQ | CHCTRL_SWRST | \
+ CHCTRL_CLREN)
+
+#define CHCFG_DMS BIT(31)
+#define CHCFG_DEM BIT(24)
+#define CHCFG_DAD BIT(21)
+#define CHCFG_SAD BIT(20)
+#define CHCFG_REQD BIT(3)
+#define CHCFG_SEL(bits) ((bits) & 0x07)
+#define CHCFG_MEM_COPY (0x80400008)
+#define CHCFG_FILL_DDS_MASK GENMASK(19, 16)
+#define CHCFG_FILL_SDS_MASK GENMASK(15, 12)
+#define CHCFG_FILL_TM(a) (((a) & BIT(5)) << 22)
+#define CHCFG_FILL_AM(a) (((a) & GENMASK(4, 2)) << 6)
+#define CHCFG_FILL_LVL(a) (((a) & BIT(1)) << 5)
+#define CHCFG_FILL_HIEN(a) (((a) & BIT(0)) << 5)
+
+#define MID_RID_MASK GENMASK(9, 0)
+#define CHCFG_MASK GENMASK(15, 10)
+#define CHCFG_DS_INVALID 0xFF
+#define DCTRL_LVINT BIT(1)
+#define DCTRL_PR BIT(0)
+#define DCTRL_DEFAULT (DCTRL_LVINT | DCTRL_PR)
+
+/* LINK MODE DESCRIPTOR */
+#define HEADER_LV BIT(0)
+
+#define RZ_DMAC_MAX_CHAN_DESCRIPTORS 16
+#define RZ_DMAC_MAX_CHANNELS 16
+#define DMAC_NR_LMDESC 64
+
+/*
+ * -----------------------------------------------------------------------------
+ * Device access
+ */
+
+static void rz_dmac_writel(struct rz_dmac *dmac, unsigned int val,
+ unsigned int offset)
+{
+ writel(val, dmac->base + offset);
+}
+
+static void rz_dmac_ext_writel(struct rz_dmac *dmac, unsigned int val,
+ unsigned int offset)
+{
+ writel(val, dmac->ext_base + offset);
+}
+
+static u32 rz_dmac_ext_readl(struct rz_dmac *dmac, unsigned int offset)
+{
+ return readl(dmac->ext_base + offset);
+}
+
+static void rz_dmac_ch_writel(struct rz_dmac_chan *channel, unsigned int val,
+ unsigned int offset, int which)
+{
+ if (which)
+ writel(val, channel->ch_base + offset);
+ else
+ writel(val, channel->ch_cmn_base + offset);
+}
+
+static u32 rz_dmac_ch_readl(struct rz_dmac_chan *channel,
+ unsigned int offset, int which)
+{
+ if (which)
+ return readl(channel->ch_base + offset);
+ else
+ return readl(channel->ch_cmn_base + offset);
+}
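+
+/*
+ * In the two channel accessors above, the "which" argument selects the
+ * register block: a non-zero value targets the per-channel registers at
+ * ch_base, zero targets the channel-group common registers at ch_cmn_base.
+ */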
+
+/*
+ * -----------------------------------------------------------------------------
+ * Initialization
+ */
+
+static void rz_lmdesc_setup(struct rz_dmac_chan *channel,
+ struct rz_lmdesc *lmdesc)
+{
+ u32 nxla;
+
+ channel->lmdesc.base = lmdesc;
+ channel->lmdesc.head = lmdesc;
+ channel->lmdesc.tail = lmdesc;
+ nxla = channel->lmdesc.base_dma;
+ while (lmdesc < (channel->lmdesc.base + (DMAC_NR_LMDESC - 1))) {
+ lmdesc->header = 0;
+ nxla += sizeof(*lmdesc);
+ lmdesc->nxla = nxla;
+ lmdesc++;
+ }
+
+ lmdesc->header = 0;
+ lmdesc->nxla = channel->lmdesc.base_dma;
+}
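+
+/*
+ * The setup above links the DMAC_NR_LMDESC link-mode descriptors into a ring:
+ * each descriptor's nxla field holds the bus address of the following one,
+ * and the last descriptor's nxla points back at lmdesc.base_dma, so the
+ * hardware can keep following the chain as new descriptors are filled in.
+ */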
+
+/*
+ * -----------------------------------------------------------------------------
+ * Descriptors preparation
+ */
+
+static void rz_dmac_lmdesc_recycle(struct rz_dmac_chan *channel)
+{
+ struct rz_lmdesc *lmdesc = channel->lmdesc.head;
+
+ while (!(lmdesc->header & HEADER_LV)) {
+ lmdesc->header = 0;
+ lmdesc++;
+ if (lmdesc >= (channel->lmdesc.base + DMAC_NR_LMDESC))
+ lmdesc = channel->lmdesc.base;
+ }
+ channel->lmdesc.head = lmdesc;
+}
+
+static void rz_dmac_enable_hw(struct rz_dmac_chan *channel)
+{
+ struct dma_chan *chan = &channel->vc.chan;
+ struct rz_dmac *dmac = to_rz_dmac(chan->device);
+ unsigned long flags;
+ u32 nxla;
+ u32 chctrl;
+ u32 chstat;
+
+ dev_dbg(dmac->dev, "%s channel %d\n", __func__, channel->index);
+
+ local_irq_save(flags);
+
+ rz_dmac_lmdesc_recycle(channel);
+
+ nxla = channel->lmdesc.base_dma +
+ (sizeof(struct rz_lmdesc) * (channel->lmdesc.head -
+ channel->lmdesc.base));
+
+ chstat = rz_dmac_ch_readl(channel, CHSTAT, 1);
+ if (!(chstat & CHSTAT_EN)) {
+ chctrl = (channel->chctrl | CHCTRL_SETEN);
+ rz_dmac_ch_writel(channel, nxla, NXLA, 1);
+ rz_dmac_ch_writel(channel, channel->chcfg, CHCFG, 1);
+ rz_dmac_ch_writel(channel, CHCTRL_SWRST, CHCTRL, 1);
+ rz_dmac_ch_writel(channel, chctrl, CHCTRL, 1);
+ }
+
+ local_irq_restore(flags);
+}
+
+static void rz_dmac_disable_hw(struct rz_dmac_chan *channel)
+{
+ struct dma_chan *chan = &channel->vc.chan;
+ struct rz_dmac *dmac = to_rz_dmac(chan->device);
+ unsigned long flags;
+
+ dev_dbg(dmac->dev, "%s channel %d\n", __func__, channel->index);
+
+ local_irq_save(flags);
+ rz_dmac_ch_writel(channel, CHCTRL_DEFAULT, CHCTRL, 1);
+ local_irq_restore(flags);
+}
+
+static void rz_dmac_set_dmars_register(struct rz_dmac *dmac, int nr, u32 dmars)
+{
+ u32 dmars_offset = (nr / 2) * 4;
+ u32 shift = (nr % 2) * 16;
+ u32 dmars32;
+
+ dmars32 = rz_dmac_ext_readl(dmac, dmars_offset);
+ dmars32 &= ~(0xffff << shift);
+ dmars32 |= dmars << shift;
+
+ rz_dmac_ext_writel(dmac, dmars32, dmars_offset);
+}
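+
+/*
+ * Worked example for the DMARS helper above: the registers pack two 16-bit
+ * request settings per 32-bit word, so channel 5 yields
+ * dmars_offset = (5 / 2) * 4 = 8 and shift = (5 % 2) * 16 = 16, i.e. the
+ * upper half-word of the third DMARS register.
+ */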
+
+static void rz_dmac_prepare_desc_for_memcpy(struct rz_dmac_chan *channel)
+{
+ struct dma_chan *chan = &channel->vc.chan;
+ struct rz_dmac *dmac = to_rz_dmac(chan->device);
+ struct rz_lmdesc *lmdesc = channel->lmdesc.tail;
+ struct rz_dmac_desc *d = channel->desc;
+ u32 chcfg = CHCFG_MEM_COPY;
+
+ /* prepare descriptor */
+ lmdesc->sa = d->src;
+ lmdesc->da = d->dest;
+ lmdesc->tb = d->len;
+ lmdesc->chcfg = chcfg;
+ lmdesc->chitvl = 0;
+ lmdesc->chext = 0;
+ lmdesc->header = HEADER_LV;
+
+ rz_dmac_set_dmars_register(dmac, channel->index, 0);
+
+ channel->chcfg = chcfg;
+ channel->chctrl = CHCTRL_STG | CHCTRL_SETEN;
+}
+
+static void rz_dmac_prepare_descs_for_slave_sg(struct rz_dmac_chan *channel)
+{
+ struct dma_chan *chan = &channel->vc.chan;
+ struct rz_dmac *dmac = to_rz_dmac(chan->device);
+ struct rz_dmac_desc *d = channel->desc;
+ struct scatterlist *sg, *sgl = d->sg;
+ struct rz_lmdesc *lmdesc;
+ unsigned int i, sg_len = d->sgcount;
+
+ channel->chcfg |= CHCFG_SEL(channel->index) | CHCFG_DEM | CHCFG_DMS;
+
+ if (d->direction == DMA_DEV_TO_MEM) {
+ channel->chcfg |= CHCFG_SAD;
+ channel->chcfg &= ~CHCFG_REQD;
+ } else {
+ channel->chcfg |= CHCFG_DAD | CHCFG_REQD;
+ }
+
+ lmdesc = channel->lmdesc.tail;
+
+ for (i = 0, sg = sgl; i < sg_len; i++, sg = sg_next(sg)) {
+ if (d->direction == DMA_DEV_TO_MEM) {
+ lmdesc->sa = channel->src_per_address;
+ lmdesc->da = sg_dma_address(sg);
+ } else {
+ lmdesc->sa = sg_dma_address(sg);
+ lmdesc->da = channel->dst_per_address;
+ }
+
+ lmdesc->tb = sg_dma_len(sg);
+ lmdesc->chitvl = 0;
+ lmdesc->chext = 0;
+ if (i == (sg_len - 1)) {
+ lmdesc->chcfg = (channel->chcfg & ~CHCFG_DEM);
+ lmdesc->header = HEADER_LV;
+ } else {
+ lmdesc->chcfg = channel->chcfg;
+ lmdesc->header = HEADER_LV;
+ }
+ if (++lmdesc >= (channel->lmdesc.base + DMAC_NR_LMDESC))
+ lmdesc = channel->lmdesc.base;
+ }
+
+ channel->lmdesc.tail = lmdesc;
+
+ rz_dmac_set_dmars_register(dmac, channel->index, channel->mid_rid);
+ channel->chctrl = CHCTRL_SETEN;
+}
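+
+/*
+ * Note that in the loop above only the last scatterlist entry has CHCFG_DEM
+ * cleared; the intermediate chunks keep it set, so (assuming DEM masks the
+ * per-descriptor end interrupt, as the IRQ handling below suggests) only the
+ * final chunk signals completion of the whole transfer.
+ */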
+
+static int rz_dmac_xfer_desc(struct rz_dmac_chan *chan)
+{
+ struct rz_dmac_desc *d = chan->desc;
+ struct virt_dma_desc *vd;
+
+ vd = vchan_next_desc(&chan->vc);
+ if (!vd)
+ return 0;
+
+ list_del(&vd->node);
+
+ switch (d->type) {
+ case RZ_DMAC_DESC_MEMCPY:
+ rz_dmac_prepare_desc_for_memcpy(chan);
+ break;
+
+ case RZ_DMAC_DESC_SLAVE_SG:
+ rz_dmac_prepare_descs_for_slave_sg(chan);
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ rz_dmac_enable_hw(chan);
+
+ return 0;
+}
+
+/*
+ * -----------------------------------------------------------------------------
+ * DMA engine operations
+ */
+
+static int rz_dmac_alloc_chan_resources(struct dma_chan *chan)
+{
+ struct rz_dmac_chan *channel = to_rz_dmac_chan(chan);
+
+ while (channel->descs_allocated < RZ_DMAC_MAX_CHAN_DESCRIPTORS) {
+ struct rz_dmac_desc *desc;
+
+ desc = kzalloc(sizeof(*desc), GFP_KERNEL);
+ if (!desc)
+ break;
+
+ list_add_tail(&desc->node, &channel->ld_free);
+ channel->descs_allocated++;
+ }
+
+ if (!channel->descs_allocated)
+ return -ENOMEM;
+
+ return channel->descs_allocated;
+}
+
+static void rz_dmac_free_chan_resources(struct dma_chan *chan)
+{
+ struct rz_dmac_chan *channel = to_rz_dmac_chan(chan);
+ struct rz_dmac *dmac = to_rz_dmac(chan->device);
+ struct rz_lmdesc *lmdesc = channel->lmdesc.base;
+ struct rz_dmac_desc *desc, *_desc;
+ unsigned long flags;
+ unsigned int i;
+
+ spin_lock_irqsave(&channel->vc.lock, flags);
+
+ for (i = 0; i < DMAC_NR_LMDESC; i++)
+ lmdesc[i].header = 0;
+
+ rz_dmac_disable_hw(channel);
+ list_splice_tail_init(&channel->ld_active, &channel->ld_free);
+ list_splice_tail_init(&channel->ld_queue, &channel->ld_free);
+
+ if (channel->mid_rid >= 0) {
+ clear_bit(channel->mid_rid, dmac->modules);
+ channel->mid_rid = -EINVAL;
+ }
+
+ spin_unlock_irqrestore(&channel->vc.lock, flags);
+
+ list_for_each_entry_safe(desc, _desc, &channel->ld_free, node) {
+ kfree(desc);
+ channel->descs_allocated--;
+ }
+
+ INIT_LIST_HEAD(&channel->ld_free);
+ vchan_free_chan_resources(&channel->vc);
+}
+
+static struct dma_async_tx_descriptor *
+rz_dmac_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
+ size_t len, unsigned long flags)
+{
+ struct rz_dmac_chan *channel = to_rz_dmac_chan(chan);
+ struct rz_dmac *dmac = to_rz_dmac(chan->device);
+ struct rz_dmac_desc *desc;
+
+ dev_dbg(dmac->dev, "%s channel: %d src=0x%pad dst=0x%pad len=%zu\n",
+ __func__, channel->index, &src, &dest, len);
+
+ if (list_empty(&channel->ld_free))
+ return NULL;
+
+ desc = list_first_entry(&channel->ld_free, struct rz_dmac_desc, node);
+
+ desc->type = RZ_DMAC_DESC_MEMCPY;
+ desc->src = src;
+ desc->dest = dest;
+ desc->len = len;
+ desc->direction = DMA_MEM_TO_MEM;
+
+ list_move_tail(channel->ld_free.next, &channel->ld_queue);
+ return vchan_tx_prep(&channel->vc, &desc->vd, flags);
+}
+
+static struct dma_async_tx_descriptor *
+rz_dmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
+ unsigned int sg_len,
+ enum dma_transfer_direction direction,
+ unsigned long flags, void *context)
+{
+ struct rz_dmac_chan *channel = to_rz_dmac_chan(chan);
+ struct rz_dmac_desc *desc;
+ struct scatterlist *sg;
+ int dma_length = 0;
+ int i = 0;
+
+ if (list_empty(&channel->ld_free))
+ return NULL;
+
+ desc = list_first_entry(&channel->ld_free, struct rz_dmac_desc, node);
+
+ for_each_sg(sgl, sg, sg_len, i) {
+ dma_length += sg_dma_len(sg);
+ }
+
+ desc->type = RZ_DMAC_DESC_SLAVE_SG;
+ desc->sg = sgl;
+ desc->sgcount = sg_len;
+ desc->len = dma_length;
+ desc->direction = direction;
+
+ if (direction == DMA_DEV_TO_MEM)
+ desc->src = channel->src_per_address;
+ else
+ desc->dest = channel->dst_per_address;
+
+ list_move_tail(channel->ld_free.next, &channel->ld_queue);
+ return vchan_tx_prep(&channel->vc, &desc->vd, flags);
+}
+
+static int rz_dmac_terminate_all(struct dma_chan *chan)
+{
+ struct rz_dmac_chan *channel = to_rz_dmac_chan(chan);
+ unsigned long flags;
+ LIST_HEAD(head);
+
+ rz_dmac_disable_hw(channel);
+ spin_lock_irqsave(&channel->vc.lock, flags);
+ list_splice_tail_init(&channel->ld_active, &channel->ld_free);
+ list_splice_tail_init(&channel->ld_queue, &channel->ld_free);
+ spin_unlock_irqrestore(&channel->vc.lock, flags);
+ vchan_get_all_descriptors(&channel->vc, &head);
+ vchan_dma_desc_free_list(&channel->vc, &head);
+
+ return 0;
+}
+
+static void rz_dmac_issue_pending(struct dma_chan *chan)
+{
+ struct rz_dmac_chan *channel = to_rz_dmac_chan(chan);
+ struct rz_dmac *dmac = to_rz_dmac(chan->device);
+ struct rz_dmac_desc *desc;
+ unsigned long flags;
+
+ spin_lock_irqsave(&channel->vc.lock, flags);
+
+ if (!list_empty(&channel->ld_queue)) {
+ desc = list_first_entry(&channel->ld_queue,
+ struct rz_dmac_desc, node);
+ channel->desc = desc;
+ if (vchan_issue_pending(&channel->vc)) {
+ if (rz_dmac_xfer_desc(channel) < 0)
+ dev_warn(dmac->dev, "ch: %d couldn't issue DMA xfer\n",
+ channel->index);
+ else
+ list_move_tail(channel->ld_queue.next,
+ &channel->ld_active);
+ }
+ }
+
+ spin_unlock_irqrestore(&channel->vc.lock, flags);
+}
+
+static u8 rz_dmac_ds_to_val_mapping(enum dma_slave_buswidth ds)
+{
+ u8 i;
+ static const enum dma_slave_buswidth ds_lut[] = {
+ DMA_SLAVE_BUSWIDTH_1_BYTE,
+ DMA_SLAVE_BUSWIDTH_2_BYTES,
+ DMA_SLAVE_BUSWIDTH_4_BYTES,
+ DMA_SLAVE_BUSWIDTH_8_BYTES,
+ DMA_SLAVE_BUSWIDTH_16_BYTES,
+ DMA_SLAVE_BUSWIDTH_32_BYTES,
+ DMA_SLAVE_BUSWIDTH_64_BYTES,
+ DMA_SLAVE_BUSWIDTH_128_BYTES,
+ };
+
+ for (i = 0; i < ARRAY_SIZE(ds_lut); i++) {
+ if (ds_lut[i] == ds)
+ return i;
+ }
+
+ return CHCFG_DS_INVALID;
+}
+
+static int rz_dmac_config(struct dma_chan *chan,
+ struct dma_slave_config *config)
+{
+ struct rz_dmac_chan *channel = to_rz_dmac_chan(chan);
+ u32 val;
+
+ channel->src_per_address = config->src_addr;
+ channel->src_word_size = config->src_addr_width;
+ channel->dst_per_address = config->dst_addr;
+ channel->dst_word_size = config->dst_addr_width;
+
+ val = rz_dmac_ds_to_val_mapping(config->dst_addr_width);
+ if (val == CHCFG_DS_INVALID)
+ return -EINVAL;
+
+ channel->chcfg &= ~CHCFG_FILL_DDS_MASK;
+ channel->chcfg |= FIELD_PREP(CHCFG_FILL_DDS_MASK, val);
+
+ val = rz_dmac_ds_to_val_mapping(config->src_addr_width);
+ if (val == CHCFG_DS_INVALID)
+ return -EINVAL;
+
+ channel->chcfg &= ~CHCFG_FILL_SDS_MASK;
+ channel->chcfg |= FIELD_PREP(CHCFG_FILL_SDS_MASK, val);
+
+ return 0;
+}
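+
+/*
+ * Worked example for the config path above: a dma_slave_config with 4-byte
+ * source and destination bus widths maps to index 2 in
+ * rz_dmac_ds_to_val_mapping(), so both CHCFG_FILL_SDS_MASK (bits 15:12) and
+ * CHCFG_FILL_DDS_MASK (bits 19:16) end up programmed with 0x2.
+ */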
+
+static void rz_dmac_virt_desc_free(struct virt_dma_desc *vd)
+{
+ /*
+ * Placeholder.
+ * Descriptors are allocated in alloc_chan_resources and freed in
+ * free_chan_resources. The lists are used to manage the descriptors and
+ * avoid any memory allocation/free during DMA read/write.
+ */
+}
+
+static void rz_dmac_device_synchronize(struct dma_chan *chan)
+{
+ struct rz_dmac_chan *channel = to_rz_dmac_chan(chan);
+ struct rz_dmac *dmac = to_rz_dmac(chan->device);
+ u32 chstat;
+ int ret;
+
+ ret = read_poll_timeout(rz_dmac_ch_readl, chstat, !(chstat & CHSTAT_EN),
+ 100, 100000, false, channel, CHSTAT, 1);
+ if (ret < 0)
+ dev_warn(dmac->dev, "DMA Timeout");
+
+ rz_dmac_set_dmars_register(dmac, channel->index, 0);
+}
+
+/*
+ * -----------------------------------------------------------------------------
+ * IRQ handling
+ */
+
+static void rz_dmac_irq_handle_channel(struct rz_dmac_chan *channel)
+{
+ struct dma_chan *chan = &channel->vc.chan;
+ struct rz_dmac *dmac = to_rz_dmac(chan->device);
+ u32 chstat, chctrl;
+
+ chstat = rz_dmac_ch_readl(channel, CHSTAT, 1);
+ if (chstat & CHSTAT_ER) {
+ dev_err(dmac->dev, "DMAC err CHSTAT_%d = %08X\n",
+ channel->index, chstat);
+ rz_dmac_ch_writel(channel, CHCTRL_DEFAULT, CHCTRL, 1);
+ goto done;
+ }
+
+ chctrl = rz_dmac_ch_readl(channel, CHCTRL, 1);
+ rz_dmac_ch_writel(channel, chctrl | CHCTRL_CLREND, CHCTRL, 1);
+done:
+ return;
+}
+
+static irqreturn_t rz_dmac_irq_handler(int irq, void *dev_id)
+{
+ struct rz_dmac_chan *channel = dev_id;
+
+ if (channel) {
+ rz_dmac_irq_handle_channel(channel);
+ return IRQ_WAKE_THREAD;
+ }
+ /* handle DMAERR irq */
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t rz_dmac_irq_handler_thread(int irq, void *dev_id)
+{
+ struct rz_dmac_chan *channel = dev_id;
+ struct rz_dmac_desc *desc = NULL;
+ unsigned long flags;
+
+ spin_lock_irqsave(&channel->vc.lock, flags);
+
+ if (list_empty(&channel->ld_active)) {
+ /* Someone might have called terminate all */
+ goto out;
+ }
+
+ desc = list_first_entry(&channel->ld_active, struct rz_dmac_desc, node);
+ vchan_cookie_complete(&desc->vd);
+ list_move_tail(channel->ld_active.next, &channel->ld_free);
+ if (!list_empty(&channel->ld_queue)) {
+ desc = list_first_entry(&channel->ld_queue, struct rz_dmac_desc,
+ node);
+ channel->desc = desc;
+ if (rz_dmac_xfer_desc(channel) == 0)
+ list_move_tail(channel->ld_queue.next, &channel->ld_active);
+ }
+out:
+ spin_unlock_irqrestore(&channel->vc.lock, flags);
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * -----------------------------------------------------------------------------
+ * OF xlate and channel filter
+ */
+
+static bool rz_dmac_chan_filter(struct dma_chan *chan, void *arg)
+{
+ struct rz_dmac_chan *channel = to_rz_dmac_chan(chan);
+ struct rz_dmac *dmac = to_rz_dmac(chan->device);
+ struct of_phandle_args *dma_spec = arg;
+ u32 ch_cfg;
+
+ channel->mid_rid = dma_spec->args[0] & MID_RID_MASK;
+ ch_cfg = (dma_spec->args[0] & CHCFG_MASK) >> 10;
+ channel->chcfg = CHCFG_FILL_TM(ch_cfg) | CHCFG_FILL_AM(ch_cfg) |
+ CHCFG_FILL_LVL(ch_cfg) | CHCFG_FILL_HIEN(ch_cfg);
+
+ return !test_and_set_bit(channel->mid_rid, dmac->modules);
+}
+
+static struct dma_chan *rz_dmac_of_xlate(struct of_phandle_args *dma_spec,
+ struct of_dma *ofdma)
+{
+ dma_cap_mask_t mask;
+
+ if (dma_spec->args_count != 1)
+ return NULL;
+
+ /* Only slave DMA channels can be allocated via DT */
+ dma_cap_zero(mask);
+ dma_cap_set(DMA_SLAVE, mask);
+
+ return dma_request_channel(mask, rz_dmac_chan_filter, dma_spec);
+}
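+
+/*
+ * The single dmas cell decoded by the filter above carries the MID/RID value
+ * in bits 9:0 and the TM/AM/LVL/HIEN configuration in bits 15:10. A
+ * hypothetical client node (the specifier values are purely illustrative)
+ * would look roughly like this:
+ *
+ *     ssi0: ssi@10049c00 {
+ *             ...
+ *             dmas = <&dmac0 0x2655>, <&dmac0 0x2656>;
+ *             dma-names = "tx", "rx";
+ *     };
+ */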
+
+/*
+ * -----------------------------------------------------------------------------
+ * Probe and remove
+ */
+
+static int rz_dmac_chan_probe(struct rz_dmac *dmac,
+ struct rz_dmac_chan *channel,
+ unsigned int index)
+{
+ struct platform_device *pdev = to_platform_device(dmac->dev);
+ struct rz_lmdesc *lmdesc;
+ char pdev_irqname[5];
+ char *irqname;
+ int ret;
+
+ channel->index = index;
+ channel->mid_rid = -EINVAL;
+
+ /* Request the channel interrupt. */
+ sprintf(pdev_irqname, "ch%u", index);
+ channel->irq = platform_get_irq_byname(pdev, pdev_irqname);
+ if (channel->irq < 0)
+ return channel->irq;
+
+ irqname = devm_kasprintf(dmac->dev, GFP_KERNEL, "%s:%u",
+ dev_name(dmac->dev), index);
+ if (!irqname)
+ return -ENOMEM;
+
+ ret = devm_request_threaded_irq(dmac->dev, channel->irq,
+ rz_dmac_irq_handler,
+ rz_dmac_irq_handler_thread, 0,
+ irqname, channel);
+ if (ret) {
+ dev_err(dmac->dev, "failed to request IRQ %u (%d)\n",
+ channel->irq, ret);
+ return ret;
+ }
+
+ /* Set io base address for each channel */
+ if (index < 8) {
+ channel->ch_base = dmac->base + CHANNEL_0_7_OFFSET +
+ EACH_CHANNEL_OFFSET * index;
+ channel->ch_cmn_base = dmac->base + CHANNEL_0_7_COMMON_BASE;
+ } else {
+ channel->ch_base = dmac->base + CHANNEL_8_15_OFFSET +
+ EACH_CHANNEL_OFFSET * (index - 8);
+ channel->ch_cmn_base = dmac->base + CHANNEL_8_15_COMMON_BASE;
+ }
+
+ /* Allocate descriptors */
+ lmdesc = dma_alloc_coherent(&pdev->dev,
+ sizeof(struct rz_lmdesc) * DMAC_NR_LMDESC,
+ &channel->lmdesc.base_dma, GFP_KERNEL);
+ if (!lmdesc) {
+ dev_err(&pdev->dev, "Can't allocate memory (lmdesc)\n");
+ return -ENOMEM;
+ }
+ rz_lmdesc_setup(channel, lmdesc);
+
+ /* Initialize register for each channel */
+ rz_dmac_ch_writel(channel, CHCTRL_DEFAULT, CHCTRL, 1);
+
+ channel->vc.desc_free = rz_dmac_virt_desc_free;
+ vchan_init(&channel->vc, &dmac->engine);
+ INIT_LIST_HEAD(&channel->ld_queue);
+ INIT_LIST_HEAD(&channel->ld_free);
+ INIT_LIST_HEAD(&channel->ld_active);
+
+ return 0;
+}
+
+static int rz_dmac_parse_of(struct device *dev, struct rz_dmac *dmac)
+{
+ struct device_node *np = dev->of_node;
+ int ret;
+
+ ret = of_property_read_u32(np, "dma-channels", &dmac->n_channels);
+ if (ret < 0) {
+ dev_err(dev, "unable to read dma-channels property\n");
+ return ret;
+ }
+
+ if (!dmac->n_channels || dmac->n_channels > RZ_DMAC_MAX_CHANNELS) {
+ dev_err(dev, "invalid number of channels %u\n", dmac->n_channels);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int rz_dmac_probe(struct platform_device *pdev)
+{
+ const char *irqname = "error";
+ struct dma_device *engine;
+ struct rz_dmac *dmac;
+ int channel_num;
+ unsigned int i;
+ int ret;
+ int irq;
+
+ dmac = devm_kzalloc(&pdev->dev, sizeof(*dmac), GFP_KERNEL);
+ if (!dmac)
+ return -ENOMEM;
+
+ dmac->dev = &pdev->dev;
+ platform_set_drvdata(pdev, dmac);
+
+ ret = rz_dmac_parse_of(&pdev->dev, dmac);
+ if (ret < 0)
+ return ret;
+
+ dmac->channels = devm_kcalloc(&pdev->dev, dmac->n_channels,
+ sizeof(*dmac->channels), GFP_KERNEL);
+ if (!dmac->channels)
+ return -ENOMEM;
+
+ /* Request resources */
+ dmac->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(dmac->base))
+ return PTR_ERR(dmac->base);
+
+ dmac->ext_base = devm_platform_ioremap_resource(pdev, 1);
+ if (IS_ERR(dmac->ext_base))
+ return PTR_ERR(dmac->ext_base);
+
+ /* Register interrupt handler for error */
+ irq = platform_get_irq_byname(pdev, irqname);
+ if (irq < 0)
+ return irq;
+
+ ret = devm_request_irq(&pdev->dev, irq, rz_dmac_irq_handler, 0,
+ irqname, NULL);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to request IRQ %u (%d)\n",
+ irq, ret);
+ return ret;
+ }
+
+ /* Initialize the channels. */
+ INIT_LIST_HEAD(&dmac->engine.channels);
+
+ pm_runtime_enable(&pdev->dev);
+ ret = pm_runtime_resume_and_get(&pdev->dev);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "pm_runtime_resume_and_get failed\n");
+ goto err_pm_disable;
+ }
+
+ for (i = 0; i < dmac->n_channels; i++) {
+ ret = rz_dmac_chan_probe(dmac, &dmac->channels[i], i);
+ if (ret < 0)
+ goto err;
+ }
+
+ /* Register the DMAC as a DMA provider for DT. */
+ ret = of_dma_controller_register(pdev->dev.of_node, rz_dmac_of_xlate,
+ NULL);
+ if (ret < 0)
+ goto err;
+
+ /* Register the DMA engine device. */
+ engine = &dmac->engine;
+ dma_cap_set(DMA_SLAVE, engine->cap_mask);
+ dma_cap_set(DMA_MEMCPY, engine->cap_mask);
+ rz_dmac_writel(dmac, DCTRL_DEFAULT, CHANNEL_0_7_COMMON_BASE + DCTRL);
+ rz_dmac_writel(dmac, DCTRL_DEFAULT, CHANNEL_8_15_COMMON_BASE + DCTRL);
+
+ engine->dev = &pdev->dev;
+
+ engine->device_alloc_chan_resources = rz_dmac_alloc_chan_resources;
+ engine->device_free_chan_resources = rz_dmac_free_chan_resources;
+ engine->device_tx_status = dma_cookie_status;
+ engine->device_prep_slave_sg = rz_dmac_prep_slave_sg;
+ engine->device_prep_dma_memcpy = rz_dmac_prep_dma_memcpy;
+ engine->device_config = rz_dmac_config;
+ engine->device_terminate_all = rz_dmac_terminate_all;
+ engine->device_issue_pending = rz_dmac_issue_pending;
+ engine->device_synchronize = rz_dmac_device_synchronize;
+
+ engine->copy_align = DMAENGINE_ALIGN_1_BYTE;
+ dma_set_max_seg_size(engine->dev, U32_MAX);
+
+ ret = dma_async_device_register(engine);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "unable to register\n");
+ goto dma_register_err;
+ }
+ return 0;
+
+dma_register_err:
+ of_dma_controller_free(pdev->dev.of_node);
+err:
+ channel_num = i ? i - 1 : 0;
+ for (i = 0; i < channel_num; i++) {
+ struct rz_dmac_chan *channel = &dmac->channels[i];
+
+ dma_free_coherent(&pdev->dev,
+ sizeof(struct rz_lmdesc) * DMAC_NR_LMDESC,
+ channel->lmdesc.base,
+ channel->lmdesc.base_dma);
+ }
+
+ pm_runtime_put(&pdev->dev);
+err_pm_disable:
+ pm_runtime_disable(&pdev->dev);
+
+ return ret;
+}
+
+static int rz_dmac_remove(struct platform_device *pdev)
+{
+ struct rz_dmac *dmac = platform_get_drvdata(pdev);
+ unsigned int i;
+
+ for (i = 0; i < dmac->n_channels; i++) {
+ struct rz_dmac_chan *channel = &dmac->channels[i];
+
+ dma_free_coherent(&pdev->dev,
+ sizeof(struct rz_lmdesc) * DMAC_NR_LMDESC,
+ channel->lmdesc.base,
+ channel->lmdesc.base_dma);
+ }
+ of_dma_controller_free(pdev->dev.of_node);
+ dma_async_device_unregister(&dmac->engine);
+ pm_runtime_put(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
+
+ return 0;
+}
+
+static const struct of_device_id of_rz_dmac_match[] = {
+ { .compatible = "renesas,rz-dmac", },
+ { /* Sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, of_rz_dmac_match);
+
+static struct platform_driver rz_dmac_driver = {
+ .driver = {
+ .name = "rz-dmac",
+ .of_match_table = of_rz_dmac_match,
+ },
+ .probe = rz_dmac_probe,
+ .remove = rz_dmac_remove,
+};
+
+module_platform_driver(rz_dmac_driver);
+
+MODULE_DESCRIPTION("Renesas RZ/G2L DMA Controller Driver");
+MODULE_AUTHOR("Biju Das <biju.das.jz@bp.renesas.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/dma/sh/shdma-arm.h b/drivers/dma/sh/shdma-arm.h
new file mode 100644
index 000000000..7459f9a13
--- /dev/null
+++ b/drivers/dma/sh/shdma-arm.h
@@ -0,0 +1,48 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Renesas SuperH DMA Engine support
+ *
+ * Copyright (C) 2013 Renesas Electronics, Inc.
+ */
+
+#ifndef SHDMA_ARM_H
+#define SHDMA_ARM_H
+
+#include "shdma.h"
+
+/* Transmit sizes and respective CHCR register values */
+enum {
+ XMIT_SZ_8BIT = 0,
+ XMIT_SZ_16BIT = 1,
+ XMIT_SZ_32BIT = 2,
+ XMIT_SZ_64BIT = 7,
+ XMIT_SZ_128BIT = 3,
+ XMIT_SZ_256BIT = 4,
+ XMIT_SZ_512BIT = 5,
+};
+
+/* log2(size / 8) - used to calculate number of transfers */
+#define SH_DMAE_TS_SHIFT { \
+ [XMIT_SZ_8BIT] = 0, \
+ [XMIT_SZ_16BIT] = 1, \
+ [XMIT_SZ_32BIT] = 2, \
+ [XMIT_SZ_64BIT] = 3, \
+ [XMIT_SZ_128BIT] = 4, \
+ [XMIT_SZ_256BIT] = 5, \
+ [XMIT_SZ_512BIT] = 6, \
+}
+
+#define TS_LOW_BIT 0x3 /* --xx */
+#define TS_HI_BIT 0xc /* xx-- */
+
+#define TS_LOW_SHIFT (3)
+#define TS_HI_SHIFT (20 - 2) /* 2 bits for shifted low TS */
+
+#define TS_INDEX2VAL(i) \
+ ((((i) & TS_LOW_BIT) << TS_LOW_SHIFT) |\
+ (((i) & TS_HI_BIT) << TS_HI_SHIFT))
+
+#define CHCR_TX(xmit_sz) (DM_FIX | SM_INC | RS_ERS | TS_INDEX2VAL((xmit_sz)))
+#define CHCR_RX(xmit_sz) (DM_INC | SM_FIX | RS_ERS | TS_INDEX2VAL((xmit_sz)))
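+
+/*
+ * Worked example: XMIT_SZ_64BIT is encoded as 7, so TS_INDEX2VAL(7) expands
+ * to ((7 & 0x3) << 3) | ((7 & 0xc) << 18) = 0x18 | 0x100000 = 0x100018,
+ * which is the transfer-size contribution OR'ed into CHCR by the macros
+ * above.
+ */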
+
+#endif
diff --git a/drivers/dma/sh/shdma-base.c b/drivers/dma/sh/shdma-base.c
new file mode 100644
index 000000000..158e5e7de
--- /dev/null
+++ b/drivers/dma/sh/shdma-base.c
@@ -0,0 +1,1052 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Dmaengine driver base library for DMA controllers, found on SH-based SoCs
+ *
+ * extracted from shdma.c
+ *
+ * Copyright (C) 2011-2012 Guennadi Liakhovetski <g.liakhovetski@gmx.de>
+ * Copyright (C) 2009 Nobuhiro Iwamatsu <iwamatsu.nobuhiro@renesas.com>
+ * Copyright (C) 2009 Renesas Solutions, Inc. All rights reserved.
+ * Copyright (C) 2007 Freescale Semiconductor, Inc. All rights reserved.
+ */
+
+#include <linux/delay.h>
+#include <linux/shdma-base.h>
+#include <linux/dmaengine.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/pm_runtime.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+
+#include "../dmaengine.h"
+
+/* DMA descriptor control */
+enum shdma_desc_status {
+ DESC_IDLE,
+ DESC_PREPARED,
+ DESC_SUBMITTED,
+ DESC_COMPLETED, /* completed, have to call callback */
+ DESC_WAITING, /* callback called, waiting for ack / re-submit */
+};
+
+#define NR_DESCS_PER_CHANNEL 32
+
+#define to_shdma_chan(c) container_of(c, struct shdma_chan, dma_chan)
+#define to_shdma_dev(d) container_of(d, struct shdma_dev, dma_dev)
+
+/*
+ * For slave DMA we assume that there is a finite number of DMA slaves in the
+ * system, and that each such slave can only use a finite number of channels.
+ * We use slave channel IDs to make sure that no such slave channel ID is
+ * allocated more than once.
+ */
+static unsigned int slave_num = 256;
+module_param(slave_num, uint, 0444);
+
+/* A bitmask with slave_num bits */
+static unsigned long *shdma_slave_used;
+
+/* Called under spin_lock_irq(&schan->chan_lock) */
+static void shdma_chan_xfer_ld_queue(struct shdma_chan *schan)
+{
+ struct shdma_dev *sdev = to_shdma_dev(schan->dma_chan.device);
+ const struct shdma_ops *ops = sdev->ops;
+ struct shdma_desc *sdesc;
+
+ /* DMA work check */
+ if (ops->channel_busy(schan))
+ return;
+
+ /* Find the first not transferred descriptor */
+ list_for_each_entry(sdesc, &schan->ld_queue, node)
+ if (sdesc->mark == DESC_SUBMITTED) {
+ ops->start_xfer(schan, sdesc);
+ break;
+ }
+}
+
+static dma_cookie_t shdma_tx_submit(struct dma_async_tx_descriptor *tx)
+{
+ struct shdma_desc *chunk, *c, *desc =
+ container_of(tx, struct shdma_desc, async_tx);
+ struct shdma_chan *schan = to_shdma_chan(tx->chan);
+ dma_async_tx_callback callback = tx->callback;
+ dma_cookie_t cookie;
+ bool power_up;
+
+ spin_lock_irq(&schan->chan_lock);
+
+ power_up = list_empty(&schan->ld_queue);
+
+ cookie = dma_cookie_assign(tx);
+
+ /* Mark all chunks of this descriptor as submitted, move to the queue */
+ list_for_each_entry_safe(chunk, c, desc->node.prev, node) {
+ /*
+ * All chunks are on the global ld_free, so we have to find
+ * the end of the chain ourselves.
+ */
+ if (chunk != desc && (chunk->mark == DESC_IDLE ||
+ chunk->async_tx.cookie > 0 ||
+ chunk->async_tx.cookie == -EBUSY ||
+ &chunk->node == &schan->ld_free))
+ break;
+ chunk->mark = DESC_SUBMITTED;
+ if (chunk->chunks == 1) {
+ chunk->async_tx.callback = callback;
+ chunk->async_tx.callback_param = tx->callback_param;
+ } else {
+ /* Callback goes to the last chunk */
+ chunk->async_tx.callback = NULL;
+ }
+ chunk->cookie = cookie;
+ list_move_tail(&chunk->node, &schan->ld_queue);
+
+ dev_dbg(schan->dev, "submit #%d@%p on %d\n",
+ tx->cookie, &chunk->async_tx, schan->id);
+ }
+
+ if (power_up) {
+ int ret;
+ schan->pm_state = SHDMA_PM_BUSY;
+
+ ret = pm_runtime_get(schan->dev);
+
+ spin_unlock_irq(&schan->chan_lock);
+ if (ret < 0)
+ dev_err(schan->dev, "%s(): GET = %d\n", __func__, ret);
+
+ pm_runtime_barrier(schan->dev);
+
+ spin_lock_irq(&schan->chan_lock);
+
+ /* Have we been reset, while waiting? */
+ if (schan->pm_state != SHDMA_PM_ESTABLISHED) {
+ struct shdma_dev *sdev =
+ to_shdma_dev(schan->dma_chan.device);
+ const struct shdma_ops *ops = sdev->ops;
+ dev_dbg(schan->dev, "Bring up channel %d\n",
+ schan->id);
+ /*
+ * TODO: .setup_xfer() might fail on some platforms.
+ * Make it return an int, then on error remove the
+ * chunks from the queue again.
+ */
+ ops->setup_xfer(schan, schan->slave_id);
+
+ if (schan->pm_state == SHDMA_PM_PENDING)
+ shdma_chan_xfer_ld_queue(schan);
+ schan->pm_state = SHDMA_PM_ESTABLISHED;
+ }
+ } else {
+ /*
+ * Tell .device_issue_pending() not to run the queue, interrupts
+ * will do it anyway
+ */
+ schan->pm_state = SHDMA_PM_PENDING;
+ }
+
+ spin_unlock_irq(&schan->chan_lock);
+
+ return cookie;
+}
+
+/* Called with desc_lock held */
+static struct shdma_desc *shdma_get_desc(struct shdma_chan *schan)
+{
+ struct shdma_desc *sdesc;
+
+ list_for_each_entry(sdesc, &schan->ld_free, node)
+ if (sdesc->mark != DESC_PREPARED) {
+ BUG_ON(sdesc->mark != DESC_IDLE);
+ list_del(&sdesc->node);
+ return sdesc;
+ }
+
+ return NULL;
+}
+
+static int shdma_setup_slave(struct shdma_chan *schan, dma_addr_t slave_addr)
+{
+ struct shdma_dev *sdev = to_shdma_dev(schan->dma_chan.device);
+ const struct shdma_ops *ops = sdev->ops;
+ int ret, match;
+
+ if (schan->dev->of_node) {
+ match = schan->hw_req;
+ ret = ops->set_slave(schan, match, slave_addr, true);
+ if (ret < 0)
+ return ret;
+ } else {
+ match = schan->real_slave_id;
+ }
+
+ if (schan->real_slave_id < 0 || schan->real_slave_id >= slave_num)
+ return -EINVAL;
+
+ if (test_and_set_bit(schan->real_slave_id, shdma_slave_used))
+ return -EBUSY;
+
+ ret = ops->set_slave(schan, match, slave_addr, false);
+ if (ret < 0) {
+ clear_bit(schan->real_slave_id, shdma_slave_used);
+ return ret;
+ }
+
+ schan->slave_id = schan->real_slave_id;
+
+ return 0;
+}
+
+static int shdma_alloc_chan_resources(struct dma_chan *chan)
+{
+ struct shdma_chan *schan = to_shdma_chan(chan);
+ struct shdma_dev *sdev = to_shdma_dev(schan->dma_chan.device);
+ const struct shdma_ops *ops = sdev->ops;
+ struct shdma_desc *desc;
+ struct shdma_slave *slave = chan->private;
+ int ret, i;
+
+ /*
+ * This relies on the guarantee from dmaengine that alloc_chan_resources
+ * never runs concurrently with itself or free_chan_resources.
+ */
+ if (slave) {
+ /* Legacy mode: .private is set in filter */
+ schan->real_slave_id = slave->slave_id;
+ ret = shdma_setup_slave(schan, 0);
+ if (ret < 0)
+ goto esetslave;
+ } else {
+ /* Normal mode: real_slave_id was set by filter */
+ schan->slave_id = -EINVAL;
+ }
+
+ schan->desc = kcalloc(NR_DESCS_PER_CHANNEL,
+ sdev->desc_size, GFP_KERNEL);
+ if (!schan->desc) {
+ ret = -ENOMEM;
+ goto edescalloc;
+ }
+ schan->desc_num = NR_DESCS_PER_CHANNEL;
+
+ for (i = 0; i < NR_DESCS_PER_CHANNEL; i++) {
+ desc = ops->embedded_desc(schan->desc, i);
+ dma_async_tx_descriptor_init(&desc->async_tx,
+ &schan->dma_chan);
+ desc->async_tx.tx_submit = shdma_tx_submit;
+ desc->mark = DESC_IDLE;
+
+ list_add(&desc->node, &schan->ld_free);
+ }
+
+ return NR_DESCS_PER_CHANNEL;
+
+edescalloc:
+ if (slave)
+esetslave:
+ clear_bit(slave->slave_id, shdma_slave_used);
+ chan->private = NULL;
+ return ret;
+}
+
+/*
+ * This is the standard shdma filter function to be used as a replacement for
+ * the "old" method, using the .private pointer.
+ * You always have to pass a valid slave id as the argument; old drivers that
+ * pass ERR_PTR(-EINVAL) as a filter parameter and set it up in dma_slave_config
+ * need to be updated so that the slave_id field can be removed from the
+ * dma_slave_config parameter. If this filter is used, the slave driver, after
+ * calling dma_request_channel(), will also have to call dmaengine_slave_config()
+ * with .direction, and either .src_addr or .dst_addr set.
+ *
+ * NOTE: this filter doesn't support multiple DMAC drivers with the DMA_SLAVE
+ * capability! If this becomes a requirement, hardware glue drivers using these
+ * services would have to provide their own filters, which first would check
+ * the device driver, similar to how other DMAC drivers, e.g., sa11x0-dma.c, do
+ * this, and only then, in case of a match, call this common filter.
+ * NOTE 2: This filter function is also used in the DT case by shdma_of_xlate().
+ * In that case the MID-RID value is used for slave channel filtering and is
+ * passed to this function in the "arg" parameter.
+ */
+bool shdma_chan_filter(struct dma_chan *chan, void *arg)
+{
+ struct shdma_chan *schan;
+ struct shdma_dev *sdev;
+ int slave_id = (long)arg;
+ int ret;
+
+ /* Only support channels handled by this driver. */
+ if (chan->device->device_alloc_chan_resources !=
+ shdma_alloc_chan_resources)
+ return false;
+
+ schan = to_shdma_chan(chan);
+ sdev = to_shdma_dev(chan->device);
+
+ /*
+ * For DT, the schan->slave_id field is generated by the
+ * set_slave function from the slave ID that is passed in
+ * from xlate. For the non-DT case, the slave ID is
+ * directly passed into the filter function by the driver
+ */
+ if (schan->dev->of_node) {
+ ret = sdev->ops->set_slave(schan, slave_id, 0, true);
+ if (ret < 0)
+ return false;
+
+ schan->real_slave_id = schan->slave_id;
+ return true;
+ }
+
+ if (slave_id < 0) {
+ /* No slave requested - arbitrary channel */
+ dev_warn(sdev->dma_dev.dev, "invalid slave ID passed to dma_request_slave\n");
+ return true;
+ }
+
+ if (slave_id >= slave_num)
+ return false;
+
+ ret = sdev->ops->set_slave(schan, slave_id, 0, true);
+ if (ret < 0)
+ return false;
+
+ schan->real_slave_id = slave_id;
+
+ return true;
+}
+EXPORT_SYMBOL(shdma_chan_filter);
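+
+/*
+ * Typical non-DT usage of the filter above from a client driver; the slave_id
+ * value would come from platform data and the snippet is only a sketch:
+ *
+ *	dma_cap_mask_t mask;
+ *	struct dma_chan *chan;
+ *
+ *	dma_cap_zero(mask);
+ *	dma_cap_set(DMA_SLAVE, mask);
+ *	chan = dma_request_channel(mask, shdma_chan_filter,
+ *				   (void *)(uintptr_t)slave_id);
+ *	if (chan)
+ *		dmaengine_slave_config(chan, &cfg);
+ *
+ * where cfg is a struct dma_slave_config with .direction and either .src_addr
+ * or .dst_addr set, as required by the comment above.
+ */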
+
+static dma_async_tx_callback __ld_cleanup(struct shdma_chan *schan, bool all)
+{
+ struct shdma_desc *desc, *_desc;
+ /* Is the "exposed" head of a chain acked? */
+ bool head_acked = false;
+ dma_cookie_t cookie = 0;
+ dma_async_tx_callback callback = NULL;
+ struct dmaengine_desc_callback cb;
+ unsigned long flags;
+ LIST_HEAD(cyclic_list);
+
+ memset(&cb, 0, sizeof(cb));
+ spin_lock_irqsave(&schan->chan_lock, flags);
+ list_for_each_entry_safe(desc, _desc, &schan->ld_queue, node) {
+ struct dma_async_tx_descriptor *tx = &desc->async_tx;
+
+ BUG_ON(tx->cookie > 0 && tx->cookie != desc->cookie);
+ BUG_ON(desc->mark != DESC_SUBMITTED &&
+ desc->mark != DESC_COMPLETED &&
+ desc->mark != DESC_WAITING);
+
+ /*
+ * queue is ordered, and we use this loop to (1) clean up all
+ * completed descriptors, and to (2) update descriptor flags of
+ * any chunks in a (partially) completed chain
+ */
+ if (!all && desc->mark == DESC_SUBMITTED &&
+ desc->cookie != cookie)
+ break;
+
+ if (tx->cookie > 0)
+ cookie = tx->cookie;
+
+ if (desc->mark == DESC_COMPLETED && desc->chunks == 1) {
+ if (schan->dma_chan.completed_cookie != desc->cookie - 1)
+ dev_dbg(schan->dev,
+ "Completing cookie %d, expected %d\n",
+ desc->cookie,
+ schan->dma_chan.completed_cookie + 1);
+ schan->dma_chan.completed_cookie = desc->cookie;
+ }
+
+ /* Call callback on the last chunk */
+ if (desc->mark == DESC_COMPLETED && tx->callback) {
+ desc->mark = DESC_WAITING;
+ dmaengine_desc_get_callback(tx, &cb);
+ callback = tx->callback;
+ dev_dbg(schan->dev, "descriptor #%d@%p on %d callback\n",
+ tx->cookie, tx, schan->id);
+ BUG_ON(desc->chunks != 1);
+ break;
+ }
+
+ if (tx->cookie > 0 || tx->cookie == -EBUSY) {
+ if (desc->mark == DESC_COMPLETED) {
+ BUG_ON(tx->cookie < 0);
+ desc->mark = DESC_WAITING;
+ }
+ head_acked = async_tx_test_ack(tx);
+ } else {
+ switch (desc->mark) {
+ case DESC_COMPLETED:
+ desc->mark = DESC_WAITING;
+ fallthrough;
+ case DESC_WAITING:
+ if (head_acked)
+ async_tx_ack(&desc->async_tx);
+ }
+ }
+
+ dev_dbg(schan->dev, "descriptor %p #%d completed.\n",
+ tx, tx->cookie);
+
+ if (((desc->mark == DESC_COMPLETED ||
+ desc->mark == DESC_WAITING) &&
+ async_tx_test_ack(&desc->async_tx)) || all) {
+
+ if (all || !desc->cyclic) {
+ /* Remove from ld_queue list */
+ desc->mark = DESC_IDLE;
+ list_move(&desc->node, &schan->ld_free);
+ } else {
+ /* reuse as cyclic */
+ desc->mark = DESC_SUBMITTED;
+ list_move_tail(&desc->node, &cyclic_list);
+ }
+
+ if (list_empty(&schan->ld_queue)) {
+ dev_dbg(schan->dev, "Bring down channel %d\n", schan->id);
+ pm_runtime_put(schan->dev);
+ schan->pm_state = SHDMA_PM_ESTABLISHED;
+ } else if (schan->pm_state == SHDMA_PM_PENDING) {
+ shdma_chan_xfer_ld_queue(schan);
+ }
+ }
+ }
+
+ if (all && !callback)
+ /*
+ * Terminating and the loop completed normally: forgive
+ * uncompleted cookies
+ */
+ schan->dma_chan.completed_cookie = schan->dma_chan.cookie;
+
+ list_splice_tail(&cyclic_list, &schan->ld_queue);
+
+ spin_unlock_irqrestore(&schan->chan_lock, flags);
+
+ dmaengine_desc_callback_invoke(&cb, NULL);
+
+ return callback;
+}
+
+/*
+ * shdma_chan_ld_cleanup - Clean up link descriptors
+ *
+ * Clean up the ld_queue of the DMA channel.
+ */
+static void shdma_chan_ld_cleanup(struct shdma_chan *schan, bool all)
+{
+ while (__ld_cleanup(schan, all))
+ ;
+}
+
+/*
+ * shdma_free_chan_resources - Free all resources of the channel.
+ */
+static void shdma_free_chan_resources(struct dma_chan *chan)
+{
+ struct shdma_chan *schan = to_shdma_chan(chan);
+ struct shdma_dev *sdev = to_shdma_dev(chan->device);
+ const struct shdma_ops *ops = sdev->ops;
+ LIST_HEAD(list);
+
+ /* Protect against ISR */
+ spin_lock_irq(&schan->chan_lock);
+ ops->halt_channel(schan);
+ spin_unlock_irq(&schan->chan_lock);
+
+ /* Now no new interrupts will occur */
+
+ /* Prepared and not submitted descriptors can still be on the queue */
+ if (!list_empty(&schan->ld_queue))
+ shdma_chan_ld_cleanup(schan, true);
+
+ if (schan->slave_id >= 0) {
+ /* The caller is holding dma_list_mutex */
+ clear_bit(schan->slave_id, shdma_slave_used);
+ chan->private = NULL;
+ }
+
+ schan->real_slave_id = 0;
+
+ spin_lock_irq(&schan->chan_lock);
+
+ list_splice_init(&schan->ld_free, &list);
+ schan->desc_num = 0;
+
+ spin_unlock_irq(&schan->chan_lock);
+
+ kfree(schan->desc);
+}
+
+/**
+ * shdma_add_desc - get, set up and return one transfer descriptor
+ * @schan: DMA channel
+ * @flags: DMA transfer flags
+ * @dst: destination DMA address, incremented when direction equals
+ * DMA_DEV_TO_MEM or DMA_MEM_TO_MEM
+ * @src: source DMA address, incremented when direction equals
+ * DMA_MEM_TO_DEV or DMA_MEM_TO_MEM
+ * @len: DMA transfer length
+ * @first: if NULL, set to the current descriptor and cookie set to -EBUSY
+ * @direction: needed for slave DMA to decide which address to keep constant,
+ * equals DMA_MEM_TO_MEM for MEMCPY
+ * Returns the prepared descriptor on success or NULL on error
+ * Locks: called with desc_lock held
+ */
+static struct shdma_desc *shdma_add_desc(struct shdma_chan *schan,
+ unsigned long flags, dma_addr_t *dst, dma_addr_t *src, size_t *len,
+ struct shdma_desc **first, enum dma_transfer_direction direction)
+{
+ struct shdma_dev *sdev = to_shdma_dev(schan->dma_chan.device);
+ const struct shdma_ops *ops = sdev->ops;
+ struct shdma_desc *new;
+ size_t copy_size = *len;
+
+ if (!copy_size)
+ return NULL;
+
+ /* Allocate the link descriptor from the free list */
+ new = shdma_get_desc(schan);
+ if (!new) {
+ dev_err(schan->dev, "No free link descriptor available\n");
+ return NULL;
+ }
+
+ ops->desc_setup(schan, new, *src, *dst, &copy_size);
+
+ if (!*first) {
+ /* First desc */
+ new->async_tx.cookie = -EBUSY;
+ *first = new;
+ } else {
+ /* Other desc - invisible to the user */
+ new->async_tx.cookie = -EINVAL;
+ }
+
+ dev_dbg(schan->dev,
+ "chaining (%zu/%zu)@%pad -> %pad with %p, cookie %d\n",
+ copy_size, *len, src, dst, &new->async_tx,
+ new->async_tx.cookie);
+
+ new->mark = DESC_PREPARED;
+ new->async_tx.flags = flags;
+ new->direction = direction;
+ new->partial = 0;
+
+ *len -= copy_size;
+ if (direction == DMA_MEM_TO_MEM || direction == DMA_MEM_TO_DEV)
+ *src += copy_size;
+ if (direction == DMA_MEM_TO_MEM || direction == DMA_DEV_TO_MEM)
+ *dst += copy_size;
+
+ return new;
+}
+
+/*
+ * shdma_prep_sg - prepare transfer descriptors from an SG list
+ *
+ * Common routine for public (MEMCPY) and slave DMA. The MEMCPY case is also
+ * converted to scatter-gather to guarantee consistent locking and correct
+ * list manipulation. For slave DMA, direction carries the usual meaning, and,
+ * logically, the SG list is RAM and the addr variable contains the slave
+ * address, e.g., the FIFO I/O register. For MEMCPY, direction equals
+ * DMA_MEM_TO_MEM and the SG list contains only one element and points at the
+ * source buffer.
+ */
+static struct dma_async_tx_descriptor *shdma_prep_sg(struct shdma_chan *schan,
+ struct scatterlist *sgl, unsigned int sg_len, dma_addr_t *addr,
+ enum dma_transfer_direction direction, unsigned long flags, bool cyclic)
+{
+ struct scatterlist *sg;
+ struct shdma_desc *first = NULL, *new = NULL /* compiler... */;
+ LIST_HEAD(tx_list);
+ int chunks = 0;
+ unsigned long irq_flags;
+ int i;
+
+ for_each_sg(sgl, sg, sg_len, i)
+ chunks += DIV_ROUND_UP(sg_dma_len(sg), schan->max_xfer_len);
+
+ /* Have to lock the whole loop to protect against concurrent release */
+ spin_lock_irqsave(&schan->chan_lock, irq_flags);
+
+ /*
+ * Chaining:
+ * the first descriptor is what the user is dealing with in all API
+ * calls; its cookie is initially set to -EBUSY and becomes a positive
+ * number at tx-submit;
+ * if more than one chunk is needed, further chunks have cookie = -EINVAL;
+ * the last chunk, if not equal to the first, has cookie = -ENOSPC;
+ * all chunks are linked onto the tx_list head with their .node heads
+ * only during this function, then they are immediately spliced back
+ * onto the free list in the form of a chain.
+ */
+ for_each_sg(sgl, sg, sg_len, i) {
+ dma_addr_t sg_addr = sg_dma_address(sg);
+ size_t len = sg_dma_len(sg);
+
+ if (!len)
+ goto err_get_desc;
+
+ do {
+ dev_dbg(schan->dev, "Add SG #%d@%p[%zu], dma %pad\n",
+ i, sg, len, &sg_addr);
+
+ if (direction == DMA_DEV_TO_MEM)
+ new = shdma_add_desc(schan, flags,
+ &sg_addr, addr, &len, &first,
+ direction);
+ else
+ new = shdma_add_desc(schan, flags,
+ addr, &sg_addr, &len, &first,
+ direction);
+ if (!new)
+ goto err_get_desc;
+
+ new->cyclic = cyclic;
+ if (cyclic)
+ new->chunks = 1;
+ else
+ new->chunks = chunks--;
+ list_add_tail(&new->node, &tx_list);
+ } while (len);
+ }
+
+ if (new != first)
+ new->async_tx.cookie = -ENOSPC;
+
+ /* Put them back on the free list so they don't get lost */
+ list_splice_tail(&tx_list, &schan->ld_free);
+
+ spin_unlock_irqrestore(&schan->chan_lock, irq_flags);
+
+ return &first->async_tx;
+
+err_get_desc:
+ list_for_each_entry(new, &tx_list, node)
+ new->mark = DESC_IDLE;
+ list_splice(&tx_list, &schan->ld_free);
+
+ spin_unlock_irqrestore(&schan->chan_lock, irq_flags);
+
+ return NULL;
+}
+
+static struct dma_async_tx_descriptor *shdma_prep_memcpy(
+ struct dma_chan *chan, dma_addr_t dma_dest, dma_addr_t dma_src,
+ size_t len, unsigned long flags)
+{
+ struct shdma_chan *schan = to_shdma_chan(chan);
+ struct scatterlist sg;
+
+ if (!chan || !len)
+ return NULL;
+
+ BUG_ON(!schan->desc_num);
+
+ sg_init_table(&sg, 1);
+ sg_set_page(&sg, pfn_to_page(PFN_DOWN(dma_src)), len,
+ offset_in_page(dma_src));
+ sg_dma_address(&sg) = dma_src;
+ sg_dma_len(&sg) = len;
+
+ return shdma_prep_sg(schan, &sg, 1, &dma_dest, DMA_MEM_TO_MEM,
+ flags, false);
+}
+
+static struct dma_async_tx_descriptor *shdma_prep_slave_sg(
+ struct dma_chan *chan, struct scatterlist *sgl, unsigned int sg_len,
+ enum dma_transfer_direction direction, unsigned long flags, void *context)
+{
+ struct shdma_chan *schan = to_shdma_chan(chan);
+ struct shdma_dev *sdev = to_shdma_dev(schan->dma_chan.device);
+ const struct shdma_ops *ops = sdev->ops;
+ int slave_id = schan->slave_id;
+ dma_addr_t slave_addr;
+
+ if (!chan)
+ return NULL;
+
+ BUG_ON(!schan->desc_num);
+
+ /* Someone calling slave DMA on a generic channel? */
+ if (slave_id < 0 || !sg_len) {
+ dev_warn(schan->dev, "%s: bad parameter: len=%d, id=%d\n",
+ __func__, sg_len, slave_id);
+ return NULL;
+ }
+
+ slave_addr = ops->slave_addr(schan);
+
+ return shdma_prep_sg(schan, sgl, sg_len, &slave_addr,
+ direction, flags, false);
+}
+
+#define SHDMA_MAX_SG_LEN 32
+
+static struct dma_async_tx_descriptor *shdma_prep_dma_cyclic(
+ struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
+ size_t period_len, enum dma_transfer_direction direction,
+ unsigned long flags)
+{
+ struct shdma_chan *schan = to_shdma_chan(chan);
+ struct shdma_dev *sdev = to_shdma_dev(schan->dma_chan.device);
+ struct dma_async_tx_descriptor *desc;
+ const struct shdma_ops *ops = sdev->ops;
+ unsigned int sg_len = buf_len / period_len;
+ int slave_id = schan->slave_id;
+ dma_addr_t slave_addr;
+ struct scatterlist *sgl;
+ int i;
+
+ if (!chan)
+ return NULL;
+
+ BUG_ON(!schan->desc_num);
+
+ if (sg_len > SHDMA_MAX_SG_LEN) {
+ dev_err(schan->dev, "sg length %d exceeds limit %d",
+ sg_len, SHDMA_MAX_SG_LEN);
+ return NULL;
+ }
+
+ /* Someone calling slave DMA on a generic channel? */
+ if (slave_id < 0 || (buf_len < period_len)) {
+ dev_warn(schan->dev,
+ "%s: bad parameter: buf_len=%zu, period_len=%zu, id=%d\n",
+ __func__, buf_len, period_len, slave_id);
+ return NULL;
+ }
+
+ slave_addr = ops->slave_addr(schan);
+
+ /*
+ * Allocate the sg list dynamically as it would consume too much stack
+ * space.
+ */
+ sgl = kmalloc_array(sg_len, sizeof(*sgl), GFP_KERNEL);
+ if (!sgl)
+ return NULL;
+
+ sg_init_table(sgl, sg_len);
+
+ for (i = 0; i < sg_len; i++) {
+ dma_addr_t src = buf_addr + (period_len * i);
+
+ sg_set_page(&sgl[i], pfn_to_page(PFN_DOWN(src)), period_len,
+ offset_in_page(src));
+ sg_dma_address(&sgl[i]) = src;
+ sg_dma_len(&sgl[i]) = period_len;
+ }
+
+ desc = shdma_prep_sg(schan, sgl, sg_len, &slave_addr,
+ direction, flags, true);
+
+ kfree(sgl);
+ return desc;
+}
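+
+/*
+ * Worked example for the cyclic preparation above: buf_len = 4096 with
+ * period_len = 1024 gives sg_len = 4, i.e. four scatterlist entries of one
+ * period each, pointing into the same buffer at 1024-byte offsets.
+ */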
+
+static int shdma_terminate_all(struct dma_chan *chan)
+{
+ struct shdma_chan *schan = to_shdma_chan(chan);
+ struct shdma_dev *sdev = to_shdma_dev(chan->device);
+ const struct shdma_ops *ops = sdev->ops;
+ unsigned long flags;
+
+ spin_lock_irqsave(&schan->chan_lock, flags);
+ ops->halt_channel(schan);
+
+ if (ops->get_partial && !list_empty(&schan->ld_queue)) {
+ /* Record partial transfer */
+ struct shdma_desc *desc = list_first_entry(&schan->ld_queue,
+ struct shdma_desc, node);
+ desc->partial = ops->get_partial(schan, desc);
+ }
+
+ spin_unlock_irqrestore(&schan->chan_lock, flags);
+
+ shdma_chan_ld_cleanup(schan, true);
+
+ return 0;
+}
+
+static int shdma_config(struct dma_chan *chan,
+ struct dma_slave_config *config)
+{
+ struct shdma_chan *schan = to_shdma_chan(chan);
+
+ /*
+ * So far only .slave_id is used, but the slave drivers are
+ * encouraged to also set a transfer direction and an address.
+ */
+ if (!config)
+ return -EINVAL;
+
+ /*
+ * We could lock this, but you shouldn't be configuring the
+ * channel while using it...
+ */
+ return shdma_setup_slave(schan,
+ config->direction == DMA_DEV_TO_MEM ?
+ config->src_addr : config->dst_addr);
+}
+
+static void shdma_issue_pending(struct dma_chan *chan)
+{
+ struct shdma_chan *schan = to_shdma_chan(chan);
+
+ spin_lock_irq(&schan->chan_lock);
+ if (schan->pm_state == SHDMA_PM_ESTABLISHED)
+ shdma_chan_xfer_ld_queue(schan);
+ else
+ schan->pm_state = SHDMA_PM_PENDING;
+ spin_unlock_irq(&schan->chan_lock);
+}
+
+static enum dma_status shdma_tx_status(struct dma_chan *chan,
+ dma_cookie_t cookie,
+ struct dma_tx_state *txstate)
+{
+ struct shdma_chan *schan = to_shdma_chan(chan);
+ enum dma_status status;
+ unsigned long flags;
+
+ shdma_chan_ld_cleanup(schan, false);
+
+ spin_lock_irqsave(&schan->chan_lock, flags);
+
+ status = dma_cookie_status(chan, cookie, txstate);
+
+ /*
+ * If we don't find the cookie on the queue, it has been aborted and we
+ * have to report an error.
+ */
+ if (status != DMA_COMPLETE) {
+ struct shdma_desc *sdesc;
+ status = DMA_ERROR;
+ list_for_each_entry(sdesc, &schan->ld_queue, node)
+ if (sdesc->cookie == cookie) {
+ status = DMA_IN_PROGRESS;
+ break;
+ }
+ }
+
+ spin_unlock_irqrestore(&schan->chan_lock, flags);
+
+ return status;
+}
+
+/* Called from error IRQ or NMI */
+bool shdma_reset(struct shdma_dev *sdev)
+{
+ const struct shdma_ops *ops = sdev->ops;
+ struct shdma_chan *schan;
+ unsigned int handled = 0;
+ int i;
+
+ /* Reset all channels */
+ shdma_for_each_chan(schan, sdev, i) {
+ struct shdma_desc *sdesc;
+ LIST_HEAD(dl);
+
+ if (!schan)
+ continue;
+
+ spin_lock(&schan->chan_lock);
+
+ /* Stop the channel */
+ ops->halt_channel(schan);
+
+ list_splice_init(&schan->ld_queue, &dl);
+
+ if (!list_empty(&dl)) {
+ dev_dbg(schan->dev, "Bring down channel %d\n", schan->id);
+ pm_runtime_put(schan->dev);
+ }
+ schan->pm_state = SHDMA_PM_ESTABLISHED;
+
+ spin_unlock(&schan->chan_lock);
+
+ /* Complete all */
+ list_for_each_entry(sdesc, &dl, node) {
+ struct dma_async_tx_descriptor *tx = &sdesc->async_tx;
+
+ sdesc->mark = DESC_IDLE;
+ dmaengine_desc_get_callback_invoke(tx, NULL);
+ }
+
+ spin_lock(&schan->chan_lock);
+ list_splice(&dl, &schan->ld_free);
+ spin_unlock(&schan->chan_lock);
+
+ handled++;
+ }
+
+ return !!handled;
+}
+EXPORT_SYMBOL(shdma_reset);
+
+static irqreturn_t chan_irq(int irq, void *dev)
+{
+ struct shdma_chan *schan = dev;
+ const struct shdma_ops *ops =
+ to_shdma_dev(schan->dma_chan.device)->ops;
+ irqreturn_t ret;
+
+ spin_lock(&schan->chan_lock);
+
+ ret = ops->chan_irq(schan, irq) ? IRQ_WAKE_THREAD : IRQ_NONE;
+
+ spin_unlock(&schan->chan_lock);
+
+ return ret;
+}
+
+static irqreturn_t chan_irqt(int irq, void *dev)
+{
+ struct shdma_chan *schan = dev;
+ const struct shdma_ops *ops =
+ to_shdma_dev(schan->dma_chan.device)->ops;
+ struct shdma_desc *sdesc;
+
+ spin_lock_irq(&schan->chan_lock);
+ list_for_each_entry(sdesc, &schan->ld_queue, node) {
+ if (sdesc->mark == DESC_SUBMITTED &&
+ ops->desc_completed(schan, sdesc)) {
+ dev_dbg(schan->dev, "done #%d@%p\n",
+ sdesc->async_tx.cookie, &sdesc->async_tx);
+ sdesc->mark = DESC_COMPLETED;
+ break;
+ }
+ }
+ /* Next desc */
+ shdma_chan_xfer_ld_queue(schan);
+ spin_unlock_irq(&schan->chan_lock);
+
+ shdma_chan_ld_cleanup(schan, false);
+
+ return IRQ_HANDLED;
+}
+
+int shdma_request_irq(struct shdma_chan *schan, int irq,
+ unsigned long flags, const char *name)
+{
+ int ret = devm_request_threaded_irq(schan->dev, irq, chan_irq,
+ chan_irqt, flags, name, schan);
+
+ schan->irq = ret < 0 ? ret : irq;
+
+ return ret;
+}
+EXPORT_SYMBOL(shdma_request_irq);
+
+void shdma_chan_probe(struct shdma_dev *sdev,
+ struct shdma_chan *schan, int id)
+{
+ schan->pm_state = SHDMA_PM_ESTABLISHED;
+
+ /* reference struct dma_device */
+ schan->dma_chan.device = &sdev->dma_dev;
+ dma_cookie_init(&schan->dma_chan);
+
+ schan->dev = sdev->dma_dev.dev;
+ schan->id = id;
+
+ if (!schan->max_xfer_len)
+ schan->max_xfer_len = PAGE_SIZE;
+
+ spin_lock_init(&schan->chan_lock);
+
+ /* Initialize the descriptor management lists */
+ INIT_LIST_HEAD(&schan->ld_queue);
+ INIT_LIST_HEAD(&schan->ld_free);
+
+ /* Add the channel to DMA device channel list */
+ list_add_tail(&schan->dma_chan.device_node,
+ &sdev->dma_dev.channels);
+ sdev->schan[id] = schan;
+}
+EXPORT_SYMBOL(shdma_chan_probe);
+
+void shdma_chan_remove(struct shdma_chan *schan)
+{
+ list_del(&schan->dma_chan.device_node);
+}
+EXPORT_SYMBOL(shdma_chan_remove);
+
+int shdma_init(struct device *dev, struct shdma_dev *sdev,
+ int chan_num)
+{
+ struct dma_device *dma_dev = &sdev->dma_dev;
+
+ /*
+ * Require all callbacks for now; they can trivially be made optional
+ * later if required.
+ */
+ if (!sdev->ops ||
+ !sdev->desc_size ||
+ !sdev->ops->embedded_desc ||
+ !sdev->ops->start_xfer ||
+ !sdev->ops->setup_xfer ||
+ !sdev->ops->set_slave ||
+ !sdev->ops->desc_setup ||
+ !sdev->ops->slave_addr ||
+ !sdev->ops->channel_busy ||
+ !sdev->ops->halt_channel ||
+ !sdev->ops->desc_completed)
+ return -EINVAL;
+
+ sdev->schan = kcalloc(chan_num, sizeof(*sdev->schan), GFP_KERNEL);
+ if (!sdev->schan)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&dma_dev->channels);
+
+ /* Common and MEMCPY operations */
+ dma_dev->device_alloc_chan_resources
+ = shdma_alloc_chan_resources;
+ dma_dev->device_free_chan_resources = shdma_free_chan_resources;
+ dma_dev->device_prep_dma_memcpy = shdma_prep_memcpy;
+ dma_dev->device_tx_status = shdma_tx_status;
+ dma_dev->device_issue_pending = shdma_issue_pending;
+
+ /* Fields compulsory for DMA_SLAVE */
+ dma_dev->device_prep_slave_sg = shdma_prep_slave_sg;
+ dma_dev->device_prep_dma_cyclic = shdma_prep_dma_cyclic;
+ dma_dev->device_config = shdma_config;
+ dma_dev->device_terminate_all = shdma_terminate_all;
+
+ dma_dev->dev = dev;
+
+ return 0;
+}
+EXPORT_SYMBOL(shdma_init);
+
+void shdma_cleanup(struct shdma_dev *sdev)
+{
+ kfree(sdev->schan);
+}
+EXPORT_SYMBOL(shdma_cleanup);
+
+static int __init shdma_enter(void)
+{
+ shdma_slave_used = bitmap_zalloc(slave_num, GFP_KERNEL);
+ if (!shdma_slave_used)
+ return -ENOMEM;
+ return 0;
+}
+module_init(shdma_enter);
+
+static void __exit shdma_exit(void)
+{
+ bitmap_free(shdma_slave_used);
+}
+module_exit(shdma_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("SH-DMA driver base library");
+MODULE_AUTHOR("Guennadi Liakhovetski <g.liakhovetski@gmx.de>");
diff --git a/drivers/dma/sh/shdma.h b/drivers/dma/sh/shdma.h
new file mode 100644
index 000000000..9c121a4b3
--- /dev/null
+++ b/drivers/dma/sh/shdma.h
@@ -0,0 +1,61 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Renesas SuperH DMA Engine support
+ *
+ * Copyright (C) 2009 Nobuhiro Iwamatsu <iwamatsu.nobuhiro@renesas.com>
+ * Copyright (C) 2009 Renesas Solutions, Inc. All rights reserved.
+ *
+ */
+#ifndef __DMA_SHDMA_H
+#define __DMA_SHDMA_H
+
+#include <linux/sh_dma.h>
+#include <linux/shdma-base.h>
+#include <linux/dmaengine.h>
+#include <linux/interrupt.h>
+#include <linux/list.h>
+
+#define SH_DMAE_MAX_CHANNELS 20
+#define SH_DMAE_TCR_MAX 0x00FFFFFF /* 16MB */
+
+struct device;
+
+struct sh_dmae_chan {
+ struct shdma_chan shdma_chan;
+ const struct sh_dmae_slave_config *config; /* Slave DMA configuration */
+ int xmit_shift; /* log_2(bytes_per_xfer) */
+ void __iomem *base;
+ char dev_id[16]; /* unique channel name per DMAC */
+ int pm_error;
+ dma_addr_t slave_addr;
+};
+
+struct sh_dmae_device {
+ struct shdma_dev shdma_dev;
+ struct sh_dmae_chan *chan[SH_DMAE_MAX_CHANNELS];
+ const struct sh_dmae_pdata *pdata;
+ struct list_head node;
+ void __iomem *chan_reg;
+ void __iomem *dmars;
+ unsigned int chcr_offset;
+ u32 chcr_ie_bit;
+};
+
+struct sh_dmae_regs {
+ u32 sar; /* SAR / source address */
+ u32 dar; /* DAR / destination address */
+ u32 tcr; /* TCR / transfer count */
+};
+
+struct sh_dmae_desc {
+ struct sh_dmae_regs hw;
+ struct shdma_desc shdma_desc;
+};
+
+#define to_sh_chan(chan) container_of(chan, struct sh_dmae_chan, shdma_chan)
+#define to_sh_desc(lh) container_of(lh, struct sh_desc, node)
+#define tx_to_sh_desc(tx) container_of(tx, struct sh_desc, async_tx)
+#define to_sh_dev(chan) container_of(chan->shdma_chan.dma_chan.device,\
+ struct sh_dmae_device, shdma_dev.dma_dev)
+
+#endif /* __DMA_SHDMA_H */
diff --git a/drivers/dma/sh/shdmac.c b/drivers/dma/sh/shdmac.c
new file mode 100644
index 000000000..5aafe548c
--- /dev/null
+++ b/drivers/dma/sh/shdmac.c
@@ -0,0 +1,938 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Renesas SuperH DMA Engine support
+ *
+ * based on drivers/dma/fsldma.c
+ *
+ * Copyright (C) 2011-2012 Guennadi Liakhovetski <g.liakhovetski@gmx.de>
+ * Copyright (C) 2009 Nobuhiro Iwamatsu <iwamatsu.nobuhiro@renesas.com>
+ * Copyright (C) 2009 Renesas Solutions, Inc. All rights reserved.
+ * Copyright (C) 2007 Freescale Semiconductor, Inc. All rights reserved.
+ *
+ * - The SuperH DMAC does not have a hardware DMA chaining mode.
+ * - The maximum DMA transfer size is 16 MB.
+ *
+ */
+
+#include <linux/delay.h>
+#include <linux/dmaengine.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/kdebug.h>
+#include <linux/module.h>
+#include <linux/notifier.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/rculist.h>
+#include <linux/sh_dma.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+
+#include "../dmaengine.h"
+#include "shdma.h"
+
+/* DMA registers */
+#define SAR 0x00 /* Source Address Register */
+#define DAR 0x04 /* Destination Address Register */
+#define TCR 0x08 /* Transfer Count Register */
+#define CHCR 0x0C /* Channel Control Register */
+#define DMAOR 0x40 /* DMA Operation Register */
+
+#define TEND 0x18 /* USB-DMAC */
+
+#define SH_DMAE_DRV_NAME "sh-dma-engine"
+
+/* Default MEMCPY transfer size = 2^2 = 4 bytes */
+#define LOG2_DEFAULT_XFER_SIZE 2
+#define SH_DMA_SLAVE_NUMBER 256
+#define SH_DMA_TCR_MAX (16 * 1024 * 1024 - 1)
+
+/*
+ * Used for write-side mutual exclusion for the global device list,
+ * read-side synchronization by way of RCU, and per-controller data.
+ */
+static DEFINE_SPINLOCK(sh_dmae_lock);
+static LIST_HEAD(sh_dmae_devices);
+
+/*
+ * Different DMAC implementations provide different ways to clear DMA channels:
+ * (1) none - no CHCLR registers are available
+ * (2) one CHCLR register per channel - 0 has to be written to it to clear
+ * channel buffers
+ * (3) one CHCLR per several channels - 1 has to be written to the bit
+ * corresponding to the specific channel to reset it
+ */
+static void channel_clear(struct sh_dmae_chan *sh_dc)
+{
+ struct sh_dmae_device *shdev = to_sh_dev(sh_dc);
+ const struct sh_dmae_channel *chan_pdata = shdev->pdata->channel +
+ sh_dc->shdma_chan.id;
+ u32 val = shdev->pdata->chclr_bitwise ? 1 << chan_pdata->chclr_bit : 0;
+
+ __raw_writel(val, shdev->chan_reg + chan_pdata->chclr_offset);
+}
+
+static void sh_dmae_writel(struct sh_dmae_chan *sh_dc, u32 data, u32 reg)
+{
+ __raw_writel(data, sh_dc->base + reg);
+}
+
+static u32 sh_dmae_readl(struct sh_dmae_chan *sh_dc, u32 reg)
+{
+ return __raw_readl(sh_dc->base + reg);
+}
+
+static u16 dmaor_read(struct sh_dmae_device *shdev)
+{
+ void __iomem *addr = shdev->chan_reg + DMAOR;
+
+ if (shdev->pdata->dmaor_is_32bit)
+ return __raw_readl(addr);
+ else
+ return __raw_readw(addr);
+}
+
+static void dmaor_write(struct sh_dmae_device *shdev, u16 data)
+{
+ void __iomem *addr = shdev->chan_reg + DMAOR;
+
+ if (shdev->pdata->dmaor_is_32bit)
+ __raw_writel(data, addr);
+ else
+ __raw_writew(data, addr);
+}
+
+static void chcr_write(struct sh_dmae_chan *sh_dc, u32 data)
+{
+ struct sh_dmae_device *shdev = to_sh_dev(sh_dc);
+
+ __raw_writel(data, sh_dc->base + shdev->chcr_offset);
+}
+
+static u32 chcr_read(struct sh_dmae_chan *sh_dc)
+{
+ struct sh_dmae_device *shdev = to_sh_dev(sh_dc);
+
+ return __raw_readl(sh_dc->base + shdev->chcr_offset);
+}
+
+/*
+ * Reset DMA controller
+ *
+ * SH7780 has two DMAOR registers
+ */
+static void sh_dmae_ctl_stop(struct sh_dmae_device *shdev)
+{
+ unsigned short dmaor;
+ unsigned long flags;
+
+ spin_lock_irqsave(&sh_dmae_lock, flags);
+
+ dmaor = dmaor_read(shdev);
+ dmaor_write(shdev, dmaor & ~(DMAOR_NMIF | DMAOR_AE | DMAOR_DME));
+
+ spin_unlock_irqrestore(&sh_dmae_lock, flags);
+}
+
+static int sh_dmae_rst(struct sh_dmae_device *shdev)
+{
+ unsigned short dmaor;
+ unsigned long flags;
+
+ spin_lock_irqsave(&sh_dmae_lock, flags);
+
+ dmaor = dmaor_read(shdev) & ~(DMAOR_NMIF | DMAOR_AE | DMAOR_DME);
+
+ if (shdev->pdata->chclr_present) {
+ int i;
+ for (i = 0; i < shdev->pdata->channel_num; i++) {
+ struct sh_dmae_chan *sh_chan = shdev->chan[i];
+ if (sh_chan)
+ channel_clear(sh_chan);
+ }
+ }
+
+ dmaor_write(shdev, dmaor | shdev->pdata->dmaor_init);
+
+ dmaor = dmaor_read(shdev);
+
+ spin_unlock_irqrestore(&sh_dmae_lock, flags);
+
+ if (dmaor & (DMAOR_AE | DMAOR_NMIF)) {
+ dev_warn(shdev->shdma_dev.dma_dev.dev, "Can't initialize DMAOR.\n");
+ return -EIO;
+ }
+ if (shdev->pdata->dmaor_init & ~dmaor)
+ dev_warn(shdev->shdma_dev.dma_dev.dev,
+ "DMAOR=0x%x hasn't latched the initial value 0x%x.\n",
+ dmaor, shdev->pdata->dmaor_init);
+ return 0;
+}
+
+static bool dmae_is_busy(struct sh_dmae_chan *sh_chan)
+{
+ u32 chcr = chcr_read(sh_chan);
+
+ if ((chcr & (CHCR_DE | CHCR_TE)) == CHCR_DE)
+ return true; /* working */
+
+ return false; /* waiting */
+}
+
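+/* Decode the CHCR transfer-size (TS) field into log2(bytes per transfer unit) */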
+static unsigned int calc_xmit_shift(struct sh_dmae_chan *sh_chan, u32 chcr)
+{
+ struct sh_dmae_device *shdev = to_sh_dev(sh_chan);
+ const struct sh_dmae_pdata *pdata = shdev->pdata;
+ int cnt = ((chcr & pdata->ts_low_mask) >> pdata->ts_low_shift) |
+ ((chcr & pdata->ts_high_mask) >> pdata->ts_high_shift);
+
+ if (cnt >= pdata->ts_shift_num)
+ cnt = 0;
+
+ return pdata->ts_shift[cnt];
+}
+
+static u32 log2size_to_chcr(struct sh_dmae_chan *sh_chan, int l2size)
+{
+ struct sh_dmae_device *shdev = to_sh_dev(sh_chan);
+ const struct sh_dmae_pdata *pdata = shdev->pdata;
+ int i;
+
+ for (i = 0; i < pdata->ts_shift_num; i++)
+ if (pdata->ts_shift[i] == l2size)
+ break;
+
+ if (i == pdata->ts_shift_num)
+ i = 0;
+
+ return ((i << pdata->ts_low_shift) & pdata->ts_low_mask) |
+ ((i << pdata->ts_high_shift) & pdata->ts_high_mask);
+}
+
+static void dmae_set_reg(struct sh_dmae_chan *sh_chan, struct sh_dmae_regs *hw)
+{
+ sh_dmae_writel(sh_chan, hw->sar, SAR);
+ sh_dmae_writel(sh_chan, hw->dar, DAR);
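+ /* TCR counts transfer units, so convert the byte length using xmit_shift */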
+ sh_dmae_writel(sh_chan, hw->tcr >> sh_chan->xmit_shift, TCR);
+}
+
+static void dmae_start(struct sh_dmae_chan *sh_chan)
+{
+ struct sh_dmae_device *shdev = to_sh_dev(sh_chan);
+ u32 chcr = chcr_read(sh_chan);
+
+ if (shdev->pdata->needs_tend_set)
+ sh_dmae_writel(sh_chan, 0xFFFFFFFF, TEND);
+
+ chcr |= CHCR_DE | shdev->chcr_ie_bit;
+ chcr_write(sh_chan, chcr & ~CHCR_TE);
+}
+
+static void dmae_init(struct sh_dmae_chan *sh_chan)
+{
+ /*
+ * Default configuration for dual address memory-memory transfer.
+ */
+ u32 chcr = DM_INC | SM_INC | RS_AUTO | log2size_to_chcr(sh_chan,
+ LOG2_DEFAULT_XFER_SIZE);
+ sh_chan->xmit_shift = calc_xmit_shift(sh_chan, chcr);
+ chcr_write(sh_chan, chcr);
+}
+
+static int dmae_set_chcr(struct sh_dmae_chan *sh_chan, u32 val)
+{
+ /* CHCR must not be changed while DMA is active. TODO: remove this superfluous check */
+ if (dmae_is_busy(sh_chan))
+ return -EBUSY;
+
+ sh_chan->xmit_shift = calc_xmit_shift(sh_chan, val);
+ chcr_write(sh_chan, val);
+
+ return 0;
+}
+
+static int dmae_set_dmars(struct sh_dmae_chan *sh_chan, u16 val)
+{
+ struct sh_dmae_device *shdev = to_sh_dev(sh_chan);
+ const struct sh_dmae_pdata *pdata = shdev->pdata;
+ const struct sh_dmae_channel *chan_pdata = &pdata->channel[sh_chan->shdma_chan.id];
+ void __iomem *addr = shdev->dmars;
+ unsigned int shift = chan_pdata->dmars_bit;
+
+ if (dmae_is_busy(sh_chan))
+ return -EBUSY;
+
+ if (pdata->no_dmars)
+ return 0;
+
+ /* In the case of a missing DMARS resource, use the first memory window */
+ if (!addr)
+ addr = shdev->chan_reg;
+ addr += chan_pdata->dmars;
+
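+ /*
+ * Each 16-bit DMARS word holds the MID/RID values of two channels: keep
+ * the other channel's byte and place this channel's value at dmars_bit
+ * (0 or 8).
+ */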
+ __raw_writew((__raw_readw(addr) & (0xff00 >> shift)) | (val << shift),
+ addr);
+
+ return 0;
+}
+
+static void sh_dmae_start_xfer(struct shdma_chan *schan,
+ struct shdma_desc *sdesc)
+{
+ struct sh_dmae_chan *sh_chan = container_of(schan, struct sh_dmae_chan,
+ shdma_chan);
+ struct sh_dmae_desc *sh_desc = container_of(sdesc,
+ struct sh_dmae_desc, shdma_desc);
+ dev_dbg(sh_chan->shdma_chan.dev, "Queue #%d to %d: %u@%x -> %x\n",
+ sdesc->async_tx.cookie, sh_chan->shdma_chan.id,
+ sh_desc->hw.tcr, sh_desc->hw.sar, sh_desc->hw.dar);
+ /* Get the ld start address from ld_queue */
+ dmae_set_reg(sh_chan, &sh_desc->hw);
+ dmae_start(sh_chan);
+}
+
+static bool sh_dmae_channel_busy(struct shdma_chan *schan)
+{
+ struct sh_dmae_chan *sh_chan = container_of(schan, struct sh_dmae_chan,
+ shdma_chan);
+ return dmae_is_busy(sh_chan);
+}
+
+static void sh_dmae_setup_xfer(struct shdma_chan *schan,
+ int slave_id)
+{
+ struct sh_dmae_chan *sh_chan = container_of(schan, struct sh_dmae_chan,
+ shdma_chan);
+
+ if (slave_id >= 0) {
+ const struct sh_dmae_slave_config *cfg =
+ sh_chan->config;
+
+ dmae_set_dmars(sh_chan, cfg->mid_rid);
+ dmae_set_chcr(sh_chan, cfg->chcr);
+ } else {
+ dmae_init(sh_chan);
+ }
+}
+
+/*
+ * Find a slave channel configuration from the controller list by either a slave
+ * ID in the non-DT case, or by a MID/RID value in the DT case
+ */
+static const struct sh_dmae_slave_config *dmae_find_slave(
+ struct sh_dmae_chan *sh_chan, int match)
+{
+ struct sh_dmae_device *shdev = to_sh_dev(sh_chan);
+ const struct sh_dmae_pdata *pdata = shdev->pdata;
+ const struct sh_dmae_slave_config *cfg;
+ int i;
+
+ if (!sh_chan->shdma_chan.dev->of_node) {
+ if (match >= SH_DMA_SLAVE_NUMBER)
+ return NULL;
+
+ for (i = 0, cfg = pdata->slave; i < pdata->slave_num; i++, cfg++)
+ if (cfg->slave_id == match)
+ return cfg;
+ } else {
+ for (i = 0, cfg = pdata->slave; i < pdata->slave_num; i++, cfg++)
+ if (cfg->mid_rid == match) {
+ sh_chan->shdma_chan.slave_id = i;
+ return cfg;
+ }
+ }
+
+ return NULL;
+}
+
+static int sh_dmae_set_slave(struct shdma_chan *schan,
+ int slave_id, dma_addr_t slave_addr, bool try)
+{
+ struct sh_dmae_chan *sh_chan = container_of(schan, struct sh_dmae_chan,
+ shdma_chan);
+ const struct sh_dmae_slave_config *cfg = dmae_find_slave(sh_chan, slave_id);
+ if (!cfg)
+ return -ENXIO;
+
+ if (!try) {
+ sh_chan->config = cfg;
+ sh_chan->slave_addr = slave_addr ? : cfg->addr;
+ }
+
+ return 0;
+}
+
+static void dmae_halt(struct sh_dmae_chan *sh_chan)
+{
+ struct sh_dmae_device *shdev = to_sh_dev(sh_chan);
+ u32 chcr = chcr_read(sh_chan);
+
+ chcr &= ~(CHCR_DE | CHCR_TE | shdev->chcr_ie_bit);
+ chcr_write(sh_chan, chcr);
+}
+
+static int sh_dmae_desc_setup(struct shdma_chan *schan,
+ struct shdma_desc *sdesc,
+ dma_addr_t src, dma_addr_t dst, size_t *len)
+{
+ struct sh_dmae_desc *sh_desc = container_of(sdesc,
+ struct sh_dmae_desc, shdma_desc);
+
+ if (*len > schan->max_xfer_len)
+ *len = schan->max_xfer_len;
+
+ sh_desc->hw.sar = src;
+ sh_desc->hw.dar = dst;
+ sh_desc->hw.tcr = *len;
+
+ return 0;
+}
+
+static void sh_dmae_halt(struct shdma_chan *schan)
+{
+ struct sh_dmae_chan *sh_chan = container_of(schan, struct sh_dmae_chan,
+ shdma_chan);
+ dmae_halt(sh_chan);
+}
+
+static bool sh_dmae_chan_irq(struct shdma_chan *schan, int irq)
+{
+ struct sh_dmae_chan *sh_chan = container_of(schan, struct sh_dmae_chan,
+ shdma_chan);
+
+ if (!(chcr_read(sh_chan) & CHCR_TE))
+ return false;
+
+ /* DMA stop */
+ dmae_halt(sh_chan);
+
+ return true;
+}
+
+static size_t sh_dmae_get_partial(struct shdma_chan *schan,
+ struct shdma_desc *sdesc)
+{
+ struct sh_dmae_chan *sh_chan = container_of(schan, struct sh_dmae_chan,
+ shdma_chan);
+ struct sh_dmae_desc *sh_desc = container_of(sdesc,
+ struct sh_dmae_desc, shdma_desc);
+ return sh_desc->hw.tcr -
+ (sh_dmae_readl(sh_chan, TCR) << sh_chan->xmit_shift);
+}
+
+/* Called from error IRQ or NMI */
+static bool sh_dmae_reset(struct sh_dmae_device *shdev)
+{
+ bool ret;
+
+ /* halt the dma controller */
+ sh_dmae_ctl_stop(shdev);
+
+ /* We cannot detect which channel caused the error, so we have to reset them all */
+ ret = shdma_reset(&shdev->shdma_dev);
+
+ sh_dmae_rst(shdev);
+
+ return ret;
+}
+
+static irqreturn_t sh_dmae_err(int irq, void *data)
+{
+ struct sh_dmae_device *shdev = data;
+
+ if (!(dmaor_read(shdev) & DMAOR_AE))
+ return IRQ_NONE;
+
+ sh_dmae_reset(shdev);
+ return IRQ_HANDLED;
+}
+
+static bool sh_dmae_desc_completed(struct shdma_chan *schan,
+ struct shdma_desc *sdesc)
+{
+ struct sh_dmae_chan *sh_chan = container_of(schan,
+ struct sh_dmae_chan, shdma_chan);
+ struct sh_dmae_desc *sh_desc = container_of(sdesc,
+ struct sh_dmae_desc, shdma_desc);
+ u32 sar_buf = sh_dmae_readl(sh_chan, SAR);
+ u32 dar_buf = sh_dmae_readl(sh_chan, DAR);
+
+ return (sdesc->direction == DMA_DEV_TO_MEM &&
+ (sh_desc->hw.dar + sh_desc->hw.tcr) == dar_buf) ||
+ (sdesc->direction != DMA_DEV_TO_MEM &&
+ (sh_desc->hw.sar + sh_desc->hw.tcr) == sar_buf);
+}
+
+static bool sh_dmae_nmi_notify(struct sh_dmae_device *shdev)
+{
+ /* Fast path out if NMIF is not asserted for this controller */
+ if ((dmaor_read(shdev) & DMAOR_NMIF) == 0)
+ return false;
+
+ return sh_dmae_reset(shdev);
+}
+
+static int sh_dmae_nmi_handler(struct notifier_block *self,
+ unsigned long cmd, void *data)
+{
+ struct sh_dmae_device *shdev;
+ int ret = NOTIFY_DONE;
+ bool triggered;
+
+ /*
+ * Only concern ourselves with NMI events.
+ *
+ * Normally we would check the die chain value, but as this needs
+ * to be architecture independent, check for NMI context instead.
+ */
+ if (!in_nmi())
+ return NOTIFY_DONE;
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(shdev, &sh_dmae_devices, node) {
+ /*
+ * Only stop if one of the controllers has NMIF asserted; we do not
+ * want to interfere with regular address error
+ * handling or NMI events that don't concern the DMACs.
+ */
+ triggered = sh_dmae_nmi_notify(shdev);
+ if (triggered)
+ ret = NOTIFY_OK;
+ }
+ rcu_read_unlock();
+
+ return ret;
+}
+
+static struct notifier_block sh_dmae_nmi_notifier __read_mostly = {
+ .notifier_call = sh_dmae_nmi_handler,
+
+ /* Run before NMI debug handler and KGDB */
+ .priority = 1,
+};
+
+static int sh_dmae_chan_probe(struct sh_dmae_device *shdev, int id,
+ int irq, unsigned long flags)
+{
+ const struct sh_dmae_channel *chan_pdata = &shdev->pdata->channel[id];
+ struct shdma_dev *sdev = &shdev->shdma_dev;
+ struct platform_device *pdev = to_platform_device(sdev->dma_dev.dev);
+ struct sh_dmae_chan *sh_chan;
+ struct shdma_chan *schan;
+ int err;
+
+ sh_chan = devm_kzalloc(sdev->dma_dev.dev, sizeof(struct sh_dmae_chan),
+ GFP_KERNEL);
+ if (!sh_chan)
+ return -ENOMEM;
+
+ schan = &sh_chan->shdma_chan;
+ schan->max_xfer_len = SH_DMA_TCR_MAX + 1;
+
+ shdma_chan_probe(sdev, schan, id);
+
+ sh_chan->base = shdev->chan_reg + chan_pdata->offset;
+
+ /* set up channel irq */
+ if (pdev->id >= 0)
+ snprintf(sh_chan->dev_id, sizeof(sh_chan->dev_id),
+ "sh-dmae%d.%d", pdev->id, id);
+ else
+ snprintf(sh_chan->dev_id, sizeof(sh_chan->dev_id),
+ "sh-dma%d", id);
+
+ err = shdma_request_irq(schan, irq, flags, sh_chan->dev_id);
+ if (err) {
+ dev_err(sdev->dma_dev.dev,
+ "DMA channel %d request_irq error %d\n",
+ id, err);
+ goto err_no_irq;
+ }
+
+ shdev->chan[id] = sh_chan;
+ return 0;
+
+err_no_irq:
+ /* remove from dmaengine device node */
+ shdma_chan_remove(schan);
+ return err;
+}
+
+static void sh_dmae_chan_remove(struct sh_dmae_device *shdev)
+{
+ struct shdma_chan *schan;
+ int i;
+
+ shdma_for_each_chan(schan, &shdev->shdma_dev, i) {
+ BUG_ON(!schan);
+
+ shdma_chan_remove(schan);
+ }
+}
+
+#ifdef CONFIG_PM
+static int sh_dmae_runtime_suspend(struct device *dev)
+{
+ struct sh_dmae_device *shdev = dev_get_drvdata(dev);
+
+ sh_dmae_ctl_stop(shdev);
+ return 0;
+}
+
+static int sh_dmae_runtime_resume(struct device *dev)
+{
+ struct sh_dmae_device *shdev = dev_get_drvdata(dev);
+
+ return sh_dmae_rst(shdev);
+}
+#endif
+
+#ifdef CONFIG_PM_SLEEP
+static int sh_dmae_suspend(struct device *dev)
+{
+ struct sh_dmae_device *shdev = dev_get_drvdata(dev);
+
+ sh_dmae_ctl_stop(shdev);
+ return 0;
+}
+
+static int sh_dmae_resume(struct device *dev)
+{
+ struct sh_dmae_device *shdev = dev_get_drvdata(dev);
+ int i, ret;
+
+ ret = sh_dmae_rst(shdev);
+ if (ret < 0)
+ dev_err(dev, "Failed to reset!\n");
+
+ for (i = 0; i < shdev->pdata->channel_num; i++) {
+ struct sh_dmae_chan *sh_chan = shdev->chan[i];
+
+ if (!sh_chan->shdma_chan.desc_num)
+ continue;
+
+ if (sh_chan->shdma_chan.slave_id >= 0) {
+ const struct sh_dmae_slave_config *cfg = sh_chan->config;
+ dmae_set_dmars(sh_chan, cfg->mid_rid);
+ dmae_set_chcr(sh_chan, cfg->chcr);
+ } else {
+ dmae_init(sh_chan);
+ }
+ }
+
+ return 0;
+}
+#endif
+
+static const struct dev_pm_ops sh_dmae_pm = {
+ SET_SYSTEM_SLEEP_PM_OPS(sh_dmae_suspend, sh_dmae_resume)
+ SET_RUNTIME_PM_OPS(sh_dmae_runtime_suspend, sh_dmae_runtime_resume,
+ NULL)
+};
+
+static dma_addr_t sh_dmae_slave_addr(struct shdma_chan *schan)
+{
+ struct sh_dmae_chan *sh_chan = container_of(schan,
+ struct sh_dmae_chan, shdma_chan);
+
+ /*
+ * Implicit BUG_ON(!sh_chan->config)
+ * This is an exclusive slave DMA operation; it may only be called after a
+ * successful slave configuration.
+ */
+ return sh_chan->slave_addr;
+}
+
+static struct shdma_desc *sh_dmae_embedded_desc(void *buf, int i)
+{
+ return &((struct sh_dmae_desc *)buf)[i].shdma_desc;
+}
+
+static const struct shdma_ops sh_dmae_shdma_ops = {
+ .desc_completed = sh_dmae_desc_completed,
+ .halt_channel = sh_dmae_halt,
+ .channel_busy = sh_dmae_channel_busy,
+ .slave_addr = sh_dmae_slave_addr,
+ .desc_setup = sh_dmae_desc_setup,
+ .set_slave = sh_dmae_set_slave,
+ .setup_xfer = sh_dmae_setup_xfer,
+ .start_xfer = sh_dmae_start_xfer,
+ .embedded_desc = sh_dmae_embedded_desc,
+ .chan_irq = sh_dmae_chan_irq,
+ .get_partial = sh_dmae_get_partial,
+};
+
+static int sh_dmae_probe(struct platform_device *pdev)
+{
+ const enum dma_slave_buswidth widths =
+ DMA_SLAVE_BUSWIDTH_1_BYTE | DMA_SLAVE_BUSWIDTH_2_BYTES |
+ DMA_SLAVE_BUSWIDTH_4_BYTES | DMA_SLAVE_BUSWIDTH_8_BYTES |
+ DMA_SLAVE_BUSWIDTH_16_BYTES | DMA_SLAVE_BUSWIDTH_32_BYTES;
+ const struct sh_dmae_pdata *pdata;
+ unsigned long chan_flag[SH_DMAE_MAX_CHANNELS] = {};
+ int chan_irq[SH_DMAE_MAX_CHANNELS];
+ unsigned long irqflags = 0;
+ int err, errirq, i, irq_cnt = 0, irqres = 0, irq_cap = 0;
+ struct sh_dmae_device *shdev;
+ struct dma_device *dma_dev;
+ struct resource *chan, *dmars, *errirq_res, *chanirq_res;
+
+ if (pdev->dev.of_node)
+ pdata = of_device_get_match_data(&pdev->dev);
+ else
+ pdata = dev_get_platdata(&pdev->dev);
+
+ /* get platform data */
+ if (!pdata || !pdata->channel_num)
+ return -ENODEV;
+
+ chan = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ /* DMARS area is optional */
+ dmars = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ /*
+ * IRQ resources:
+ * 1. there always must be at least one IRQ IO-resource. On SH4 it is
+ * the error IRQ, in which case it is the only IRQ in this resource:
+ * start == end. If it is the only IRQ resource, all channels also
+ * use the same IRQ.
+ * 2. DMA channel IRQ resources can be specified one per resource or in
+ * ranges (start != end)
+ * 3. iff all events (channels and, optionally, error) on this
+ * controller use the same IRQ, only one IRQ resource can be
+ * specified, otherwise there must be one IRQ per channel, even if
+ * some of them are equal
+ * 4. if all IRQs on this controller are equal or if some specific IRQs
+ * specify IORESOURCE_IRQ_SHAREABLE in their resources, they will be
+ * requested with the IRQF_SHARED flag
+ */
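+ /*
+ * Hypothetical board resource layout following the rules above (sketch
+ * only - the addresses and IRQ numbers are made-up example values):
+ *
+ *   static struct resource sh_dmae_resources[] = {
+ *           DEFINE_RES_MEM(0xfe008020, 0x270),   - channel registers
+ *           DEFINE_RES_MEM(0xfe009000, 0xc),     - DMARS (optional)
+ *           DEFINE_RES_IRQ(34),                  - error IRQ
+ *           { .start = 35, .end = 40, .flags = IORESOURCE_IRQ },
+ *   };
+ */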
+ errirq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ if (!chan || !errirq_res)
+ return -ENODEV;
+
+ shdev = devm_kzalloc(&pdev->dev, sizeof(struct sh_dmae_device),
+ GFP_KERNEL);
+ if (!shdev)
+ return -ENOMEM;
+
+ dma_dev = &shdev->shdma_dev.dma_dev;
+
+ shdev->chan_reg = devm_ioremap_resource(&pdev->dev, chan);
+ if (IS_ERR(shdev->chan_reg))
+ return PTR_ERR(shdev->chan_reg);
+ if (dmars) {
+ shdev->dmars = devm_ioremap_resource(&pdev->dev, dmars);
+ if (IS_ERR(shdev->dmars))
+ return PTR_ERR(shdev->dmars);
+ }
+
+ dma_dev->src_addr_widths = widths;
+ dma_dev->dst_addr_widths = widths;
+ dma_dev->directions = BIT(DMA_MEM_TO_DEV) | BIT(DMA_DEV_TO_MEM);
+ dma_dev->residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;
+
+ if (!pdata->slave_only)
+ dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask);
+ if (pdata->slave && pdata->slave_num)
+ dma_cap_set(DMA_SLAVE, dma_dev->cap_mask);
+
+ /* The default MEMCPY transfer size of 4 bytes requires 4-byte alignment */
+ dma_dev->copy_align = LOG2_DEFAULT_XFER_SIZE;
+
+ shdev->shdma_dev.ops = &sh_dmae_shdma_ops;
+ shdev->shdma_dev.desc_size = sizeof(struct sh_dmae_desc);
+ err = shdma_init(&pdev->dev, &shdev->shdma_dev,
+ pdata->channel_num);
+ if (err < 0)
+ goto eshdma;
+
+ /* platform data */
+ shdev->pdata = pdata;
+
+ if (pdata->chcr_offset)
+ shdev->chcr_offset = pdata->chcr_offset;
+ else
+ shdev->chcr_offset = CHCR;
+
+ if (pdata->chcr_ie_bit)
+ shdev->chcr_ie_bit = pdata->chcr_ie_bit;
+ else
+ shdev->chcr_ie_bit = CHCR_IE;
+
+ platform_set_drvdata(pdev, shdev);
+
+ pm_runtime_enable(&pdev->dev);
+ err = pm_runtime_get_sync(&pdev->dev);
+ if (err < 0)
+ dev_err(&pdev->dev, "%s(): GET = %d\n", __func__, err);
+
+ spin_lock_irq(&sh_dmae_lock);
+ list_add_tail_rcu(&shdev->node, &sh_dmae_devices);
+ spin_unlock_irq(&sh_dmae_lock);
+
+ /* Reset the DMA controller - here only needed as a sanity check */
+ err = sh_dmae_rst(shdev);
+ if (err)
+ goto rst_err;
+
+ if (IS_ENABLED(CONFIG_CPU_SH4) || IS_ENABLED(CONFIG_ARCH_RENESAS)) {
+ chanirq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 1);
+
+ if (!chanirq_res)
+ chanirq_res = errirq_res;
+ else
+ irqres++;
+
+ if (chanirq_res == errirq_res ||
+ (errirq_res->flags & IORESOURCE_BITS) == IORESOURCE_IRQ_SHAREABLE)
+ irqflags = IRQF_SHARED;
+
+ errirq = errirq_res->start;
+
+ err = devm_request_irq(&pdev->dev, errirq, sh_dmae_err,
+ irqflags, "DMAC Address Error", shdev);
+ if (err) {
+ dev_err(&pdev->dev,
+ "DMA failed requesting irq #%d, error %d\n",
+ errirq, err);
+ goto eirq_err;
+ }
+ } else {
+ chanirq_res = errirq_res;
+ }
+
+ if (chanirq_res->start == chanirq_res->end &&
+ !platform_get_resource(pdev, IORESOURCE_IRQ, 1)) {
+ /* Special case - all multiplexed */
+ for (; irq_cnt < pdata->channel_num; irq_cnt++) {
+ if (irq_cnt < SH_DMAE_MAX_CHANNELS) {
+ chan_irq[irq_cnt] = chanirq_res->start;
+ chan_flag[irq_cnt] = IRQF_SHARED;
+ } else {
+ irq_cap = 1;
+ break;
+ }
+ }
+ } else {
+ do {
+ for (i = chanirq_res->start; i <= chanirq_res->end; i++) {
+ if (irq_cnt >= SH_DMAE_MAX_CHANNELS) {
+ irq_cap = 1;
+ break;
+ }
+
+ if ((errirq_res->flags & IORESOURCE_BITS) ==
+ IORESOURCE_IRQ_SHAREABLE)
+ chan_flag[irq_cnt] = IRQF_SHARED;
+ else
+ chan_flag[irq_cnt] = 0;
+ dev_dbg(&pdev->dev,
+ "Found IRQ %d for channel %d\n",
+ i, irq_cnt);
+ chan_irq[irq_cnt++] = i;
+ }
+
+ if (irq_cnt >= SH_DMAE_MAX_CHANNELS)
+ break;
+
+ chanirq_res = platform_get_resource(pdev,
+ IORESOURCE_IRQ, ++irqres);
+ } while (irq_cnt < pdata->channel_num && chanirq_res);
+ }
+
+ /* Create DMA Channel */
+ for (i = 0; i < irq_cnt; i++) {
+ err = sh_dmae_chan_probe(shdev, i, chan_irq[i], chan_flag[i]);
+ if (err)
+ goto chan_probe_err;
+ }
+
+ if (irq_cap)
+ dev_notice(&pdev->dev, "Attempting to register %d DMA "
+ "channels when a maximum of %d are supported.\n",
+ pdata->channel_num, SH_DMAE_MAX_CHANNELS);
+
+ pm_runtime_put(&pdev->dev);
+
+ err = dma_async_device_register(&shdev->shdma_dev.dma_dev);
+ if (err < 0)
+ goto edmadevreg;
+
+ return err;
+
+edmadevreg:
+ pm_runtime_get(&pdev->dev);
+
+chan_probe_err:
+ sh_dmae_chan_remove(shdev);
+
+eirq_err:
+rst_err:
+ spin_lock_irq(&sh_dmae_lock);
+ list_del_rcu(&shdev->node);
+ spin_unlock_irq(&sh_dmae_lock);
+
+ pm_runtime_put(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
+
+ shdma_cleanup(&shdev->shdma_dev);
+eshdma:
+ synchronize_rcu();
+
+ return err;
+}
+
+static int sh_dmae_remove(struct platform_device *pdev)
+{
+ struct sh_dmae_device *shdev = platform_get_drvdata(pdev);
+ struct dma_device *dma_dev = &shdev->shdma_dev.dma_dev;
+
+ dma_async_device_unregister(dma_dev);
+
+ spin_lock_irq(&sh_dmae_lock);
+ list_del_rcu(&shdev->node);
+ spin_unlock_irq(&sh_dmae_lock);
+
+ pm_runtime_disable(&pdev->dev);
+
+ sh_dmae_chan_remove(shdev);
+ shdma_cleanup(&shdev->shdma_dev);
+
+ synchronize_rcu();
+
+ return 0;
+}
+
+static struct platform_driver sh_dmae_driver = {
+ .driver = {
+ .pm = &sh_dmae_pm,
+ .name = SH_DMAE_DRV_NAME,
+ },
+ .remove = sh_dmae_remove,
+};
+
+static int __init sh_dmae_init(void)
+{
+ /* Wire up NMI handling */
+ int err = register_die_notifier(&sh_dmae_nmi_notifier);
+ if (err)
+ return err;
+
+ return platform_driver_probe(&sh_dmae_driver, sh_dmae_probe);
+}
+module_init(sh_dmae_init);
+
+static void __exit sh_dmae_exit(void)
+{
+ platform_driver_unregister(&sh_dmae_driver);
+
+ unregister_die_notifier(&sh_dmae_nmi_notifier);
+}
+module_exit(sh_dmae_exit);
+
+MODULE_AUTHOR("Nobuhiro Iwamatsu <iwamatsu.nobuhiro@renesas.com>");
+MODULE_DESCRIPTION("Renesas SH DMA Engine driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:" SH_DMAE_DRV_NAME);
diff --git a/drivers/dma/sh/usb-dmac.c b/drivers/dma/sh/usb-dmac.c
new file mode 100644
index 000000000..5edaeb89d
--- /dev/null
+++ b/drivers/dma/sh/usb-dmac.c
@@ -0,0 +1,914 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Renesas USB DMA Controller Driver
+ *
+ * Copyright (C) 2015 Renesas Electronics Corporation
+ *
+ * based on rcar-dmac.c
+ * Copyright (C) 2014 Renesas Electronics Inc.
+ * Author: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+ */
+
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmaengine.h>
+#include <linux/interrupt.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_dma.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+
+#include "../dmaengine.h"
+#include "../virt-dma.h"
+
+/*
+ * struct usb_dmac_sg - Descriptor for a hardware transfer
+ * @mem_addr: memory address
+ * @size: transfer size in bytes
+ */
+struct usb_dmac_sg {
+ dma_addr_t mem_addr;
+ u32 size;
+};
+
+/*
+ * struct usb_dmac_desc - USB DMA Transfer Descriptor
+ * @vd: base virtual channel DMA transaction descriptor
+ * @direction: direction of the DMA transfer
+ * @sg_allocated_len: length of allocated sg
+ * @sg_len: length of sg
+ * @sg_index: index of sg
+ * @residue: residue after the DMAC completed a transfer
+ * @node: node for desc_got and desc_freed
+ * @done_cookie: cookie after the DMAC completed a transfer
+ * @sg: information for the transfer
+ */
+struct usb_dmac_desc {
+ struct virt_dma_desc vd;
+ enum dma_transfer_direction direction;
+ unsigned int sg_allocated_len;
+ unsigned int sg_len;
+ unsigned int sg_index;
+ u32 residue;
+ struct list_head node;
+ dma_cookie_t done_cookie;
+ struct usb_dmac_sg sg[];
+};
+
+#define to_usb_dmac_desc(vd) container_of(vd, struct usb_dmac_desc, vd)
+
+/*
+ * struct usb_dmac_chan - USB DMA Controller Channel
+ * @vc: base virtual DMA channel object
+ * @iomem: channel I/O memory base
+ * @index: index of this channel in the controller
+ * @irq: irq number of this channel
+ * @desc: the current descriptor
+ * @descs_allocated: number of descriptors allocated
+ * @desc_got: got descriptors
+ * @desc_freed: freed descriptors after the DMAC completed a transfer
+ */
+struct usb_dmac_chan {
+ struct virt_dma_chan vc;
+ void __iomem *iomem;
+ unsigned int index;
+ int irq;
+ struct usb_dmac_desc *desc;
+ int descs_allocated;
+ struct list_head desc_got;
+ struct list_head desc_freed;
+};
+
+#define to_usb_dmac_chan(c) container_of(c, struct usb_dmac_chan, vc.chan)
+
+/*
+ * struct usb_dmac - USB DMA Controller
+ * @engine: base DMA engine object
+ * @dev: the hardware device
+ * @iomem: remapped I/O memory base
+ * @n_channels: number of available channels
+ * @channels: array of DMAC channels
+ */
+struct usb_dmac {
+ struct dma_device engine;
+ struct device *dev;
+ void __iomem *iomem;
+
+ unsigned int n_channels;
+ struct usb_dmac_chan *channels;
+};
+
+#define to_usb_dmac(d) container_of(d, struct usb_dmac, engine)
+
+/* -----------------------------------------------------------------------------
+ * Registers
+ */
+
+#define USB_DMAC_CHAN_OFFSET(i) (0x20 + 0x20 * (i))
+
+#define USB_DMASWR 0x0008
+#define USB_DMASWR_SWR (1 << 0)
+#define USB_DMAOR 0x0060
+#define USB_DMAOR_AE (1 << 1)
+#define USB_DMAOR_DME (1 << 0)
+
+#define USB_DMASAR 0x0000
+#define USB_DMADAR 0x0004
+#define USB_DMATCR 0x0008
+#define USB_DMATCR_MASK 0x00ffffff
+#define USB_DMACHCR 0x0014
+#define USB_DMACHCR_FTE (1 << 24)
+#define USB_DMACHCR_NULLE (1 << 16)
+#define USB_DMACHCR_NULL (1 << 12)
+#define USB_DMACHCR_TS_8B ((0 << 7) | (0 << 6))
+#define USB_DMACHCR_TS_16B ((0 << 7) | (1 << 6))
+#define USB_DMACHCR_TS_32B ((1 << 7) | (0 << 6))
+#define USB_DMACHCR_IE (1 << 5)
+#define USB_DMACHCR_SP (1 << 2)
+#define USB_DMACHCR_TE (1 << 1)
+#define USB_DMACHCR_DE (1 << 0)
+#define USB_DMATEND 0x0018
+
+/* Hardcode the xfer_shift to 5 (32 bytes) */
+#define USB_DMAC_XFER_SHIFT 5
+#define USB_DMAC_XFER_SIZE (1 << USB_DMAC_XFER_SHIFT)
+#define USB_DMAC_CHCR_TS USB_DMACHCR_TS_32B
+#define USB_DMAC_SLAVE_BUSWIDTH DMA_SLAVE_BUSWIDTH_32_BYTES
+
+/* for descriptors */
+#define USB_DMAC_INITIAL_NR_DESC 16
+#define USB_DMAC_INITIAL_NR_SG 8
+
+/* -----------------------------------------------------------------------------
+ * Device access
+ */
+
+static void usb_dmac_write(struct usb_dmac *dmac, u32 reg, u32 data)
+{
+ writel(data, dmac->iomem + reg);
+}
+
+static u32 usb_dmac_read(struct usb_dmac *dmac, u32 reg)
+{
+ return readl(dmac->iomem + reg);
+}
+
+static u32 usb_dmac_chan_read(struct usb_dmac_chan *chan, u32 reg)
+{
+ return readl(chan->iomem + reg);
+}
+
+static void usb_dmac_chan_write(struct usb_dmac_chan *chan, u32 reg, u32 data)
+{
+ writel(data, chan->iomem + reg);
+}
+
+/* -----------------------------------------------------------------------------
+ * Initialization and configuration
+ */
+
+static bool usb_dmac_chan_is_busy(struct usb_dmac_chan *chan)
+{
+ u32 chcr = usb_dmac_chan_read(chan, USB_DMACHCR);
+
+ return (chcr & (USB_DMACHCR_DE | USB_DMACHCR_TE)) == USB_DMACHCR_DE;
+}
+
+static u32 usb_dmac_calc_tend(u32 size)
+{
+ /*
+ * Please refer to the Figure "Example of Final Transaction Valid
+ * Data Transfer Enable (EDTEN) Setting" in the data sheet.
+ */
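+ /*
+ * Worked example: size = 36 leaves 36 % 32 = 4 valid bytes in the final
+ * 32-byte transaction, so the mask is 0xffffffff << 28 = 0xf0000000; a
+ * size that is a multiple of 32 yields 0xffffffff.
+ */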
+ return 0xffffffff << (32 - (size % USB_DMAC_XFER_SIZE ? :
+ USB_DMAC_XFER_SIZE));
+}
+
+/* This function is called with vc.lock held */
+static void usb_dmac_chan_start_sg(struct usb_dmac_chan *chan,
+ unsigned int index)
+{
+ struct usb_dmac_desc *desc = chan->desc;
+ struct usb_dmac_sg *sg = desc->sg + index;
+ dma_addr_t src_addr = 0, dst_addr = 0;
+
+ WARN_ON_ONCE(usb_dmac_chan_is_busy(chan));
+
+ if (desc->direction == DMA_DEV_TO_MEM)
+ dst_addr = sg->mem_addr;
+ else
+ src_addr = sg->mem_addr;
+
+ dev_dbg(chan->vc.chan.device->dev,
+ "chan%u: queue sg %p: %u@%pad -> %pad\n",
+ chan->index, sg, sg->size, &src_addr, &dst_addr);
+
+ usb_dmac_chan_write(chan, USB_DMASAR, src_addr & 0xffffffff);
+ usb_dmac_chan_write(chan, USB_DMADAR, dst_addr & 0xffffffff);
+ usb_dmac_chan_write(chan, USB_DMATCR,
+ DIV_ROUND_UP(sg->size, USB_DMAC_XFER_SIZE));
+ usb_dmac_chan_write(chan, USB_DMATEND, usb_dmac_calc_tend(sg->size));
+
+ usb_dmac_chan_write(chan, USB_DMACHCR, USB_DMAC_CHCR_TS |
+ USB_DMACHCR_NULLE | USB_DMACHCR_IE | USB_DMACHCR_DE);
+}
+
+/* This function is called with vc.lock held */
+static void usb_dmac_chan_start_desc(struct usb_dmac_chan *chan)
+{
+ struct virt_dma_desc *vd;
+
+ vd = vchan_next_desc(&chan->vc);
+ if (!vd) {
+ chan->desc = NULL;
+ return;
+ }
+
+ /*
+ * Remove this request from vc->desc_issued. Otherwise, this driver
+ * would get the same descriptor back from vchan_next_desc() after the
+ * transfer has completed.
+ */
+ list_del(&vd->node);
+
+ chan->desc = to_usb_dmac_desc(vd);
+ chan->desc->sg_index = 0;
+ usb_dmac_chan_start_sg(chan, 0);
+}
+
+static int usb_dmac_init(struct usb_dmac *dmac)
+{
+ u16 dmaor;
+
+ /* Clear all channels and enable the DMAC globally. */
+ usb_dmac_write(dmac, USB_DMAOR, USB_DMAOR_DME);
+
+ dmaor = usb_dmac_read(dmac, USB_DMAOR);
+ if ((dmaor & (USB_DMAOR_AE | USB_DMAOR_DME)) != USB_DMAOR_DME) {
+ dev_warn(dmac->dev, "DMAOR initialization failed.\n");
+ return -EIO;
+ }
+
+ return 0;
+}
+
+/* -----------------------------------------------------------------------------
+ * Descriptor allocation and freeing
+ */
+static int usb_dmac_desc_alloc(struct usb_dmac_chan *chan, unsigned int sg_len,
+ gfp_t gfp)
+{
+ struct usb_dmac_desc *desc;
+ unsigned long flags;
+
+ desc = kzalloc(struct_size(desc, sg, sg_len), gfp);
+ if (!desc)
+ return -ENOMEM;
+
+ desc->sg_allocated_len = sg_len;
+ INIT_LIST_HEAD(&desc->node);
+
+ spin_lock_irqsave(&chan->vc.lock, flags);
+ list_add_tail(&desc->node, &chan->desc_freed);
+ spin_unlock_irqrestore(&chan->vc.lock, flags);
+
+ return 0;
+}
+
+static void usb_dmac_desc_free(struct usb_dmac_chan *chan)
+{
+ struct usb_dmac_desc *desc, *_desc;
+ LIST_HEAD(list);
+
+ list_splice_init(&chan->desc_freed, &list);
+ list_splice_init(&chan->desc_got, &list);
+
+ list_for_each_entry_safe(desc, _desc, &list, node) {
+ list_del(&desc->node);
+ kfree(desc);
+ }
+ chan->descs_allocated = 0;
+}
+
+static struct usb_dmac_desc *usb_dmac_desc_get(struct usb_dmac_chan *chan,
+ unsigned int sg_len, gfp_t gfp)
+{
+ struct usb_dmac_desc *desc = NULL;
+ unsigned long flags;
+
+ /* Get a freed descriptor */
+ spin_lock_irqsave(&chan->vc.lock, flags);
+ list_for_each_entry(desc, &chan->desc_freed, node) {
+ if (sg_len <= desc->sg_allocated_len) {
+ list_move_tail(&desc->node, &chan->desc_got);
+ spin_unlock_irqrestore(&chan->vc.lock, flags);
+ return desc;
+ }
+ }
+ spin_unlock_irqrestore(&chan->vc.lock, flags);
+
+ /* Allocate a new descriptor */
+ if (!usb_dmac_desc_alloc(chan, sg_len, gfp)) {
+ /* The newly allocated descriptor was added to the tail of the list */
+ spin_lock_irqsave(&chan->vc.lock, flags);
+ desc = list_last_entry(&chan->desc_freed, struct usb_dmac_desc,
+ node);
+ list_move_tail(&desc->node, &chan->desc_got);
+ spin_unlock_irqrestore(&chan->vc.lock, flags);
+ return desc;
+ }
+
+ return NULL;
+}
+
+static void usb_dmac_desc_put(struct usb_dmac_chan *chan,
+ struct usb_dmac_desc *desc)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&chan->vc.lock, flags);
+ list_move_tail(&desc->node, &chan->desc_freed);
+ spin_unlock_irqrestore(&chan->vc.lock, flags);
+}
+
+/* -----------------------------------------------------------------------------
+ * Stop and reset
+ */
+
+static void usb_dmac_soft_reset(struct usb_dmac_chan *uchan)
+{
+ struct dma_chan *chan = &uchan->vc.chan;
+ struct usb_dmac *dmac = to_usb_dmac(chan->device);
+ int i;
+
+ /* Don't issue a soft reset if any of the channels is busy */
+ for (i = 0; i < dmac->n_channels; ++i) {
+ if (usb_dmac_chan_is_busy(uchan))
+ return;
+ }
+
+ usb_dmac_write(dmac, USB_DMAOR, 0);
+ usb_dmac_write(dmac, USB_DMASWR, USB_DMASWR_SWR);
+ udelay(100);
+ usb_dmac_write(dmac, USB_DMASWR, 0);
+ usb_dmac_write(dmac, USB_DMAOR, 1);
+}
+
+static void usb_dmac_chan_halt(struct usb_dmac_chan *chan)
+{
+ u32 chcr = usb_dmac_chan_read(chan, USB_DMACHCR);
+
+ chcr &= ~(USB_DMACHCR_IE | USB_DMACHCR_TE | USB_DMACHCR_DE);
+ usb_dmac_chan_write(chan, USB_DMACHCR, chcr);
+
+ usb_dmac_soft_reset(chan);
+}
+
+static void usb_dmac_stop(struct usb_dmac *dmac)
+{
+ usb_dmac_write(dmac, USB_DMAOR, 0);
+}
+
+/* -----------------------------------------------------------------------------
+ * DMA engine operations
+ */
+
+static int usb_dmac_alloc_chan_resources(struct dma_chan *chan)
+{
+ struct usb_dmac_chan *uchan = to_usb_dmac_chan(chan);
+ int ret;
+
+ while (uchan->descs_allocated < USB_DMAC_INITIAL_NR_DESC) {
+ ret = usb_dmac_desc_alloc(uchan, USB_DMAC_INITIAL_NR_SG,
+ GFP_KERNEL);
+ if (ret < 0) {
+ usb_dmac_desc_free(uchan);
+ return ret;
+ }
+ uchan->descs_allocated++;
+ }
+
+ return pm_runtime_get_sync(chan->device->dev);
+}
+
+static void usb_dmac_free_chan_resources(struct dma_chan *chan)
+{
+ struct usb_dmac_chan *uchan = to_usb_dmac_chan(chan);
+ unsigned long flags;
+
+ /* Protect against ISR */
+ spin_lock_irqsave(&uchan->vc.lock, flags);
+ usb_dmac_chan_halt(uchan);
+ spin_unlock_irqrestore(&uchan->vc.lock, flags);
+
+ usb_dmac_desc_free(uchan);
+ vchan_free_chan_resources(&uchan->vc);
+
+ pm_runtime_put(chan->device->dev);
+}
+
+static struct dma_async_tx_descriptor *
+usb_dmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
+ unsigned int sg_len, enum dma_transfer_direction dir,
+ unsigned long dma_flags, void *context)
+{
+ struct usb_dmac_chan *uchan = to_usb_dmac_chan(chan);
+ struct usb_dmac_desc *desc;
+ struct scatterlist *sg;
+ int i;
+
+ if (!sg_len) {
+ dev_warn(chan->device->dev,
+ "%s: bad parameter: len=%d\n", __func__, sg_len);
+ return NULL;
+ }
+
+ desc = usb_dmac_desc_get(uchan, sg_len, GFP_NOWAIT);
+ if (!desc)
+ return NULL;
+
+ desc->direction = dir;
+ desc->sg_len = sg_len;
+ for_each_sg(sgl, sg, sg_len, i) {
+ desc->sg[i].mem_addr = sg_dma_address(sg);
+ desc->sg[i].size = sg_dma_len(sg);
+ }
+
+ return vchan_tx_prep(&uchan->vc, &desc->vd, dma_flags);
+}
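+/*
+ * Illustrative consumer-side sketch (not part of this driver); "desc" and
+ * "done_cb" are hypothetical names:
+ *
+ *   desc = dmaengine_prep_slave_sg(chan, sgl, sg_len, DMA_DEV_TO_MEM,
+ *                                  DMA_PREP_INTERRUPT);
+ *   if (desc) {
+ *           desc->callback = done_cb;
+ *           dmaengine_submit(desc);
+ *           dma_async_issue_pending(chan);
+ *   }
+ */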
+
+static int usb_dmac_chan_terminate_all(struct dma_chan *chan)
+{
+ struct usb_dmac_chan *uchan = to_usb_dmac_chan(chan);
+ struct usb_dmac_desc *desc, *_desc;
+ unsigned long flags;
+ LIST_HEAD(head);
+ LIST_HEAD(list);
+
+ spin_lock_irqsave(&uchan->vc.lock, flags);
+ usb_dmac_chan_halt(uchan);
+ vchan_get_all_descriptors(&uchan->vc, &head);
+ if (uchan->desc)
+ uchan->desc = NULL;
+ list_splice_init(&uchan->desc_got, &list);
+ list_for_each_entry_safe(desc, _desc, &list, node)
+ list_move_tail(&desc->node, &uchan->desc_freed);
+ spin_unlock_irqrestore(&uchan->vc.lock, flags);
+ vchan_dma_desc_free_list(&uchan->vc, &head);
+
+ return 0;
+}
+
+static unsigned int usb_dmac_get_current_residue(struct usb_dmac_chan *chan,
+ struct usb_dmac_desc *desc,
+ unsigned int sg_index)
+{
+ struct usb_dmac_sg *sg = desc->sg + sg_index;
+ u32 mem_addr = sg->mem_addr & 0xffffffff;
+ unsigned int residue = sg->size;
+
+ /*
+ * We cannot use USB_DMATCR to calculate the residue, because its value
+ * is not suitable for that calculation.
+ */
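+ /*
+ * Worked example (DMA_DEV_TO_MEM, hypothetical values): mem_addr =
+ * 0x40000000, size = 128, USB_DMADAR = 0x40000020 -> residue =
+ * 128 - 0x20 = 96 bytes still to be transferred.
+ */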
+ if (desc->direction == DMA_DEV_TO_MEM)
+ residue -= usb_dmac_chan_read(chan, USB_DMADAR) - mem_addr;
+ else
+ residue -= usb_dmac_chan_read(chan, USB_DMASAR) - mem_addr;
+
+ return residue;
+}
+
+static u32 usb_dmac_chan_get_residue_if_complete(struct usb_dmac_chan *chan,
+ dma_cookie_t cookie)
+{
+ struct usb_dmac_desc *desc;
+ u32 residue = 0;
+
+ list_for_each_entry_reverse(desc, &chan->desc_freed, node) {
+ if (desc->done_cookie == cookie) {
+ residue = desc->residue;
+ break;
+ }
+ }
+
+ return residue;
+}
+
+static u32 usb_dmac_chan_get_residue(struct usb_dmac_chan *chan,
+ dma_cookie_t cookie)
+{
+ u32 residue = 0;
+ struct virt_dma_desc *vd;
+ struct usb_dmac_desc *desc = chan->desc;
+ int i;
+
+ if (!desc) {
+ vd = vchan_find_desc(&chan->vc, cookie);
+ if (!vd)
+ return 0;
+ desc = to_usb_dmac_desc(vd);
+ }
+
+ /* Compute the size of all usb_dmac_sg still to be transferred */
+ for (i = desc->sg_index + 1; i < desc->sg_len; i++)
+ residue += desc->sg[i].size;
+
+ /* Add the residue for the current sg */
+ residue += usb_dmac_get_current_residue(chan, desc, desc->sg_index);
+
+ return residue;
+}
+
+static enum dma_status usb_dmac_tx_status(struct dma_chan *chan,
+ dma_cookie_t cookie,
+ struct dma_tx_state *txstate)
+{
+ struct usb_dmac_chan *uchan = to_usb_dmac_chan(chan);
+ enum dma_status status;
+ unsigned int residue = 0;
+ unsigned long flags;
+
+ status = dma_cookie_status(chan, cookie, txstate);
+ /* A client driver may still want the residue even after DMA_COMPLETE */
+ if (!txstate)
+ return status;
+
+ spin_lock_irqsave(&uchan->vc.lock, flags);
+ if (status == DMA_COMPLETE)
+ residue = usb_dmac_chan_get_residue_if_complete(uchan, cookie);
+ else
+ residue = usb_dmac_chan_get_residue(uchan, cookie);
+ spin_unlock_irqrestore(&uchan->vc.lock, flags);
+
+ dma_set_residue(txstate, residue);
+
+ return status;
+}
+
+static void usb_dmac_issue_pending(struct dma_chan *chan)
+{
+ struct usb_dmac_chan *uchan = to_usb_dmac_chan(chan);
+ unsigned long flags;
+
+ spin_lock_irqsave(&uchan->vc.lock, flags);
+ if (vchan_issue_pending(&uchan->vc) && !uchan->desc)
+ usb_dmac_chan_start_desc(uchan);
+ spin_unlock_irqrestore(&uchan->vc.lock, flags);
+}
+
+static void usb_dmac_virt_desc_free(struct virt_dma_desc *vd)
+{
+ struct usb_dmac_desc *desc = to_usb_dmac_desc(vd);
+ struct usb_dmac_chan *chan = to_usb_dmac_chan(vd->tx.chan);
+
+ usb_dmac_desc_put(chan, desc);
+}
+
+/* -----------------------------------------------------------------------------
+ * IRQ handling
+ */
+
+static void usb_dmac_isr_transfer_end(struct usb_dmac_chan *chan)
+{
+ struct usb_dmac_desc *desc = chan->desc;
+
+ BUG_ON(!desc);
+
+ if (++desc->sg_index < desc->sg_len) {
+ usb_dmac_chan_start_sg(chan, desc->sg_index);
+ } else {
+ desc->residue = usb_dmac_get_current_residue(chan, desc,
+ desc->sg_index - 1);
+ desc->done_cookie = desc->vd.tx.cookie;
+ desc->vd.tx_result.result = DMA_TRANS_NOERROR;
+ desc->vd.tx_result.residue = desc->residue;
+ vchan_cookie_complete(&desc->vd);
+
+ /* Start the next transfer if there is another descriptor queued */
+ usb_dmac_chan_start_desc(chan);
+ }
+}
+
+static irqreturn_t usb_dmac_isr_channel(int irq, void *dev)
+{
+ struct usb_dmac_chan *chan = dev;
+ irqreturn_t ret = IRQ_NONE;
+ u32 mask = 0;
+ u32 chcr;
+ bool xfer_end = false;
+
+ spin_lock(&chan->vc.lock);
+
+ chcr = usb_dmac_chan_read(chan, USB_DMACHCR);
+ if (chcr & (USB_DMACHCR_TE | USB_DMACHCR_SP)) {
+ mask |= USB_DMACHCR_DE | USB_DMACHCR_TE | USB_DMACHCR_SP;
+ if (chcr & USB_DMACHCR_DE)
+ xfer_end = true;
+ ret |= IRQ_HANDLED;
+ }
+ if (chcr & USB_DMACHCR_NULL) {
+ /* A TE interrupt will be raised after we set FTE */
+ mask |= USB_DMACHCR_NULL;
+ chcr |= USB_DMACHCR_FTE;
+ ret |= IRQ_HANDLED;
+ }
+ if (mask)
+ usb_dmac_chan_write(chan, USB_DMACHCR, chcr & ~mask);
+
+ if (xfer_end)
+ usb_dmac_isr_transfer_end(chan);
+
+ spin_unlock(&chan->vc.lock);
+
+ return ret;
+}
+
+/* -----------------------------------------------------------------------------
+ * OF xlate and channel filter
+ */
+
+static bool usb_dmac_chan_filter(struct dma_chan *chan, void *arg)
+{
+ struct usb_dmac_chan *uchan = to_usb_dmac_chan(chan);
+ struct of_phandle_args *dma_spec = arg;
+
+ /* Each USB-DMAC channel is tied to a fixed USB controller FIFO */
+ if (uchan->index != dma_spec->args[0])
+ return false;
+
+ return true;
+}
+
+static struct dma_chan *usb_dmac_of_xlate(struct of_phandle_args *dma_spec,
+ struct of_dma *ofdma)
+{
+ struct dma_chan *chan;
+ dma_cap_mask_t mask;
+
+ if (dma_spec->args_count != 1)
+ return NULL;
+
+ /* Only slave DMA channels can be allocated via DT */
+ dma_cap_zero(mask);
+ dma_cap_set(DMA_SLAVE, mask);
+
+ chan = __dma_request_channel(&mask, usb_dmac_chan_filter, dma_spec,
+ ofdma->of_node);
+ if (!chan)
+ return NULL;
+
+ return chan;
+}
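+/*
+ * Hypothetical device tree consumer sketch (the single cell selects the
+ * channel index matched by usb_dmac_chan_filter() above):
+ *
+ *   dmas = <&usb_dmac0 0>, <&usb_dmac0 1>;
+ *   dma-names = "ch0", "ch1";
+ */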
+
+/* -----------------------------------------------------------------------------
+ * Power management
+ */
+
+#ifdef CONFIG_PM
+static int usb_dmac_runtime_suspend(struct device *dev)
+{
+ struct usb_dmac *dmac = dev_get_drvdata(dev);
+ int i;
+
+ for (i = 0; i < dmac->n_channels; ++i) {
+ if (!dmac->channels[i].iomem)
+ break;
+ usb_dmac_chan_halt(&dmac->channels[i]);
+ }
+
+ return 0;
+}
+
+static int usb_dmac_runtime_resume(struct device *dev)
+{
+ struct usb_dmac *dmac = dev_get_drvdata(dev);
+
+ return usb_dmac_init(dmac);
+}
+#endif /* CONFIG_PM */
+
+static const struct dev_pm_ops usb_dmac_pm = {
+ SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+ pm_runtime_force_resume)
+ SET_RUNTIME_PM_OPS(usb_dmac_runtime_suspend, usb_dmac_runtime_resume,
+ NULL)
+};
+
+/* -----------------------------------------------------------------------------
+ * Probe and remove
+ */
+
+static int usb_dmac_chan_probe(struct usb_dmac *dmac,
+ struct usb_dmac_chan *uchan,
+ unsigned int index)
+{
+ struct platform_device *pdev = to_platform_device(dmac->dev);
+ char pdev_irqname[5];
+ char *irqname;
+ int ret;
+
+ uchan->index = index;
+ uchan->iomem = dmac->iomem + USB_DMAC_CHAN_OFFSET(index);
+
+ /* Request the channel interrupt. */
+ sprintf(pdev_irqname, "ch%u", index);
+ uchan->irq = platform_get_irq_byname(pdev, pdev_irqname);
+ if (uchan->irq < 0)
+ return -ENODEV;
+
+ irqname = devm_kasprintf(dmac->dev, GFP_KERNEL, "%s:%u",
+ dev_name(dmac->dev), index);
+ if (!irqname)
+ return -ENOMEM;
+
+ ret = devm_request_irq(dmac->dev, uchan->irq, usb_dmac_isr_channel,
+ IRQF_SHARED, irqname, uchan);
+ if (ret) {
+ dev_err(dmac->dev, "failed to request IRQ %u (%d)\n",
+ uchan->irq, ret);
+ return ret;
+ }
+
+ uchan->vc.desc_free = usb_dmac_virt_desc_free;
+ vchan_init(&uchan->vc, &dmac->engine);
+ INIT_LIST_HEAD(&uchan->desc_freed);
+ INIT_LIST_HEAD(&uchan->desc_got);
+
+ return 0;
+}
+
+static int usb_dmac_parse_of(struct device *dev, struct usb_dmac *dmac)
+{
+ struct device_node *np = dev->of_node;
+ int ret;
+
+ ret = of_property_read_u32(np, "dma-channels", &dmac->n_channels);
+ if (ret < 0) {
+ dev_err(dev, "unable to read dma-channels property\n");
+ return ret;
+ }
+
+ if (dmac->n_channels <= 0 || dmac->n_channels >= 100) {
+ dev_err(dev, "invalid number of channels %u\n",
+ dmac->n_channels);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int usb_dmac_probe(struct platform_device *pdev)
+{
+ const enum dma_slave_buswidth widths = USB_DMAC_SLAVE_BUSWIDTH;
+ struct dma_device *engine;
+ struct usb_dmac *dmac;
+ struct resource *mem;
+ unsigned int i;
+ int ret;
+
+ dmac = devm_kzalloc(&pdev->dev, sizeof(*dmac), GFP_KERNEL);
+ if (!dmac)
+ return -ENOMEM;
+
+ dmac->dev = &pdev->dev;
+ platform_set_drvdata(pdev, dmac);
+
+ ret = usb_dmac_parse_of(&pdev->dev, dmac);
+ if (ret < 0)
+ return ret;
+
+ dmac->channels = devm_kcalloc(&pdev->dev, dmac->n_channels,
+ sizeof(*dmac->channels), GFP_KERNEL);
+ if (!dmac->channels)
+ return -ENOMEM;
+
+ /* Request resources. */
+ mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ dmac->iomem = devm_ioremap_resource(&pdev->dev, mem);
+ if (IS_ERR(dmac->iomem))
+ return PTR_ERR(dmac->iomem);
+
+ /* Enable runtime PM and initialize the device. */
+ pm_runtime_enable(&pdev->dev);
+ ret = pm_runtime_get_sync(&pdev->dev);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "runtime PM get sync failed (%d)\n", ret);
+ goto error_pm;
+ }
+
+ ret = usb_dmac_init(dmac);
+
+ if (ret) {
+ dev_err(&pdev->dev, "failed to reset device\n");
+ goto error;
+ }
+
+ /* Initialize the channels. */
+ INIT_LIST_HEAD(&dmac->engine.channels);
+
+ for (i = 0; i < dmac->n_channels; ++i) {
+ ret = usb_dmac_chan_probe(dmac, &dmac->channels[i], i);
+ if (ret < 0)
+ goto error;
+ }
+
+ /* Register the DMAC as a DMA provider for DT. */
+ ret = of_dma_controller_register(pdev->dev.of_node, usb_dmac_of_xlate,
+ NULL);
+ if (ret < 0)
+ goto error;
+
+ /*
+ * Register the DMA engine device.
+ *
+ * Default transfer size of 32 bytes requires 32-byte alignment.
+ */
+ engine = &dmac->engine;
+ dma_cap_set(DMA_SLAVE, engine->cap_mask);
+
+ engine->dev = &pdev->dev;
+
+ engine->src_addr_widths = widths;
+ engine->dst_addr_widths = widths;
+ engine->directions = BIT(DMA_MEM_TO_DEV) | BIT(DMA_DEV_TO_MEM);
+ engine->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
+
+ engine->device_alloc_chan_resources = usb_dmac_alloc_chan_resources;
+ engine->device_free_chan_resources = usb_dmac_free_chan_resources;
+ engine->device_prep_slave_sg = usb_dmac_prep_slave_sg;
+ engine->device_terminate_all = usb_dmac_chan_terminate_all;
+ engine->device_tx_status = usb_dmac_tx_status;
+ engine->device_issue_pending = usb_dmac_issue_pending;
+
+ ret = dma_async_device_register(engine);
+ if (ret < 0)
+ goto error;
+
+ pm_runtime_put(&pdev->dev);
+ return 0;
+
+error:
+ of_dma_controller_free(pdev->dev.of_node);
+error_pm:
+ pm_runtime_put(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
+ return ret;
+}
+
+static void usb_dmac_chan_remove(struct usb_dmac *dmac,
+ struct usb_dmac_chan *uchan)
+{
+ usb_dmac_chan_halt(uchan);
+ devm_free_irq(dmac->dev, uchan->irq, uchan);
+}
+
+static int usb_dmac_remove(struct platform_device *pdev)
+{
+ struct usb_dmac *dmac = platform_get_drvdata(pdev);
+ int i;
+
+ for (i = 0; i < dmac->n_channels; ++i)
+ usb_dmac_chan_remove(dmac, &dmac->channels[i]);
+ of_dma_controller_free(pdev->dev.of_node);
+ dma_async_device_unregister(&dmac->engine);
+
+ pm_runtime_disable(&pdev->dev);
+
+ return 0;
+}
+
+static void usb_dmac_shutdown(struct platform_device *pdev)
+{
+ struct usb_dmac *dmac = platform_get_drvdata(pdev);
+
+ usb_dmac_stop(dmac);
+}
+
+static const struct of_device_id usb_dmac_of_ids[] = {
+ { .compatible = "renesas,usb-dmac", },
+ { /* Sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, usb_dmac_of_ids);
+
+static struct platform_driver usb_dmac_driver = {
+ .driver = {
+ .pm = &usb_dmac_pm,
+ .name = "usb-dmac",
+ .of_match_table = usb_dmac_of_ids,
+ },
+ .probe = usb_dmac_probe,
+ .remove = usb_dmac_remove,
+ .shutdown = usb_dmac_shutdown,
+};
+
+module_platform_driver(usb_dmac_driver);
+
+MODULE_DESCRIPTION("Renesas USB DMA Controller Driver");
+MODULE_AUTHOR("Yoshihiro Shimoda <yoshihiro.shimoda.uh@renesas.com>");
+MODULE_LICENSE("GPL v2");