author    Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-07 18:49:45 +0000
committer Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-07 18:49:45 +0000
commit    2c3c1048746a4622d8c89a29670120dc8fab93c4 (patch)
tree      848558de17fb3008cdf4d861b01ac7781903ce39 /drivers/dma/xilinx
parent    Initial commit. (diff)
Adding upstream version 6.1.76.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'drivers/dma/xilinx')
-rw-r--r--  drivers/dma/xilinx/Makefile          4
-rw-r--r--  drivers/dma/xilinx/xilinx_dma.c   3229
-rw-r--r--  drivers/dma/xilinx/xilinx_dpdma.c 1777
-rw-r--r--  drivers/dma/xilinx/zynqmp_dma.c   1182
4 files changed, 6192 insertions, 0 deletions
diff --git a/drivers/dma/xilinx/Makefile b/drivers/dma/xilinx/Makefile
new file mode 100644
index 000000000..767bb45f6
--- /dev/null
+++ b/drivers/dma/xilinx/Makefile
@@ -0,0 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
+obj-$(CONFIG_XILINX_DMA) += xilinx_dma.o
+obj-$(CONFIG_XILINX_ZYNQMP_DMA) += zynqmp_dma.o
+obj-$(CONFIG_XILINX_ZYNQMP_DPDMA) += xilinx_dpdma.o
diff --git a/drivers/dma/xilinx/xilinx_dma.c b/drivers/dma/xilinx/xilinx_dma.c
new file mode 100644
index 000000000..766017570
--- /dev/null
+++ b/drivers/dma/xilinx/xilinx_dma.c
@@ -0,0 +1,3229 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * DMA driver for Xilinx Video DMA Engine
+ *
+ * Copyright (C) 2010-2014 Xilinx, Inc. All rights reserved.
+ *
+ * Based on the Freescale DMA driver.
+ *
+ * Description:
+ * The AXI Video Direct Memory Access (AXI VDMA) core is a soft Xilinx IP
+ * core that provides high-bandwidth direct memory access between memory
+ * and AXI4-Stream type video target peripherals. The core provides efficient
+ * two dimensional DMA operations with independent asynchronous read (S2MM)
+ * and write (MM2S) channel operation. It can be configured to have either
+ * one channel or two channels. If configured as two channels, one is to
+ * transmit to the video device (MM2S) and another is to receive from the
+ * video device (S2MM). Initialization, status, interrupt and management
+ * registers are accessed through an AXI4-Lite slave interface.
+ *
+ * The AXI Direct Memory Access (AXI DMA) core is a soft Xilinx IP core that
+ * provides high-bandwidth one dimensional direct memory access between memory
+ * and AXI4-Stream target peripherals. It supports one receive and one
+ * transmit channel, both of them optional at synthesis time.
+ *
+ * The AXI CDMA is a soft IP that provides high-bandwidth Direct Memory
+ * Access (DMA) between a memory-mapped source address and a memory-mapped
+ * destination address.
+ *
+ * The AXI Multichannel Direct Memory Access (AXI MCDMA) core is a soft
+ * Xilinx IP that provides high-bandwidth direct memory access between
+ * memory and AXI4-Stream target peripherals. It provides a scatter-gather
+ * (SG) interface with multiple channels and independent per-channel
+ * configuration support.
+ *
+ */
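All four IP variants above are exposed to client drivers through the generic dmaengine slave API. A minimal, hedged sketch of a client transmit path is shown below; the function name example_xilinx_dma_tx, the channel name "tx_chan", and the pre-mapped buffer are illustrative assumptions, not part of this driver.

#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>

/* Illustrative sketch: queue one DMA-mapped buffer on an MM2S channel. */
static int example_xilinx_dma_tx(struct device *dev, dma_addr_t buf, size_t len)
{
	struct dma_async_tx_descriptor *txd;
	struct dma_chan *chan;
	dma_cookie_t cookie;

	/* "tx_chan" is assumed to match the client's dma-names DT property. */
	chan = dma_request_chan(dev, "tx_chan");
	if (IS_ERR(chan))
		return PTR_ERR(chan);

	txd = dmaengine_prep_slave_single(chan, buf, len, DMA_MEM_TO_DEV,
					  DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!txd) {
		dma_release_channel(chan);
		return -ENOMEM;
	}

	cookie = dmaengine_submit(txd);		/* lands in xilinx_dma_tx_submit() */
	if (dma_submit_error(cookie)) {
		dma_release_channel(chan);
		return -EIO;
	}

	dma_async_issue_pending(chan);		/* kicks chan->start_transfer() */

	/* Completion handling and dma_release_channel() are omitted here. */
	return 0;
}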
+
+#include <linux/bitops.h>
+#include <linux/dmapool.h>
+#include <linux/dma/xilinx_dma.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/of_dma.h>
+#include <linux/of_platform.h>
+#include <linux/of_irq.h>
+#include <linux/slab.h>
+#include <linux/clk.h>
+#include <linux/io-64-nonatomic-lo-hi.h>
+
+#include "../dmaengine.h"
+
+/* Register/Descriptor Offsets */
+#define XILINX_DMA_MM2S_CTRL_OFFSET 0x0000
+#define XILINX_DMA_S2MM_CTRL_OFFSET 0x0030
+#define XILINX_VDMA_MM2S_DESC_OFFSET 0x0050
+#define XILINX_VDMA_S2MM_DESC_OFFSET 0x00a0
+
+/* Control Registers */
+#define XILINX_DMA_REG_DMACR 0x0000
+#define XILINX_DMA_DMACR_DELAY_MAX 0xff
+#define XILINX_DMA_DMACR_DELAY_SHIFT 24
+#define XILINX_DMA_DMACR_FRAME_COUNT_MAX 0xff
+#define XILINX_DMA_DMACR_FRAME_COUNT_SHIFT 16
+#define XILINX_DMA_DMACR_ERR_IRQ BIT(14)
+#define XILINX_DMA_DMACR_DLY_CNT_IRQ BIT(13)
+#define XILINX_DMA_DMACR_FRM_CNT_IRQ BIT(12)
+#define XILINX_DMA_DMACR_MASTER_SHIFT 8
+#define XILINX_DMA_DMACR_FSYNCSRC_SHIFT 5
+#define XILINX_DMA_DMACR_FRAMECNT_EN BIT(4)
+#define XILINX_DMA_DMACR_GENLOCK_EN BIT(3)
+#define XILINX_DMA_DMACR_RESET BIT(2)
+#define XILINX_DMA_DMACR_CIRC_EN BIT(1)
+#define XILINX_DMA_DMACR_RUNSTOP BIT(0)
+#define XILINX_DMA_DMACR_FSYNCSRC_MASK GENMASK(6, 5)
+#define XILINX_DMA_DMACR_DELAY_MASK GENMASK(31, 24)
+#define XILINX_DMA_DMACR_FRAME_COUNT_MASK GENMASK(23, 16)
+#define XILINX_DMA_DMACR_MASTER_MASK GENMASK(11, 8)
+
+#define XILINX_DMA_REG_DMASR 0x0004
+#define XILINX_DMA_DMASR_EOL_LATE_ERR BIT(15)
+#define XILINX_DMA_DMASR_ERR_IRQ BIT(14)
+#define XILINX_DMA_DMASR_DLY_CNT_IRQ BIT(13)
+#define XILINX_DMA_DMASR_FRM_CNT_IRQ BIT(12)
+#define XILINX_DMA_DMASR_SOF_LATE_ERR BIT(11)
+#define XILINX_DMA_DMASR_SG_DEC_ERR BIT(10)
+#define XILINX_DMA_DMASR_SG_SLV_ERR BIT(9)
+#define XILINX_DMA_DMASR_EOF_EARLY_ERR BIT(8)
+#define XILINX_DMA_DMASR_SOF_EARLY_ERR BIT(7)
+#define XILINX_DMA_DMASR_DMA_DEC_ERR BIT(6)
+#define XILINX_DMA_DMASR_DMA_SLAVE_ERR BIT(5)
+#define XILINX_DMA_DMASR_DMA_INT_ERR BIT(4)
+#define XILINX_DMA_DMASR_SG_MASK BIT(3)
+#define XILINX_DMA_DMASR_IDLE BIT(1)
+#define XILINX_DMA_DMASR_HALTED BIT(0)
+#define XILINX_DMA_DMASR_DELAY_MASK GENMASK(31, 24)
+#define XILINX_DMA_DMASR_FRAME_COUNT_MASK GENMASK(23, 16)
+
+#define XILINX_DMA_REG_CURDESC 0x0008
+#define XILINX_DMA_REG_TAILDESC 0x0010
+#define XILINX_DMA_REG_REG_INDEX 0x0014
+#define XILINX_DMA_REG_FRMSTORE 0x0018
+#define XILINX_DMA_REG_THRESHOLD 0x001c
+#define XILINX_DMA_REG_FRMPTR_STS 0x0024
+#define XILINX_DMA_REG_PARK_PTR 0x0028
+#define XILINX_DMA_PARK_PTR_WR_REF_SHIFT 8
+#define XILINX_DMA_PARK_PTR_WR_REF_MASK GENMASK(12, 8)
+#define XILINX_DMA_PARK_PTR_RD_REF_SHIFT 0
+#define XILINX_DMA_PARK_PTR_RD_REF_MASK GENMASK(4, 0)
+#define XILINX_DMA_REG_VDMA_VERSION 0x002c
+
+/* Register Direct Mode Registers */
+#define XILINX_DMA_REG_VSIZE 0x0000
+#define XILINX_DMA_REG_HSIZE 0x0004
+
+#define XILINX_DMA_REG_FRMDLY_STRIDE 0x0008
+#define XILINX_DMA_FRMDLY_STRIDE_FRMDLY_SHIFT 24
+#define XILINX_DMA_FRMDLY_STRIDE_STRIDE_SHIFT 0
+
+#define XILINX_VDMA_REG_START_ADDRESS(n) (0x000c + 4 * (n))
+#define XILINX_VDMA_REG_START_ADDRESS_64(n) (0x000c + 8 * (n))
+
+#define XILINX_VDMA_REG_ENABLE_VERTICAL_FLIP 0x00ec
+#define XILINX_VDMA_ENABLE_VERTICAL_FLIP BIT(0)
+
+/* HW specific definitions */
+#define XILINX_MCDMA_MAX_CHANS_PER_DEVICE 0x20
+#define XILINX_DMA_MAX_CHANS_PER_DEVICE 0x2
+#define XILINX_CDMA_MAX_CHANS_PER_DEVICE 0x1
+
+#define XILINX_DMA_DMAXR_ALL_IRQ_MASK \
+ (XILINX_DMA_DMASR_FRM_CNT_IRQ | \
+ XILINX_DMA_DMASR_DLY_CNT_IRQ | \
+ XILINX_DMA_DMASR_ERR_IRQ)
+
+#define XILINX_DMA_DMASR_ALL_ERR_MASK \
+ (XILINX_DMA_DMASR_EOL_LATE_ERR | \
+ XILINX_DMA_DMASR_SOF_LATE_ERR | \
+ XILINX_DMA_DMASR_SG_DEC_ERR | \
+ XILINX_DMA_DMASR_SG_SLV_ERR | \
+ XILINX_DMA_DMASR_EOF_EARLY_ERR | \
+ XILINX_DMA_DMASR_SOF_EARLY_ERR | \
+ XILINX_DMA_DMASR_DMA_DEC_ERR | \
+ XILINX_DMA_DMASR_DMA_SLAVE_ERR | \
+ XILINX_DMA_DMASR_DMA_INT_ERR)
+
+/*
+ * Recoverable errors are DMA Internal error, SOF Early, EOF Early
+ * and SOF Late. They are only recoverable when C_FLUSH_ON_FSYNC
+ * is enabled in the h/w system.
+ */
+#define XILINX_DMA_DMASR_ERR_RECOVER_MASK \
+ (XILINX_DMA_DMASR_SOF_LATE_ERR | \
+ XILINX_DMA_DMASR_EOF_EARLY_ERR | \
+ XILINX_DMA_DMASR_SOF_EARLY_ERR | \
+ XILINX_DMA_DMASR_DMA_INT_ERR)
+
+/* Axi VDMA Flush on Fsync bits */
+#define XILINX_DMA_FLUSH_S2MM 3
+#define XILINX_DMA_FLUSH_MM2S 2
+#define XILINX_DMA_FLUSH_BOTH 1
+
+/* Delay loop counter to prevent hardware failure */
+#define XILINX_DMA_LOOP_COUNT 1000000
+
+/* AXI DMA Specific Registers/Offsets */
+#define XILINX_DMA_REG_SRCDSTADDR 0x18
+#define XILINX_DMA_REG_BTT 0x28
+
+/* AXI DMA Specific Masks/Bit fields */
+#define XILINX_DMA_MAX_TRANS_LEN_MIN 8
+#define XILINX_DMA_MAX_TRANS_LEN_MAX 23
+#define XILINX_DMA_V2_MAX_TRANS_LEN_MAX 26
+#define XILINX_DMA_CR_COALESCE_MAX GENMASK(23, 16)
+#define XILINX_DMA_CR_CYCLIC_BD_EN_MASK BIT(4)
+#define XILINX_DMA_CR_COALESCE_SHIFT 16
+#define XILINX_DMA_BD_SOP BIT(27)
+#define XILINX_DMA_BD_EOP BIT(26)
+#define XILINX_DMA_COALESCE_MAX 255
+#define XILINX_DMA_NUM_DESCS 255
+#define XILINX_DMA_NUM_APP_WORDS 5
+
+/* AXI CDMA Specific Registers/Offsets */
+#define XILINX_CDMA_REG_SRCADDR 0x18
+#define XILINX_CDMA_REG_DSTADDR 0x20
+
+/* AXI CDMA Specific Masks */
+#define XILINX_CDMA_CR_SGMODE BIT(3)
+
+#define xilinx_prep_dma_addr_t(addr) \
+ ((dma_addr_t)((u64)addr##_##msb << 32 | (addr)))
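The token pasting above lets callers pass only the low-word field name and have the macro derive the matching *_msb field. As a sketch, the CDMA simple-mode path later in this file calls xilinx_prep_dma_addr_t(hw->src_addr), which expands (using the struct xilinx_cdma_desc_hw field names defined below) to:

/* sketch of the preprocessor expansion for the CDMA simple-mode path */
((dma_addr_t)((u64)hw->src_addr_msb << 32 | (hw->src_addr)))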
+
+/* AXI MCDMA Specific Registers/Offsets */
+#define XILINX_MCDMA_MM2S_CTRL_OFFSET 0x0000
+#define XILINX_MCDMA_S2MM_CTRL_OFFSET 0x0500
+#define XILINX_MCDMA_CHEN_OFFSET 0x0008
+#define XILINX_MCDMA_CH_ERR_OFFSET 0x0010
+#define XILINX_MCDMA_RXINT_SER_OFFSET 0x0020
+#define XILINX_MCDMA_TXINT_SER_OFFSET 0x0028
+#define XILINX_MCDMA_CHAN_CR_OFFSET(x) (0x40 + (x) * 0x40)
+#define XILINX_MCDMA_CHAN_SR_OFFSET(x) (0x44 + (x) * 0x40)
+#define XILINX_MCDMA_CHAN_CDESC_OFFSET(x) (0x48 + (x) * 0x40)
+#define XILINX_MCDMA_CHAN_TDESC_OFFSET(x) (0x50 + (x) * 0x40)
+
+/* AXI MCDMA Specific Masks/Shifts */
+#define XILINX_MCDMA_COALESCE_SHIFT 16
+#define XILINX_MCDMA_COALESCE_MAX 24
+#define XILINX_MCDMA_IRQ_ALL_MASK GENMASK(7, 5)
+#define XILINX_MCDMA_COALESCE_MASK GENMASK(23, 16)
+#define XILINX_MCDMA_CR_RUNSTOP_MASK BIT(0)
+#define XILINX_MCDMA_IRQ_IOC_MASK BIT(5)
+#define XILINX_MCDMA_IRQ_DELAY_MASK BIT(6)
+#define XILINX_MCDMA_IRQ_ERR_MASK BIT(7)
+#define XILINX_MCDMA_BD_EOP BIT(30)
+#define XILINX_MCDMA_BD_SOP BIT(31)
+
+/**
+ * struct xilinx_vdma_desc_hw - Hardware Descriptor
+ * @next_desc: Next Descriptor Pointer @0x00
+ * @pad1: Reserved @0x04
+ * @buf_addr: Buffer address @0x08
+ * @buf_addr_msb: MSB of Buffer address @0x0C
+ * @vsize: Vertical Size @0x10
+ * @hsize: Horizontal Size @0x14
+ * @stride: Number of bytes between the first
+ * pixels of each horizontal line @0x18
+ */
+struct xilinx_vdma_desc_hw {
+ u32 next_desc;
+ u32 pad1;
+ u32 buf_addr;
+ u32 buf_addr_msb;
+ u32 vsize;
+ u32 hsize;
+ u32 stride;
+} __aligned(64);
+
+/**
+ * struct xilinx_axidma_desc_hw - Hardware Descriptor for AXI DMA
+ * @next_desc: Next Descriptor Pointer @0x00
+ * @next_desc_msb: MSB of Next Descriptor Pointer @0x04
+ * @buf_addr: Buffer address @0x08
+ * @buf_addr_msb: MSB of Buffer address @0x0C
+ * @reserved1: Reserved @0x10
+ * @reserved2: Reserved @0x14
+ * @control: Control field @0x18
+ * @status: Status field @0x1C
+ * @app: APP Fields @0x20 - 0x30
+ */
+struct xilinx_axidma_desc_hw {
+ u32 next_desc;
+ u32 next_desc_msb;
+ u32 buf_addr;
+ u32 buf_addr_msb;
+ u32 reserved1;
+ u32 reserved2;
+ u32 control;
+ u32 status;
+ u32 app[XILINX_DMA_NUM_APP_WORDS];
+} __aligned(64);
+
+/**
+ * struct xilinx_aximcdma_desc_hw - Hardware Descriptor for AXI MCDMA
+ * @next_desc: Next Descriptor Pointer @0x00
+ * @next_desc_msb: MSB of Next Descriptor Pointer @0x04
+ * @buf_addr: Buffer address @0x08
+ * @buf_addr_msb: MSB of Buffer address @0x0C
+ * @rsvd: Reserved field @0x10
+ * @control: Control Information field @0x14
+ * @status: Status field @0x18
+ * @sideband_status: Status of sideband signals @0x1C
+ * @app: APP Fields @0x20 - 0x30
+ */
+struct xilinx_aximcdma_desc_hw {
+ u32 next_desc;
+ u32 next_desc_msb;
+ u32 buf_addr;
+ u32 buf_addr_msb;
+ u32 rsvd;
+ u32 control;
+ u32 status;
+ u32 sideband_status;
+ u32 app[XILINX_DMA_NUM_APP_WORDS];
+} __aligned(64);
+
+/**
+ * struct xilinx_cdma_desc_hw - Hardware Descriptor
+ * @next_desc: Next Descriptor Pointer @0x00
+ * @next_desc_msb: Next Descriptor Pointer MSB @0x04
+ * @src_addr: Source address @0x08
+ * @src_addr_msb: Source address MSB @0x0C
+ * @dest_addr: Destination address @0x10
+ * @dest_addr_msb: Destination address MSB @0x14
+ * @control: Control field @0x18
+ * @status: Status field @0x1C
+ */
+struct xilinx_cdma_desc_hw {
+ u32 next_desc;
+ u32 next_desc_msb;
+ u32 src_addr;
+ u32 src_addr_msb;
+ u32 dest_addr;
+ u32 dest_addr_msb;
+ u32 control;
+ u32 status;
+} __aligned(64);
+
+/**
+ * struct xilinx_vdma_tx_segment - Descriptor segment
+ * @hw: Hardware descriptor
+ * @node: Node in the descriptor segments list
+ * @phys: Physical address of segment
+ */
+struct xilinx_vdma_tx_segment {
+ struct xilinx_vdma_desc_hw hw;
+ struct list_head node;
+ dma_addr_t phys;
+} __aligned(64);
+
+/**
+ * struct xilinx_axidma_tx_segment - Descriptor segment
+ * @hw: Hardware descriptor
+ * @node: Node in the descriptor segments list
+ * @phys: Physical address of segment
+ */
+struct xilinx_axidma_tx_segment {
+ struct xilinx_axidma_desc_hw hw;
+ struct list_head node;
+ dma_addr_t phys;
+} __aligned(64);
+
+/**
+ * struct xilinx_aximcdma_tx_segment - Descriptor segment
+ * @hw: Hardware descriptor
+ * @node: Node in the descriptor segments list
+ * @phys: Physical address of segment
+ */
+struct xilinx_aximcdma_tx_segment {
+ struct xilinx_aximcdma_desc_hw hw;
+ struct list_head node;
+ dma_addr_t phys;
+} __aligned(64);
+
+/**
+ * struct xilinx_cdma_tx_segment - Descriptor segment
+ * @hw: Hardware descriptor
+ * @node: Node in the descriptor segments list
+ * @phys: Physical address of segment
+ */
+struct xilinx_cdma_tx_segment {
+ struct xilinx_cdma_desc_hw hw;
+ struct list_head node;
+ dma_addr_t phys;
+} __aligned(64);
+
+/**
+ * struct xilinx_dma_tx_descriptor - Per Transaction structure
+ * @async_tx: Async transaction descriptor
+ * @segments: TX segments list
+ * @node: Node in the channel descriptors list
+ * @cyclic: Check for cyclic transfers.
+ * @err: Whether the descriptor has an error.
+ * @residue: Residue of the completed descriptor
+ */
+struct xilinx_dma_tx_descriptor {
+ struct dma_async_tx_descriptor async_tx;
+ struct list_head segments;
+ struct list_head node;
+ bool cyclic;
+ bool err;
+ u32 residue;
+};
+
+/**
+ * struct xilinx_dma_chan - Driver specific DMA channel structure
+ * @xdev: Driver specific device structure
+ * @ctrl_offset: Control registers offset
+ * @desc_offset: TX descriptor registers offset
+ * @lock: Descriptor operation lock
+ * @pending_list: Descriptors waiting
+ * @active_list: Descriptors ready to submit
+ * @done_list: Complete descriptors
+ * @free_seg_list: Free descriptors
+ * @common: DMA common channel
+ * @desc_pool: Descriptors pool
+ * @dev: The dma device
+ * @irq: Channel IRQ
+ * @id: Channel ID
+ * @direction: Transfer direction
+ * @num_frms: Number of frames
+ * @has_sg: Support scatter transfers
+ * @cyclic: Check for cyclic transfers.
+ * @genlock: Support genlock mode
+ * @err: Channel has errors
+ * @idle: Check for channel idle
+ * @terminating: Check for channel being synchronized by user
+ * @tasklet: Cleanup work after irq
+ * @config: Device configuration info
+ * @flush_on_fsync: Flush on Frame sync
+ * @desc_pendingcount: Descriptor pending count
+ * @ext_addr: Indicates 64 bit addressing is supported by dma channel
+ * @desc_submitcount: Descriptor h/w submitted count
+ * @seg_v: Statically allocated segments base
+ * @seg_mv: Statically allocated segments base for MCDMA
+ * @seg_p: Physical allocated segments base
+ * @cyclic_seg_v: Statically allocated segment base for cyclic transfers
+ * @cyclic_seg_p: Physical allocated segments base for cyclic dma
+ * @start_transfer: IP-specific hook to start a transfer
+ * @stop_transfer: IP-specific hook to quiesce a transfer
+ * @tdest: TDEST value for mcdma
+ * @has_vflip: S2MM vertical flip
+ */
+struct xilinx_dma_chan {
+ struct xilinx_dma_device *xdev;
+ u32 ctrl_offset;
+ u32 desc_offset;
+ spinlock_t lock;
+ struct list_head pending_list;
+ struct list_head active_list;
+ struct list_head done_list;
+ struct list_head free_seg_list;
+ struct dma_chan common;
+ struct dma_pool *desc_pool;
+ struct device *dev;
+ int irq;
+ int id;
+ enum dma_transfer_direction direction;
+ int num_frms;
+ bool has_sg;
+ bool cyclic;
+ bool genlock;
+ bool err;
+ bool idle;
+ bool terminating;
+ struct tasklet_struct tasklet;
+ struct xilinx_vdma_config config;
+ bool flush_on_fsync;
+ u32 desc_pendingcount;
+ bool ext_addr;
+ u32 desc_submitcount;
+ struct xilinx_axidma_tx_segment *seg_v;
+ struct xilinx_aximcdma_tx_segment *seg_mv;
+ dma_addr_t seg_p;
+ struct xilinx_axidma_tx_segment *cyclic_seg_v;
+ dma_addr_t cyclic_seg_p;
+ void (*start_transfer)(struct xilinx_dma_chan *chan);
+ int (*stop_transfer)(struct xilinx_dma_chan *chan);
+ u16 tdest;
+ bool has_vflip;
+};
+
+/**
+ * enum xdma_ip_type - DMA IP type.
+ *
+ * @XDMA_TYPE_AXIDMA: Axi dma ip.
+ * @XDMA_TYPE_CDMA: Axi cdma ip.
+ * @XDMA_TYPE_VDMA: Axi vdma ip.
+ * @XDMA_TYPE_AXIMCDMA: Axi MCDMA ip.
+ *
+ */
+enum xdma_ip_type {
+ XDMA_TYPE_AXIDMA = 0,
+ XDMA_TYPE_CDMA,
+ XDMA_TYPE_VDMA,
+ XDMA_TYPE_AXIMCDMA
+};
+
+struct xilinx_dma_config {
+ enum xdma_ip_type dmatype;
+ int (*clk_init)(struct platform_device *pdev, struct clk **axi_clk,
+ struct clk **tx_clk, struct clk **txs_clk,
+ struct clk **rx_clk, struct clk **rxs_clk);
+ irqreturn_t (*irq_handler)(int irq, void *data);
+ const int max_channels;
+};
+
+/**
+ * struct xilinx_dma_device - DMA device structure
+ * @regs: I/O mapped base address
+ * @dev: Device Structure
+ * @common: DMA device structure
+ * @chan: Driver specific DMA channel
+ * @flush_on_fsync: Flush on frame sync
+ * @ext_addr: Indicates 64 bit addressing is supported by dma device
+ * @pdev: Platform device structure pointer
+ * @dma_config: DMA config structure
+ * @axi_clk: DMA Axi4-lite interface clock
+ * @tx_clk: DMA mm2s clock
+ * @txs_clk: DMA mm2s stream clock
+ * @rx_clk: DMA s2mm clock
+ * @rxs_clk: DMA s2mm stream clock
+ * @s2mm_chan_id: DMA s2mm channel identifier
+ * @mm2s_chan_id: DMA mm2s channel identifier
+ * @max_buffer_len: Max buffer length
+ */
+struct xilinx_dma_device {
+ void __iomem *regs;
+ struct device *dev;
+ struct dma_device common;
+ struct xilinx_dma_chan *chan[XILINX_MCDMA_MAX_CHANS_PER_DEVICE];
+ u32 flush_on_fsync;
+ bool ext_addr;
+ struct platform_device *pdev;
+ const struct xilinx_dma_config *dma_config;
+ struct clk *axi_clk;
+ struct clk *tx_clk;
+ struct clk *txs_clk;
+ struct clk *rx_clk;
+ struct clk *rxs_clk;
+ u32 s2mm_chan_id;
+ u32 mm2s_chan_id;
+ u32 max_buffer_len;
+};
+
+/* Macros */
+#define to_xilinx_chan(chan) \
+ container_of(chan, struct xilinx_dma_chan, common)
+#define to_dma_tx_descriptor(tx) \
+ container_of(tx, struct xilinx_dma_tx_descriptor, async_tx)
+#define xilinx_dma_poll_timeout(chan, reg, val, cond, delay_us, timeout_us) \
+ readl_poll_timeout_atomic(chan->xdev->regs + chan->ctrl_offset + reg, \
+ val, cond, delay_us, timeout_us)
+
+/* IO accessors */
+static inline u32 dma_read(struct xilinx_dma_chan *chan, u32 reg)
+{
+ return ioread32(chan->xdev->regs + reg);
+}
+
+static inline void dma_write(struct xilinx_dma_chan *chan, u32 reg, u32 value)
+{
+ iowrite32(value, chan->xdev->regs + reg);
+}
+
+static inline void vdma_desc_write(struct xilinx_dma_chan *chan, u32 reg,
+ u32 value)
+{
+ dma_write(chan, chan->desc_offset + reg, value);
+}
+
+static inline u32 dma_ctrl_read(struct xilinx_dma_chan *chan, u32 reg)
+{
+ return dma_read(chan, chan->ctrl_offset + reg);
+}
+
+static inline void dma_ctrl_write(struct xilinx_dma_chan *chan, u32 reg,
+ u32 value)
+{
+ dma_write(chan, chan->ctrl_offset + reg, value);
+}
+
+static inline void dma_ctrl_clr(struct xilinx_dma_chan *chan, u32 reg,
+ u32 clr)
+{
+ dma_ctrl_write(chan, reg, dma_ctrl_read(chan, reg) & ~clr);
+}
+
+static inline void dma_ctrl_set(struct xilinx_dma_chan *chan, u32 reg,
+ u32 set)
+{
+ dma_ctrl_write(chan, reg, dma_ctrl_read(chan, reg) | set);
+}
+
+/**
+ * vdma_desc_write_64 - 64-bit descriptor write
+ * @chan: Driver specific VDMA channel
+ * @reg: Register to write
+ * @value_lsb: lower address of the descriptor.
+ * @value_msb: upper address of the descriptor.
+ *
+ * Since the VDMA driver writes to register offsets that are not 64-bit
+ * aligned (e.g. 0x5c), the address is written as two separate 32-bit
+ * writes instead of a single 64-bit register write.
+ */
+static inline void vdma_desc_write_64(struct xilinx_dma_chan *chan, u32 reg,
+ u32 value_lsb, u32 value_msb)
+{
+	/* Write the lsb 32 bits */
+ writel(value_lsb, chan->xdev->regs + chan->desc_offset + reg);
+
+ /* Write the msb 32 bits */
+ writel(value_msb, chan->xdev->regs + chan->desc_offset + reg + 4);
+}
+
+static inline void dma_writeq(struct xilinx_dma_chan *chan, u32 reg, u64 value)
+{
+ lo_hi_writeq(value, chan->xdev->regs + chan->ctrl_offset + reg);
+}
+
+static inline void xilinx_write(struct xilinx_dma_chan *chan, u32 reg,
+ dma_addr_t addr)
+{
+ if (chan->ext_addr)
+ dma_writeq(chan, reg, addr);
+ else
+ dma_ctrl_write(chan, reg, addr);
+}
+
+static inline void xilinx_axidma_buf(struct xilinx_dma_chan *chan,
+ struct xilinx_axidma_desc_hw *hw,
+ dma_addr_t buf_addr, size_t sg_used,
+ size_t period_len)
+{
+ if (chan->ext_addr) {
+ hw->buf_addr = lower_32_bits(buf_addr + sg_used + period_len);
+ hw->buf_addr_msb = upper_32_bits(buf_addr + sg_used +
+ period_len);
+ } else {
+ hw->buf_addr = buf_addr + sg_used + period_len;
+ }
+}
+
+static inline void xilinx_aximcdma_buf(struct xilinx_dma_chan *chan,
+ struct xilinx_aximcdma_desc_hw *hw,
+ dma_addr_t buf_addr, size_t sg_used)
+{
+ if (chan->ext_addr) {
+ hw->buf_addr = lower_32_bits(buf_addr + sg_used);
+ hw->buf_addr_msb = upper_32_bits(buf_addr + sg_used);
+ } else {
+ hw->buf_addr = buf_addr + sg_used;
+ }
+}
+
+/* -----------------------------------------------------------------------------
+ * Descriptors and segments alloc and free
+ */
+
+/**
+ * xilinx_vdma_alloc_tx_segment - Allocate transaction segment
+ * @chan: Driver specific DMA channel
+ *
+ * Return: The allocated segment on success and NULL on failure.
+ */
+static struct xilinx_vdma_tx_segment *
+xilinx_vdma_alloc_tx_segment(struct xilinx_dma_chan *chan)
+{
+ struct xilinx_vdma_tx_segment *segment;
+ dma_addr_t phys;
+
+ segment = dma_pool_zalloc(chan->desc_pool, GFP_ATOMIC, &phys);
+ if (!segment)
+ return NULL;
+
+ segment->phys = phys;
+
+ return segment;
+}
+
+/**
+ * xilinx_cdma_alloc_tx_segment - Allocate transaction segment
+ * @chan: Driver specific DMA channel
+ *
+ * Return: The allocated segment on success and NULL on failure.
+ */
+static struct xilinx_cdma_tx_segment *
+xilinx_cdma_alloc_tx_segment(struct xilinx_dma_chan *chan)
+{
+ struct xilinx_cdma_tx_segment *segment;
+ dma_addr_t phys;
+
+ segment = dma_pool_zalloc(chan->desc_pool, GFP_ATOMIC, &phys);
+ if (!segment)
+ return NULL;
+
+ segment->phys = phys;
+
+ return segment;
+}
+
+/**
+ * xilinx_axidma_alloc_tx_segment - Allocate transaction segment
+ * @chan: Driver specific DMA channel
+ *
+ * Return: The allocated segment on success and NULL on failure.
+ */
+static struct xilinx_axidma_tx_segment *
+xilinx_axidma_alloc_tx_segment(struct xilinx_dma_chan *chan)
+{
+ struct xilinx_axidma_tx_segment *segment = NULL;
+ unsigned long flags;
+
+ spin_lock_irqsave(&chan->lock, flags);
+ if (!list_empty(&chan->free_seg_list)) {
+ segment = list_first_entry(&chan->free_seg_list,
+ struct xilinx_axidma_tx_segment,
+ node);
+ list_del(&segment->node);
+ }
+ spin_unlock_irqrestore(&chan->lock, flags);
+
+ if (!segment)
+ dev_dbg(chan->dev, "Could not find free tx segment\n");
+
+ return segment;
+}
+
+/**
+ * xilinx_aximcdma_alloc_tx_segment - Allocate transaction segment
+ * @chan: Driver specific DMA channel
+ *
+ * Return: The allocated segment on success and NULL on failure.
+ */
+static struct xilinx_aximcdma_tx_segment *
+xilinx_aximcdma_alloc_tx_segment(struct xilinx_dma_chan *chan)
+{
+ struct xilinx_aximcdma_tx_segment *segment = NULL;
+ unsigned long flags;
+
+ spin_lock_irqsave(&chan->lock, flags);
+ if (!list_empty(&chan->free_seg_list)) {
+ segment = list_first_entry(&chan->free_seg_list,
+ struct xilinx_aximcdma_tx_segment,
+ node);
+ list_del(&segment->node);
+ }
+ spin_unlock_irqrestore(&chan->lock, flags);
+
+ return segment;
+}
+
+static void xilinx_dma_clean_hw_desc(struct xilinx_axidma_desc_hw *hw)
+{
+ u32 next_desc = hw->next_desc;
+ u32 next_desc_msb = hw->next_desc_msb;
+
+ memset(hw, 0, sizeof(struct xilinx_axidma_desc_hw));
+
+ hw->next_desc = next_desc;
+ hw->next_desc_msb = next_desc_msb;
+}
+
+static void xilinx_mcdma_clean_hw_desc(struct xilinx_aximcdma_desc_hw *hw)
+{
+ u32 next_desc = hw->next_desc;
+ u32 next_desc_msb = hw->next_desc_msb;
+
+ memset(hw, 0, sizeof(struct xilinx_aximcdma_desc_hw));
+
+ hw->next_desc = next_desc;
+ hw->next_desc_msb = next_desc_msb;
+}
+
+/**
+ * xilinx_dma_free_tx_segment - Free transaction segment
+ * @chan: Driver specific DMA channel
+ * @segment: DMA transaction segment
+ */
+static void xilinx_dma_free_tx_segment(struct xilinx_dma_chan *chan,
+ struct xilinx_axidma_tx_segment *segment)
+{
+ xilinx_dma_clean_hw_desc(&segment->hw);
+
+ list_add_tail(&segment->node, &chan->free_seg_list);
+}
+
+/**
+ * xilinx_mcdma_free_tx_segment - Free transaction segment
+ * @chan: Driver specific DMA channel
+ * @segment: DMA transaction segment
+ */
+static void xilinx_mcdma_free_tx_segment(struct xilinx_dma_chan *chan,
+ struct xilinx_aximcdma_tx_segment *
+ segment)
+{
+ xilinx_mcdma_clean_hw_desc(&segment->hw);
+
+ list_add_tail(&segment->node, &chan->free_seg_list);
+}
+
+/**
+ * xilinx_cdma_free_tx_segment - Free transaction segment
+ * @chan: Driver specific DMA channel
+ * @segment: DMA transaction segment
+ */
+static void xilinx_cdma_free_tx_segment(struct xilinx_dma_chan *chan,
+ struct xilinx_cdma_tx_segment *segment)
+{
+ dma_pool_free(chan->desc_pool, segment, segment->phys);
+}
+
+/**
+ * xilinx_vdma_free_tx_segment - Free transaction segment
+ * @chan: Driver specific DMA channel
+ * @segment: DMA transaction segment
+ */
+static void xilinx_vdma_free_tx_segment(struct xilinx_dma_chan *chan,
+ struct xilinx_vdma_tx_segment *segment)
+{
+ dma_pool_free(chan->desc_pool, segment, segment->phys);
+}
+
+/**
+ * xilinx_dma_alloc_tx_descriptor - Allocate transaction descriptor
+ * @chan: Driver specific DMA channel
+ *
+ * Return: The allocated descriptor on success and NULL on failure.
+ */
+static struct xilinx_dma_tx_descriptor *
+xilinx_dma_alloc_tx_descriptor(struct xilinx_dma_chan *chan)
+{
+ struct xilinx_dma_tx_descriptor *desc;
+
+ desc = kzalloc(sizeof(*desc), GFP_NOWAIT);
+ if (!desc)
+ return NULL;
+
+ INIT_LIST_HEAD(&desc->segments);
+
+ return desc;
+}
+
+/**
+ * xilinx_dma_free_tx_descriptor - Free transaction descriptor
+ * @chan: Driver specific DMA channel
+ * @desc: DMA transaction descriptor
+ */
+static void
+xilinx_dma_free_tx_descriptor(struct xilinx_dma_chan *chan,
+ struct xilinx_dma_tx_descriptor *desc)
+{
+ struct xilinx_vdma_tx_segment *segment, *next;
+ struct xilinx_cdma_tx_segment *cdma_segment, *cdma_next;
+ struct xilinx_axidma_tx_segment *axidma_segment, *axidma_next;
+ struct xilinx_aximcdma_tx_segment *aximcdma_segment, *aximcdma_next;
+
+ if (!desc)
+ return;
+
+ if (chan->xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
+ list_for_each_entry_safe(segment, next, &desc->segments, node) {
+ list_del(&segment->node);
+ xilinx_vdma_free_tx_segment(chan, segment);
+ }
+ } else if (chan->xdev->dma_config->dmatype == XDMA_TYPE_CDMA) {
+ list_for_each_entry_safe(cdma_segment, cdma_next,
+ &desc->segments, node) {
+ list_del(&cdma_segment->node);
+ xilinx_cdma_free_tx_segment(chan, cdma_segment);
+ }
+ } else if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
+ list_for_each_entry_safe(axidma_segment, axidma_next,
+ &desc->segments, node) {
+ list_del(&axidma_segment->node);
+ xilinx_dma_free_tx_segment(chan, axidma_segment);
+ }
+ } else {
+ list_for_each_entry_safe(aximcdma_segment, aximcdma_next,
+ &desc->segments, node) {
+ list_del(&aximcdma_segment->node);
+ xilinx_mcdma_free_tx_segment(chan, aximcdma_segment);
+ }
+ }
+
+ kfree(desc);
+}
+
+/* Required functions */
+
+/**
+ * xilinx_dma_free_desc_list - Free descriptors list
+ * @chan: Driver specific DMA channel
+ * @list: List to parse and delete the descriptor
+ */
+static void xilinx_dma_free_desc_list(struct xilinx_dma_chan *chan,
+ struct list_head *list)
+{
+ struct xilinx_dma_tx_descriptor *desc, *next;
+
+ list_for_each_entry_safe(desc, next, list, node) {
+ list_del(&desc->node);
+ xilinx_dma_free_tx_descriptor(chan, desc);
+ }
+}
+
+/**
+ * xilinx_dma_free_descriptors - Free channel descriptors
+ * @chan: Driver specific DMA channel
+ */
+static void xilinx_dma_free_descriptors(struct xilinx_dma_chan *chan)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&chan->lock, flags);
+
+ xilinx_dma_free_desc_list(chan, &chan->pending_list);
+ xilinx_dma_free_desc_list(chan, &chan->done_list);
+ xilinx_dma_free_desc_list(chan, &chan->active_list);
+
+ spin_unlock_irqrestore(&chan->lock, flags);
+}
+
+/**
+ * xilinx_dma_free_chan_resources - Free channel resources
+ * @dchan: DMA channel
+ */
+static void xilinx_dma_free_chan_resources(struct dma_chan *dchan)
+{
+ struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
+ unsigned long flags;
+
+ dev_dbg(chan->dev, "Free all channel resources.\n");
+
+ xilinx_dma_free_descriptors(chan);
+
+ if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
+ spin_lock_irqsave(&chan->lock, flags);
+ INIT_LIST_HEAD(&chan->free_seg_list);
+ spin_unlock_irqrestore(&chan->lock, flags);
+
+ /* Free memory that is allocated for BD */
+ dma_free_coherent(chan->dev, sizeof(*chan->seg_v) *
+ XILINX_DMA_NUM_DESCS, chan->seg_v,
+ chan->seg_p);
+
+ /* Free Memory that is allocated for cyclic DMA Mode */
+ dma_free_coherent(chan->dev, sizeof(*chan->cyclic_seg_v),
+ chan->cyclic_seg_v, chan->cyclic_seg_p);
+ }
+
+ if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIMCDMA) {
+ spin_lock_irqsave(&chan->lock, flags);
+ INIT_LIST_HEAD(&chan->free_seg_list);
+ spin_unlock_irqrestore(&chan->lock, flags);
+
+ /* Free memory that is allocated for BD */
+ dma_free_coherent(chan->dev, sizeof(*chan->seg_mv) *
+ XILINX_DMA_NUM_DESCS, chan->seg_mv,
+ chan->seg_p);
+ }
+
+ if (chan->xdev->dma_config->dmatype != XDMA_TYPE_AXIDMA &&
+ chan->xdev->dma_config->dmatype != XDMA_TYPE_AXIMCDMA) {
+ dma_pool_destroy(chan->desc_pool);
+ chan->desc_pool = NULL;
+ }
+
+}
+
+/**
+ * xilinx_dma_get_residue - Compute residue for a given descriptor
+ * @chan: Driver specific dma channel
+ * @desc: dma transaction descriptor
+ *
+ * Return: The number of residue bytes for the descriptor.
+ */
+static u32 xilinx_dma_get_residue(struct xilinx_dma_chan *chan,
+ struct xilinx_dma_tx_descriptor *desc)
+{
+ struct xilinx_cdma_tx_segment *cdma_seg;
+ struct xilinx_axidma_tx_segment *axidma_seg;
+ struct xilinx_aximcdma_tx_segment *aximcdma_seg;
+ struct xilinx_cdma_desc_hw *cdma_hw;
+ struct xilinx_axidma_desc_hw *axidma_hw;
+ struct xilinx_aximcdma_desc_hw *aximcdma_hw;
+ struct list_head *entry;
+ u32 residue = 0;
+
+ list_for_each(entry, &desc->segments) {
+ if (chan->xdev->dma_config->dmatype == XDMA_TYPE_CDMA) {
+ cdma_seg = list_entry(entry,
+ struct xilinx_cdma_tx_segment,
+ node);
+ cdma_hw = &cdma_seg->hw;
+ residue += (cdma_hw->control - cdma_hw->status) &
+ chan->xdev->max_buffer_len;
+ } else if (chan->xdev->dma_config->dmatype ==
+ XDMA_TYPE_AXIDMA) {
+ axidma_seg = list_entry(entry,
+ struct xilinx_axidma_tx_segment,
+ node);
+ axidma_hw = &axidma_seg->hw;
+ residue += (axidma_hw->control - axidma_hw->status) &
+ chan->xdev->max_buffer_len;
+ } else {
+ aximcdma_seg =
+ list_entry(entry,
+ struct xilinx_aximcdma_tx_segment,
+ node);
+ aximcdma_hw = &aximcdma_seg->hw;
+ residue +=
+ (aximcdma_hw->control - aximcdma_hw->status) &
+ chan->xdev->max_buffer_len;
+ }
+ }
+
+ return residue;
+}
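A short, hedged illustration of the per-segment arithmetic above, assuming the default 23-bit length field (max_buffer_len == GENMASK(22, 0)); the helper name and values are made up for the example.

/* Sketch only: control holds the programmed length, status the bytes done. */
static u32 example_segment_residue(u32 control, u32 status)
{
	u32 max_buffer_len = GENMASK(22, 0);	/* 0x7fffff */

	/* e.g. control = 4096, status = 1024  ->  3072 bytes still pending */
	return (control - status) & max_buffer_len;
}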
+
+/**
+ * xilinx_dma_chan_handle_cyclic - Cyclic dma callback
+ * @chan: Driver specific dma channel
+ * @desc: dma transaction descriptor
+ * @flags: flags for spin lock
+ */
+static void xilinx_dma_chan_handle_cyclic(struct xilinx_dma_chan *chan,
+ struct xilinx_dma_tx_descriptor *desc,
+ unsigned long *flags)
+{
+ struct dmaengine_desc_callback cb;
+
+ dmaengine_desc_get_callback(&desc->async_tx, &cb);
+ if (dmaengine_desc_callback_valid(&cb)) {
+ spin_unlock_irqrestore(&chan->lock, *flags);
+ dmaengine_desc_callback_invoke(&cb, NULL);
+ spin_lock_irqsave(&chan->lock, *flags);
+ }
+}
+
+/**
+ * xilinx_dma_chan_desc_cleanup - Clean channel descriptors
+ * @chan: Driver specific DMA channel
+ */
+static void xilinx_dma_chan_desc_cleanup(struct xilinx_dma_chan *chan)
+{
+ struct xilinx_dma_tx_descriptor *desc, *next;
+ unsigned long flags;
+
+ spin_lock_irqsave(&chan->lock, flags);
+
+ list_for_each_entry_safe(desc, next, &chan->done_list, node) {
+ struct dmaengine_result result;
+
+ if (desc->cyclic) {
+ xilinx_dma_chan_handle_cyclic(chan, desc, &flags);
+ break;
+ }
+
+ /* Remove from the list of running transactions */
+ list_del(&desc->node);
+
+ if (unlikely(desc->err)) {
+ if (chan->direction == DMA_DEV_TO_MEM)
+ result.result = DMA_TRANS_READ_FAILED;
+ else
+ result.result = DMA_TRANS_WRITE_FAILED;
+ } else {
+ result.result = DMA_TRANS_NOERROR;
+ }
+
+ result.residue = desc->residue;
+
+ /* Run the link descriptor callback function */
+ spin_unlock_irqrestore(&chan->lock, flags);
+ dmaengine_desc_get_callback_invoke(&desc->async_tx, &result);
+ spin_lock_irqsave(&chan->lock, flags);
+
+ /* Run any dependencies, then free the descriptor */
+ dma_run_dependencies(&desc->async_tx);
+ xilinx_dma_free_tx_descriptor(chan, desc);
+
+ /*
+ * While we ran a callback the user called a terminate function,
+ * which takes care of cleaning up any remaining descriptors
+ */
+ if (chan->terminating)
+ break;
+ }
+
+ spin_unlock_irqrestore(&chan->lock, flags);
+}
+
+/**
+ * xilinx_dma_do_tasklet - Schedule completion tasklet
+ * @t: Pointer to the tasklet embedded in the Xilinx DMA channel structure
+ */
+static void xilinx_dma_do_tasklet(struct tasklet_struct *t)
+{
+ struct xilinx_dma_chan *chan = from_tasklet(chan, t, tasklet);
+
+ xilinx_dma_chan_desc_cleanup(chan);
+}
+
+/**
+ * xilinx_dma_alloc_chan_resources - Allocate channel resources
+ * @dchan: DMA channel
+ *
+ * Return: '0' on success and failure value on error
+ */
+static int xilinx_dma_alloc_chan_resources(struct dma_chan *dchan)
+{
+ struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
+ int i;
+
+ /* Has this channel already been allocated? */
+ if (chan->desc_pool)
+ return 0;
+
+ /*
+	 * The descriptor must be aligned to 64 bytes to meet
+	 * the Xilinx VDMA specification requirement.
+ */
+ if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
+ /* Allocate the buffer descriptors. */
+ chan->seg_v = dma_alloc_coherent(chan->dev,
+ sizeof(*chan->seg_v) * XILINX_DMA_NUM_DESCS,
+ &chan->seg_p, GFP_KERNEL);
+ if (!chan->seg_v) {
+ dev_err(chan->dev,
+ "unable to allocate channel %d descriptors\n",
+ chan->id);
+ return -ENOMEM;
+ }
+ /*
+		 * For cyclic DMA mode we need to program the tail descriptor
+		 * register with a value that is not part of the BD chain, so
+		 * allocate a descriptor segment during channel allocation for
+		 * programming the tail descriptor.
+ */
+ chan->cyclic_seg_v = dma_alloc_coherent(chan->dev,
+ sizeof(*chan->cyclic_seg_v),
+ &chan->cyclic_seg_p,
+ GFP_KERNEL);
+ if (!chan->cyclic_seg_v) {
+ dev_err(chan->dev,
+ "unable to allocate desc segment for cyclic DMA\n");
+ dma_free_coherent(chan->dev, sizeof(*chan->seg_v) *
+ XILINX_DMA_NUM_DESCS, chan->seg_v,
+ chan->seg_p);
+ return -ENOMEM;
+ }
+ chan->cyclic_seg_v->phys = chan->cyclic_seg_p;
+
+ for (i = 0; i < XILINX_DMA_NUM_DESCS; i++) {
+ chan->seg_v[i].hw.next_desc =
+ lower_32_bits(chan->seg_p + sizeof(*chan->seg_v) *
+ ((i + 1) % XILINX_DMA_NUM_DESCS));
+ chan->seg_v[i].hw.next_desc_msb =
+ upper_32_bits(chan->seg_p + sizeof(*chan->seg_v) *
+ ((i + 1) % XILINX_DMA_NUM_DESCS));
+ chan->seg_v[i].phys = chan->seg_p +
+ sizeof(*chan->seg_v) * i;
+ list_add_tail(&chan->seg_v[i].node,
+ &chan->free_seg_list);
+ }
+ } else if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIMCDMA) {
+ /* Allocate the buffer descriptors. */
+ chan->seg_mv = dma_alloc_coherent(chan->dev,
+ sizeof(*chan->seg_mv) *
+ XILINX_DMA_NUM_DESCS,
+ &chan->seg_p, GFP_KERNEL);
+ if (!chan->seg_mv) {
+ dev_err(chan->dev,
+ "unable to allocate channel %d descriptors\n",
+ chan->id);
+ return -ENOMEM;
+ }
+ for (i = 0; i < XILINX_DMA_NUM_DESCS; i++) {
+ chan->seg_mv[i].hw.next_desc =
+ lower_32_bits(chan->seg_p + sizeof(*chan->seg_mv) *
+ ((i + 1) % XILINX_DMA_NUM_DESCS));
+ chan->seg_mv[i].hw.next_desc_msb =
+ upper_32_bits(chan->seg_p + sizeof(*chan->seg_mv) *
+ ((i + 1) % XILINX_DMA_NUM_DESCS));
+ chan->seg_mv[i].phys = chan->seg_p +
+ sizeof(*chan->seg_mv) * i;
+ list_add_tail(&chan->seg_mv[i].node,
+ &chan->free_seg_list);
+ }
+ } else if (chan->xdev->dma_config->dmatype == XDMA_TYPE_CDMA) {
+ chan->desc_pool = dma_pool_create("xilinx_cdma_desc_pool",
+ chan->dev,
+ sizeof(struct xilinx_cdma_tx_segment),
+ __alignof__(struct xilinx_cdma_tx_segment),
+ 0);
+ } else {
+ chan->desc_pool = dma_pool_create("xilinx_vdma_desc_pool",
+ chan->dev,
+ sizeof(struct xilinx_vdma_tx_segment),
+ __alignof__(struct xilinx_vdma_tx_segment),
+ 0);
+ }
+
+ if (!chan->desc_pool &&
+ ((chan->xdev->dma_config->dmatype != XDMA_TYPE_AXIDMA) &&
+ chan->xdev->dma_config->dmatype != XDMA_TYPE_AXIMCDMA)) {
+ dev_err(chan->dev,
+ "unable to allocate channel %d descriptor pool\n",
+ chan->id);
+ return -ENOMEM;
+ }
+
+ dma_cookie_init(dchan);
+
+ if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
+		/* For AXI DMA, resetting one channel will reset the
+		 * other channel as well, so enable the interrupts here.
+ */
+ dma_ctrl_set(chan, XILINX_DMA_REG_DMACR,
+ XILINX_DMA_DMAXR_ALL_IRQ_MASK);
+ }
+
+ if ((chan->xdev->dma_config->dmatype == XDMA_TYPE_CDMA) && chan->has_sg)
+ dma_ctrl_set(chan, XILINX_DMA_REG_DMACR,
+ XILINX_CDMA_CR_SGMODE);
+
+ return 0;
+}
+
+/**
+ * xilinx_dma_calc_copysize - Calculate the amount of data to copy
+ * @chan: Driver specific DMA channel
+ * @size: Total data that needs to be copied
+ * @done: Amount of data that has been already copied
+ *
+ * Return: Amount of data that has to be copied
+ */
+static int xilinx_dma_calc_copysize(struct xilinx_dma_chan *chan,
+ int size, int done)
+{
+ size_t copy;
+
+ copy = min_t(size_t, size - done,
+ chan->xdev->max_buffer_len);
+
+ if ((copy + done < size) &&
+ chan->xdev->common.copy_align) {
+ /*
+ * If this is not the last descriptor, make sure
+ * the next one will be properly aligned
+ */
+ copy = rounddown(copy,
+ (1 << chan->xdev->common.copy_align));
+ }
+ return copy;
+}
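A hedged worked example of the rounding above, assuming max_buffer_len == 0x7fffff and copy_align == 3 (8-byte alignment); the 16 MiB request size is made up.

/*
 * Sketch: size = 16 MiB, done = 0
 *   copy = min(16 MiB - 0, 0x7fffff)   = 0x7fffff  (not the last chunk)
 *   copy = rounddown(0x7fffff, 1 << 3) = 0x7ffff8  (next chunk stays 8-byte aligned)
 * The final chunk is left unrounded so no bytes are dropped.
 */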
+
+/**
+ * xilinx_dma_tx_status - Get DMA transaction status
+ * @dchan: DMA channel
+ * @cookie: Transaction identifier
+ * @txstate: Transaction state
+ *
+ * Return: DMA transaction status
+ */
+static enum dma_status xilinx_dma_tx_status(struct dma_chan *dchan,
+ dma_cookie_t cookie,
+ struct dma_tx_state *txstate)
+{
+ struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
+ struct xilinx_dma_tx_descriptor *desc;
+ enum dma_status ret;
+ unsigned long flags;
+ u32 residue = 0;
+
+ ret = dma_cookie_status(dchan, cookie, txstate);
+ if (ret == DMA_COMPLETE || !txstate)
+ return ret;
+
+ spin_lock_irqsave(&chan->lock, flags);
+ if (!list_empty(&chan->active_list)) {
+ desc = list_last_entry(&chan->active_list,
+ struct xilinx_dma_tx_descriptor, node);
+ /*
+ * VDMA and simple mode do not support residue reporting, so the
+ * residue field will always be 0.
+ */
+ if (chan->has_sg && chan->xdev->dma_config->dmatype != XDMA_TYPE_VDMA)
+ residue = xilinx_dma_get_residue(chan, desc);
+ }
+ spin_unlock_irqrestore(&chan->lock, flags);
+
+ dma_set_residue(txstate, residue);
+
+ return ret;
+}
+
+/**
+ * xilinx_dma_stop_transfer - Halt DMA channel
+ * @chan: Driver specific DMA channel
+ *
+ * Return: '0' on success and failure value on error
+ */
+static int xilinx_dma_stop_transfer(struct xilinx_dma_chan *chan)
+{
+ u32 val;
+
+ dma_ctrl_clr(chan, XILINX_DMA_REG_DMACR, XILINX_DMA_DMACR_RUNSTOP);
+
+ /* Wait for the hardware to halt */
+ return xilinx_dma_poll_timeout(chan, XILINX_DMA_REG_DMASR, val,
+ val & XILINX_DMA_DMASR_HALTED, 0,
+ XILINX_DMA_LOOP_COUNT);
+}
+
+/**
+ * xilinx_cdma_stop_transfer - Wait for the current transfer to complete
+ * @chan: Driver specific DMA channel
+ *
+ * Return: '0' on success and failure value on error
+ */
+static int xilinx_cdma_stop_transfer(struct xilinx_dma_chan *chan)
+{
+ u32 val;
+
+ return xilinx_dma_poll_timeout(chan, XILINX_DMA_REG_DMASR, val,
+ val & XILINX_DMA_DMASR_IDLE, 0,
+ XILINX_DMA_LOOP_COUNT);
+}
+
+/**
+ * xilinx_dma_start - Start DMA channel
+ * @chan: Driver specific DMA channel
+ */
+static void xilinx_dma_start(struct xilinx_dma_chan *chan)
+{
+ int err;
+ u32 val;
+
+ dma_ctrl_set(chan, XILINX_DMA_REG_DMACR, XILINX_DMA_DMACR_RUNSTOP);
+
+ /* Wait for the hardware to start */
+ err = xilinx_dma_poll_timeout(chan, XILINX_DMA_REG_DMASR, val,
+ !(val & XILINX_DMA_DMASR_HALTED), 0,
+ XILINX_DMA_LOOP_COUNT);
+
+ if (err) {
+ dev_err(chan->dev, "Cannot start channel %p: %x\n",
+ chan, dma_ctrl_read(chan, XILINX_DMA_REG_DMASR));
+
+ chan->err = true;
+ }
+}
+
+/**
+ * xilinx_vdma_start_transfer - Starts VDMA transfer
+ * @chan: Driver specific channel struct pointer
+ */
+static void xilinx_vdma_start_transfer(struct xilinx_dma_chan *chan)
+{
+ struct xilinx_vdma_config *config = &chan->config;
+ struct xilinx_dma_tx_descriptor *desc;
+ u32 reg, j;
+ struct xilinx_vdma_tx_segment *segment, *last = NULL;
+ int i = 0;
+
+ /* This function was invoked with lock held */
+ if (chan->err)
+ return;
+
+ if (!chan->idle)
+ return;
+
+ if (list_empty(&chan->pending_list))
+ return;
+
+ desc = list_first_entry(&chan->pending_list,
+ struct xilinx_dma_tx_descriptor, node);
+
+ /* Configure the hardware using info in the config structure */
+ if (chan->has_vflip) {
+ reg = dma_read(chan, XILINX_VDMA_REG_ENABLE_VERTICAL_FLIP);
+ reg &= ~XILINX_VDMA_ENABLE_VERTICAL_FLIP;
+ reg |= config->vflip_en;
+ dma_write(chan, XILINX_VDMA_REG_ENABLE_VERTICAL_FLIP,
+ reg);
+ }
+
+ reg = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR);
+
+ if (config->frm_cnt_en)
+ reg |= XILINX_DMA_DMACR_FRAMECNT_EN;
+ else
+ reg &= ~XILINX_DMA_DMACR_FRAMECNT_EN;
+
+ /* If not parking, enable circular mode */
+ if (config->park)
+ reg &= ~XILINX_DMA_DMACR_CIRC_EN;
+ else
+ reg |= XILINX_DMA_DMACR_CIRC_EN;
+
+ dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, reg);
+
+ j = chan->desc_submitcount;
+ reg = dma_read(chan, XILINX_DMA_REG_PARK_PTR);
+ if (chan->direction == DMA_MEM_TO_DEV) {
+ reg &= ~XILINX_DMA_PARK_PTR_RD_REF_MASK;
+ reg |= j << XILINX_DMA_PARK_PTR_RD_REF_SHIFT;
+ } else {
+ reg &= ~XILINX_DMA_PARK_PTR_WR_REF_MASK;
+ reg |= j << XILINX_DMA_PARK_PTR_WR_REF_SHIFT;
+ }
+ dma_write(chan, XILINX_DMA_REG_PARK_PTR, reg);
+
+ /* Start the hardware */
+ xilinx_dma_start(chan);
+
+ if (chan->err)
+ return;
+
+ /* Start the transfer */
+ if (chan->desc_submitcount < chan->num_frms)
+ i = chan->desc_submitcount;
+
+ list_for_each_entry(segment, &desc->segments, node) {
+ if (chan->ext_addr)
+ vdma_desc_write_64(chan,
+ XILINX_VDMA_REG_START_ADDRESS_64(i++),
+ segment->hw.buf_addr,
+ segment->hw.buf_addr_msb);
+ else
+ vdma_desc_write(chan,
+ XILINX_VDMA_REG_START_ADDRESS(i++),
+ segment->hw.buf_addr);
+
+ last = segment;
+ }
+
+ if (!last)
+ return;
+
+	/* HW expects these parameters to be the same for one transaction */
+ vdma_desc_write(chan, XILINX_DMA_REG_HSIZE, last->hw.hsize);
+ vdma_desc_write(chan, XILINX_DMA_REG_FRMDLY_STRIDE,
+ last->hw.stride);
+ vdma_desc_write(chan, XILINX_DMA_REG_VSIZE, last->hw.vsize);
+
+ chan->desc_submitcount++;
+ chan->desc_pendingcount--;
+ list_move_tail(&desc->node, &chan->active_list);
+ if (chan->desc_submitcount == chan->num_frms)
+ chan->desc_submitcount = 0;
+
+ chan->idle = false;
+}
+
+/**
+ * xilinx_cdma_start_transfer - Starts cdma transfer
+ * @chan: Driver specific channel struct pointer
+ */
+static void xilinx_cdma_start_transfer(struct xilinx_dma_chan *chan)
+{
+ struct xilinx_dma_tx_descriptor *head_desc, *tail_desc;
+ struct xilinx_cdma_tx_segment *tail_segment;
+ u32 ctrl_reg = dma_read(chan, XILINX_DMA_REG_DMACR);
+
+ if (chan->err)
+ return;
+
+ if (!chan->idle)
+ return;
+
+ if (list_empty(&chan->pending_list))
+ return;
+
+ head_desc = list_first_entry(&chan->pending_list,
+ struct xilinx_dma_tx_descriptor, node);
+ tail_desc = list_last_entry(&chan->pending_list,
+ struct xilinx_dma_tx_descriptor, node);
+ tail_segment = list_last_entry(&tail_desc->segments,
+ struct xilinx_cdma_tx_segment, node);
+
+ if (chan->desc_pendingcount <= XILINX_DMA_COALESCE_MAX) {
+ ctrl_reg &= ~XILINX_DMA_CR_COALESCE_MAX;
+ ctrl_reg |= chan->desc_pendingcount <<
+ XILINX_DMA_CR_COALESCE_SHIFT;
+ dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, ctrl_reg);
+ }
+
+ if (chan->has_sg) {
+ dma_ctrl_clr(chan, XILINX_DMA_REG_DMACR,
+ XILINX_CDMA_CR_SGMODE);
+
+ dma_ctrl_set(chan, XILINX_DMA_REG_DMACR,
+ XILINX_CDMA_CR_SGMODE);
+
+ xilinx_write(chan, XILINX_DMA_REG_CURDESC,
+ head_desc->async_tx.phys);
+
+ /* Update tail ptr register which will start the transfer */
+ xilinx_write(chan, XILINX_DMA_REG_TAILDESC,
+ tail_segment->phys);
+ } else {
+ /* In simple mode */
+ struct xilinx_cdma_tx_segment *segment;
+ struct xilinx_cdma_desc_hw *hw;
+
+ segment = list_first_entry(&head_desc->segments,
+ struct xilinx_cdma_tx_segment,
+ node);
+
+ hw = &segment->hw;
+
+ xilinx_write(chan, XILINX_CDMA_REG_SRCADDR,
+ xilinx_prep_dma_addr_t(hw->src_addr));
+ xilinx_write(chan, XILINX_CDMA_REG_DSTADDR,
+ xilinx_prep_dma_addr_t(hw->dest_addr));
+
+ /* Start the transfer */
+ dma_ctrl_write(chan, XILINX_DMA_REG_BTT,
+ hw->control & chan->xdev->max_buffer_len);
+ }
+
+ list_splice_tail_init(&chan->pending_list, &chan->active_list);
+ chan->desc_pendingcount = 0;
+ chan->idle = false;
+}
+
+/**
+ * xilinx_dma_start_transfer - Starts DMA transfer
+ * @chan: Driver specific channel struct pointer
+ */
+static void xilinx_dma_start_transfer(struct xilinx_dma_chan *chan)
+{
+ struct xilinx_dma_tx_descriptor *head_desc, *tail_desc;
+ struct xilinx_axidma_tx_segment *tail_segment;
+ u32 reg;
+
+ if (chan->err)
+ return;
+
+ if (list_empty(&chan->pending_list))
+ return;
+
+ if (!chan->idle)
+ return;
+
+ head_desc = list_first_entry(&chan->pending_list,
+ struct xilinx_dma_tx_descriptor, node);
+ tail_desc = list_last_entry(&chan->pending_list,
+ struct xilinx_dma_tx_descriptor, node);
+ tail_segment = list_last_entry(&tail_desc->segments,
+ struct xilinx_axidma_tx_segment, node);
+
+ reg = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR);
+
+ if (chan->desc_pendingcount <= XILINX_DMA_COALESCE_MAX) {
+ reg &= ~XILINX_DMA_CR_COALESCE_MAX;
+ reg |= chan->desc_pendingcount <<
+ XILINX_DMA_CR_COALESCE_SHIFT;
+ dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, reg);
+ }
+
+ if (chan->has_sg)
+ xilinx_write(chan, XILINX_DMA_REG_CURDESC,
+ head_desc->async_tx.phys);
+
+ xilinx_dma_start(chan);
+
+ if (chan->err)
+ return;
+
+ /* Start the transfer */
+ if (chan->has_sg) {
+ if (chan->cyclic)
+ xilinx_write(chan, XILINX_DMA_REG_TAILDESC,
+ chan->cyclic_seg_v->phys);
+ else
+ xilinx_write(chan, XILINX_DMA_REG_TAILDESC,
+ tail_segment->phys);
+ } else {
+ struct xilinx_axidma_tx_segment *segment;
+ struct xilinx_axidma_desc_hw *hw;
+
+ segment = list_first_entry(&head_desc->segments,
+ struct xilinx_axidma_tx_segment,
+ node);
+ hw = &segment->hw;
+
+ xilinx_write(chan, XILINX_DMA_REG_SRCDSTADDR,
+ xilinx_prep_dma_addr_t(hw->buf_addr));
+
+ /* Start the transfer */
+ dma_ctrl_write(chan, XILINX_DMA_REG_BTT,
+ hw->control & chan->xdev->max_buffer_len);
+ }
+
+ list_splice_tail_init(&chan->pending_list, &chan->active_list);
+ chan->desc_pendingcount = 0;
+ chan->idle = false;
+}
+
+/**
+ * xilinx_mcdma_start_transfer - Starts MCDMA transfer
+ * @chan: Driver specific channel struct pointer
+ */
+static void xilinx_mcdma_start_transfer(struct xilinx_dma_chan *chan)
+{
+ struct xilinx_dma_tx_descriptor *head_desc, *tail_desc;
+ struct xilinx_aximcdma_tx_segment *tail_segment;
+ u32 reg;
+
+ /*
+	 * The lock is already held by the calling function, so we don't
+	 * need to take it here again.
+ */
+
+ if (chan->err)
+ return;
+
+ if (!chan->idle)
+ return;
+
+ if (list_empty(&chan->pending_list))
+ return;
+
+ head_desc = list_first_entry(&chan->pending_list,
+ struct xilinx_dma_tx_descriptor, node);
+ tail_desc = list_last_entry(&chan->pending_list,
+ struct xilinx_dma_tx_descriptor, node);
+ tail_segment = list_last_entry(&tail_desc->segments,
+ struct xilinx_aximcdma_tx_segment, node);
+
+ reg = dma_ctrl_read(chan, XILINX_MCDMA_CHAN_CR_OFFSET(chan->tdest));
+
+ if (chan->desc_pendingcount <= XILINX_MCDMA_COALESCE_MAX) {
+ reg &= ~XILINX_MCDMA_COALESCE_MASK;
+ reg |= chan->desc_pendingcount <<
+ XILINX_MCDMA_COALESCE_SHIFT;
+ }
+
+ reg |= XILINX_MCDMA_IRQ_ALL_MASK;
+ dma_ctrl_write(chan, XILINX_MCDMA_CHAN_CR_OFFSET(chan->tdest), reg);
+
+ /* Program current descriptor */
+ xilinx_write(chan, XILINX_MCDMA_CHAN_CDESC_OFFSET(chan->tdest),
+ head_desc->async_tx.phys);
+
+ /* Program channel enable register */
+ reg = dma_ctrl_read(chan, XILINX_MCDMA_CHEN_OFFSET);
+ reg |= BIT(chan->tdest);
+ dma_ctrl_write(chan, XILINX_MCDMA_CHEN_OFFSET, reg);
+
+ /* Start the fetch of BDs for the channel */
+ reg = dma_ctrl_read(chan, XILINX_MCDMA_CHAN_CR_OFFSET(chan->tdest));
+ reg |= XILINX_MCDMA_CR_RUNSTOP_MASK;
+ dma_ctrl_write(chan, XILINX_MCDMA_CHAN_CR_OFFSET(chan->tdest), reg);
+
+ xilinx_dma_start(chan);
+
+ if (chan->err)
+ return;
+
+ /* Start the transfer */
+ xilinx_write(chan, XILINX_MCDMA_CHAN_TDESC_OFFSET(chan->tdest),
+ tail_segment->phys);
+
+ list_splice_tail_init(&chan->pending_list, &chan->active_list);
+ chan->desc_pendingcount = 0;
+ chan->idle = false;
+}
+
+/**
+ * xilinx_dma_issue_pending - Issue pending transactions
+ * @dchan: DMA channel
+ */
+static void xilinx_dma_issue_pending(struct dma_chan *dchan)
+{
+ struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
+ unsigned long flags;
+
+ spin_lock_irqsave(&chan->lock, flags);
+ chan->start_transfer(chan);
+ spin_unlock_irqrestore(&chan->lock, flags);
+}
+
+/**
+ * xilinx_dma_device_config - Configure the DMA channel
+ * @dchan: DMA channel
+ * @config: channel configuration
+ */
+static int xilinx_dma_device_config(struct dma_chan *dchan,
+ struct dma_slave_config *config)
+{
+ return 0;
+}
+
+/**
+ * xilinx_dma_complete_descriptor - Mark the active descriptor as complete
+ * @chan : xilinx DMA channel
+ *
+ * CONTEXT: hardirq
+ */
+static void xilinx_dma_complete_descriptor(struct xilinx_dma_chan *chan)
+{
+ struct xilinx_dma_tx_descriptor *desc, *next;
+
+ /* This function was invoked with lock held */
+ if (list_empty(&chan->active_list))
+ return;
+
+ list_for_each_entry_safe(desc, next, &chan->active_list, node) {
+ if (chan->has_sg && chan->xdev->dma_config->dmatype !=
+ XDMA_TYPE_VDMA)
+ desc->residue = xilinx_dma_get_residue(chan, desc);
+ else
+ desc->residue = 0;
+ desc->err = chan->err;
+
+ list_del(&desc->node);
+ if (!desc->cyclic)
+ dma_cookie_complete(&desc->async_tx);
+ list_add_tail(&desc->node, &chan->done_list);
+ }
+}
+
+/**
+ * xilinx_dma_reset - Reset DMA channel
+ * @chan: Driver specific DMA channel
+ *
+ * Return: '0' on success and failure value on error
+ */
+static int xilinx_dma_reset(struct xilinx_dma_chan *chan)
+{
+ int err;
+ u32 tmp;
+
+ dma_ctrl_set(chan, XILINX_DMA_REG_DMACR, XILINX_DMA_DMACR_RESET);
+
+ /* Wait for the hardware to finish reset */
+ err = xilinx_dma_poll_timeout(chan, XILINX_DMA_REG_DMACR, tmp,
+ !(tmp & XILINX_DMA_DMACR_RESET), 0,
+ XILINX_DMA_LOOP_COUNT);
+
+ if (err) {
+ dev_err(chan->dev, "reset timeout, cr %x, sr %x\n",
+ dma_ctrl_read(chan, XILINX_DMA_REG_DMACR),
+ dma_ctrl_read(chan, XILINX_DMA_REG_DMASR));
+ return -ETIMEDOUT;
+ }
+
+ chan->err = false;
+ chan->idle = true;
+ chan->desc_pendingcount = 0;
+ chan->desc_submitcount = 0;
+
+ return err;
+}
+
+/**
+ * xilinx_dma_chan_reset - Reset DMA channel and enable interrupts
+ * @chan: Driver specific DMA channel
+ *
+ * Return: '0' on success and failure value on error
+ */
+static int xilinx_dma_chan_reset(struct xilinx_dma_chan *chan)
+{
+ int err;
+
+ /* Reset VDMA */
+ err = xilinx_dma_reset(chan);
+ if (err)
+ return err;
+
+ /* Enable interrupts */
+ dma_ctrl_set(chan, XILINX_DMA_REG_DMACR,
+ XILINX_DMA_DMAXR_ALL_IRQ_MASK);
+
+ return 0;
+}
+
+/**
+ * xilinx_mcdma_irq_handler - MCDMA Interrupt handler
+ * @irq: IRQ number
+ * @data: Pointer to the Xilinx MCDMA channel structure
+ *
+ * Return: IRQ_HANDLED/IRQ_NONE
+ */
+static irqreturn_t xilinx_mcdma_irq_handler(int irq, void *data)
+{
+ struct xilinx_dma_chan *chan = data;
+ u32 status, ser_offset, chan_sermask, chan_offset = 0, chan_id;
+
+ if (chan->direction == DMA_DEV_TO_MEM)
+ ser_offset = XILINX_MCDMA_RXINT_SER_OFFSET;
+ else
+ ser_offset = XILINX_MCDMA_TXINT_SER_OFFSET;
+
+	/* Read the channel ID that raised the interrupt */
+ chan_sermask = dma_ctrl_read(chan, ser_offset);
+ chan_id = ffs(chan_sermask);
+
+ if (!chan_id)
+ return IRQ_NONE;
+
+ if (chan->direction == DMA_DEV_TO_MEM)
+ chan_offset = chan->xdev->dma_config->max_channels / 2;
+
+ chan_offset = chan_offset + (chan_id - 1);
+ chan = chan->xdev->chan[chan_offset];
+ /* Read the status and ack the interrupts. */
+ status = dma_ctrl_read(chan, XILINX_MCDMA_CHAN_SR_OFFSET(chan->tdest));
+ if (!(status & XILINX_MCDMA_IRQ_ALL_MASK))
+ return IRQ_NONE;
+
+ dma_ctrl_write(chan, XILINX_MCDMA_CHAN_SR_OFFSET(chan->tdest),
+ status & XILINX_MCDMA_IRQ_ALL_MASK);
+
+ if (status & XILINX_MCDMA_IRQ_ERR_MASK) {
+ dev_err(chan->dev, "Channel %p has errors %x cdr %x tdr %x\n",
+ chan,
+ dma_ctrl_read(chan, XILINX_MCDMA_CH_ERR_OFFSET),
+ dma_ctrl_read(chan, XILINX_MCDMA_CHAN_CDESC_OFFSET
+ (chan->tdest)),
+ dma_ctrl_read(chan, XILINX_MCDMA_CHAN_TDESC_OFFSET
+ (chan->tdest)));
+ chan->err = true;
+ }
+
+ if (status & XILINX_MCDMA_IRQ_DELAY_MASK) {
+ /*
+		 * The device is taking too long to do the transfer while the
+		 * user requires responsiveness.
+ */
+ dev_dbg(chan->dev, "Inter-packet latency too long\n");
+ }
+
+ if (status & XILINX_MCDMA_IRQ_IOC_MASK) {
+ spin_lock(&chan->lock);
+ xilinx_dma_complete_descriptor(chan);
+ chan->idle = true;
+ chan->start_transfer(chan);
+ spin_unlock(&chan->lock);
+ }
+
+ tasklet_schedule(&chan->tasklet);
+ return IRQ_HANDLED;
+}
+
+/**
+ * xilinx_dma_irq_handler - DMA Interrupt handler
+ * @irq: IRQ number
+ * @data: Pointer to the Xilinx DMA channel structure
+ *
+ * Return: IRQ_HANDLED/IRQ_NONE
+ */
+static irqreturn_t xilinx_dma_irq_handler(int irq, void *data)
+{
+ struct xilinx_dma_chan *chan = data;
+ u32 status;
+
+ /* Read the status and ack the interrupts. */
+ status = dma_ctrl_read(chan, XILINX_DMA_REG_DMASR);
+ if (!(status & XILINX_DMA_DMAXR_ALL_IRQ_MASK))
+ return IRQ_NONE;
+
+ dma_ctrl_write(chan, XILINX_DMA_REG_DMASR,
+ status & XILINX_DMA_DMAXR_ALL_IRQ_MASK);
+
+ if (status & XILINX_DMA_DMASR_ERR_IRQ) {
+ /*
+ * An error occurred. If C_FLUSH_ON_FSYNC is enabled and the
+ * error is recoverable, ignore it. Otherwise flag the error.
+ *
+ * Only recoverable errors can be cleared in the DMASR register,
+		 * so make sure not to set the other error bits to 1.
+ */
+ u32 errors = status & XILINX_DMA_DMASR_ALL_ERR_MASK;
+
+ dma_ctrl_write(chan, XILINX_DMA_REG_DMASR,
+ errors & XILINX_DMA_DMASR_ERR_RECOVER_MASK);
+
+ if (!chan->flush_on_fsync ||
+ (errors & ~XILINX_DMA_DMASR_ERR_RECOVER_MASK)) {
+ dev_err(chan->dev,
+ "Channel %p has errors %x, cdr %x tdr %x\n",
+ chan, errors,
+ dma_ctrl_read(chan, XILINX_DMA_REG_CURDESC),
+ dma_ctrl_read(chan, XILINX_DMA_REG_TAILDESC));
+ chan->err = true;
+ }
+ }
+
+ if (status & XILINX_DMA_DMASR_DLY_CNT_IRQ) {
+ /*
+		 * The device is taking too long to do the transfer while the
+		 * user requires responsiveness.
+ */
+ dev_dbg(chan->dev, "Inter-packet latency too long\n");
+ }
+
+ if (status & XILINX_DMA_DMASR_FRM_CNT_IRQ) {
+ spin_lock(&chan->lock);
+ xilinx_dma_complete_descriptor(chan);
+ chan->idle = true;
+ chan->start_transfer(chan);
+ spin_unlock(&chan->lock);
+ }
+
+ tasklet_schedule(&chan->tasklet);
+ return IRQ_HANDLED;
+}
+
+/**
+ * append_desc_queue - Queuing descriptor
+ * @chan: Driver specific dma channel
+ * @desc: dma transaction descriptor
+ */
+static void append_desc_queue(struct xilinx_dma_chan *chan,
+ struct xilinx_dma_tx_descriptor *desc)
+{
+ struct xilinx_vdma_tx_segment *tail_segment;
+ struct xilinx_dma_tx_descriptor *tail_desc;
+ struct xilinx_axidma_tx_segment *axidma_tail_segment;
+ struct xilinx_aximcdma_tx_segment *aximcdma_tail_segment;
+ struct xilinx_cdma_tx_segment *cdma_tail_segment;
+
+ if (list_empty(&chan->pending_list))
+ goto append;
+
+ /*
+ * Add the hardware descriptor to the chain of hardware descriptors
+ * that already exists in memory.
+ */
+ tail_desc = list_last_entry(&chan->pending_list,
+ struct xilinx_dma_tx_descriptor, node);
+ if (chan->xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
+ tail_segment = list_last_entry(&tail_desc->segments,
+ struct xilinx_vdma_tx_segment,
+ node);
+ tail_segment->hw.next_desc = (u32)desc->async_tx.phys;
+ } else if (chan->xdev->dma_config->dmatype == XDMA_TYPE_CDMA) {
+ cdma_tail_segment = list_last_entry(&tail_desc->segments,
+ struct xilinx_cdma_tx_segment,
+ node);
+ cdma_tail_segment->hw.next_desc = (u32)desc->async_tx.phys;
+ } else if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
+ axidma_tail_segment = list_last_entry(&tail_desc->segments,
+ struct xilinx_axidma_tx_segment,
+ node);
+ axidma_tail_segment->hw.next_desc = (u32)desc->async_tx.phys;
+ } else {
+ aximcdma_tail_segment =
+ list_last_entry(&tail_desc->segments,
+ struct xilinx_aximcdma_tx_segment,
+ node);
+ aximcdma_tail_segment->hw.next_desc = (u32)desc->async_tx.phys;
+ }
+
+ /*
+ * Add the software descriptor and all children to the list
+ * of pending transactions
+ */
+append:
+ list_add_tail(&desc->node, &chan->pending_list);
+ chan->desc_pendingcount++;
+
+ if (chan->has_sg && (chan->xdev->dma_config->dmatype == XDMA_TYPE_VDMA)
+ && unlikely(chan->desc_pendingcount > chan->num_frms)) {
+ dev_dbg(chan->dev, "desc pendingcount is too high\n");
+ chan->desc_pendingcount = chan->num_frms;
+ }
+}
+
+/**
+ * xilinx_dma_tx_submit - Submit DMA transaction
+ * @tx: Async transaction descriptor
+ *
+ * Return: cookie value on success and failure value on error
+ */
+static dma_cookie_t xilinx_dma_tx_submit(struct dma_async_tx_descriptor *tx)
+{
+ struct xilinx_dma_tx_descriptor *desc = to_dma_tx_descriptor(tx);
+ struct xilinx_dma_chan *chan = to_xilinx_chan(tx->chan);
+ dma_cookie_t cookie;
+ unsigned long flags;
+ int err;
+
+ if (chan->cyclic) {
+ xilinx_dma_free_tx_descriptor(chan, desc);
+ return -EBUSY;
+ }
+
+ if (chan->err) {
+ /*
+		 * If the reset fails, the system needs a hard reset as the
+		 * channel is no longer functional.
+ */
+ err = xilinx_dma_chan_reset(chan);
+ if (err < 0)
+ return err;
+ }
+
+ spin_lock_irqsave(&chan->lock, flags);
+
+ cookie = dma_cookie_assign(tx);
+
+ /* Put this transaction onto the tail of the pending queue */
+ append_desc_queue(chan, desc);
+
+ if (desc->cyclic)
+ chan->cyclic = true;
+
+ chan->terminating = false;
+
+ spin_unlock_irqrestore(&chan->lock, flags);
+
+ return cookie;
+}
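+
+/*
+ * Illustrative client flow (a sketch, not part of this driver): tx_submit is
+ * normally reached through the generic dmaengine API. The names "chan",
+ * "sgl" and "nents" below are placeholders for a channel and mapped
+ * scatterlist owned by the client driver.
+ *
+ *	struct dma_async_tx_descriptor *txd;
+ *	dma_cookie_t cookie;
+ *
+ *	txd = dmaengine_prep_slave_sg(chan, sgl, nents, DMA_MEM_TO_DEV,
+ *				      DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ *	if (!txd)
+ *		return -ENOMEM;
+ *	cookie = dmaengine_submit(txd);		// ends up in xilinx_dma_tx_submit()
+ *	if (dma_submit_error(cookie))
+ *		return -EIO;
+ *	dma_async_issue_pending(chan);		// kicks off the pending queue
+ */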
+
+/**
+ * xilinx_vdma_dma_prep_interleaved - prepare a descriptor for a
+ * DMA_SLAVE transaction
+ * @dchan: DMA channel
+ * @xt: Interleaved template pointer
+ * @flags: transfer ack flags
+ *
+ * Return: Async transaction descriptor on success and NULL on failure
+ */
+static struct dma_async_tx_descriptor *
+xilinx_vdma_dma_prep_interleaved(struct dma_chan *dchan,
+ struct dma_interleaved_template *xt,
+ unsigned long flags)
+{
+ struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
+ struct xilinx_dma_tx_descriptor *desc;
+ struct xilinx_vdma_tx_segment *segment;
+ struct xilinx_vdma_desc_hw *hw;
+
+ if (!is_slave_direction(xt->dir))
+ return NULL;
+
+ if (!xt->numf || !xt->sgl[0].size)
+ return NULL;
+
+ if (xt->frame_size != 1)
+ return NULL;
+
+ /* Allocate a transaction descriptor. */
+ desc = xilinx_dma_alloc_tx_descriptor(chan);
+ if (!desc)
+ return NULL;
+
+ dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);
+ desc->async_tx.tx_submit = xilinx_dma_tx_submit;
+ async_tx_ack(&desc->async_tx);
+
+ /* Allocate the link descriptor from DMA pool */
+ segment = xilinx_vdma_alloc_tx_segment(chan);
+ if (!segment)
+ goto error;
+
+ /* Fill in the hardware descriptor */
+ hw = &segment->hw;
+ hw->vsize = xt->numf;
+ hw->hsize = xt->sgl[0].size;
+ hw->stride = (xt->sgl[0].icg + xt->sgl[0].size) <<
+ XILINX_DMA_FRMDLY_STRIDE_STRIDE_SHIFT;
+ hw->stride |= chan->config.frm_dly <<
+ XILINX_DMA_FRMDLY_STRIDE_FRMDLY_SHIFT;
+
+ if (xt->dir != DMA_MEM_TO_DEV) {
+ if (chan->ext_addr) {
+ hw->buf_addr = lower_32_bits(xt->dst_start);
+ hw->buf_addr_msb = upper_32_bits(xt->dst_start);
+ } else {
+ hw->buf_addr = xt->dst_start;
+ }
+ } else {
+ if (chan->ext_addr) {
+ hw->buf_addr = lower_32_bits(xt->src_start);
+ hw->buf_addr_msb = upper_32_bits(xt->src_start);
+ } else {
+ hw->buf_addr = xt->src_start;
+ }
+ }
+
+ /* Insert the segment into the descriptor segments list. */
+ list_add_tail(&segment->node, &desc->segments);
+
+ /* Link the last hardware descriptor with the first. */
+ segment = list_first_entry(&desc->segments,
+ struct xilinx_vdma_tx_segment, node);
+ desc->async_tx.phys = segment->phys;
+
+ return &desc->async_tx;
+
+error:
+ xilinx_dma_free_tx_descriptor(chan, desc);
+ return NULL;
+}
+
+/**
+ * xilinx_cdma_prep_memcpy - prepare descriptors for a memcpy transaction
+ * @dchan: DMA channel
+ * @dma_dst: destination address
+ * @dma_src: source address
+ * @len: transfer length
+ * @flags: transfer ack flags
+ *
+ * Return: Async transaction descriptor on success and NULL on failure
+ */
+static struct dma_async_tx_descriptor *
+xilinx_cdma_prep_memcpy(struct dma_chan *dchan, dma_addr_t dma_dst,
+ dma_addr_t dma_src, size_t len, unsigned long flags)
+{
+ struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
+ struct xilinx_dma_tx_descriptor *desc;
+ struct xilinx_cdma_tx_segment *segment;
+ struct xilinx_cdma_desc_hw *hw;
+
+ if (!len || len > chan->xdev->max_buffer_len)
+ return NULL;
+
+ desc = xilinx_dma_alloc_tx_descriptor(chan);
+ if (!desc)
+ return NULL;
+
+ dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);
+ desc->async_tx.tx_submit = xilinx_dma_tx_submit;
+
+ /* Allocate the link descriptor from DMA pool */
+ segment = xilinx_cdma_alloc_tx_segment(chan);
+ if (!segment)
+ goto error;
+
+ hw = &segment->hw;
+ hw->control = len;
+ hw->src_addr = dma_src;
+ hw->dest_addr = dma_dst;
+ if (chan->ext_addr) {
+ hw->src_addr_msb = upper_32_bits(dma_src);
+ hw->dest_addr_msb = upper_32_bits(dma_dst);
+ }
+
+ /* Insert the segment into the descriptor segments list. */
+ list_add_tail(&segment->node, &desc->segments);
+
+ desc->async_tx.phys = segment->phys;
+ hw->next_desc = segment->phys;
+
+ return &desc->async_tx;
+
+error:
+ xilinx_dma_free_tx_descriptor(chan, desc);
+ return NULL;
+}
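+
+/*
+ * Illustrative memcpy usage (a sketch; "chan", "dst_dma", "src_dma" and
+ * "len" are placeholders, with the channel assumed to come from
+ * dma_request_chan()):
+ *
+ *	struct dma_async_tx_descriptor *txd;
+ *
+ *	txd = dmaengine_prep_dma_memcpy(chan, dst_dma, src_dma, len,
+ *					DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ *	if (txd) {
+ *		dmaengine_submit(txd);
+ *		dma_async_issue_pending(chan);
+ *	}
+ */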
+
+/**
+ * xilinx_dma_prep_slave_sg - prepare descriptors for a DMA_SLAVE transaction
+ * @dchan: DMA channel
+ * @sgl: scatterlist to transfer to/from
+ * @sg_len: number of entries in @sgl
+ * @direction: DMA direction
+ * @flags: transfer ack flags
+ * @context: APP words of the descriptor
+ *
+ * Return: Async transaction descriptor on success and NULL on failure
+ */
+static struct dma_async_tx_descriptor *xilinx_dma_prep_slave_sg(
+ struct dma_chan *dchan, struct scatterlist *sgl, unsigned int sg_len,
+ enum dma_transfer_direction direction, unsigned long flags,
+ void *context)
+{
+ struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
+ struct xilinx_dma_tx_descriptor *desc;
+ struct xilinx_axidma_tx_segment *segment = NULL;
+ u32 *app_w = (u32 *)context;
+ struct scatterlist *sg;
+ size_t copy;
+ size_t sg_used;
+ unsigned int i;
+
+ if (!is_slave_direction(direction))
+ return NULL;
+
+ /* Allocate a transaction descriptor. */
+ desc = xilinx_dma_alloc_tx_descriptor(chan);
+ if (!desc)
+ return NULL;
+
+ dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);
+ desc->async_tx.tx_submit = xilinx_dma_tx_submit;
+
+ /* Build transactions using information in the scatter gather list */
+ for_each_sg(sgl, sg, sg_len, i) {
+ sg_used = 0;
+
+ /* Loop until the entire scatterlist entry is used */
+ while (sg_used < sg_dma_len(sg)) {
+ struct xilinx_axidma_desc_hw *hw;
+
+ /* Get a free segment */
+ segment = xilinx_axidma_alloc_tx_segment(chan);
+ if (!segment)
+ goto error;
+
+ /*
+ * Calculate the maximum number of bytes to transfer,
+ * making sure it is less than the hw limit
+ */
+ copy = xilinx_dma_calc_copysize(chan, sg_dma_len(sg),
+ sg_used);
+ hw = &segment->hw;
+
+ /* Fill in the descriptor */
+ xilinx_axidma_buf(chan, hw, sg_dma_address(sg),
+ sg_used, 0);
+
+ hw->control = copy;
+
+ if (chan->direction == DMA_MEM_TO_DEV) {
+ if (app_w)
+ memcpy(hw->app, app_w, sizeof(u32) *
+ XILINX_DMA_NUM_APP_WORDS);
+ }
+
+ sg_used += copy;
+
+ /*
+ * Insert the segment into the descriptor segments
+ * list.
+ */
+ list_add_tail(&segment->node, &desc->segments);
+ }
+ }
+
+ segment = list_first_entry(&desc->segments,
+ struct xilinx_axidma_tx_segment, node);
+ desc->async_tx.phys = segment->phys;
+
+	/*
+	 * For a DMA_MEM_TO_DEV transfer, set SOP on the first segment and
+	 * EOP on the last one.
+	 */
+ if (chan->direction == DMA_MEM_TO_DEV) {
+ segment->hw.control |= XILINX_DMA_BD_SOP;
+ segment = list_last_entry(&desc->segments,
+ struct xilinx_axidma_tx_segment,
+ node);
+ segment->hw.control |= XILINX_DMA_BD_EOP;
+ }
+
+ return &desc->async_tx;
+
+error:
+ xilinx_dma_free_tx_descriptor(chan, desc);
+ return NULL;
+}
+
+/**
+ * xilinx_dma_prep_dma_cyclic - prepare descriptors for a cyclic DMA transaction
+ * @dchan: DMA channel
+ * @buf_addr: Physical address of the buffer
+ * @buf_len: Total length of the cyclic buffers
+ * @period_len: length of individual cyclic buffer
+ * @direction: DMA direction
+ * @flags: transfer ack flags
+ *
+ * Return: Async transaction descriptor on success and NULL on failure
+ */
+static struct dma_async_tx_descriptor *xilinx_dma_prep_dma_cyclic(
+ struct dma_chan *dchan, dma_addr_t buf_addr, size_t buf_len,
+ size_t period_len, enum dma_transfer_direction direction,
+ unsigned long flags)
+{
+ struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
+ struct xilinx_dma_tx_descriptor *desc;
+ struct xilinx_axidma_tx_segment *segment, *head_segment, *prev = NULL;
+ size_t copy, sg_used;
+ unsigned int num_periods;
+ int i;
+ u32 reg;
+
+ if (!period_len)
+ return NULL;
+
+ num_periods = buf_len / period_len;
+
+ if (!num_periods)
+ return NULL;
+
+ if (!is_slave_direction(direction))
+ return NULL;
+
+ /* Allocate a transaction descriptor. */
+ desc = xilinx_dma_alloc_tx_descriptor(chan);
+ if (!desc)
+ return NULL;
+
+ chan->direction = direction;
+ dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);
+ desc->async_tx.tx_submit = xilinx_dma_tx_submit;
+
+ for (i = 0; i < num_periods; ++i) {
+ sg_used = 0;
+
+ while (sg_used < period_len) {
+ struct xilinx_axidma_desc_hw *hw;
+
+ /* Get a free segment */
+ segment = xilinx_axidma_alloc_tx_segment(chan);
+ if (!segment)
+ goto error;
+
+ /*
+ * Calculate the maximum number of bytes to transfer,
+ * making sure it is less than the hw limit
+ */
+ copy = xilinx_dma_calc_copysize(chan, period_len,
+ sg_used);
+ hw = &segment->hw;
+ xilinx_axidma_buf(chan, hw, buf_addr, sg_used,
+ period_len * i);
+ hw->control = copy;
+
+ if (prev)
+ prev->hw.next_desc = segment->phys;
+
+ prev = segment;
+ sg_used += copy;
+
+ /*
+ * Insert the segment into the descriptor segments
+ * list.
+ */
+ list_add_tail(&segment->node, &desc->segments);
+ }
+ }
+
+ head_segment = list_first_entry(&desc->segments,
+ struct xilinx_axidma_tx_segment, node);
+ desc->async_tx.phys = head_segment->phys;
+
+ desc->cyclic = true;
+ reg = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR);
+ reg |= XILINX_DMA_CR_CYCLIC_BD_EN_MASK;
+ dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, reg);
+
+ segment = list_last_entry(&desc->segments,
+ struct xilinx_axidma_tx_segment,
+ node);
+ segment->hw.next_desc = (u32) head_segment->phys;
+
+	/*
+	 * For a DMA_MEM_TO_DEV transfer, set SOP on the first segment and
+	 * EOP on the last one.
+	 */
+ if (direction == DMA_MEM_TO_DEV) {
+ head_segment->hw.control |= XILINX_DMA_BD_SOP;
+ segment->hw.control |= XILINX_DMA_BD_EOP;
+ }
+
+ return &desc->async_tx;
+
+error:
+ xilinx_dma_free_tx_descriptor(chan, desc);
+ return NULL;
+}
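+
+/*
+ * Illustrative cyclic usage (a sketch; "chan", "buf_dma", "buf_len",
+ * "period_len", "period_done_cb" and "priv" are placeholders): audio-style
+ * clients typically register a per-period callback on the returned
+ * descriptor.
+ *
+ *	struct dma_async_tx_descriptor *txd;
+ *
+ *	txd = dmaengine_prep_dma_cyclic(chan, buf_dma, buf_len, period_len,
+ *					DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
+ *	if (txd) {
+ *		txd->callback = period_done_cb;
+ *		txd->callback_param = priv;
+ *		dmaengine_submit(txd);
+ *		dma_async_issue_pending(chan);
+ *	}
+ */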
+
+/**
+ * xilinx_mcdma_prep_slave_sg - prepare descriptors for a DMA_SLAVE transaction
+ * @dchan: DMA channel
+ * @sgl: scatterlist to transfer to/from
+ * @sg_len: number of entries in @sgl
+ * @direction: DMA direction
+ * @flags: transfer ack flags
+ * @context: APP words of the descriptor
+ *
+ * Return: Async transaction descriptor on success and NULL on failure
+ */
+static struct dma_async_tx_descriptor *
+xilinx_mcdma_prep_slave_sg(struct dma_chan *dchan, struct scatterlist *sgl,
+ unsigned int sg_len,
+ enum dma_transfer_direction direction,
+ unsigned long flags, void *context)
+{
+ struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
+ struct xilinx_dma_tx_descriptor *desc;
+ struct xilinx_aximcdma_tx_segment *segment = NULL;
+ u32 *app_w = (u32 *)context;
+ struct scatterlist *sg;
+ size_t copy;
+ size_t sg_used;
+ unsigned int i;
+
+ if (!is_slave_direction(direction))
+ return NULL;
+
+ /* Allocate a transaction descriptor. */
+ desc = xilinx_dma_alloc_tx_descriptor(chan);
+ if (!desc)
+ return NULL;
+
+ dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);
+ desc->async_tx.tx_submit = xilinx_dma_tx_submit;
+
+ /* Build transactions using information in the scatter gather list */
+ for_each_sg(sgl, sg, sg_len, i) {
+ sg_used = 0;
+
+ /* Loop until the entire scatterlist entry is used */
+ while (sg_used < sg_dma_len(sg)) {
+ struct xilinx_aximcdma_desc_hw *hw;
+
+ /* Get a free segment */
+ segment = xilinx_aximcdma_alloc_tx_segment(chan);
+ if (!segment)
+ goto error;
+
+ /*
+ * Calculate the maximum number of bytes to transfer,
+ * making sure it is less than the hw limit
+ */
+ copy = min_t(size_t, sg_dma_len(sg) - sg_used,
+ chan->xdev->max_buffer_len);
+ hw = &segment->hw;
+
+ /* Fill in the descriptor */
+ xilinx_aximcdma_buf(chan, hw, sg_dma_address(sg),
+ sg_used);
+ hw->control = copy;
+
+ if (chan->direction == DMA_MEM_TO_DEV && app_w) {
+ memcpy(hw->app, app_w, sizeof(u32) *
+ XILINX_DMA_NUM_APP_WORDS);
+ }
+
+ sg_used += copy;
+ /*
+ * Insert the segment into the descriptor segments
+ * list.
+ */
+ list_add_tail(&segment->node, &desc->segments);
+ }
+ }
+
+ segment = list_first_entry(&desc->segments,
+ struct xilinx_aximcdma_tx_segment, node);
+ desc->async_tx.phys = segment->phys;
+
+	/*
+	 * For a DMA_MEM_TO_DEV transfer, set SOP on the first segment and
+	 * EOP on the last one.
+	 */
+ if (chan->direction == DMA_MEM_TO_DEV) {
+ segment->hw.control |= XILINX_MCDMA_BD_SOP;
+ segment = list_last_entry(&desc->segments,
+ struct xilinx_aximcdma_tx_segment,
+ node);
+ segment->hw.control |= XILINX_MCDMA_BD_EOP;
+ }
+
+ return &desc->async_tx;
+
+error:
+ xilinx_dma_free_tx_descriptor(chan, desc);
+
+ return NULL;
+}
+
+/**
+ * xilinx_dma_terminate_all - Halt the channel and free descriptors
+ * @dchan: Driver specific DMA Channel pointer
+ *
+ * Return: '0' always.
+ */
+static int xilinx_dma_terminate_all(struct dma_chan *dchan)
+{
+ struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
+ u32 reg;
+ int err;
+
+ if (!chan->cyclic) {
+ err = chan->stop_transfer(chan);
+ if (err) {
+ dev_err(chan->dev, "Cannot stop channel %p: %x\n",
+ chan, dma_ctrl_read(chan,
+ XILINX_DMA_REG_DMASR));
+ chan->err = true;
+ }
+ }
+
+ xilinx_dma_chan_reset(chan);
+ /* Remove and free all of the descriptors in the lists */
+ chan->terminating = true;
+ xilinx_dma_free_descriptors(chan);
+ chan->idle = true;
+
+ if (chan->cyclic) {
+ reg = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR);
+ reg &= ~XILINX_DMA_CR_CYCLIC_BD_EN_MASK;
+ dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, reg);
+ chan->cyclic = false;
+ }
+
+ if ((chan->xdev->dma_config->dmatype == XDMA_TYPE_CDMA) && chan->has_sg)
+ dma_ctrl_clr(chan, XILINX_DMA_REG_DMACR,
+ XILINX_CDMA_CR_SGMODE);
+
+ return 0;
+}
+
+static void xilinx_dma_synchronize(struct dma_chan *dchan)
+{
+ struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
+
+ tasklet_kill(&chan->tasklet);
+}
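+
+/*
+ * Illustrative teardown (a sketch; "chan" is a placeholder): clients normally
+ * reach the two callbacks above through dmaengine_terminate_sync(), which
+ * invokes device_terminate_all() and then device_synchronize():
+ *
+ *	dmaengine_terminate_sync(chan);
+ *	dma_release_channel(chan);
+ */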
+
+/**
+ * xilinx_vdma_channel_set_config - Configure VDMA channel
+ * Run-time configuration for AXI VDMA; it supports:
+ * . halt the channel
+ * . configure interrupt coalescing and inter-packet delay threshold
+ * . start/stop parking
+ * . enable genlock
+ *
+ * @dchan: DMA channel
+ * @cfg: VDMA device configuration pointer
+ *
+ * Return: '0' on success and failure value on error
+ */
+int xilinx_vdma_channel_set_config(struct dma_chan *dchan,
+ struct xilinx_vdma_config *cfg)
+{
+ struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
+ u32 dmacr;
+
+ if (cfg->reset)
+ return xilinx_dma_chan_reset(chan);
+
+ dmacr = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR);
+
+ chan->config.frm_dly = cfg->frm_dly;
+ chan->config.park = cfg->park;
+
+ /* genlock settings */
+ chan->config.gen_lock = cfg->gen_lock;
+ chan->config.master = cfg->master;
+
+ dmacr &= ~XILINX_DMA_DMACR_GENLOCK_EN;
+ if (cfg->gen_lock && chan->genlock) {
+ dmacr |= XILINX_DMA_DMACR_GENLOCK_EN;
+ dmacr &= ~XILINX_DMA_DMACR_MASTER_MASK;
+ dmacr |= cfg->master << XILINX_DMA_DMACR_MASTER_SHIFT;
+ }
+
+ chan->config.frm_cnt_en = cfg->frm_cnt_en;
+ chan->config.vflip_en = cfg->vflip_en;
+
+ if (cfg->park)
+ chan->config.park_frm = cfg->park_frm;
+ else
+ chan->config.park_frm = -1;
+
+ chan->config.coalesc = cfg->coalesc;
+ chan->config.delay = cfg->delay;
+
+ if (cfg->coalesc <= XILINX_DMA_DMACR_FRAME_COUNT_MAX) {
+ dmacr &= ~XILINX_DMA_DMACR_FRAME_COUNT_MASK;
+ dmacr |= cfg->coalesc << XILINX_DMA_DMACR_FRAME_COUNT_SHIFT;
+ chan->config.coalesc = cfg->coalesc;
+ }
+
+ if (cfg->delay <= XILINX_DMA_DMACR_DELAY_MAX) {
+ dmacr &= ~XILINX_DMA_DMACR_DELAY_MASK;
+ dmacr |= cfg->delay << XILINX_DMA_DMACR_DELAY_SHIFT;
+ chan->config.delay = cfg->delay;
+ }
+
+ /* FSync Source selection */
+ dmacr &= ~XILINX_DMA_DMACR_FSYNCSRC_MASK;
+ dmacr |= cfg->ext_fsync << XILINX_DMA_DMACR_FSYNCSRC_SHIFT;
+
+ dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, dmacr);
+
+ return 0;
+}
+EXPORT_SYMBOL(xilinx_vdma_channel_set_config);
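+
+/*
+ * Illustrative VDMA configuration (a sketch; "chan" is a placeholder for a
+ * VDMA channel requested by a client such as a video driver). Only fields
+ * consumed by xilinx_vdma_channel_set_config() are shown:
+ *
+ *	struct xilinx_vdma_config cfg = {
+ *		.frm_cnt_en = 1,	// interrupt on frame-count threshold
+ *		.coalesc = 1,		// frames per interrupt
+ *		.park = 0,		// circular mode, no parking
+ *		.gen_lock = 0,		// free-running, no genlock
+ *	};
+ *
+ *	xilinx_vdma_channel_set_config(chan, &cfg);
+ */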
+
+/* -----------------------------------------------------------------------------
+ * Probe and remove
+ */
+
+/**
+ * xilinx_dma_chan_remove - Per Channel remove function
+ * @chan: Driver specific DMA channel
+ */
+static void xilinx_dma_chan_remove(struct xilinx_dma_chan *chan)
+{
+ /* Disable all interrupts */
+ dma_ctrl_clr(chan, XILINX_DMA_REG_DMACR,
+ XILINX_DMA_DMAXR_ALL_IRQ_MASK);
+
+ if (chan->irq > 0)
+ free_irq(chan->irq, chan);
+
+ tasklet_kill(&chan->tasklet);
+
+ list_del(&chan->common.device_node);
+}
+
+static int axidma_clk_init(struct platform_device *pdev, struct clk **axi_clk,
+ struct clk **tx_clk, struct clk **rx_clk,
+ struct clk **sg_clk, struct clk **tmp_clk)
+{
+ int err;
+
+ *tmp_clk = NULL;
+
+ *axi_clk = devm_clk_get(&pdev->dev, "s_axi_lite_aclk");
+ if (IS_ERR(*axi_clk))
+ return dev_err_probe(&pdev->dev, PTR_ERR(*axi_clk), "failed to get axi_aclk\n");
+
+ *tx_clk = devm_clk_get(&pdev->dev, "m_axi_mm2s_aclk");
+ if (IS_ERR(*tx_clk))
+ *tx_clk = NULL;
+
+ *rx_clk = devm_clk_get(&pdev->dev, "m_axi_s2mm_aclk");
+ if (IS_ERR(*rx_clk))
+ *rx_clk = NULL;
+
+ *sg_clk = devm_clk_get(&pdev->dev, "m_axi_sg_aclk");
+ if (IS_ERR(*sg_clk))
+ *sg_clk = NULL;
+
+ err = clk_prepare_enable(*axi_clk);
+ if (err) {
+ dev_err(&pdev->dev, "failed to enable axi_clk (%d)\n", err);
+ return err;
+ }
+
+ err = clk_prepare_enable(*tx_clk);
+ if (err) {
+ dev_err(&pdev->dev, "failed to enable tx_clk (%d)\n", err);
+ goto err_disable_axiclk;
+ }
+
+ err = clk_prepare_enable(*rx_clk);
+ if (err) {
+ dev_err(&pdev->dev, "failed to enable rx_clk (%d)\n", err);
+ goto err_disable_txclk;
+ }
+
+ err = clk_prepare_enable(*sg_clk);
+ if (err) {
+ dev_err(&pdev->dev, "failed to enable sg_clk (%d)\n", err);
+ goto err_disable_rxclk;
+ }
+
+ return 0;
+
+err_disable_rxclk:
+ clk_disable_unprepare(*rx_clk);
+err_disable_txclk:
+ clk_disable_unprepare(*tx_clk);
+err_disable_axiclk:
+ clk_disable_unprepare(*axi_clk);
+
+ return err;
+}
+
+static int axicdma_clk_init(struct platform_device *pdev, struct clk **axi_clk,
+ struct clk **dev_clk, struct clk **tmp_clk,
+ struct clk **tmp1_clk, struct clk **tmp2_clk)
+{
+ int err;
+
+ *tmp_clk = NULL;
+ *tmp1_clk = NULL;
+ *tmp2_clk = NULL;
+
+ *axi_clk = devm_clk_get(&pdev->dev, "s_axi_lite_aclk");
+ if (IS_ERR(*axi_clk))
+ return dev_err_probe(&pdev->dev, PTR_ERR(*axi_clk), "failed to get axi_aclk\n");
+
+ *dev_clk = devm_clk_get(&pdev->dev, "m_axi_aclk");
+ if (IS_ERR(*dev_clk))
+ return dev_err_probe(&pdev->dev, PTR_ERR(*dev_clk), "failed to get dev_clk\n");
+
+ err = clk_prepare_enable(*axi_clk);
+ if (err) {
+ dev_err(&pdev->dev, "failed to enable axi_clk (%d)\n", err);
+ return err;
+ }
+
+ err = clk_prepare_enable(*dev_clk);
+ if (err) {
+ dev_err(&pdev->dev, "failed to enable dev_clk (%d)\n", err);
+ goto err_disable_axiclk;
+ }
+
+ return 0;
+
+err_disable_axiclk:
+ clk_disable_unprepare(*axi_clk);
+
+ return err;
+}
+
+static int axivdma_clk_init(struct platform_device *pdev, struct clk **axi_clk,
+ struct clk **tx_clk, struct clk **txs_clk,
+ struct clk **rx_clk, struct clk **rxs_clk)
+{
+ int err;
+
+ *axi_clk = devm_clk_get(&pdev->dev, "s_axi_lite_aclk");
+ if (IS_ERR(*axi_clk))
+ return dev_err_probe(&pdev->dev, PTR_ERR(*axi_clk), "failed to get axi_aclk\n");
+
+ *tx_clk = devm_clk_get(&pdev->dev, "m_axi_mm2s_aclk");
+ if (IS_ERR(*tx_clk))
+ *tx_clk = NULL;
+
+ *txs_clk = devm_clk_get(&pdev->dev, "m_axis_mm2s_aclk");
+ if (IS_ERR(*txs_clk))
+ *txs_clk = NULL;
+
+ *rx_clk = devm_clk_get(&pdev->dev, "m_axi_s2mm_aclk");
+ if (IS_ERR(*rx_clk))
+ *rx_clk = NULL;
+
+ *rxs_clk = devm_clk_get(&pdev->dev, "s_axis_s2mm_aclk");
+ if (IS_ERR(*rxs_clk))
+ *rxs_clk = NULL;
+
+ err = clk_prepare_enable(*axi_clk);
+ if (err) {
+ dev_err(&pdev->dev, "failed to enable axi_clk (%d)\n",
+ err);
+ return err;
+ }
+
+ err = clk_prepare_enable(*tx_clk);
+ if (err) {
+ dev_err(&pdev->dev, "failed to enable tx_clk (%d)\n", err);
+ goto err_disable_axiclk;
+ }
+
+ err = clk_prepare_enable(*txs_clk);
+ if (err) {
+ dev_err(&pdev->dev, "failed to enable txs_clk (%d)\n", err);
+ goto err_disable_txclk;
+ }
+
+ err = clk_prepare_enable(*rx_clk);
+ if (err) {
+ dev_err(&pdev->dev, "failed to enable rx_clk (%d)\n", err);
+ goto err_disable_txsclk;
+ }
+
+ err = clk_prepare_enable(*rxs_clk);
+ if (err) {
+ dev_err(&pdev->dev, "failed to enable rxs_clk (%d)\n", err);
+ goto err_disable_rxclk;
+ }
+
+ return 0;
+
+err_disable_rxclk:
+ clk_disable_unprepare(*rx_clk);
+err_disable_txsclk:
+ clk_disable_unprepare(*txs_clk);
+err_disable_txclk:
+ clk_disable_unprepare(*tx_clk);
+err_disable_axiclk:
+ clk_disable_unprepare(*axi_clk);
+
+ return err;
+}
+
+static void xdma_disable_allclks(struct xilinx_dma_device *xdev)
+{
+ clk_disable_unprepare(xdev->rxs_clk);
+ clk_disable_unprepare(xdev->rx_clk);
+ clk_disable_unprepare(xdev->txs_clk);
+ clk_disable_unprepare(xdev->tx_clk);
+ clk_disable_unprepare(xdev->axi_clk);
+}
+
+/**
+ * xilinx_dma_chan_probe - Per Channel Probing
+ * It gets the channel features from the device tree entry and
+ * initializes the special channel handling routines.
+ *
+ * @xdev: Driver specific device structure
+ * @node: Device node
+ *
+ * Return: '0' on success and failure value on error
+ */
+static int xilinx_dma_chan_probe(struct xilinx_dma_device *xdev,
+ struct device_node *node)
+{
+ struct xilinx_dma_chan *chan;
+ bool has_dre = false;
+ u32 value, width;
+ int err;
+
+ /* Allocate and initialize the channel structure */
+ chan = devm_kzalloc(xdev->dev, sizeof(*chan), GFP_KERNEL);
+ if (!chan)
+ return -ENOMEM;
+
+ chan->dev = xdev->dev;
+ chan->xdev = xdev;
+ chan->desc_pendingcount = 0x0;
+ chan->ext_addr = xdev->ext_addr;
+	/*
+	 * This variable ensures that descriptors are not submitted while the
+	 * DMA engine is in progress. It is used to avoid polling a bit in the
+	 * status register to know the DMA state in the driver hot path.
+	 */
+ chan->idle = true;
+
+ spin_lock_init(&chan->lock);
+ INIT_LIST_HEAD(&chan->pending_list);
+ INIT_LIST_HEAD(&chan->done_list);
+ INIT_LIST_HEAD(&chan->active_list);
+ INIT_LIST_HEAD(&chan->free_seg_list);
+
+ /* Retrieve the channel properties from the device tree */
+ has_dre = of_property_read_bool(node, "xlnx,include-dre");
+
+ chan->genlock = of_property_read_bool(node, "xlnx,genlock-mode");
+
+ err = of_property_read_u32(node, "xlnx,datawidth", &value);
+ if (err) {
+ dev_err(xdev->dev, "missing xlnx,datawidth property\n");
+ return err;
+ }
+ width = value >> 3; /* Convert bits to bytes */
+
+ /* If data width is greater than 8 bytes, DRE is not in hw */
+ if (width > 8)
+ has_dre = false;
+
+ if (!has_dre)
+ xdev->common.copy_align = (enum dmaengine_alignment)fls(width - 1);
+
+ if (of_device_is_compatible(node, "xlnx,axi-vdma-mm2s-channel") ||
+ of_device_is_compatible(node, "xlnx,axi-dma-mm2s-channel") ||
+ of_device_is_compatible(node, "xlnx,axi-cdma-channel")) {
+ chan->direction = DMA_MEM_TO_DEV;
+ chan->id = xdev->mm2s_chan_id++;
+ chan->tdest = chan->id;
+
+ chan->ctrl_offset = XILINX_DMA_MM2S_CTRL_OFFSET;
+ if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
+ chan->desc_offset = XILINX_VDMA_MM2S_DESC_OFFSET;
+ chan->config.park = 1;
+
+ if (xdev->flush_on_fsync == XILINX_DMA_FLUSH_BOTH ||
+ xdev->flush_on_fsync == XILINX_DMA_FLUSH_MM2S)
+ chan->flush_on_fsync = true;
+ }
+ } else if (of_device_is_compatible(node,
+ "xlnx,axi-vdma-s2mm-channel") ||
+ of_device_is_compatible(node,
+ "xlnx,axi-dma-s2mm-channel")) {
+ chan->direction = DMA_DEV_TO_MEM;
+ chan->id = xdev->s2mm_chan_id++;
+ chan->tdest = chan->id - xdev->dma_config->max_channels / 2;
+ chan->has_vflip = of_property_read_bool(node,
+ "xlnx,enable-vert-flip");
+ if (chan->has_vflip) {
+ chan->config.vflip_en = dma_read(chan,
+ XILINX_VDMA_REG_ENABLE_VERTICAL_FLIP) &
+ XILINX_VDMA_ENABLE_VERTICAL_FLIP;
+ }
+
+ if (xdev->dma_config->dmatype == XDMA_TYPE_AXIMCDMA)
+ chan->ctrl_offset = XILINX_MCDMA_S2MM_CTRL_OFFSET;
+ else
+ chan->ctrl_offset = XILINX_DMA_S2MM_CTRL_OFFSET;
+
+ if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
+ chan->desc_offset = XILINX_VDMA_S2MM_DESC_OFFSET;
+ chan->config.park = 1;
+
+ if (xdev->flush_on_fsync == XILINX_DMA_FLUSH_BOTH ||
+ xdev->flush_on_fsync == XILINX_DMA_FLUSH_S2MM)
+ chan->flush_on_fsync = true;
+ }
+ } else {
+ dev_err(xdev->dev, "Invalid channel compatible node\n");
+ return -EINVAL;
+ }
+
+ /* Request the interrupt */
+ chan->irq = of_irq_get(node, chan->tdest);
+ if (chan->irq < 0)
+ return dev_err_probe(xdev->dev, chan->irq, "failed to get irq\n");
+ err = request_irq(chan->irq, xdev->dma_config->irq_handler,
+ IRQF_SHARED, "xilinx-dma-controller", chan);
+ if (err) {
+ dev_err(xdev->dev, "unable to request IRQ %d\n", chan->irq);
+ return err;
+ }
+
+ if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
+ chan->start_transfer = xilinx_dma_start_transfer;
+ chan->stop_transfer = xilinx_dma_stop_transfer;
+ } else if (xdev->dma_config->dmatype == XDMA_TYPE_AXIMCDMA) {
+ chan->start_transfer = xilinx_mcdma_start_transfer;
+ chan->stop_transfer = xilinx_dma_stop_transfer;
+ } else if (xdev->dma_config->dmatype == XDMA_TYPE_CDMA) {
+ chan->start_transfer = xilinx_cdma_start_transfer;
+ chan->stop_transfer = xilinx_cdma_stop_transfer;
+ } else {
+ chan->start_transfer = xilinx_vdma_start_transfer;
+ chan->stop_transfer = xilinx_dma_stop_transfer;
+ }
+
+ /* check if SG is enabled (only for AXIDMA, AXIMCDMA, and CDMA) */
+ if (xdev->dma_config->dmatype != XDMA_TYPE_VDMA) {
+ if (xdev->dma_config->dmatype == XDMA_TYPE_AXIMCDMA ||
+ dma_ctrl_read(chan, XILINX_DMA_REG_DMASR) &
+ XILINX_DMA_DMASR_SG_MASK)
+ chan->has_sg = true;
+ dev_dbg(chan->dev, "ch %d: SG %s\n", chan->id,
+ chan->has_sg ? "enabled" : "disabled");
+ }
+
+ /* Initialize the tasklet */
+ tasklet_setup(&chan->tasklet, xilinx_dma_do_tasklet);
+
+ /*
+ * Initialize the DMA channel and add it to the DMA engine channels
+ * list.
+ */
+ chan->common.device = &xdev->common;
+
+ list_add_tail(&chan->common.device_node, &xdev->common.channels);
+ xdev->chan[chan->id] = chan;
+
+ /* Reset the channel */
+ err = xilinx_dma_chan_reset(chan);
+ if (err < 0) {
+ dev_err(xdev->dev, "Reset channel failed\n");
+ return err;
+ }
+
+ return 0;
+}
+
+/**
+ * xilinx_dma_child_probe - Per child node probe
+ * It gets the number of dma-channels per child node from the
+ * device tree and initializes all the channels.
+ *
+ * @xdev: Driver specific device structure
+ * @node: Device node
+ *
+ * Return: '0' on success and failure value on error.
+ */
+static int xilinx_dma_child_probe(struct xilinx_dma_device *xdev,
+ struct device_node *node)
+{
+ int ret, i;
+ u32 nr_channels = 1;
+
+ ret = of_property_read_u32(node, "dma-channels", &nr_channels);
+ if (xdev->dma_config->dmatype == XDMA_TYPE_AXIMCDMA && ret < 0)
+ dev_warn(xdev->dev, "missing dma-channels property\n");
+
+ for (i = 0; i < nr_channels; i++) {
+ ret = xilinx_dma_chan_probe(xdev, node);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+/**
+ * of_dma_xilinx_xlate - Translation function
+ * @dma_spec: Pointer to DMA specifier as found in the device tree
+ * @ofdma: Pointer to DMA controller data
+ *
+ * Return: DMA channel pointer on success and NULL on error
+ */
+static struct dma_chan *of_dma_xilinx_xlate(struct of_phandle_args *dma_spec,
+ struct of_dma *ofdma)
+{
+ struct xilinx_dma_device *xdev = ofdma->of_dma_data;
+ int chan_id = dma_spec->args[0];
+
+ if (chan_id >= xdev->dma_config->max_channels || !xdev->chan[chan_id])
+ return NULL;
+
+ return dma_get_slave_channel(&xdev->chan[chan_id]->common);
+}
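+
+/*
+ * Illustrative device tree consumer (a sketch; node and property names other
+ * than the specifier format are placeholders). The single specifier cell is
+ * the channel index validated above:
+ *
+ *	client_node {
+ *		dmas = <&axi_dma_0 0>, <&axi_dma_0 1>;
+ *		dma-names = "tx", "rx";
+ *	};
+ */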
+
+static const struct xilinx_dma_config axidma_config = {
+ .dmatype = XDMA_TYPE_AXIDMA,
+ .clk_init = axidma_clk_init,
+ .irq_handler = xilinx_dma_irq_handler,
+ .max_channels = XILINX_DMA_MAX_CHANS_PER_DEVICE,
+};
+
+static const struct xilinx_dma_config aximcdma_config = {
+ .dmatype = XDMA_TYPE_AXIMCDMA,
+ .clk_init = axidma_clk_init,
+ .irq_handler = xilinx_mcdma_irq_handler,
+ .max_channels = XILINX_MCDMA_MAX_CHANS_PER_DEVICE,
+};
+static const struct xilinx_dma_config axicdma_config = {
+ .dmatype = XDMA_TYPE_CDMA,
+ .clk_init = axicdma_clk_init,
+ .irq_handler = xilinx_dma_irq_handler,
+ .max_channels = XILINX_CDMA_MAX_CHANS_PER_DEVICE,
+};
+
+static const struct xilinx_dma_config axivdma_config = {
+ .dmatype = XDMA_TYPE_VDMA,
+ .clk_init = axivdma_clk_init,
+ .irq_handler = xilinx_dma_irq_handler,
+ .max_channels = XILINX_DMA_MAX_CHANS_PER_DEVICE,
+};
+
+static const struct of_device_id xilinx_dma_of_ids[] = {
+ { .compatible = "xlnx,axi-dma-1.00.a", .data = &axidma_config },
+ { .compatible = "xlnx,axi-cdma-1.00.a", .data = &axicdma_config },
+ { .compatible = "xlnx,axi-vdma-1.00.a", .data = &axivdma_config },
+ { .compatible = "xlnx,axi-mcdma-1.00.a", .data = &aximcdma_config },
+ {}
+};
+MODULE_DEVICE_TABLE(of, xilinx_dma_of_ids);
+
+/**
+ * xilinx_dma_probe - Driver probe function
+ * @pdev: Pointer to the platform_device structure
+ *
+ * Return: '0' on success and failure value on error
+ */
+static int xilinx_dma_probe(struct platform_device *pdev)
+{
+ int (*clk_init)(struct platform_device *, struct clk **, struct clk **,
+ struct clk **, struct clk **, struct clk **)
+ = axivdma_clk_init;
+ struct device_node *node = pdev->dev.of_node;
+ struct xilinx_dma_device *xdev;
+ struct device_node *child, *np = pdev->dev.of_node;
+ u32 num_frames, addr_width, len_width;
+ int i, err;
+
+ /* Allocate and initialize the DMA engine structure */
+ xdev = devm_kzalloc(&pdev->dev, sizeof(*xdev), GFP_KERNEL);
+ if (!xdev)
+ return -ENOMEM;
+
+ xdev->dev = &pdev->dev;
+ if (np) {
+ const struct of_device_id *match;
+
+ match = of_match_node(xilinx_dma_of_ids, np);
+ if (match && match->data) {
+ xdev->dma_config = match->data;
+ clk_init = xdev->dma_config->clk_init;
+ }
+ }
+
+ err = clk_init(pdev, &xdev->axi_clk, &xdev->tx_clk, &xdev->txs_clk,
+ &xdev->rx_clk, &xdev->rxs_clk);
+ if (err)
+ return err;
+
+ /* Request and map I/O memory */
+ xdev->regs = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(xdev->regs)) {
+ err = PTR_ERR(xdev->regs);
+ goto disable_clks;
+ }
+ /* Retrieve the DMA engine properties from the device tree */
+ xdev->max_buffer_len = GENMASK(XILINX_DMA_MAX_TRANS_LEN_MAX - 1, 0);
+ xdev->s2mm_chan_id = xdev->dma_config->max_channels / 2;
+
+ if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA ||
+ xdev->dma_config->dmatype == XDMA_TYPE_AXIMCDMA) {
+ if (!of_property_read_u32(node, "xlnx,sg-length-width",
+ &len_width)) {
+ if (len_width < XILINX_DMA_MAX_TRANS_LEN_MIN ||
+ len_width > XILINX_DMA_V2_MAX_TRANS_LEN_MAX) {
+ dev_warn(xdev->dev,
+ "invalid xlnx,sg-length-width property value. Using default width\n");
+ } else {
+ if (len_width > XILINX_DMA_MAX_TRANS_LEN_MAX)
+ dev_warn(xdev->dev, "Please ensure that IP supports buffer length > 23 bits\n");
+ xdev->max_buffer_len =
+ GENMASK(len_width - 1, 0);
+ }
+ }
+ }
+
+ if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
+ err = of_property_read_u32(node, "xlnx,num-fstores",
+ &num_frames);
+ if (err < 0) {
+ dev_err(xdev->dev,
+ "missing xlnx,num-fstores property\n");
+ goto disable_clks;
+ }
+
+ err = of_property_read_u32(node, "xlnx,flush-fsync",
+ &xdev->flush_on_fsync);
+ if (err < 0)
+ dev_warn(xdev->dev,
+ "missing xlnx,flush-fsync property\n");
+ }
+
+ err = of_property_read_u32(node, "xlnx,addrwidth", &addr_width);
+ if (err < 0)
+ dev_warn(xdev->dev, "missing xlnx,addrwidth property\n");
+
+ if (addr_width > 32)
+ xdev->ext_addr = true;
+ else
+ xdev->ext_addr = false;
+
+ /* Set the dma mask bits */
+ err = dma_set_mask_and_coherent(xdev->dev, DMA_BIT_MASK(addr_width));
+ if (err < 0) {
+ dev_err(xdev->dev, "DMA mask error %d\n", err);
+ goto disable_clks;
+ }
+
+ /* Initialize the DMA engine */
+ xdev->common.dev = &pdev->dev;
+
+ INIT_LIST_HEAD(&xdev->common.channels);
+ if (!(xdev->dma_config->dmatype == XDMA_TYPE_CDMA)) {
+ dma_cap_set(DMA_SLAVE, xdev->common.cap_mask);
+ dma_cap_set(DMA_PRIVATE, xdev->common.cap_mask);
+ }
+
+ xdev->common.device_alloc_chan_resources =
+ xilinx_dma_alloc_chan_resources;
+ xdev->common.device_free_chan_resources =
+ xilinx_dma_free_chan_resources;
+ xdev->common.device_terminate_all = xilinx_dma_terminate_all;
+ xdev->common.device_synchronize = xilinx_dma_synchronize;
+ xdev->common.device_tx_status = xilinx_dma_tx_status;
+ xdev->common.device_issue_pending = xilinx_dma_issue_pending;
+ xdev->common.device_config = xilinx_dma_device_config;
+ if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
+ dma_cap_set(DMA_CYCLIC, xdev->common.cap_mask);
+ xdev->common.device_prep_slave_sg = xilinx_dma_prep_slave_sg;
+ xdev->common.device_prep_dma_cyclic =
+ xilinx_dma_prep_dma_cyclic;
+		/* Residue calculation is supported only by AXI DMA and CDMA */
+ xdev->common.residue_granularity =
+ DMA_RESIDUE_GRANULARITY_SEGMENT;
+ } else if (xdev->dma_config->dmatype == XDMA_TYPE_CDMA) {
+ dma_cap_set(DMA_MEMCPY, xdev->common.cap_mask);
+ xdev->common.device_prep_dma_memcpy = xilinx_cdma_prep_memcpy;
+		/* Residue calculation is supported only by AXI DMA and CDMA */
+ xdev->common.residue_granularity =
+ DMA_RESIDUE_GRANULARITY_SEGMENT;
+ } else if (xdev->dma_config->dmatype == XDMA_TYPE_AXIMCDMA) {
+ xdev->common.device_prep_slave_sg = xilinx_mcdma_prep_slave_sg;
+ } else {
+ xdev->common.device_prep_interleaved_dma =
+ xilinx_vdma_dma_prep_interleaved;
+ }
+
+ platform_set_drvdata(pdev, xdev);
+
+ /* Initialize the channels */
+ for_each_child_of_node(node, child) {
+ err = xilinx_dma_child_probe(xdev, child);
+ if (err < 0) {
+ of_node_put(child);
+ goto error;
+ }
+ }
+
+ if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
+ for (i = 0; i < xdev->dma_config->max_channels; i++)
+ if (xdev->chan[i])
+ xdev->chan[i]->num_frms = num_frames;
+ }
+
+ /* Register the DMA engine with the core */
+ err = dma_async_device_register(&xdev->common);
+ if (err) {
+ dev_err(xdev->dev, "failed to register the dma device\n");
+ goto error;
+ }
+
+ err = of_dma_controller_register(node, of_dma_xilinx_xlate,
+ xdev);
+ if (err < 0) {
+ dev_err(&pdev->dev, "Unable to register DMA to DT\n");
+ dma_async_device_unregister(&xdev->common);
+ goto error;
+ }
+
+ if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA)
+ dev_info(&pdev->dev, "Xilinx AXI DMA Engine Driver Probed!!\n");
+ else if (xdev->dma_config->dmatype == XDMA_TYPE_CDMA)
+ dev_info(&pdev->dev, "Xilinx AXI CDMA Engine Driver Probed!!\n");
+ else if (xdev->dma_config->dmatype == XDMA_TYPE_AXIMCDMA)
+ dev_info(&pdev->dev, "Xilinx AXI MCDMA Engine Driver Probed!!\n");
+ else
+ dev_info(&pdev->dev, "Xilinx AXI VDMA Engine Driver Probed!!\n");
+
+ return 0;
+
+error:
+ for (i = 0; i < xdev->dma_config->max_channels; i++)
+ if (xdev->chan[i])
+ xilinx_dma_chan_remove(xdev->chan[i]);
+disable_clks:
+ xdma_disable_allclks(xdev);
+
+ return err;
+}
+
+/**
+ * xilinx_dma_remove - Driver remove function
+ * @pdev: Pointer to the platform_device structure
+ *
+ * Return: Always '0'
+ */
+static int xilinx_dma_remove(struct platform_device *pdev)
+{
+ struct xilinx_dma_device *xdev = platform_get_drvdata(pdev);
+ int i;
+
+ of_dma_controller_free(pdev->dev.of_node);
+
+ dma_async_device_unregister(&xdev->common);
+
+ for (i = 0; i < xdev->dma_config->max_channels; i++)
+ if (xdev->chan[i])
+ xilinx_dma_chan_remove(xdev->chan[i]);
+
+ xdma_disable_allclks(xdev);
+
+ return 0;
+}
+
+static struct platform_driver xilinx_vdma_driver = {
+ .driver = {
+ .name = "xilinx-vdma",
+ .of_match_table = xilinx_dma_of_ids,
+ },
+ .probe = xilinx_dma_probe,
+ .remove = xilinx_dma_remove,
+};
+
+module_platform_driver(xilinx_vdma_driver);
+
+MODULE_AUTHOR("Xilinx, Inc.");
+MODULE_DESCRIPTION("Xilinx VDMA driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/dma/xilinx/xilinx_dpdma.c b/drivers/dma/xilinx/xilinx_dpdma.c
new file mode 100644
index 000000000..84dc5240a
--- /dev/null
+++ b/drivers/dma/xilinx/xilinx_dpdma.c
@@ -0,0 +1,1777 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Xilinx ZynqMP DPDMA Engine driver
+ *
+ * Copyright (C) 2015 - 2020 Xilinx, Inc.
+ *
+ * Author: Hyun Woo Kwon <hyun.kwon@xilinx.com>
+ */
+
+#include <linux/bitfield.h>
+#include <linux/bits.h>
+#include <linux/clk.h>
+#include <linux/debugfs.h>
+#include <linux/delay.h>
+#include <linux/dma/xilinx_dpdma.h>
+#include <linux/dmaengine.h>
+#include <linux/dmapool.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_dma.h>
+#include <linux/platform_device.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/wait.h>
+
+#include <dt-bindings/dma/xlnx-zynqmp-dpdma.h>
+
+#include "../dmaengine.h"
+#include "../virt-dma.h"
+
+/* DPDMA registers */
+#define XILINX_DPDMA_ERR_CTRL 0x000
+#define XILINX_DPDMA_ISR 0x004
+#define XILINX_DPDMA_IMR 0x008
+#define XILINX_DPDMA_IEN 0x00c
+#define XILINX_DPDMA_IDS 0x010
+#define XILINX_DPDMA_INTR_DESC_DONE(n) BIT((n) + 0)
+#define XILINX_DPDMA_INTR_DESC_DONE_MASK GENMASK(5, 0)
+#define XILINX_DPDMA_INTR_NO_OSTAND(n) BIT((n) + 6)
+#define XILINX_DPDMA_INTR_NO_OSTAND_MASK GENMASK(11, 6)
+#define XILINX_DPDMA_INTR_AXI_ERR(n) BIT((n) + 12)
+#define XILINX_DPDMA_INTR_AXI_ERR_MASK GENMASK(17, 12)
+#define XILINX_DPDMA_INTR_DESC_ERR(n) BIT((n) + 16)
+#define XILINX_DPDMA_INTR_DESC_ERR_MASK GENMASK(23, 18)
+#define XILINX_DPDMA_INTR_WR_CMD_FIFO_FULL BIT(24)
+#define XILINX_DPDMA_INTR_WR_DATA_FIFO_FULL BIT(25)
+#define XILINX_DPDMA_INTR_AXI_4K_CROSS BIT(26)
+#define XILINX_DPDMA_INTR_VSYNC BIT(27)
+#define XILINX_DPDMA_INTR_CHAN_ERR_MASK 0x00041000
+#define XILINX_DPDMA_INTR_CHAN_ERR 0x00fff000
+#define XILINX_DPDMA_INTR_GLOBAL_ERR 0x07000000
+#define XILINX_DPDMA_INTR_ERR_ALL 0x07fff000
+#define XILINX_DPDMA_INTR_CHAN_MASK 0x00041041
+#define XILINX_DPDMA_INTR_GLOBAL_MASK 0x0f000000
+#define XILINX_DPDMA_INTR_ALL 0x0fffffff
+#define XILINX_DPDMA_EISR 0x014
+#define XILINX_DPDMA_EIMR 0x018
+#define XILINX_DPDMA_EIEN 0x01c
+#define XILINX_DPDMA_EIDS 0x020
+#define XILINX_DPDMA_EINTR_INV_APB BIT(0)
+#define XILINX_DPDMA_EINTR_RD_AXI_ERR(n) BIT((n) + 1)
+#define XILINX_DPDMA_EINTR_RD_AXI_ERR_MASK GENMASK(6, 1)
+#define XILINX_DPDMA_EINTR_PRE_ERR(n) BIT((n) + 7)
+#define XILINX_DPDMA_EINTR_PRE_ERR_MASK GENMASK(12, 7)
+#define XILINX_DPDMA_EINTR_CRC_ERR(n) BIT((n) + 13)
+#define XILINX_DPDMA_EINTR_CRC_ERR_MASK GENMASK(18, 13)
+#define XILINX_DPDMA_EINTR_WR_AXI_ERR(n) BIT((n) + 19)
+#define XILINX_DPDMA_EINTR_WR_AXI_ERR_MASK GENMASK(24, 19)
+#define XILINX_DPDMA_EINTR_DESC_DONE_ERR(n) BIT((n) + 25)
+#define XILINX_DPDMA_EINTR_DESC_DONE_ERR_MASK GENMASK(30, 25)
+#define XILINX_DPDMA_EINTR_RD_CMD_FIFO_FULL BIT(32)
+#define XILINX_DPDMA_EINTR_CHAN_ERR_MASK 0x02082082
+#define XILINX_DPDMA_EINTR_CHAN_ERR 0x7ffffffe
+#define XILINX_DPDMA_EINTR_GLOBAL_ERR 0x80000001
+#define XILINX_DPDMA_EINTR_ALL 0xffffffff
+#define XILINX_DPDMA_CNTL 0x100
+#define XILINX_DPDMA_GBL 0x104
+#define XILINX_DPDMA_GBL_TRIG_MASK(n) ((n) << 0)
+#define XILINX_DPDMA_GBL_RETRIG_MASK(n) ((n) << 6)
+#define XILINX_DPDMA_ALC0_CNTL 0x108
+#define XILINX_DPDMA_ALC0_STATUS 0x10c
+#define XILINX_DPDMA_ALC0_MAX 0x110
+#define XILINX_DPDMA_ALC0_MIN 0x114
+#define XILINX_DPDMA_ALC0_ACC 0x118
+#define XILINX_DPDMA_ALC0_ACC_TRAN 0x11c
+#define XILINX_DPDMA_ALC1_CNTL 0x120
+#define XILINX_DPDMA_ALC1_STATUS 0x124
+#define XILINX_DPDMA_ALC1_MAX 0x128
+#define XILINX_DPDMA_ALC1_MIN 0x12c
+#define XILINX_DPDMA_ALC1_ACC 0x130
+#define XILINX_DPDMA_ALC1_ACC_TRAN 0x134
+
+/* Channel register */
+#define XILINX_DPDMA_CH_BASE 0x200
+#define XILINX_DPDMA_CH_OFFSET 0x100
+#define XILINX_DPDMA_CH_DESC_START_ADDRE 0x000
+#define XILINX_DPDMA_CH_DESC_START_ADDRE_MASK GENMASK(15, 0)
+#define XILINX_DPDMA_CH_DESC_START_ADDR 0x004
+#define XILINX_DPDMA_CH_DESC_NEXT_ADDRE 0x008
+#define XILINX_DPDMA_CH_DESC_NEXT_ADDR 0x00c
+#define XILINX_DPDMA_CH_PYLD_CUR_ADDRE 0x010
+#define XILINX_DPDMA_CH_PYLD_CUR_ADDR 0x014
+#define XILINX_DPDMA_CH_CNTL 0x018
+#define XILINX_DPDMA_CH_CNTL_ENABLE BIT(0)
+#define XILINX_DPDMA_CH_CNTL_PAUSE BIT(1)
+#define XILINX_DPDMA_CH_CNTL_QOS_DSCR_WR_MASK GENMASK(5, 2)
+#define XILINX_DPDMA_CH_CNTL_QOS_DSCR_RD_MASK GENMASK(9, 6)
+#define XILINX_DPDMA_CH_CNTL_QOS_DATA_RD_MASK GENMASK(13, 10)
+#define XILINX_DPDMA_CH_CNTL_QOS_VID_CLASS 11
+#define XILINX_DPDMA_CH_STATUS 0x01c
+#define XILINX_DPDMA_CH_STATUS_OTRAN_CNT_MASK GENMASK(24, 21)
+#define XILINX_DPDMA_CH_VDO 0x020
+#define XILINX_DPDMA_CH_PYLD_SZ 0x024
+#define XILINX_DPDMA_CH_DESC_ID 0x028
+#define XILINX_DPDMA_CH_DESC_ID_MASK GENMASK(15, 0)
+
+/* DPDMA descriptor fields */
+#define XILINX_DPDMA_DESC_CONTROL_PREEMBLE 0xa5
+#define XILINX_DPDMA_DESC_CONTROL_COMPLETE_INTR BIT(8)
+#define XILINX_DPDMA_DESC_CONTROL_DESC_UPDATE BIT(9)
+#define XILINX_DPDMA_DESC_CONTROL_IGNORE_DONE BIT(10)
+#define XILINX_DPDMA_DESC_CONTROL_FRAG_MODE BIT(18)
+#define XILINX_DPDMA_DESC_CONTROL_LAST BIT(19)
+#define XILINX_DPDMA_DESC_CONTROL_ENABLE_CRC BIT(20)
+#define XILINX_DPDMA_DESC_CONTROL_LAST_OF_FRAME BIT(21)
+#define XILINX_DPDMA_DESC_ID_MASK GENMASK(15, 0)
+#define XILINX_DPDMA_DESC_HSIZE_STRIDE_HSIZE_MASK GENMASK(17, 0)
+#define XILINX_DPDMA_DESC_HSIZE_STRIDE_STRIDE_MASK GENMASK(31, 18)
+#define XILINX_DPDMA_DESC_ADDR_EXT_NEXT_ADDR_MASK GENMASK(15, 0)
+#define XILINX_DPDMA_DESC_ADDR_EXT_SRC_ADDR_MASK GENMASK(31, 16)
+
+#define XILINX_DPDMA_ALIGN_BYTES 256
+#define XILINX_DPDMA_LINESIZE_ALIGN_BITS 128
+
+#define XILINX_DPDMA_NUM_CHAN 6
+
+struct xilinx_dpdma_chan;
+
+/**
+ * struct xilinx_dpdma_hw_desc - DPDMA hardware descriptor
+ * @control: control configuration field
+ * @desc_id: descriptor ID
+ * @xfer_size: transfer size
+ * @hsize_stride: horizontal size and stride
+ * @timestamp_lsb: LSB of time stamp
+ * @timestamp_msb: MSB of time stamp
+ * @addr_ext: upper 16 bit of 48 bit address (next_desc and src_addr)
+ * @next_desc: next descriptor 32 bit address
+ * @src_addr: payload source address (1st page, 32 LSB)
+ * @addr_ext_23: payload source address (2nd and 3rd pages, 16 LSBs)
+ * @addr_ext_45: payload source address (4th and 5th pages, 16 LSBs)
+ * @src_addr2: payload source address (2nd page, 32 LSB)
+ * @src_addr3: payload source address (3rd page, 32 LSB)
+ * @src_addr4: payload source address (4th page, 32 LSB)
+ * @src_addr5: payload source address (5th page, 32 LSB)
+ * @crc: descriptor CRC
+ */
+struct xilinx_dpdma_hw_desc {
+ u32 control;
+ u32 desc_id;
+ u32 xfer_size;
+ u32 hsize_stride;
+ u32 timestamp_lsb;
+ u32 timestamp_msb;
+ u32 addr_ext;
+ u32 next_desc;
+ u32 src_addr;
+ u32 addr_ext_23;
+ u32 addr_ext_45;
+ u32 src_addr2;
+ u32 src_addr3;
+ u32 src_addr4;
+ u32 src_addr5;
+ u32 crc;
+} __aligned(XILINX_DPDMA_ALIGN_BYTES);
+
+/**
+ * struct xilinx_dpdma_sw_desc - DPDMA software descriptor
+ * @hw: DPDMA hardware descriptor
+ * @node: list node for software descriptors
+ * @dma_addr: DMA address of the software descriptor
+ */
+struct xilinx_dpdma_sw_desc {
+ struct xilinx_dpdma_hw_desc hw;
+ struct list_head node;
+ dma_addr_t dma_addr;
+};
+
+/**
+ * struct xilinx_dpdma_tx_desc - DPDMA transaction descriptor
+ * @vdesc: virtual DMA descriptor
+ * @chan: DMA channel
+ * @descriptors: list of software descriptors
+ * @error: an error has been detected with this descriptor
+ */
+struct xilinx_dpdma_tx_desc {
+ struct virt_dma_desc vdesc;
+ struct xilinx_dpdma_chan *chan;
+ struct list_head descriptors;
+ bool error;
+};
+
+#define to_dpdma_tx_desc(_desc) \
+ container_of(_desc, struct xilinx_dpdma_tx_desc, vdesc)
+
+/**
+ * struct xilinx_dpdma_chan - DPDMA channel
+ * @vchan: virtual DMA channel
+ * @reg: register base address
+ * @id: channel ID
+ * @wait_to_stop: queue to wait for outstanding transactions before stopping
+ * @running: true if the channel is running
+ * @first_frame: flag for the first frame of stream
+ * @video_group: flag if multi-channel operation is needed for video channels
+ * @lock: lock to access struct xilinx_dpdma_chan
+ * @desc_pool: descriptor allocation pool
+ * @err_task: error IRQ bottom half handler
+ * @desc: References to descriptors being processed
+ * @desc.pending: Descriptor scheduled to the hardware, pending execution
+ * @desc.active: Descriptor being executed by the hardware
+ * @xdev: DPDMA device
+ */
+struct xilinx_dpdma_chan {
+ struct virt_dma_chan vchan;
+ void __iomem *reg;
+ unsigned int id;
+
+ wait_queue_head_t wait_to_stop;
+ bool running;
+ bool first_frame;
+ bool video_group;
+
+ spinlock_t lock; /* lock to access struct xilinx_dpdma_chan */
+ struct dma_pool *desc_pool;
+ struct tasklet_struct err_task;
+
+ struct {
+ struct xilinx_dpdma_tx_desc *pending;
+ struct xilinx_dpdma_tx_desc *active;
+ } desc;
+
+ struct xilinx_dpdma_device *xdev;
+};
+
+#define to_xilinx_chan(_chan) \
+ container_of(_chan, struct xilinx_dpdma_chan, vchan.chan)
+
+/**
+ * struct xilinx_dpdma_device - DPDMA device
+ * @common: generic dma device structure
+ * @reg: register base address
+ * @dev: generic device structure
+ * @irq: the interrupt number
+ * @axi_clk: axi clock
+ * @chan: DPDMA channels
+ * @ext_addr: flag for 64 bit system (48 bit addressing)
+ */
+struct xilinx_dpdma_device {
+ struct dma_device common;
+ void __iomem *reg;
+ struct device *dev;
+ int irq;
+
+ struct clk *axi_clk;
+ struct xilinx_dpdma_chan *chan[XILINX_DPDMA_NUM_CHAN];
+
+ bool ext_addr;
+};
+
+/* -----------------------------------------------------------------------------
+ * DebugFS
+ */
+#define XILINX_DPDMA_DEBUGFS_READ_MAX_SIZE 32
+#define XILINX_DPDMA_DEBUGFS_UINT16_MAX_STR "65535"
+
+/* Match xilinx_dpdma_testcases vs dpdma_debugfs_reqs[] entry */
+enum xilinx_dpdma_testcases {
+ DPDMA_TC_INTR_DONE,
+ DPDMA_TC_NONE
+};
+
+struct xilinx_dpdma_debugfs {
+ enum xilinx_dpdma_testcases testcase;
+ u16 xilinx_dpdma_irq_done_count;
+ unsigned int chan_id;
+};
+
+static struct xilinx_dpdma_debugfs dpdma_debugfs;
+struct xilinx_dpdma_debugfs_request {
+ const char *name;
+ enum xilinx_dpdma_testcases tc;
+ ssize_t (*read)(char *buf);
+ int (*write)(char *args);
+};
+
+static void xilinx_dpdma_debugfs_desc_done_irq(struct xilinx_dpdma_chan *chan)
+{
+ if (IS_ENABLED(CONFIG_DEBUG_FS) && chan->id == dpdma_debugfs.chan_id)
+ dpdma_debugfs.xilinx_dpdma_irq_done_count++;
+}
+
+static ssize_t xilinx_dpdma_debugfs_desc_done_irq_read(char *buf)
+{
+ size_t out_str_len;
+
+ dpdma_debugfs.testcase = DPDMA_TC_NONE;
+
+ out_str_len = strlen(XILINX_DPDMA_DEBUGFS_UINT16_MAX_STR);
+ out_str_len = min_t(size_t, XILINX_DPDMA_DEBUGFS_READ_MAX_SIZE,
+ out_str_len);
+ snprintf(buf, out_str_len, "%d",
+ dpdma_debugfs.xilinx_dpdma_irq_done_count);
+
+ return 0;
+}
+
+static int xilinx_dpdma_debugfs_desc_done_irq_write(char *args)
+{
+ char *arg;
+ int ret;
+ u32 id;
+
+ arg = strsep(&args, " ");
+ if (!arg || strncasecmp(arg, "start", 5))
+ return -EINVAL;
+
+ arg = strsep(&args, " ");
+ if (!arg)
+ return -EINVAL;
+
+ ret = kstrtou32(arg, 0, &id);
+ if (ret < 0)
+ return ret;
+
+ if (id < ZYNQMP_DPDMA_VIDEO0 || id > ZYNQMP_DPDMA_AUDIO1)
+ return -EINVAL;
+
+ dpdma_debugfs.testcase = DPDMA_TC_INTR_DONE;
+ dpdma_debugfs.xilinx_dpdma_irq_done_count = 0;
+ dpdma_debugfs.chan_id = id;
+
+ return 0;
+}
+
+/* Match xilinx_dpdma_testcases vs dpdma_debugfs_reqs[] entry */
+static struct xilinx_dpdma_debugfs_request dpdma_debugfs_reqs[] = {
+ {
+ .name = "DESCRIPTOR_DONE_INTR",
+ .tc = DPDMA_TC_INTR_DONE,
+ .read = xilinx_dpdma_debugfs_desc_done_irq_read,
+ .write = xilinx_dpdma_debugfs_desc_done_irq_write,
+ },
+};
+
+static ssize_t xilinx_dpdma_debugfs_read(struct file *f, char __user *buf,
+ size_t size, loff_t *pos)
+{
+ enum xilinx_dpdma_testcases testcase;
+ char *kern_buff;
+ int ret = 0;
+
+ if (*pos != 0 || size <= 0)
+ return -EINVAL;
+
+ kern_buff = kzalloc(XILINX_DPDMA_DEBUGFS_READ_MAX_SIZE, GFP_KERNEL);
+ if (!kern_buff) {
+ dpdma_debugfs.testcase = DPDMA_TC_NONE;
+ return -ENOMEM;
+ }
+
+ testcase = READ_ONCE(dpdma_debugfs.testcase);
+ if (testcase != DPDMA_TC_NONE) {
+ ret = dpdma_debugfs_reqs[testcase].read(kern_buff);
+ if (ret < 0)
+ goto done;
+ } else {
+ strscpy(kern_buff, "No testcase executed",
+ XILINX_DPDMA_DEBUGFS_READ_MAX_SIZE);
+ }
+
+ size = min(size, strlen(kern_buff));
+ if (copy_to_user(buf, kern_buff, size))
+ ret = -EFAULT;
+
+done:
+ kfree(kern_buff);
+ if (ret)
+ return ret;
+
+ *pos = size + 1;
+ return size;
+}
+
+static ssize_t xilinx_dpdma_debugfs_write(struct file *f,
+ const char __user *buf, size_t size,
+ loff_t *pos)
+{
+ char *kern_buff, *kern_buff_start;
+ char *testcase;
+ unsigned int i;
+ int ret;
+
+ if (*pos != 0 || size <= 0)
+ return -EINVAL;
+
+	/* Only a single test instance is supported for now. */
+ if (dpdma_debugfs.testcase != DPDMA_TC_NONE)
+ return -EBUSY;
+
+ kern_buff = kzalloc(size, GFP_KERNEL);
+ if (!kern_buff)
+ return -ENOMEM;
+ kern_buff_start = kern_buff;
+
+ ret = strncpy_from_user(kern_buff, buf, size);
+ if (ret < 0)
+ goto done;
+
+ /* Read the testcase name from a user request. */
+ testcase = strsep(&kern_buff, " ");
+
+ for (i = 0; i < ARRAY_SIZE(dpdma_debugfs_reqs); i++) {
+ if (!strcasecmp(testcase, dpdma_debugfs_reqs[i].name))
+ break;
+ }
+
+ if (i == ARRAY_SIZE(dpdma_debugfs_reqs)) {
+ ret = -EINVAL;
+ goto done;
+ }
+
+ ret = dpdma_debugfs_reqs[i].write(kern_buff);
+ if (ret < 0)
+ goto done;
+
+ ret = size;
+
+done:
+ kfree(kern_buff_start);
+ return ret;
+}
+
+static const struct file_operations fops_xilinx_dpdma_dbgfs = {
+ .owner = THIS_MODULE,
+ .read = xilinx_dpdma_debugfs_read,
+ .write = xilinx_dpdma_debugfs_write,
+};
+
+static void xilinx_dpdma_debugfs_init(struct xilinx_dpdma_device *xdev)
+{
+ struct dentry *dent;
+
+ dpdma_debugfs.testcase = DPDMA_TC_NONE;
+
+ dent = debugfs_create_file("testcase", 0444, xdev->common.dbg_dev_root,
+ NULL, &fops_xilinx_dpdma_dbgfs);
+ if (IS_ERR(dent))
+ dev_err(xdev->dev, "Failed to create debugfs testcase file\n");
+}
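+
+/*
+ * Illustrative debugfs usage (a sketch; the exact path depends on where the
+ * dmaengine core roots its debugfs tree for this device). The write handler
+ * above expects "<TESTCASE_NAME> start <channel id>", and a subsequent read
+ * returns the descriptor-done interrupt count:
+ *
+ *	echo "DESCRIPTOR_DONE_INTR start 0" > .../dmaengine/<dev>/testcase
+ *	cat .../dmaengine/<dev>/testcase
+ */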
+
+/* -----------------------------------------------------------------------------
+ * I/O Accessors
+ */
+
+static inline u32 dpdma_read(void __iomem *base, u32 offset)
+{
+ return ioread32(base + offset);
+}
+
+static inline void dpdma_write(void __iomem *base, u32 offset, u32 val)
+{
+ iowrite32(val, base + offset);
+}
+
+static inline void dpdma_clr(void __iomem *base, u32 offset, u32 clr)
+{
+ dpdma_write(base, offset, dpdma_read(base, offset) & ~clr);
+}
+
+static inline void dpdma_set(void __iomem *base, u32 offset, u32 set)
+{
+ dpdma_write(base, offset, dpdma_read(base, offset) | set);
+}
+
+/* -----------------------------------------------------------------------------
+ * Descriptor Operations
+ */
+
+/**
+ * xilinx_dpdma_sw_desc_set_dma_addrs - Set DMA addresses in the descriptor
+ * @xdev: DPDMA device
+ * @sw_desc: The software descriptor in which to set DMA addresses
+ * @prev: The previous descriptor
+ * @dma_addr: array of dma addresses
+ * @num_src_addr: number of addresses in @dma_addr
+ *
+ * Set all the DMA addresses in the hardware descriptor corresponding to
+ * @sw_desc from @dma_addr. If a previous descriptor is specified in @prev,
+ * its next descriptor DMA address is set to the DMA address of @sw_desc.
+ * @prev may be identical to @sw_desc for cyclic transfers.
+ */
+static void xilinx_dpdma_sw_desc_set_dma_addrs(struct xilinx_dpdma_device *xdev,
+ struct xilinx_dpdma_sw_desc *sw_desc,
+ struct xilinx_dpdma_sw_desc *prev,
+ dma_addr_t dma_addr[],
+ unsigned int num_src_addr)
+{
+ struct xilinx_dpdma_hw_desc *hw_desc = &sw_desc->hw;
+ unsigned int i;
+
+ hw_desc->src_addr = lower_32_bits(dma_addr[0]);
+ if (xdev->ext_addr)
+ hw_desc->addr_ext |=
+ FIELD_PREP(XILINX_DPDMA_DESC_ADDR_EXT_SRC_ADDR_MASK,
+ upper_32_bits(dma_addr[0]));
+
+ for (i = 1; i < num_src_addr; i++) {
+ u32 *addr = &hw_desc->src_addr2;
+
+ addr[i - 1] = lower_32_bits(dma_addr[i]);
+
+ if (xdev->ext_addr) {
+ u32 *addr_ext = &hw_desc->addr_ext_23;
+ u32 addr_msb;
+
+ addr_msb = upper_32_bits(dma_addr[i]) & GENMASK(15, 0);
+ addr_msb <<= 16 * ((i - 1) % 2);
+ addr_ext[(i - 1) / 2] |= addr_msb;
+ }
+ }
+
+ if (!prev)
+ return;
+
+ prev->hw.next_desc = lower_32_bits(sw_desc->dma_addr);
+ if (xdev->ext_addr)
+ prev->hw.addr_ext |=
+ FIELD_PREP(XILINX_DPDMA_DESC_ADDR_EXT_NEXT_ADDR_MASK,
+ upper_32_bits(sw_desc->dma_addr));
+}
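+
+/*
+ * Worked example (illustrative): for a 48-bit address 0x1234_9abc_def0 in
+ * dma_addr[0], src_addr is set to 0x9abcdef0 and the SRC_ADDR field of
+ * addr_ext to 0x1234. For dma_addr[1] and dma_addr[2], the low 32 bits land
+ * in src_addr2 and src_addr3, and their upper 16 bits are packed into the
+ * low and high halves of addr_ext_23 respectively.
+ */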
+
+/**
+ * xilinx_dpdma_chan_alloc_sw_desc - Allocate a software descriptor
+ * @chan: DPDMA channel
+ *
+ * Allocate a software descriptor from the channel's descriptor pool.
+ *
+ * Return: a software descriptor or NULL.
+ */
+static struct xilinx_dpdma_sw_desc *
+xilinx_dpdma_chan_alloc_sw_desc(struct xilinx_dpdma_chan *chan)
+{
+ struct xilinx_dpdma_sw_desc *sw_desc;
+ dma_addr_t dma_addr;
+
+ sw_desc = dma_pool_zalloc(chan->desc_pool, GFP_ATOMIC, &dma_addr);
+ if (!sw_desc)
+ return NULL;
+
+ sw_desc->dma_addr = dma_addr;
+
+ return sw_desc;
+}
+
+/**
+ * xilinx_dpdma_chan_free_sw_desc - Free a software descriptor
+ * @chan: DPDMA channel
+ * @sw_desc: software descriptor to free
+ *
+ * Free a software descriptor from the channel's descriptor pool.
+ */
+static void
+xilinx_dpdma_chan_free_sw_desc(struct xilinx_dpdma_chan *chan,
+ struct xilinx_dpdma_sw_desc *sw_desc)
+{
+ dma_pool_free(chan->desc_pool, sw_desc, sw_desc->dma_addr);
+}
+
+/**
+ * xilinx_dpdma_chan_dump_tx_desc - Dump a tx descriptor
+ * @chan: DPDMA channel
+ * @tx_desc: tx descriptor to dump
+ *
+ * Dump contents of a tx descriptor
+ */
+static void xilinx_dpdma_chan_dump_tx_desc(struct xilinx_dpdma_chan *chan,
+ struct xilinx_dpdma_tx_desc *tx_desc)
+{
+ struct xilinx_dpdma_sw_desc *sw_desc;
+ struct device *dev = chan->xdev->dev;
+ unsigned int i = 0;
+
+ dev_dbg(dev, "------- TX descriptor dump start -------\n");
+ dev_dbg(dev, "------- channel ID = %d -------\n", chan->id);
+
+ list_for_each_entry(sw_desc, &tx_desc->descriptors, node) {
+ struct xilinx_dpdma_hw_desc *hw_desc = &sw_desc->hw;
+
+ dev_dbg(dev, "------- HW descriptor %d -------\n", i++);
+ dev_dbg(dev, "descriptor DMA addr: %pad\n", &sw_desc->dma_addr);
+ dev_dbg(dev, "control: 0x%08x\n", hw_desc->control);
+ dev_dbg(dev, "desc_id: 0x%08x\n", hw_desc->desc_id);
+ dev_dbg(dev, "xfer_size: 0x%08x\n", hw_desc->xfer_size);
+ dev_dbg(dev, "hsize_stride: 0x%08x\n", hw_desc->hsize_stride);
+ dev_dbg(dev, "timestamp_lsb: 0x%08x\n", hw_desc->timestamp_lsb);
+ dev_dbg(dev, "timestamp_msb: 0x%08x\n", hw_desc->timestamp_msb);
+ dev_dbg(dev, "addr_ext: 0x%08x\n", hw_desc->addr_ext);
+ dev_dbg(dev, "next_desc: 0x%08x\n", hw_desc->next_desc);
+ dev_dbg(dev, "src_addr: 0x%08x\n", hw_desc->src_addr);
+ dev_dbg(dev, "addr_ext_23: 0x%08x\n", hw_desc->addr_ext_23);
+ dev_dbg(dev, "addr_ext_45: 0x%08x\n", hw_desc->addr_ext_45);
+ dev_dbg(dev, "src_addr2: 0x%08x\n", hw_desc->src_addr2);
+ dev_dbg(dev, "src_addr3: 0x%08x\n", hw_desc->src_addr3);
+ dev_dbg(dev, "src_addr4: 0x%08x\n", hw_desc->src_addr4);
+ dev_dbg(dev, "src_addr5: 0x%08x\n", hw_desc->src_addr5);
+ dev_dbg(dev, "crc: 0x%08x\n", hw_desc->crc);
+ }
+
+ dev_dbg(dev, "------- TX descriptor dump end -------\n");
+}
+
+/**
+ * xilinx_dpdma_chan_alloc_tx_desc - Allocate a transaction descriptor
+ * @chan: DPDMA channel
+ *
+ * Allocate a tx descriptor.
+ *
+ * Return: a tx descriptor or NULL.
+ */
+static struct xilinx_dpdma_tx_desc *
+xilinx_dpdma_chan_alloc_tx_desc(struct xilinx_dpdma_chan *chan)
+{
+ struct xilinx_dpdma_tx_desc *tx_desc;
+
+ tx_desc = kzalloc(sizeof(*tx_desc), GFP_NOWAIT);
+ if (!tx_desc)
+ return NULL;
+
+ INIT_LIST_HEAD(&tx_desc->descriptors);
+ tx_desc->chan = chan;
+ tx_desc->error = false;
+
+ return tx_desc;
+}
+
+/**
+ * xilinx_dpdma_chan_free_tx_desc - Free a virtual DMA descriptor
+ * @vdesc: virtual DMA descriptor
+ *
+ * Free the virtual DMA descriptor @vdesc including its software descriptors.
+ */
+static void xilinx_dpdma_chan_free_tx_desc(struct virt_dma_desc *vdesc)
+{
+ struct xilinx_dpdma_sw_desc *sw_desc, *next;
+ struct xilinx_dpdma_tx_desc *desc;
+
+ if (!vdesc)
+ return;
+
+ desc = to_dpdma_tx_desc(vdesc);
+
+ list_for_each_entry_safe(sw_desc, next, &desc->descriptors, node) {
+ list_del(&sw_desc->node);
+ xilinx_dpdma_chan_free_sw_desc(desc->chan, sw_desc);
+ }
+
+ kfree(desc);
+}
+
+/**
+ * xilinx_dpdma_chan_prep_interleaved_dma - Prepare an interleaved dma
+ * descriptor
+ * @chan: DPDMA channel
+ * @xt: dma interleaved template
+ *
+ * Prepare a tx descriptor including internal software/hardware descriptors
+ * based on @xt.
+ *
+ * Return: A DPDMA TX descriptor on success, or NULL.
+ */
+static struct xilinx_dpdma_tx_desc *
+xilinx_dpdma_chan_prep_interleaved_dma(struct xilinx_dpdma_chan *chan,
+ struct dma_interleaved_template *xt)
+{
+ struct xilinx_dpdma_tx_desc *tx_desc;
+ struct xilinx_dpdma_sw_desc *sw_desc;
+ struct xilinx_dpdma_hw_desc *hw_desc;
+ size_t hsize = xt->sgl[0].size;
+ size_t stride = hsize + xt->sgl[0].icg;
+
+ if (!IS_ALIGNED(xt->src_start, XILINX_DPDMA_ALIGN_BYTES)) {
+ dev_err(chan->xdev->dev,
+ "chan%u: buffer should be aligned at %d B\n",
+ chan->id, XILINX_DPDMA_ALIGN_BYTES);
+ return NULL;
+ }
+
+ tx_desc = xilinx_dpdma_chan_alloc_tx_desc(chan);
+ if (!tx_desc)
+ return NULL;
+
+ sw_desc = xilinx_dpdma_chan_alloc_sw_desc(chan);
+ if (!sw_desc) {
+ xilinx_dpdma_chan_free_tx_desc(&tx_desc->vdesc);
+ return NULL;
+ }
+
+ xilinx_dpdma_sw_desc_set_dma_addrs(chan->xdev, sw_desc, sw_desc,
+ &xt->src_start, 1);
+
+ hw_desc = &sw_desc->hw;
+ hsize = ALIGN(hsize, XILINX_DPDMA_LINESIZE_ALIGN_BITS / 8);
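+	/*
+	 * Note (derived from the field encoding below): the transfer size
+	 * covers the whole frame, the line size is programmed in bytes and
+	 * the stride in 16-byte units.
+	 */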
+ hw_desc->xfer_size = hsize * xt->numf;
+ hw_desc->hsize_stride =
+ FIELD_PREP(XILINX_DPDMA_DESC_HSIZE_STRIDE_HSIZE_MASK, hsize) |
+ FIELD_PREP(XILINX_DPDMA_DESC_HSIZE_STRIDE_STRIDE_MASK,
+ stride / 16);
+ hw_desc->control |= XILINX_DPDMA_DESC_CONTROL_PREEMBLE;
+ hw_desc->control |= XILINX_DPDMA_DESC_CONTROL_COMPLETE_INTR;
+ hw_desc->control |= XILINX_DPDMA_DESC_CONTROL_IGNORE_DONE;
+ hw_desc->control |= XILINX_DPDMA_DESC_CONTROL_LAST_OF_FRAME;
+
+ list_add_tail(&sw_desc->node, &tx_desc->descriptors);
+
+ return tx_desc;
+}
+
+/* -----------------------------------------------------------------------------
+ * DPDMA Channel Operations
+ */
+
+/**
+ * xilinx_dpdma_chan_enable - Enable the channel
+ * @chan: DPDMA channel
+ *
+ * Enable the channel and its interrupts. Set the QoS values for video class.
+ */
+static void xilinx_dpdma_chan_enable(struct xilinx_dpdma_chan *chan)
+{
+ u32 reg;
+
+ reg = (XILINX_DPDMA_INTR_CHAN_MASK << chan->id)
+ | XILINX_DPDMA_INTR_GLOBAL_MASK;
+ dpdma_write(chan->xdev->reg, XILINX_DPDMA_IEN, reg);
+ reg = (XILINX_DPDMA_EINTR_CHAN_ERR_MASK << chan->id)
+ | XILINX_DPDMA_INTR_GLOBAL_ERR;
+ dpdma_write(chan->xdev->reg, XILINX_DPDMA_EIEN, reg);
+
+ reg = XILINX_DPDMA_CH_CNTL_ENABLE
+ | FIELD_PREP(XILINX_DPDMA_CH_CNTL_QOS_DSCR_WR_MASK,
+ XILINX_DPDMA_CH_CNTL_QOS_VID_CLASS)
+ | FIELD_PREP(XILINX_DPDMA_CH_CNTL_QOS_DSCR_RD_MASK,
+ XILINX_DPDMA_CH_CNTL_QOS_VID_CLASS)
+ | FIELD_PREP(XILINX_DPDMA_CH_CNTL_QOS_DATA_RD_MASK,
+ XILINX_DPDMA_CH_CNTL_QOS_VID_CLASS);
+ dpdma_set(chan->reg, XILINX_DPDMA_CH_CNTL, reg);
+}
+
+/**
+ * xilinx_dpdma_chan_disable - Disable the channel
+ * @chan: DPDMA channel
+ *
+ * Disable the channel and its interrupts.
+ */
+static void xilinx_dpdma_chan_disable(struct xilinx_dpdma_chan *chan)
+{
+ u32 reg;
+
+ reg = XILINX_DPDMA_INTR_CHAN_MASK << chan->id;
+ dpdma_write(chan->xdev->reg, XILINX_DPDMA_IEN, reg);
+ reg = XILINX_DPDMA_EINTR_CHAN_ERR_MASK << chan->id;
+ dpdma_write(chan->xdev->reg, XILINX_DPDMA_EIEN, reg);
+
+ dpdma_clr(chan->reg, XILINX_DPDMA_CH_CNTL, XILINX_DPDMA_CH_CNTL_ENABLE);
+}
+
+/**
+ * xilinx_dpdma_chan_pause - Pause the channel
+ * @chan: DPDMA channel
+ *
+ * Pause the channel.
+ */
+static void xilinx_dpdma_chan_pause(struct xilinx_dpdma_chan *chan)
+{
+ dpdma_set(chan->reg, XILINX_DPDMA_CH_CNTL, XILINX_DPDMA_CH_CNTL_PAUSE);
+}
+
+/**
+ * xilinx_dpdma_chan_unpause - Unpause the channel
+ * @chan: DPDMA channel
+ *
+ * Unpause the channel.
+ */
+static void xilinx_dpdma_chan_unpause(struct xilinx_dpdma_chan *chan)
+{
+ dpdma_clr(chan->reg, XILINX_DPDMA_CH_CNTL, XILINX_DPDMA_CH_CNTL_PAUSE);
+}
+
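+/**
+ * xilinx_dpdma_chan_video_group_ready - Check video group readiness
+ * @chan: DPDMA channel
+ *
+ * Return: A bitmask of the video group channels if they are all running, or 0
+ * if any channel in the group has not been started yet.
+ */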
+static u32 xilinx_dpdma_chan_video_group_ready(struct xilinx_dpdma_chan *chan)
+{
+ struct xilinx_dpdma_device *xdev = chan->xdev;
+ u32 channels = 0;
+ unsigned int i;
+
+ for (i = ZYNQMP_DPDMA_VIDEO0; i <= ZYNQMP_DPDMA_VIDEO2; i++) {
+ if (xdev->chan[i]->video_group && !xdev->chan[i]->running)
+ return 0;
+
+ if (xdev->chan[i]->video_group)
+ channels |= BIT(i);
+ }
+
+ return channels;
+}
+
+/**
+ * xilinx_dpdma_chan_queue_transfer - Queue the next transfer
+ * @chan: DPDMA channel
+ *
+ * Queue the next descriptor, if any, to the hardware. If the channel is
+ * stopped, start it first. Otherwise retrigger it with the next descriptor.
+ */
+static void xilinx_dpdma_chan_queue_transfer(struct xilinx_dpdma_chan *chan)
+{
+ struct xilinx_dpdma_device *xdev = chan->xdev;
+ struct xilinx_dpdma_sw_desc *sw_desc;
+ struct xilinx_dpdma_tx_desc *desc;
+ struct virt_dma_desc *vdesc;
+ u32 reg, channels;
+ bool first_frame;
+
+ lockdep_assert_held(&chan->lock);
+
+ if (chan->desc.pending)
+ return;
+
+ if (!chan->running) {
+ xilinx_dpdma_chan_unpause(chan);
+ xilinx_dpdma_chan_enable(chan);
+ chan->first_frame = true;
+ chan->running = true;
+ }
+
+ vdesc = vchan_next_desc(&chan->vchan);
+ if (!vdesc)
+ return;
+
+ desc = to_dpdma_tx_desc(vdesc);
+ chan->desc.pending = desc;
+ list_del(&desc->vdesc.node);
+
+ /*
+	 * Assign the cookie to descriptors in this transaction. Only the lower
+	 * 16 bits of the cookie are used, but that should be enough.
+ */
+ list_for_each_entry(sw_desc, &desc->descriptors, node)
+ sw_desc->hw.desc_id = desc->vdesc.tx.cookie
+ & XILINX_DPDMA_CH_DESC_ID_MASK;
+
+ sw_desc = list_first_entry(&desc->descriptors,
+ struct xilinx_dpdma_sw_desc, node);
+ dpdma_write(chan->reg, XILINX_DPDMA_CH_DESC_START_ADDR,
+ lower_32_bits(sw_desc->dma_addr));
+ if (xdev->ext_addr)
+ dpdma_write(chan->reg, XILINX_DPDMA_CH_DESC_START_ADDRE,
+ FIELD_PREP(XILINX_DPDMA_CH_DESC_START_ADDRE_MASK,
+ upper_32_bits(sw_desc->dma_addr)));
+
+ first_frame = chan->first_frame;
+ chan->first_frame = false;
+
+ if (chan->video_group) {
+ channels = xilinx_dpdma_chan_video_group_ready(chan);
+ /*
+ * Trigger the transfer only when all channels in the group are
+ * ready.
+ */
+ if (!channels)
+ return;
+ } else {
+ channels = BIT(chan->id);
+ }
+
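+	/*
+	 * Use the trigger register for the first frame to start the channel,
+	 * and the retrigger register afterwards to switch to the new
+	 * descriptor.
+	 */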
+ if (first_frame)
+ reg = XILINX_DPDMA_GBL_TRIG_MASK(channels);
+ else
+ reg = XILINX_DPDMA_GBL_RETRIG_MASK(channels);
+
+ dpdma_write(xdev->reg, XILINX_DPDMA_GBL, reg);
+}
+
+/**
+ * xilinx_dpdma_chan_ostand - Number of outstanding transactions
+ * @chan: DPDMA channel
+ *
+ * Read and return the number of outstanding transactions from register.
+ *
+ * Return: Number of outstanding transactions from the status register.
+ */
+static u32 xilinx_dpdma_chan_ostand(struct xilinx_dpdma_chan *chan)
+{
+ return FIELD_GET(XILINX_DPDMA_CH_STATUS_OTRAN_CNT_MASK,
+ dpdma_read(chan->reg, XILINX_DPDMA_CH_STATUS));
+}
+
+/**
+ * xilinx_dpdma_chan_notify_no_ostand - Notify no outstanding transaction event
+ * @chan: DPDMA channel
+ *
+ * Notify waiters of the 'no outstanding transaction' event, so they can stop
+ * the channel safely. This function is supposed to be called when the
+ * 'no outstanding' interrupt is generated. The interrupt is disabled here and
+ * should be re-enabled once the event has been handled. If the channel status
+ * register still shows some number of outstanding transactions, the interrupt
+ * remains enabled.
+ *
+ * Return: 0 on success, or -EWOULDBLOCK if there are still outstanding
+ * transactions.
+ */
+static int xilinx_dpdma_chan_notify_no_ostand(struct xilinx_dpdma_chan *chan)
+{
+ u32 cnt;
+
+ cnt = xilinx_dpdma_chan_ostand(chan);
+ if (cnt) {
+ dev_dbg(chan->xdev->dev,
+ "chan%u: %d outstanding transactions\n",
+ chan->id, cnt);
+ return -EWOULDBLOCK;
+ }
+
+ /* Disable 'no outstanding' interrupt */
+ dpdma_write(chan->xdev->reg, XILINX_DPDMA_IDS,
+ XILINX_DPDMA_INTR_NO_OSTAND(chan->id));
+ wake_up(&chan->wait_to_stop);
+
+ return 0;
+}
+
+/**
+ * xilinx_dpdma_chan_wait_no_ostand - Wait for the 'no outstanding' interrupt
+ * @chan: DPDMA channel
+ *
+ * Wait for the 'no outstanding transaction' interrupt. This function can
+ * sleep for up to 50 ms.
+ *
+ * Return: 0 on success. On failure, -ETIMEDOUT on timeout, or the error code
+ * from wait_event_interruptible_timeout().
+ */
+static int xilinx_dpdma_chan_wait_no_ostand(struct xilinx_dpdma_chan *chan)
+{
+ int ret;
+
+	/* Wait up to 50 ms for a 'no outstanding transaction' interrupt. */
+ ret = wait_event_interruptible_timeout(chan->wait_to_stop,
+ !xilinx_dpdma_chan_ostand(chan),
+ msecs_to_jiffies(50));
+ if (ret > 0) {
+ dpdma_write(chan->xdev->reg, XILINX_DPDMA_IEN,
+ XILINX_DPDMA_INTR_NO_OSTAND(chan->id));
+ return 0;
+ }
+
+ dev_err(chan->xdev->dev, "chan%u: not ready to stop: %d trans\n",
+ chan->id, xilinx_dpdma_chan_ostand(chan));
+
+ if (ret == 0)
+ return -ETIMEDOUT;
+
+ return ret;
+}
+
+/**
+ * xilinx_dpdma_chan_poll_no_ostand - Poll the outstanding transaction status
+ * @chan: DPDMA channel
+ *
+ * Poll the outstanding transaction status and return when there is no
+ * outstanding transaction. This function can be used in interrupt context or
+ * where atomicity is required. The calling thread may busy-wait for more than
+ * 50 ms.
+ *
+ * Return: 0 on success, or -ETIMEDOUT.
+ */
+static int xilinx_dpdma_chan_poll_no_ostand(struct xilinx_dpdma_chan *chan)
+{
+ u32 cnt, loop = 50000;
+
+	/* Poll for at least 50 ms (one frame period at 20 fps). */
+ do {
+ cnt = xilinx_dpdma_chan_ostand(chan);
+ udelay(1);
+ } while (loop-- > 0 && cnt);
+
+ if (loop) {
+ dpdma_write(chan->xdev->reg, XILINX_DPDMA_IEN,
+ XILINX_DPDMA_INTR_NO_OSTAND(chan->id));
+ return 0;
+ }
+
+ dev_err(chan->xdev->dev, "chan%u: not ready to stop: %d trans\n",
+ chan->id, xilinx_dpdma_chan_ostand(chan));
+
+ return -ETIMEDOUT;
+}
+
+/**
+ * xilinx_dpdma_chan_stop - Stop the channel
+ * @chan: DPDMA channel
+ *
+ * Stop a previously paused channel by first waiting for completion of all
+ * outstanding transactions and then disabling the channel.
+ *
+ * Return: 0 on success, or -ETIMEDOUT if the channel failed to stop.
+ */
+static int xilinx_dpdma_chan_stop(struct xilinx_dpdma_chan *chan)
+{
+ unsigned long flags;
+ int ret;
+
+ ret = xilinx_dpdma_chan_wait_no_ostand(chan);
+ if (ret)
+ return ret;
+
+ spin_lock_irqsave(&chan->lock, flags);
+ xilinx_dpdma_chan_disable(chan);
+ chan->running = false;
+ spin_unlock_irqrestore(&chan->lock, flags);
+
+ return 0;
+}
+
+/**
+ * xilinx_dpdma_chan_done_irq - Handle hardware descriptor completion
+ * @chan: DPDMA channel
+ *
+ * Handle completion of the currently active descriptor (@chan->desc.active). As
+ * we currently support cyclic transfers only, this just invokes the cyclic
+ * callback. The descriptor will be completed at the VSYNC interrupt when a new
+ * descriptor replaces it.
+ */
+static void xilinx_dpdma_chan_done_irq(struct xilinx_dpdma_chan *chan)
+{
+ struct xilinx_dpdma_tx_desc *active;
+ unsigned long flags;
+
+ spin_lock_irqsave(&chan->lock, flags);
+
+ xilinx_dpdma_debugfs_desc_done_irq(chan);
+
+ active = chan->desc.active;
+ if (active)
+ vchan_cyclic_callback(&active->vdesc);
+ else
+ dev_warn(chan->xdev->dev,
+ "chan%u: DONE IRQ with no active descriptor!\n",
+ chan->id);
+
+ spin_unlock_irqrestore(&chan->lock, flags);
+}
+
+/**
+ * xilinx_dpdma_chan_vsync_irq - Handle hardware descriptor scheduling
+ * @chan: DPDMA channel
+ *
+ * At VSYNC the active descriptor may have been replaced by the pending
+ * descriptor. Detect this through the DESC_ID and perform appropriate
+ * bookkeeping.
+ */
+static void xilinx_dpdma_chan_vsync_irq(struct xilinx_dpdma_chan *chan)
+{
+ struct xilinx_dpdma_tx_desc *pending;
+ struct xilinx_dpdma_sw_desc *sw_desc;
+ unsigned long flags;
+ u32 desc_id;
+
+ spin_lock_irqsave(&chan->lock, flags);
+
+ pending = chan->desc.pending;
+ if (!chan->running || !pending)
+ goto out;
+
+ desc_id = dpdma_read(chan->reg, XILINX_DPDMA_CH_DESC_ID)
+ & XILINX_DPDMA_CH_DESC_ID_MASK;
+
+ /* If the retrigger raced with vsync, retry at the next frame. */
+ sw_desc = list_first_entry(&pending->descriptors,
+ struct xilinx_dpdma_sw_desc, node);
+ if (sw_desc->hw.desc_id != desc_id) {
+ dev_dbg(chan->xdev->dev,
+ "chan%u: vsync race lost (%u != %u), retrying\n",
+ chan->id, sw_desc->hw.desc_id, desc_id);
+ goto out;
+ }
+
+ /*
+ * Complete the active descriptor, if any, promote the pending
+ * descriptor to active, and queue the next transfer, if any.
+ */
+ if (chan->desc.active)
+ vchan_cookie_complete(&chan->desc.active->vdesc);
+ chan->desc.active = pending;
+ chan->desc.pending = NULL;
+
+ xilinx_dpdma_chan_queue_transfer(chan);
+
+out:
+ spin_unlock_irqrestore(&chan->lock, flags);
+}
+
+/**
+ * xilinx_dpdma_chan_err - Detect any channel error
+ * @chan: DPDMA channel
+ * @isr: masked Interrupt Status Register
+ * @eisr: Error Interrupt Status Register
+ *
+ * Return: true if any channel error occurs, or false otherwise.
+ */
+static bool
+xilinx_dpdma_chan_err(struct xilinx_dpdma_chan *chan, u32 isr, u32 eisr)
+{
+ if (!chan)
+ return false;
+
+ if (chan->running &&
+ ((isr & (XILINX_DPDMA_INTR_CHAN_ERR_MASK << chan->id)) ||
+ (eisr & (XILINX_DPDMA_EINTR_CHAN_ERR_MASK << chan->id))))
+ return true;
+
+ return false;
+}
+
+/**
+ * xilinx_dpdma_chan_handle_err - DPDMA channel error handling
+ * @chan: DPDMA channel
+ *
+ * This function is called when any channel error or any global error occurs.
+ * The function disables the channel, which has been paused due to errors, and
+ * determines whether the currently active descriptor can be rescheduled,
+ * depending on the descriptor status.
+ */
+static void xilinx_dpdma_chan_handle_err(struct xilinx_dpdma_chan *chan)
+{
+ struct xilinx_dpdma_device *xdev = chan->xdev;
+ struct xilinx_dpdma_tx_desc *active;
+ unsigned long flags;
+
+ spin_lock_irqsave(&chan->lock, flags);
+
+ dev_dbg(xdev->dev, "chan%u: cur desc addr = 0x%04x%08x\n",
+ chan->id,
+ dpdma_read(chan->reg, XILINX_DPDMA_CH_DESC_START_ADDRE),
+ dpdma_read(chan->reg, XILINX_DPDMA_CH_DESC_START_ADDR));
+ dev_dbg(xdev->dev, "chan%u: cur payload addr = 0x%04x%08x\n",
+ chan->id,
+ dpdma_read(chan->reg, XILINX_DPDMA_CH_PYLD_CUR_ADDRE),
+ dpdma_read(chan->reg, XILINX_DPDMA_CH_PYLD_CUR_ADDR));
+
+ xilinx_dpdma_chan_disable(chan);
+ chan->running = false;
+
+ if (!chan->desc.active)
+ goto out_unlock;
+
+ active = chan->desc.active;
+ chan->desc.active = NULL;
+
+ xilinx_dpdma_chan_dump_tx_desc(chan, active);
+
+ if (active->error)
+ dev_dbg(xdev->dev, "chan%u: repeated error on desc\n",
+ chan->id);
+
+ /* Reschedule if there's no new descriptor */
+ if (!chan->desc.pending &&
+ list_empty(&chan->vchan.desc_issued)) {
+ active->error = true;
+ list_add_tail(&active->vdesc.node,
+ &chan->vchan.desc_issued);
+ } else {
+ xilinx_dpdma_chan_free_tx_desc(&active->vdesc);
+ }
+
+out_unlock:
+ spin_unlock_irqrestore(&chan->lock, flags);
+}
+
+/* -----------------------------------------------------------------------------
+ * DMA Engine Operations
+ */
+
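+/**
+ * xilinx_dpdma_prep_interleaved_dma - Prepare an interleaved DMA transfer
+ * @dchan: DMA channel
+ * @xt: DMA interleaved template
+ * @flags: Transfer flags
+ *
+ * Return: An async transaction descriptor on success, or NULL on failure.
+ */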
+static struct dma_async_tx_descriptor *
+xilinx_dpdma_prep_interleaved_dma(struct dma_chan *dchan,
+ struct dma_interleaved_template *xt,
+ unsigned long flags)
+{
+ struct xilinx_dpdma_chan *chan = to_xilinx_chan(dchan);
+ struct xilinx_dpdma_tx_desc *desc;
+
+ if (xt->dir != DMA_MEM_TO_DEV)
+ return NULL;
+
+ if (!xt->numf || !xt->sgl[0].size)
+ return NULL;
+
+ if (!(flags & DMA_PREP_REPEAT) || !(flags & DMA_PREP_LOAD_EOT))
+ return NULL;
+
+ desc = xilinx_dpdma_chan_prep_interleaved_dma(chan, xt);
+ if (!desc)
+ return NULL;
+
+ vchan_tx_prep(&chan->vchan, &desc->vdesc, flags | DMA_CTRL_ACK);
+
+ return &desc->vdesc.tx;
+}
+
+/**
+ * xilinx_dpdma_alloc_chan_resources - Allocate resources for the channel
+ * @dchan: DMA channel
+ *
+ * Allocate a descriptor pool for the channel.
+ *
+ * Return: 0 on success, or -ENOMEM if failed to allocate a pool.
+ */
+static int xilinx_dpdma_alloc_chan_resources(struct dma_chan *dchan)
+{
+ struct xilinx_dpdma_chan *chan = to_xilinx_chan(dchan);
+ size_t align = __alignof__(struct xilinx_dpdma_sw_desc);
+
+ chan->desc_pool = dma_pool_create(dev_name(chan->xdev->dev),
+ chan->xdev->dev,
+ sizeof(struct xilinx_dpdma_sw_desc),
+ align, 0);
+ if (!chan->desc_pool) {
+ dev_err(chan->xdev->dev,
+ "chan%u: failed to allocate a descriptor pool\n",
+ chan->id);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+/**
+ * xilinx_dpdma_free_chan_resources - Free all resources for the channel
+ * @dchan: DMA channel
+ *
+ * Free resources associated with the virtual DMA channel, and destroy the
+ * descriptor pool.
+ */
+static void xilinx_dpdma_free_chan_resources(struct dma_chan *dchan)
+{
+ struct xilinx_dpdma_chan *chan = to_xilinx_chan(dchan);
+
+ vchan_free_chan_resources(&chan->vchan);
+
+ dma_pool_destroy(chan->desc_pool);
+ chan->desc_pool = NULL;
+}
+
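+/**
+ * xilinx_dpdma_issue_pending - Issue pending descriptors to the hardware
+ * @dchan: DMA channel
+ */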
+static void xilinx_dpdma_issue_pending(struct dma_chan *dchan)
+{
+ struct xilinx_dpdma_chan *chan = to_xilinx_chan(dchan);
+ unsigned long flags;
+
+ spin_lock_irqsave(&chan->vchan.lock, flags);
+ if (vchan_issue_pending(&chan->vchan))
+ xilinx_dpdma_chan_queue_transfer(chan);
+ spin_unlock_irqrestore(&chan->vchan.lock, flags);
+}
+
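+/**
+ * xilinx_dpdma_config - Configure the channel
+ * @dchan: DMA channel
+ * @config: DMA slave configuration
+ *
+ * Return: 0 on success, or -EINVAL if the peripheral configuration size does
+ * not match the expected structure.
+ */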
+static int xilinx_dpdma_config(struct dma_chan *dchan,
+ struct dma_slave_config *config)
+{
+ struct xilinx_dpdma_chan *chan = to_xilinx_chan(dchan);
+ struct xilinx_dpdma_peripheral_config *pconfig;
+ unsigned long flags;
+
+ /*
+ * The destination address doesn't need to be specified as the DPDMA is
+ * hardwired to the destination (the DP controller). The transfer
+ * width, burst size and port window size are thus meaningless, they're
+ * fixed both on the DPDMA side and on the DP controller side.
+ */
+
+ /*
+ * Use the peripheral_config to indicate that the channel is part
+ * of a video group. This requires matching use of the custom
+ * structure in each driver.
+ */
+ pconfig = config->peripheral_config;
+ if (WARN_ON(pconfig && config->peripheral_size != sizeof(*pconfig)))
+ return -EINVAL;
+
+ spin_lock_irqsave(&chan->lock, flags);
+ if (chan->id <= ZYNQMP_DPDMA_VIDEO2 && pconfig)
+ chan->video_group = pconfig->video_group;
+ spin_unlock_irqrestore(&chan->lock, flags);
+
+ return 0;
+}
+
+static int xilinx_dpdma_pause(struct dma_chan *dchan)
+{
+ xilinx_dpdma_chan_pause(to_xilinx_chan(dchan));
+
+ return 0;
+}
+
+static int xilinx_dpdma_resume(struct dma_chan *dchan)
+{
+ xilinx_dpdma_chan_unpause(to_xilinx_chan(dchan));
+
+ return 0;
+}
+
+/**
+ * xilinx_dpdma_terminate_all - Terminate the channel and descriptors
+ * @dchan: DMA channel
+ *
+ * Pause the channel without waiting for ongoing transfers to complete.
+ * Waiting for completion is performed by xilinx_dpdma_synchronize(), which
+ * will disable the channel to complete the stop.
+ *
+ * All the descriptors associated with the channel that are guaranteed not to
+ * be touched by the hardware are freed. The pending and active descriptors
+ * are not touched; they will be freed either upon completion or by
+ * xilinx_dpdma_synchronize().
+ *
+ * Return: 0 on success, or -ETIMEDOUT if the channel failed to stop.
+ */
+static int xilinx_dpdma_terminate_all(struct dma_chan *dchan)
+{
+ struct xilinx_dpdma_chan *chan = to_xilinx_chan(dchan);
+ struct xilinx_dpdma_device *xdev = chan->xdev;
+ LIST_HEAD(descriptors);
+ unsigned long flags;
+ unsigned int i;
+
+ /* Pause the channel (including the whole video group if applicable). */
+ if (chan->video_group) {
+ for (i = ZYNQMP_DPDMA_VIDEO0; i <= ZYNQMP_DPDMA_VIDEO2; i++) {
+ if (xdev->chan[i]->video_group &&
+ xdev->chan[i]->running) {
+ xilinx_dpdma_chan_pause(xdev->chan[i]);
+ xdev->chan[i]->video_group = false;
+ }
+ }
+ } else {
+ xilinx_dpdma_chan_pause(chan);
+ }
+
+ /* Gather all the descriptors we can free and free them. */
+ spin_lock_irqsave(&chan->vchan.lock, flags);
+ vchan_get_all_descriptors(&chan->vchan, &descriptors);
+ spin_unlock_irqrestore(&chan->vchan.lock, flags);
+
+ vchan_dma_desc_free_list(&chan->vchan, &descriptors);
+
+ return 0;
+}
+
+/**
+ * xilinx_dpdma_synchronize - Synchronize callback execution
+ * @dchan: DMA channel
+ *
+ * Synchronizing callback execution ensures that all previously issued
+ * transfers have completed and all associated callbacks have been called and
+ * have returned.
+ *
+ * This function waits for the DMA channel to stop. It assumes it has been
+ * paused by a previous call to dmaengine_terminate_async(), and that no new
+ * pending descriptors have been issued with dma_async_issue_pending(). The
+ * behaviour is undefined otherwise.
+ */
+static void xilinx_dpdma_synchronize(struct dma_chan *dchan)
+{
+ struct xilinx_dpdma_chan *chan = to_xilinx_chan(dchan);
+ unsigned long flags;
+
+ xilinx_dpdma_chan_stop(chan);
+
+ spin_lock_irqsave(&chan->vchan.lock, flags);
+ if (chan->desc.pending) {
+ vchan_terminate_vdesc(&chan->desc.pending->vdesc);
+ chan->desc.pending = NULL;
+ }
+ if (chan->desc.active) {
+ vchan_terminate_vdesc(&chan->desc.active->vdesc);
+ chan->desc.active = NULL;
+ }
+ spin_unlock_irqrestore(&chan->vchan.lock, flags);
+
+ vchan_synchronize(&chan->vchan);
+}
+
+/* -----------------------------------------------------------------------------
+ * Interrupt and Tasklet Handling
+ */
+
+/**
+ * xilinx_dpdma_err - Detect any global error
+ * @isr: Interrupt Status Register
+ * @eisr: Error Interrupt Status Register
+ *
+ * Return: True if any global error occurs, or false otherwise.
+ */
+static bool xilinx_dpdma_err(u32 isr, u32 eisr)
+{
+ if (isr & XILINX_DPDMA_INTR_GLOBAL_ERR ||
+ eisr & XILINX_DPDMA_EINTR_GLOBAL_ERR)
+ return true;
+
+ return false;
+}
+
+/**
+ * xilinx_dpdma_handle_err_irq - Handle DPDMA error interrupt
+ * @xdev: DPDMA device
+ * @isr: masked Interrupt Status Register
+ * @eisr: Error Interrupt Status Register
+ *
+ * Handle errors, if any occurred, based on @isr and @eisr. This function
+ * disables the corresponding error interrupts; they should be re-enabled once
+ * handling is done.
+ */
+static void xilinx_dpdma_handle_err_irq(struct xilinx_dpdma_device *xdev,
+ u32 isr, u32 eisr)
+{
+ bool err = xilinx_dpdma_err(isr, eisr);
+ unsigned int i;
+
+ dev_dbg_ratelimited(xdev->dev,
+ "error irq: isr = 0x%08x, eisr = 0x%08x\n",
+ isr, eisr);
+
+ /* Disable channel error interrupts until errors are handled. */
+ dpdma_write(xdev->reg, XILINX_DPDMA_IDS,
+ isr & ~XILINX_DPDMA_INTR_GLOBAL_ERR);
+ dpdma_write(xdev->reg, XILINX_DPDMA_EIDS,
+ eisr & ~XILINX_DPDMA_EINTR_GLOBAL_ERR);
+
+ for (i = 0; i < ARRAY_SIZE(xdev->chan); i++)
+ if (err || xilinx_dpdma_chan_err(xdev->chan[i], isr, eisr))
+ tasklet_schedule(&xdev->chan[i]->err_task);
+}
+
+/**
+ * xilinx_dpdma_enable_irq - Enable interrupts
+ * @xdev: DPDMA device
+ *
+ * Enable interrupts.
+ */
+static void xilinx_dpdma_enable_irq(struct xilinx_dpdma_device *xdev)
+{
+ dpdma_write(xdev->reg, XILINX_DPDMA_IEN, XILINX_DPDMA_INTR_ALL);
+ dpdma_write(xdev->reg, XILINX_DPDMA_EIEN, XILINX_DPDMA_EINTR_ALL);
+}
+
+/**
+ * xilinx_dpdma_disable_irq - Disable interrupts
+ * @xdev: DPDMA device
+ *
+ * Disable interrupts.
+ */
+static void xilinx_dpdma_disable_irq(struct xilinx_dpdma_device *xdev)
+{
+ dpdma_write(xdev->reg, XILINX_DPDMA_IDS, XILINX_DPDMA_INTR_ALL);
+ dpdma_write(xdev->reg, XILINX_DPDMA_EIDS, XILINX_DPDMA_EINTR_ALL);
+}
+
+/**
+ * xilinx_dpdma_chan_err_task - Per channel tasklet for error handling
+ * @t: pointer to the tasklet associated with this handler
+ *
+ * Per-channel error handling tasklet. This function waits for outstanding
+ * transactions to complete and triggers error handling. After error handling,
+ * it re-enables the channel error interrupts and restarts the channel if
+ * needed.
+ */
+static void xilinx_dpdma_chan_err_task(struct tasklet_struct *t)
+{
+ struct xilinx_dpdma_chan *chan = from_tasklet(chan, t, err_task);
+ struct xilinx_dpdma_device *xdev = chan->xdev;
+ unsigned long flags;
+
+ /* Proceed error handling even when polling fails. */
+ xilinx_dpdma_chan_poll_no_ostand(chan);
+
+ xilinx_dpdma_chan_handle_err(chan);
+
+ dpdma_write(xdev->reg, XILINX_DPDMA_IEN,
+ XILINX_DPDMA_INTR_CHAN_ERR_MASK << chan->id);
+ dpdma_write(xdev->reg, XILINX_DPDMA_EIEN,
+ XILINX_DPDMA_EINTR_CHAN_ERR_MASK << chan->id);
+
+ spin_lock_irqsave(&chan->lock, flags);
+ xilinx_dpdma_chan_queue_transfer(chan);
+ spin_unlock_irqrestore(&chan->lock, flags);
+}
+
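+/**
+ * xilinx_dpdma_irq_handler - DPDMA interrupt handler
+ * @irq: IRQ number
+ * @data: Pointer to the DPDMA device structure
+ *
+ * Return: IRQ_HANDLED if an interrupt was serviced, or IRQ_NONE otherwise.
+ */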
+static irqreturn_t xilinx_dpdma_irq_handler(int irq, void *data)
+{
+ struct xilinx_dpdma_device *xdev = data;
+ unsigned long mask;
+ unsigned int i;
+ u32 status;
+ u32 error;
+
+ status = dpdma_read(xdev->reg, XILINX_DPDMA_ISR);
+ error = dpdma_read(xdev->reg, XILINX_DPDMA_EISR);
+ if (!status && !error)
+ return IRQ_NONE;
+
+ dpdma_write(xdev->reg, XILINX_DPDMA_ISR, status);
+ dpdma_write(xdev->reg, XILINX_DPDMA_EISR, error);
+
+ if (status & XILINX_DPDMA_INTR_VSYNC) {
+ /*
+ * There's a single VSYNC interrupt that needs to be processed
+ * by each running channel to update the active descriptor.
+ */
+ for (i = 0; i < ARRAY_SIZE(xdev->chan); i++) {
+ struct xilinx_dpdma_chan *chan = xdev->chan[i];
+
+ if (chan)
+ xilinx_dpdma_chan_vsync_irq(chan);
+ }
+ }
+
+ mask = FIELD_GET(XILINX_DPDMA_INTR_DESC_DONE_MASK, status);
+ if (mask) {
+ for_each_set_bit(i, &mask, ARRAY_SIZE(xdev->chan))
+ xilinx_dpdma_chan_done_irq(xdev->chan[i]);
+ }
+
+ mask = FIELD_GET(XILINX_DPDMA_INTR_NO_OSTAND_MASK, status);
+ if (mask) {
+ for_each_set_bit(i, &mask, ARRAY_SIZE(xdev->chan))
+ xilinx_dpdma_chan_notify_no_ostand(xdev->chan[i]);
+ }
+
+ mask = status & XILINX_DPDMA_INTR_ERR_ALL;
+ if (mask || error)
+ xilinx_dpdma_handle_err_irq(xdev, mask, error);
+
+ return IRQ_HANDLED;
+}
+
+/* -----------------------------------------------------------------------------
+ * Initialization & Cleanup
+ */
+
+static int xilinx_dpdma_chan_init(struct xilinx_dpdma_device *xdev,
+ unsigned int chan_id)
+{
+ struct xilinx_dpdma_chan *chan;
+
+ chan = devm_kzalloc(xdev->dev, sizeof(*chan), GFP_KERNEL);
+ if (!chan)
+ return -ENOMEM;
+
+ chan->id = chan_id;
+ chan->reg = xdev->reg + XILINX_DPDMA_CH_BASE
+ + XILINX_DPDMA_CH_OFFSET * chan->id;
+ chan->running = false;
+ chan->xdev = xdev;
+
+ spin_lock_init(&chan->lock);
+ init_waitqueue_head(&chan->wait_to_stop);
+
+ tasklet_setup(&chan->err_task, xilinx_dpdma_chan_err_task);
+
+ chan->vchan.desc_free = xilinx_dpdma_chan_free_tx_desc;
+ vchan_init(&chan->vchan, &xdev->common);
+
+ xdev->chan[chan->id] = chan;
+
+ return 0;
+}
+
+static void xilinx_dpdma_chan_remove(struct xilinx_dpdma_chan *chan)
+{
+ if (!chan)
+ return;
+
+ tasklet_kill(&chan->err_task);
+ list_del(&chan->vchan.chan.device_node);
+}
+
+static struct dma_chan *of_dma_xilinx_xlate(struct of_phandle_args *dma_spec,
+ struct of_dma *ofdma)
+{
+ struct xilinx_dpdma_device *xdev = ofdma->of_dma_data;
+ u32 chan_id = dma_spec->args[0];
+
+ if (chan_id >= ARRAY_SIZE(xdev->chan))
+ return NULL;
+
+ if (!xdev->chan[chan_id])
+ return NULL;
+
+ return dma_get_slave_channel(&xdev->chan[chan_id]->vchan.chan);
+}
+
+static void dpdma_hw_init(struct xilinx_dpdma_device *xdev)
+{
+ unsigned int i;
+ void __iomem *reg;
+
+ /* Disable all interrupts */
+ xilinx_dpdma_disable_irq(xdev);
+
+ /* Stop all channels */
+ for (i = 0; i < ARRAY_SIZE(xdev->chan); i++) {
+ reg = xdev->reg + XILINX_DPDMA_CH_BASE
+ + XILINX_DPDMA_CH_OFFSET * i;
+ dpdma_clr(reg, XILINX_DPDMA_CH_CNTL, XILINX_DPDMA_CH_CNTL_ENABLE);
+ }
+
+ /* Clear the interrupt status registers */
+ dpdma_write(xdev->reg, XILINX_DPDMA_ISR, XILINX_DPDMA_INTR_ALL);
+ dpdma_write(xdev->reg, XILINX_DPDMA_EISR, XILINX_DPDMA_EINTR_ALL);
+}
+
+static int xilinx_dpdma_probe(struct platform_device *pdev)
+{
+ struct xilinx_dpdma_device *xdev;
+ struct dma_device *ddev;
+ unsigned int i;
+ int ret;
+
+ xdev = devm_kzalloc(&pdev->dev, sizeof(*xdev), GFP_KERNEL);
+ if (!xdev)
+ return -ENOMEM;
+
+ xdev->dev = &pdev->dev;
+ xdev->ext_addr = sizeof(dma_addr_t) > 4;
+
+ INIT_LIST_HEAD(&xdev->common.channels);
+
+ platform_set_drvdata(pdev, xdev);
+
+ xdev->axi_clk = devm_clk_get(xdev->dev, "axi_clk");
+ if (IS_ERR(xdev->axi_clk))
+ return PTR_ERR(xdev->axi_clk);
+
+ xdev->reg = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(xdev->reg))
+ return PTR_ERR(xdev->reg);
+
+ dpdma_hw_init(xdev);
+
+ xdev->irq = platform_get_irq(pdev, 0);
+ if (xdev->irq < 0)
+ return xdev->irq;
+
+ ret = request_irq(xdev->irq, xilinx_dpdma_irq_handler, IRQF_SHARED,
+ dev_name(xdev->dev), xdev);
+ if (ret) {
+ dev_err(xdev->dev, "failed to request IRQ\n");
+ return ret;
+ }
+
+ ddev = &xdev->common;
+ ddev->dev = &pdev->dev;
+
+ dma_cap_set(DMA_SLAVE, ddev->cap_mask);
+ dma_cap_set(DMA_PRIVATE, ddev->cap_mask);
+ dma_cap_set(DMA_INTERLEAVE, ddev->cap_mask);
+ dma_cap_set(DMA_REPEAT, ddev->cap_mask);
+ dma_cap_set(DMA_LOAD_EOT, ddev->cap_mask);
+ ddev->copy_align = fls(XILINX_DPDMA_ALIGN_BYTES - 1);
+
+ ddev->device_alloc_chan_resources = xilinx_dpdma_alloc_chan_resources;
+ ddev->device_free_chan_resources = xilinx_dpdma_free_chan_resources;
+ ddev->device_prep_interleaved_dma = xilinx_dpdma_prep_interleaved_dma;
+	/* TODO: Can we achieve better granularity? */
+ ddev->device_tx_status = dma_cookie_status;
+ ddev->device_issue_pending = xilinx_dpdma_issue_pending;
+ ddev->device_config = xilinx_dpdma_config;
+ ddev->device_pause = xilinx_dpdma_pause;
+ ddev->device_resume = xilinx_dpdma_resume;
+ ddev->device_terminate_all = xilinx_dpdma_terminate_all;
+ ddev->device_synchronize = xilinx_dpdma_synchronize;
+ ddev->src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_UNDEFINED);
+ ddev->directions = BIT(DMA_MEM_TO_DEV);
+ ddev->residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;
+
+ for (i = 0; i < ARRAY_SIZE(xdev->chan); ++i) {
+ ret = xilinx_dpdma_chan_init(xdev, i);
+ if (ret < 0) {
+ dev_err(xdev->dev, "failed to initialize channel %u\n",
+ i);
+ goto error;
+ }
+ }
+
+ ret = clk_prepare_enable(xdev->axi_clk);
+ if (ret) {
+ dev_err(xdev->dev, "failed to enable the axi clock\n");
+ goto error;
+ }
+
+ ret = dma_async_device_register(ddev);
+ if (ret) {
+ dev_err(xdev->dev, "failed to register the dma device\n");
+ goto error_dma_async;
+ }
+
+ ret = of_dma_controller_register(xdev->dev->of_node,
+ of_dma_xilinx_xlate, ddev);
+ if (ret) {
+ dev_err(xdev->dev, "failed to register DMA to DT DMA helper\n");
+ goto error_of_dma;
+ }
+
+ xilinx_dpdma_enable_irq(xdev);
+
+ xilinx_dpdma_debugfs_init(xdev);
+
+ dev_info(&pdev->dev, "Xilinx DPDMA engine is probed\n");
+
+ return 0;
+
+error_of_dma:
+ dma_async_device_unregister(ddev);
+error_dma_async:
+ clk_disable_unprepare(xdev->axi_clk);
+error:
+ for (i = 0; i < ARRAY_SIZE(xdev->chan); i++)
+ xilinx_dpdma_chan_remove(xdev->chan[i]);
+
+ free_irq(xdev->irq, xdev);
+
+ return ret;
+}
+
+static int xilinx_dpdma_remove(struct platform_device *pdev)
+{
+ struct xilinx_dpdma_device *xdev = platform_get_drvdata(pdev);
+ unsigned int i;
+
+ /* Start by disabling the IRQ to avoid races during cleanup. */
+ free_irq(xdev->irq, xdev);
+
+ xilinx_dpdma_disable_irq(xdev);
+ of_dma_controller_free(pdev->dev.of_node);
+ dma_async_device_unregister(&xdev->common);
+ clk_disable_unprepare(xdev->axi_clk);
+
+ for (i = 0; i < ARRAY_SIZE(xdev->chan); i++)
+ xilinx_dpdma_chan_remove(xdev->chan[i]);
+
+ return 0;
+}
+
+static const struct of_device_id xilinx_dpdma_of_match[] = {
+ { .compatible = "xlnx,zynqmp-dpdma",},
+ { /* end of table */ },
+};
+MODULE_DEVICE_TABLE(of, xilinx_dpdma_of_match);
+
+static struct platform_driver xilinx_dpdma_driver = {
+ .probe = xilinx_dpdma_probe,
+ .remove = xilinx_dpdma_remove,
+ .driver = {
+ .name = "xilinx-zynqmp-dpdma",
+ .of_match_table = xilinx_dpdma_of_match,
+ },
+};
+
+module_platform_driver(xilinx_dpdma_driver);
+
+MODULE_AUTHOR("Xilinx, Inc.");
+MODULE_DESCRIPTION("Xilinx ZynqMP DPDMA driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/dma/xilinx/zynqmp_dma.c b/drivers/dma/xilinx/zynqmp_dma.c
new file mode 100644
index 000000000..21472a5d7
--- /dev/null
+++ b/drivers/dma/xilinx/zynqmp_dma.c
@@ -0,0 +1,1182 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * DMA driver for Xilinx ZynqMP DMA Engine
+ *
+ * Copyright (C) 2016 Xilinx, Inc. All rights reserved.
+ */
+
+#include <linux/bitops.h>
+#include <linux/dma-mapping.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of_dma.h>
+#include <linux/of_platform.h>
+#include <linux/slab.h>
+#include <linux/clk.h>
+#include <linux/io-64-nonatomic-lo-hi.h>
+#include <linux/pm_runtime.h>
+
+#include "../dmaengine.h"
+
+/* Register Offsets */
+#define ZYNQMP_DMA_ISR 0x100
+#define ZYNQMP_DMA_IMR 0x104
+#define ZYNQMP_DMA_IER 0x108
+#define ZYNQMP_DMA_IDS 0x10C
+#define ZYNQMP_DMA_CTRL0 0x110
+#define ZYNQMP_DMA_CTRL1 0x114
+#define ZYNQMP_DMA_DATA_ATTR 0x120
+#define ZYNQMP_DMA_DSCR_ATTR 0x124
+#define ZYNQMP_DMA_SRC_DSCR_WRD0 0x128
+#define ZYNQMP_DMA_SRC_DSCR_WRD1 0x12C
+#define ZYNQMP_DMA_SRC_DSCR_WRD2 0x130
+#define ZYNQMP_DMA_SRC_DSCR_WRD3 0x134
+#define ZYNQMP_DMA_DST_DSCR_WRD0 0x138
+#define ZYNQMP_DMA_DST_DSCR_WRD1 0x13C
+#define ZYNQMP_DMA_DST_DSCR_WRD2 0x140
+#define ZYNQMP_DMA_DST_DSCR_WRD3 0x144
+#define ZYNQMP_DMA_SRC_START_LSB 0x158
+#define ZYNQMP_DMA_SRC_START_MSB 0x15C
+#define ZYNQMP_DMA_DST_START_LSB 0x160
+#define ZYNQMP_DMA_DST_START_MSB 0x164
+#define ZYNQMP_DMA_TOTAL_BYTE 0x188
+#define ZYNQMP_DMA_RATE_CTRL 0x18C
+#define ZYNQMP_DMA_IRQ_SRC_ACCT 0x190
+#define ZYNQMP_DMA_IRQ_DST_ACCT 0x194
+#define ZYNQMP_DMA_CTRL2 0x200
+
+/* Interrupt registers bit field definitions */
+#define ZYNQMP_DMA_DONE BIT(10)
+#define ZYNQMP_DMA_AXI_WR_DATA BIT(9)
+#define ZYNQMP_DMA_AXI_RD_DATA BIT(8)
+#define ZYNQMP_DMA_AXI_RD_DST_DSCR BIT(7)
+#define ZYNQMP_DMA_AXI_RD_SRC_DSCR BIT(6)
+#define ZYNQMP_DMA_IRQ_DST_ACCT_ERR BIT(5)
+#define ZYNQMP_DMA_IRQ_SRC_ACCT_ERR BIT(4)
+#define ZYNQMP_DMA_BYTE_CNT_OVRFL BIT(3)
+#define ZYNQMP_DMA_DST_DSCR_DONE BIT(2)
+#define ZYNQMP_DMA_INV_APB BIT(0)
+
+/* Control 0 register bit field definitions */
+#define ZYNQMP_DMA_OVR_FETCH BIT(7)
+#define ZYNQMP_DMA_POINT_TYPE_SG BIT(6)
+#define ZYNQMP_DMA_RATE_CTRL_EN BIT(3)
+
+/* Control 1 register bit field definitions */
+#define ZYNQMP_DMA_SRC_ISSUE GENMASK(4, 0)
+
+/* Data Attribute register bit field definitions */
+#define ZYNQMP_DMA_ARBURST GENMASK(27, 26)
+#define ZYNQMP_DMA_ARCACHE GENMASK(25, 22)
+#define ZYNQMP_DMA_ARCACHE_OFST 22
+#define ZYNQMP_DMA_ARQOS GENMASK(21, 18)
+#define ZYNQMP_DMA_ARQOS_OFST 18
+#define ZYNQMP_DMA_ARLEN GENMASK(17, 14)
+#define ZYNQMP_DMA_ARLEN_OFST 14
+#define ZYNQMP_DMA_AWBURST GENMASK(13, 12)
+#define ZYNQMP_DMA_AWCACHE GENMASK(11, 8)
+#define ZYNQMP_DMA_AWCACHE_OFST 8
+#define ZYNQMP_DMA_AWQOS GENMASK(7, 4)
+#define ZYNQMP_DMA_AWQOS_OFST 4
+#define ZYNQMP_DMA_AWLEN GENMASK(3, 0)
+#define ZYNQMP_DMA_AWLEN_OFST 0
+
+/* Descriptor Attribute register bit field definitions */
+#define ZYNQMP_DMA_AXCOHRNT BIT(8)
+#define ZYNQMP_DMA_AXCACHE GENMASK(7, 4)
+#define ZYNQMP_DMA_AXCACHE_OFST 4
+#define ZYNQMP_DMA_AXQOS GENMASK(3, 0)
+#define ZYNQMP_DMA_AXQOS_OFST 0
+
+/* Control register 2 bit field definitions */
+#define ZYNQMP_DMA_ENABLE BIT(0)
+
+/* Buffer Descriptor definitions */
+#define ZYNQMP_DMA_DESC_CTRL_STOP 0x10
+#define ZYNQMP_DMA_DESC_CTRL_COMP_INT 0x4
+#define ZYNQMP_DMA_DESC_CTRL_SIZE_256 0x2
+#define ZYNQMP_DMA_DESC_CTRL_COHRNT 0x1
+
+/* Interrupt Mask specific definitions */
+#define ZYNQMP_DMA_INT_ERR (ZYNQMP_DMA_AXI_RD_DATA | \
+ ZYNQMP_DMA_AXI_WR_DATA | \
+ ZYNQMP_DMA_AXI_RD_DST_DSCR | \
+ ZYNQMP_DMA_AXI_RD_SRC_DSCR | \
+ ZYNQMP_DMA_INV_APB)
+#define ZYNQMP_DMA_INT_OVRFL (ZYNQMP_DMA_BYTE_CNT_OVRFL | \
+ ZYNQMP_DMA_IRQ_SRC_ACCT_ERR | \
+ ZYNQMP_DMA_IRQ_DST_ACCT_ERR)
+#define ZYNQMP_DMA_INT_DONE (ZYNQMP_DMA_DONE | ZYNQMP_DMA_DST_DSCR_DONE)
+#define ZYNQMP_DMA_INT_EN_DEFAULT_MASK (ZYNQMP_DMA_INT_DONE | \
+ ZYNQMP_DMA_INT_ERR | \
+ ZYNQMP_DMA_INT_OVRFL | \
+ ZYNQMP_DMA_DST_DSCR_DONE)
+
+/* Max number of descriptors per channel */
+#define ZYNQMP_DMA_NUM_DESCS 32
+
+/* Max transfer size per descriptor */
+#define ZYNQMP_DMA_MAX_TRANS_LEN 0x40000000
+
+/* Max burst lengths */
+#define ZYNQMP_DMA_MAX_DST_BURST_LEN 32768U
+#define ZYNQMP_DMA_MAX_SRC_BURST_LEN 32768U
+
+/* Reset values for data attributes */
+#define ZYNQMP_DMA_AXCACHE_VAL 0xF
+
+#define ZYNQMP_DMA_SRC_ISSUE_RST_VAL 0x1F
+
+#define ZYNQMP_DMA_IDS_DEFAULT_MASK 0xFFF
+
+/* Bus width in bits */
+#define ZYNQMP_DMA_BUS_WIDTH_64 64
+#define ZYNQMP_DMA_BUS_WIDTH_128 128
+
+#define ZDMA_PM_TIMEOUT 100
+
+#define ZYNQMP_DMA_DESC_SIZE(chan) (chan->desc_size)
+
+#define to_chan(chan) container_of(chan, struct zynqmp_dma_chan, \
+ common)
+#define tx_to_desc(tx) container_of(tx, struct zynqmp_dma_desc_sw, \
+ async_tx)
+
+/**
+ * struct zynqmp_dma_desc_ll - Hw linked list descriptor
+ * @addr: Buffer address
+ * @size: Size of the buffer
+ * @ctrl: Control word
+ * @nxtdscraddr: Next descriptor base address
+ * @rsvd: Reserved field, for HW internal use.
+ */
+struct zynqmp_dma_desc_ll {
+ u64 addr;
+ u32 size;
+ u32 ctrl;
+ u64 nxtdscraddr;
+ u64 rsvd;
+};
+
+/**
+ * struct zynqmp_dma_desc_sw - Per Transaction structure
+ * @src: Source address for simple mode dma
+ * @dst: Destination address for simple mode dma
+ * @len: Transfer length for simple mode dma
+ * @node: Node in the channel descriptor list
+ * @tx_list: List head for the current transfer
+ * @async_tx: Async transaction descriptor
+ * @src_v: Virtual address of the src descriptor
+ * @src_p: Physical address of the src descriptor
+ * @dst_v: Virtual address of the dst descriptor
+ * @dst_p: Physical address of the dst descriptor
+ */
+struct zynqmp_dma_desc_sw {
+ u64 src;
+ u64 dst;
+ u32 len;
+ struct list_head node;
+ struct list_head tx_list;
+ struct dma_async_tx_descriptor async_tx;
+ struct zynqmp_dma_desc_ll *src_v;
+ dma_addr_t src_p;
+ struct zynqmp_dma_desc_ll *dst_v;
+ dma_addr_t dst_p;
+};
+
+/**
+ * struct zynqmp_dma_chan - Driver specific DMA channel structure
+ * @zdev: Driver specific device structure
+ * @regs: Control registers offset
+ * @lock: Descriptor operation lock
+ * @pending_list: Descriptors waiting
+ * @free_list: Descriptors free
+ * @active_list: Descriptors active
+ * @sw_desc_pool: SW descriptor pool
+ * @done_list: Complete descriptors
+ * @common: DMA common channel
+ * @desc_pool_v: Statically allocated descriptor base
+ * @desc_pool_p: Physical allocated descriptor base
+ * @desc_free_cnt: Descriptor available count
+ * @dev: The dma device
+ * @irq: Channel IRQ
+ * @is_dmacoherent: Tells whether dma operations are coherent or not
+ * @tasklet: Cleanup work after irq
+ * @idle: Channel status
+ * @desc_size: Size of the low level descriptor
+ * @err: Channel has errors
+ * @bus_width: Bus width
+ * @src_burst_len: Source burst length
+ * @dst_burst_len: Dest burst length
+ */
+struct zynqmp_dma_chan {
+ struct zynqmp_dma_device *zdev;
+ void __iomem *regs;
+ spinlock_t lock;
+ struct list_head pending_list;
+ struct list_head free_list;
+ struct list_head active_list;
+ struct zynqmp_dma_desc_sw *sw_desc_pool;
+ struct list_head done_list;
+ struct dma_chan common;
+ void *desc_pool_v;
+ dma_addr_t desc_pool_p;
+ u32 desc_free_cnt;
+ struct device *dev;
+ int irq;
+ bool is_dmacoherent;
+ struct tasklet_struct tasklet;
+ bool idle;
+ size_t desc_size;
+ bool err;
+ u32 bus_width;
+ u32 src_burst_len;
+ u32 dst_burst_len;
+};
+
+/**
+ * struct zynqmp_dma_device - DMA device structure
+ * @dev: Device Structure
+ * @common: DMA device structure
+ * @chan: Driver specific DMA channel
+ * @clk_main: Pointer to main clock
+ * @clk_apb: Pointer to apb clock
+ */
+struct zynqmp_dma_device {
+ struct device *dev;
+ struct dma_device common;
+ struct zynqmp_dma_chan *chan;
+ struct clk *clk_main;
+ struct clk *clk_apb;
+};
+
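+/**
+ * zynqmp_dma_writeq - Write a 64-bit value to a pair of channel registers
+ * @chan: ZynqMP DMA channel pointer
+ * @reg: Offset of the lower 32-bit register
+ * @value: 64-bit value to write
+ */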
+static inline void zynqmp_dma_writeq(struct zynqmp_dma_chan *chan, u32 reg,
+ u64 value)
+{
+ lo_hi_writeq(value, chan->regs + reg);
+}
+
+/**
+ * zynqmp_dma_update_desc_to_ctrlr - Updates descriptor to the controller
+ * @chan: ZynqMP DMA channel pointer
+ * @desc: Transaction descriptor pointer
+ */
+static void zynqmp_dma_update_desc_to_ctrlr(struct zynqmp_dma_chan *chan,
+ struct zynqmp_dma_desc_sw *desc)
+{
+ dma_addr_t addr;
+
+ addr = desc->src_p;
+ zynqmp_dma_writeq(chan, ZYNQMP_DMA_SRC_START_LSB, addr);
+ addr = desc->dst_p;
+ zynqmp_dma_writeq(chan, ZYNQMP_DMA_DST_START_LSB, addr);
+}
+
+/**
+ * zynqmp_dma_desc_config_eod - Mark the descriptor as end descriptor
+ * @chan: ZynqMP DMA channel pointer
+ * @desc: Hw descriptor pointer
+ */
+static void zynqmp_dma_desc_config_eod(struct zynqmp_dma_chan *chan,
+ void *desc)
+{
+ struct zynqmp_dma_desc_ll *hw = (struct zynqmp_dma_desc_ll *)desc;
+
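+	/*
+	 * The destination descriptor immediately follows the source
+	 * descriptor, so mark the source descriptor first and then advance to
+	 * the destination descriptor.
+	 */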
+ hw->ctrl |= ZYNQMP_DMA_DESC_CTRL_STOP;
+ hw++;
+ hw->ctrl |= ZYNQMP_DMA_DESC_CTRL_COMP_INT | ZYNQMP_DMA_DESC_CTRL_STOP;
+}
+
+/**
+ * zynqmp_dma_config_sg_ll_desc - Configure the linked list descriptor
+ * @chan: ZynqMP DMA channel pointer
+ * @sdesc: Hw descriptor pointer
+ * @src: Source buffer address
+ * @dst: Destination buffer address
+ * @len: Transfer length
+ * @prev: Previous hw descriptor pointer
+ */
+static void zynqmp_dma_config_sg_ll_desc(struct zynqmp_dma_chan *chan,
+ struct zynqmp_dma_desc_ll *sdesc,
+ dma_addr_t src, dma_addr_t dst, size_t len,
+ struct zynqmp_dma_desc_ll *prev)
+{
+ struct zynqmp_dma_desc_ll *ddesc = sdesc + 1;
+
+ sdesc->size = ddesc->size = len;
+ sdesc->addr = src;
+ ddesc->addr = dst;
+
+ sdesc->ctrl = ddesc->ctrl = ZYNQMP_DMA_DESC_CTRL_SIZE_256;
+ if (chan->is_dmacoherent) {
+ sdesc->ctrl |= ZYNQMP_DMA_DESC_CTRL_COHRNT;
+ ddesc->ctrl |= ZYNQMP_DMA_DESC_CTRL_COHRNT;
+ }
+
+ if (prev) {
+ dma_addr_t addr = chan->desc_pool_p +
+ ((uintptr_t)sdesc - (uintptr_t)chan->desc_pool_v);
+ ddesc = prev + 1;
+ prev->nxtdscraddr = addr;
+ ddesc->nxtdscraddr = addr + ZYNQMP_DMA_DESC_SIZE(chan);
+ }
+}
+
+/**
+ * zynqmp_dma_init - Initialize the channel
+ * @chan: ZynqMP DMA channel pointer
+ */
+static void zynqmp_dma_init(struct zynqmp_dma_chan *chan)
+{
+ u32 val;
+
+ writel(ZYNQMP_DMA_IDS_DEFAULT_MASK, chan->regs + ZYNQMP_DMA_IDS);
+ val = readl(chan->regs + ZYNQMP_DMA_ISR);
+ writel(val, chan->regs + ZYNQMP_DMA_ISR);
+
+ if (chan->is_dmacoherent) {
+ val = ZYNQMP_DMA_AXCOHRNT;
+ val = (val & ~ZYNQMP_DMA_AXCACHE) |
+ (ZYNQMP_DMA_AXCACHE_VAL << ZYNQMP_DMA_AXCACHE_OFST);
+ writel(val, chan->regs + ZYNQMP_DMA_DSCR_ATTR);
+ }
+
+ val = readl(chan->regs + ZYNQMP_DMA_DATA_ATTR);
+ if (chan->is_dmacoherent) {
+ val = (val & ~ZYNQMP_DMA_ARCACHE) |
+ (ZYNQMP_DMA_AXCACHE_VAL << ZYNQMP_DMA_ARCACHE_OFST);
+ val = (val & ~ZYNQMP_DMA_AWCACHE) |
+ (ZYNQMP_DMA_AXCACHE_VAL << ZYNQMP_DMA_AWCACHE_OFST);
+ }
+ writel(val, chan->regs + ZYNQMP_DMA_DATA_ATTR);
+
+	/* Clear the interrupt accounting registers by reading them. */
+ val = readl(chan->regs + ZYNQMP_DMA_IRQ_SRC_ACCT);
+ val = readl(chan->regs + ZYNQMP_DMA_IRQ_DST_ACCT);
+
+ chan->idle = true;
+}
+
+/**
+ * zynqmp_dma_tx_submit - Submit DMA transaction
+ * @tx: Async transaction descriptor pointer
+ *
+ * Return: cookie value
+ */
+static dma_cookie_t zynqmp_dma_tx_submit(struct dma_async_tx_descriptor *tx)
+{
+ struct zynqmp_dma_chan *chan = to_chan(tx->chan);
+ struct zynqmp_dma_desc_sw *desc, *new;
+ dma_cookie_t cookie;
+ unsigned long irqflags;
+
+ new = tx_to_desc(tx);
+ spin_lock_irqsave(&chan->lock, irqflags);
+ cookie = dma_cookie_assign(tx);
+
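+	/*
+	 * If descriptors are already pending, chain the new transaction to the
+	 * last one by pointing its next-descriptor addresses at the new
+	 * descriptors and clearing its STOP control bits.
+	 */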
+ if (!list_empty(&chan->pending_list)) {
+ desc = list_last_entry(&chan->pending_list,
+ struct zynqmp_dma_desc_sw, node);
+ if (!list_empty(&desc->tx_list))
+ desc = list_last_entry(&desc->tx_list,
+ struct zynqmp_dma_desc_sw, node);
+ desc->src_v->nxtdscraddr = new->src_p;
+ desc->src_v->ctrl &= ~ZYNQMP_DMA_DESC_CTRL_STOP;
+ desc->dst_v->nxtdscraddr = new->dst_p;
+ desc->dst_v->ctrl &= ~ZYNQMP_DMA_DESC_CTRL_STOP;
+ }
+
+ list_add_tail(&new->node, &chan->pending_list);
+ spin_unlock_irqrestore(&chan->lock, irqflags);
+
+ return cookie;
+}
+
+/**
+ * zynqmp_dma_get_descriptor - Get the sw descriptor from the pool
+ * @chan: ZynqMP DMA channel pointer
+ *
+ * Return: The sw descriptor
+ */
+static struct zynqmp_dma_desc_sw *
+zynqmp_dma_get_descriptor(struct zynqmp_dma_chan *chan)
+{
+ struct zynqmp_dma_desc_sw *desc;
+ unsigned long irqflags;
+
+ spin_lock_irqsave(&chan->lock, irqflags);
+ desc = list_first_entry(&chan->free_list,
+ struct zynqmp_dma_desc_sw, node);
+ list_del(&desc->node);
+ spin_unlock_irqrestore(&chan->lock, irqflags);
+
+ INIT_LIST_HEAD(&desc->tx_list);
+ /* Clear the src and dst descriptor memory */
+ memset((void *)desc->src_v, 0, ZYNQMP_DMA_DESC_SIZE(chan));
+ memset((void *)desc->dst_v, 0, ZYNQMP_DMA_DESC_SIZE(chan));
+
+ return desc;
+}
+
+/**
+ * zynqmp_dma_free_descriptor - Return a descriptor to the free list
+ * @chan: ZynqMP DMA channel pointer
+ * @sdesc: Transaction descriptor pointer
+ */
+static void zynqmp_dma_free_descriptor(struct zynqmp_dma_chan *chan,
+ struct zynqmp_dma_desc_sw *sdesc)
+{
+ struct zynqmp_dma_desc_sw *child, *next;
+
+ chan->desc_free_cnt++;
+ list_move_tail(&sdesc->node, &chan->free_list);
+ list_for_each_entry_safe(child, next, &sdesc->tx_list, node) {
+ chan->desc_free_cnt++;
+ list_move_tail(&child->node, &chan->free_list);
+ }
+}
+
+/**
+ * zynqmp_dma_free_desc_list - Free descriptors list
+ * @chan: ZynqMP DMA channel pointer
+ * @list: List of descriptors to parse and free
+ */
+static void zynqmp_dma_free_desc_list(struct zynqmp_dma_chan *chan,
+ struct list_head *list)
+{
+ struct zynqmp_dma_desc_sw *desc, *next;
+
+ list_for_each_entry_safe(desc, next, list, node)
+ zynqmp_dma_free_descriptor(chan, desc);
+}
+
+/**
+ * zynqmp_dma_alloc_chan_resources - Allocate channel resources
+ * @dchan: DMA channel
+ *
+ * Return: Number of descriptors on success and failure value on error
+ */
+static int zynqmp_dma_alloc_chan_resources(struct dma_chan *dchan)
+{
+ struct zynqmp_dma_chan *chan = to_chan(dchan);
+ struct zynqmp_dma_desc_sw *desc;
+ int i, ret;
+
+ ret = pm_runtime_resume_and_get(chan->dev);
+ if (ret < 0)
+ return ret;
+
+ chan->sw_desc_pool = kcalloc(ZYNQMP_DMA_NUM_DESCS, sizeof(*desc),
+ GFP_KERNEL);
+ if (!chan->sw_desc_pool)
+ return -ENOMEM;
+
+ chan->idle = true;
+ chan->desc_free_cnt = ZYNQMP_DMA_NUM_DESCS;
+
+ INIT_LIST_HEAD(&chan->free_list);
+
+ for (i = 0; i < ZYNQMP_DMA_NUM_DESCS; i++) {
+ desc = chan->sw_desc_pool + i;
+ dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);
+ desc->async_tx.tx_submit = zynqmp_dma_tx_submit;
+ list_add_tail(&desc->node, &chan->free_list);
+ }
+
+ chan->desc_pool_v = dma_alloc_coherent(chan->dev,
+ (2 * ZYNQMP_DMA_DESC_SIZE(chan) *
+ ZYNQMP_DMA_NUM_DESCS),
+ &chan->desc_pool_p, GFP_KERNEL);
+ if (!chan->desc_pool_v)
+ return -ENOMEM;
+
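+	/*
+	 * Each software descriptor owns a contiguous pair of hardware
+	 * descriptors: the source descriptor followed by the destination
+	 * descriptor.
+	 */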
+ for (i = 0; i < ZYNQMP_DMA_NUM_DESCS; i++) {
+ desc = chan->sw_desc_pool + i;
+ desc->src_v = (struct zynqmp_dma_desc_ll *) (chan->desc_pool_v +
+ (i * ZYNQMP_DMA_DESC_SIZE(chan) * 2));
+ desc->dst_v = (struct zynqmp_dma_desc_ll *) (desc->src_v + 1);
+ desc->src_p = chan->desc_pool_p +
+ (i * ZYNQMP_DMA_DESC_SIZE(chan) * 2);
+ desc->dst_p = desc->src_p + ZYNQMP_DMA_DESC_SIZE(chan);
+ }
+
+ return ZYNQMP_DMA_NUM_DESCS;
+}
+
+/**
+ * zynqmp_dma_start - Start DMA channel
+ * @chan: ZynqMP DMA channel pointer
+ */
+static void zynqmp_dma_start(struct zynqmp_dma_chan *chan)
+{
+ writel(ZYNQMP_DMA_INT_EN_DEFAULT_MASK, chan->regs + ZYNQMP_DMA_IER);
+ writel(0, chan->regs + ZYNQMP_DMA_TOTAL_BYTE);
+ chan->idle = false;
+ writel(ZYNQMP_DMA_ENABLE, chan->regs + ZYNQMP_DMA_CTRL2);
+}
+
+/**
+ * zynqmp_dma_handle_ovfl_int - Process the overflow interrupt
+ * @chan: ZynqMP DMA channel pointer
+ * @status: Interrupt status value
+ */
+static void zynqmp_dma_handle_ovfl_int(struct zynqmp_dma_chan *chan, u32 status)
+{
+ if (status & ZYNQMP_DMA_BYTE_CNT_OVRFL)
+ writel(0, chan->regs + ZYNQMP_DMA_TOTAL_BYTE);
+ if (status & ZYNQMP_DMA_IRQ_DST_ACCT_ERR)
+ readl(chan->regs + ZYNQMP_DMA_IRQ_DST_ACCT);
+ if (status & ZYNQMP_DMA_IRQ_SRC_ACCT_ERR)
+ readl(chan->regs + ZYNQMP_DMA_IRQ_SRC_ACCT);
+}
+
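+/**
+ * zynqmp_dma_config - Program the channel configuration registers
+ * @chan: ZynqMP DMA channel pointer
+ *
+ * Select scatter-gather pointer mode and program the AXI read and write burst
+ * lengths.
+ */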
+static void zynqmp_dma_config(struct zynqmp_dma_chan *chan)
+{
+ u32 val, burst_val;
+
+ val = readl(chan->regs + ZYNQMP_DMA_CTRL0);
+ val |= ZYNQMP_DMA_POINT_TYPE_SG;
+ writel(val, chan->regs + ZYNQMP_DMA_CTRL0);
+
+ val = readl(chan->regs + ZYNQMP_DMA_DATA_ATTR);
+ burst_val = __ilog2_u32(chan->src_burst_len);
+ val = (val & ~ZYNQMP_DMA_ARLEN) |
+ ((burst_val << ZYNQMP_DMA_ARLEN_OFST) & ZYNQMP_DMA_ARLEN);
+ burst_val = __ilog2_u32(chan->dst_burst_len);
+ val = (val & ~ZYNQMP_DMA_AWLEN) |
+ ((burst_val << ZYNQMP_DMA_AWLEN_OFST) & ZYNQMP_DMA_AWLEN);
+ writel(val, chan->regs + ZYNQMP_DMA_DATA_ATTR);
+}
+
+/**
+ * zynqmp_dma_device_config - Zynqmp dma device configuration
+ * @dchan: DMA channel
+ * @config: DMA device config
+ *
+ * Return: 0 always
+ */
+static int zynqmp_dma_device_config(struct dma_chan *dchan,
+ struct dma_slave_config *config)
+{
+ struct zynqmp_dma_chan *chan = to_chan(dchan);
+
+ chan->src_burst_len = clamp(config->src_maxburst, 1U,
+ ZYNQMP_DMA_MAX_SRC_BURST_LEN);
+ chan->dst_burst_len = clamp(config->dst_maxburst, 1U,
+ ZYNQMP_DMA_MAX_DST_BURST_LEN);
+
+ return 0;
+}
+
+/**
+ * zynqmp_dma_start_transfer - Initiate the new transfer
+ * @chan: ZynqMP DMA channel pointer
+ */
+static void zynqmp_dma_start_transfer(struct zynqmp_dma_chan *chan)
+{
+ struct zynqmp_dma_desc_sw *desc;
+
+ if (!chan->idle)
+ return;
+
+ zynqmp_dma_config(chan);
+
+ desc = list_first_entry_or_null(&chan->pending_list,
+ struct zynqmp_dma_desc_sw, node);
+ if (!desc)
+ return;
+
+ list_splice_tail_init(&chan->pending_list, &chan->active_list);
+ zynqmp_dma_update_desc_to_ctrlr(chan, desc);
+ zynqmp_dma_start(chan);
+}
+
+
+/**
+ * zynqmp_dma_chan_desc_cleanup - Cleanup the completed descriptors
+ * @chan: ZynqMP DMA channel
+ */
+static void zynqmp_dma_chan_desc_cleanup(struct zynqmp_dma_chan *chan)
+{
+ struct zynqmp_dma_desc_sw *desc, *next;
+ unsigned long irqflags;
+
+ spin_lock_irqsave(&chan->lock, irqflags);
+
+ list_for_each_entry_safe(desc, next, &chan->done_list, node) {
+ struct dmaengine_desc_callback cb;
+
+ dmaengine_desc_get_callback(&desc->async_tx, &cb);
+ if (dmaengine_desc_callback_valid(&cb)) {
+ spin_unlock_irqrestore(&chan->lock, irqflags);
+ dmaengine_desc_callback_invoke(&cb, NULL);
+ spin_lock_irqsave(&chan->lock, irqflags);
+ }
+
+ /* Run any dependencies, then free the descriptor */
+ zynqmp_dma_free_descriptor(chan, desc);
+ }
+
+ spin_unlock_irqrestore(&chan->lock, irqflags);
+}
+
+/**
+ * zynqmp_dma_complete_descriptor - Mark the active descriptor as complete
+ * @chan: ZynqMP DMA channel pointer
+ */
+static void zynqmp_dma_complete_descriptor(struct zynqmp_dma_chan *chan)
+{
+ struct zynqmp_dma_desc_sw *desc;
+
+ desc = list_first_entry_or_null(&chan->active_list,
+ struct zynqmp_dma_desc_sw, node);
+ if (!desc)
+ return;
+ list_del(&desc->node);
+ dma_cookie_complete(&desc->async_tx);
+ list_add_tail(&desc->node, &chan->done_list);
+}
+
+/**
+ * zynqmp_dma_issue_pending - Issue pending transactions
+ * @dchan: DMA channel pointer
+ */
+static void zynqmp_dma_issue_pending(struct dma_chan *dchan)
+{
+ struct zynqmp_dma_chan *chan = to_chan(dchan);
+ unsigned long irqflags;
+
+ spin_lock_irqsave(&chan->lock, irqflags);
+ zynqmp_dma_start_transfer(chan);
+ spin_unlock_irqrestore(&chan->lock, irqflags);
+}
+
+/**
+ * zynqmp_dma_free_descriptors - Free channel descriptors
+ * @chan: ZynqMP DMA channel pointer
+ */
+static void zynqmp_dma_free_descriptors(struct zynqmp_dma_chan *chan)
+{
+ unsigned long irqflags;
+
+ spin_lock_irqsave(&chan->lock, irqflags);
+ zynqmp_dma_free_desc_list(chan, &chan->active_list);
+ zynqmp_dma_free_desc_list(chan, &chan->pending_list);
+ zynqmp_dma_free_desc_list(chan, &chan->done_list);
+ spin_unlock_irqrestore(&chan->lock, irqflags);
+}
+
+/**
+ * zynqmp_dma_free_chan_resources - Free channel resources
+ * @dchan: DMA channel pointer
+ */
+static void zynqmp_dma_free_chan_resources(struct dma_chan *dchan)
+{
+ struct zynqmp_dma_chan *chan = to_chan(dchan);
+
+ zynqmp_dma_free_descriptors(chan);
+ dma_free_coherent(chan->dev,
+ (2 * ZYNQMP_DMA_DESC_SIZE(chan) * ZYNQMP_DMA_NUM_DESCS),
+ chan->desc_pool_v, chan->desc_pool_p);
+ kfree(chan->sw_desc_pool);
+ pm_runtime_mark_last_busy(chan->dev);
+ pm_runtime_put_autosuspend(chan->dev);
+}
+
+/**
+ * zynqmp_dma_reset - Reset the channel
+ * @chan: ZynqMP DMA channel pointer
+ */
+static void zynqmp_dma_reset(struct zynqmp_dma_chan *chan)
+{
+ unsigned long irqflags;
+
+ writel(ZYNQMP_DMA_IDS_DEFAULT_MASK, chan->regs + ZYNQMP_DMA_IDS);
+
+ spin_lock_irqsave(&chan->lock, irqflags);
+ zynqmp_dma_complete_descriptor(chan);
+ spin_unlock_irqrestore(&chan->lock, irqflags);
+ zynqmp_dma_chan_desc_cleanup(chan);
+ zynqmp_dma_free_descriptors(chan);
+
+ zynqmp_dma_init(chan);
+}
+
+/**
+ * zynqmp_dma_irq_handler - ZynqMP DMA Interrupt handler
+ * @irq: IRQ number
+ * @data: Pointer to the ZynqMP DMA channel structure
+ *
+ * Return: IRQ_HANDLED/IRQ_NONE
+ */
+static irqreturn_t zynqmp_dma_irq_handler(int irq, void *data)
+{
+ struct zynqmp_dma_chan *chan = (struct zynqmp_dma_chan *)data;
+ u32 isr, imr, status;
+ irqreturn_t ret = IRQ_NONE;
+
+ isr = readl(chan->regs + ZYNQMP_DMA_ISR);
+ imr = readl(chan->regs + ZYNQMP_DMA_IMR);
+ status = isr & ~imr;
+
+ writel(isr, chan->regs + ZYNQMP_DMA_ISR);
+ if (status & ZYNQMP_DMA_INT_DONE) {
+ tasklet_schedule(&chan->tasklet);
+ ret = IRQ_HANDLED;
+ }
+
+ if (status & ZYNQMP_DMA_DONE)
+ chan->idle = true;
+
+ if (status & ZYNQMP_DMA_INT_ERR) {
+ chan->err = true;
+ tasklet_schedule(&chan->tasklet);
+ dev_err(chan->dev, "Channel %p has errors\n", chan);
+ ret = IRQ_HANDLED;
+ }
+
+ if (status & ZYNQMP_DMA_INT_OVRFL) {
+ zynqmp_dma_handle_ovfl_int(chan, status);
+ dev_dbg(chan->dev, "Channel %p overflow interrupt\n", chan);
+ ret = IRQ_HANDLED;
+ }
+
+ return ret;
+}
+
+/**
+ * zynqmp_dma_do_tasklet - Completion tasklet handler
+ * @t: Pointer to the ZynqMP DMA channel structure
+ */
+static void zynqmp_dma_do_tasklet(struct tasklet_struct *t)
+{
+ struct zynqmp_dma_chan *chan = from_tasklet(chan, t, tasklet);
+ u32 count;
+ unsigned long irqflags;
+
+ if (chan->err) {
+ zynqmp_dma_reset(chan);
+ chan->err = false;
+ return;
+ }
+
+ spin_lock_irqsave(&chan->lock, irqflags);
+ count = readl(chan->regs + ZYNQMP_DMA_IRQ_DST_ACCT);
+ while (count) {
+ zynqmp_dma_complete_descriptor(chan);
+ count--;
+ }
+ spin_unlock_irqrestore(&chan->lock, irqflags);
+
+ zynqmp_dma_chan_desc_cleanup(chan);
+
+ if (chan->idle) {
+ spin_lock_irqsave(&chan->lock, irqflags);
+ zynqmp_dma_start_transfer(chan);
+ spin_unlock_irqrestore(&chan->lock, irqflags);
+ }
+}
+
+/**
+ * zynqmp_dma_device_terminate_all - Aborts all transfers on a channel
+ * @dchan: DMA channel pointer
+ *
+ * Return: Always '0'
+ */
+static int zynqmp_dma_device_terminate_all(struct dma_chan *dchan)
+{
+ struct zynqmp_dma_chan *chan = to_chan(dchan);
+
+ writel(ZYNQMP_DMA_IDS_DEFAULT_MASK, chan->regs + ZYNQMP_DMA_IDS);
+ zynqmp_dma_free_descriptors(chan);
+
+ return 0;
+}
+
+/**
+ * zynqmp_dma_synchronize - Synchronize the termination of transfers with the current context
+ * @dchan: DMA channel pointer
+ */
+static void zynqmp_dma_synchronize(struct dma_chan *dchan)
+{
+ struct zynqmp_dma_chan *chan = to_chan(dchan);
+
+ tasklet_kill(&chan->tasklet);
+}
+
+/**
+ * zynqmp_dma_prep_memcpy - prepare descriptors for memcpy transaction
+ * @dchan: DMA channel
+ * @dma_dst: Destination buffer address
+ * @dma_src: Source buffer address
+ * @len: Transfer length
+ * @flags: transfer ack flags
+ *
+ * Return: Async transaction descriptor on success and NULL on failure
+ */
+static struct dma_async_tx_descriptor *zynqmp_dma_prep_memcpy(
+ struct dma_chan *dchan, dma_addr_t dma_dst,
+ dma_addr_t dma_src, size_t len, ulong flags)
+{
+ struct zynqmp_dma_chan *chan;
+ struct zynqmp_dma_desc_sw *new, *first = NULL;
+ void *desc = NULL, *prev = NULL;
+ size_t copy;
+ u32 desc_cnt;
+ unsigned long irqflags;
+
+ chan = to_chan(dchan);
+
+ desc_cnt = DIV_ROUND_UP(len, ZYNQMP_DMA_MAX_TRANS_LEN);
+
+ spin_lock_irqsave(&chan->lock, irqflags);
+ if (desc_cnt > chan->desc_free_cnt) {
+ spin_unlock_irqrestore(&chan->lock, irqflags);
+ dev_dbg(chan->dev, "chan %p descs are not available\n", chan);
+ return NULL;
+ }
+ chan->desc_free_cnt = chan->desc_free_cnt - desc_cnt;
+ spin_unlock_irqrestore(&chan->lock, irqflags);
+
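+	/*
+	 * Split the copy into chunks of at most ZYNQMP_DMA_MAX_TRANS_LEN bytes,
+	 * linking each new pair of descriptors to the previous one.
+	 */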
+ do {
+ /* Allocate and populate the descriptor */
+ new = zynqmp_dma_get_descriptor(chan);
+
+ copy = min_t(size_t, len, ZYNQMP_DMA_MAX_TRANS_LEN);
+ desc = (struct zynqmp_dma_desc_ll *)new->src_v;
+ zynqmp_dma_config_sg_ll_desc(chan, desc, dma_src,
+ dma_dst, copy, prev);
+ prev = desc;
+ len -= copy;
+ dma_src += copy;
+ dma_dst += copy;
+ if (!first)
+ first = new;
+ else
+ list_add_tail(&new->node, &first->tx_list);
+ } while (len);
+
+ zynqmp_dma_desc_config_eod(chan, desc);
+ async_tx_ack(&first->async_tx);
+ first->async_tx.flags = (enum dma_ctrl_flags)flags;
+ return &first->async_tx;
+}
+
+/**
+ * zynqmp_dma_chan_remove - Channel remove function
+ * @chan: ZynqMP DMA channel pointer
+ */
+static void zynqmp_dma_chan_remove(struct zynqmp_dma_chan *chan)
+{
+ if (!chan)
+ return;
+
+ if (chan->irq)
+ devm_free_irq(chan->zdev->dev, chan->irq, chan);
+ tasklet_kill(&chan->tasklet);
+ list_del(&chan->common.device_node);
+}
+
+/**
+ * zynqmp_dma_chan_probe - Per Channel Probing
+ * @zdev: Driver specific device structure
+ * @pdev: Pointer to the platform_device structure
+ *
+ * Return: '0' on success and failure value on error
+ */
+static int zynqmp_dma_chan_probe(struct zynqmp_dma_device *zdev,
+ struct platform_device *pdev)
+{
+ struct zynqmp_dma_chan *chan;
+ struct resource *res;
+ struct device_node *node = pdev->dev.of_node;
+ int err;
+
+ chan = devm_kzalloc(zdev->dev, sizeof(*chan), GFP_KERNEL);
+ if (!chan)
+ return -ENOMEM;
+ chan->dev = zdev->dev;
+ chan->zdev = zdev;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ chan->regs = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(chan->regs))
+ return PTR_ERR(chan->regs);
+
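+ /* Use the maximum burst lengths by default; the bus width must come from the "xlnx,bus-width" property. */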
+ chan->bus_width = ZYNQMP_DMA_BUS_WIDTH_64;
+ chan->dst_burst_len = ZYNQMP_DMA_MAX_DST_BURST_LEN;
+ chan->src_burst_len = ZYNQMP_DMA_MAX_SRC_BURST_LEN;
+ err = of_property_read_u32(node, "xlnx,bus-width", &chan->bus_width);
+ if (err < 0) {
+ dev_err(&pdev->dev, "missing xlnx,bus-width property\n");
+ return err;
+ }
+
+ if (chan->bus_width != ZYNQMP_DMA_BUS_WIDTH_64 &&
+ chan->bus_width != ZYNQMP_DMA_BUS_WIDTH_128) {
+ dev_err(zdev->dev, "invalid bus-width value");
+ return -EINVAL;
+ }
+
+ chan->is_dmacoherent = of_property_read_bool(node, "dma-coherent");
+ zdev->chan = chan;
+ tasklet_setup(&chan->tasklet, zynqmp_dma_do_tasklet);
+ spin_lock_init(&chan->lock);
+ INIT_LIST_HEAD(&chan->active_list);
+ INIT_LIST_HEAD(&chan->pending_list);
+ INIT_LIST_HEAD(&chan->done_list);
+ INIT_LIST_HEAD(&chan->free_list);
+
+ dma_cookie_init(&chan->common);
+ chan->common.device = &zdev->common;
+ list_add_tail(&chan->common.device_node, &zdev->common.channels);
+
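+ /* Program the channel's static hardware configuration before requesting its interrupt. */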
+ zynqmp_dma_init(chan);
+ chan->irq = platform_get_irq(pdev, 0);
+ if (chan->irq < 0)
+ return -ENXIO;
+ err = devm_request_irq(&pdev->dev, chan->irq, zynqmp_dma_irq_handler, 0,
+ "zynqmp-dma", chan);
+ if (err)
+ return err;
+
+ chan->desc_size = sizeof(struct zynqmp_dma_desc_ll);
+ chan->idle = true;
+ return 0;
+}
+
+/**
+ * of_zynqmp_dma_xlate - Translation function
+ * @dma_spec: Pointer to DMA specifier as found in the device tree
+ * @ofdma: Pointer to DMA controller data
+ *
+ * Return: DMA channel pointer on success and NULL on error
+ */
+static struct dma_chan *of_zynqmp_dma_xlate(struct of_phandle_args *dma_spec,
+ struct of_dma *ofdma)
+{
+ struct zynqmp_dma_device *zdev = ofdma->of_dma_data;
+
+ return dma_get_slave_channel(&zdev->chan->common);
+}
+
+/**
+ * zynqmp_dma_suspend - Suspend method for the driver
+ * @dev: Address of the device structure
+ *
+ * Put the driver into low power mode.
+ * Return: 0 on success and failure value on error
+ */
+static int __maybe_unused zynqmp_dma_suspend(struct device *dev)
+{
+ if (!device_may_wakeup(dev))
+ return pm_runtime_force_suspend(dev);
+
+ return 0;
+}
+
+/**
+ * zynqmp_dma_resume - Resume from suspend
+ * @dev: Address of the device structure
+ *
+ * Resume operation after suspend.
+ * Return: 0 on success and failure value on error
+ */
+static int __maybe_unused zynqmp_dma_resume(struct device *dev)
+{
+ if (!device_may_wakeup(dev))
+ return pm_runtime_force_resume(dev);
+
+ return 0;
+}
+
+/**
+ * zynqmp_dma_runtime_suspend - Runtime suspend method for the driver
+ * @dev: Address of the device structure
+ *
+ * Put the driver into low power mode.
+ * Return: 0 always
+ */
+static int __maybe_unused zynqmp_dma_runtime_suspend(struct device *dev)
+{
+ struct zynqmp_dma_device *zdev = dev_get_drvdata(dev);
+
+ clk_disable_unprepare(zdev->clk_main);
+ clk_disable_unprepare(zdev->clk_apb);
+
+ return 0;
+}
+
+/**
+ * zynqmp_dma_runtime_resume - Runtime resume method for the driver
+ * @dev: Address of the device structure
+ *
+ * Bring the device out of low power mode by re-enabling its clocks.
+ * Return: '0' on success and failure value on error
+ */
+static int __maybe_unused zynqmp_dma_runtime_resume(struct device *dev)
+{
+ struct zynqmp_dma_device *zdev = dev_get_drvdata(dev);
+ int err;
+
+ err = clk_prepare_enable(zdev->clk_main);
+ if (err) {
+ dev_err(dev, "Unable to enable main clock.\n");
+ return err;
+ }
+
+ err = clk_prepare_enable(zdev->clk_apb);
+ if (err) {
+ dev_err(dev, "Unable to enable apb clock.\n");
+ clk_disable_unprepare(zdev->clk_main);
+ return err;
+ }
+
+ return 0;
+}
+
+static const struct dev_pm_ops zynqmp_dma_dev_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(zynqmp_dma_suspend, zynqmp_dma_resume)
+ SET_RUNTIME_PM_OPS(zynqmp_dma_runtime_suspend,
+ zynqmp_dma_runtime_resume, NULL)
+};
+
+/**
+ * zynqmp_dma_probe - Driver probe function
+ * @pdev: Pointer to the platform_device structure
+ *
+ * Return: '0' on success and failure value on error
+ */
+static int zynqmp_dma_probe(struct platform_device *pdev)
+{
+ struct zynqmp_dma_device *zdev;
+ struct dma_device *p;
+ int ret;
+
+ zdev = devm_kzalloc(&pdev->dev, sizeof(*zdev), GFP_KERNEL);
+ if (!zdev)
+ return -ENOMEM;
+
+ zdev->dev = &pdev->dev;
+ INIT_LIST_HEAD(&zdev->common.channels);
+
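+ /* Limit DMA addressing to 44 bits, matching the engine's addressing capability. */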
+ dma_set_mask(&pdev->dev, DMA_BIT_MASK(44));
+ dma_cap_set(DMA_MEMCPY, zdev->common.cap_mask);
+
+ p = &zdev->common;
+ p->device_prep_dma_memcpy = zynqmp_dma_prep_memcpy;
+ p->device_terminate_all = zynqmp_dma_device_terminate_all;
+ p->device_synchronize = zynqmp_dma_synchronize;
+ p->device_issue_pending = zynqmp_dma_issue_pending;
+ p->device_alloc_chan_resources = zynqmp_dma_alloc_chan_resources;
+ p->device_free_chan_resources = zynqmp_dma_free_chan_resources;
+ p->device_tx_status = dma_cookie_status;
+ p->device_config = zynqmp_dma_device_config;
+ p->dev = &pdev->dev;
+
+ zdev->clk_main = devm_clk_get(&pdev->dev, "clk_main");
+ if (IS_ERR(zdev->clk_main))
+ return dev_err_probe(&pdev->dev, PTR_ERR(zdev->clk_main),
+ "main clock not found.\n");
+
+ zdev->clk_apb = devm_clk_get(&pdev->dev, "clk_apb");
+ if (IS_ERR(zdev->clk_apb))
+ return dev_err_probe(&pdev->dev, PTR_ERR(zdev->clk_apb),
+ "apb clock not found.\n");
+
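+ /* Enable runtime PM with autosuspend and wake the device for the remainder of probe. */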
+ platform_set_drvdata(pdev, zdev);
+ pm_runtime_set_autosuspend_delay(zdev->dev, ZDMA_PM_TIMEOUT);
+ pm_runtime_use_autosuspend(zdev->dev);
+ pm_runtime_enable(zdev->dev);
+ ret = pm_runtime_resume_and_get(zdev->dev);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "device wakeup failed.\n");
+ pm_runtime_disable(zdev->dev);
+ }
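+ /* If runtime PM is unavailable, enable the clocks directly instead. */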
+ if (!pm_runtime_enabled(zdev->dev)) {
+ ret = zynqmp_dma_runtime_resume(zdev->dev);
+ if (ret)
+ return ret;
+ }
+
+ ret = zynqmp_dma_chan_probe(zdev, pdev);
+ if (ret) {
+ dev_err_probe(&pdev->dev, ret, "Probing channel failed\n");
+ goto err_disable_pm;
+ }
+
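+ /* Advertise only the configured bus width (in bytes) as the supported address width. */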
+ p->dst_addr_widths = BIT(zdev->chan->bus_width / 8);
+ p->src_addr_widths = BIT(zdev->chan->bus_width / 8);
+
+ ret = dma_async_device_register(&zdev->common);
+ if (ret) {
+ dev_err(zdev->dev, "failed to register the dma device\n");
+ goto free_chan_resources;
+ }
+
+ ret = of_dma_controller_register(pdev->dev.of_node,
+ of_zynqmp_dma_xlate, zdev);
+ if (ret) {
+ dev_err_probe(&pdev->dev, ret, "Unable to register DMA to DT\n");
+ dma_async_device_unregister(&zdev->common);
+ goto free_chan_resources;
+ }
+
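+ /* Probe succeeded; allow the device to autosuspend once it goes idle. */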
+ pm_runtime_mark_last_busy(zdev->dev);
+ pm_runtime_put_sync_autosuspend(zdev->dev);
+
+ return 0;
+
+free_chan_resources:
+ zynqmp_dma_chan_remove(zdev->chan);
+err_disable_pm:
+ if (!pm_runtime_enabled(zdev->dev))
+ zynqmp_dma_runtime_suspend(zdev->dev);
+ pm_runtime_disable(zdev->dev);
+ return ret;
+}
+
+/**
+ * zynqmp_dma_remove - Driver remove function
+ * @pdev: Pointer to the platform_device structure
+ *
+ * Return: Always '0'
+ */
+static int zynqmp_dma_remove(struct platform_device *pdev)
+{
+ struct zynqmp_dma_device *zdev = platform_get_drvdata(pdev);
+
+ of_dma_controller_free(pdev->dev.of_node);
+ dma_async_device_unregister(&zdev->common);
+
+ zynqmp_dma_chan_remove(zdev->chan);
+ pm_runtime_disable(zdev->dev);
+ if (!pm_runtime_enabled(zdev->dev))
+ zynqmp_dma_runtime_suspend(zdev->dev);
+
+ return 0;
+}
+
+static const struct of_device_id zynqmp_dma_of_match[] = {
+ { .compatible = "xlnx,zynqmp-dma-1.0", },
+ {}
+};
+MODULE_DEVICE_TABLE(of, zynqmp_dma_of_match);
+
+static struct platform_driver zynqmp_dma_driver = {
+ .driver = {
+ .name = "xilinx-zynqmp-dma",
+ .of_match_table = zynqmp_dma_of_match,
+ .pm = &zynqmp_dma_dev_pm_ops,
+ },
+ .probe = zynqmp_dma_probe,
+ .remove = zynqmp_dma_remove,
+};
+
+module_platform_driver(zynqmp_dma_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Xilinx, Inc.");
+MODULE_DESCRIPTION("Xilinx ZynqMP DMA driver");