Diffstat (limited to 'drivers/dma/xilinx')
-rw-r--r--  drivers/dma/xilinx/Makefile      |    2
-rw-r--r--  drivers/dma/xilinx/xilinx_dma.c  | 2799
-rw-r--r--  drivers/dma/xilinx/zynqmp_dma.c  | 1161
3 files changed, 3962 insertions, 0 deletions
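
For context on the VDMA channel interface the patch below introduces, here is a minimal, hypothetical dmaengine client sketch (not part of the patch): it requests a channel, applies VDMA-specific settings through xilinx_vdma_channel_set_config(), and queues a single frame as an interleaved transfer in the shape xilinx_vdma_dma_prep_interleaved() expects (numf lines, frame_size of 1, icg covering the stride padding). The device pointer, the "vdma0" dma-names entry and the frame geometry are assumptions made purely for illustration.

#include <linux/dmaengine.h>
#include <linux/dma/xilinx_dma.h>
#include <linux/err.h>
#include <linux/slab.h>

/*
 * Illustrative only: "my_dev", the "vdma0" dma-names entry and the frame
 * geometry are assumptions, not part of this patch.
 */
static int vdma_queue_one_frame(struct device *my_dev, dma_addr_t frame_buf,
				u32 hsize_bytes, u32 vsize_lines, u32 stride)
{
	struct xilinx_vdma_config cfg = { };
	struct dma_interleaved_template *xt;
	struct dma_async_tx_descriptor *tx;
	struct dma_chan *chan;
	dma_cookie_t cookie;
	int ret;

	/* MM2S channel, looked up via the DT dma-names property */
	chan = dma_request_chan(my_dev, "vdma0");
	if (IS_ERR(chan))
		return PTR_ERR(chan);

	/* VDMA-specific knobs, handled by xilinx_vdma_channel_set_config() */
	cfg.park = 1;		/* park on one frame instead of circular mode */
	cfg.coalesc = 1;	/* raise the frame-count interrupt every frame */
	ret = xilinx_vdma_channel_set_config(chan, &cfg);
	if (ret)
		goto release;

	/* One chunk per line: numf lines of size bytes, icg pads out the stride */
	xt = kzalloc(sizeof(*xt) + sizeof(struct data_chunk), GFP_KERNEL);
	if (!xt) {
		ret = -ENOMEM;
		goto release;
	}
	xt->dir = DMA_MEM_TO_DEV;		/* MM2S: memory -> video device */
	xt->src_start = frame_buf;
	xt->numf = vsize_lines;
	xt->frame_size = 1;			/* the driver accepts exactly one chunk */
	xt->sgl[0].size = hsize_bytes;
	xt->sgl[0].icg = stride - hsize_bytes;

	tx = dmaengine_prep_interleaved_dma(chan, xt,
					    DMA_CTRL_ACK | DMA_PREP_INTERRUPT);
	kfree(xt);				/* the template is consumed during prep */
	if (!tx) {
		ret = -ENOMEM;
		goto release;
	}

	cookie = dmaengine_submit(tx);
	ret = dma_submit_error(cookie);
	if (ret)
		goto release;

	dma_async_issue_pending(chan);		/* ends up in xilinx_dma_issue_pending() */
	/* A real client keeps the channel and waits for the frame-done callback
	 * before tearing anything down. */
	return 0;

release:
	dma_release_channel(chan);
	return ret;
}
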
diff --git a/drivers/dma/xilinx/Makefile b/drivers/dma/xilinx/Makefile new file mode 100644 index 000000000..9e91f8f5b --- /dev/null +++ b/drivers/dma/xilinx/Makefile @@ -0,0 +1,2 @@ +obj-$(CONFIG_XILINX_DMA) += xilinx_dma.o +obj-$(CONFIG_XILINX_ZYNQMP_DMA) += zynqmp_dma.o diff --git a/drivers/dma/xilinx/xilinx_dma.c b/drivers/dma/xilinx/xilinx_dma.c new file mode 100644 index 000000000..3f38df6b5 --- /dev/null +++ b/drivers/dma/xilinx/xilinx_dma.c @@ -0,0 +1,2799 @@ +/* + * DMA driver for Xilinx Video DMA Engine + * + * Copyright (C) 2010-2014 Xilinx, Inc. All rights reserved. + * + * Based on the Freescale DMA driver. + * + * Description: + * The AXI Video Direct Memory Access (AXI VDMA) core is a soft Xilinx IP + * core that provides high-bandwidth direct memory access between memory + * and AXI4-Stream type video target peripherals. The core provides efficient + * two dimensional DMA operations with independent asynchronous read (S2MM) + * and write (MM2S) channel operation. It can be configured to have either + * one channel or two channels. If configured as two channels, one is to + * transmit to the video device (MM2S) and another is to receive from the + * video device (S2MM). Initialization, status, interrupt and management + * registers are accessed through an AXI4-Lite slave interface. + * + * The AXI Direct Memory Access (AXI DMA) core is a soft Xilinx IP core that + * provides high-bandwidth one dimensional direct memory access between memory + * and AXI4-Stream target peripherals. It supports one receive and one + * transmit channel, both of them optional at synthesis time. + * + * The AXI CDMA, is a soft IP, which provides high-bandwidth Direct Memory + * Access (DMA) between a memory-mapped source address and a memory-mapped + * destination address. + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 2 of the License, or + * (at your option) any later version. 
+ */ + +#include <linux/bitops.h> +#include <linux/dmapool.h> +#include <linux/dma/xilinx_dma.h> +#include <linux/init.h> +#include <linux/interrupt.h> +#include <linux/io.h> +#include <linux/iopoll.h> +#include <linux/module.h> +#include <linux/of_address.h> +#include <linux/of_dma.h> +#include <linux/of_platform.h> +#include <linux/of_irq.h> +#include <linux/slab.h> +#include <linux/clk.h> +#include <linux/io-64-nonatomic-lo-hi.h> + +#include "../dmaengine.h" + +/* Register/Descriptor Offsets */ +#define XILINX_DMA_MM2S_CTRL_OFFSET 0x0000 +#define XILINX_DMA_S2MM_CTRL_OFFSET 0x0030 +#define XILINX_VDMA_MM2S_DESC_OFFSET 0x0050 +#define XILINX_VDMA_S2MM_DESC_OFFSET 0x00a0 + +/* Control Registers */ +#define XILINX_DMA_REG_DMACR 0x0000 +#define XILINX_DMA_DMACR_DELAY_MAX 0xff +#define XILINX_DMA_DMACR_DELAY_SHIFT 24 +#define XILINX_DMA_DMACR_FRAME_COUNT_MAX 0xff +#define XILINX_DMA_DMACR_FRAME_COUNT_SHIFT 16 +#define XILINX_DMA_DMACR_ERR_IRQ BIT(14) +#define XILINX_DMA_DMACR_DLY_CNT_IRQ BIT(13) +#define XILINX_DMA_DMACR_FRM_CNT_IRQ BIT(12) +#define XILINX_DMA_DMACR_MASTER_SHIFT 8 +#define XILINX_DMA_DMACR_FSYNCSRC_SHIFT 5 +#define XILINX_DMA_DMACR_FRAMECNT_EN BIT(4) +#define XILINX_DMA_DMACR_GENLOCK_EN BIT(3) +#define XILINX_DMA_DMACR_RESET BIT(2) +#define XILINX_DMA_DMACR_CIRC_EN BIT(1) +#define XILINX_DMA_DMACR_RUNSTOP BIT(0) +#define XILINX_DMA_DMACR_FSYNCSRC_MASK GENMASK(6, 5) +#define XILINX_DMA_DMACR_DELAY_MASK GENMASK(31, 24) +#define XILINX_DMA_DMACR_FRAME_COUNT_MASK GENMASK(23, 16) +#define XILINX_DMA_DMACR_MASTER_MASK GENMASK(11, 8) + +#define XILINX_DMA_REG_DMASR 0x0004 +#define XILINX_DMA_DMASR_EOL_LATE_ERR BIT(15) +#define XILINX_DMA_DMASR_ERR_IRQ BIT(14) +#define XILINX_DMA_DMASR_DLY_CNT_IRQ BIT(13) +#define XILINX_DMA_DMASR_FRM_CNT_IRQ BIT(12) +#define XILINX_DMA_DMASR_SOF_LATE_ERR BIT(11) +#define XILINX_DMA_DMASR_SG_DEC_ERR BIT(10) +#define XILINX_DMA_DMASR_SG_SLV_ERR BIT(9) +#define XILINX_DMA_DMASR_EOF_EARLY_ERR BIT(8) +#define XILINX_DMA_DMASR_SOF_EARLY_ERR BIT(7) +#define XILINX_DMA_DMASR_DMA_DEC_ERR BIT(6) +#define XILINX_DMA_DMASR_DMA_SLAVE_ERR BIT(5) +#define XILINX_DMA_DMASR_DMA_INT_ERR BIT(4) +#define XILINX_DMA_DMASR_IDLE BIT(1) +#define XILINX_DMA_DMASR_HALTED BIT(0) +#define XILINX_DMA_DMASR_DELAY_MASK GENMASK(31, 24) +#define XILINX_DMA_DMASR_FRAME_COUNT_MASK GENMASK(23, 16) + +#define XILINX_DMA_REG_CURDESC 0x0008 +#define XILINX_DMA_REG_TAILDESC 0x0010 +#define XILINX_DMA_REG_REG_INDEX 0x0014 +#define XILINX_DMA_REG_FRMSTORE 0x0018 +#define XILINX_DMA_REG_THRESHOLD 0x001c +#define XILINX_DMA_REG_FRMPTR_STS 0x0024 +#define XILINX_DMA_REG_PARK_PTR 0x0028 +#define XILINX_DMA_PARK_PTR_WR_REF_SHIFT 8 +#define XILINX_DMA_PARK_PTR_WR_REF_MASK GENMASK(12, 8) +#define XILINX_DMA_PARK_PTR_RD_REF_SHIFT 0 +#define XILINX_DMA_PARK_PTR_RD_REF_MASK GENMASK(4, 0) +#define XILINX_DMA_REG_VDMA_VERSION 0x002c + +/* Register Direct Mode Registers */ +#define XILINX_DMA_REG_VSIZE 0x0000 +#define XILINX_DMA_REG_HSIZE 0x0004 + +#define XILINX_DMA_REG_FRMDLY_STRIDE 0x0008 +#define XILINX_DMA_FRMDLY_STRIDE_FRMDLY_SHIFT 24 +#define XILINX_DMA_FRMDLY_STRIDE_STRIDE_SHIFT 0 + +#define XILINX_VDMA_REG_START_ADDRESS(n) (0x000c + 4 * (n)) +#define XILINX_VDMA_REG_START_ADDRESS_64(n) (0x000c + 8 * (n)) + +#define XILINX_VDMA_REG_ENABLE_VERTICAL_FLIP 0x00ec +#define XILINX_VDMA_ENABLE_VERTICAL_FLIP BIT(0) + +/* HW specific definitions */ +#define XILINX_DMA_MAX_CHANS_PER_DEVICE 0x20 + +#define XILINX_DMA_DMAXR_ALL_IRQ_MASK \ + (XILINX_DMA_DMASR_FRM_CNT_IRQ | \ + XILINX_DMA_DMASR_DLY_CNT_IRQ | 
\ + XILINX_DMA_DMASR_ERR_IRQ) + +#define XILINX_DMA_DMASR_ALL_ERR_MASK \ + (XILINX_DMA_DMASR_EOL_LATE_ERR | \ + XILINX_DMA_DMASR_SOF_LATE_ERR | \ + XILINX_DMA_DMASR_SG_DEC_ERR | \ + XILINX_DMA_DMASR_SG_SLV_ERR | \ + XILINX_DMA_DMASR_EOF_EARLY_ERR | \ + XILINX_DMA_DMASR_SOF_EARLY_ERR | \ + XILINX_DMA_DMASR_DMA_DEC_ERR | \ + XILINX_DMA_DMASR_DMA_SLAVE_ERR | \ + XILINX_DMA_DMASR_DMA_INT_ERR) + +/* + * Recoverable errors are DMA Internal error, SOF Early, EOF Early + * and SOF Late. They are only recoverable when C_FLUSH_ON_FSYNC + * is enabled in the h/w system. + */ +#define XILINX_DMA_DMASR_ERR_RECOVER_MASK \ + (XILINX_DMA_DMASR_SOF_LATE_ERR | \ + XILINX_DMA_DMASR_EOF_EARLY_ERR | \ + XILINX_DMA_DMASR_SOF_EARLY_ERR | \ + XILINX_DMA_DMASR_DMA_INT_ERR) + +/* Axi VDMA Flush on Fsync bits */ +#define XILINX_DMA_FLUSH_S2MM 3 +#define XILINX_DMA_FLUSH_MM2S 2 +#define XILINX_DMA_FLUSH_BOTH 1 + +/* Delay loop counter to prevent hardware failure */ +#define XILINX_DMA_LOOP_COUNT 1000000 + +/* AXI DMA Specific Registers/Offsets */ +#define XILINX_DMA_REG_SRCDSTADDR 0x18 +#define XILINX_DMA_REG_BTT 0x28 + +/* AXI DMA Specific Masks/Bit fields */ +#define XILINX_DMA_MAX_TRANS_LEN GENMASK(22, 0) +#define XILINX_DMA_CR_COALESCE_MAX GENMASK(23, 16) +#define XILINX_DMA_CR_CYCLIC_BD_EN_MASK BIT(4) +#define XILINX_DMA_CR_COALESCE_SHIFT 16 +#define XILINX_DMA_BD_SOP BIT(27) +#define XILINX_DMA_BD_EOP BIT(26) +#define XILINX_DMA_COALESCE_MAX 255 +#define XILINX_DMA_NUM_DESCS 255 +#define XILINX_DMA_NUM_APP_WORDS 5 + +/* Multi-Channel DMA Descriptor offsets*/ +#define XILINX_DMA_MCRX_CDESC(x) (0x40 + (x-1) * 0x20) +#define XILINX_DMA_MCRX_TDESC(x) (0x48 + (x-1) * 0x20) + +/* Multi-Channel DMA Masks/Shifts */ +#define XILINX_DMA_BD_HSIZE_MASK GENMASK(15, 0) +#define XILINX_DMA_BD_STRIDE_MASK GENMASK(15, 0) +#define XILINX_DMA_BD_VSIZE_MASK GENMASK(31, 19) +#define XILINX_DMA_BD_TDEST_MASK GENMASK(4, 0) +#define XILINX_DMA_BD_STRIDE_SHIFT 0 +#define XILINX_DMA_BD_VSIZE_SHIFT 19 + +/* AXI CDMA Specific Registers/Offsets */ +#define XILINX_CDMA_REG_SRCADDR 0x18 +#define XILINX_CDMA_REG_DSTADDR 0x20 + +/* AXI CDMA Specific Masks */ +#define XILINX_CDMA_CR_SGMODE BIT(3) + +/** + * struct xilinx_vdma_desc_hw - Hardware Descriptor + * @next_desc: Next Descriptor Pointer @0x00 + * @pad1: Reserved @0x04 + * @buf_addr: Buffer address @0x08 + * @buf_addr_msb: MSB of Buffer address @0x0C + * @vsize: Vertical Size @0x10 + * @hsize: Horizontal Size @0x14 + * @stride: Number of bytes between the first + * pixels of each horizontal line @0x18 + */ +struct xilinx_vdma_desc_hw { + u32 next_desc; + u32 pad1; + u32 buf_addr; + u32 buf_addr_msb; + u32 vsize; + u32 hsize; + u32 stride; +} __aligned(64); + +/** + * struct xilinx_axidma_desc_hw - Hardware Descriptor for AXI DMA + * @next_desc: Next Descriptor Pointer @0x00 + * @next_desc_msb: MSB of Next Descriptor Pointer @0x04 + * @buf_addr: Buffer address @0x08 + * @buf_addr_msb: MSB of Buffer address @0x0C + * @mcdma_control: Control field for mcdma @0x10 + * @vsize_stride: Vsize and Stride field for mcdma @0x14 + * @control: Control field @0x18 + * @status: Status field @0x1C + * @app: APP Fields @0x20 - 0x30 + */ +struct xilinx_axidma_desc_hw { + u32 next_desc; + u32 next_desc_msb; + u32 buf_addr; + u32 buf_addr_msb; + u32 mcdma_control; + u32 vsize_stride; + u32 control; + u32 status; + u32 app[XILINX_DMA_NUM_APP_WORDS]; +} __aligned(64); + +/** + * struct xilinx_cdma_desc_hw - Hardware Descriptor + * @next_desc: Next Descriptor Pointer @0x00 + * @next_desc_msb: Next Descriptor 
Pointer MSB @0x04 + * @src_addr: Source address @0x08 + * @src_addr_msb: Source address MSB @0x0C + * @dest_addr: Destination address @0x10 + * @dest_addr_msb: Destination address MSB @0x14 + * @control: Control field @0x18 + * @status: Status field @0x1C + */ +struct xilinx_cdma_desc_hw { + u32 next_desc; + u32 next_desc_msb; + u32 src_addr; + u32 src_addr_msb; + u32 dest_addr; + u32 dest_addr_msb; + u32 control; + u32 status; +} __aligned(64); + +/** + * struct xilinx_vdma_tx_segment - Descriptor segment + * @hw: Hardware descriptor + * @node: Node in the descriptor segments list + * @phys: Physical address of segment + */ +struct xilinx_vdma_tx_segment { + struct xilinx_vdma_desc_hw hw; + struct list_head node; + dma_addr_t phys; +} __aligned(64); + +/** + * struct xilinx_axidma_tx_segment - Descriptor segment + * @hw: Hardware descriptor + * @node: Node in the descriptor segments list + * @phys: Physical address of segment + */ +struct xilinx_axidma_tx_segment { + struct xilinx_axidma_desc_hw hw; + struct list_head node; + dma_addr_t phys; +} __aligned(64); + +/** + * struct xilinx_cdma_tx_segment - Descriptor segment + * @hw: Hardware descriptor + * @node: Node in the descriptor segments list + * @phys: Physical address of segment + */ +struct xilinx_cdma_tx_segment { + struct xilinx_cdma_desc_hw hw; + struct list_head node; + dma_addr_t phys; +} __aligned(64); + +/** + * struct xilinx_dma_tx_descriptor - Per Transaction structure + * @async_tx: Async transaction descriptor + * @segments: TX segments list + * @node: Node in the channel descriptors list + * @cyclic: Check for cyclic transfers. + */ +struct xilinx_dma_tx_descriptor { + struct dma_async_tx_descriptor async_tx; + struct list_head segments; + struct list_head node; + bool cyclic; +}; + +/** + * struct xilinx_dma_chan - Driver specific DMA channel structure + * @xdev: Driver specific device structure + * @ctrl_offset: Control registers offset + * @desc_offset: TX descriptor registers offset + * @lock: Descriptor operation lock + * @pending_list: Descriptors waiting + * @active_list: Descriptors ready to submit + * @done_list: Complete descriptors + * @free_seg_list: Free descriptors + * @common: DMA common channel + * @desc_pool: Descriptors pool + * @dev: The dma device + * @irq: Channel IRQ + * @id: Channel ID + * @direction: Transfer direction + * @num_frms: Number of frames + * @has_sg: Support scatter transfers + * @cyclic: Check for cyclic transfers. 
+ * @genlock: Support genlock mode + * @err: Channel has errors + * @idle: Check for channel idle + * @terminating: Check for channel being synchronized by user + * @tasklet: Cleanup work after irq + * @config: Device configuration info + * @flush_on_fsync: Flush on Frame sync + * @desc_pendingcount: Descriptor pending count + * @ext_addr: Indicates 64 bit addressing is supported by dma channel + * @desc_submitcount: Descriptor h/w submitted count + * @residue: Residue for AXI DMA + * @seg_v: Statically allocated segments base + * @seg_p: Physical allocated segments base + * @cyclic_seg_v: Statically allocated segment base for cyclic transfers + * @cyclic_seg_p: Physical allocated segments base for cyclic dma + * @start_transfer: Differentiate b/w DMA IP's transfer + * @stop_transfer: Differentiate b/w DMA IP's quiesce + * @tdest: TDEST value for mcdma + * @has_vflip: S2MM vertical flip + */ +struct xilinx_dma_chan { + struct xilinx_dma_device *xdev; + u32 ctrl_offset; + u32 desc_offset; + spinlock_t lock; + struct list_head pending_list; + struct list_head active_list; + struct list_head done_list; + struct list_head free_seg_list; + struct dma_chan common; + struct dma_pool *desc_pool; + struct device *dev; + int irq; + int id; + enum dma_transfer_direction direction; + int num_frms; + bool has_sg; + bool cyclic; + bool genlock; + bool err; + bool idle; + bool terminating; + struct tasklet_struct tasklet; + struct xilinx_vdma_config config; + bool flush_on_fsync; + u32 desc_pendingcount; + bool ext_addr; + u32 desc_submitcount; + u32 residue; + struct xilinx_axidma_tx_segment *seg_v; + dma_addr_t seg_p; + struct xilinx_axidma_tx_segment *cyclic_seg_v; + dma_addr_t cyclic_seg_p; + void (*start_transfer)(struct xilinx_dma_chan *chan); + int (*stop_transfer)(struct xilinx_dma_chan *chan); + u16 tdest; + bool has_vflip; +}; + +/** + * enum xdma_ip_type - DMA IP type. + * + * @XDMA_TYPE_AXIDMA: Axi dma ip. + * @XDMA_TYPE_CDMA: Axi cdma ip. + * @XDMA_TYPE_VDMA: Axi vdma ip. 
+ * + */ +enum xdma_ip_type { + XDMA_TYPE_AXIDMA = 0, + XDMA_TYPE_CDMA, + XDMA_TYPE_VDMA, +}; + +struct xilinx_dma_config { + enum xdma_ip_type dmatype; + int (*clk_init)(struct platform_device *pdev, struct clk **axi_clk, + struct clk **tx_clk, struct clk **txs_clk, + struct clk **rx_clk, struct clk **rxs_clk); +}; + +/** + * struct xilinx_dma_device - DMA device structure + * @regs: I/O mapped base address + * @dev: Device Structure + * @common: DMA device structure + * @chan: Driver specific DMA channel + * @has_sg: Specifies whether Scatter-Gather is present or not + * @mcdma: Specifies whether Multi-Channel is present or not + * @flush_on_fsync: Flush on frame sync + * @ext_addr: Indicates 64 bit addressing is supported by dma device + * @pdev: Platform device structure pointer + * @dma_config: DMA config structure + * @axi_clk: DMA Axi4-lite interace clock + * @tx_clk: DMA mm2s clock + * @txs_clk: DMA mm2s stream clock + * @rx_clk: DMA s2mm clock + * @rxs_clk: DMA s2mm stream clock + * @nr_channels: Number of channels DMA device supports + * @chan_id: DMA channel identifier + */ +struct xilinx_dma_device { + void __iomem *regs; + struct device *dev; + struct dma_device common; + struct xilinx_dma_chan *chan[XILINX_DMA_MAX_CHANS_PER_DEVICE]; + bool has_sg; + bool mcdma; + u32 flush_on_fsync; + bool ext_addr; + struct platform_device *pdev; + const struct xilinx_dma_config *dma_config; + struct clk *axi_clk; + struct clk *tx_clk; + struct clk *txs_clk; + struct clk *rx_clk; + struct clk *rxs_clk; + u32 nr_channels; + u32 chan_id; +}; + +/* Macros */ +#define to_xilinx_chan(chan) \ + container_of(chan, struct xilinx_dma_chan, common) +#define to_dma_tx_descriptor(tx) \ + container_of(tx, struct xilinx_dma_tx_descriptor, async_tx) +#define xilinx_dma_poll_timeout(chan, reg, val, cond, delay_us, timeout_us) \ + readl_poll_timeout_atomic(chan->xdev->regs + chan->ctrl_offset + reg, \ + val, cond, delay_us, timeout_us) + +/* IO accessors */ +static inline u32 dma_read(struct xilinx_dma_chan *chan, u32 reg) +{ + return ioread32(chan->xdev->regs + reg); +} + +static inline void dma_write(struct xilinx_dma_chan *chan, u32 reg, u32 value) +{ + iowrite32(value, chan->xdev->regs + reg); +} + +static inline void vdma_desc_write(struct xilinx_dma_chan *chan, u32 reg, + u32 value) +{ + dma_write(chan, chan->desc_offset + reg, value); +} + +static inline u32 dma_ctrl_read(struct xilinx_dma_chan *chan, u32 reg) +{ + return dma_read(chan, chan->ctrl_offset + reg); +} + +static inline void dma_ctrl_write(struct xilinx_dma_chan *chan, u32 reg, + u32 value) +{ + dma_write(chan, chan->ctrl_offset + reg, value); +} + +static inline void dma_ctrl_clr(struct xilinx_dma_chan *chan, u32 reg, + u32 clr) +{ + dma_ctrl_write(chan, reg, dma_ctrl_read(chan, reg) & ~clr); +} + +static inline void dma_ctrl_set(struct xilinx_dma_chan *chan, u32 reg, + u32 set) +{ + dma_ctrl_write(chan, reg, dma_ctrl_read(chan, reg) | set); +} + +/** + * vdma_desc_write_64 - 64-bit descriptor write + * @chan: Driver specific VDMA channel + * @reg: Register to write + * @value_lsb: lower address of the descriptor. + * @value_msb: upper address of the descriptor. + * + * Since vdma driver is trying to write to a register offset which is not a + * multiple of 64 bits(ex : 0x5c), we are writing as two separate 32 bits + * instead of a single 64 bit register write. 
+ */ +static inline void vdma_desc_write_64(struct xilinx_dma_chan *chan, u32 reg, + u32 value_lsb, u32 value_msb) +{ + /* Write the lsb 32 bits*/ + writel(value_lsb, chan->xdev->regs + chan->desc_offset + reg); + + /* Write the msb 32 bits */ + writel(value_msb, chan->xdev->regs + chan->desc_offset + reg + 4); +} + +static inline void dma_writeq(struct xilinx_dma_chan *chan, u32 reg, u64 value) +{ + lo_hi_writeq(value, chan->xdev->regs + chan->ctrl_offset + reg); +} + +static inline void xilinx_write(struct xilinx_dma_chan *chan, u32 reg, + dma_addr_t addr) +{ + if (chan->ext_addr) + dma_writeq(chan, reg, addr); + else + dma_ctrl_write(chan, reg, addr); +} + +static inline void xilinx_axidma_buf(struct xilinx_dma_chan *chan, + struct xilinx_axidma_desc_hw *hw, + dma_addr_t buf_addr, size_t sg_used, + size_t period_len) +{ + if (chan->ext_addr) { + hw->buf_addr = lower_32_bits(buf_addr + sg_used + period_len); + hw->buf_addr_msb = upper_32_bits(buf_addr + sg_used + + period_len); + } else { + hw->buf_addr = buf_addr + sg_used + period_len; + } +} + +/* ----------------------------------------------------------------------------- + * Descriptors and segments alloc and free + */ + +/** + * xilinx_vdma_alloc_tx_segment - Allocate transaction segment + * @chan: Driver specific DMA channel + * + * Return: The allocated segment on success and NULL on failure. + */ +static struct xilinx_vdma_tx_segment * +xilinx_vdma_alloc_tx_segment(struct xilinx_dma_chan *chan) +{ + struct xilinx_vdma_tx_segment *segment; + dma_addr_t phys; + + segment = dma_pool_zalloc(chan->desc_pool, GFP_ATOMIC, &phys); + if (!segment) + return NULL; + + segment->phys = phys; + + return segment; +} + +/** + * xilinx_cdma_alloc_tx_segment - Allocate transaction segment + * @chan: Driver specific DMA channel + * + * Return: The allocated segment on success and NULL on failure. + */ +static struct xilinx_cdma_tx_segment * +xilinx_cdma_alloc_tx_segment(struct xilinx_dma_chan *chan) +{ + struct xilinx_cdma_tx_segment *segment; + dma_addr_t phys; + + segment = dma_pool_zalloc(chan->desc_pool, GFP_ATOMIC, &phys); + if (!segment) + return NULL; + + segment->phys = phys; + + return segment; +} + +/** + * xilinx_axidma_alloc_tx_segment - Allocate transaction segment + * @chan: Driver specific DMA channel + * + * Return: The allocated segment on success and NULL on failure. 
+ */ +static struct xilinx_axidma_tx_segment * +xilinx_axidma_alloc_tx_segment(struct xilinx_dma_chan *chan) +{ + struct xilinx_axidma_tx_segment *segment = NULL; + unsigned long flags; + + spin_lock_irqsave(&chan->lock, flags); + if (!list_empty(&chan->free_seg_list)) { + segment = list_first_entry(&chan->free_seg_list, + struct xilinx_axidma_tx_segment, + node); + list_del(&segment->node); + } + spin_unlock_irqrestore(&chan->lock, flags); + + return segment; +} + +static void xilinx_dma_clean_hw_desc(struct xilinx_axidma_desc_hw *hw) +{ + u32 next_desc = hw->next_desc; + u32 next_desc_msb = hw->next_desc_msb; + + memset(hw, 0, sizeof(struct xilinx_axidma_desc_hw)); + + hw->next_desc = next_desc; + hw->next_desc_msb = next_desc_msb; +} + +/** + * xilinx_dma_free_tx_segment - Free transaction segment + * @chan: Driver specific DMA channel + * @segment: DMA transaction segment + */ +static void xilinx_dma_free_tx_segment(struct xilinx_dma_chan *chan, + struct xilinx_axidma_tx_segment *segment) +{ + xilinx_dma_clean_hw_desc(&segment->hw); + + list_add_tail(&segment->node, &chan->free_seg_list); +} + +/** + * xilinx_cdma_free_tx_segment - Free transaction segment + * @chan: Driver specific DMA channel + * @segment: DMA transaction segment + */ +static void xilinx_cdma_free_tx_segment(struct xilinx_dma_chan *chan, + struct xilinx_cdma_tx_segment *segment) +{ + dma_pool_free(chan->desc_pool, segment, segment->phys); +} + +/** + * xilinx_vdma_free_tx_segment - Free transaction segment + * @chan: Driver specific DMA channel + * @segment: DMA transaction segment + */ +static void xilinx_vdma_free_tx_segment(struct xilinx_dma_chan *chan, + struct xilinx_vdma_tx_segment *segment) +{ + dma_pool_free(chan->desc_pool, segment, segment->phys); +} + +/** + * xilinx_dma_tx_descriptor - Allocate transaction descriptor + * @chan: Driver specific DMA channel + * + * Return: The allocated descriptor on success and NULL on failure. 
+ */ +static struct xilinx_dma_tx_descriptor * +xilinx_dma_alloc_tx_descriptor(struct xilinx_dma_chan *chan) +{ + struct xilinx_dma_tx_descriptor *desc; + + desc = kzalloc(sizeof(*desc), GFP_KERNEL); + if (!desc) + return NULL; + + INIT_LIST_HEAD(&desc->segments); + + return desc; +} + +/** + * xilinx_dma_free_tx_descriptor - Free transaction descriptor + * @chan: Driver specific DMA channel + * @desc: DMA transaction descriptor + */ +static void +xilinx_dma_free_tx_descriptor(struct xilinx_dma_chan *chan, + struct xilinx_dma_tx_descriptor *desc) +{ + struct xilinx_vdma_tx_segment *segment, *next; + struct xilinx_cdma_tx_segment *cdma_segment, *cdma_next; + struct xilinx_axidma_tx_segment *axidma_segment, *axidma_next; + + if (!desc) + return; + + if (chan->xdev->dma_config->dmatype == XDMA_TYPE_VDMA) { + list_for_each_entry_safe(segment, next, &desc->segments, node) { + list_del(&segment->node); + xilinx_vdma_free_tx_segment(chan, segment); + } + } else if (chan->xdev->dma_config->dmatype == XDMA_TYPE_CDMA) { + list_for_each_entry_safe(cdma_segment, cdma_next, + &desc->segments, node) { + list_del(&cdma_segment->node); + xilinx_cdma_free_tx_segment(chan, cdma_segment); + } + } else { + list_for_each_entry_safe(axidma_segment, axidma_next, + &desc->segments, node) { + list_del(&axidma_segment->node); + xilinx_dma_free_tx_segment(chan, axidma_segment); + } + } + + kfree(desc); +} + +/* Required functions */ + +/** + * xilinx_dma_free_desc_list - Free descriptors list + * @chan: Driver specific DMA channel + * @list: List to parse and delete the descriptor + */ +static void xilinx_dma_free_desc_list(struct xilinx_dma_chan *chan, + struct list_head *list) +{ + struct xilinx_dma_tx_descriptor *desc, *next; + + list_for_each_entry_safe(desc, next, list, node) { + list_del(&desc->node); + xilinx_dma_free_tx_descriptor(chan, desc); + } +} + +/** + * xilinx_dma_free_descriptors - Free channel descriptors + * @chan: Driver specific DMA channel + */ +static void xilinx_dma_free_descriptors(struct xilinx_dma_chan *chan) +{ + unsigned long flags; + + spin_lock_irqsave(&chan->lock, flags); + + xilinx_dma_free_desc_list(chan, &chan->pending_list); + xilinx_dma_free_desc_list(chan, &chan->done_list); + xilinx_dma_free_desc_list(chan, &chan->active_list); + + spin_unlock_irqrestore(&chan->lock, flags); +} + +/** + * xilinx_dma_free_chan_resources - Free channel resources + * @dchan: DMA channel + */ +static void xilinx_dma_free_chan_resources(struct dma_chan *dchan) +{ + struct xilinx_dma_chan *chan = to_xilinx_chan(dchan); + unsigned long flags; + + dev_dbg(chan->dev, "Free all channel resources.\n"); + + xilinx_dma_free_descriptors(chan); + + if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) { + spin_lock_irqsave(&chan->lock, flags); + INIT_LIST_HEAD(&chan->free_seg_list); + spin_unlock_irqrestore(&chan->lock, flags); + + /* Free memory that is allocated for BD */ + dma_free_coherent(chan->dev, sizeof(*chan->seg_v) * + XILINX_DMA_NUM_DESCS, chan->seg_v, + chan->seg_p); + + /* Free Memory that is allocated for cyclic DMA Mode */ + dma_free_coherent(chan->dev, sizeof(*chan->cyclic_seg_v), + chan->cyclic_seg_v, chan->cyclic_seg_p); + } + + if (chan->xdev->dma_config->dmatype != XDMA_TYPE_AXIDMA) { + dma_pool_destroy(chan->desc_pool); + chan->desc_pool = NULL; + } +} + +/** + * xilinx_dma_chan_handle_cyclic - Cyclic dma callback + * @chan: Driver specific dma channel + * @desc: dma transaction descriptor + * @flags: flags for spin lock + */ +static void xilinx_dma_chan_handle_cyclic(struct 
xilinx_dma_chan *chan, + struct xilinx_dma_tx_descriptor *desc, + unsigned long *flags) +{ + dma_async_tx_callback callback; + void *callback_param; + + callback = desc->async_tx.callback; + callback_param = desc->async_tx.callback_param; + if (callback) { + spin_unlock_irqrestore(&chan->lock, *flags); + callback(callback_param); + spin_lock_irqsave(&chan->lock, *flags); + } +} + +/** + * xilinx_dma_chan_desc_cleanup - Clean channel descriptors + * @chan: Driver specific DMA channel + */ +static void xilinx_dma_chan_desc_cleanup(struct xilinx_dma_chan *chan) +{ + struct xilinx_dma_tx_descriptor *desc, *next; + unsigned long flags; + + spin_lock_irqsave(&chan->lock, flags); + + list_for_each_entry_safe(desc, next, &chan->done_list, node) { + struct dmaengine_desc_callback cb; + + if (desc->cyclic) { + xilinx_dma_chan_handle_cyclic(chan, desc, &flags); + break; + } + + /* Remove from the list of running transactions */ + list_del(&desc->node); + + /* Run the link descriptor callback function */ + dmaengine_desc_get_callback(&desc->async_tx, &cb); + if (dmaengine_desc_callback_valid(&cb)) { + spin_unlock_irqrestore(&chan->lock, flags); + dmaengine_desc_callback_invoke(&cb, NULL); + spin_lock_irqsave(&chan->lock, flags); + } + + /* Run any dependencies, then free the descriptor */ + dma_run_dependencies(&desc->async_tx); + xilinx_dma_free_tx_descriptor(chan, desc); + + /* + * While we ran a callback the user called a terminate function, + * which takes care of cleaning up any remaining descriptors + */ + if (chan->terminating) + break; + } + + spin_unlock_irqrestore(&chan->lock, flags); +} + +/** + * xilinx_dma_do_tasklet - Schedule completion tasklet + * @data: Pointer to the Xilinx DMA channel structure + */ +static void xilinx_dma_do_tasklet(unsigned long data) +{ + struct xilinx_dma_chan *chan = (struct xilinx_dma_chan *)data; + + xilinx_dma_chan_desc_cleanup(chan); +} + +/** + * xilinx_dma_alloc_chan_resources - Allocate channel resources + * @dchan: DMA channel + * + * Return: '0' on success and failure value on error + */ +static int xilinx_dma_alloc_chan_resources(struct dma_chan *dchan) +{ + struct xilinx_dma_chan *chan = to_xilinx_chan(dchan); + int i; + + /* Has this channel already been allocated? */ + if (chan->desc_pool) + return 0; + + /* + * We need the descriptor to be aligned to 64bytes + * for meeting Xilinx VDMA specification requirement. + */ + if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) { + /* Allocate the buffer descriptors. 
*/ + chan->seg_v = dma_zalloc_coherent(chan->dev, + sizeof(*chan->seg_v) * + XILINX_DMA_NUM_DESCS, + &chan->seg_p, GFP_KERNEL); + if (!chan->seg_v) { + dev_err(chan->dev, + "unable to allocate channel %d descriptors\n", + chan->id); + return -ENOMEM; + } + + for (i = 0; i < XILINX_DMA_NUM_DESCS; i++) { + chan->seg_v[i].hw.next_desc = + lower_32_bits(chan->seg_p + sizeof(*chan->seg_v) * + ((i + 1) % XILINX_DMA_NUM_DESCS)); + chan->seg_v[i].hw.next_desc_msb = + upper_32_bits(chan->seg_p + sizeof(*chan->seg_v) * + ((i + 1) % XILINX_DMA_NUM_DESCS)); + chan->seg_v[i].phys = chan->seg_p + + sizeof(*chan->seg_v) * i; + list_add_tail(&chan->seg_v[i].node, + &chan->free_seg_list); + } + } else if (chan->xdev->dma_config->dmatype == XDMA_TYPE_CDMA) { + chan->desc_pool = dma_pool_create("xilinx_cdma_desc_pool", + chan->dev, + sizeof(struct xilinx_cdma_tx_segment), + __alignof__(struct xilinx_cdma_tx_segment), + 0); + } else { + chan->desc_pool = dma_pool_create("xilinx_vdma_desc_pool", + chan->dev, + sizeof(struct xilinx_vdma_tx_segment), + __alignof__(struct xilinx_vdma_tx_segment), + 0); + } + + if (!chan->desc_pool && + (chan->xdev->dma_config->dmatype != XDMA_TYPE_AXIDMA)) { + dev_err(chan->dev, + "unable to allocate channel %d descriptor pool\n", + chan->id); + return -ENOMEM; + } + + if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) { + /* + * For cyclic DMA mode we need to program the tail Descriptor + * register with a value which is not a part of the BD chain + * so allocating a desc segment during channel allocation for + * programming tail descriptor. + */ + chan->cyclic_seg_v = dma_zalloc_coherent(chan->dev, + sizeof(*chan->cyclic_seg_v), + &chan->cyclic_seg_p, GFP_KERNEL); + if (!chan->cyclic_seg_v) { + dev_err(chan->dev, + "unable to allocate desc segment for cyclic DMA\n"); + return -ENOMEM; + } + chan->cyclic_seg_v->phys = chan->cyclic_seg_p; + } + + dma_cookie_init(dchan); + + if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) { + /* For AXI DMA resetting once channel will reset the + * other channel as well so enable the interrupts here. 
+ */ + dma_ctrl_set(chan, XILINX_DMA_REG_DMACR, + XILINX_DMA_DMAXR_ALL_IRQ_MASK); + } + + if ((chan->xdev->dma_config->dmatype == XDMA_TYPE_CDMA) && chan->has_sg) + dma_ctrl_set(chan, XILINX_DMA_REG_DMACR, + XILINX_CDMA_CR_SGMODE); + + return 0; +} + +/** + * xilinx_dma_tx_status - Get DMA transaction status + * @dchan: DMA channel + * @cookie: Transaction identifier + * @txstate: Transaction state + * + * Return: DMA transaction status + */ +static enum dma_status xilinx_dma_tx_status(struct dma_chan *dchan, + dma_cookie_t cookie, + struct dma_tx_state *txstate) +{ + struct xilinx_dma_chan *chan = to_xilinx_chan(dchan); + struct xilinx_dma_tx_descriptor *desc; + struct xilinx_axidma_tx_segment *segment; + struct xilinx_axidma_desc_hw *hw; + enum dma_status ret; + unsigned long flags; + u32 residue = 0; + + ret = dma_cookie_status(dchan, cookie, txstate); + if (ret == DMA_COMPLETE || !txstate) + return ret; + + if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) { + spin_lock_irqsave(&chan->lock, flags); + + desc = list_last_entry(&chan->active_list, + struct xilinx_dma_tx_descriptor, node); + if (chan->has_sg) { + list_for_each_entry(segment, &desc->segments, node) { + hw = &segment->hw; + residue += (hw->control - hw->status) & + XILINX_DMA_MAX_TRANS_LEN; + } + } + spin_unlock_irqrestore(&chan->lock, flags); + + chan->residue = residue; + dma_set_residue(txstate, chan->residue); + } + + return ret; +} + +/** + * xilinx_dma_stop_transfer - Halt DMA channel + * @chan: Driver specific DMA channel + * + * Return: '0' on success and failure value on error + */ +static int xilinx_dma_stop_transfer(struct xilinx_dma_chan *chan) +{ + u32 val; + + dma_ctrl_clr(chan, XILINX_DMA_REG_DMACR, XILINX_DMA_DMACR_RUNSTOP); + + /* Wait for the hardware to halt */ + return xilinx_dma_poll_timeout(chan, XILINX_DMA_REG_DMASR, val, + val & XILINX_DMA_DMASR_HALTED, 0, + XILINX_DMA_LOOP_COUNT); +} + +/** + * xilinx_cdma_stop_transfer - Wait for the current transfer to complete + * @chan: Driver specific DMA channel + * + * Return: '0' on success and failure value on error + */ +static int xilinx_cdma_stop_transfer(struct xilinx_dma_chan *chan) +{ + u32 val; + + return xilinx_dma_poll_timeout(chan, XILINX_DMA_REG_DMASR, val, + val & XILINX_DMA_DMASR_IDLE, 0, + XILINX_DMA_LOOP_COUNT); +} + +/** + * xilinx_dma_start - Start DMA channel + * @chan: Driver specific DMA channel + */ +static void xilinx_dma_start(struct xilinx_dma_chan *chan) +{ + int err; + u32 val; + + dma_ctrl_set(chan, XILINX_DMA_REG_DMACR, XILINX_DMA_DMACR_RUNSTOP); + + /* Wait for the hardware to start */ + err = xilinx_dma_poll_timeout(chan, XILINX_DMA_REG_DMASR, val, + !(val & XILINX_DMA_DMASR_HALTED), 0, + XILINX_DMA_LOOP_COUNT); + + if (err) { + dev_err(chan->dev, "Cannot start channel %p: %x\n", + chan, dma_ctrl_read(chan, XILINX_DMA_REG_DMASR)); + + chan->err = true; + } +} + +/** + * xilinx_vdma_start_transfer - Starts VDMA transfer + * @chan: Driver specific channel struct pointer + */ +static void xilinx_vdma_start_transfer(struct xilinx_dma_chan *chan) +{ + struct xilinx_vdma_config *config = &chan->config; + struct xilinx_dma_tx_descriptor *desc, *tail_desc; + u32 reg, j; + struct xilinx_vdma_tx_segment *tail_segment; + + /* This function was invoked with lock held */ + if (chan->err) + return; + + if (!chan->idle) + return; + + if (list_empty(&chan->pending_list)) + return; + + desc = list_first_entry(&chan->pending_list, + struct xilinx_dma_tx_descriptor, node); + tail_desc = list_last_entry(&chan->pending_list, + struct 
xilinx_dma_tx_descriptor, node); + + tail_segment = list_last_entry(&tail_desc->segments, + struct xilinx_vdma_tx_segment, node); + + /* + * If hardware is idle, then all descriptors on the running lists are + * done, start new transfers + */ + if (chan->has_sg) + dma_ctrl_write(chan, XILINX_DMA_REG_CURDESC, + desc->async_tx.phys); + + /* Configure the hardware using info in the config structure */ + if (chan->has_vflip) { + reg = dma_read(chan, XILINX_VDMA_REG_ENABLE_VERTICAL_FLIP); + reg &= ~XILINX_VDMA_ENABLE_VERTICAL_FLIP; + reg |= config->vflip_en; + dma_write(chan, XILINX_VDMA_REG_ENABLE_VERTICAL_FLIP, + reg); + } + + reg = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR); + + if (config->frm_cnt_en) + reg |= XILINX_DMA_DMACR_FRAMECNT_EN; + else + reg &= ~XILINX_DMA_DMACR_FRAMECNT_EN; + + /* + * With SG, start with circular mode, so that BDs can be fetched. + * In direct register mode, if not parking, enable circular mode + */ + if (chan->has_sg || !config->park) + reg |= XILINX_DMA_DMACR_CIRC_EN; + + if (config->park) + reg &= ~XILINX_DMA_DMACR_CIRC_EN; + + dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, reg); + + j = chan->desc_submitcount; + reg = dma_read(chan, XILINX_DMA_REG_PARK_PTR); + if (chan->direction == DMA_MEM_TO_DEV) { + reg &= ~XILINX_DMA_PARK_PTR_RD_REF_MASK; + reg |= j << XILINX_DMA_PARK_PTR_RD_REF_SHIFT; + } else { + reg &= ~XILINX_DMA_PARK_PTR_WR_REF_MASK; + reg |= j << XILINX_DMA_PARK_PTR_WR_REF_SHIFT; + } + dma_write(chan, XILINX_DMA_REG_PARK_PTR, reg); + + /* Start the hardware */ + xilinx_dma_start(chan); + + if (chan->err) + return; + + /* Start the transfer */ + if (chan->has_sg) { + dma_ctrl_write(chan, XILINX_DMA_REG_TAILDESC, + tail_segment->phys); + list_splice_tail_init(&chan->pending_list, &chan->active_list); + chan->desc_pendingcount = 0; + } else { + struct xilinx_vdma_tx_segment *segment, *last = NULL; + int i = 0; + + if (chan->desc_submitcount < chan->num_frms) + i = chan->desc_submitcount; + + list_for_each_entry(segment, &desc->segments, node) { + if (chan->ext_addr) + vdma_desc_write_64(chan, + XILINX_VDMA_REG_START_ADDRESS_64(i++), + segment->hw.buf_addr, + segment->hw.buf_addr_msb); + else + vdma_desc_write(chan, + XILINX_VDMA_REG_START_ADDRESS(i++), + segment->hw.buf_addr); + + last = segment; + } + + if (!last) + return; + + /* HW expects these parameters to be same for one transaction */ + vdma_desc_write(chan, XILINX_DMA_REG_HSIZE, last->hw.hsize); + vdma_desc_write(chan, XILINX_DMA_REG_FRMDLY_STRIDE, + last->hw.stride); + vdma_desc_write(chan, XILINX_DMA_REG_VSIZE, last->hw.vsize); + + chan->desc_submitcount++; + chan->desc_pendingcount--; + list_del(&desc->node); + list_add_tail(&desc->node, &chan->active_list); + if (chan->desc_submitcount == chan->num_frms) + chan->desc_submitcount = 0; + } + + chan->idle = false; +} + +/** + * xilinx_cdma_start_transfer - Starts cdma transfer + * @chan: Driver specific channel struct pointer + */ +static void xilinx_cdma_start_transfer(struct xilinx_dma_chan *chan) +{ + struct xilinx_dma_tx_descriptor *head_desc, *tail_desc; + struct xilinx_cdma_tx_segment *tail_segment; + u32 ctrl_reg = dma_read(chan, XILINX_DMA_REG_DMACR); + + if (chan->err) + return; + + if (!chan->idle) + return; + + if (list_empty(&chan->pending_list)) + return; + + head_desc = list_first_entry(&chan->pending_list, + struct xilinx_dma_tx_descriptor, node); + tail_desc = list_last_entry(&chan->pending_list, + struct xilinx_dma_tx_descriptor, node); + tail_segment = list_last_entry(&tail_desc->segments, + struct xilinx_cdma_tx_segment, node); 
+ + if (chan->desc_pendingcount <= XILINX_DMA_COALESCE_MAX) { + ctrl_reg &= ~XILINX_DMA_CR_COALESCE_MAX; + ctrl_reg |= chan->desc_pendingcount << + XILINX_DMA_CR_COALESCE_SHIFT; + dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, ctrl_reg); + } + + if (chan->has_sg) { + dma_ctrl_clr(chan, XILINX_DMA_REG_DMACR, + XILINX_CDMA_CR_SGMODE); + + dma_ctrl_set(chan, XILINX_DMA_REG_DMACR, + XILINX_CDMA_CR_SGMODE); + + xilinx_write(chan, XILINX_DMA_REG_CURDESC, + head_desc->async_tx.phys); + + /* Update tail ptr register which will start the transfer */ + xilinx_write(chan, XILINX_DMA_REG_TAILDESC, + tail_segment->phys); + } else { + /* In simple mode */ + struct xilinx_cdma_tx_segment *segment; + struct xilinx_cdma_desc_hw *hw; + + segment = list_first_entry(&head_desc->segments, + struct xilinx_cdma_tx_segment, + node); + + hw = &segment->hw; + + xilinx_write(chan, XILINX_CDMA_REG_SRCADDR, hw->src_addr); + xilinx_write(chan, XILINX_CDMA_REG_DSTADDR, hw->dest_addr); + + /* Start the transfer */ + dma_ctrl_write(chan, XILINX_DMA_REG_BTT, + hw->control & XILINX_DMA_MAX_TRANS_LEN); + } + + list_splice_tail_init(&chan->pending_list, &chan->active_list); + chan->desc_pendingcount = 0; + chan->idle = false; +} + +/** + * xilinx_dma_start_transfer - Starts DMA transfer + * @chan: Driver specific channel struct pointer + */ +static void xilinx_dma_start_transfer(struct xilinx_dma_chan *chan) +{ + struct xilinx_dma_tx_descriptor *head_desc, *tail_desc; + struct xilinx_axidma_tx_segment *tail_segment; + u32 reg; + + if (chan->err) + return; + + if (list_empty(&chan->pending_list)) + return; + + if (!chan->idle) + return; + + head_desc = list_first_entry(&chan->pending_list, + struct xilinx_dma_tx_descriptor, node); + tail_desc = list_last_entry(&chan->pending_list, + struct xilinx_dma_tx_descriptor, node); + tail_segment = list_last_entry(&tail_desc->segments, + struct xilinx_axidma_tx_segment, node); + + reg = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR); + + if (chan->desc_pendingcount <= XILINX_DMA_COALESCE_MAX) { + reg &= ~XILINX_DMA_CR_COALESCE_MAX; + reg |= chan->desc_pendingcount << + XILINX_DMA_CR_COALESCE_SHIFT; + dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, reg); + } + + if (chan->has_sg && !chan->xdev->mcdma) + xilinx_write(chan, XILINX_DMA_REG_CURDESC, + head_desc->async_tx.phys); + + if (chan->has_sg && chan->xdev->mcdma) { + if (chan->direction == DMA_MEM_TO_DEV) { + dma_ctrl_write(chan, XILINX_DMA_REG_CURDESC, + head_desc->async_tx.phys); + } else { + if (!chan->tdest) { + dma_ctrl_write(chan, XILINX_DMA_REG_CURDESC, + head_desc->async_tx.phys); + } else { + dma_ctrl_write(chan, + XILINX_DMA_MCRX_CDESC(chan->tdest), + head_desc->async_tx.phys); + } + } + } + + xilinx_dma_start(chan); + + if (chan->err) + return; + + /* Start the transfer */ + if (chan->has_sg && !chan->xdev->mcdma) { + if (chan->cyclic) + xilinx_write(chan, XILINX_DMA_REG_TAILDESC, + chan->cyclic_seg_v->phys); + else + xilinx_write(chan, XILINX_DMA_REG_TAILDESC, + tail_segment->phys); + } else if (chan->has_sg && chan->xdev->mcdma) { + if (chan->direction == DMA_MEM_TO_DEV) { + dma_ctrl_write(chan, XILINX_DMA_REG_TAILDESC, + tail_segment->phys); + } else { + if (!chan->tdest) { + dma_ctrl_write(chan, XILINX_DMA_REG_TAILDESC, + tail_segment->phys); + } else { + dma_ctrl_write(chan, + XILINX_DMA_MCRX_TDESC(chan->tdest), + tail_segment->phys); + } + } + } else { + struct xilinx_axidma_tx_segment *segment; + struct xilinx_axidma_desc_hw *hw; + + segment = list_first_entry(&head_desc->segments, + struct xilinx_axidma_tx_segment, + node); + hw 
= &segment->hw; + + xilinx_write(chan, XILINX_DMA_REG_SRCDSTADDR, hw->buf_addr); + + /* Start the transfer */ + dma_ctrl_write(chan, XILINX_DMA_REG_BTT, + hw->control & XILINX_DMA_MAX_TRANS_LEN); + } + + list_splice_tail_init(&chan->pending_list, &chan->active_list); + chan->desc_pendingcount = 0; + chan->idle = false; +} + +/** + * xilinx_dma_issue_pending - Issue pending transactions + * @dchan: DMA channel + */ +static void xilinx_dma_issue_pending(struct dma_chan *dchan) +{ + struct xilinx_dma_chan *chan = to_xilinx_chan(dchan); + unsigned long flags; + + spin_lock_irqsave(&chan->lock, flags); + chan->start_transfer(chan); + spin_unlock_irqrestore(&chan->lock, flags); +} + +/** + * xilinx_dma_complete_descriptor - Mark the active descriptor as complete + * @chan : xilinx DMA channel + * + * CONTEXT: hardirq + */ +static void xilinx_dma_complete_descriptor(struct xilinx_dma_chan *chan) +{ + struct xilinx_dma_tx_descriptor *desc, *next; + + /* This function was invoked with lock held */ + if (list_empty(&chan->active_list)) + return; + + list_for_each_entry_safe(desc, next, &chan->active_list, node) { + list_del(&desc->node); + if (!desc->cyclic) + dma_cookie_complete(&desc->async_tx); + list_add_tail(&desc->node, &chan->done_list); + } +} + +/** + * xilinx_dma_reset - Reset DMA channel + * @chan: Driver specific DMA channel + * + * Return: '0' on success and failure value on error + */ +static int xilinx_dma_reset(struct xilinx_dma_chan *chan) +{ + int err; + u32 tmp; + + dma_ctrl_set(chan, XILINX_DMA_REG_DMACR, XILINX_DMA_DMACR_RESET); + + /* Wait for the hardware to finish reset */ + err = xilinx_dma_poll_timeout(chan, XILINX_DMA_REG_DMACR, tmp, + !(tmp & XILINX_DMA_DMACR_RESET), 0, + XILINX_DMA_LOOP_COUNT); + + if (err) { + dev_err(chan->dev, "reset timeout, cr %x, sr %x\n", + dma_ctrl_read(chan, XILINX_DMA_REG_DMACR), + dma_ctrl_read(chan, XILINX_DMA_REG_DMASR)); + return -ETIMEDOUT; + } + + chan->err = false; + chan->idle = true; + chan->desc_pendingcount = 0; + chan->desc_submitcount = 0; + + return err; +} + +/** + * xilinx_dma_chan_reset - Reset DMA channel and enable interrupts + * @chan: Driver specific DMA channel + * + * Return: '0' on success and failure value on error + */ +static int xilinx_dma_chan_reset(struct xilinx_dma_chan *chan) +{ + int err; + + /* Reset VDMA */ + err = xilinx_dma_reset(chan); + if (err) + return err; + + /* Enable interrupts */ + dma_ctrl_set(chan, XILINX_DMA_REG_DMACR, + XILINX_DMA_DMAXR_ALL_IRQ_MASK); + + return 0; +} + +/** + * xilinx_dma_irq_handler - DMA Interrupt handler + * @irq: IRQ number + * @data: Pointer to the Xilinx DMA channel structure + * + * Return: IRQ_HANDLED/IRQ_NONE + */ +static irqreturn_t xilinx_dma_irq_handler(int irq, void *data) +{ + struct xilinx_dma_chan *chan = data; + u32 status; + + /* Read the status and ack the interrupts. */ + status = dma_ctrl_read(chan, XILINX_DMA_REG_DMASR); + if (!(status & XILINX_DMA_DMAXR_ALL_IRQ_MASK)) + return IRQ_NONE; + + dma_ctrl_write(chan, XILINX_DMA_REG_DMASR, + status & XILINX_DMA_DMAXR_ALL_IRQ_MASK); + + if (status & XILINX_DMA_DMASR_ERR_IRQ) { + /* + * An error occurred. If C_FLUSH_ON_FSYNC is enabled and the + * error is recoverable, ignore it. Otherwise flag the error. + * + * Only recoverable errors can be cleared in the DMASR register, + * make sure not to write to other error bits to 1. 
+ */ + u32 errors = status & XILINX_DMA_DMASR_ALL_ERR_MASK; + + dma_ctrl_write(chan, XILINX_DMA_REG_DMASR, + errors & XILINX_DMA_DMASR_ERR_RECOVER_MASK); + + if (!chan->flush_on_fsync || + (errors & ~XILINX_DMA_DMASR_ERR_RECOVER_MASK)) { + dev_err(chan->dev, + "Channel %p has errors %x, cdr %x tdr %x\n", + chan, errors, + dma_ctrl_read(chan, XILINX_DMA_REG_CURDESC), + dma_ctrl_read(chan, XILINX_DMA_REG_TAILDESC)); + chan->err = true; + } + } + + if (status & XILINX_DMA_DMASR_DLY_CNT_IRQ) { + /* + * Device takes too long to do the transfer when user requires + * responsiveness. + */ + dev_dbg(chan->dev, "Inter-packet latency too long\n"); + } + + if (status & XILINX_DMA_DMASR_FRM_CNT_IRQ) { + spin_lock(&chan->lock); + xilinx_dma_complete_descriptor(chan); + chan->idle = true; + chan->start_transfer(chan); + spin_unlock(&chan->lock); + } + + tasklet_schedule(&chan->tasklet); + return IRQ_HANDLED; +} + +/** + * append_desc_queue - Queuing descriptor + * @chan: Driver specific dma channel + * @desc: dma transaction descriptor + */ +static void append_desc_queue(struct xilinx_dma_chan *chan, + struct xilinx_dma_tx_descriptor *desc) +{ + struct xilinx_vdma_tx_segment *tail_segment; + struct xilinx_dma_tx_descriptor *tail_desc; + struct xilinx_axidma_tx_segment *axidma_tail_segment; + struct xilinx_cdma_tx_segment *cdma_tail_segment; + + if (list_empty(&chan->pending_list)) + goto append; + + /* + * Add the hardware descriptor to the chain of hardware descriptors + * that already exists in memory. + */ + tail_desc = list_last_entry(&chan->pending_list, + struct xilinx_dma_tx_descriptor, node); + if (chan->xdev->dma_config->dmatype == XDMA_TYPE_VDMA) { + tail_segment = list_last_entry(&tail_desc->segments, + struct xilinx_vdma_tx_segment, + node); + tail_segment->hw.next_desc = (u32)desc->async_tx.phys; + } else if (chan->xdev->dma_config->dmatype == XDMA_TYPE_CDMA) { + cdma_tail_segment = list_last_entry(&tail_desc->segments, + struct xilinx_cdma_tx_segment, + node); + cdma_tail_segment->hw.next_desc = (u32)desc->async_tx.phys; + } else { + axidma_tail_segment = list_last_entry(&tail_desc->segments, + struct xilinx_axidma_tx_segment, + node); + axidma_tail_segment->hw.next_desc = (u32)desc->async_tx.phys; + } + + /* + * Add the software descriptor and all children to the list + * of pending transactions + */ +append: + list_add_tail(&desc->node, &chan->pending_list); + chan->desc_pendingcount++; + + if (chan->has_sg && (chan->xdev->dma_config->dmatype == XDMA_TYPE_VDMA) + && unlikely(chan->desc_pendingcount > chan->num_frms)) { + dev_dbg(chan->dev, "desc pendingcount is too high\n"); + chan->desc_pendingcount = chan->num_frms; + } +} + +/** + * xilinx_dma_tx_submit - Submit DMA transaction + * @tx: Async transaction descriptor + * + * Return: cookie value on success and failure value on error + */ +static dma_cookie_t xilinx_dma_tx_submit(struct dma_async_tx_descriptor *tx) +{ + struct xilinx_dma_tx_descriptor *desc = to_dma_tx_descriptor(tx); + struct xilinx_dma_chan *chan = to_xilinx_chan(tx->chan); + dma_cookie_t cookie; + unsigned long flags; + int err; + + if (chan->cyclic) { + xilinx_dma_free_tx_descriptor(chan, desc); + return -EBUSY; + } + + if (chan->err) { + /* + * If reset fails, need to hard reset the system. 
+ * Channel is no longer functional + */ + err = xilinx_dma_chan_reset(chan); + if (err < 0) + return err; + } + + spin_lock_irqsave(&chan->lock, flags); + + cookie = dma_cookie_assign(tx); + + /* Put this transaction onto the tail of the pending queue */ + append_desc_queue(chan, desc); + + if (desc->cyclic) + chan->cyclic = true; + + chan->terminating = false; + + spin_unlock_irqrestore(&chan->lock, flags); + + return cookie; +} + +/** + * xilinx_vdma_dma_prep_interleaved - prepare a descriptor for a + * DMA_SLAVE transaction + * @dchan: DMA channel + * @xt: Interleaved template pointer + * @flags: transfer ack flags + * + * Return: Async transaction descriptor on success and NULL on failure + */ +static struct dma_async_tx_descriptor * +xilinx_vdma_dma_prep_interleaved(struct dma_chan *dchan, + struct dma_interleaved_template *xt, + unsigned long flags) +{ + struct xilinx_dma_chan *chan = to_xilinx_chan(dchan); + struct xilinx_dma_tx_descriptor *desc; + struct xilinx_vdma_tx_segment *segment; + struct xilinx_vdma_desc_hw *hw; + + if (!is_slave_direction(xt->dir)) + return NULL; + + if (!xt->numf || !xt->sgl[0].size) + return NULL; + + if (xt->frame_size != 1) + return NULL; + + /* Allocate a transaction descriptor. */ + desc = xilinx_dma_alloc_tx_descriptor(chan); + if (!desc) + return NULL; + + dma_async_tx_descriptor_init(&desc->async_tx, &chan->common); + desc->async_tx.tx_submit = xilinx_dma_tx_submit; + async_tx_ack(&desc->async_tx); + + /* Allocate the link descriptor from DMA pool */ + segment = xilinx_vdma_alloc_tx_segment(chan); + if (!segment) + goto error; + + /* Fill in the hardware descriptor */ + hw = &segment->hw; + hw->vsize = xt->numf; + hw->hsize = xt->sgl[0].size; + hw->stride = (xt->sgl[0].icg + xt->sgl[0].size) << + XILINX_DMA_FRMDLY_STRIDE_STRIDE_SHIFT; + hw->stride |= chan->config.frm_dly << + XILINX_DMA_FRMDLY_STRIDE_FRMDLY_SHIFT; + + if (xt->dir != DMA_MEM_TO_DEV) { + if (chan->ext_addr) { + hw->buf_addr = lower_32_bits(xt->dst_start); + hw->buf_addr_msb = upper_32_bits(xt->dst_start); + } else { + hw->buf_addr = xt->dst_start; + } + } else { + if (chan->ext_addr) { + hw->buf_addr = lower_32_bits(xt->src_start); + hw->buf_addr_msb = upper_32_bits(xt->src_start); + } else { + hw->buf_addr = xt->src_start; + } + } + + /* Insert the segment into the descriptor segments list. */ + list_add_tail(&segment->node, &desc->segments); + + /* Link the last hardware descriptor with the first. 
*/ + segment = list_first_entry(&desc->segments, + struct xilinx_vdma_tx_segment, node); + desc->async_tx.phys = segment->phys; + + return &desc->async_tx; + +error: + xilinx_dma_free_tx_descriptor(chan, desc); + return NULL; +} + +/** + * xilinx_cdma_prep_memcpy - prepare descriptors for a memcpy transaction + * @dchan: DMA channel + * @dma_dst: destination address + * @dma_src: source address + * @len: transfer length + * @flags: transfer ack flags + * + * Return: Async transaction descriptor on success and NULL on failure + */ +static struct dma_async_tx_descriptor * +xilinx_cdma_prep_memcpy(struct dma_chan *dchan, dma_addr_t dma_dst, + dma_addr_t dma_src, size_t len, unsigned long flags) +{ + struct xilinx_dma_chan *chan = to_xilinx_chan(dchan); + struct xilinx_dma_tx_descriptor *desc; + struct xilinx_cdma_tx_segment *segment; + struct xilinx_cdma_desc_hw *hw; + + if (!len || len > XILINX_DMA_MAX_TRANS_LEN) + return NULL; + + desc = xilinx_dma_alloc_tx_descriptor(chan); + if (!desc) + return NULL; + + dma_async_tx_descriptor_init(&desc->async_tx, &chan->common); + desc->async_tx.tx_submit = xilinx_dma_tx_submit; + + /* Allocate the link descriptor from DMA pool */ + segment = xilinx_cdma_alloc_tx_segment(chan); + if (!segment) + goto error; + + hw = &segment->hw; + hw->control = len; + hw->src_addr = dma_src; + hw->dest_addr = dma_dst; + if (chan->ext_addr) { + hw->src_addr_msb = upper_32_bits(dma_src); + hw->dest_addr_msb = upper_32_bits(dma_dst); + } + + /* Insert the segment into the descriptor segments list. */ + list_add_tail(&segment->node, &desc->segments); + + desc->async_tx.phys = segment->phys; + hw->next_desc = segment->phys; + + return &desc->async_tx; + +error: + xilinx_dma_free_tx_descriptor(chan, desc); + return NULL; +} + +/** + * xilinx_dma_prep_slave_sg - prepare descriptors for a DMA_SLAVE transaction + * @dchan: DMA channel + * @sgl: scatterlist to transfer to/from + * @sg_len: number of entries in @scatterlist + * @direction: DMA direction + * @flags: transfer ack flags + * @context: APP words of the descriptor + * + * Return: Async transaction descriptor on success and NULL on failure + */ +static struct dma_async_tx_descriptor *xilinx_dma_prep_slave_sg( + struct dma_chan *dchan, struct scatterlist *sgl, unsigned int sg_len, + enum dma_transfer_direction direction, unsigned long flags, + void *context) +{ + struct xilinx_dma_chan *chan = to_xilinx_chan(dchan); + struct xilinx_dma_tx_descriptor *desc; + struct xilinx_axidma_tx_segment *segment = NULL; + u32 *app_w = (u32 *)context; + struct scatterlist *sg; + size_t copy; + size_t sg_used; + unsigned int i; + + if (!is_slave_direction(direction)) + return NULL; + + /* Allocate a transaction descriptor. 
*/ + desc = xilinx_dma_alloc_tx_descriptor(chan); + if (!desc) + return NULL; + + dma_async_tx_descriptor_init(&desc->async_tx, &chan->common); + desc->async_tx.tx_submit = xilinx_dma_tx_submit; + + /* Build transactions using information in the scatter gather list */ + for_each_sg(sgl, sg, sg_len, i) { + sg_used = 0; + + /* Loop until the entire scatterlist entry is used */ + while (sg_used < sg_dma_len(sg)) { + struct xilinx_axidma_desc_hw *hw; + + /* Get a free segment */ + segment = xilinx_axidma_alloc_tx_segment(chan); + if (!segment) + goto error; + + /* + * Calculate the maximum number of bytes to transfer, + * making sure it is less than the hw limit + */ + copy = min_t(size_t, sg_dma_len(sg) - sg_used, + XILINX_DMA_MAX_TRANS_LEN); + hw = &segment->hw; + + /* Fill in the descriptor */ + xilinx_axidma_buf(chan, hw, sg_dma_address(sg), + sg_used, 0); + + hw->control = copy; + + if (chan->direction == DMA_MEM_TO_DEV) { + if (app_w) + memcpy(hw->app, app_w, sizeof(u32) * + XILINX_DMA_NUM_APP_WORDS); + } + + sg_used += copy; + + /* + * Insert the segment into the descriptor segments + * list. + */ + list_add_tail(&segment->node, &desc->segments); + } + } + + segment = list_first_entry(&desc->segments, + struct xilinx_axidma_tx_segment, node); + desc->async_tx.phys = segment->phys; + + /* For the last DMA_MEM_TO_DEV transfer, set EOP */ + if (chan->direction == DMA_MEM_TO_DEV) { + segment->hw.control |= XILINX_DMA_BD_SOP; + segment = list_last_entry(&desc->segments, + struct xilinx_axidma_tx_segment, + node); + segment->hw.control |= XILINX_DMA_BD_EOP; + } + + return &desc->async_tx; + +error: + xilinx_dma_free_tx_descriptor(chan, desc); + return NULL; +} + +/** + * xilinx_dma_prep_dma_cyclic - prepare descriptors for a DMA_SLAVE transaction + * @dchan: DMA channel + * @buf_addr: Physical address of the buffer + * @buf_len: Total length of the cyclic buffers + * @period_len: length of individual cyclic buffer + * @direction: DMA direction + * @flags: transfer ack flags + * + * Return: Async transaction descriptor on success and NULL on failure + */ +static struct dma_async_tx_descriptor *xilinx_dma_prep_dma_cyclic( + struct dma_chan *dchan, dma_addr_t buf_addr, size_t buf_len, + size_t period_len, enum dma_transfer_direction direction, + unsigned long flags) +{ + struct xilinx_dma_chan *chan = to_xilinx_chan(dchan); + struct xilinx_dma_tx_descriptor *desc; + struct xilinx_axidma_tx_segment *segment, *head_segment, *prev = NULL; + size_t copy, sg_used; + unsigned int num_periods; + int i; + u32 reg; + + if (!period_len) + return NULL; + + num_periods = buf_len / period_len; + + if (!num_periods) + return NULL; + + if (!is_slave_direction(direction)) + return NULL; + + /* Allocate a transaction descriptor. 
*/ + desc = xilinx_dma_alloc_tx_descriptor(chan); + if (!desc) + return NULL; + + chan->direction = direction; + dma_async_tx_descriptor_init(&desc->async_tx, &chan->common); + desc->async_tx.tx_submit = xilinx_dma_tx_submit; + + for (i = 0; i < num_periods; ++i) { + sg_used = 0; + + while (sg_used < period_len) { + struct xilinx_axidma_desc_hw *hw; + + /* Get a free segment */ + segment = xilinx_axidma_alloc_tx_segment(chan); + if (!segment) + goto error; + + /* + * Calculate the maximum number of bytes to transfer, + * making sure it is less than the hw limit + */ + copy = min_t(size_t, period_len - sg_used, + XILINX_DMA_MAX_TRANS_LEN); + hw = &segment->hw; + xilinx_axidma_buf(chan, hw, buf_addr, sg_used, + period_len * i); + hw->control = copy; + + if (prev) + prev->hw.next_desc = segment->phys; + + prev = segment; + sg_used += copy; + + /* + * Insert the segment into the descriptor segments + * list. + */ + list_add_tail(&segment->node, &desc->segments); + } + } + + head_segment = list_first_entry(&desc->segments, + struct xilinx_axidma_tx_segment, node); + desc->async_tx.phys = head_segment->phys; + + desc->cyclic = true; + reg = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR); + reg |= XILINX_DMA_CR_CYCLIC_BD_EN_MASK; + dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, reg); + + segment = list_last_entry(&desc->segments, + struct xilinx_axidma_tx_segment, + node); + segment->hw.next_desc = (u32) head_segment->phys; + + /* For the last DMA_MEM_TO_DEV transfer, set EOP */ + if (direction == DMA_MEM_TO_DEV) { + head_segment->hw.control |= XILINX_DMA_BD_SOP; + segment->hw.control |= XILINX_DMA_BD_EOP; + } + + return &desc->async_tx; + +error: + xilinx_dma_free_tx_descriptor(chan, desc); + return NULL; +} + +/** + * xilinx_dma_prep_interleaved - prepare a descriptor for a + * DMA_SLAVE transaction + * @dchan: DMA channel + * @xt: Interleaved template pointer + * @flags: transfer ack flags + * + * Return: Async transaction descriptor on success and NULL on failure + */ +static struct dma_async_tx_descriptor * +xilinx_dma_prep_interleaved(struct dma_chan *dchan, + struct dma_interleaved_template *xt, + unsigned long flags) +{ + struct xilinx_dma_chan *chan = to_xilinx_chan(dchan); + struct xilinx_dma_tx_descriptor *desc; + struct xilinx_axidma_tx_segment *segment; + struct xilinx_axidma_desc_hw *hw; + + if (!is_slave_direction(xt->dir)) + return NULL; + + if (!xt->numf || !xt->sgl[0].size) + return NULL; + + if (xt->frame_size != 1) + return NULL; + + /* Allocate a transaction descriptor. */ + desc = xilinx_dma_alloc_tx_descriptor(chan); + if (!desc) + return NULL; + + chan->direction = xt->dir; + dma_async_tx_descriptor_init(&desc->async_tx, &chan->common); + desc->async_tx.tx_submit = xilinx_dma_tx_submit; + + /* Get a free segment */ + segment = xilinx_axidma_alloc_tx_segment(chan); + if (!segment) + goto error; + + hw = &segment->hw; + + /* Fill in the descriptor */ + if (xt->dir != DMA_MEM_TO_DEV) + hw->buf_addr = xt->dst_start; + else + hw->buf_addr = xt->src_start; + + hw->mcdma_control = chan->tdest & XILINX_DMA_BD_TDEST_MASK; + hw->vsize_stride = (xt->numf << XILINX_DMA_BD_VSIZE_SHIFT) & + XILINX_DMA_BD_VSIZE_MASK; + hw->vsize_stride |= (xt->sgl[0].icg + xt->sgl[0].size) & + XILINX_DMA_BD_STRIDE_MASK; + hw->control = xt->sgl[0].size & XILINX_DMA_BD_HSIZE_MASK; + + /* + * Insert the segment into the descriptor segments + * list. 
+ */
+ list_add_tail(&segment->node, &desc->segments);
+
+ segment = list_first_entry(&desc->segments,
+ struct xilinx_axidma_tx_segment, node);
+ desc->async_tx.phys = segment->phys;
+
+ /* For the last DMA_MEM_TO_DEV transfer, set EOP */
+ if (xt->dir == DMA_MEM_TO_DEV) {
+ segment->hw.control |= XILINX_DMA_BD_SOP;
+ segment = list_last_entry(&desc->segments,
+ struct xilinx_axidma_tx_segment,
+ node);
+ segment->hw.control |= XILINX_DMA_BD_EOP;
+ }
+
+ return &desc->async_tx;
+
+error:
+ xilinx_dma_free_tx_descriptor(chan, desc);
+ return NULL;
+}
+
+/**
+ * xilinx_dma_terminate_all - Halt the channel and free descriptors
+ * @dchan: Driver specific DMA Channel pointer
+ *
+ * Return: '0' always.
+ */
+static int xilinx_dma_terminate_all(struct dma_chan *dchan)
+{
+ struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
+ u32 reg;
+ int err;
+
+ if (chan->cyclic)
+ xilinx_dma_chan_reset(chan);
+
+ err = chan->stop_transfer(chan);
+ if (err) {
+ dev_err(chan->dev, "Cannot stop channel %p: %x\n",
+ chan, dma_ctrl_read(chan, XILINX_DMA_REG_DMASR));
+ chan->err = true;
+ }
+
+ /* Remove and free all of the descriptors in the lists */
+ chan->terminating = true;
+ xilinx_dma_free_descriptors(chan);
+ chan->idle = true;
+
+ if (chan->cyclic) {
+ reg = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR);
+ reg &= ~XILINX_DMA_CR_CYCLIC_BD_EN_MASK;
+ dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, reg);
+ chan->cyclic = false;
+ }
+
+ if ((chan->xdev->dma_config->dmatype == XDMA_TYPE_CDMA) && chan->has_sg)
+ dma_ctrl_clr(chan, XILINX_DMA_REG_DMACR,
+ XILINX_CDMA_CR_SGMODE);
+
+ return 0;
+}
+
+/**
+ * xilinx_vdma_channel_set_config - Configure VDMA channel
+ * Run-time configuration for AXI VDMA, supports:
+ * . halt the channel
+ * . configure interrupt coalescing and inter-packet delay threshold
+ * . start/stop parking
+ * . 
enable genlock + * + * @dchan: DMA channel + * @cfg: VDMA device configuration pointer + * + * Return: '0' on success and failure value on error + */ +int xilinx_vdma_channel_set_config(struct dma_chan *dchan, + struct xilinx_vdma_config *cfg) +{ + struct xilinx_dma_chan *chan = to_xilinx_chan(dchan); + u32 dmacr; + + if (cfg->reset) + return xilinx_dma_chan_reset(chan); + + dmacr = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR); + + chan->config.frm_dly = cfg->frm_dly; + chan->config.park = cfg->park; + + /* genlock settings */ + chan->config.gen_lock = cfg->gen_lock; + chan->config.master = cfg->master; + + dmacr &= ~XILINX_DMA_DMACR_GENLOCK_EN; + if (cfg->gen_lock && chan->genlock) { + dmacr |= XILINX_DMA_DMACR_GENLOCK_EN; + dmacr &= ~XILINX_DMA_DMACR_MASTER_MASK; + dmacr |= cfg->master << XILINX_DMA_DMACR_MASTER_SHIFT; + } + + chan->config.frm_cnt_en = cfg->frm_cnt_en; + chan->config.vflip_en = cfg->vflip_en; + + if (cfg->park) + chan->config.park_frm = cfg->park_frm; + else + chan->config.park_frm = -1; + + chan->config.coalesc = cfg->coalesc; + chan->config.delay = cfg->delay; + + if (cfg->coalesc <= XILINX_DMA_DMACR_FRAME_COUNT_MAX) { + dmacr &= ~XILINX_DMA_DMACR_FRAME_COUNT_MASK; + dmacr |= cfg->coalesc << XILINX_DMA_DMACR_FRAME_COUNT_SHIFT; + chan->config.coalesc = cfg->coalesc; + } + + if (cfg->delay <= XILINX_DMA_DMACR_DELAY_MAX) { + dmacr &= ~XILINX_DMA_DMACR_DELAY_MASK; + dmacr |= cfg->delay << XILINX_DMA_DMACR_DELAY_SHIFT; + chan->config.delay = cfg->delay; + } + + /* FSync Source selection */ + dmacr &= ~XILINX_DMA_DMACR_FSYNCSRC_MASK; + dmacr |= cfg->ext_fsync << XILINX_DMA_DMACR_FSYNCSRC_SHIFT; + + dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, dmacr); + + return 0; +} +EXPORT_SYMBOL(xilinx_vdma_channel_set_config); + +/* ----------------------------------------------------------------------------- + * Probe and remove + */ + +/** + * xilinx_dma_chan_remove - Per Channel remove function + * @chan: Driver specific DMA channel + */ +static void xilinx_dma_chan_remove(struct xilinx_dma_chan *chan) +{ + /* Disable all interrupts */ + dma_ctrl_clr(chan, XILINX_DMA_REG_DMACR, + XILINX_DMA_DMAXR_ALL_IRQ_MASK); + + if (chan->irq > 0) + free_irq(chan->irq, chan); + + tasklet_kill(&chan->tasklet); + + list_del(&chan->common.device_node); +} + +static int axidma_clk_init(struct platform_device *pdev, struct clk **axi_clk, + struct clk **tx_clk, struct clk **rx_clk, + struct clk **sg_clk, struct clk **tmp_clk) +{ + int err; + + *tmp_clk = NULL; + + *axi_clk = devm_clk_get(&pdev->dev, "s_axi_lite_aclk"); + if (IS_ERR(*axi_clk)) { + err = PTR_ERR(*axi_clk); + dev_err(&pdev->dev, "failed to get axi_aclk (%d)\n", err); + return err; + } + + *tx_clk = devm_clk_get(&pdev->dev, "m_axi_mm2s_aclk"); + if (IS_ERR(*tx_clk)) + *tx_clk = NULL; + + *rx_clk = devm_clk_get(&pdev->dev, "m_axi_s2mm_aclk"); + if (IS_ERR(*rx_clk)) + *rx_clk = NULL; + + *sg_clk = devm_clk_get(&pdev->dev, "m_axi_sg_aclk"); + if (IS_ERR(*sg_clk)) + *sg_clk = NULL; + + err = clk_prepare_enable(*axi_clk); + if (err) { + dev_err(&pdev->dev, "failed to enable axi_clk (%d)\n", err); + return err; + } + + err = clk_prepare_enable(*tx_clk); + if (err) { + dev_err(&pdev->dev, "failed to enable tx_clk (%d)\n", err); + goto err_disable_axiclk; + } + + err = clk_prepare_enable(*rx_clk); + if (err) { + dev_err(&pdev->dev, "failed to enable rx_clk (%d)\n", err); + goto err_disable_txclk; + } + + err = clk_prepare_enable(*sg_clk); + if (err) { + dev_err(&pdev->dev, "failed to enable sg_clk (%d)\n", err); + goto err_disable_rxclk; + } + + return 
0; + +err_disable_rxclk: + clk_disable_unprepare(*rx_clk); +err_disable_txclk: + clk_disable_unprepare(*tx_clk); +err_disable_axiclk: + clk_disable_unprepare(*axi_clk); + + return err; +} + +static int axicdma_clk_init(struct platform_device *pdev, struct clk **axi_clk, + struct clk **dev_clk, struct clk **tmp_clk, + struct clk **tmp1_clk, struct clk **tmp2_clk) +{ + int err; + + *tmp_clk = NULL; + *tmp1_clk = NULL; + *tmp2_clk = NULL; + + *axi_clk = devm_clk_get(&pdev->dev, "s_axi_lite_aclk"); + if (IS_ERR(*axi_clk)) { + err = PTR_ERR(*axi_clk); + dev_err(&pdev->dev, "failed to get axi_clk (%d)\n", err); + return err; + } + + *dev_clk = devm_clk_get(&pdev->dev, "m_axi_aclk"); + if (IS_ERR(*dev_clk)) { + err = PTR_ERR(*dev_clk); + dev_err(&pdev->dev, "failed to get dev_clk (%d)\n", err); + return err; + } + + err = clk_prepare_enable(*axi_clk); + if (err) { + dev_err(&pdev->dev, "failed to enable axi_clk (%d)\n", err); + return err; + } + + err = clk_prepare_enable(*dev_clk); + if (err) { + dev_err(&pdev->dev, "failed to enable dev_clk (%d)\n", err); + goto err_disable_axiclk; + } + + return 0; + +err_disable_axiclk: + clk_disable_unprepare(*axi_clk); + + return err; +} + +static int axivdma_clk_init(struct platform_device *pdev, struct clk **axi_clk, + struct clk **tx_clk, struct clk **txs_clk, + struct clk **rx_clk, struct clk **rxs_clk) +{ + int err; + + *axi_clk = devm_clk_get(&pdev->dev, "s_axi_lite_aclk"); + if (IS_ERR(*axi_clk)) { + err = PTR_ERR(*axi_clk); + dev_err(&pdev->dev, "failed to get axi_aclk (%d)\n", err); + return err; + } + + *tx_clk = devm_clk_get(&pdev->dev, "m_axi_mm2s_aclk"); + if (IS_ERR(*tx_clk)) + *tx_clk = NULL; + + *txs_clk = devm_clk_get(&pdev->dev, "m_axis_mm2s_aclk"); + if (IS_ERR(*txs_clk)) + *txs_clk = NULL; + + *rx_clk = devm_clk_get(&pdev->dev, "m_axi_s2mm_aclk"); + if (IS_ERR(*rx_clk)) + *rx_clk = NULL; + + *rxs_clk = devm_clk_get(&pdev->dev, "s_axis_s2mm_aclk"); + if (IS_ERR(*rxs_clk)) + *rxs_clk = NULL; + + err = clk_prepare_enable(*axi_clk); + if (err) { + dev_err(&pdev->dev, "failed to enable axi_clk (%d)\n", err); + return err; + } + + err = clk_prepare_enable(*tx_clk); + if (err) { + dev_err(&pdev->dev, "failed to enable tx_clk (%d)\n", err); + goto err_disable_axiclk; + } + + err = clk_prepare_enable(*txs_clk); + if (err) { + dev_err(&pdev->dev, "failed to enable txs_clk (%d)\n", err); + goto err_disable_txclk; + } + + err = clk_prepare_enable(*rx_clk); + if (err) { + dev_err(&pdev->dev, "failed to enable rx_clk (%d)\n", err); + goto err_disable_txsclk; + } + + err = clk_prepare_enable(*rxs_clk); + if (err) { + dev_err(&pdev->dev, "failed to enable rxs_clk (%d)\n", err); + goto err_disable_rxclk; + } + + return 0; + +err_disable_rxclk: + clk_disable_unprepare(*rx_clk); +err_disable_txsclk: + clk_disable_unprepare(*txs_clk); +err_disable_txclk: + clk_disable_unprepare(*tx_clk); +err_disable_axiclk: + clk_disable_unprepare(*axi_clk); + + return err; +} + +static void xdma_disable_allclks(struct xilinx_dma_device *xdev) +{ + clk_disable_unprepare(xdev->rxs_clk); + clk_disable_unprepare(xdev->rx_clk); + clk_disable_unprepare(xdev->txs_clk); + clk_disable_unprepare(xdev->tx_clk); + clk_disable_unprepare(xdev->axi_clk); +} + +/** + * xilinx_dma_chan_probe - Per Channel Probing + * It get channel features from the device tree entry and + * initialize special channel handling routines + * + * @xdev: Driver specific device structure + * @node: Device node + * @chan_id: DMA Channel id + * + * Return: '0' on success and failure value on error + */ +static 
int xilinx_dma_chan_probe(struct xilinx_dma_device *xdev, + struct device_node *node, int chan_id) +{ + struct xilinx_dma_chan *chan; + bool has_dre = false; + u32 value, width; + int err; + + /* Allocate and initialize the channel structure */ + chan = devm_kzalloc(xdev->dev, sizeof(*chan), GFP_KERNEL); + if (!chan) + return -ENOMEM; + + chan->dev = xdev->dev; + chan->xdev = xdev; + chan->has_sg = xdev->has_sg; + chan->desc_pendingcount = 0x0; + chan->ext_addr = xdev->ext_addr; + /* This variable ensures that descriptors are not + * Submitted when dma engine is in progress. This variable is + * Added to avoid polling for a bit in the status register to + * Know dma state in the driver hot path. + */ + chan->idle = true; + + spin_lock_init(&chan->lock); + INIT_LIST_HEAD(&chan->pending_list); + INIT_LIST_HEAD(&chan->done_list); + INIT_LIST_HEAD(&chan->active_list); + INIT_LIST_HEAD(&chan->free_seg_list); + + /* Retrieve the channel properties from the device tree */ + has_dre = of_property_read_bool(node, "xlnx,include-dre"); + + chan->genlock = of_property_read_bool(node, "xlnx,genlock-mode"); + + err = of_property_read_u32(node, "xlnx,datawidth", &value); + if (err) { + dev_err(xdev->dev, "missing xlnx,datawidth property\n"); + return err; + } + width = value >> 3; /* Convert bits to bytes */ + + /* If data width is greater than 8 bytes, DRE is not in hw */ + if (width > 8) + has_dre = false; + + if (!has_dre) + xdev->common.copy_align = (enum dmaengine_alignment)fls(width - 1); + + if (of_device_is_compatible(node, "xlnx,axi-vdma-mm2s-channel") || + of_device_is_compatible(node, "xlnx,axi-dma-mm2s-channel") || + of_device_is_compatible(node, "xlnx,axi-cdma-channel")) { + chan->direction = DMA_MEM_TO_DEV; + chan->id = chan_id; + chan->tdest = chan_id; + + chan->ctrl_offset = XILINX_DMA_MM2S_CTRL_OFFSET; + if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) { + chan->desc_offset = XILINX_VDMA_MM2S_DESC_OFFSET; + chan->config.park = 1; + + if (xdev->flush_on_fsync == XILINX_DMA_FLUSH_BOTH || + xdev->flush_on_fsync == XILINX_DMA_FLUSH_MM2S) + chan->flush_on_fsync = true; + } + } else if (of_device_is_compatible(node, + "xlnx,axi-vdma-s2mm-channel") || + of_device_is_compatible(node, + "xlnx,axi-dma-s2mm-channel")) { + chan->direction = DMA_DEV_TO_MEM; + chan->id = chan_id; + chan->tdest = chan_id - xdev->nr_channels; + chan->has_vflip = of_property_read_bool(node, + "xlnx,enable-vert-flip"); + if (chan->has_vflip) { + chan->config.vflip_en = dma_read(chan, + XILINX_VDMA_REG_ENABLE_VERTICAL_FLIP) & + XILINX_VDMA_ENABLE_VERTICAL_FLIP; + } + + chan->ctrl_offset = XILINX_DMA_S2MM_CTRL_OFFSET; + if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) { + chan->desc_offset = XILINX_VDMA_S2MM_DESC_OFFSET; + chan->config.park = 1; + + if (xdev->flush_on_fsync == XILINX_DMA_FLUSH_BOTH || + xdev->flush_on_fsync == XILINX_DMA_FLUSH_S2MM) + chan->flush_on_fsync = true; + } + } else { + dev_err(xdev->dev, "Invalid channel compatible node\n"); + return -EINVAL; + } + + /* Request the interrupt */ + chan->irq = irq_of_parse_and_map(node, 0); + err = request_irq(chan->irq, xilinx_dma_irq_handler, IRQF_SHARED, + "xilinx-dma-controller", chan); + if (err) { + dev_err(xdev->dev, "unable to request IRQ %d\n", chan->irq); + return err; + } + + if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) { + chan->start_transfer = xilinx_dma_start_transfer; + chan->stop_transfer = xilinx_dma_stop_transfer; + } else if (xdev->dma_config->dmatype == XDMA_TYPE_CDMA) { + chan->start_transfer = xilinx_cdma_start_transfer; + 
chan->stop_transfer = xilinx_cdma_stop_transfer; + } else { + chan->start_transfer = xilinx_vdma_start_transfer; + chan->stop_transfer = xilinx_dma_stop_transfer; + } + + /* Initialize the tasklet */ + tasklet_init(&chan->tasklet, xilinx_dma_do_tasklet, + (unsigned long)chan); + + /* + * Initialize the DMA channel and add it to the DMA engine channels + * list. + */ + chan->common.device = &xdev->common; + + list_add_tail(&chan->common.device_node, &xdev->common.channels); + xdev->chan[chan->id] = chan; + + /* Reset the channel */ + err = xilinx_dma_chan_reset(chan); + if (err < 0) { + dev_err(xdev->dev, "Reset channel failed\n"); + return err; + } + + return 0; +} + +/** + * xilinx_dma_child_probe - Per child node probe + * It get number of dma-channels per child node from + * device-tree and initializes all the channels. + * + * @xdev: Driver specific device structure + * @node: Device node + * + * Return: 0 always. + */ +static int xilinx_dma_child_probe(struct xilinx_dma_device *xdev, + struct device_node *node) +{ + int ret, i; + u32 nr_channels = 1; + + ret = of_property_read_u32(node, "dma-channels", &nr_channels); + if ((ret < 0) && xdev->mcdma) + dev_warn(xdev->dev, "missing dma-channels property\n"); + + for (i = 0; i < nr_channels; i++) + xilinx_dma_chan_probe(xdev, node, xdev->chan_id++); + + xdev->nr_channels += nr_channels; + + return 0; +} + +/** + * of_dma_xilinx_xlate - Translation function + * @dma_spec: Pointer to DMA specifier as found in the device tree + * @ofdma: Pointer to DMA controller data + * + * Return: DMA channel pointer on success and NULL on error + */ +static struct dma_chan *of_dma_xilinx_xlate(struct of_phandle_args *dma_spec, + struct of_dma *ofdma) +{ + struct xilinx_dma_device *xdev = ofdma->of_dma_data; + int chan_id = dma_spec->args[0]; + + if (chan_id >= xdev->nr_channels || !xdev->chan[chan_id]) + return NULL; + + return dma_get_slave_channel(&xdev->chan[chan_id]->common); +} + +static const struct xilinx_dma_config axidma_config = { + .dmatype = XDMA_TYPE_AXIDMA, + .clk_init = axidma_clk_init, +}; + +static const struct xilinx_dma_config axicdma_config = { + .dmatype = XDMA_TYPE_CDMA, + .clk_init = axicdma_clk_init, +}; + +static const struct xilinx_dma_config axivdma_config = { + .dmatype = XDMA_TYPE_VDMA, + .clk_init = axivdma_clk_init, +}; + +static const struct of_device_id xilinx_dma_of_ids[] = { + { .compatible = "xlnx,axi-dma-1.00.a", .data = &axidma_config }, + { .compatible = "xlnx,axi-cdma-1.00.a", .data = &axicdma_config }, + { .compatible = "xlnx,axi-vdma-1.00.a", .data = &axivdma_config }, + {} +}; +MODULE_DEVICE_TABLE(of, xilinx_dma_of_ids); + +/** + * xilinx_dma_probe - Driver probe function + * @pdev: Pointer to the platform_device structure + * + * Return: '0' on success and failure value on error + */ +static int xilinx_dma_probe(struct platform_device *pdev) +{ + int (*clk_init)(struct platform_device *, struct clk **, struct clk **, + struct clk **, struct clk **, struct clk **) + = axivdma_clk_init; + struct device_node *node = pdev->dev.of_node; + struct xilinx_dma_device *xdev; + struct device_node *child, *np = pdev->dev.of_node; + struct resource *io; + u32 num_frames, addr_width; + int i, err; + + /* Allocate and initialize the DMA engine structure */ + xdev = devm_kzalloc(&pdev->dev, sizeof(*xdev), GFP_KERNEL); + if (!xdev) + return -ENOMEM; + + xdev->dev = &pdev->dev; + if (np) { + const struct of_device_id *match; + + match = of_match_node(xilinx_dma_of_ids, np); + if (match && match->data) { + xdev->dma_config = 
match->data; + clk_init = xdev->dma_config->clk_init; + } + } + + err = clk_init(pdev, &xdev->axi_clk, &xdev->tx_clk, &xdev->txs_clk, + &xdev->rx_clk, &xdev->rxs_clk); + if (err) + return err; + + /* Request and map I/O memory */ + io = platform_get_resource(pdev, IORESOURCE_MEM, 0); + xdev->regs = devm_ioremap_resource(&pdev->dev, io); + if (IS_ERR(xdev->regs)) + return PTR_ERR(xdev->regs); + + /* Retrieve the DMA engine properties from the device tree */ + xdev->has_sg = of_property_read_bool(node, "xlnx,include-sg"); + if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) + xdev->mcdma = of_property_read_bool(node, "xlnx,mcdma"); + + if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) { + err = of_property_read_u32(node, "xlnx,num-fstores", + &num_frames); + if (err < 0) { + dev_err(xdev->dev, + "missing xlnx,num-fstores property\n"); + return err; + } + + err = of_property_read_u32(node, "xlnx,flush-fsync", + &xdev->flush_on_fsync); + if (err < 0) + dev_warn(xdev->dev, + "missing xlnx,flush-fsync property\n"); + } + + err = of_property_read_u32(node, "xlnx,addrwidth", &addr_width); + if (err < 0) + dev_warn(xdev->dev, "missing xlnx,addrwidth property\n"); + + if (addr_width > 32) + xdev->ext_addr = true; + else + xdev->ext_addr = false; + + /* Set the dma mask bits */ + dma_set_mask_and_coherent(xdev->dev, DMA_BIT_MASK(addr_width)); + + /* Initialize the DMA engine */ + xdev->common.dev = &pdev->dev; + + INIT_LIST_HEAD(&xdev->common.channels); + if (!(xdev->dma_config->dmatype == XDMA_TYPE_CDMA)) { + dma_cap_set(DMA_SLAVE, xdev->common.cap_mask); + dma_cap_set(DMA_PRIVATE, xdev->common.cap_mask); + } + + xdev->common.device_alloc_chan_resources = + xilinx_dma_alloc_chan_resources; + xdev->common.device_free_chan_resources = + xilinx_dma_free_chan_resources; + xdev->common.device_terminate_all = xilinx_dma_terminate_all; + xdev->common.device_tx_status = xilinx_dma_tx_status; + xdev->common.device_issue_pending = xilinx_dma_issue_pending; + if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) { + dma_cap_set(DMA_CYCLIC, xdev->common.cap_mask); + xdev->common.device_prep_slave_sg = xilinx_dma_prep_slave_sg; + xdev->common.device_prep_dma_cyclic = + xilinx_dma_prep_dma_cyclic; + xdev->common.device_prep_interleaved_dma = + xilinx_dma_prep_interleaved; + /* Residue calculation is supported by only AXI DMA */ + xdev->common.residue_granularity = + DMA_RESIDUE_GRANULARITY_SEGMENT; + } else if (xdev->dma_config->dmatype == XDMA_TYPE_CDMA) { + dma_cap_set(DMA_MEMCPY, xdev->common.cap_mask); + xdev->common.device_prep_dma_memcpy = xilinx_cdma_prep_memcpy; + } else { + xdev->common.device_prep_interleaved_dma = + xilinx_vdma_dma_prep_interleaved; + } + + platform_set_drvdata(pdev, xdev); + + /* Initialize the channels */ + for_each_child_of_node(node, child) { + err = xilinx_dma_child_probe(xdev, child); + if (err < 0) + goto disable_clks; + } + + if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) { + for (i = 0; i < xdev->nr_channels; i++) + if (xdev->chan[i]) + xdev->chan[i]->num_frms = num_frames; + } + + /* Register the DMA engine with the core */ + err = dma_async_device_register(&xdev->common); + if (err) { + dev_err(xdev->dev, "failed to register the dma device\n"); + goto error; + } + + err = of_dma_controller_register(node, of_dma_xilinx_xlate, + xdev); + if (err < 0) { + dev_err(&pdev->dev, "Unable to register DMA to DT\n"); + dma_async_device_unregister(&xdev->common); + goto error; + } + + if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) + dev_info(&pdev->dev, "Xilinx AXI DMA Engine Driver 
Probed!!\n"); + else if (xdev->dma_config->dmatype == XDMA_TYPE_CDMA) + dev_info(&pdev->dev, "Xilinx AXI CDMA Engine Driver Probed!!\n"); + else + dev_info(&pdev->dev, "Xilinx AXI VDMA Engine Driver Probed!!\n"); + + return 0; + +disable_clks: + xdma_disable_allclks(xdev); +error: + for (i = 0; i < xdev->nr_channels; i++) + if (xdev->chan[i]) + xilinx_dma_chan_remove(xdev->chan[i]); + + return err; +} + +/** + * xilinx_dma_remove - Driver remove function + * @pdev: Pointer to the platform_device structure + * + * Return: Always '0' + */ +static int xilinx_dma_remove(struct platform_device *pdev) +{ + struct xilinx_dma_device *xdev = platform_get_drvdata(pdev); + int i; + + of_dma_controller_free(pdev->dev.of_node); + + dma_async_device_unregister(&xdev->common); + + for (i = 0; i < xdev->nr_channels; i++) + if (xdev->chan[i]) + xilinx_dma_chan_remove(xdev->chan[i]); + + xdma_disable_allclks(xdev); + + return 0; +} + +static struct platform_driver xilinx_vdma_driver = { + .driver = { + .name = "xilinx-vdma", + .of_match_table = xilinx_dma_of_ids, + }, + .probe = xilinx_dma_probe, + .remove = xilinx_dma_remove, +}; + +module_platform_driver(xilinx_vdma_driver); + +MODULE_AUTHOR("Xilinx, Inc."); +MODULE_DESCRIPTION("Xilinx VDMA driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/dma/xilinx/zynqmp_dma.c b/drivers/dma/xilinx/zynqmp_dma.c new file mode 100644 index 000000000..e002ff841 --- /dev/null +++ b/drivers/dma/xilinx/zynqmp_dma.c @@ -0,0 +1,1161 @@ +/* + * DMA driver for Xilinx ZynqMP DMA Engine + * + * Copyright (C) 2016 Xilinx, Inc. All rights reserved. + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 2 of the License, or + * (at your option) any later version. 
+ */ + +#include <linux/bitops.h> +#include <linux/dmapool.h> +#include <linux/dma/xilinx_dma.h> +#include <linux/init.h> +#include <linux/interrupt.h> +#include <linux/io.h> +#include <linux/module.h> +#include <linux/of_address.h> +#include <linux/of_dma.h> +#include <linux/of_irq.h> +#include <linux/of_platform.h> +#include <linux/slab.h> +#include <linux/clk.h> +#include <linux/io-64-nonatomic-lo-hi.h> +#include <linux/pm_runtime.h> + +#include "../dmaengine.h" + +/* Register Offsets */ +#define ZYNQMP_DMA_ISR 0x100 +#define ZYNQMP_DMA_IMR 0x104 +#define ZYNQMP_DMA_IER 0x108 +#define ZYNQMP_DMA_IDS 0x10C +#define ZYNQMP_DMA_CTRL0 0x110 +#define ZYNQMP_DMA_CTRL1 0x114 +#define ZYNQMP_DMA_DATA_ATTR 0x120 +#define ZYNQMP_DMA_DSCR_ATTR 0x124 +#define ZYNQMP_DMA_SRC_DSCR_WRD0 0x128 +#define ZYNQMP_DMA_SRC_DSCR_WRD1 0x12C +#define ZYNQMP_DMA_SRC_DSCR_WRD2 0x130 +#define ZYNQMP_DMA_SRC_DSCR_WRD3 0x134 +#define ZYNQMP_DMA_DST_DSCR_WRD0 0x138 +#define ZYNQMP_DMA_DST_DSCR_WRD1 0x13C +#define ZYNQMP_DMA_DST_DSCR_WRD2 0x140 +#define ZYNQMP_DMA_DST_DSCR_WRD3 0x144 +#define ZYNQMP_DMA_SRC_START_LSB 0x158 +#define ZYNQMP_DMA_SRC_START_MSB 0x15C +#define ZYNQMP_DMA_DST_START_LSB 0x160 +#define ZYNQMP_DMA_DST_START_MSB 0x164 +#define ZYNQMP_DMA_TOTAL_BYTE 0x188 +#define ZYNQMP_DMA_RATE_CTRL 0x18C +#define ZYNQMP_DMA_IRQ_SRC_ACCT 0x190 +#define ZYNQMP_DMA_IRQ_DST_ACCT 0x194 +#define ZYNQMP_DMA_CTRL2 0x200 + +/* Interrupt registers bit field definitions */ +#define ZYNQMP_DMA_DONE BIT(10) +#define ZYNQMP_DMA_AXI_WR_DATA BIT(9) +#define ZYNQMP_DMA_AXI_RD_DATA BIT(8) +#define ZYNQMP_DMA_AXI_RD_DST_DSCR BIT(7) +#define ZYNQMP_DMA_AXI_RD_SRC_DSCR BIT(6) +#define ZYNQMP_DMA_IRQ_DST_ACCT_ERR BIT(5) +#define ZYNQMP_DMA_IRQ_SRC_ACCT_ERR BIT(4) +#define ZYNQMP_DMA_BYTE_CNT_OVRFL BIT(3) +#define ZYNQMP_DMA_DST_DSCR_DONE BIT(2) +#define ZYNQMP_DMA_INV_APB BIT(0) + +/* Control 0 register bit field definitions */ +#define ZYNQMP_DMA_OVR_FETCH BIT(7) +#define ZYNQMP_DMA_POINT_TYPE_SG BIT(6) +#define ZYNQMP_DMA_RATE_CTRL_EN BIT(3) + +/* Control 1 register bit field definitions */ +#define ZYNQMP_DMA_SRC_ISSUE GENMASK(4, 0) + +/* Data Attribute register bit field definitions */ +#define ZYNQMP_DMA_ARBURST GENMASK(27, 26) +#define ZYNQMP_DMA_ARCACHE GENMASK(25, 22) +#define ZYNQMP_DMA_ARCACHE_OFST 22 +#define ZYNQMP_DMA_ARQOS GENMASK(21, 18) +#define ZYNQMP_DMA_ARQOS_OFST 18 +#define ZYNQMP_DMA_ARLEN GENMASK(17, 14) +#define ZYNQMP_DMA_ARLEN_OFST 14 +#define ZYNQMP_DMA_AWBURST GENMASK(13, 12) +#define ZYNQMP_DMA_AWCACHE GENMASK(11, 8) +#define ZYNQMP_DMA_AWCACHE_OFST 8 +#define ZYNQMP_DMA_AWQOS GENMASK(7, 4) +#define ZYNQMP_DMA_AWQOS_OFST 4 +#define ZYNQMP_DMA_AWLEN GENMASK(3, 0) +#define ZYNQMP_DMA_AWLEN_OFST 0 + +/* Descriptor Attribute register bit field definitions */ +#define ZYNQMP_DMA_AXCOHRNT BIT(8) +#define ZYNQMP_DMA_AXCACHE GENMASK(7, 4) +#define ZYNQMP_DMA_AXCACHE_OFST 4 +#define ZYNQMP_DMA_AXQOS GENMASK(3, 0) +#define ZYNQMP_DMA_AXQOS_OFST 0 + +/* Control register 2 bit field definitions */ +#define ZYNQMP_DMA_ENABLE BIT(0) + +/* Buffer Descriptor definitions */ +#define ZYNQMP_DMA_DESC_CTRL_STOP 0x10 +#define ZYNQMP_DMA_DESC_CTRL_COMP_INT 0x4 +#define ZYNQMP_DMA_DESC_CTRL_SIZE_256 0x2 +#define ZYNQMP_DMA_DESC_CTRL_COHRNT 0x1 + +/* Interrupt Mask specific definitions */ +#define ZYNQMP_DMA_INT_ERR (ZYNQMP_DMA_AXI_RD_DATA | \ + ZYNQMP_DMA_AXI_WR_DATA | \ + ZYNQMP_DMA_AXI_RD_DST_DSCR | \ + ZYNQMP_DMA_AXI_RD_SRC_DSCR | \ + ZYNQMP_DMA_INV_APB) +#define ZYNQMP_DMA_INT_OVRFL (ZYNQMP_DMA_BYTE_CNT_OVRFL | \ + 
ZYNQMP_DMA_IRQ_SRC_ACCT_ERR | \ + ZYNQMP_DMA_IRQ_DST_ACCT_ERR) +#define ZYNQMP_DMA_INT_DONE (ZYNQMP_DMA_DONE | ZYNQMP_DMA_DST_DSCR_DONE) +#define ZYNQMP_DMA_INT_EN_DEFAULT_MASK (ZYNQMP_DMA_INT_DONE | \ + ZYNQMP_DMA_INT_ERR | \ + ZYNQMP_DMA_INT_OVRFL | \ + ZYNQMP_DMA_DST_DSCR_DONE) + +/* Max number of descriptors per channel */ +#define ZYNQMP_DMA_NUM_DESCS 32 + +/* Max transfer size per descriptor */ +#define ZYNQMP_DMA_MAX_TRANS_LEN 0x40000000 + +/* Max burst lengths */ +#define ZYNQMP_DMA_MAX_DST_BURST_LEN 32768U +#define ZYNQMP_DMA_MAX_SRC_BURST_LEN 32768U + +/* Reset values for data attributes */ +#define ZYNQMP_DMA_AXCACHE_VAL 0xF + +#define ZYNQMP_DMA_SRC_ISSUE_RST_VAL 0x1F + +#define ZYNQMP_DMA_IDS_DEFAULT_MASK 0xFFF + +/* Bus width in bits */ +#define ZYNQMP_DMA_BUS_WIDTH_64 64 +#define ZYNQMP_DMA_BUS_WIDTH_128 128 + +#define ZDMA_PM_TIMEOUT 100 + +#define ZYNQMP_DMA_DESC_SIZE(chan) (chan->desc_size) + +#define to_chan(chan) container_of(chan, struct zynqmp_dma_chan, \ + common) +#define tx_to_desc(tx) container_of(tx, struct zynqmp_dma_desc_sw, \ + async_tx) + +/** + * struct zynqmp_dma_desc_ll - Hw linked list descriptor + * @addr: Buffer address + * @size: Size of the buffer + * @ctrl: Control word + * @nxtdscraddr: Next descriptor base address + * @rsvd: Reserved field and for Hw internal use. + */ +struct zynqmp_dma_desc_ll { + u64 addr; + u32 size; + u32 ctrl; + u64 nxtdscraddr; + u64 rsvd; +}; + +/** + * struct zynqmp_dma_desc_sw - Per Transaction structure + * @src: Source address for simple mode dma + * @dst: Destination address for simple mode dma + * @len: Transfer length for simple mode dma + * @node: Node in the channel descriptor list + * @tx_list: List head for the current transfer + * @async_tx: Async transaction descriptor + * @src_v: Virtual address of the src descriptor + * @src_p: Physical address of the src descriptor + * @dst_v: Virtual address of the dst descriptor + * @dst_p: Physical address of the dst descriptor + */ +struct zynqmp_dma_desc_sw { + u64 src; + u64 dst; + u32 len; + struct list_head node; + struct list_head tx_list; + struct dma_async_tx_descriptor async_tx; + struct zynqmp_dma_desc_ll *src_v; + dma_addr_t src_p; + struct zynqmp_dma_desc_ll *dst_v; + dma_addr_t dst_p; +}; + +/** + * struct zynqmp_dma_chan - Driver specific DMA channel structure + * @zdev: Driver specific device structure + * @regs: Control registers offset + * @lock: Descriptor operation lock + * @pending_list: Descriptors waiting + * @free_list: Descriptors free + * @active_list: Descriptors active + * @sw_desc_pool: SW descriptor pool + * @done_list: Complete descriptors + * @common: DMA common channel + * @desc_pool_v: Statically allocated descriptor base + * @desc_pool_p: Physical allocated descriptor base + * @desc_free_cnt: Descriptor available count + * @dev: The dma device + * @irq: Channel IRQ + * @is_dmacoherent: Tells whether dma operations are coherent or not + * @tasklet: Cleanup work after irq + * @idle : Channel status; + * @desc_size: Size of the low level descriptor + * @err: Channel has errors + * @bus_width: Bus width + * @src_burst_len: Source burst length + * @dst_burst_len: Dest burst length + */ +struct zynqmp_dma_chan { + struct zynqmp_dma_device *zdev; + void __iomem *regs; + spinlock_t lock; + struct list_head pending_list; + struct list_head free_list; + struct list_head active_list; + struct zynqmp_dma_desc_sw *sw_desc_pool; + struct list_head done_list; + struct dma_chan common; + void *desc_pool_v; + dma_addr_t desc_pool_p; + u32 desc_free_cnt; + 
struct device *dev; + int irq; + bool is_dmacoherent; + struct tasklet_struct tasklet; + bool idle; + u32 desc_size; + bool err; + u32 bus_width; + u32 src_burst_len; + u32 dst_burst_len; +}; + +/** + * struct zynqmp_dma_device - DMA device structure + * @dev: Device Structure + * @common: DMA device structure + * @chan: Driver specific DMA channel + * @clk_main: Pointer to main clock + * @clk_apb: Pointer to apb clock + */ +struct zynqmp_dma_device { + struct device *dev; + struct dma_device common; + struct zynqmp_dma_chan *chan; + struct clk *clk_main; + struct clk *clk_apb; +}; + +static inline void zynqmp_dma_writeq(struct zynqmp_dma_chan *chan, u32 reg, + u64 value) +{ + lo_hi_writeq(value, chan->regs + reg); +} + +/** + * zynqmp_dma_update_desc_to_ctrlr - Updates descriptor to the controller + * @chan: ZynqMP DMA DMA channel pointer + * @desc: Transaction descriptor pointer + */ +static void zynqmp_dma_update_desc_to_ctrlr(struct zynqmp_dma_chan *chan, + struct zynqmp_dma_desc_sw *desc) +{ + dma_addr_t addr; + + addr = desc->src_p; + zynqmp_dma_writeq(chan, ZYNQMP_DMA_SRC_START_LSB, addr); + addr = desc->dst_p; + zynqmp_dma_writeq(chan, ZYNQMP_DMA_DST_START_LSB, addr); +} + +/** + * zynqmp_dma_desc_config_eod - Mark the descriptor as end descriptor + * @chan: ZynqMP DMA channel pointer + * @desc: Hw descriptor pointer + */ +static void zynqmp_dma_desc_config_eod(struct zynqmp_dma_chan *chan, + void *desc) +{ + struct zynqmp_dma_desc_ll *hw = (struct zynqmp_dma_desc_ll *)desc; + + hw->ctrl |= ZYNQMP_DMA_DESC_CTRL_STOP; + hw++; + hw->ctrl |= ZYNQMP_DMA_DESC_CTRL_COMP_INT | ZYNQMP_DMA_DESC_CTRL_STOP; +} + +/** + * zynqmp_dma_config_sg_ll_desc - Configure the linked list descriptor + * @chan: ZynqMP DMA channel pointer + * @sdesc: Hw descriptor pointer + * @src: Source buffer address + * @dst: Destination buffer address + * @len: Transfer length + * @prev: Previous hw descriptor pointer + */ +static void zynqmp_dma_config_sg_ll_desc(struct zynqmp_dma_chan *chan, + struct zynqmp_dma_desc_ll *sdesc, + dma_addr_t src, dma_addr_t dst, size_t len, + struct zynqmp_dma_desc_ll *prev) +{ + struct zynqmp_dma_desc_ll *ddesc = sdesc + 1; + + sdesc->size = ddesc->size = len; + sdesc->addr = src; + ddesc->addr = dst; + + sdesc->ctrl = ddesc->ctrl = ZYNQMP_DMA_DESC_CTRL_SIZE_256; + if (chan->is_dmacoherent) { + sdesc->ctrl |= ZYNQMP_DMA_DESC_CTRL_COHRNT; + ddesc->ctrl |= ZYNQMP_DMA_DESC_CTRL_COHRNT; + } + + if (prev) { + dma_addr_t addr = chan->desc_pool_p + + ((uintptr_t)sdesc - (uintptr_t)chan->desc_pool_v); + ddesc = prev + 1; + prev->nxtdscraddr = addr; + ddesc->nxtdscraddr = addr + ZYNQMP_DMA_DESC_SIZE(chan); + } +} + +/** + * zynqmp_dma_init - Initialize the channel + * @chan: ZynqMP DMA channel pointer + */ +static void zynqmp_dma_init(struct zynqmp_dma_chan *chan) +{ + u32 val; + + writel(ZYNQMP_DMA_IDS_DEFAULT_MASK, chan->regs + ZYNQMP_DMA_IDS); + val = readl(chan->regs + ZYNQMP_DMA_ISR); + writel(val, chan->regs + ZYNQMP_DMA_ISR); + + if (chan->is_dmacoherent) { + val = ZYNQMP_DMA_AXCOHRNT; + val = (val & ~ZYNQMP_DMA_AXCACHE) | + (ZYNQMP_DMA_AXCACHE_VAL << ZYNQMP_DMA_AXCACHE_OFST); + writel(val, chan->regs + ZYNQMP_DMA_DSCR_ATTR); + } + + val = readl(chan->regs + ZYNQMP_DMA_DATA_ATTR); + if (chan->is_dmacoherent) { + val = (val & ~ZYNQMP_DMA_ARCACHE) | + (ZYNQMP_DMA_AXCACHE_VAL << ZYNQMP_DMA_ARCACHE_OFST); + val = (val & ~ZYNQMP_DMA_AWCACHE) | + (ZYNQMP_DMA_AXCACHE_VAL << ZYNQMP_DMA_AWCACHE_OFST); + } + writel(val, chan->regs + ZYNQMP_DMA_DATA_ATTR); + + /* Clearing the interrupt account 
rgisters */ + val = readl(chan->regs + ZYNQMP_DMA_IRQ_SRC_ACCT); + val = readl(chan->regs + ZYNQMP_DMA_IRQ_DST_ACCT); + + chan->idle = true; +} + +/** + * zynqmp_dma_tx_submit - Submit DMA transaction + * @tx: Async transaction descriptor pointer + * + * Return: cookie value + */ +static dma_cookie_t zynqmp_dma_tx_submit(struct dma_async_tx_descriptor *tx) +{ + struct zynqmp_dma_chan *chan = to_chan(tx->chan); + struct zynqmp_dma_desc_sw *desc, *new; + dma_cookie_t cookie; + + new = tx_to_desc(tx); + spin_lock_bh(&chan->lock); + cookie = dma_cookie_assign(tx); + + if (!list_empty(&chan->pending_list)) { + desc = list_last_entry(&chan->pending_list, + struct zynqmp_dma_desc_sw, node); + if (!list_empty(&desc->tx_list)) + desc = list_last_entry(&desc->tx_list, + struct zynqmp_dma_desc_sw, node); + desc->src_v->nxtdscraddr = new->src_p; + desc->src_v->ctrl &= ~ZYNQMP_DMA_DESC_CTRL_STOP; + desc->dst_v->nxtdscraddr = new->dst_p; + desc->dst_v->ctrl &= ~ZYNQMP_DMA_DESC_CTRL_STOP; + } + + list_add_tail(&new->node, &chan->pending_list); + spin_unlock_bh(&chan->lock); + + return cookie; +} + +/** + * zynqmp_dma_get_descriptor - Get the sw descriptor from the pool + * @chan: ZynqMP DMA channel pointer + * + * Return: The sw descriptor + */ +static struct zynqmp_dma_desc_sw * +zynqmp_dma_get_descriptor(struct zynqmp_dma_chan *chan) +{ + struct zynqmp_dma_desc_sw *desc; + + spin_lock_bh(&chan->lock); + desc = list_first_entry(&chan->free_list, + struct zynqmp_dma_desc_sw, node); + list_del(&desc->node); + spin_unlock_bh(&chan->lock); + + INIT_LIST_HEAD(&desc->tx_list); + /* Clear the src and dst descriptor memory */ + memset((void *)desc->src_v, 0, ZYNQMP_DMA_DESC_SIZE(chan)); + memset((void *)desc->dst_v, 0, ZYNQMP_DMA_DESC_SIZE(chan)); + + return desc; +} + +/** + * zynqmp_dma_free_descriptor - Issue pending transactions + * @chan: ZynqMP DMA channel pointer + * @sdesc: Transaction descriptor pointer + */ +static void zynqmp_dma_free_descriptor(struct zynqmp_dma_chan *chan, + struct zynqmp_dma_desc_sw *sdesc) +{ + struct zynqmp_dma_desc_sw *child, *next; + + chan->desc_free_cnt++; + list_add_tail(&sdesc->node, &chan->free_list); + list_for_each_entry_safe(child, next, &sdesc->tx_list, node) { + chan->desc_free_cnt++; + list_move_tail(&child->node, &chan->free_list); + } +} + +/** + * zynqmp_dma_free_desc_list - Free descriptors list + * @chan: ZynqMP DMA channel pointer + * @list: List to parse and delete the descriptor + */ +static void zynqmp_dma_free_desc_list(struct zynqmp_dma_chan *chan, + struct list_head *list) +{ + struct zynqmp_dma_desc_sw *desc, *next; + + list_for_each_entry_safe(desc, next, list, node) + zynqmp_dma_free_descriptor(chan, desc); +} + +/** + * zynqmp_dma_alloc_chan_resources - Allocate channel resources + * @dchan: DMA channel + * + * Return: Number of descriptors on success and failure value on error + */ +static int zynqmp_dma_alloc_chan_resources(struct dma_chan *dchan) +{ + struct zynqmp_dma_chan *chan = to_chan(dchan); + struct zynqmp_dma_desc_sw *desc; + int i, ret; + + ret = pm_runtime_get_sync(chan->dev); + if (ret < 0) + return ret; + + chan->sw_desc_pool = kcalloc(ZYNQMP_DMA_NUM_DESCS, sizeof(*desc), + GFP_KERNEL); + if (!chan->sw_desc_pool) + return -ENOMEM; + + chan->idle = true; + chan->desc_free_cnt = ZYNQMP_DMA_NUM_DESCS; + + INIT_LIST_HEAD(&chan->free_list); + + for (i = 0; i < ZYNQMP_DMA_NUM_DESCS; i++) { + desc = chan->sw_desc_pool + i; + dma_async_tx_descriptor_init(&desc->async_tx, &chan->common); + desc->async_tx.tx_submit = zynqmp_dma_tx_submit; + 
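/* Each software descriptor starts out on the channel's free list; zynqmp_dma_get_descriptor() hands it back out from there at prep time. */
 +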
list_add_tail(&desc->node, &chan->free_list); + } + + chan->desc_pool_v = dma_zalloc_coherent(chan->dev, + (2 * chan->desc_size * ZYNQMP_DMA_NUM_DESCS), + &chan->desc_pool_p, GFP_KERNEL); + if (!chan->desc_pool_v) + return -ENOMEM; + + for (i = 0; i < ZYNQMP_DMA_NUM_DESCS; i++) { + desc = chan->sw_desc_pool + i; + desc->src_v = (struct zynqmp_dma_desc_ll *) (chan->desc_pool_v + + (i * ZYNQMP_DMA_DESC_SIZE(chan) * 2)); + desc->dst_v = (struct zynqmp_dma_desc_ll *) (desc->src_v + 1); + desc->src_p = chan->desc_pool_p + + (i * ZYNQMP_DMA_DESC_SIZE(chan) * 2); + desc->dst_p = desc->src_p + ZYNQMP_DMA_DESC_SIZE(chan); + } + + return ZYNQMP_DMA_NUM_DESCS; +} + +/** + * zynqmp_dma_start - Start DMA channel + * @chan: ZynqMP DMA channel pointer + */ +static void zynqmp_dma_start(struct zynqmp_dma_chan *chan) +{ + writel(ZYNQMP_DMA_INT_EN_DEFAULT_MASK, chan->regs + ZYNQMP_DMA_IER); + writel(0, chan->regs + ZYNQMP_DMA_TOTAL_BYTE); + chan->idle = false; + writel(ZYNQMP_DMA_ENABLE, chan->regs + ZYNQMP_DMA_CTRL2); +} + +/** + * zynqmp_dma_handle_ovfl_int - Process the overflow interrupt + * @chan: ZynqMP DMA channel pointer + * @status: Interrupt status value + */ +static void zynqmp_dma_handle_ovfl_int(struct zynqmp_dma_chan *chan, u32 status) +{ + if (status & ZYNQMP_DMA_BYTE_CNT_OVRFL) + writel(0, chan->regs + ZYNQMP_DMA_TOTAL_BYTE); + if (status & ZYNQMP_DMA_IRQ_DST_ACCT_ERR) + readl(chan->regs + ZYNQMP_DMA_IRQ_DST_ACCT); + if (status & ZYNQMP_DMA_IRQ_SRC_ACCT_ERR) + readl(chan->regs + ZYNQMP_DMA_IRQ_SRC_ACCT); +} + +static void zynqmp_dma_config(struct zynqmp_dma_chan *chan) +{ + u32 val, burst_val; + + val = readl(chan->regs + ZYNQMP_DMA_CTRL0); + val |= ZYNQMP_DMA_POINT_TYPE_SG; + writel(val, chan->regs + ZYNQMP_DMA_CTRL0); + + val = readl(chan->regs + ZYNQMP_DMA_DATA_ATTR); + burst_val = __ilog2_u32(chan->src_burst_len); + val = (val & ~ZYNQMP_DMA_ARLEN) | + ((burst_val << ZYNQMP_DMA_ARLEN_OFST) & ZYNQMP_DMA_ARLEN); + burst_val = __ilog2_u32(chan->dst_burst_len); + val = (val & ~ZYNQMP_DMA_AWLEN) | + ((burst_val << ZYNQMP_DMA_AWLEN_OFST) & ZYNQMP_DMA_AWLEN); + writel(val, chan->regs + ZYNQMP_DMA_DATA_ATTR); +} + +/** + * zynqmp_dma_device_config - Zynqmp dma device configuration + * @dchan: DMA channel + * @config: DMA device config + * + * Return: 0 always + */ +static int zynqmp_dma_device_config(struct dma_chan *dchan, + struct dma_slave_config *config) +{ + struct zynqmp_dma_chan *chan = to_chan(dchan); + + chan->src_burst_len = clamp(config->src_maxburst, 1U, + ZYNQMP_DMA_MAX_SRC_BURST_LEN); + chan->dst_burst_len = clamp(config->dst_maxburst, 1U, + ZYNQMP_DMA_MAX_DST_BURST_LEN); + + return 0; +} + +/** + * zynqmp_dma_start_transfer - Initiate the new transfer + * @chan: ZynqMP DMA channel pointer + */ +static void zynqmp_dma_start_transfer(struct zynqmp_dma_chan *chan) +{ + struct zynqmp_dma_desc_sw *desc; + + if (!chan->idle) + return; + + zynqmp_dma_config(chan); + + desc = list_first_entry_or_null(&chan->pending_list, + struct zynqmp_dma_desc_sw, node); + if (!desc) + return; + + list_splice_tail_init(&chan->pending_list, &chan->active_list); + zynqmp_dma_update_desc_to_ctrlr(chan, desc); + zynqmp_dma_start(chan); +} + + +/** + * zynqmp_dma_chan_desc_cleanup - Cleanup the completed descriptors + * @chan: ZynqMP DMA channel + */ +static void zynqmp_dma_chan_desc_cleanup(struct zynqmp_dma_chan *chan) +{ + struct zynqmp_dma_desc_sw *desc, *next; + + list_for_each_entry_safe(desc, next, &chan->done_list, node) { + dma_async_tx_callback callback; + void *callback_param; + + 
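/* The completion callback may call back into the dmaengine API, so the channel lock is dropped around it below. */
 +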
list_del(&desc->node); + + callback = desc->async_tx.callback; + callback_param = desc->async_tx.callback_param; + if (callback) { + spin_unlock(&chan->lock); + callback(callback_param); + spin_lock(&chan->lock); + } + + /* Run any dependencies, then free the descriptor */ + zynqmp_dma_free_descriptor(chan, desc); + } +} + +/** + * zynqmp_dma_complete_descriptor - Mark the active descriptor as complete + * @chan: ZynqMP DMA channel pointer + */ +static void zynqmp_dma_complete_descriptor(struct zynqmp_dma_chan *chan) +{ + struct zynqmp_dma_desc_sw *desc; + + desc = list_first_entry_or_null(&chan->active_list, + struct zynqmp_dma_desc_sw, node); + if (!desc) + return; + list_del(&desc->node); + dma_cookie_complete(&desc->async_tx); + list_add_tail(&desc->node, &chan->done_list); +} + +/** + * zynqmp_dma_issue_pending - Issue pending transactions + * @dchan: DMA channel pointer + */ +static void zynqmp_dma_issue_pending(struct dma_chan *dchan) +{ + struct zynqmp_dma_chan *chan = to_chan(dchan); + + spin_lock_bh(&chan->lock); + zynqmp_dma_start_transfer(chan); + spin_unlock_bh(&chan->lock); +} + +/** + * zynqmp_dma_free_descriptors - Free channel descriptors + * @chan: ZynqMP DMA channel pointer + */ +static void zynqmp_dma_free_descriptors(struct zynqmp_dma_chan *chan) +{ + zynqmp_dma_free_desc_list(chan, &chan->active_list); + zynqmp_dma_free_desc_list(chan, &chan->pending_list); + zynqmp_dma_free_desc_list(chan, &chan->done_list); +} + +/** + * zynqmp_dma_free_chan_resources - Free channel resources + * @dchan: DMA channel pointer + */ +static void zynqmp_dma_free_chan_resources(struct dma_chan *dchan) +{ + struct zynqmp_dma_chan *chan = to_chan(dchan); + + spin_lock_bh(&chan->lock); + zynqmp_dma_free_descriptors(chan); + spin_unlock_bh(&chan->lock); + dma_free_coherent(chan->dev, + (2 * ZYNQMP_DMA_DESC_SIZE(chan) * ZYNQMP_DMA_NUM_DESCS), + chan->desc_pool_v, chan->desc_pool_p); + kfree(chan->sw_desc_pool); + pm_runtime_mark_last_busy(chan->dev); + pm_runtime_put_autosuspend(chan->dev); +} + +/** + * zynqmp_dma_reset - Reset the channel + * @chan: ZynqMP DMA channel pointer + */ +static void zynqmp_dma_reset(struct zynqmp_dma_chan *chan) +{ + writel(ZYNQMP_DMA_IDS_DEFAULT_MASK, chan->regs + ZYNQMP_DMA_IDS); + + zynqmp_dma_complete_descriptor(chan); + zynqmp_dma_chan_desc_cleanup(chan); + zynqmp_dma_free_descriptors(chan); + zynqmp_dma_init(chan); +} + +/** + * zynqmp_dma_irq_handler - ZynqMP DMA Interrupt handler + * @irq: IRQ number + * @data: Pointer to the ZynqMP DMA channel structure + * + * Return: IRQ_HANDLED/IRQ_NONE + */ +static irqreturn_t zynqmp_dma_irq_handler(int irq, void *data) +{ + struct zynqmp_dma_chan *chan = (struct zynqmp_dma_chan *)data; + u32 isr, imr, status; + irqreturn_t ret = IRQ_NONE; + + isr = readl(chan->regs + ZYNQMP_DMA_ISR); + imr = readl(chan->regs + ZYNQMP_DMA_IMR); + status = isr & ~imr; + + writel(isr, chan->regs + ZYNQMP_DMA_ISR); + if (status & ZYNQMP_DMA_INT_DONE) { + tasklet_schedule(&chan->tasklet); + ret = IRQ_HANDLED; + } + + if (status & ZYNQMP_DMA_DONE) + chan->idle = true; + + if (status & ZYNQMP_DMA_INT_ERR) { + chan->err = true; + tasklet_schedule(&chan->tasklet); + dev_err(chan->dev, "Channel %p has errors\n", chan); + ret = IRQ_HANDLED; + } + + if (status & ZYNQMP_DMA_INT_OVRFL) { + zynqmp_dma_handle_ovfl_int(chan, status); + dev_dbg(chan->dev, "Channel %p overflow interrupt\n", chan); + ret = IRQ_HANDLED; + } + + return ret; +} + +/** + * zynqmp_dma_do_tasklet - Schedule completion tasklet + * @data: Pointer to the ZynqMP DMA channel 
structure + */ +static void zynqmp_dma_do_tasklet(unsigned long data) +{ + struct zynqmp_dma_chan *chan = (struct zynqmp_dma_chan *)data; + u32 count; + + spin_lock(&chan->lock); + + if (chan->err) { + zynqmp_dma_reset(chan); + chan->err = false; + goto unlock; + } + + count = readl(chan->regs + ZYNQMP_DMA_IRQ_DST_ACCT); + + while (count) { + zynqmp_dma_complete_descriptor(chan); + zynqmp_dma_chan_desc_cleanup(chan); + count--; + } + + if (chan->idle) + zynqmp_dma_start_transfer(chan); + +unlock: + spin_unlock(&chan->lock); +} + +/** + * zynqmp_dma_device_terminate_all - Aborts all transfers on a channel + * @dchan: DMA channel pointer + * + * Return: Always '0' + */ +static int zynqmp_dma_device_terminate_all(struct dma_chan *dchan) +{ + struct zynqmp_dma_chan *chan = to_chan(dchan); + + spin_lock_bh(&chan->lock); + writel(ZYNQMP_DMA_IDS_DEFAULT_MASK, chan->regs + ZYNQMP_DMA_IDS); + zynqmp_dma_free_descriptors(chan); + spin_unlock_bh(&chan->lock); + + return 0; +} + +/** + * zynqmp_dma_prep_memcpy - prepare descriptors for memcpy transaction + * @dchan: DMA channel + * @dma_dst: Destination buffer address + * @dma_src: Source buffer address + * @len: Transfer length + * @flags: transfer ack flags + * + * Return: Async transaction descriptor on success and NULL on failure + */ +static struct dma_async_tx_descriptor *zynqmp_dma_prep_memcpy( + struct dma_chan *dchan, dma_addr_t dma_dst, + dma_addr_t dma_src, size_t len, ulong flags) +{ + struct zynqmp_dma_chan *chan; + struct zynqmp_dma_desc_sw *new, *first = NULL; + void *desc = NULL, *prev = NULL; + size_t copy; + u32 desc_cnt; + + chan = to_chan(dchan); + + desc_cnt = DIV_ROUND_UP(len, ZYNQMP_DMA_MAX_TRANS_LEN); + + spin_lock_bh(&chan->lock); + if (desc_cnt > chan->desc_free_cnt) { + spin_unlock_bh(&chan->lock); + dev_dbg(chan->dev, "chan %p descs are not available\n", chan); + return NULL; + } + chan->desc_free_cnt = chan->desc_free_cnt - desc_cnt; + spin_unlock_bh(&chan->lock); + + do { + /* Allocate and populate the descriptor */ + new = zynqmp_dma_get_descriptor(chan); + + copy = min_t(size_t, len, ZYNQMP_DMA_MAX_TRANS_LEN); + desc = (struct zynqmp_dma_desc_ll *)new->src_v; + zynqmp_dma_config_sg_ll_desc(chan, desc, dma_src, + dma_dst, copy, prev); + prev = desc; + len -= copy; + dma_src += copy; + dma_dst += copy; + if (!first) + first = new; + else + list_add_tail(&new->node, &first->tx_list); + } while (len); + + zynqmp_dma_desc_config_eod(chan, desc); + async_tx_ack(&first->async_tx); + first->async_tx.flags = flags; + return &first->async_tx; +} + +/** + * zynqmp_dma_chan_remove - Channel remove function + * @chan: ZynqMP DMA channel pointer + */ +static void zynqmp_dma_chan_remove(struct zynqmp_dma_chan *chan) +{ + if (!chan) + return; + + if (chan->irq) + devm_free_irq(chan->zdev->dev, chan->irq, chan); + tasklet_kill(&chan->tasklet); + list_del(&chan->common.device_node); +} + +/** + * zynqmp_dma_chan_probe - Per Channel Probing + * @zdev: Driver specific device structure + * @pdev: Pointer to the platform_device structure + * + * Return: '0' on success and failure value on error + */ +static int zynqmp_dma_chan_probe(struct zynqmp_dma_device *zdev, + struct platform_device *pdev) +{ + struct zynqmp_dma_chan *chan; + struct resource *res; + struct device_node *node = pdev->dev.of_node; + int err; + + chan = devm_kzalloc(zdev->dev, sizeof(*chan), GFP_KERNEL); + if (!chan) + return -ENOMEM; + chan->dev = zdev->dev; + chan->zdev = zdev; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + chan->regs = 
devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(chan->regs)) + return PTR_ERR(chan->regs); + + chan->bus_width = ZYNQMP_DMA_BUS_WIDTH_64; + chan->dst_burst_len = ZYNQMP_DMA_MAX_DST_BURST_LEN; + chan->src_burst_len = ZYNQMP_DMA_MAX_SRC_BURST_LEN; + err = of_property_read_u32(node, "xlnx,bus-width", &chan->bus_width); + if (err < 0) { + dev_err(&pdev->dev, "missing xlnx,bus-width property\n"); + return err; + } + + if (chan->bus_width != ZYNQMP_DMA_BUS_WIDTH_64 && + chan->bus_width != ZYNQMP_DMA_BUS_WIDTH_128) { + dev_err(zdev->dev, "invalid bus-width value"); + return -EINVAL; + } + + chan->is_dmacoherent = of_property_read_bool(node, "dma-coherent"); + zdev->chan = chan; + tasklet_init(&chan->tasklet, zynqmp_dma_do_tasklet, (ulong)chan); + spin_lock_init(&chan->lock); + INIT_LIST_HEAD(&chan->active_list); + INIT_LIST_HEAD(&chan->pending_list); + INIT_LIST_HEAD(&chan->done_list); + INIT_LIST_HEAD(&chan->free_list); + + dma_cookie_init(&chan->common); + chan->common.device = &zdev->common; + list_add_tail(&chan->common.device_node, &zdev->common.channels); + + zynqmp_dma_init(chan); + chan->irq = platform_get_irq(pdev, 0); + if (chan->irq < 0) + return -ENXIO; + err = devm_request_irq(&pdev->dev, chan->irq, zynqmp_dma_irq_handler, 0, + "zynqmp-dma", chan); + if (err) + return err; + + chan->desc_size = sizeof(struct zynqmp_dma_desc_ll); + chan->idle = true; + return 0; +} + +/** + * of_zynqmp_dma_xlate - Translation function + * @dma_spec: Pointer to DMA specifier as found in the device tree + * @ofdma: Pointer to DMA controller data + * + * Return: DMA channel pointer on success and NULL on error + */ +static struct dma_chan *of_zynqmp_dma_xlate(struct of_phandle_args *dma_spec, + struct of_dma *ofdma) +{ + struct zynqmp_dma_device *zdev = ofdma->of_dma_data; + + return dma_get_slave_channel(&zdev->chan->common); +} + +/** + * zynqmp_dma_suspend - Suspend method for the driver + * @dev: Address of the device structure + * + * Put the driver into low power mode. + * Return: 0 on success and failure value on error + */ +static int __maybe_unused zynqmp_dma_suspend(struct device *dev) +{ + if (!device_may_wakeup(dev)) + return pm_runtime_force_suspend(dev); + + return 0; +} + +/** + * zynqmp_dma_resume - Resume from suspend + * @dev: Address of the device structure + * + * Resume operation after suspend. + * Return: 0 on success and failure value on error + */ +static int __maybe_unused zynqmp_dma_resume(struct device *dev) +{ + if (!device_may_wakeup(dev)) + return pm_runtime_force_resume(dev); + + return 0; +} + +/** + * zynqmp_dma_runtime_suspend - Runtime suspend method for the driver + * @dev: Address of the device structure + * + * Put the driver into low power mode. + * Return: 0 always + */ +static int __maybe_unused zynqmp_dma_runtime_suspend(struct device *dev) +{ + struct zynqmp_dma_device *zdev = dev_get_drvdata(dev); + + clk_disable_unprepare(zdev->clk_main); + clk_disable_unprepare(zdev->clk_apb); + + return 0; +} + +/** + * zynqmp_dma_runtime_resume - Runtime suspend method for the driver + * @dev: Address of the device structure + * + * Put the driver into low power mode. 
+ * Return: 0 always + */ +static int __maybe_unused zynqmp_dma_runtime_resume(struct device *dev) +{ + struct zynqmp_dma_device *zdev = dev_get_drvdata(dev); + int err; + + err = clk_prepare_enable(zdev->clk_main); + if (err) { + dev_err(dev, "Unable to enable main clock.\n"); + return err; + } + + err = clk_prepare_enable(zdev->clk_apb); + if (err) { + dev_err(dev, "Unable to enable apb clock.\n"); + clk_disable_unprepare(zdev->clk_main); + return err; + } + + return 0; +} + +static const struct dev_pm_ops zynqmp_dma_dev_pm_ops = { + SET_SYSTEM_SLEEP_PM_OPS(zynqmp_dma_suspend, zynqmp_dma_resume) + SET_RUNTIME_PM_OPS(zynqmp_dma_runtime_suspend, + zynqmp_dma_runtime_resume, NULL) +}; + +/** + * zynqmp_dma_probe - Driver probe function + * @pdev: Pointer to the platform_device structure + * + * Return: '0' on success and failure value on error + */ +static int zynqmp_dma_probe(struct platform_device *pdev) +{ + struct zynqmp_dma_device *zdev; + struct dma_device *p; + int ret; + + zdev = devm_kzalloc(&pdev->dev, sizeof(*zdev), GFP_KERNEL); + if (!zdev) + return -ENOMEM; + + zdev->dev = &pdev->dev; + INIT_LIST_HEAD(&zdev->common.channels); + + dma_set_mask(&pdev->dev, DMA_BIT_MASK(44)); + dma_cap_set(DMA_MEMCPY, zdev->common.cap_mask); + + p = &zdev->common; + p->device_prep_dma_memcpy = zynqmp_dma_prep_memcpy; + p->device_terminate_all = zynqmp_dma_device_terminate_all; + p->device_issue_pending = zynqmp_dma_issue_pending; + p->device_alloc_chan_resources = zynqmp_dma_alloc_chan_resources; + p->device_free_chan_resources = zynqmp_dma_free_chan_resources; + p->device_tx_status = dma_cookie_status; + p->device_config = zynqmp_dma_device_config; + p->dev = &pdev->dev; + + zdev->clk_main = devm_clk_get(&pdev->dev, "clk_main"); + if (IS_ERR(zdev->clk_main)) { + dev_err(&pdev->dev, "main clock not found.\n"); + return PTR_ERR(zdev->clk_main); + } + + zdev->clk_apb = devm_clk_get(&pdev->dev, "clk_apb"); + if (IS_ERR(zdev->clk_apb)) { + dev_err(&pdev->dev, "apb clock not found.\n"); + return PTR_ERR(zdev->clk_apb); + } + + platform_set_drvdata(pdev, zdev); + pm_runtime_set_autosuspend_delay(zdev->dev, ZDMA_PM_TIMEOUT); + pm_runtime_use_autosuspend(zdev->dev); + pm_runtime_enable(zdev->dev); + pm_runtime_get_sync(zdev->dev); + if (!pm_runtime_enabled(zdev->dev)) { + ret = zynqmp_dma_runtime_resume(zdev->dev); + if (ret) + return ret; + } + + ret = zynqmp_dma_chan_probe(zdev, pdev); + if (ret) { + dev_err(&pdev->dev, "Probing channel failed\n"); + goto err_disable_pm; + } + + p->dst_addr_widths = BIT(zdev->chan->bus_width / 8); + p->src_addr_widths = BIT(zdev->chan->bus_width / 8); + + dma_async_device_register(&zdev->common); + + ret = of_dma_controller_register(pdev->dev.of_node, + of_zynqmp_dma_xlate, zdev); + if (ret) { + dev_err(&pdev->dev, "Unable to register DMA to DT\n"); + dma_async_device_unregister(&zdev->common); + goto free_chan_resources; + } + + pm_runtime_mark_last_busy(zdev->dev); + pm_runtime_put_sync_autosuspend(zdev->dev); + + dev_info(&pdev->dev, "ZynqMP DMA driver Probe success\n"); + + return 0; + +free_chan_resources: + zynqmp_dma_chan_remove(zdev->chan); +err_disable_pm: + if (!pm_runtime_enabled(zdev->dev)) + zynqmp_dma_runtime_suspend(zdev->dev); + pm_runtime_disable(zdev->dev); + return ret; +} + +/** + * zynqmp_dma_remove - Driver remove function + * @pdev: Pointer to the platform_device structure + * + * Return: Always '0' + */ +static int zynqmp_dma_remove(struct platform_device *pdev) +{ + struct zynqmp_dma_device *zdev = platform_get_drvdata(pdev); + + 
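/* Tear down in the reverse order of probe: DT translation, dmaengine registration, the channel, then runtime PM. */
 +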
of_dma_controller_free(pdev->dev.of_node);
+ dma_async_device_unregister(&zdev->common);
+
+ zynqmp_dma_chan_remove(zdev->chan);
+ pm_runtime_disable(zdev->dev);
+ if (!pm_runtime_enabled(zdev->dev))
+ zynqmp_dma_runtime_suspend(zdev->dev);
+
+ return 0;
+}
+
+static const struct of_device_id zynqmp_dma_of_match[] = {
+ { .compatible = "xlnx,zynqmp-dma-1.0", },
+ {}
+};
+MODULE_DEVICE_TABLE(of, zynqmp_dma_of_match);
+
+static struct platform_driver zynqmp_dma_driver = {
+ .driver = {
+ .name = "xilinx-zynqmp-dma",
+ .of_match_table = zynqmp_dma_of_match,
+ .pm = &zynqmp_dma_dev_pm_ops,
+ },
+ .probe = zynqmp_dma_probe,
+ .remove = zynqmp_dma_remove,
+};
+
+module_platform_driver(zynqmp_dma_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Xilinx, Inc.");
+MODULE_DESCRIPTION("Xilinx ZynqMP DMA driver");