author    Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-11 08:27:49 +0000
committer Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-11 08:27:49 +0000
commit    ace9429bb58fd418f0c81d4c2835699bddf6bde6 (patch)
tree      b2d64bc10158fdd5497876388cd68142ca374ed3 /sound/soc/sprd
parent    Initial commit. (diff)
download  linux-ace9429bb58fd418f0c81d4c2835699bddf6bde6.tar.xz
          linux-ace9429bb58fd418f0c81d4c2835699bddf6bde6.zip
Adding upstream version 6.6.15. (upstream/6.6.15)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'sound/soc/sprd')
-rw-r--r--  sound/soc/sprd/Kconfig                16
-rw-r--r--  sound/soc/sprd/Makefile                8
-rw-r--r--  sound/soc/sprd/sprd-mcdt.c          1006
-rw-r--r--  sound/soc/sprd/sprd-mcdt.h           107
-rw-r--r--  sound/soc/sprd/sprd-pcm-compress.c   671
-rw-r--r--  sound/soc/sprd/sprd-pcm-dma.c        497
-rw-r--r--  sound/soc/sprd/sprd-pcm-dma.h         58
7 files changed, 2363 insertions, 0 deletions
diff --git a/sound/soc/sprd/Kconfig b/sound/soc/sprd/Kconfig
new file mode 100644
index 0000000000..5e0ac82785
--- /dev/null
+++ b/sound/soc/sprd/Kconfig
@@ -0,0 +1,16 @@
+# SPDX-License-Identifier: GPL-2.0-only
+config SND_SOC_SPRD
+ tristate "SoC Audio for the Spreadtrum SoC chips"
+ depends on ARCH_SPRD || COMPILE_TEST
+ select SND_SOC_COMPRESS
+ help
+ Say Y or M if you want to add support for codecs attached to
+ the Spreadtrum SoCs' Audio interfaces.
+
+config SND_SOC_SPRD_MCDT
+ tristate "Spreadtrum multi-channel data transfer support"
+ depends on SND_SOC_SPRD
+ help
+ Say y here to enable multi-channel data transfer support. It
+ is used for sound stream transmission between the audio subsystem
+ and other AP/CP subsystems.
diff --git a/sound/soc/sprd/Makefile b/sound/soc/sprd/Makefile
new file mode 100644
index 0000000000..a95fa56cd0
--- /dev/null
+++ b/sound/soc/sprd/Makefile
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: GPL-2.0
+# Spreadtrum Audio Support
+
+snd-soc-sprd-platform-objs := sprd-pcm-dma.o sprd-pcm-compress.o
+
+obj-$(CONFIG_SND_SOC_SPRD) += snd-soc-sprd-platform.o
+
+obj-$(CONFIG_SND_SOC_SPRD_MCDT) += sprd-mcdt.o
diff --git a/sound/soc/sprd/sprd-mcdt.c b/sound/soc/sprd/sprd-mcdt.c
new file mode 100644
index 0000000000..688419c6b0
--- /dev/null
+++ b/sound/soc/sprd/sprd-mcdt.c
@@ -0,0 +1,1006 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2019 Spreadtrum Communications Inc.
+
+#include <linux/errno.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/spinlock.h>
+
+#include "sprd-mcdt.h"
+
+/* MCDT registers definition */
+#define MCDT_CH0_TXD 0x0
+#define MCDT_CH0_RXD 0x28
+#define MCDT_DAC0_WTMK 0x60
+#define MCDT_ADC0_WTMK 0x88
+#define MCDT_DMA_EN 0xb0
+
+#define MCDT_INT_EN0 0xb4
+#define MCDT_INT_EN1 0xb8
+#define MCDT_INT_EN2 0xbc
+
+#define MCDT_INT_CLR0 0xc0
+#define MCDT_INT_CLR1 0xc4
+#define MCDT_INT_CLR2 0xc8
+
+#define MCDT_INT_RAW1 0xcc
+#define MCDT_INT_RAW2 0xd0
+#define MCDT_INT_RAW3 0xd4
+
+#define MCDT_INT_MSK1 0xd8
+#define MCDT_INT_MSK2 0xdc
+#define MCDT_INT_MSK3 0xe0
+
+#define MCDT_DAC0_FIFO_ADDR_ST 0xe4
+#define MCDT_ADC0_FIFO_ADDR_ST 0xe8
+
+#define MCDT_CH_FIFO_ST0 0x134
+#define MCDT_CH_FIFO_ST1 0x138
+#define MCDT_CH_FIFO_ST2 0x13c
+
+#define MCDT_INT_MSK_CFG0 0x140
+#define MCDT_INT_MSK_CFG1 0x144
+
+#define MCDT_DMA_CFG0 0x148
+#define MCDT_FIFO_CLR 0x14c
+#define MCDT_DMA_CFG1 0x150
+#define MCDT_DMA_CFG2 0x154
+#define MCDT_DMA_CFG3 0x158
+#define MCDT_DMA_CFG4 0x15c
+#define MCDT_DMA_CFG5 0x160
+
+/* Channel water mark definition */
+#define MCDT_CH_FIFO_AE_SHIFT 16
+#define MCDT_CH_FIFO_AE_MASK GENMASK(24, 16)
+#define MCDT_CH_FIFO_AF_MASK GENMASK(8, 0)
+
+/* DMA channel select definition */
+#define MCDT_DMA_CH0_SEL_MASK GENMASK(3, 0)
+#define MCDT_DMA_CH0_SEL_SHIFT 0
+#define MCDT_DMA_CH1_SEL_MASK GENMASK(7, 4)
+#define MCDT_DMA_CH1_SEL_SHIFT 4
+#define MCDT_DMA_CH2_SEL_MASK GENMASK(11, 8)
+#define MCDT_DMA_CH2_SEL_SHIFT 8
+#define MCDT_DMA_CH3_SEL_MASK GENMASK(15, 12)
+#define MCDT_DMA_CH3_SEL_SHIFT 12
+#define MCDT_DMA_CH4_SEL_MASK GENMASK(19, 16)
+#define MCDT_DMA_CH4_SEL_SHIFT 16
+#define MCDT_DAC_DMA_SHIFT 16
+
+/* DMA channel ACK select definition */
+#define MCDT_DMA_ACK_SEL_MASK GENMASK(3, 0)
+
+/* Channel FIFO definition */
+#define MCDT_CH_FIFO_ADDR_SHIFT 16
+#define MCDT_CH_FIFO_ADDR_MASK GENMASK(9, 0)
+#define MCDT_ADC_FIFO_SHIFT 16
+#define MCDT_FIFO_LENGTH 512
+
+#define MCDT_ADC_CHANNEL_NUM 10
+#define MCDT_DAC_CHANNEL_NUM 10
+#define MCDT_CHANNEL_NUM (MCDT_ADC_CHANNEL_NUM + MCDT_DAC_CHANNEL_NUM)
+
+enum sprd_mcdt_fifo_int {
+ MCDT_ADC_FIFO_AE_INT,
+ MCDT_ADC_FIFO_AF_INT,
+ MCDT_DAC_FIFO_AE_INT,
+ MCDT_DAC_FIFO_AF_INT,
+ MCDT_ADC_FIFO_OV_INT,
+ MCDT_DAC_FIFO_OV_INT
+};
+
+enum sprd_mcdt_fifo_sts {
+ MCDT_ADC_FIFO_REAL_FULL,
+ MCDT_ADC_FIFO_REAL_EMPTY,
+ MCDT_ADC_FIFO_AF,
+ MCDT_ADC_FIFO_AE,
+ MCDT_DAC_FIFO_REAL_FULL,
+ MCDT_DAC_FIFO_REAL_EMPTY,
+ MCDT_DAC_FIFO_AF,
+ MCDT_DAC_FIFO_AE
+};
+
+struct sprd_mcdt_dev {
+ struct device *dev;
+ void __iomem *base;
+ spinlock_t lock;
+ struct sprd_mcdt_chan chan[MCDT_CHANNEL_NUM];
+};
+
+static LIST_HEAD(sprd_mcdt_chan_list);
+static DEFINE_MUTEX(sprd_mcdt_list_mutex);
+
+static void sprd_mcdt_update(struct sprd_mcdt_dev *mcdt, u32 reg, u32 val,
+ u32 mask)
+{
+ u32 orig = readl_relaxed(mcdt->base + reg);
+ u32 tmp;
+
+ tmp = (orig & ~mask) | val;
+ writel_relaxed(tmp, mcdt->base + reg);
+}
+
+static void sprd_mcdt_dac_set_watermark(struct sprd_mcdt_dev *mcdt, u8 channel,
+ u32 full, u32 empty)
+{
+ u32 reg = MCDT_DAC0_WTMK + channel * 4;
+ u32 water_mark =
+ (empty << MCDT_CH_FIFO_AE_SHIFT) & MCDT_CH_FIFO_AE_MASK;
+
+ water_mark |= full & MCDT_CH_FIFO_AF_MASK;
+ sprd_mcdt_update(mcdt, reg, water_mark,
+ MCDT_CH_FIFO_AE_MASK | MCDT_CH_FIFO_AF_MASK);
+}
+
+static void sprd_mcdt_adc_set_watermark(struct sprd_mcdt_dev *mcdt, u8 channel,
+ u32 full, u32 empty)
+{
+ u32 reg = MCDT_ADC0_WTMK + channel * 4;
+ u32 water_mark =
+ (empty << MCDT_CH_FIFO_AE_SHIFT) & MCDT_CH_FIFO_AE_MASK;
+
+ water_mark |= full & MCDT_CH_FIFO_AF_MASK;
+ sprd_mcdt_update(mcdt, reg, water_mark,
+ MCDT_CH_FIFO_AE_MASK | MCDT_CH_FIFO_AF_MASK);
+}
+
+static void sprd_mcdt_dac_dma_enable(struct sprd_mcdt_dev *mcdt, u8 channel,
+ bool enable)
+{
+ u32 shift = MCDT_DAC_DMA_SHIFT + channel;
+
+ if (enable)
+ sprd_mcdt_update(mcdt, MCDT_DMA_EN, BIT(shift), BIT(shift));
+ else
+ sprd_mcdt_update(mcdt, MCDT_DMA_EN, 0, BIT(shift));
+}
+
+static void sprd_mcdt_adc_dma_enable(struct sprd_mcdt_dev *mcdt, u8 channel,
+ bool enable)
+{
+ if (enable)
+ sprd_mcdt_update(mcdt, MCDT_DMA_EN, BIT(channel), BIT(channel));
+ else
+ sprd_mcdt_update(mcdt, MCDT_DMA_EN, 0, BIT(channel));
+}
+
+static void sprd_mcdt_ap_int_enable(struct sprd_mcdt_dev *mcdt, u8 channel,
+ bool enable)
+{
+ if (enable)
+ sprd_mcdt_update(mcdt, MCDT_INT_MSK_CFG0, BIT(channel),
+ BIT(channel));
+ else
+ sprd_mcdt_update(mcdt, MCDT_INT_MSK_CFG0, 0, BIT(channel));
+}
+
+static void sprd_mcdt_dac_write_fifo(struct sprd_mcdt_dev *mcdt, u8 channel,
+ u32 val)
+{
+ u32 reg = MCDT_CH0_TXD + channel * 4;
+
+ writel_relaxed(val, mcdt->base + reg);
+}
+
+static void sprd_mcdt_adc_read_fifo(struct sprd_mcdt_dev *mcdt, u8 channel,
+ u32 *val)
+{
+ u32 reg = MCDT_CH0_RXD + channel * 4;
+
+ *val = readl_relaxed(mcdt->base + reg);
+}
+
+static void sprd_mcdt_dac_dma_chn_select(struct sprd_mcdt_dev *mcdt, u8 channel,
+ enum sprd_mcdt_dma_chan dma_chan)
+{
+ switch (dma_chan) {
+ case SPRD_MCDT_DMA_CH0:
+ sprd_mcdt_update(mcdt, MCDT_DMA_CFG0,
+ channel << MCDT_DMA_CH0_SEL_SHIFT,
+ MCDT_DMA_CH0_SEL_MASK);
+ break;
+
+ case SPRD_MCDT_DMA_CH1:
+ sprd_mcdt_update(mcdt, MCDT_DMA_CFG0,
+ channel << MCDT_DMA_CH1_SEL_SHIFT,
+ MCDT_DMA_CH1_SEL_MASK);
+ break;
+
+ case SPRD_MCDT_DMA_CH2:
+ sprd_mcdt_update(mcdt, MCDT_DMA_CFG0,
+ channel << MCDT_DMA_CH2_SEL_SHIFT,
+ MCDT_DMA_CH2_SEL_MASK);
+ break;
+
+ case SPRD_MCDT_DMA_CH3:
+ sprd_mcdt_update(mcdt, MCDT_DMA_CFG0,
+ channel << MCDT_DMA_CH3_SEL_SHIFT,
+ MCDT_DMA_CH3_SEL_MASK);
+ break;
+
+ case SPRD_MCDT_DMA_CH4:
+ sprd_mcdt_update(mcdt, MCDT_DMA_CFG0,
+ channel << MCDT_DMA_CH4_SEL_SHIFT,
+ MCDT_DMA_CH4_SEL_MASK);
+ break;
+ }
+}
+
+static void sprd_mcdt_adc_dma_chn_select(struct sprd_mcdt_dev *mcdt, u8 channel,
+ enum sprd_mcdt_dma_chan dma_chan)
+{
+ switch (dma_chan) {
+ case SPRD_MCDT_DMA_CH0:
+ sprd_mcdt_update(mcdt, MCDT_DMA_CFG1,
+ channel << MCDT_DMA_CH0_SEL_SHIFT,
+ MCDT_DMA_CH0_SEL_MASK);
+ break;
+
+ case SPRD_MCDT_DMA_CH1:
+ sprd_mcdt_update(mcdt, MCDT_DMA_CFG1,
+ channel << MCDT_DMA_CH1_SEL_SHIFT,
+ MCDT_DMA_CH1_SEL_MASK);
+ break;
+
+ case SPRD_MCDT_DMA_CH2:
+ sprd_mcdt_update(mcdt, MCDT_DMA_CFG1,
+ channel << MCDT_DMA_CH2_SEL_SHIFT,
+ MCDT_DMA_CH2_SEL_MASK);
+ break;
+
+ case SPRD_MCDT_DMA_CH3:
+ sprd_mcdt_update(mcdt, MCDT_DMA_CFG1,
+ channel << MCDT_DMA_CH3_SEL_SHIFT,
+ MCDT_DMA_CH3_SEL_MASK);
+ break;
+
+ case SPRD_MCDT_DMA_CH4:
+ sprd_mcdt_update(mcdt, MCDT_DMA_CFG1,
+ channel << MCDT_DMA_CH4_SEL_SHIFT,
+ MCDT_DMA_CH4_SEL_MASK);
+ break;
+ }
+}
+
+static u32 sprd_mcdt_dma_ack_shift(u8 channel)
+{
+ switch (channel) {
+ default:
+ case 0:
+ case 8:
+ return 0;
+ case 1:
+ case 9:
+ return 4;
+ case 2:
+ return 8;
+ case 3:
+ return 12;
+ case 4:
+ return 16;
+ case 5:
+ return 20;
+ case 6:
+ return 24;
+ case 7:
+ return 28;
+ }
+}
+
+static void sprd_mcdt_dac_dma_ack_select(struct sprd_mcdt_dev *mcdt, u8 channel,
+ enum sprd_mcdt_dma_chan dma_chan)
+{
+ u32 reg, shift = sprd_mcdt_dma_ack_shift(channel), ack = dma_chan;
+
+ switch (channel) {
+ case 0 ... 7:
+ reg = MCDT_DMA_CFG2;
+ break;
+
+ case 8 ... 9:
+ reg = MCDT_DMA_CFG3;
+ break;
+
+ default:
+ return;
+ }
+
+ sprd_mcdt_update(mcdt, reg, ack << shift,
+ MCDT_DMA_ACK_SEL_MASK << shift);
+}
+
+static void sprd_mcdt_adc_dma_ack_select(struct sprd_mcdt_dev *mcdt, u8 channel,
+ enum sprd_mcdt_dma_chan dma_chan)
+{
+ u32 reg, shift = sprd_mcdt_dma_ack_shift(channel), ack = dma_chan;
+
+ switch (channel) {
+ case 0 ... 7:
+ reg = MCDT_DMA_CFG4;
+ break;
+
+ case 8 ... 9:
+ reg = MCDT_DMA_CFG5;
+ break;
+
+ default:
+ return;
+ }
+
+ sprd_mcdt_update(mcdt, reg, ack << shift,
+ MCDT_DMA_ACK_SEL_MASK << shift);
+}
+
+static bool sprd_mcdt_chan_fifo_sts(struct sprd_mcdt_dev *mcdt, u8 channel,
+ enum sprd_mcdt_fifo_sts fifo_sts)
+{
+ u32 reg, shift;
+
+ switch (channel) {
+ case 0 ... 3:
+ reg = MCDT_CH_FIFO_ST0;
+ break;
+ case 4 ... 7:
+ reg = MCDT_CH_FIFO_ST1;
+ break;
+ case 8 ... 9:
+ reg = MCDT_CH_FIFO_ST2;
+ break;
+ default:
+ return false;
+ }
+
+ switch (channel) {
+ case 0:
+ case 4:
+ case 8:
+ shift = fifo_sts;
+ break;
+
+ case 1:
+ case 5:
+ case 9:
+ shift = 8 + fifo_sts;
+ break;
+
+ case 2:
+ case 6:
+ shift = 16 + fifo_sts;
+ break;
+
+ case 3:
+ case 7:
+ shift = 24 + fifo_sts;
+ break;
+
+ default:
+ return false;
+ }
+
+ return !!(readl_relaxed(mcdt->base + reg) & BIT(shift));
+}
+
+static void sprd_mcdt_dac_fifo_clear(struct sprd_mcdt_dev *mcdt, u8 channel)
+{
+ sprd_mcdt_update(mcdt, MCDT_FIFO_CLR, BIT(channel), BIT(channel));
+}
+
+static void sprd_mcdt_adc_fifo_clear(struct sprd_mcdt_dev *mcdt, u8 channel)
+{
+ u32 shift = MCDT_ADC_FIFO_SHIFT + channel;
+
+ sprd_mcdt_update(mcdt, MCDT_FIFO_CLR, BIT(shift), BIT(shift));
+}
+
+static u32 sprd_mcdt_dac_fifo_avail(struct sprd_mcdt_dev *mcdt, u8 channel)
+{
+ u32 reg = MCDT_DAC0_FIFO_ADDR_ST + channel * 8;
+ u32 r_addr = (readl_relaxed(mcdt->base + reg) >>
+ MCDT_CH_FIFO_ADDR_SHIFT) & MCDT_CH_FIFO_ADDR_MASK;
+ u32 w_addr = readl_relaxed(mcdt->base + reg) & MCDT_CH_FIFO_ADDR_MASK;
+
+ if (w_addr >= r_addr)
+ return 4 * (MCDT_FIFO_LENGTH - w_addr + r_addr);
+ else
+ return 4 * (r_addr - w_addr);
+}
+
+static u32 sprd_mcdt_adc_fifo_avail(struct sprd_mcdt_dev *mcdt, u8 channel)
+{
+ u32 reg = MCDT_ADC0_FIFO_ADDR_ST + channel * 8;
+ u32 r_addr = (readl_relaxed(mcdt->base + reg) >>
+ MCDT_CH_FIFO_ADDR_SHIFT) & MCDT_CH_FIFO_ADDR_MASK;
+ u32 w_addr = readl_relaxed(mcdt->base + reg) & MCDT_CH_FIFO_ADDR_MASK;
+
+ if (w_addr >= r_addr)
+ return 4 * (w_addr - r_addr);
+ else
+ return 4 * (MCDT_FIFO_LENGTH - r_addr + w_addr);
+}
+
+static u32 sprd_mcdt_int_type_shift(u8 channel,
+ enum sprd_mcdt_fifo_int int_type)
+{
+ switch (channel) {
+ case 0:
+ case 4:
+ case 8:
+ return int_type;
+
+ case 1:
+ case 5:
+ case 9:
+ return 8 + int_type;
+
+ case 2:
+ case 6:
+ return 16 + int_type;
+
+ case 3:
+ case 7:
+ return 24 + int_type;
+
+ default:
+ return 0;
+ }
+}
+
+static void sprd_mcdt_chan_int_en(struct sprd_mcdt_dev *mcdt, u8 channel,
+ enum sprd_mcdt_fifo_int int_type, bool enable)
+{
+ u32 reg, shift = sprd_mcdt_int_type_shift(channel, int_type);
+
+ switch (channel) {
+ case 0 ... 3:
+ reg = MCDT_INT_EN0;
+ break;
+ case 4 ... 7:
+ reg = MCDT_INT_EN1;
+ break;
+ case 8 ... 9:
+ reg = MCDT_INT_EN2;
+ break;
+ default:
+ return;
+ }
+
+ if (enable)
+ sprd_mcdt_update(mcdt, reg, BIT(shift), BIT(shift));
+ else
+ sprd_mcdt_update(mcdt, reg, 0, BIT(shift));
+}
+
+static void sprd_mcdt_chan_int_clear(struct sprd_mcdt_dev *mcdt, u8 channel,
+ enum sprd_mcdt_fifo_int int_type)
+{
+ u32 reg, shift = sprd_mcdt_int_type_shift(channel, int_type);
+
+ switch (channel) {
+ case 0 ... 3:
+ reg = MCDT_INT_CLR0;
+ break;
+ case 4 ... 7:
+ reg = MCDT_INT_CLR1;
+ break;
+ case 8 ... 9:
+ reg = MCDT_INT_CLR2;
+ break;
+ default:
+ return;
+ }
+
+ sprd_mcdt_update(mcdt, reg, BIT(shift), BIT(shift));
+}
+
+static bool sprd_mcdt_chan_int_sts(struct sprd_mcdt_dev *mcdt, u8 channel,
+ enum sprd_mcdt_fifo_int int_type)
+{
+ u32 reg, shift = sprd_mcdt_int_type_shift(channel, int_type);
+
+ switch (channel) {
+ case 0 ... 3:
+ reg = MCDT_INT_MSK1;
+ break;
+ case 4 ... 7:
+ reg = MCDT_INT_MSK2;
+ break;
+ case 8 ... 9:
+ reg = MCDT_INT_MSK3;
+ break;
+ default:
+ return false;
+ }
+
+ return !!(readl_relaxed(mcdt->base + reg) & BIT(shift));
+}
+
+static irqreturn_t sprd_mcdt_irq_handler(int irq, void *dev_id)
+{
+ struct sprd_mcdt_dev *mcdt = (struct sprd_mcdt_dev *)dev_id;
+ int i;
+
+ spin_lock(&mcdt->lock);
+
+ for (i = 0; i < MCDT_ADC_CHANNEL_NUM; i++) {
+ if (sprd_mcdt_chan_int_sts(mcdt, i, MCDT_ADC_FIFO_AF_INT)) {
+ struct sprd_mcdt_chan *chan = &mcdt->chan[i];
+
+ sprd_mcdt_chan_int_clear(mcdt, i, MCDT_ADC_FIFO_AF_INT);
+ if (chan->cb)
+ chan->cb->notify(chan->cb->data);
+ }
+ }
+
+ for (i = 0; i < MCDT_DAC_CHANNEL_NUM; i++) {
+ if (sprd_mcdt_chan_int_sts(mcdt, i, MCDT_DAC_FIFO_AE_INT)) {
+ struct sprd_mcdt_chan *chan =
+ &mcdt->chan[i + MCDT_ADC_CHANNEL_NUM];
+
+ sprd_mcdt_chan_int_clear(mcdt, i, MCDT_DAC_FIFO_AE_INT);
+ if (chan->cb)
+ chan->cb->notify(chan->cb->data);
+ }
+ }
+
+ spin_unlock(&mcdt->lock);
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * sprd_mcdt_chan_write - write data to the MCDT channel's fifo
+ * @chan: the MCDT channel
+ * @tx_buf: send buffer
+ * @size: data size
+ *
+ * Note: We can not write data to the channel fifo when the DMA mode is
+ * enabled, otherwise the channel fifo data will be invalid.
+ *
+ * If there is not enough space in the channel fifo, an error is returned
+ * to users.
+ *
+ * Returns 0 on success, or an appropriate error code on failure.
+ */
+int sprd_mcdt_chan_write(struct sprd_mcdt_chan *chan, char *tx_buf, u32 size)
+{
+ struct sprd_mcdt_dev *mcdt = chan->mcdt;
+ unsigned long flags;
+ int avail, i = 0, words = size / 4;
+ u32 *buf = (u32 *)tx_buf;
+
+ spin_lock_irqsave(&mcdt->lock, flags);
+
+ if (chan->dma_enable) {
+ dev_err(mcdt->dev,
+ "Can not write data when DMA mode enabled\n");
+ spin_unlock_irqrestore(&mcdt->lock, flags);
+ return -EINVAL;
+ }
+
+ if (sprd_mcdt_chan_fifo_sts(mcdt, chan->id, MCDT_DAC_FIFO_REAL_FULL)) {
+ dev_err(mcdt->dev, "Channel fifo is full now\n");
+ spin_unlock_irqrestore(&mcdt->lock, flags);
+ return -EBUSY;
+ }
+
+ avail = sprd_mcdt_dac_fifo_avail(mcdt, chan->id);
+ if (size > avail) {
+ dev_err(mcdt->dev,
+ "Data size is larger than the available fifo size\n");
+ spin_unlock_irqrestore(&mcdt->lock, flags);
+ return -EBUSY;
+ }
+
+ while (i++ < words)
+ sprd_mcdt_dac_write_fifo(mcdt, chan->id, *buf++);
+
+ spin_unlock_irqrestore(&mcdt->lock, flags);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(sprd_mcdt_chan_write);
+
+/**
+ * sprd_mcdt_chan_read - read data from the MCDT channel's fifo
+ * @chan: the MCDT channel
+ * @rx_buf: receive buffer
+ * @size: data size
+ *
+ * Note: We can not read data from the channel fifo when the DMA mode is
+ * enabled, otherwise the data read will be invalid.
+ *
+ * Usually users should start reading data after receiving the fifo almost
+ * full interrupt.
+ *
+ * Returns the number of bytes read on success, or an error code on failure.
+ */
+int sprd_mcdt_chan_read(struct sprd_mcdt_chan *chan, char *rx_buf, u32 size)
+{
+ struct sprd_mcdt_dev *mcdt = chan->mcdt;
+ unsigned long flags;
+ int i = 0, avail, words = size / 4;
+ u32 *buf = (u32 *)rx_buf;
+
+ spin_lock_irqsave(&mcdt->lock, flags);
+
+ if (chan->dma_enable) {
+ dev_err(mcdt->dev, "Can not read data when DMA mode enabled\n");
+ spin_unlock_irqrestore(&mcdt->lock, flags);
+ return -EINVAL;
+ }
+
+ if (sprd_mcdt_chan_fifo_sts(mcdt, chan->id, MCDT_ADC_FIFO_REAL_EMPTY)) {
+ dev_err(mcdt->dev, "Channel fifo is empty\n");
+ spin_unlock_irqrestore(&mcdt->lock, flags);
+ return -EBUSY;
+ }
+
+ avail = sprd_mcdt_adc_fifo_avail(mcdt, chan->id);
+ if (size > avail)
+ words = avail / 4;
+
+ while (i++ < words)
+ sprd_mcdt_adc_read_fifo(mcdt, chan->id, buf++);
+
+ spin_unlock_irqrestore(&mcdt->lock, flags);
+ return words * 4;
+}
+EXPORT_SYMBOL_GPL(sprd_mcdt_chan_read);
+
+/**
+ * sprd_mcdt_chan_int_enable - enable the interrupt mode for the MCDT channel
+ * @chan: the MCDT channel
+ * @water_mark: water mark to trigger an interrupt
+ * @cb: callback invoked when an interrupt happens
+ *
+ * For now this only enables the fifo almost full interrupt for an ADC channel
+ * and the fifo almost empty interrupt for a DAC channel. Moreover, in
+ * interrupt mode users should use sprd_mcdt_chan_read() or
+ * sprd_mcdt_chan_write() to read or write data manually.
+ *
+ * For an ADC channel, users can start reading data after receiving one fifo
+ * almost full interrupt. For a DAC channel, users can start writing data after
+ * receiving one fifo almost empty interrupt, or just call
+ * sprd_mcdt_chan_write() to write data directly.
+ *
+ * Returns 0 on success, or an error code on failure.
+ */
+int sprd_mcdt_chan_int_enable(struct sprd_mcdt_chan *chan, u32 water_mark,
+ struct sprd_mcdt_chan_callback *cb)
+{
+ struct sprd_mcdt_dev *mcdt = chan->mcdt;
+ unsigned long flags;
+ int ret = 0;
+
+ spin_lock_irqsave(&mcdt->lock, flags);
+
+ if (chan->dma_enable || chan->int_enable) {
+ dev_err(mcdt->dev, "Failed to set interrupt mode.\n");
+ spin_unlock_irqrestore(&mcdt->lock, flags);
+ return -EINVAL;
+ }
+
+ switch (chan->type) {
+ case SPRD_MCDT_ADC_CHAN:
+ sprd_mcdt_adc_fifo_clear(mcdt, chan->id);
+ sprd_mcdt_adc_set_watermark(mcdt, chan->id, water_mark,
+ MCDT_FIFO_LENGTH - 1);
+ sprd_mcdt_chan_int_en(mcdt, chan->id,
+ MCDT_ADC_FIFO_AF_INT, true);
+ sprd_mcdt_ap_int_enable(mcdt, chan->id, true);
+ break;
+
+ case SPRD_MCDT_DAC_CHAN:
+ sprd_mcdt_dac_fifo_clear(mcdt, chan->id);
+ sprd_mcdt_dac_set_watermark(mcdt, chan->id,
+ MCDT_FIFO_LENGTH - 1, water_mark);
+ sprd_mcdt_chan_int_en(mcdt, chan->id,
+ MCDT_DAC_FIFO_AE_INT, true);
+ sprd_mcdt_ap_int_enable(mcdt, chan->id, true);
+ break;
+
+ default:
+ dev_err(mcdt->dev, "Unsupported channel type\n");
+ ret = -EINVAL;
+ }
+
+ if (!ret) {
+ chan->cb = cb;
+ chan->int_enable = true;
+ }
+
+ spin_unlock_irqrestore(&mcdt->lock, flags);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(sprd_mcdt_chan_int_enable);
+
+/**
+ * sprd_mcdt_chan_int_disable - disable the interrupt mode for the MCDT channel
+ * @chan: the MCDT channel
+ */
+void sprd_mcdt_chan_int_disable(struct sprd_mcdt_chan *chan)
+{
+ struct sprd_mcdt_dev *mcdt = chan->mcdt;
+ unsigned long flags;
+
+ spin_lock_irqsave(&mcdt->lock, flags);
+
+ if (!chan->int_enable) {
+ spin_unlock_irqrestore(&mcdt->lock, flags);
+ return;
+ }
+
+ switch (chan->type) {
+ case SPRD_MCDT_ADC_CHAN:
+ sprd_mcdt_chan_int_en(mcdt, chan->id,
+ MCDT_ADC_FIFO_AF_INT, false);
+ sprd_mcdt_chan_int_clear(mcdt, chan->id, MCDT_ADC_FIFO_AF_INT);
+ sprd_mcdt_ap_int_enable(mcdt, chan->id, false);
+ break;
+
+ case SPRD_MCDT_DAC_CHAN:
+ sprd_mcdt_chan_int_en(mcdt, chan->id,
+ MCDT_DAC_FIFO_AE_INT, false);
+ sprd_mcdt_chan_int_clear(mcdt, chan->id, MCDT_DAC_FIFO_AE_INT);
+ sprd_mcdt_ap_int_enable(mcdt, chan->id, false);
+ break;
+
+ default:
+ break;
+ }
+
+ chan->int_enable = false;
+ spin_unlock_irqrestore(&mcdt->lock, flags);
+}
+EXPORT_SYMBOL_GPL(sprd_mcdt_chan_int_disable);
+
+/**
+ * sprd_mcdt_chan_dma_enable - enable the DMA mode for the MCDT channel
+ * @chan: the MCDT channel
+ * @dma_chan: specify which DMA channel will be used for this MCDT channel
+ * @water_mark: water mark to trigger a DMA request
+ *
+ * Enable the DMA mode for the MCDT channel, which means DMA can be used to
+ * transfer data to the channel fifo without reading or writing data
+ * manually.
+ *
+ * Returns 0 on success, or an error code on failure.
+ */
+int sprd_mcdt_chan_dma_enable(struct sprd_mcdt_chan *chan,
+ enum sprd_mcdt_dma_chan dma_chan,
+ u32 water_mark)
+{
+ struct sprd_mcdt_dev *mcdt = chan->mcdt;
+ unsigned long flags;
+ int ret = 0;
+
+ spin_lock_irqsave(&mcdt->lock, flags);
+
+ if (chan->dma_enable || chan->int_enable ||
+ dma_chan > SPRD_MCDT_DMA_CH4) {
+ dev_err(mcdt->dev, "Failed to set DMA mode\n");
+ spin_unlock_irqrestore(&mcdt->lock, flags);
+ return -EINVAL;
+ }
+
+ switch (chan->type) {
+ case SPRD_MCDT_ADC_CHAN:
+ sprd_mcdt_adc_fifo_clear(mcdt, chan->id);
+ sprd_mcdt_adc_set_watermark(mcdt, chan->id,
+ water_mark, MCDT_FIFO_LENGTH - 1);
+ sprd_mcdt_adc_dma_enable(mcdt, chan->id, true);
+ sprd_mcdt_adc_dma_chn_select(mcdt, chan->id, dma_chan);
+ sprd_mcdt_adc_dma_ack_select(mcdt, chan->id, dma_chan);
+ break;
+
+ case SPRD_MCDT_DAC_CHAN:
+ sprd_mcdt_dac_fifo_clear(mcdt, chan->id);
+ sprd_mcdt_dac_set_watermark(mcdt, chan->id,
+ MCDT_FIFO_LENGTH - 1, water_mark);
+ sprd_mcdt_dac_dma_enable(mcdt, chan->id, true);
+ sprd_mcdt_dac_dma_chn_select(mcdt, chan->id, dma_chan);
+ sprd_mcdt_dac_dma_ack_select(mcdt, chan->id, dma_chan);
+ break;
+
+ default:
+ dev_err(mcdt->dev, "Unsupported channel type\n");
+ ret = -EINVAL;
+ }
+
+ if (!ret)
+ chan->dma_enable = true;
+
+ spin_unlock_irqrestore(&mcdt->lock, flags);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(sprd_mcdt_chan_dma_enable);
+
+/**
+ * sprd_mcdt_chan_dma_disable - disable the DMA mode for the MCDT channel
+ * @chan: the MCDT channel
+ */
+void sprd_mcdt_chan_dma_disable(struct sprd_mcdt_chan *chan)
+{
+ struct sprd_mcdt_dev *mcdt = chan->mcdt;
+ unsigned long flags;
+
+ spin_lock_irqsave(&mcdt->lock, flags);
+
+ if (!chan->dma_enable) {
+ spin_unlock_irqrestore(&mcdt->lock, flags);
+ return;
+ }
+
+ switch (chan->type) {
+ case SPRD_MCDT_ADC_CHAN:
+ sprd_mcdt_adc_dma_enable(mcdt, chan->id, false);
+ sprd_mcdt_adc_fifo_clear(mcdt, chan->id);
+ break;
+
+ case SPRD_MCDT_DAC_CHAN:
+ sprd_mcdt_dac_dma_enable(mcdt, chan->id, false);
+ sprd_mcdt_dac_fifo_clear(mcdt, chan->id);
+ break;
+
+ default:
+ break;
+ }
+
+ chan->dma_enable = false;
+ spin_unlock_irqrestore(&mcdt->lock, flags);
+}
+EXPORT_SYMBOL_GPL(sprd_mcdt_chan_dma_disable);
+
+/**
+ * sprd_mcdt_request_chan - request one MCDT channel
+ * @channel: channel id
+ * @type: channel type, it can be one ADC channel or DAC channel
+ *
+ * Returns NULL if there is no available channel.
+ */
+struct sprd_mcdt_chan *sprd_mcdt_request_chan(u8 channel,
+ enum sprd_mcdt_channel_type type)
+{
+ struct sprd_mcdt_chan *temp;
+
+ mutex_lock(&sprd_mcdt_list_mutex);
+
+ list_for_each_entry(temp, &sprd_mcdt_chan_list, list) {
+ if (temp->type == type && temp->id == channel) {
+ list_del_init(&temp->list);
+ break;
+ }
+ }
+
+ if (list_entry_is_head(temp, &sprd_mcdt_chan_list, list))
+ temp = NULL;
+
+ mutex_unlock(&sprd_mcdt_list_mutex);
+
+ return temp;
+}
+EXPORT_SYMBOL_GPL(sprd_mcdt_request_chan);
+
+/**
+ * sprd_mcdt_free_chan - free one MCDT channel
+ * @chan: the channel to be freed
+ */
+void sprd_mcdt_free_chan(struct sprd_mcdt_chan *chan)
+{
+ struct sprd_mcdt_chan *temp;
+
+ sprd_mcdt_chan_dma_disable(chan);
+ sprd_mcdt_chan_int_disable(chan);
+
+ mutex_lock(&sprd_mcdt_list_mutex);
+
+ list_for_each_entry(temp, &sprd_mcdt_chan_list, list) {
+ if (temp == chan) {
+ mutex_unlock(&sprd_mcdt_list_mutex);
+ return;
+ }
+ }
+
+ list_add_tail(&chan->list, &sprd_mcdt_chan_list);
+ mutex_unlock(&sprd_mcdt_list_mutex);
+}
+EXPORT_SYMBOL_GPL(sprd_mcdt_free_chan);
+
+static void sprd_mcdt_init_chans(struct sprd_mcdt_dev *mcdt,
+ struct resource *res)
+{
+ int i;
+
+ for (i = 0; i < MCDT_CHANNEL_NUM; i++) {
+ struct sprd_mcdt_chan *chan = &mcdt->chan[i];
+
+ if (i < MCDT_ADC_CHANNEL_NUM) {
+ chan->id = i;
+ chan->type = SPRD_MCDT_ADC_CHAN;
+ chan->fifo_phys = res->start + MCDT_CH0_RXD + i * 4;
+ } else {
+ chan->id = i - MCDT_ADC_CHANNEL_NUM;
+ chan->type = SPRD_MCDT_DAC_CHAN;
+ chan->fifo_phys = res->start + MCDT_CH0_TXD +
+ (i - MCDT_ADC_CHANNEL_NUM) * 4;
+ }
+
+ chan->mcdt = mcdt;
+ INIT_LIST_HEAD(&chan->list);
+
+ mutex_lock(&sprd_mcdt_list_mutex);
+ list_add_tail(&chan->list, &sprd_mcdt_chan_list);
+ mutex_unlock(&sprd_mcdt_list_mutex);
+ }
+}
+
+static int sprd_mcdt_probe(struct platform_device *pdev)
+{
+ struct sprd_mcdt_dev *mcdt;
+ struct resource *res;
+ int ret, irq;
+
+ mcdt = devm_kzalloc(&pdev->dev, sizeof(*mcdt), GFP_KERNEL);
+ if (!mcdt)
+ return -ENOMEM;
+
+ mcdt->base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
+ if (IS_ERR(mcdt->base))
+ return PTR_ERR(mcdt->base);
+
+ mcdt->dev = &pdev->dev;
+ spin_lock_init(&mcdt->lock);
+ platform_set_drvdata(pdev, mcdt);
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
+
+ ret = devm_request_irq(&pdev->dev, irq, sprd_mcdt_irq_handler,
+ 0, "sprd-mcdt", mcdt);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to request MCDT IRQ\n");
+ return ret;
+ }
+
+ sprd_mcdt_init_chans(mcdt, res);
+
+ return 0;
+}
+
+static void sprd_mcdt_remove(struct platform_device *pdev)
+{
+ struct sprd_mcdt_chan *chan, *temp;
+
+ mutex_lock(&sprd_mcdt_list_mutex);
+
+ list_for_each_entry_safe(chan, temp, &sprd_mcdt_chan_list, list)
+ list_del(&chan->list);
+
+ mutex_unlock(&sprd_mcdt_list_mutex);
+}
+
+static const struct of_device_id sprd_mcdt_of_match[] = {
+ { .compatible = "sprd,sc9860-mcdt", },
+ { }
+};
+MODULE_DEVICE_TABLE(of, sprd_mcdt_of_match);
+
+static struct platform_driver sprd_mcdt_driver = {
+ .probe = sprd_mcdt_probe,
+ .remove_new = sprd_mcdt_remove,
+ .driver = {
+ .name = "sprd-mcdt",
+ .of_match_table = sprd_mcdt_of_match,
+ },
+};
+
+module_platform_driver(sprd_mcdt_driver);
+
+MODULE_DESCRIPTION("Spreadtrum Multi-Channel Data Transfer Driver");
+MODULE_LICENSE("GPL v2");
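
A minimal usage sketch of the interrupt-mode API above, assuming a hypothetical client driver; it is not part of the patch itself. It requests DAC channel 0, enables the almost-empty interrupt with an arbitrary 160-word watermark, and refills the fifo from a work item, since the notify callback runs in the MCDT hard-IRQ handler with the controller lock held. All example_* names, the channel id and the watermark are invented for illustration.

#include <linux/errno.h>
#include <linux/workqueue.h>

#include "sprd-mcdt.h"

static struct sprd_mcdt_chan *example_chan;
static struct work_struct example_refill_work;
static char example_pcm[160 * 4];

/* Refill the DAC fifo outside of hard-IRQ context. */
static void example_refill(struct work_struct *work)
{
	sprd_mcdt_chan_write(example_chan, example_pcm, sizeof(example_pcm));
}

/* Invoked from the MCDT IRQ handler once the DAC fifo is almost empty. */
static void example_fifo_notify(void *data)
{
	schedule_work(&example_refill_work);
}

static struct sprd_mcdt_chan_callback example_cb = {
	.notify = example_fifo_notify,
};

static int example_start(void)
{
	example_chan = sprd_mcdt_request_chan(0, SPRD_MCDT_DAC_CHAN);
	if (!example_chan)
		return -EBUSY;

	INIT_WORK(&example_refill_work, example_refill);

	/* Ask to be notified once fewer than 160 words remain in the fifo. */
	return sprd_mcdt_chan_int_enable(example_chan, 160, &example_cb);
}
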
diff --git a/sound/soc/sprd/sprd-mcdt.h b/sound/soc/sprd/sprd-mcdt.h
new file mode 100644
index 0000000000..679e3af3ba
--- /dev/null
+++ b/sound/soc/sprd/sprd-mcdt.h
@@ -0,0 +1,107 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#ifndef __SPRD_MCDT_H
+#define __SPRD_MCDT_H
+
+enum sprd_mcdt_channel_type {
+ SPRD_MCDT_DAC_CHAN,
+ SPRD_MCDT_ADC_CHAN,
+ SPRD_MCDT_UNKNOWN_CHAN,
+};
+
+enum sprd_mcdt_dma_chan {
+ SPRD_MCDT_DMA_CH0,
+ SPRD_MCDT_DMA_CH1,
+ SPRD_MCDT_DMA_CH2,
+ SPRD_MCDT_DMA_CH3,
+ SPRD_MCDT_DMA_CH4,
+};
+
+struct sprd_mcdt_chan_callback {
+ void (*notify)(void *data);
+ void *data;
+};
+
+/**
+ * struct sprd_mcdt_chan - this struct represents a single channel instance
+ * @mcdt: the mcdt controller
+ * @id: channel id
+ * @fifo_phys: channel fifo physical address which is used for DMA transfer
+ * @type: channel type
+ * @cb: channel fifo interrupt's callback interface to notify the fifo events
+ * @dma_enable: indicates whether DMA mode is used to transfer data
+ * @int_enable: indicates whether interrupt mode is used to notify users to
+ * read or write data manually
+ * @list: used to link into the global list
+ *
+ * Note: users should not modify any members of this structure.
+ */
+struct sprd_mcdt_chan {
+ struct sprd_mcdt_dev *mcdt;
+ u8 id;
+ unsigned long fifo_phys;
+ enum sprd_mcdt_channel_type type;
+ enum sprd_mcdt_dma_chan dma_chan;
+ struct sprd_mcdt_chan_callback *cb;
+ bool dma_enable;
+ bool int_enable;
+ struct list_head list;
+};
+
+#if IS_ENABLED(CONFIG_SND_SOC_SPRD_MCDT)
+struct sprd_mcdt_chan *sprd_mcdt_request_chan(u8 channel,
+ enum sprd_mcdt_channel_type type);
+void sprd_mcdt_free_chan(struct sprd_mcdt_chan *chan);
+
+int sprd_mcdt_chan_write(struct sprd_mcdt_chan *chan, char *tx_buf, u32 size);
+int sprd_mcdt_chan_read(struct sprd_mcdt_chan *chan, char *rx_buf, u32 size);
+int sprd_mcdt_chan_int_enable(struct sprd_mcdt_chan *chan, u32 water_mark,
+ struct sprd_mcdt_chan_callback *cb);
+void sprd_mcdt_chan_int_disable(struct sprd_mcdt_chan *chan);
+
+int sprd_mcdt_chan_dma_enable(struct sprd_mcdt_chan *chan,
+ enum sprd_mcdt_dma_chan dma_chan, u32 water_mark);
+void sprd_mcdt_chan_dma_disable(struct sprd_mcdt_chan *chan);
+
+#else
+
+struct sprd_mcdt_chan *sprd_mcdt_request_chan(u8 channel,
+ enum sprd_mcdt_channel_type type)
+{
+ return NULL;
+}
+
+void sprd_mcdt_free_chan(struct sprd_mcdt_chan *chan)
+{ }
+
+int sprd_mcdt_chan_write(struct sprd_mcdt_chan *chan, char *tx_buf, u32 size)
+{
+ return -EINVAL;
+}
+
+int sprd_mcdt_chan_read(struct sprd_mcdt_chan *chan, char *rx_buf, u32 size)
+{
+ return 0;
+}
+
+int sprd_mcdt_chan_int_enable(struct sprd_mcdt_chan *chan, u32 water_mark,
+ struct sprd_mcdt_chan_callback *cb)
+{
+ return -EINVAL;
+}
+
+void sprd_mcdt_chan_int_disable(struct sprd_mcdt_chan *chan)
+{ }
+
+int sprd_mcdt_chan_dma_enable(struct sprd_mcdt_chan *chan,
+ enum sprd_mcdt_dma_chan dma_chan, u32 water_mark)
+{
+ return -EINVAL;
+}
+
+void sprd_mcdt_chan_dma_disable(struct sprd_mcdt_chan *chan)
+{ }
+
+#endif
+
+#endif /* __SPRD_MCDT_H */
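
The DMA-mode half of this API pairs with the dmaengine framework: the channel's fifo_phys member is the slave device address a client programs into its own DMA channel. Below is a minimal sketch of that pairing, assuming a hypothetical capture client; the ADC channel id, the 256-word watermark and the "adc_example" DMA channel name are illustrative only and do not appear in this patch.

#include <linux/device.h>
#include <linux/dmaengine.h>
#include <linux/err.h>

#include "sprd-mcdt.h"

static int example_capture_setup(struct device *dev)
{
	struct dma_slave_config cfg = { };
	struct sprd_mcdt_chan *chan;
	struct dma_chan *dma;
	int ret;

	chan = sprd_mcdt_request_chan(1, SPRD_MCDT_ADC_CHAN);
	if (!chan)
		return -EBUSY;

	/* Request DMA service once roughly 256 words have accumulated. */
	ret = sprd_mcdt_chan_dma_enable(chan, SPRD_MCDT_DMA_CH0, 256);
	if (ret)
		goto free_chan;

	dma = dma_request_chan(dev, "adc_example");
	if (IS_ERR(dma)) {
		ret = PTR_ERR(dma);
		goto disable_dma;
	}

	/* The channel fifo physical address is the DMA device address. */
	cfg.src_addr = chan->fifo_phys;
	cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	cfg.direction = DMA_DEV_TO_MEM;

	return dmaengine_slave_config(dma, &cfg);

disable_dma:
	sprd_mcdt_chan_dma_disable(chan);
free_chan:
	sprd_mcdt_free_chan(chan);
	return ret;
}
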
diff --git a/sound/soc/sprd/sprd-pcm-compress.c b/sound/soc/sprd/sprd-pcm-compress.c
new file mode 100644
index 0000000000..6507c03cc8
--- /dev/null
+++ b/sound/soc/sprd/sprd-pcm-compress.c
@@ -0,0 +1,671 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2019 Spreadtrum Communications Inc.
+
+#include <linux/dma-mapping.h>
+#include <linux/dmaengine.h>
+#include <linux/dma/sprd-dma.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+#include <sound/soc.h>
+#include <sound/compress_driver.h>
+
+#include "sprd-pcm-dma.h"
+
+#define SPRD_COMPR_DMA_CHANS 2
+
+/* Default values if userspace does not set them */
+#define SPRD_COMPR_MIN_FRAGMENT_SIZE SZ_8K
+#define SPRD_COMPR_MAX_FRAGMENT_SIZE SZ_128K
+#define SPRD_COMPR_MIN_NUM_FRAGMENTS 4
+#define SPRD_COMPR_MAX_NUM_FRAGMENTS 64
+
+/* DSP FIFO size */
+#define SPRD_COMPR_MCDT_EMPTY_WMK 0
+#define SPRD_COMPR_MCDT_FIFO_SIZE 512
+
+/* Stage 0 IRAM buffer size definition */
+#define SPRD_COMPR_IRAM_BUF_SIZE SZ_32K
+#define SPRD_COMPR_IRAM_INFO_SIZE (sizeof(struct sprd_compr_playinfo))
+#define SPRD_COMPR_IRAM_LINKLIST_SIZE (1024 - SPRD_COMPR_IRAM_INFO_SIZE)
+#define SPRD_COMPR_IRAM_SIZE (SPRD_COMPR_IRAM_BUF_SIZE + \
+ SPRD_COMPR_IRAM_INFO_SIZE + \
+ SPRD_COMPR_IRAM_LINKLIST_SIZE)
+
+/* Stage 1 DDR buffer size definition */
+#define SPRD_COMPR_AREA_BUF_SIZE SZ_2M
+#define SPRD_COMPR_AREA_LINKLIST_SIZE 1024
+#define SPRD_COMPR_AREA_SIZE (SPRD_COMPR_AREA_BUF_SIZE + \
+ SPRD_COMPR_AREA_LINKLIST_SIZE)
+
+struct sprd_compr_dma {
+ struct dma_chan *chan;
+ struct dma_async_tx_descriptor *desc;
+ dma_cookie_t cookie;
+ dma_addr_t phys;
+ void *virt;
+ int trans_len;
+};
+
+/*
+ * The Spreadtrum Audio compress offload mode uses a 2-stage DMA transfer to
+ * save power. That means we request 2 dma channels, one as the source channel
+ * and another one as the destination channel. Once the source channel's
+ * transaction is done, it triggers the destination channel's transaction
+ * automatically by a hardware signal.
+ *
+ * For the 2-stage DMA transfer, we allocate 2 buffers: the IRAM buffer (always
+ * powered on) and the DDR buffer. The source channel transfers data from the
+ * IRAM buffer to the DSP fifo for decoding/encoding; once the IRAM buffer has
+ * been drained, the destination channel starts to transfer data from the DDR
+ * buffer to the IRAM buffer.
+ *
+ * Since the DSP fifo is only 512B, the IRAM buffer is allocated as 32K, and
+ * the DDR buffer is larger at 2M. That means the AP system only needs to wake
+ * up to transfer data from DDR to IRAM once the 32K of IRAM data has been
+ * consumed; the rest of the time the AP system can be suspended to save power.
+ */
+struct sprd_compr_stream {
+ struct snd_compr_stream *cstream;
+ struct sprd_compr_ops *compr_ops;
+ struct sprd_compr_dma dma[SPRD_COMPR_DMA_CHANS];
+
+ /* DMA engine channel number */
+ int num_channels;
+
+ /* Stage 0 IRAM buffer */
+ struct snd_dma_buffer iram_buffer;
+ /* Stage 1 DDR buffer */
+ struct snd_dma_buffer compr_buffer;
+
+ /* DSP play information IRAM buffer */
+ dma_addr_t info_phys;
+ void *info_area;
+ int info_size;
+
+ /* Data size copied to IRAM buffer */
+ int copied_total;
+ /* Total received data size from userspace */
+ int received_total;
+ /* Stage 0 IRAM buffer received data size */
+ int received_stage0;
+ /* Stage 1 DDR buffer received data size */
+ int received_stage1;
+ /* Stage 1 DDR buffer pointer */
+ int stage1_pointer;
+};
+
+static int sprd_platform_compr_trigger(struct snd_soc_component *component,
+ struct snd_compr_stream *cstream,
+ int cmd);
+
+static void sprd_platform_compr_drain_notify(void *arg)
+{
+ struct snd_compr_stream *cstream = arg;
+ struct snd_compr_runtime *runtime = cstream->runtime;
+ struct sprd_compr_stream *stream = runtime->private_data;
+
+ memset(stream->info_area, 0, sizeof(struct sprd_compr_playinfo));
+
+ snd_compr_drain_notify(cstream);
+}
+
+static void sprd_platform_compr_dma_complete(void *data)
+{
+ struct snd_compr_stream *cstream = data;
+ struct snd_compr_runtime *runtime = cstream->runtime;
+ struct sprd_compr_stream *stream = runtime->private_data;
+ struct sprd_compr_dma *dma = &stream->dma[1];
+
+ /* Update data size copied to IRAM buffer */
+ stream->copied_total += dma->trans_len;
+ if (stream->copied_total > stream->received_total)
+ stream->copied_total = stream->received_total;
+
+ snd_compr_fragment_elapsed(cstream);
+}
+
+static int sprd_platform_compr_dma_config(struct snd_soc_component *component,
+ struct snd_compr_stream *cstream,
+ struct snd_compr_params *params,
+ int channel)
+{
+ struct snd_compr_runtime *runtime = cstream->runtime;
+ struct sprd_compr_stream *stream = runtime->private_data;
+ struct snd_soc_pcm_runtime *rtd = cstream->private_data;
+ struct device *dev = component->dev;
+ struct sprd_compr_data *data = snd_soc_dai_get_drvdata(asoc_rtd_to_cpu(rtd, 0));
+ struct sprd_pcm_dma_params *dma_params = data->dma_params;
+ struct sprd_compr_dma *dma = &stream->dma[channel];
+ struct dma_slave_config config = { };
+ struct sprd_dma_linklist link = { };
+ enum dma_transfer_direction dir;
+ struct scatterlist *sg, *sgt;
+ enum dma_slave_buswidth bus_width;
+ int period, period_cnt, sg_num = 2;
+ dma_addr_t src_addr, dst_addr;
+ unsigned long flags;
+ int ret, j;
+
+ if (!dma_params) {
+ dev_err(dev, "no dma parameters setting\n");
+ return -EINVAL;
+ }
+
+ dma->chan = dma_request_slave_channel(dev,
+ dma_params->chan_name[channel]);
+ if (!dma->chan) {
+ dev_err(dev, "failed to request dma channel\n");
+ return -ENODEV;
+ }
+
+ sgt = sg = devm_kcalloc(dev, sg_num, sizeof(*sg), GFP_KERNEL);
+ if (!sg) {
+ ret = -ENOMEM;
+ goto sg_err;
+ }
+
+ switch (channel) {
+ case 0:
+ bus_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ period = (SPRD_COMPR_MCDT_FIFO_SIZE - SPRD_COMPR_MCDT_EMPTY_WMK) * 4;
+ period_cnt = params->buffer.fragment_size / period;
+ src_addr = stream->iram_buffer.addr;
+ dst_addr = dma_params->dev_phys[channel];
+ flags = SPRD_DMA_FLAGS(SPRD_DMA_SRC_CHN1,
+ SPRD_DMA_TRANS_DONE_TRG,
+ SPRD_DMA_FRAG_REQ,
+ SPRD_DMA_TRANS_INT);
+ break;
+
+ case 1:
+ bus_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
+ period = params->buffer.fragment_size;
+ period_cnt = params->buffer.fragments;
+ src_addr = stream->compr_buffer.addr;
+ dst_addr = stream->iram_buffer.addr;
+ flags = SPRD_DMA_FLAGS(SPRD_DMA_DST_CHN1,
+ SPRD_DMA_TRANS_DONE_TRG,
+ SPRD_DMA_FRAG_REQ,
+ SPRD_DMA_TRANS_INT);
+ break;
+
+ default:
+ ret = -EINVAL;
+ goto config_err;
+ }
+
+ dma->trans_len = period * period_cnt;
+
+ config.src_maxburst = period;
+ config.src_addr_width = bus_width;
+ config.dst_addr_width = bus_width;
+ if (cstream->direction == SND_COMPRESS_PLAYBACK) {
+ config.src_addr = src_addr;
+ config.dst_addr = dst_addr;
+ dir = DMA_MEM_TO_DEV;
+ } else {
+ config.src_addr = dst_addr;
+ config.dst_addr = src_addr;
+ dir = DMA_DEV_TO_MEM;
+ }
+
+ sg_init_table(sgt, sg_num);
+ for (j = 0; j < sg_num; j++, sgt++) {
+ sg_dma_len(sgt) = dma->trans_len;
+ sg_dma_address(sgt) = dst_addr;
+ }
+
+ /*
+ * Configure the link-list address for the DMA engine link-list
+ * mode.
+ */
+ link.virt_addr = (unsigned long)dma->virt;
+ link.phy_addr = dma->phys;
+
+ ret = dmaengine_slave_config(dma->chan, &config);
+ if (ret) {
+ dev_err(dev,
+ "failed to set slave configuration: %d\n", ret);
+ goto config_err;
+ }
+
+ /*
+ * We configure the DMA request mode, interrupt mode, channel
+ * mode and channel trigger mode by the flags.
+ */
+ dma->desc = dma->chan->device->device_prep_slave_sg(dma->chan, sg,
+ sg_num, dir,
+ flags, &link);
+ if (!dma->desc) {
+ dev_err(dev, "failed to prepare slave sg\n");
+ ret = -ENOMEM;
+ goto config_err;
+ }
+
+ /* Only channel 1 transfer can wake up the AP system. */
+ if (!params->no_wake_mode && channel == 1) {
+ dma->desc->callback = sprd_platform_compr_dma_complete;
+ dma->desc->callback_param = cstream;
+ }
+
+ devm_kfree(dev, sg);
+
+ return 0;
+
+config_err:
+ devm_kfree(dev, sg);
+sg_err:
+ dma_release_channel(dma->chan);
+ return ret;
+}
+
+static int sprd_platform_compr_set_params(struct snd_soc_component *component,
+ struct snd_compr_stream *cstream,
+ struct snd_compr_params *params)
+{
+ struct snd_compr_runtime *runtime = cstream->runtime;
+ struct sprd_compr_stream *stream = runtime->private_data;
+ struct device *dev = component->dev;
+ struct sprd_compr_params compr_params = { };
+ int ret;
+
+ /*
+ * Configure the DMA engine 2-stage transfer mode. Channel 1 is set as
+ * the destination channel and channel 0 as the source channel, which
+ * means once the source channel's transaction is done, it will trigger
+ * the destination channel's transaction automatically.
+ */
+ ret = sprd_platform_compr_dma_config(component, cstream, params, 1);
+ if (ret) {
+ dev_err(dev, "failed to config stage 1 DMA: %d\n", ret);
+ return ret;
+ }
+
+ ret = sprd_platform_compr_dma_config(component, cstream, params, 0);
+ if (ret) {
+ dev_err(dev, "failed to config stage 0 DMA: %d\n", ret);
+ goto config_err;
+ }
+
+ compr_params.direction = cstream->direction;
+ compr_params.sample_rate = params->codec.sample_rate;
+ compr_params.channels = stream->num_channels;
+ compr_params.info_phys = stream->info_phys;
+ compr_params.info_size = stream->info_size;
+ compr_params.rate = params->codec.bit_rate;
+ compr_params.format = params->codec.id;
+
+ ret = stream->compr_ops->set_params(cstream->direction, &compr_params);
+ if (ret) {
+ dev_err(dev, "failed to set parameters: %d\n", ret);
+ goto params_err;
+ }
+
+ return 0;
+
+params_err:
+ dma_release_channel(stream->dma[0].chan);
+config_err:
+ dma_release_channel(stream->dma[1].chan);
+ return ret;
+}
+
+static int sprd_platform_compr_open(struct snd_soc_component *component,
+ struct snd_compr_stream *cstream)
+{
+ struct snd_compr_runtime *runtime = cstream->runtime;
+ struct snd_soc_pcm_runtime *rtd = cstream->private_data;
+ struct device *dev = component->dev;
+ struct sprd_compr_data *data = snd_soc_dai_get_drvdata(asoc_rtd_to_cpu(rtd, 0));
+ struct sprd_compr_stream *stream;
+ struct sprd_compr_callback cb;
+ int stream_id = cstream->direction, ret;
+
+ ret = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(32));
+ if (ret)
+ return ret;
+
+ stream = devm_kzalloc(dev, sizeof(*stream), GFP_KERNEL);
+ if (!stream)
+ return -ENOMEM;
+
+ stream->cstream = cstream;
+ stream->num_channels = 2;
+ stream->compr_ops = data->ops;
+
+ /*
+ * Allocate the stage 0 IRAM buffer, including space for the DMA 0
+ * link-list and the DSP play information.
+ */
+ ret = snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV_IRAM, dev,
+ SPRD_COMPR_IRAM_SIZE, &stream->iram_buffer);
+ if (ret < 0)
+ goto err_iram;
+
+ /* Used to save the link-list configuration for DMA 0. */
+ stream->dma[0].virt = stream->iram_buffer.area + SPRD_COMPR_IRAM_SIZE;
+ stream->dma[0].phys = stream->iram_buffer.addr + SPRD_COMPR_IRAM_SIZE;
+
+ /* Used to update the current data offset of the DSP. */
+ stream->info_phys = stream->iram_buffer.addr + SPRD_COMPR_IRAM_SIZE +
+ SPRD_COMPR_IRAM_LINKLIST_SIZE;
+ stream->info_area = stream->iram_buffer.area + SPRD_COMPR_IRAM_SIZE +
+ SPRD_COMPR_IRAM_LINKLIST_SIZE;
+ stream->info_size = SPRD_COMPR_IRAM_INFO_SIZE;
+
+ /*
+ * Allocate the stage 1 DDR buffer, including space for the DMA 1
+ * link-list.
+ */
+ ret = snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, dev,
+ SPRD_COMPR_AREA_SIZE, &stream->compr_buffer);
+ if (ret < 0)
+ goto err_compr;
+
+ /* Used to save the link-list configuration for DMA 1. */
+ stream->dma[1].virt = stream->compr_buffer.area + SPRD_COMPR_AREA_SIZE;
+ stream->dma[1].phys = stream->compr_buffer.addr + SPRD_COMPR_AREA_SIZE;
+
+ cb.drain_notify = sprd_platform_compr_drain_notify;
+ cb.drain_data = cstream;
+ ret = stream->compr_ops->open(stream_id, &cb);
+ if (ret) {
+ dev_err(dev, "failed to open compress platform: %d\n", ret);
+ goto err_open;
+ }
+
+ runtime->private_data = stream;
+ return 0;
+
+err_open:
+ snd_dma_free_pages(&stream->compr_buffer);
+err_compr:
+ snd_dma_free_pages(&stream->iram_buffer);
+err_iram:
+ devm_kfree(dev, stream);
+
+ return ret;
+}
+
+static int sprd_platform_compr_free(struct snd_soc_component *component,
+ struct snd_compr_stream *cstream)
+{
+ struct snd_compr_runtime *runtime = cstream->runtime;
+ struct sprd_compr_stream *stream = runtime->private_data;
+ struct device *dev = component->dev;
+ int stream_id = cstream->direction, i;
+
+ for (i = 0; i < stream->num_channels; i++) {
+ struct sprd_compr_dma *dma = &stream->dma[i];
+
+ if (dma->chan) {
+ dma_release_channel(dma->chan);
+ dma->chan = NULL;
+ }
+ }
+
+ snd_dma_free_pages(&stream->compr_buffer);
+ snd_dma_free_pages(&stream->iram_buffer);
+
+ stream->compr_ops->close(stream_id);
+
+ devm_kfree(dev, stream);
+ return 0;
+}
+
+static int sprd_platform_compr_trigger(struct snd_soc_component *component,
+ struct snd_compr_stream *cstream,
+ int cmd)
+{
+ struct snd_compr_runtime *runtime = cstream->runtime;
+ struct sprd_compr_stream *stream = runtime->private_data;
+ struct device *dev = component->dev;
+ int channels = stream->num_channels, ret = 0, i;
+ int stream_id = cstream->direction;
+
+ if (cstream->direction != SND_COMPRESS_PLAYBACK) {
+ dev_err(dev, "unsupported compress direction\n");
+ return -EINVAL;
+ }
+
+ switch (cmd) {
+ case SNDRV_PCM_TRIGGER_START:
+ for (i = channels - 1; i >= 0; i--) {
+ struct sprd_compr_dma *dma = &stream->dma[i];
+
+ if (!dma->desc)
+ continue;
+
+ dma->cookie = dmaengine_submit(dma->desc);
+ ret = dma_submit_error(dma->cookie);
+ if (ret) {
+ dev_err(dev, "failed to submit request: %d\n",
+ ret);
+ return ret;
+ }
+ }
+
+ for (i = channels - 1; i >= 0; i--) {
+ struct sprd_compr_dma *dma = &stream->dma[i];
+
+ if (dma->chan)
+ dma_async_issue_pending(dma->chan);
+ }
+
+ ret = stream->compr_ops->start(stream_id);
+ break;
+
+ case SNDRV_PCM_TRIGGER_STOP:
+ for (i = channels - 1; i >= 0; i--) {
+ struct sprd_compr_dma *dma = &stream->dma[i];
+
+ if (dma->chan)
+ dmaengine_terminate_async(dma->chan);
+ }
+
+ stream->copied_total = 0;
+ stream->stage1_pointer = 0;
+ stream->received_total = 0;
+ stream->received_stage0 = 0;
+ stream->received_stage1 = 0;
+
+ ret = stream->compr_ops->stop(stream_id);
+ break;
+
+ case SNDRV_PCM_TRIGGER_SUSPEND:
+ case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
+ for (i = channels - 1; i >= 0; i--) {
+ struct sprd_compr_dma *dma = &stream->dma[i];
+
+ if (dma->chan)
+ dmaengine_pause(dma->chan);
+ }
+
+ ret = stream->compr_ops->pause(stream_id);
+ break;
+
+ case SNDRV_PCM_TRIGGER_RESUME:
+ case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
+ for (i = channels - 1; i >= 0; i--) {
+ struct sprd_compr_dma *dma = &stream->dma[i];
+
+ if (dma->chan)
+ dmaengine_resume(dma->chan);
+ }
+
+ ret = stream->compr_ops->pause_release(stream_id);
+ break;
+
+ case SND_COMPR_TRIGGER_PARTIAL_DRAIN:
+ case SND_COMPR_TRIGGER_DRAIN:
+ ret = stream->compr_ops->drain(stream->received_total);
+ break;
+
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
+static int sprd_platform_compr_pointer(struct snd_soc_component *component,
+ struct snd_compr_stream *cstream,
+ struct snd_compr_tstamp *tstamp)
+{
+ struct snd_compr_runtime *runtime = cstream->runtime;
+ struct sprd_compr_stream *stream = runtime->private_data;
+ struct sprd_compr_playinfo *info =
+ (struct sprd_compr_playinfo *)stream->info_area;
+
+ tstamp->copied_total = stream->copied_total;
+ tstamp->pcm_io_frames = info->current_data_offset;
+
+ return 0;
+}
+
+static int sprd_platform_compr_copy(struct snd_soc_component *component,
+ struct snd_compr_stream *cstream,
+ char __user *buf, size_t count)
+{
+ struct snd_compr_runtime *runtime = cstream->runtime;
+ struct sprd_compr_stream *stream = runtime->private_data;
+ int avail_bytes, data_count = count;
+ void *dst;
+
+ /*
+ * We usually set the fragment size to 32K, and the stage 0 IRAM buffer
+ * size is 32K too. So if the received data size of the stage 0 IRAM
+ * buffer is less than 32K, there is still some space available in the
+ * stage 0 IRAM buffer.
+ */
+ if (stream->received_stage0 < runtime->fragment_size) {
+ avail_bytes = runtime->fragment_size - stream->received_stage0;
+ dst = stream->iram_buffer.area + stream->received_stage0;
+
+ if (avail_bytes >= data_count) {
+ /*
+ * Copy data to the stage 0 IRAM buffer directly if
+ * there is enough space.
+ */
+ if (copy_from_user(dst, buf, data_count))
+ return -EFAULT;
+
+ stream->received_stage0 += data_count;
+ stream->copied_total += data_count;
+ goto copy_done;
+ } else {
+ /*
+ * If the data count is larger than the available space
+ * of the stage 0 IRAM buffer, we should copy part of
+ * the data to the stage 0 IRAM buffer, and copy the
+ * rest to the stage 1 DDR buffer.
+ */
+ if (copy_from_user(dst, buf, avail_bytes))
+ return -EFAULT;
+
+ data_count -= avail_bytes;
+ stream->received_stage0 += avail_bytes;
+ stream->copied_total += avail_bytes;
+ buf += avail_bytes;
+ }
+ }
+
+ /*
+ * Copy data to the stage 1 DDR buffer if there is no space left in the
+ * stage 0 IRAM buffer.
+ */
+ dst = stream->compr_buffer.area + stream->stage1_pointer;
+ if (data_count < stream->compr_buffer.bytes - stream->stage1_pointer) {
+ if (copy_from_user(dst, buf, data_count))
+ return -EFAULT;
+
+ stream->stage1_pointer += data_count;
+ } else {
+ avail_bytes = stream->compr_buffer.bytes - stream->stage1_pointer;
+
+ if (copy_from_user(dst, buf, avail_bytes))
+ return -EFAULT;
+
+ if (copy_from_user(stream->compr_buffer.area, buf + avail_bytes,
+ data_count - avail_bytes))
+ return -EFAULT;
+
+ stream->stage1_pointer = data_count - avail_bytes;
+ }
+
+ stream->received_stage1 += data_count;
+
+copy_done:
+ /* Update the total received data size. */
+ stream->received_total += count;
+ return count;
+}
+
+static int sprd_platform_compr_get_caps(struct snd_soc_component *component,
+ struct snd_compr_stream *cstream,
+ struct snd_compr_caps *caps)
+{
+ caps->direction = cstream->direction;
+ caps->min_fragment_size = SPRD_COMPR_MIN_FRAGMENT_SIZE;
+ caps->max_fragment_size = SPRD_COMPR_MAX_FRAGMENT_SIZE;
+ caps->min_fragments = SPRD_COMPR_MIN_NUM_FRAGMENTS;
+ caps->max_fragments = SPRD_COMPR_MAX_NUM_FRAGMENTS;
+ caps->num_codecs = 2;
+ caps->codecs[0] = SND_AUDIOCODEC_MP3;
+ caps->codecs[1] = SND_AUDIOCODEC_AAC;
+
+ return 0;
+}
+
+static int
+sprd_platform_compr_get_codec_caps(struct snd_soc_component *component,
+ struct snd_compr_stream *cstream,
+ struct snd_compr_codec_caps *codec)
+{
+ switch (codec->codec) {
+ case SND_AUDIOCODEC_MP3:
+ codec->num_descriptors = 2;
+ codec->descriptor[0].max_ch = 2;
+ codec->descriptor[0].bit_rate[0] = 320;
+ codec->descriptor[0].bit_rate[1] = 128;
+ codec->descriptor[0].num_bitrates = 2;
+ codec->descriptor[0].profiles = 0;
+ codec->descriptor[0].modes = SND_AUDIOCHANMODE_MP3_STEREO;
+ codec->descriptor[0].formats = 0;
+ break;
+
+ case SND_AUDIOCODEC_AAC:
+ codec->num_descriptors = 2;
+ codec->descriptor[1].max_ch = 2;
+ codec->descriptor[1].bit_rate[0] = 320;
+ codec->descriptor[1].bit_rate[1] = 128;
+ codec->descriptor[1].num_bitrates = 2;
+ codec->descriptor[1].profiles = 0;
+ codec->descriptor[1].modes = 0;
+ codec->descriptor[1].formats = 0;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+const struct snd_compress_ops sprd_platform_compress_ops = {
+ .open = sprd_platform_compr_open,
+ .free = sprd_platform_compr_free,
+ .set_params = sprd_platform_compr_set_params,
+ .trigger = sprd_platform_compr_trigger,
+ .pointer = sprd_platform_compr_pointer,
+ .copy = sprd_platform_compr_copy,
+ .get_caps = sprd_platform_compr_get_caps,
+ .get_codec_caps = sprd_platform_compr_get_codec_caps,
+};
+
+MODULE_DESCRIPTION("Spreadtrum ASoC Compress Platform Driver");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:compress-platform");
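
The compress ops table above is not registered in this file; presumably the platform component driver (sprd-pcm-dma.c, shown partially below) hooks it into its snd_soc_component_driver, with sprd_platform_compress_ops declared in sprd-pcm-dma.h as its non-static definition suggests. A condensed sketch of that wiring, with the component name and function names invented for illustration:

#include <linux/device.h>
#include <sound/soc.h>

#include "sprd-pcm-dma.h"

/* Minimal component glue; a real driver would also wire up the PCM ops. */
static const struct snd_soc_component_driver example_component_driver = {
	.name		= "example-pcm-dma",
	.compress_ops	= &sprd_platform_compress_ops,
};

static int example_register_component(struct device *dev)
{
	return devm_snd_soc_register_component(dev, &example_component_driver,
					       NULL, 0);
}
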
diff --git a/sound/soc/sprd/sprd-pcm-dma.c b/sound/soc/sprd/sprd-pcm-dma.c
new file mode 100644
index 0000000000..48d90616b2
--- /dev/null
+++ b/sound/soc/sprd/sprd-pcm-dma.c
@@ -0,0 +1,497 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2019 Spreadtrum Communications Inc.
+
+#include <linux/dma-mapping.h>
+#include <linux/dmaengine.h>
+#include <linux/dma/sprd-dma.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of_reserved_mem.h>
+#include <linux/platform_device.h>
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+#include <sound/soc.h>
+
+#include "sprd-pcm-dma.h"
+
+#define SPRD_PCM_DMA_LINKLIST_SIZE 64
+#define SPRD_PCM_DMA_BRUST_LEN 640
+
+struct sprd_pcm_dma_data {
+ struct dma_chan *chan;
+ struct dma_async_tx_descriptor *desc;
+ dma_cookie_t cookie;
+ dma_addr_t phys;
+ void *virt;
+ int pre_pointer;
+};
+
+struct sprd_pcm_dma_private {
+ struct snd_pcm_substream *substream;
+ struct sprd_pcm_dma_params *params;
+ struct sprd_pcm_dma_data data[SPRD_PCM_CHANNEL_MAX];
+ int hw_chan;
+ int dma_addr_offset;
+};
+
+static const struct snd_pcm_hardware sprd_pcm_hardware = {
+ .info = SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_MMAP_VALID |
+ SNDRV_PCM_INFO_INTERLEAVED | SNDRV_PCM_INFO_PAUSE |
+ SNDRV_PCM_INFO_RESUME | SNDRV_PCM_INFO_NO_PERIOD_WAKEUP,
+ .formats = SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S24_LE,
+ .period_bytes_min = 1,
+ .period_bytes_max = 64 * 1024,
+ .periods_min = 1,
+ .periods_max = PAGE_SIZE / SPRD_PCM_DMA_LINKLIST_SIZE,
+ .buffer_bytes_max = 64 * 1024,
+};
+
+static int sprd_pcm_open(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream)
+{
+ struct snd_pcm_runtime *runtime = substream->runtime;
+ struct device *dev = component->dev;
+ struct sprd_pcm_dma_private *dma_private;
+ int hw_chan = SPRD_PCM_CHANNEL_MAX;
+ int size, ret, i;
+
+ snd_soc_set_runtime_hwparams(substream, &sprd_pcm_hardware);
+
+ ret = snd_pcm_hw_constraint_step(runtime, 0,
+ SNDRV_PCM_HW_PARAM_PERIOD_BYTES,
+ SPRD_PCM_DMA_BRUST_LEN);
+ if (ret < 0)
+ return ret;
+
+ ret = snd_pcm_hw_constraint_step(runtime, 0,
+ SNDRV_PCM_HW_PARAM_BUFFER_BYTES,
+ SPRD_PCM_DMA_BRUST_LEN);
+ if (ret < 0)
+ return ret;
+
+ ret = snd_pcm_hw_constraint_integer(runtime,
+ SNDRV_PCM_HW_PARAM_PERIODS);
+ if (ret < 0)
+ return ret;
+
+ dma_private = devm_kzalloc(dev, sizeof(*dma_private), GFP_KERNEL);
+ if (!dma_private)
+ return -ENOMEM;
+
+ size = runtime->hw.periods_max * SPRD_PCM_DMA_LINKLIST_SIZE;
+
+ for (i = 0; i < hw_chan; i++) {
+ struct sprd_pcm_dma_data *data = &dma_private->data[i];
+
+ data->virt = dmam_alloc_coherent(dev, size, &data->phys,
+ GFP_KERNEL);
+ if (!data->virt) {
+ ret = -ENOMEM;
+ goto error;
+ }
+ }
+
+ dma_private->hw_chan = hw_chan;
+ runtime->private_data = dma_private;
+ dma_private->substream = substream;
+
+ return 0;
+
+error:
+ for (i = 0; i < hw_chan; i++) {
+ struct sprd_pcm_dma_data *data = &dma_private->data[i];
+
+ if (data->virt)
+ dmam_free_coherent(dev, size, data->virt, data->phys);
+ }
+
+ devm_kfree(dev, dma_private);
+ return ret;
+}
+
+static int sprd_pcm_close(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream)
+{
+ struct snd_pcm_runtime *runtime = substream->runtime;
+ struct sprd_pcm_dma_private *dma_private = runtime->private_data;
+ struct device *dev = component->dev;
+ int size = runtime->hw.periods_max * SPRD_PCM_DMA_LINKLIST_SIZE;
+ int i;
+
+ for (i = 0; i < dma_private->hw_chan; i++) {
+ struct sprd_pcm_dma_data *data = &dma_private->data[i];
+
+ dmam_free_coherent(dev, size, data->virt, data->phys);
+ }
+
+ devm_kfree(dev, dma_private);
+
+ return 0;
+}
+
+static void sprd_pcm_dma_complete(void *data)
+{
+ struct sprd_pcm_dma_private *dma_private = data;
+ struct snd_pcm_substream *substream = dma_private->substream;
+
+ snd_pcm_period_elapsed(substream);
+}
+
+static void sprd_pcm_release_dma_channel(struct snd_pcm_substream *substream)
+{
+ struct snd_pcm_runtime *runtime = substream->runtime;
+ struct sprd_pcm_dma_private *dma_private = runtime->private_data;
+ int i;
+
+ for (i = 0; i < SPRD_PCM_CHANNEL_MAX; i++) {
+ struct sprd_pcm_dma_data *data = &dma_private->data[i];
+
+ if (data->chan) {
+ dma_release_channel(data->chan);
+ data->chan = NULL;
+ }
+ }
+}
+
+static int sprd_pcm_request_dma_channel(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream,
+ int channels)
+{
+ struct snd_pcm_runtime *runtime = substream->runtime;
+ struct sprd_pcm_dma_private *dma_private = runtime->private_data;
+ struct device *dev = component->dev;
+ struct sprd_pcm_dma_params *dma_params = dma_private->params;
+ int i;
+
+ if (channels > SPRD_PCM_CHANNEL_MAX) {
+ dev_err(dev, "invalid dma channel number:%d\n", channels);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < channels; i++) {
+ struct sprd_pcm_dma_data *data = &dma_private->data[i];
+
+ data->chan = dma_request_slave_channel(dev,
+ dma_params->chan_name[i]);
+ if (!data->chan) {
+ dev_err(dev, "failed to request dma channel:%s\n",
+ dma_params->chan_name[i]);
+ sprd_pcm_release_dma_channel(substream);
+ return -ENODEV;
+ }
+ }
+
+ return 0;
+}
+
+static int sprd_pcm_hw_params(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params)
+{
+ struct snd_pcm_runtime *runtime = substream->runtime;
+ struct sprd_pcm_dma_private *dma_private = runtime->private_data;
+ struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream);
+ struct sprd_pcm_dma_params *dma_params;
+ size_t totsize = params_buffer_bytes(params);
+ size_t period = params_period_bytes(params);
+ int channels = params_channels(params);
+ int is_playback = substream->stream == SNDRV_PCM_STREAM_PLAYBACK;
+ struct scatterlist *sg;
+ unsigned long flags;
+ int ret, i, j, sg_num;
+
+ dma_params = snd_soc_dai_get_dma_data(asoc_rtd_to_cpu(rtd, 0), substream);
+ if (!dma_params) {
+ dev_warn(component->dev, "no dma parameters setting\n");
+ dma_private->params = NULL;
+ return 0;
+ }
+
+ if (!dma_private->params) {
+ dma_private->params = dma_params;
+ ret = sprd_pcm_request_dma_channel(component,
+ substream, channels);
+ if (ret)
+ return ret;
+ }
+
+ sg_num = totsize / period;
+ dma_private->dma_addr_offset = totsize / channels;
+
+ sg = devm_kcalloc(component->dev, sg_num, sizeof(*sg), GFP_KERNEL);
+ if (!sg) {
+ ret = -ENOMEM;
+ goto sg_err;
+ }
+
+ for (i = 0; i < channels; i++) {
+ struct sprd_pcm_dma_data *data = &dma_private->data[i];
+ struct dma_chan *chan = data->chan;
+ struct dma_slave_config config = { };
+ struct sprd_dma_linklist link = { };
+ enum dma_transfer_direction dir;
+ struct scatterlist *sgt = sg;
+
+ config.src_maxburst = dma_params->fragment_len[i];
+ config.src_addr_width = dma_params->datawidth[i];
+ config.dst_addr_width = dma_params->datawidth[i];
+ if (is_playback) {
+ config.src_addr = runtime->dma_addr +
+ i * dma_private->dma_addr_offset;
+ config.dst_addr = dma_params->dev_phys[i];
+ dir = DMA_MEM_TO_DEV;
+ } else {
+ config.src_addr = dma_params->dev_phys[i];
+ config.dst_addr = runtime->dma_addr +
+ i * dma_private->dma_addr_offset;
+ dir = DMA_DEV_TO_MEM;
+ }
+
+ sg_init_table(sgt, sg_num);
+ for (j = 0; j < sg_num; j++, sgt++) {
+ u32 sg_len = period / channels;
+
+ sg_dma_len(sgt) = sg_len;
+ sg_dma_address(sgt) = runtime->dma_addr +
+ i * dma_private->dma_addr_offset + sg_len * j;
+ }
+
+ /*
+ * Configure the link-list address for the DMA engine link-list
+ * mode.
+ */
+ link.virt_addr = (unsigned long)data->virt;
+ link.phy_addr = data->phys;
+
+ ret = dmaengine_slave_config(chan, &config);
+ if (ret) {
+ dev_err(component->dev,
+ "failed to set slave configuration: %d\n", ret);
+ goto config_err;
+ }
+
+ /*
+ * Configure the DMA request mode, interrupt mode, channel mode
+ * and channel trigger mode through the flags.
+ */
+ flags = SPRD_DMA_FLAGS(SPRD_DMA_CHN_MODE_NONE, SPRD_DMA_NO_TRG,
+ SPRD_DMA_FRAG_REQ, SPRD_DMA_TRANS_INT);
+ data->desc = chan->device->device_prep_slave_sg(chan, sg,
+ sg_num, dir,
+ flags, &link);
+ if (!data->desc) {
+ dev_err(component->dev, "failed to prepare slave sg\n");
+ ret = -ENOMEM;
+ goto config_err;
+ }
+
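+ /*
+ * Hook up the DMA completion callback only when the stream
+ * actually needs period wakeups.
+ */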
+ if (!runtime->no_period_wakeup) {
+ data->desc->callback = sprd_pcm_dma_complete;
+ data->desc->callback_param = dma_private;
+ }
+ }
+
+ devm_kfree(component->dev, sg);
+
+ return 0;
+
+config_err:
+ devm_kfree(component->dev, sg);
+sg_err:
+ sprd_pcm_release_dma_channel(substream);
+ return ret;
+}
+
+static int sprd_pcm_hw_free(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream)
+{
+ sprd_pcm_release_dma_channel(substream);
+
+ return 0;
+}
+
+static int sprd_pcm_trigger(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream, int cmd)
+{
+ struct sprd_pcm_dma_private *dma_private =
+ substream->runtime->private_data;
+ int ret = 0, i;
+
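+ /*
+ * The PCM trigger callback runs in atomic context, so stick to the
+ * non-blocking dmaengine helpers (submit, issue_pending, pause,
+ * resume and terminate_async).
+ */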
+ switch (cmd) {
+ case SNDRV_PCM_TRIGGER_START:
+ for (i = 0; i < dma_private->hw_chan; i++) {
+ struct sprd_pcm_dma_data *data = &dma_private->data[i];
+
+ if (!data->desc)
+ continue;
+
+ data->cookie = dmaengine_submit(data->desc);
+ ret = dma_submit_error(data->cookie);
+ if (ret) {
+ dev_err(component->dev,
+ "failed to submit dma request: %d\n",
+ ret);
+ return ret;
+ }
+
+ dma_async_issue_pending(data->chan);
+ }
+
+ break;
+ case SNDRV_PCM_TRIGGER_RESUME:
+ case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
+ for (i = 0; i < dma_private->hw_chan; i++) {
+ struct sprd_pcm_dma_data *data = &dma_private->data[i];
+
+ if (data->chan)
+ dmaengine_resume(data->chan);
+ }
+
+ break;
+ case SNDRV_PCM_TRIGGER_STOP:
+ for (i = 0; i < dma_private->hw_chan; i++) {
+ struct sprd_pcm_dma_data *data = &dma_private->data[i];
+
+ if (data->chan)
+ dmaengine_terminate_async(data->chan);
+ }
+
+ break;
+ case SNDRV_PCM_TRIGGER_SUSPEND:
+ case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
+ for (i = 0; i < dma_private->hw_chan; i++) {
+ struct sprd_pcm_dma_data *data = &dma_private->data[i];
+
+ if (data->chan)
+ dmaengine_pause(data->chan);
+ }
+
+ break;
+ default:
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
+static snd_pcm_uframes_t sprd_pcm_pointer(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream)
+{
+ struct snd_pcm_runtime *runtime = substream->runtime;
+ struct sprd_pcm_dma_private *dma_private = runtime->private_data;
+ int pointer[SPRD_PCM_CHANNEL_MAX];
+ int bytes_of_pointer = 0, sel_max = 0, i;
+ snd_pcm_uframes_t x;
+ struct dma_tx_state state;
+ enum dma_status status;
+
+ for (i = 0; i < dma_private->hw_chan; i++) {
+ struct sprd_pcm_dma_data *data = &dma_private->data[i];
+
+ if (!data->chan)
+ continue;
+
+ status = dmaengine_tx_status(data->chan, data->cookie, &state);
+ if (status == DMA_ERROR) {
+ dev_err(component->dev,
+ "failed to get dma channel %d status\n", i);
+ return 0;
+ }
+
+ /*
+ * We only get the current transfer address from the DMA engine, so
+ * we need to convert it to the current pointer.
+ */
+ pointer[i] = state.residue - runtime->dma_addr -
+ i * dma_private->dma_addr_offset;
+
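+ /*
+ * The two hardware channels advance independently: take the
+ * larger per-channel offset when exactly one channel has wrapped
+ * since the last query and the smaller one otherwise, then double
+ * it to get a byte offset covering both channels.
+ */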
+ if (i == 0) {
+ bytes_of_pointer = pointer[i];
+ sel_max = pointer[i] < data->pre_pointer ? 1 : 0;
+ } else {
+ sel_max ^= pointer[i] < data->pre_pointer ? 1 : 0;
+
+ if (sel_max)
+ bytes_of_pointer =
+ max(pointer[i], pointer[i - 1]) << 1;
+ else
+ bytes_of_pointer =
+ min(pointer[i], pointer[i - 1]) << 1;
+ }
+
+ data->pre_pointer = pointer[i];
+ }
+
+ x = bytes_to_frames(runtime, bytes_of_pointer);
+ if (x == runtime->buffer_size)
+ x = 0;
+
+ return x;
+}
+
+static int sprd_pcm_new(struct snd_soc_component *component,
+ struct snd_soc_pcm_runtime *rtd)
+{
+ struct snd_card *card = rtd->card->snd_card;
+ struct snd_pcm *pcm = rtd->pcm;
+ int ret;
+
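+ /*
+ * Restrict the card device to 32-bit DMA addressing and preallocate a
+ * fixed DMA buffer for every substream.
+ */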
+ ret = dma_coerce_mask_and_coherent(card->dev, DMA_BIT_MASK(32));
+ if (ret)
+ return ret;
+
+ return snd_pcm_set_fixed_buffer_all(pcm, SNDRV_DMA_TYPE_DEV,
+ card->dev,
+ sprd_pcm_hardware.buffer_bytes_max);
+}
+
+static const struct snd_soc_component_driver sprd_soc_component = {
+ .name = DRV_NAME,
+ .open = sprd_pcm_open,
+ .close = sprd_pcm_close,
+ .hw_params = sprd_pcm_hw_params,
+ .hw_free = sprd_pcm_hw_free,
+ .trigger = sprd_pcm_trigger,
+ .pointer = sprd_pcm_pointer,
+ .pcm_construct = sprd_pcm_new,
+ .compress_ops = &sprd_platform_compress_ops,
+};
+
+static int sprd_soc_platform_probe(struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+ int ret;
+
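+ /*
+ * A reserved memory region is optional: if the device tree does not
+ * provide one, only warn and fall back to the default DMA allocator.
+ */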
+ ret = of_reserved_mem_device_init_by_idx(&pdev->dev, np, 0);
+ if (ret)
+ dev_warn(&pdev->dev,
+ "no reserved DMA memory for audio platform device\n");
+
+ ret = devm_snd_soc_register_component(&pdev->dev, &sprd_soc_component,
+ NULL, 0);
+ if (ret)
+ dev_err(&pdev->dev, "could not register platform:%d\n", ret);
+
+ return ret;
+}
+
+static const struct of_device_id sprd_pcm_of_match[] = {
+ { .compatible = "sprd,pcm-platform", },
+ { },
+};
+MODULE_DEVICE_TABLE(of, sprd_pcm_of_match);
+
+static struct platform_driver sprd_pcm_driver = {
+ .driver = {
+ .name = "sprd-pcm-audio",
+ .of_match_table = sprd_pcm_of_match,
+ },
+
+ .probe = sprd_soc_platform_probe,
+};
+
+module_platform_driver(sprd_pcm_driver);
+
+MODULE_DESCRIPTION("Spreadtrum ASoC PCM DMA");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:sprd-audio");
diff --git a/sound/soc/sprd/sprd-pcm-dma.h b/sound/soc/sprd/sprd-pcm-dma.h
new file mode 100644
index 0000000000..be5e385f5e
--- /dev/null
+++ b/sound/soc/sprd/sprd-pcm-dma.h
@@ -0,0 +1,58 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __SPRD_PCM_DMA_H
+#define __SPRD_PCM_DMA_H
+
+#define DRV_NAME "sprd_pcm_dma"
+#define SPRD_PCM_CHANNEL_MAX 2
+
+extern const struct snd_compress_ops sprd_platform_compress_ops;
+
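+/*
+ * Per-stream DMA configuration that the CPU DAI hands to the platform
+ * driver via snd_soc_dai_get_dma_data(); one entry per hardware DMA
+ * channel, up to SPRD_PCM_CHANNEL_MAX.
+ */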
+struct sprd_pcm_dma_params {
+ dma_addr_t dev_phys[SPRD_PCM_CHANNEL_MAX];
+ u32 datawidth[SPRD_PCM_CHANNEL_MAX];
+ u32 fragment_len[SPRD_PCM_CHANNEL_MAX];
+ const char *chan_name[SPRD_PCM_CHANNEL_MAX];
+};
+
+struct sprd_compr_playinfo {
+ int total_time;
+ int current_time;
+ int total_data_length;
+ int current_data_offset;
+};
+
+struct sprd_compr_params {
+ u32 direction;
+ u32 rate;
+ u32 sample_rate;
+ u32 channels;
+ u32 format;
+ u32 period;
+ u32 periods;
+ u32 info_phys;
+ u32 info_size;
+};
+
+struct sprd_compr_callback {
+ void (*drain_notify)(void *data);
+ void *drain_data;
+};
+
+struct sprd_compr_ops {
+ int (*open)(int str_id, struct sprd_compr_callback *cb);
+ int (*close)(int str_id);
+ int (*start)(int str_id);
+ int (*stop)(int str_id);
+ int (*pause)(int str_id);
+ int (*pause_release)(int str_id);
+ int (*drain)(int received_total);
+ int (*set_params)(int str_id, struct sprd_compr_params *params);
+};
+
+struct sprd_compr_data {
+ struct sprd_compr_ops *ops;
+ struct sprd_pcm_dma_params *dma_params;
+};
+
+#endif /* __SPRD_PCM_DMA_H */