author    Daniel Baumann <daniel.baumann@progress-linux.org>  2024-08-07 13:11:22 +0000
committer Daniel Baumann <daniel.baumann@progress-linux.org>  2024-08-07 13:11:22 +0000
commit    b20732900e4636a467c0183a47f7396700f5f743
tree      42f079ff82e701ebcb76829974b4caca3e5b6798
parent    Adding upstream version 6.8.12.
Adding upstream version 6.9.7. (tag: upstream/6.9.7)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'drivers/spi/spi-pci1xxxx.c')
 drivers/spi/spi-pci1xxxx.c | 510 ++++++++++++++++++++++++++++++++++++++++----
 1 file changed, 483 insertions(+), 27 deletions(-)
diff --git a/drivers/spi/spi-pci1xxxx.c b/drivers/spi/spi-pci1xxxx.c
index 5a42266820..cc18d32037 100644
--- a/drivers/spi/spi-pci1xxxx.c
+++ b/drivers/spi/spi-pci1xxxx.c
@@ -5,8 +5,15 @@
// Kumaravel Thiagarajan <Kumaravel.Thiagarajan@microchip.com>
+#include <linux/bitfield.h>
+#include <linux/dma-mapping.h>
+#include <linux/iopoll.h>
+#include <linux/irq.h>
#include <linux/module.h>
+#include <linux/msi.h>
+#include <linux/pci_regs.h>
#include <linux/pci.h>
+#include <linux/spinlock.h>
#include <linux/spi/spi.h>
#include <linux/delay.h>
@@ -32,8 +39,68 @@
#define SPI_MST_CTL_MODE_SEL (BIT(2))
#define SPI_MST_CTL_GO (BIT(0))
+#define SPI_PERI_ADDR_BASE (0x160000)
+#define SPI_SYSTEM_ADDR_BASE (0x2000)
#define SPI_MST1_ADDR_BASE (0x800)
+#define DEV_REV_REG (SPI_SYSTEM_ADDR_BASE + 0x00)
+#define SPI_SYSLOCK_REG (SPI_SYSTEM_ADDR_BASE + 0xA0)
+#define SPI_CONFIG_PERI_ENABLE_REG (SPI_SYSTEM_ADDR_BASE + 0x108)
+
+#define SPI_PERI_ENBLE_PF_MASK (GENMASK(17, 16))
+#define DEV_REV_MASK (GENMASK(7, 0))
+
+#define SPI_SYSLOCK BIT(4)
+#define SPI0 (0)
+#define SPI1 (1)
+
+/* DMA Related Registers */
+#define SPI_DMA_ADDR_BASE (0x1000)
+#define SPI_DMA_GLOBAL_WR_ENGINE_EN (SPI_DMA_ADDR_BASE + 0x0C)
+#define SPI_DMA_WR_DOORBELL_REG (SPI_DMA_ADDR_BASE + 0x10)
+#define SPI_DMA_GLOBAL_RD_ENGINE_EN (SPI_DMA_ADDR_BASE + 0x2C)
+#define SPI_DMA_RD_DOORBELL_REG (SPI_DMA_ADDR_BASE + 0x30)
+#define SPI_DMA_INTR_WR_STS (SPI_DMA_ADDR_BASE + 0x4C)
+#define SPI_DMA_WR_INT_MASK (SPI_DMA_ADDR_BASE + 0x54)
+#define SPI_DMA_INTR_WR_CLR (SPI_DMA_ADDR_BASE + 0x58)
+#define SPI_DMA_ERR_WR_STS (SPI_DMA_ADDR_BASE + 0x5C)
+#define SPI_DMA_INTR_IMWR_WDONE_LOW (SPI_DMA_ADDR_BASE + 0x60)
+#define SPI_DMA_INTR_IMWR_WDONE_HIGH (SPI_DMA_ADDR_BASE + 0x64)
+#define SPI_DMA_INTR_IMWR_WABORT_LOW (SPI_DMA_ADDR_BASE + 0x68)
+#define SPI_DMA_INTR_IMWR_WABORT_HIGH (SPI_DMA_ADDR_BASE + 0x6C)
+#define SPI_DMA_INTR_WR_IMWR_DATA (SPI_DMA_ADDR_BASE + 0x70)
+#define SPI_DMA_INTR_RD_STS (SPI_DMA_ADDR_BASE + 0xA0)
+#define SPI_DMA_RD_INT_MASK (SPI_DMA_ADDR_BASE + 0xA8)
+#define SPI_DMA_INTR_RD_CLR (SPI_DMA_ADDR_BASE + 0xAC)
+#define SPI_DMA_ERR_RD_STS (SPI_DMA_ADDR_BASE + 0xB8)
+#define SPI_DMA_INTR_IMWR_RDONE_LOW (SPI_DMA_ADDR_BASE + 0xCC)
+#define SPI_DMA_INTR_IMWR_RDONE_HIGH (SPI_DMA_ADDR_BASE + 0xD0)
+#define SPI_DMA_INTR_IMWR_RABORT_LOW (SPI_DMA_ADDR_BASE + 0xD4)
+#define SPI_DMA_INTR_IMWR_RABORT_HIGH (SPI_DMA_ADDR_BASE + 0xD8)
+#define SPI_DMA_INTR_RD_IMWR_DATA (SPI_DMA_ADDR_BASE + 0xDC)
+
+#define SPI_DMA_CH0_WR_BASE (SPI_DMA_ADDR_BASE + 0x200)
+#define SPI_DMA_CH0_RD_BASE (SPI_DMA_ADDR_BASE + 0x300)
+#define SPI_DMA_CH1_WR_BASE (SPI_DMA_ADDR_BASE + 0x400)
+#define SPI_DMA_CH1_RD_BASE (SPI_DMA_ADDR_BASE + 0x500)
+
+#define SPI_DMA_CH_CTL1_OFFSET (0x00)
+#define SPI_DMA_CH_XFER_LEN_OFFSET (0x08)
+#define SPI_DMA_CH_SAR_LO_OFFSET (0x0C)
+#define SPI_DMA_CH_SAR_HI_OFFSET (0x10)
+#define SPI_DMA_CH_DAR_LO_OFFSET (0x14)
+#define SPI_DMA_CH_DAR_HI_OFFSET (0x18)
+
+#define SPI_DMA_CH0_DONE_INT BIT(0)
+#define SPI_DMA_CH1_DONE_INT BIT(1)
+#define SPI_DMA_CH0_ABORT_INT BIT(16)
+#define SPI_DMA_CH1_ABORT_INT BIT(17)
+#define SPI_DMA_DONE_INT_MASK (SPI_DMA_CH0_DONE_INT | SPI_DMA_CH1_DONE_INT)
+#define SPI_DMA_ABORT_INT_MASK (SPI_DMA_CH0_ABORT_INT | SPI_DMA_CH1_ABORT_INT)
+#define DMA_CH_CONTROL_LIE BIT(3)
+#define DMA_CH_CONTROL_RIE BIT(4)
+#define DMA_INTR_EN (DMA_CH_CONTROL_RIE | DMA_CH_CONTROL_LIE)
+
/* x refers to SPI Host Controller HW instance id in the below macros - 0 or 1 */
#define SPI_MST_CMD_BUF_OFFSET(x) (((x) * SPI_MST1_ADDR_BASE) + 0x00)
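/* e.g. SPI_MST_CMD_BUF_OFFSET(0) == 0x000, SPI_MST_CMD_BUF_OFFSET(1) == 0x800 (editor's illustration) */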
@@ -50,6 +117,9 @@
#define SPI_MAX_DATA_LEN 320
#define PCI1XXXX_SPI_TIMEOUT (msecs_to_jiffies(100))
+#define SYSLOCK_RETRY_CNT (1000)
+#define SPI_DMA_ENGINE_EN (0x1)
+#define SPI_DMA_ENGINE_DIS (0x0)
#define SPI_INTR BIT(8)
#define SPI_FORCE_CE BIT(4)
@@ -62,11 +132,21 @@
struct pci1xxxx_spi_internal {
u8 hw_inst;
- bool spi_xfer_in_progress;
+ u8 clkdiv;
int irq;
+ int mode;
+ bool spi_xfer_in_progress;
+ void *rx_buf;
+ bool dma_aborted_rd;
+ u32 bytes_recvd;
+ u32 tx_sgl_len;
+ u32 rx_sgl_len;
+ struct scatterlist *tx_sgl, *rx_sgl;
+ bool dma_aborted_wr;
struct completion spi_xfer_done;
struct spi_controller *spi_host;
struct pci1xxxx_spi *parent;
+ struct spi_transfer *xfer;
struct {
unsigned int dev_sel : 3;
unsigned int msi_vector_sel : 1;
@@ -76,7 +156,12 @@ struct pci1xxxx_spi_internal {
struct pci1xxxx_spi {
struct pci_dev *dev;
u8 total_hw_instances;
+ u8 dev_rev;
void __iomem *reg_base;
+ void __iomem *dma_offset_bar;
+ /* lock to safely access the DMA registers in the ISR */
+ spinlock_t dma_reg_lock;
+ bool can_dma;
struct pci1xxxx_spi_internal *spi_int[] __counted_by(total_hw_instances);
};
@@ -106,6 +191,114 @@ static const struct pci_device_id pci1xxxx_spi_pci_id_table[] = {
MODULE_DEVICE_TABLE(pci, pci1xxxx_spi_pci_id_table);
+static int pci1xxxx_set_sys_lock(struct pci1xxxx_spi *par)
+{
+ writel(SPI_SYSLOCK, par->reg_base + SPI_SYSLOCK_REG);
+ return readl(par->reg_base + SPI_SYSLOCK_REG);
+}
+
+static int pci1xxxx_acquire_sys_lock(struct pci1xxxx_spi *par)
+{
+ u32 regval;
+
+ return readx_poll_timeout(pci1xxxx_set_sys_lock, par, regval,
+ (regval & SPI_SYSLOCK), 100,
+ SYSLOCK_RETRY_CNT * 100);
+}
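+
+/*
+ * Editor's note: readx_poll_timeout(op, arg, val, cond, sleep_us, timeout_us)
+ * repeatedly calls op(arg) into val until cond holds or timeout_us elapses.
+ * The acquire above is therefore roughly this open-coded loop (a minimal
+ * sketch, not driver code):
+ *
+ *	u32 regval;
+ *	int i;
+ *
+ *	for (i = 0; i < SYSLOCK_RETRY_CNT; i++) {
+ *		regval = pci1xxxx_set_sys_lock(par);
+ *		if (regval & SPI_SYSLOCK)
+ *			return 0;
+ *		usleep_range(100, 200);
+ *	}
+ *	return -ETIMEDOUT;
+ */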
+
+static void pci1xxxx_release_sys_lock(struct pci1xxxx_spi *par)
+{
+ writel(0x0, par->reg_base + SPI_SYSLOCK_REG);
+}
+
+static int pci1xxxx_check_spi_can_dma(struct pci1xxxx_spi *spi_bus, int irq)
+{
+ struct pci_dev *pdev = spi_bus->dev;
+ u32 pf_num;
+ u32 regval;
+ int ret;
+
+ /*
+ * The DEV_REV register is a system register; the HW syslock bit
+ * must be acquired before accessing it.
+ */
+ ret = pci1xxxx_acquire_sys_lock(spi_bus);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to acquire syslock\n");
+ return ret;
+ }
+
+ regval = readl(spi_bus->reg_base + DEV_REV_REG);
+ spi_bus->dev_rev = regval & DEV_REV_MASK;
+ if (spi_bus->dev_rev >= 0xC0) {
+ regval = readl(spi_bus->reg_base +
+ SPI_CONFIG_PERI_ENABLE_REG);
+ pf_num = regval & SPI_PERI_ENBLE_PF_MASK;
+ }
+
+ pci1xxxx_release_sys_lock(spi_bus);
+
+ /*
+ * DMA is supported only from revision C0 onwards, and SPI can
+ * use DMA only if it is mapped to PF0.
+ */
+ if (spi_bus->dev_rev < 0xC0 || pf_num)
+ return -EOPNOTSUPP;
+
+ /*
+ * DMA is supported only with MSI interrupts: one SPI instance's
+ * MSI vector address and data are used for the DMA interrupt.
+ */
+ if (!irq_get_msi_desc(irq)) {
+ dev_warn(&pdev->dev, "MSI interrupts not supported, will operate in PIO mode\n");
+ return -EOPNOTSUPP;
+ }
+
+ spi_bus->dma_offset_bar = pcim_iomap(pdev, 2, pci_resource_len(pdev, 2));
+ if (!spi_bus->dma_offset_bar) {
+ dev_warn(&pdev->dev, "Failed to map DMA BAR, will operate in PIO mode\n");
+ return -EOPNOTSUPP;
+ }
+
+ if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) {
+ dev_warn(&pdev->dev, "Failed to set DMA mask, will operate in PIO mode\n");
+ pcim_iounmap(pdev, spi_bus->dma_offset_bar);
+ spi_bus->dma_offset_bar = NULL;
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static int pci1xxxx_spi_dma_init(struct pci1xxxx_spi *spi_bus, int irq)
+{
+ struct msi_msg msi;
+ int ret;
+
+ ret = pci1xxxx_check_spi_can_dma(spi_bus, irq);
+ if (ret)
+ return ret;
+
+ spin_lock_init(&spi_bus->dma_reg_lock);
+ get_cached_msi_msg(irq, &msi);
+ writel(SPI_DMA_ENGINE_EN, spi_bus->dma_offset_bar + SPI_DMA_GLOBAL_WR_ENGINE_EN);
+ writel(SPI_DMA_ENGINE_EN, spi_bus->dma_offset_bar + SPI_DMA_GLOBAL_RD_ENGINE_EN);
+ writel(msi.address_hi, spi_bus->dma_offset_bar + SPI_DMA_INTR_IMWR_WDONE_HIGH);
+ writel(msi.address_hi, spi_bus->dma_offset_bar + SPI_DMA_INTR_IMWR_WABORT_HIGH);
+ writel(msi.address_hi, spi_bus->dma_offset_bar + SPI_DMA_INTR_IMWR_RDONE_HIGH);
+ writel(msi.address_hi, spi_bus->dma_offset_bar + SPI_DMA_INTR_IMWR_RABORT_HIGH);
+ writel(msi.address_lo, spi_bus->dma_offset_bar + SPI_DMA_INTR_IMWR_WDONE_LOW);
+ writel(msi.address_lo, spi_bus->dma_offset_bar + SPI_DMA_INTR_IMWR_WABORT_LOW);
+ writel(msi.address_lo, spi_bus->dma_offset_bar + SPI_DMA_INTR_IMWR_RDONE_LOW);
+ writel(msi.address_lo, spi_bus->dma_offset_bar + SPI_DMA_INTR_IMWR_RABORT_LOW);
+ writel(msi.data, spi_bus->dma_offset_bar + SPI_DMA_INTR_WR_IMWR_DATA);
+ writel(msi.data, spi_bus->dma_offset_bar + SPI_DMA_INTR_RD_IMWR_DATA);
+ dma_set_max_seg_size(&spi_bus->dev->dev, PCI1XXXX_SPI_BUFFER_SIZE);
+ spi_bus->can_dma = true;
+ return 0;
+}
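+
+/*
+ * Editor's note: the IMWR ("interrupt module write") registers above make
+ * the DMA engine signal done/abort by posting a memory write of msi.data
+ * to the 64-bit MSI address, i.e. by reproducing the MSI message itself.
+ * A hypothetical helper (not part of the driver) factoring one address
+ * pair, as a sketch:
+ *
+ *	static void pci1xxxx_spi_set_imwr(void __iomem *bar, u32 lo, u32 hi,
+ *					  const struct msi_msg *msi)
+ *	{
+ *		writel(msi->address_lo, bar + lo);
+ *		writel(msi->address_hi, bar + hi);
+ *	}
+ */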
+
static void pci1xxxx_spi_set_cs(struct spi_device *spi, bool enable)
{
struct pci1xxxx_spi_internal *p = spi_controller_get_devdata(spi->controller);
@@ -146,12 +339,79 @@ static u8 pci1xxxx_get_clock_div(u32 hz)
return val;
}
-static int pci1xxxx_spi_transfer_one(struct spi_controller *spi_ctlr,
- struct spi_device *spi, struct spi_transfer *xfer)
+static void pci1xxxx_spi_setup_dma_to_io(struct pci1xxxx_spi_internal *p,
+ dma_addr_t dma_addr, u32 len)
+{
+ void __iomem *base;
+
+ if (!p->hw_inst)
+ base = p->parent->dma_offset_bar + SPI_DMA_CH0_RD_BASE;
+ else
+ base = p->parent->dma_offset_bar + SPI_DMA_CH1_RD_BASE;
+
+ writel(DMA_INTR_EN, base + SPI_DMA_CH_CTL1_OFFSET);
+ writel(len, base + SPI_DMA_CH_XFER_LEN_OFFSET);
+ writel(lower_32_bits(dma_addr), base + SPI_DMA_CH_SAR_LO_OFFSET);
+ writel(upper_32_bits(dma_addr), base + SPI_DMA_CH_SAR_HI_OFFSET);
+ /* Destination: the SPI instance's command buffer */
+ writel(lower_32_bits(SPI_PERI_ADDR_BASE + SPI_MST_CMD_BUF_OFFSET(p->hw_inst)),
+ base + SPI_DMA_CH_DAR_LO_OFFSET);
+ writel(upper_32_bits(SPI_PERI_ADDR_BASE + SPI_MST_CMD_BUF_OFFSET(p->hw_inst)),
+ base + SPI_DMA_CH_DAR_HI_OFFSET);
+}
+
+static void pci1xxxx_spi_setup_dma_from_io(struct pci1xxxx_spi_internal *p,
+ dma_addr_t dma_addr, u32 len)
+{
+ void __iomem *base;
+
+ if (!p->hw_inst)
+ base = p->parent->dma_offset_bar + SPI_DMA_CH0_WR_BASE;
+ else
+ base = p->parent->dma_offset_bar + SPI_DMA_CH1_WR_BASE;
+
+ writel(DMA_INTR_EN, base + SPI_DMA_CH_CTL1_OFFSET);
+ writel(len, base + SPI_DMA_CH_XFER_LEN_OFFSET);
+ writel(lower_32_bits(dma_addr), base + SPI_DMA_CH_DAR_LO_OFFSET);
+ writel(upper_32_bits(dma_addr), base + SPI_DMA_CH_DAR_HI_OFFSET);
+ writel(lower_32_bits(SPI_PERI_ADDR_BASE + SPI_MST_RSP_BUF_OFFSET(p->hw_inst)),
+ base + SPI_DMA_CH_SAR_LO_OFFSET);
+ writel(upper_32_bits(SPI_PERI_ADDR_BASE + SPI_MST_RSP_BUF_OFFSET(p->hw_inst)),
+ base + SPI_DMA_CH_SAR_HI_OFFSET);
+}
+
+static void pci1xxxx_spi_setup(struct pci1xxxx_spi *par, u8 hw_inst, u32 mode,
+ u8 clkdiv, u32 len)
+{
+ u32 regval;
+
+ regval = readl(par->reg_base + SPI_MST_CTL_REG_OFFSET(hw_inst));
+ regval &= ~(SPI_MST_CTL_MODE_SEL | SPI_MST_CTL_CMD_LEN_MASK |
+ SPI_MST_CTL_SPEED_MASK);
+
+ if (mode == SPI_MODE_3)
+ regval |= SPI_MST_CTL_MODE_SEL;
+
+ regval |= FIELD_PREP(SPI_MST_CTL_CMD_LEN_MASK, len);
+ regval |= FIELD_PREP(SPI_MST_CTL_SPEED_MASK, clkdiv);
+ writel(regval, par->reg_base + SPI_MST_CTL_REG_OFFSET(hw_inst));
+}
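+
+/*
+ * Editor's note: FIELD_PREP() shifts a value into its mask's bit position,
+ * replacing the open-coded shifts removed further below. Sketch of the
+ * equivalence, assuming the masks line up with the removed `len << 8` and
+ * `clkdiv << 5` (the exact GENMASK widths are an assumption here):
+ *
+ *	FIELD_PREP(GENMASK(16, 8), len)   == (len << 8) & GENMASK(16, 8)
+ *	FIELD_PREP(GENMASK(7, 5), clkdiv) == (clkdiv << 5) & GENMASK(7, 5)
+ */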
+
+static void pci1xxxx_start_spi_xfer(struct pci1xxxx_spi_internal *p, u8 hw_inst)
+{
+ u32 regval;
+
+ regval = readl(p->parent->reg_base + SPI_MST_CTL_REG_OFFSET(hw_inst));
+ regval |= SPI_MST_CTL_GO;
+ writel(regval, p->parent->reg_base + SPI_MST_CTL_REG_OFFSET(hw_inst));
+}
+
+static int pci1xxxx_spi_transfer_with_io(struct spi_controller *spi_ctlr,
+ struct spi_device *spi, struct spi_transfer *xfer)
{
struct pci1xxxx_spi_internal *p = spi_controller_get_devdata(spi_ctlr);
- int mode, len, loop_iter, transfer_len;
struct pci1xxxx_spi *par = p->parent;
+ int len, loop_iter, transfer_len;
unsigned long bytes_transfered;
unsigned long bytes_recvd;
unsigned long loop_count;
@@ -161,7 +421,7 @@ static int pci1xxxx_spi_transfer_one(struct spi_controller *spi_ctlr,
u8 clkdiv;
p->spi_xfer_in_progress = true;
- mode = spi->mode;
+ p->bytes_recvd = 0;
clkdiv = pci1xxxx_get_clock_div(xfer->speed_hz);
tx_buf = xfer->tx_buf;
rx_buf = xfer->rx_buf;
@@ -186,26 +446,8 @@ static int pci1xxxx_spi_transfer_one(struct spi_controller *spi_ctlr,
memcpy_toio(par->reg_base + SPI_MST_CMD_BUF_OFFSET(p->hw_inst),
&tx_buf[bytes_transfered], len);
bytes_transfered += len;
- regval = readl(par->reg_base +
- SPI_MST_CTL_REG_OFFSET(p->hw_inst));
- regval &= ~(SPI_MST_CTL_MODE_SEL | SPI_MST_CTL_CMD_LEN_MASK |
- SPI_MST_CTL_SPEED_MASK);
-
- if (mode == SPI_MODE_3)
- regval |= SPI_MST_CTL_MODE_SEL;
- else
- regval &= ~SPI_MST_CTL_MODE_SEL;
-
- regval |= (clkdiv << 5);
- regval &= ~SPI_MST_CTL_CMD_LEN_MASK;
- regval |= (len << 8);
- writel(regval, par->reg_base +
- SPI_MST_CTL_REG_OFFSET(p->hw_inst));
- regval = readl(par->reg_base +
- SPI_MST_CTL_REG_OFFSET(p->hw_inst));
- regval |= SPI_MST_CTL_GO;
- writel(regval, par->reg_base +
- SPI_MST_CTL_REG_OFFSET(p->hw_inst));
+ pci1xxxx_spi_setup(par, p->hw_inst, spi->mode, clkdiv, len);
+ pci1xxxx_start_spi_xfer(p, p->hw_inst);
/* Wait for DMA_TERM interrupt */
result = wait_for_completion_timeout(&p->spi_xfer_done,
@@ -225,7 +467,113 @@ static int pci1xxxx_spi_transfer_one(struct spi_controller *spi_ctlr,
return 0;
}
-static irqreturn_t pci1xxxx_spi_isr(int irq, void *dev)
+static int pci1xxxx_spi_transfer_with_dma(struct spi_controller *spi_ctlr,
+ struct spi_device *spi,
+ struct spi_transfer *xfer)
+{
+ struct pci1xxxx_spi_internal *p = spi_controller_get_devdata(spi_ctlr);
+ struct pci1xxxx_spi *par = p->parent;
+ dma_addr_t rx_dma_addr = 0;
+ dma_addr_t tx_dma_addr = 0;
+ int ret = 0;
+ u32 regval;
+
+ p->spi_xfer_in_progress = true;
+ p->tx_sgl = xfer->tx_sg.sgl;
+ p->rx_sgl = xfer->rx_sg.sgl;
+
+ if (!xfer->tx_buf || !p->tx_sgl) {
+ ret = -EINVAL;
+ goto error;
+ }
+ p->xfer = xfer;
+ p->mode = spi->mode;
+ p->clkdiv = pci1xxxx_get_clock_div(xfer->speed_hz);
+ p->bytes_recvd = 0;
+ p->rx_buf = xfer->rx_buf;
+ regval = readl(par->reg_base + SPI_MST_EVENT_REG_OFFSET(p->hw_inst));
+ writel(regval, par->reg_base + SPI_MST_EVENT_REG_OFFSET(p->hw_inst));
+
+ tx_dma_addr = sg_dma_address(p->tx_sgl);
+ rx_dma_addr = sg_dma_address(p->rx_sgl);
+ p->tx_sgl_len = sg_dma_len(p->tx_sgl);
+ p->rx_sgl_len = sg_dma_len(p->rx_sgl);
+ pci1xxxx_spi_setup(par, p->hw_inst, p->mode, p->clkdiv, p->tx_sgl_len);
+ pci1xxxx_spi_setup_dma_to_io(p, tx_dma_addr, p->tx_sgl_len);
+ if (rx_dma_addr)
+ pci1xxxx_spi_setup_dma_from_io(p, rx_dma_addr, p->rx_sgl_len);
+ /* Re-init the completion before ringing the doorbell so that an
+ * early complete() from the ISR cannot be lost. */
+ reinit_completion(&p->spi_xfer_done);
+ writel(p->hw_inst, par->dma_offset_bar + SPI_DMA_RD_DOORBELL_REG);
+
+ /* Wait for DMA_TERM interrupt */
+ ret = wait_for_completion_timeout(&p->spi_xfer_done, PCI1XXXX_SPI_TIMEOUT);
+ if (!ret) {
+ ret = -ETIMEDOUT;
+ if (p->dma_aborted_rd) {
+ writel(SPI_DMA_ENGINE_DIS,
+ par->dma_offset_bar + SPI_DMA_GLOBAL_RD_ENGINE_EN);
+ /*
+ * A DMA engine reset takes time if any TLP completion is in
+ * progress; wait until the reset has completed.
+ */
+ ret = readl_poll_timeout(par->dma_offset_bar +
+ SPI_DMA_GLOBAL_RD_ENGINE_EN, regval,
+ (regval == 0x0), 0, USEC_PER_MSEC);
+ if (ret) {
+ ret = -ECANCELED;
+ goto error;
+ }
+ writel(SPI_DMA_ENGINE_EN,
+ par->dma_offset_bar + SPI_DMA_GLOBAL_RD_ENGINE_EN);
+ p->dma_aborted_rd = false;
+ ret = -ECANCELED;
+ }
+ if (p->dma_aborted_wr) {
+ writel(SPI_DMA_ENGINE_DIS,
+ par->dma_offset_bar + SPI_DMA_GLOBAL_WR_ENGINE_EN);
+
+ /*
+ * A DMA engine reset takes time if any TLP completion is in
+ * progress; wait until the reset has completed.
+ */
+ ret = readl_poll_timeout(par->dma_offset_bar +
+ SPI_DMA_GLOBAL_WR_ENGINE_EN, regval,
+ (regval == 0x0), 0, USEC_PER_MSEC);
+ if (ret) {
+ ret = -ECANCELED;
+ goto error;
+ }
+
+ writel(SPI_DMA_ENGINE_EN,
+ par->dma_offset_bar + SPI_DMA_GLOBAL_WR_ENGINE_EN);
+ p->dma_aborted_wr = false;
+ ret = -ECANCELED;
+ }
+ goto error;
+ }
+ ret = 0;
+
+error:
+ p->spi_xfer_in_progress = false;
+
+ return ret;
+}
+
+static int pci1xxxx_spi_transfer_one(struct spi_controller *spi_ctlr,
+ struct spi_device *spi, struct spi_transfer *xfer)
+{
+ if (spi_ctlr->can_dma(spi_ctlr, spi, xfer) && spi_ctlr->cur_msg_mapped)
+ return pci1xxxx_spi_transfer_with_dma(spi_ctlr, spi, xfer);
+ else
+ return pci1xxxx_spi_transfer_with_io(spi_ctlr, spi, xfer);
+}
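+
+/*
+ * Editor's note: this dispatch relies on the SPI core's DMA mapping: when
+ * ctlr->can_dma() returns true for a transfer, the core maps tx_buf/rx_buf
+ * into the xfer->tx_sg/rx_sg scatterlists and sets cur_msg_mapped before
+ * transfer_one() runs. Roughly (a simplified sketch of the core-side
+ * behaviour, not the actual spi.c code):
+ *
+ *	if (ctlr->can_dma && ctlr->can_dma(ctlr, spi, xfer)) {
+ *		dma_map_sg(...);		// fills xfer->tx_sg / xfer->rx_sg
+ *		ctlr->cur_msg_mapped = true;
+ *	}
+ */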
+
+static irqreturn_t pci1xxxx_spi_isr_io(int irq, void *dev)
{
struct pci1xxxx_spi_internal *p = dev;
irqreturn_t spi_int_fired = IRQ_NONE;
@@ -235,15 +583,117 @@ static irqreturn_t pci1xxxx_spi_isr(int irq, void *dev)
regval = readl(p->parent->reg_base + SPI_MST_EVENT_REG_OFFSET(p->hw_inst));
if (regval & SPI_INTR) {
/* SPI instance xfer done: kick response DMA or signal completion */
+ if (p->parent->can_dma && p->rx_buf)
+ writel(p->hw_inst, p->parent->dma_offset_bar +
+ SPI_DMA_WR_DOORBELL_REG);
+ else
+ complete(&p->parent->spi_int[p->hw_inst]->spi_xfer_done);
+ spi_int_fired = IRQ_HANDLED;
+ }
+ writel(regval, p->parent->reg_base + SPI_MST_EVENT_REG_OFFSET(p->hw_inst));
+ return spi_int_fired;
+}
+
+static void pci1xxxx_spi_setup_next_dma_transfer(struct pci1xxxx_spi_internal *p)
+{
+ dma_addr_t tx_dma_addr = 0;
+ dma_addr_t rx_dma_addr = 0;
+ u32 prev_len;
+
+ p->tx_sgl = sg_next(p->tx_sgl);
+ if (p->rx_sgl)
+ p->rx_sgl = sg_next(p->rx_sgl);
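+ /* sg_next() returns NULL once the scatterlist table is exhausted */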
+ if (!p->tx_sgl) {
+ /* All segments transferred; signal xfer completion */
complete(&p->spi_xfer_done);
+ } else {
+ tx_dma_addr = sg_dma_address(p->tx_sgl);
+ prev_len = p->tx_sgl_len;
+ p->tx_sgl_len = sg_dma_len(p->tx_sgl);
+ if (prev_len != p->tx_sgl_len)
+ pci1xxxx_spi_setup(p->parent,
+ p->hw_inst, p->mode, p->clkdiv, p->tx_sgl_len);
+ pci1xxxx_spi_setup_dma_to_io(p, tx_dma_addr, p->tx_sgl_len);
+ if (p->rx_sgl) {
+ rx_dma_addr = sg_dma_address(p->rx_sgl);
+ p->rx_sgl_len = sg_dma_len(p->rx_sgl);
+ pci1xxxx_spi_setup_dma_from_io(p, rx_dma_addr, p->rx_sgl_len);
+ }
+ writel(p->hw_inst, p->parent->dma_offset_bar + SPI_DMA_RD_DOORBELL_REG);
+ }
+}
+
+static irqreturn_t pci1xxxx_spi_isr_dma(int irq, void *dev)
+{
+ struct pci1xxxx_spi_internal *p = dev;
+ irqreturn_t spi_int_fired = IRQ_NONE;
+ unsigned long flags;
+ u32 regval;
+
+ spin_lock_irqsave(&p->parent->dma_reg_lock, flags);
+ /* Clear the DMA RD INT and start SPI xfer */
+ regval = readl(p->parent->dma_offset_bar + SPI_DMA_INTR_RD_STS);
+ if (regval & SPI_DMA_DONE_INT_MASK) {
+ if (regval & SPI_DMA_CH0_DONE_INT)
+ pci1xxxx_start_spi_xfer(p, SPI0);
+ if (regval & SPI_DMA_CH1_DONE_INT)
+ pci1xxxx_start_spi_xfer(p, SPI1);
+ spi_int_fired = IRQ_HANDLED;
+ }
+ if (regval & SPI_DMA_ABORT_INT_MASK) {
+ p->dma_aborted_rd = true;
spi_int_fired = IRQ_HANDLED;
}
+ writel(regval, p->parent->dma_offset_bar + SPI_DMA_INTR_RD_CLR);
- writel(regval, p->parent->reg_base + SPI_MST_EVENT_REG_OFFSET(p->hw_inst));
+ /* Clear the DMA WR INT */
+ regval = readl(p->parent->dma_offset_bar + SPI_DMA_INTR_WR_STS);
+ if (regval & SPI_DMA_DONE_INT_MASK) {
+ if (regval & SPI_DMA_CH0_DONE_INT)
+ pci1xxxx_spi_setup_next_dma_transfer(p->parent->spi_int[SPI0]);
+ if (regval & SPI_DMA_CH1_DONE_INT)
+ pci1xxxx_spi_setup_next_dma_transfer(p->parent->spi_int[SPI1]);
+
+ spi_int_fired = IRQ_HANDLED;
+ }
+ if (regval & SPI_DMA_ABORT_INT_MASK) {
+ p->dma_aborted_wr = true;
+ spi_int_fired = IRQ_HANDLED;
+ }
+ writel(regval, p->parent->dma_offset_bar + SPI_DMA_INTR_WR_CLR);
+ spin_unlock_irqrestore(&p->parent->dma_reg_lock, flags);
+
+ /* Clear the SPI GO_BIT Interrupt */
+ regval = readl(p->parent->reg_base + SPI_MST_EVENT_REG_OFFSET(p->hw_inst));
+ if (regval & SPI_INTR) {
+ writel(p->hw_inst, p->parent->dma_offset_bar + SPI_DMA_WR_DOORBELL_REG);
+ spi_int_fired = IRQ_HANDLED;
+ }
+ writel(regval, p->parent->reg_base + SPI_MST_EVENT_REG_OFFSET(p->hw_inst));
return spi_int_fired;
}
+static irqreturn_t pci1xxxx_spi_isr(int irq, void *dev)
+{
+ struct pci1xxxx_spi_internal *p = dev;
+
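+ /*
+ * pci1xxxx_spi_can_dma() ignores its spi/xfer arguments and only
+ * reports the controller-wide capability, so passing NULL here is safe.
+ */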
+ if (p->spi_host->can_dma(p->spi_host, NULL, p->xfer))
+ return pci1xxxx_spi_isr_dma(irq, dev);
+ else
+ return pci1xxxx_spi_isr_io(irq, dev);
+}
+
+static bool pci1xxxx_spi_can_dma(struct spi_controller *host,
+ struct spi_device *spi,
+ struct spi_transfer *xfer)
+{
+ struct pci1xxxx_spi_internal *p = spi_controller_get_devdata(host);
+ struct pci1xxxx_spi *par = p->parent;
+
+ return par->can_dma;
+}
+
static int pci1xxxx_spi_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
u8 hw_inst_cnt, iter, start, only_sec_inst;
@@ -326,6 +776,10 @@ static int pci1xxxx_spi_probe(struct pci_dev *pdev, const struct pci_device_id *
goto error;
}
+ ret = pci1xxxx_spi_dma_init(spi_bus, spi_sub_ptr->irq);
+ if (ret && ret != -EOPNOTSUPP)
+ goto error;
+
/* This register is only applicable for 1st instance */
regval = readl(spi_bus->reg_base + SPI_PCI_CTRL_REG_OFFSET(0));
if (!only_sec_inst)
@@ -362,7 +816,9 @@ static int pci1xxxx_spi_probe(struct pci_dev *pdev, const struct pci_device_id *
spi_host->num_chipselect = SPI_CHIP_SEL_COUNT;
spi_host->mode_bits = SPI_MODE_0 | SPI_MODE_3 | SPI_RX_DUAL |
SPI_TX_DUAL | SPI_LOOP;
+ spi_host->can_dma = pci1xxxx_spi_can_dma;
spi_host->transfer_one = pci1xxxx_spi_transfer_one;
+
spi_host->set_cs = pci1xxxx_spi_set_cs;
spi_host->bits_per_word_mask = SPI_BPW_MASK(8);
spi_host->max_speed_hz = PCI1XXXX_SPI_MAX_CLOCK_HZ;