author     Daniel Baumann <daniel.baumann@progress-linux.org>   2024-08-07 13:17:52 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>   2024-08-07 13:17:52 +0000
commit     3afb00d3f86d3d924f88b56fa8285d4e9db85852 (patch)
tree       95a985d3019522cea546b7d8df621369bc44fc6c /drivers/spi
parent     Adding debian version 6.9.12-1. (diff)
Merging upstream version 6.10.3.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'drivers/spi')
43 files changed, 1991 insertions, 578 deletions
diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig index bc7021da2f..a2c99ff33e 100644 --- a/drivers/spi/Kconfig +++ b/drivers/spi/Kconfig @@ -57,6 +57,16 @@ config SPI_MEM comment "SPI Master Controller Drivers" +config SPI_AIROHA_SNFI + tristate "Airoha SPI NAND Flash Interface" + depends on ARCH_AIROHA || COMPILE_TEST + depends on SPI_MASTER + select REGMAP_MMIO + help + This enables support for SPI-NAND mode on the Airoha NAND + Flash Interface found on Airoha ARM SoCs. This controller + is implemented as a SPI-MEM controller. + config SPI_ALTERA tristate "Altera SPI Controller platform driver" select SPI_ALTERA_CORE @@ -216,11 +226,11 @@ config SPI_BCMBCA_HSSPI explicitly. config SPI_BITBANG - tristate "Utilities for Bitbanging SPI masters" + tristate "Utilities for Bitbanging SPI host controllers" help With a few GPIO pins, your system can bitbang the SPI protocol. Select this to get SPI support through I/O pins (GPIO, parallel - port, etc). Or, some systems' SPI master controller drivers use + port, etc). Or, some systems' SPI host controller drivers use this code to manage the per-word or per-transfer accesses to the hardware shift registers. @@ -246,7 +256,7 @@ config SPI_CADENCE config SPI_CADENCE_QUADSPI tristate "Cadence Quad SPI controller" - depends on OF && (ARM || ARM64 || X86 || RISCV || COMPILE_TEST) + depends on OF && (ARM || ARM64 || X86 || RISCV || MIPS || COMPILE_TEST) help Enable support for the Cadence Quad SPI Flash controller. @@ -284,6 +294,7 @@ config SPI_COLDFIRE_QSPI config SPI_CS42L43 tristate "Cirrus Logic CS42L43 SPI controller" depends on MFD_CS42L43 && PINCTRL_CS42L43 + select GPIO_SWNODE_UNDEFINED help This enables support for the SPI controller inside the Cirrus Logic CS42L43 audio codec. @@ -817,12 +828,11 @@ config SPI_PPC4xx config SPI_PXA2XX tristate "PXA2xx SSP SPI master" - depends on ARCH_PXA || ARCH_MMP || PCI || ACPI || COMPILE_TEST + depends on ARCH_PXA || ARCH_MMP || (X86 && (PCI || ACPI)) || COMPILE_TEST select PXA_SSP if ARCH_PXA || ARCH_MMP help This enables using a PXA2xx or Sodaville SSP port as a SPI master - controller. The driver can be configured to use any SSP port and - additional documentation can be found a Documentation/spi/pxa2xx.rst. + controller. The driver can be configured to use any SSP port. 
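/*
 * Editorial note, not part of the patch: the SPI_AIROHA_SNFI help text above
 * says the new controller "is implemented as a SPI-MEM controller". As rough
 * context for what that means, below is a minimal sketch of how a spi-mem
 * controller driver plugs into the SPI core: instead of a transfer_one()
 * callback it exposes spi_controller_mem_ops. All example_* names are
 * hypothetical; the real implementation is the spi-airoha-snfi.c code added
 * later in this diff.
 */
#include <linux/platform_device.h>
#include <linux/spi/spi.h>
#include <linux/spi/spi-mem.h>

static int example_exec_op(struct spi_mem *mem, const struct spi_mem_op *op)
{
	/* Issue op->cmd, op->addr, op->dummy and op->data on the hardware;
	 * omitted in this sketch.
	 */
	return -EOPNOTSUPP;
}

static bool example_supports_op(struct spi_mem *mem, const struct spi_mem_op *op)
{
	/* Start from the generic checks, then add controller-specific limits. */
	return spi_mem_default_supports_op(mem, op);
}

static const struct spi_controller_mem_ops example_mem_ops = {
	.supports_op = example_supports_op,
	.exec_op     = example_exec_op,
};

static int example_probe(struct platform_device *pdev)
{
	struct spi_controller *ctrl;

	/* Allocate a host controller; no private data for brevity. */
	ctrl = devm_spi_alloc_host(&pdev->dev, 0);
	if (!ctrl)
		return -ENOMEM;

	/* A spi-mem controller advertises mem_ops to the SPI core. */
	ctrl->mem_ops = &example_mem_ops;
	ctrl->num_chipselect = 1;

	return devm_spi_register_controller(&pdev->dev, ctrl);
}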
config SPI_PXA2XX_PCI def_tristate SPI_PXA2XX && PCI && COMMON_CLK diff --git a/drivers/spi/Makefile b/drivers/spi/Makefile index 4ff8d725ba..e694254dec 100644 --- a/drivers/spi/Makefile +++ b/drivers/spi/Makefile @@ -14,6 +14,7 @@ obj-$(CONFIG_SPI_SPIDEV) += spidev.o obj-$(CONFIG_SPI_LOOPBACK_TEST) += spi-loopback-test.o # SPI master controller drivers (bus) +obj-$(CONFIG_SPI_AIROHA_SNFI) += spi-airoha-snfi.o obj-$(CONFIG_SPI_ALTERA) += spi-altera-platform.o obj-$(CONFIG_SPI_ALTERA_CORE) += spi-altera-core.o obj-$(CONFIG_SPI_ALTERA_DFL) += spi-altera-dfl.o diff --git a/drivers/spi/atmel-quadspi.c b/drivers/spi/atmel-quadspi.c index 370c4d1572..5aaff3bee1 100644 --- a/drivers/spi/atmel-quadspi.c +++ b/drivers/spi/atmel-quadspi.c @@ -756,8 +756,15 @@ static int __maybe_unused atmel_qspi_resume(struct device *dev) struct atmel_qspi *aq = spi_controller_get_devdata(ctrl); int ret; - clk_prepare(aq->pclk); - clk_prepare(aq->qspick); + ret = clk_prepare(aq->pclk); + if (ret) + return ret; + + ret = clk_prepare(aq->qspick); + if (ret) { + clk_unprepare(aq->pclk); + return ret; + } ret = pm_runtime_force_resume(dev); if (ret < 0) diff --git a/drivers/spi/spi-airoha-snfi.c b/drivers/spi/spi-airoha-snfi.c new file mode 100644 index 0000000000..9d97ec9888 --- /dev/null +++ b/drivers/spi/spi-airoha-snfi.c @@ -0,0 +1,1129 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2024 AIROHA Inc + * Author: Lorenzo Bianconi <lorenzo@kernel.org> + * Author: Ray Liu <ray.liu@airoha.com> + */ + +#include <linux/bitfield.h> +#include <linux/clk.h> +#include <linux/delay.h> +#include <linux/device.h> +#include <linux/dma-mapping.h> +#include <linux/errno.h> +#include <linux/limits.h> +#include <linux/math.h> +#include <linux/minmax.h> +#include <linux/module.h> +#include <linux/mutex.h> +#include <linux/platform_device.h> +#include <linux/property.h> +#include <linux/regmap.h> +#include <linux/sizes.h> +#include <linux/spi/spi.h> +#include <linux/spi/spi-mem.h> +#include <linux/types.h> +#include <asm/unaligned.h> + +/* SPI */ +#define REG_SPI_CTRL_BASE 0x1FA10000 + +#define REG_SPI_CTRL_READ_MODE 0x0000 +#define REG_SPI_CTRL_READ_IDLE_EN 0x0004 +#define REG_SPI_CTRL_SIDLY 0x0008 +#define REG_SPI_CTRL_CSHEXT 0x000c +#define REG_SPI_CTRL_CSLEXT 0x0010 + +#define REG_SPI_CTRL_MTX_MODE_TOG 0x0014 +#define SPI_CTRL_MTX_MODE_TOG GENMASK(3, 0) + +#define REG_SPI_CTRL_RDCTL_FSM 0x0018 +#define SPI_CTRL_RDCTL_FSM GENMASK(3, 0) + +#define REG_SPI_CTRL_MACMUX_SEL 0x001c + +#define REG_SPI_CTRL_MANUAL_EN 0x0020 +#define SPI_CTRL_MANUAL_EN BIT(0) + +#define REG_SPI_CTRL_OPFIFO_EMPTY 0x0024 +#define SPI_CTRL_OPFIFO_EMPTY BIT(0) + +#define REG_SPI_CTRL_OPFIFO_WDATA 0x0028 +#define SPI_CTRL_OPFIFO_LEN GENMASK(8, 0) +#define SPI_CTRL_OPFIFO_OP GENMASK(13, 9) + +#define REG_SPI_CTRL_OPFIFO_FULL 0x002c +#define SPI_CTRL_OPFIFO_FULL BIT(0) + +#define REG_SPI_CTRL_OPFIFO_WR 0x0030 +#define SPI_CTRL_OPFIFO_WR BIT(0) + +#define REG_SPI_CTRL_DFIFO_FULL 0x0034 +#define SPI_CTRL_DFIFO_FULL BIT(0) + +#define REG_SPI_CTRL_DFIFO_WDATA 0x0038 +#define SPI_CTRL_DFIFO_WDATA GENMASK(7, 0) + +#define REG_SPI_CTRL_DFIFO_EMPTY 0x003c +#define SPI_CTRL_DFIFO_EMPTY BIT(0) + +#define REG_SPI_CTRL_DFIFO_RD 0x0040 +#define SPI_CTRL_DFIFO_RD BIT(0) + +#define REG_SPI_CTRL_DFIFO_RDATA 0x0044 +#define SPI_CTRL_DFIFO_RDATA GENMASK(7, 0) + +#define REG_SPI_CTRL_DUMMY 0x0080 +#define SPI_CTRL_CTRL_DUMMY GENMASK(3, 0) + +#define REG_SPI_CTRL_PROBE_SEL 0x0088 +#define REG_SPI_CTRL_INTERRUPT 0x0090 +#define REG_SPI_CTRL_INTERRUPT_EN 0x0094 
+#define REG_SPI_CTRL_SI_CK_SEL 0x009c +#define REG_SPI_CTRL_SW_CFGNANDADDR_VAL 0x010c +#define REG_SPI_CTRL_SW_CFGNANDADDR_EN 0x0110 +#define REG_SPI_CTRL_SFC_STRAP 0x0114 + +#define REG_SPI_CTRL_NFI2SPI_EN 0x0130 +#define SPI_CTRL_NFI2SPI_EN BIT(0) + +/* NFI2SPI */ +#define REG_SPI_NFI_CNFG 0x0000 +#define SPI_NFI_DMA_MODE BIT(0) +#define SPI_NFI_READ_MODE BIT(1) +#define SPI_NFI_DMA_BURST_EN BIT(2) +#define SPI_NFI_HW_ECC_EN BIT(8) +#define SPI_NFI_AUTO_FDM_EN BIT(9) +#define SPI_NFI_OPMODE GENMASK(14, 12) + +#define REG_SPI_NFI_PAGEFMT 0x0004 +#define SPI_NFI_PAGE_SIZE GENMASK(1, 0) +#define SPI_NFI_SPARE_SIZE GENMASK(5, 4) + +#define REG_SPI_NFI_CON 0x0008 +#define SPI_NFI_FIFO_FLUSH BIT(0) +#define SPI_NFI_RST BIT(1) +#define SPI_NFI_RD_TRIG BIT(8) +#define SPI_NFI_WR_TRIG BIT(9) +#define SPI_NFI_SEC_NUM GENMASK(15, 12) + +#define REG_SPI_NFI_INTR_EN 0x0010 +#define SPI_NFI_RD_DONE_EN BIT(0) +#define SPI_NFI_WR_DONE_EN BIT(1) +#define SPI_NFI_RST_DONE_EN BIT(2) +#define SPI_NFI_ERASE_DONE_EN BIT(3) +#define SPI_NFI_BUSY_RETURN_EN BIT(4) +#define SPI_NFI_ACCESS_LOCK_EN BIT(5) +#define SPI_NFI_AHB_DONE_EN BIT(6) +#define SPI_NFI_ALL_IRQ_EN \ + (SPI_NFI_RD_DONE_EN | SPI_NFI_WR_DONE_EN | \ + SPI_NFI_RST_DONE_EN | SPI_NFI_ERASE_DONE_EN | \ + SPI_NFI_BUSY_RETURN_EN | SPI_NFI_ACCESS_LOCK_EN | \ + SPI_NFI_AHB_DONE_EN) + +#define REG_SPI_NFI_INTR 0x0014 +#define SPI_NFI_AHB_DONE BIT(6) + +#define REG_SPI_NFI_CMD 0x0020 + +#define REG_SPI_NFI_ADDR_NOB 0x0030 +#define SPI_NFI_ROW_ADDR_NOB GENMASK(6, 4) + +#define REG_SPI_NFI_STA 0x0060 +#define REG_SPI_NFI_FIFOSTA 0x0064 +#define REG_SPI_NFI_STRADDR 0x0080 +#define REG_SPI_NFI_FDM0L 0x00a0 +#define REG_SPI_NFI_FDM0M 0x00a4 +#define REG_SPI_NFI_FDM7L 0x00d8 +#define REG_SPI_NFI_FDM7M 0x00dc +#define REG_SPI_NFI_FIFODATA0 0x0190 +#define REG_SPI_NFI_FIFODATA1 0x0194 +#define REG_SPI_NFI_FIFODATA2 0x0198 +#define REG_SPI_NFI_FIFODATA3 0x019c +#define REG_SPI_NFI_MASTERSTA 0x0224 + +#define REG_SPI_NFI_SECCUS_SIZE 0x022c +#define SPI_NFI_CUS_SEC_SIZE GENMASK(12, 0) +#define SPI_NFI_CUS_SEC_SIZE_EN BIT(16) + +#define REG_SPI_NFI_RD_CTL2 0x0510 +#define REG_SPI_NFI_RD_CTL3 0x0514 + +#define REG_SPI_NFI_PG_CTL1 0x0524 +#define SPI_NFI_PG_LOAD_CMD GENMASK(15, 8) + +#define REG_SPI_NFI_PG_CTL2 0x0528 +#define REG_SPI_NFI_NOR_PROG_ADDR 0x052c +#define REG_SPI_NFI_NOR_RD_ADDR 0x0534 + +#define REG_SPI_NFI_SNF_MISC_CTL 0x0538 +#define SPI_NFI_DATA_READ_WR_MODE GENMASK(18, 16) + +#define REG_SPI_NFI_SNF_MISC_CTL2 0x053c +#define SPI_NFI_READ_DATA_BYTE_NUM GENMASK(12, 0) +#define SPI_NFI_PROG_LOAD_BYTE_NUM GENMASK(28, 16) + +#define REG_SPI_NFI_SNF_STA_CTL1 0x0550 +#define SPI_NFI_READ_FROM_CACHE_DONE BIT(25) +#define SPI_NFI_LOAD_TO_CACHE_DONE BIT(26) + +#define REG_SPI_NFI_SNF_STA_CTL2 0x0554 + +#define REG_SPI_NFI_SNF_NFI_CNFG 0x055c +#define SPI_NFI_SPI_MODE BIT(0) + +/* SPI NAND Protocol OP */ +#define SPI_NAND_OP_GET_FEATURE 0x0f +#define SPI_NAND_OP_SET_FEATURE 0x1f +#define SPI_NAND_OP_PAGE_READ 0x13 +#define SPI_NAND_OP_READ_FROM_CACHE_SINGLE 0x03 +#define SPI_NAND_OP_READ_FROM_CACHE_SINGLE_FAST 0x0b +#define SPI_NAND_OP_READ_FROM_CACHE_DUAL 0x3b +#define SPI_NAND_OP_READ_FROM_CACHE_QUAD 0x6b +#define SPI_NAND_OP_WRITE_ENABLE 0x06 +#define SPI_NAND_OP_WRITE_DISABLE 0x04 +#define SPI_NAND_OP_PROGRAM_LOAD_SINGLE 0x02 +#define SPI_NAND_OP_PROGRAM_LOAD_QUAD 0x32 +#define SPI_NAND_OP_PROGRAM_LOAD_RAMDOM_SINGLE 0x84 +#define SPI_NAND_OP_PROGRAM_LOAD_RAMDON_QUAD 0x34 +#define SPI_NAND_OP_PROGRAM_EXECUTE 0x10 +#define SPI_NAND_OP_READ_ID 0x9f +#define 
SPI_NAND_OP_BLOCK_ERASE 0xd8 +#define SPI_NAND_OP_RESET 0xff +#define SPI_NAND_OP_DIE_SELECT 0xc2 + +#define SPI_NAND_CACHE_SIZE (SZ_4K + SZ_256) +#define SPI_MAX_TRANSFER_SIZE 511 + +enum airoha_snand_mode { + SPI_MODE_AUTO, + SPI_MODE_MANUAL, + SPI_MODE_DMA, +}; + +enum airoha_snand_cs { + SPI_CHIP_SEL_HIGH, + SPI_CHIP_SEL_LOW, +}; + +struct airoha_snand_dev { + size_t buf_len; + + u8 *txrx_buf; + dma_addr_t dma_addr; + + u64 cur_page_num; + bool data_need_update; +}; + +struct airoha_snand_ctrl { + struct device *dev; + struct regmap *regmap_ctrl; + struct regmap *regmap_nfi; + struct clk *spi_clk; + + struct { + size_t page_size; + size_t sec_size; + u8 sec_num; + u8 spare_size; + } nfi_cfg; +}; + +static int airoha_snand_set_fifo_op(struct airoha_snand_ctrl *as_ctrl, + u8 op_cmd, int op_len) +{ + int err; + u32 val; + + err = regmap_write(as_ctrl->regmap_ctrl, REG_SPI_CTRL_OPFIFO_WDATA, + FIELD_PREP(SPI_CTRL_OPFIFO_LEN, op_len) | + FIELD_PREP(SPI_CTRL_OPFIFO_OP, op_cmd)); + if (err) + return err; + + err = regmap_read_poll_timeout(as_ctrl->regmap_ctrl, + REG_SPI_CTRL_OPFIFO_FULL, + val, !(val & SPI_CTRL_OPFIFO_FULL), + 0, 250 * USEC_PER_MSEC); + if (err) + return err; + + err = regmap_write(as_ctrl->regmap_ctrl, REG_SPI_CTRL_OPFIFO_WR, + SPI_CTRL_OPFIFO_WR); + if (err) + return err; + + return regmap_read_poll_timeout(as_ctrl->regmap_ctrl, + REG_SPI_CTRL_OPFIFO_EMPTY, + val, (val & SPI_CTRL_OPFIFO_EMPTY), + 0, 250 * USEC_PER_MSEC); +} + +static int airoha_snand_set_cs(struct airoha_snand_ctrl *as_ctrl, u8 cs) +{ + return airoha_snand_set_fifo_op(as_ctrl, cs, sizeof(cs)); +} + +static int airoha_snand_write_data_to_fifo(struct airoha_snand_ctrl *as_ctrl, + const u8 *data, int len) +{ + int i; + + for (i = 0; i < len; i++) { + int err; + u32 val; + + /* 1. Wait until dfifo is not full */ + err = regmap_read_poll_timeout(as_ctrl->regmap_ctrl, + REG_SPI_CTRL_DFIFO_FULL, val, + !(val & SPI_CTRL_DFIFO_FULL), + 0, 250 * USEC_PER_MSEC); + if (err) + return err; + + /* 2. Write data to register DFIFO_WDATA */ + err = regmap_write(as_ctrl->regmap_ctrl, + REG_SPI_CTRL_DFIFO_WDATA, + FIELD_PREP(SPI_CTRL_DFIFO_WDATA, data[i])); + if (err) + return err; + + /* 3. Wait until dfifo is not full */ + err = regmap_read_poll_timeout(as_ctrl->regmap_ctrl, + REG_SPI_CTRL_DFIFO_FULL, val, + !(val & SPI_CTRL_DFIFO_FULL), + 0, 250 * USEC_PER_MSEC); + if (err) + return err; + } + + return 0; +} + +static int airoha_snand_read_data_from_fifo(struct airoha_snand_ctrl *as_ctrl, + u8 *ptr, int len) +{ + int i; + + for (i = 0; i < len; i++) { + int err; + u32 val; + + /* 1. wait until dfifo is not empty */ + err = regmap_read_poll_timeout(as_ctrl->regmap_ctrl, + REG_SPI_CTRL_DFIFO_EMPTY, val, + !(val & SPI_CTRL_DFIFO_EMPTY), + 0, 250 * USEC_PER_MSEC); + if (err) + return err; + + /* 2. read from dfifo to register DFIFO_RDATA */ + err = regmap_read(as_ctrl->regmap_ctrl, + REG_SPI_CTRL_DFIFO_RDATA, &val); + if (err) + return err; + + ptr[i] = FIELD_GET(SPI_CTRL_DFIFO_RDATA, val); + /* 3. 
enable register DFIFO_RD to read next byte */ + err = regmap_write(as_ctrl->regmap_ctrl, + REG_SPI_CTRL_DFIFO_RD, SPI_CTRL_DFIFO_RD); + if (err) + return err; + } + + return 0; +} + +static int airoha_snand_set_mode(struct airoha_snand_ctrl *as_ctrl, + enum airoha_snand_mode mode) +{ + int err; + + switch (mode) { + case SPI_MODE_MANUAL: { + u32 val; + + err = regmap_write(as_ctrl->regmap_ctrl, + REG_SPI_CTRL_NFI2SPI_EN, 0); + if (err) + return err; + + err = regmap_write(as_ctrl->regmap_ctrl, + REG_SPI_CTRL_READ_IDLE_EN, 0); + if (err) + return err; + + err = regmap_read_poll_timeout(as_ctrl->regmap_ctrl, + REG_SPI_CTRL_RDCTL_FSM, val, + !(val & SPI_CTRL_RDCTL_FSM), + 0, 250 * USEC_PER_MSEC); + if (err) + return err; + + err = regmap_write(as_ctrl->regmap_ctrl, + REG_SPI_CTRL_MTX_MODE_TOG, 9); + if (err) + return err; + + err = regmap_write(as_ctrl->regmap_ctrl, + REG_SPI_CTRL_MANUAL_EN, SPI_CTRL_MANUAL_EN); + if (err) + return err; + break; + } + case SPI_MODE_DMA: + err = regmap_write(as_ctrl->regmap_ctrl, + REG_SPI_CTRL_NFI2SPI_EN, + SPI_CTRL_MANUAL_EN); + if (err < 0) + return err; + + err = regmap_write(as_ctrl->regmap_ctrl, + REG_SPI_CTRL_MTX_MODE_TOG, 0x0); + if (err < 0) + return err; + + err = regmap_write(as_ctrl->regmap_ctrl, + REG_SPI_CTRL_MANUAL_EN, 0x0); + if (err < 0) + return err; + break; + case SPI_MODE_AUTO: + default: + break; + } + + return regmap_write(as_ctrl->regmap_ctrl, REG_SPI_CTRL_DUMMY, 0); +} + +static int airoha_snand_write_data(struct airoha_snand_ctrl *as_ctrl, u8 cmd, + const u8 *data, int len) +{ + int i, data_len; + + for (i = 0; i < len; i += data_len) { + int err; + + data_len = min(len, SPI_MAX_TRANSFER_SIZE); + err = airoha_snand_set_fifo_op(as_ctrl, cmd, data_len); + if (err) + return err; + + err = airoha_snand_write_data_to_fifo(as_ctrl, &data[i], + data_len); + if (err < 0) + return err; + } + + return 0; +} + +static int airoha_snand_read_data(struct airoha_snand_ctrl *as_ctrl, u8 *data, + int len) +{ + int i, data_len; + + for (i = 0; i < len; i += data_len) { + int err; + + data_len = min(len, SPI_MAX_TRANSFER_SIZE); + err = airoha_snand_set_fifo_op(as_ctrl, 0xc, data_len); + if (err) + return err; + + err = airoha_snand_read_data_from_fifo(as_ctrl, &data[i], + data_len); + if (err < 0) + return err; + } + + return 0; +} + +static int airoha_snand_nfi_init(struct airoha_snand_ctrl *as_ctrl) +{ + int err; + + /* switch to SNFI mode */ + err = regmap_write(as_ctrl->regmap_nfi, REG_SPI_NFI_SNF_NFI_CNFG, + SPI_NFI_SPI_MODE); + if (err) + return err; + + /* Enable DMA */ + return regmap_update_bits(as_ctrl->regmap_nfi, REG_SPI_NFI_INTR_EN, + SPI_NFI_ALL_IRQ_EN, SPI_NFI_AHB_DONE_EN); +} + +static int airoha_snand_nfi_config(struct airoha_snand_ctrl *as_ctrl) +{ + int err; + u32 val; + + err = regmap_write(as_ctrl->regmap_nfi, REG_SPI_NFI_CON, + SPI_NFI_FIFO_FLUSH | SPI_NFI_RST); + if (err) + return err; + + /* auto FDM */ + err = regmap_clear_bits(as_ctrl->regmap_nfi, REG_SPI_NFI_CNFG, + SPI_NFI_AUTO_FDM_EN); + if (err) + return err; + + /* HW ECC */ + err = regmap_clear_bits(as_ctrl->regmap_nfi, REG_SPI_NFI_CNFG, + SPI_NFI_HW_ECC_EN); + if (err) + return err; + + /* DMA Burst */ + err = regmap_set_bits(as_ctrl->regmap_nfi, REG_SPI_NFI_CNFG, + SPI_NFI_DMA_BURST_EN); + if (err) + return err; + + /* page format */ + switch (as_ctrl->nfi_cfg.spare_size) { + case 26: + val = FIELD_PREP(SPI_NFI_SPARE_SIZE, 0x1); + break; + case 27: + val = FIELD_PREP(SPI_NFI_SPARE_SIZE, 0x2); + break; + case 28: + val = FIELD_PREP(SPI_NFI_SPARE_SIZE, 0x3); + break; + 
default: + val = FIELD_PREP(SPI_NFI_SPARE_SIZE, 0x0); + break; + } + + err = regmap_update_bits(as_ctrl->regmap_nfi, REG_SPI_NFI_PAGEFMT, + SPI_NFI_SPARE_SIZE, val); + if (err) + return err; + + switch (as_ctrl->nfi_cfg.page_size) { + case 2048: + val = FIELD_PREP(SPI_NFI_PAGE_SIZE, 0x1); + break; + case 4096: + val = FIELD_PREP(SPI_NFI_PAGE_SIZE, 0x2); + break; + default: + val = FIELD_PREP(SPI_NFI_PAGE_SIZE, 0x0); + break; + } + + err = regmap_update_bits(as_ctrl->regmap_nfi, REG_SPI_NFI_PAGEFMT, + SPI_NFI_PAGE_SIZE, val); + if (err) + return err; + + /* sec num */ + val = FIELD_PREP(SPI_NFI_SEC_NUM, as_ctrl->nfi_cfg.sec_num); + err = regmap_update_bits(as_ctrl->regmap_nfi, REG_SPI_NFI_CON, + SPI_NFI_SEC_NUM, val); + if (err) + return err; + + /* enable cust sec size */ + err = regmap_set_bits(as_ctrl->regmap_nfi, REG_SPI_NFI_SECCUS_SIZE, + SPI_NFI_CUS_SEC_SIZE_EN); + if (err) + return err; + + /* set cust sec size */ + val = FIELD_PREP(SPI_NFI_CUS_SEC_SIZE, as_ctrl->nfi_cfg.sec_size); + return regmap_update_bits(as_ctrl->regmap_nfi, + REG_SPI_NFI_SECCUS_SIZE, + SPI_NFI_CUS_SEC_SIZE, val); +} + +static bool airoha_snand_is_page_ops(const struct spi_mem_op *op) +{ + if (op->addr.nbytes != 2) + return false; + + if (op->addr.buswidth != 1 && op->addr.buswidth != 2 && + op->addr.buswidth != 4) + return false; + + switch (op->data.dir) { + case SPI_MEM_DATA_IN: + if (op->dummy.nbytes * BITS_PER_BYTE / op->dummy.buswidth > 0xf) + return false; + + /* quad in / quad out */ + if (op->addr.buswidth == 4) + return op->data.buswidth == 4; + + if (op->addr.buswidth == 2) + return op->data.buswidth == 2; + + /* standard spi */ + return op->data.buswidth == 4 || op->data.buswidth == 2 || + op->data.buswidth == 1; + case SPI_MEM_DATA_OUT: + return !op->dummy.nbytes && op->addr.buswidth == 1 && + (op->data.buswidth == 4 || op->data.buswidth == 1); + default: + return false; + } +} + +static int airoha_snand_adjust_op_size(struct spi_mem *mem, + struct spi_mem_op *op) +{ + size_t max_len; + + if (airoha_snand_is_page_ops(op)) { + struct airoha_snand_ctrl *as_ctrl; + + as_ctrl = spi_controller_get_devdata(mem->spi->controller); + max_len = as_ctrl->nfi_cfg.sec_size; + max_len += as_ctrl->nfi_cfg.spare_size; + max_len *= as_ctrl->nfi_cfg.sec_num; + + if (op->data.nbytes > max_len) + op->data.nbytes = max_len; + } else { + max_len = 1 + op->addr.nbytes + op->dummy.nbytes; + if (max_len >= 160) + return -EOPNOTSUPP; + + if (op->data.nbytes > 160 - max_len) + op->data.nbytes = 160 - max_len; + } + + return 0; +} + +static bool airoha_snand_supports_op(struct spi_mem *mem, + const struct spi_mem_op *op) +{ + if (!spi_mem_default_supports_op(mem, op)) + return false; + + if (op->cmd.buswidth != 1) + return false; + + if (airoha_snand_is_page_ops(op)) + return true; + + return (!op->addr.nbytes || op->addr.buswidth == 1) && + (!op->dummy.nbytes || op->dummy.buswidth == 1) && + (!op->data.nbytes || op->data.buswidth == 1); +} + +static int airoha_snand_dirmap_create(struct spi_mem_dirmap_desc *desc) +{ + struct airoha_snand_dev *as_dev = spi_get_ctldata(desc->mem->spi); + + if (!as_dev->txrx_buf) + return -EINVAL; + + if (desc->info.offset + desc->info.length > U32_MAX) + return -EINVAL; + + if (!airoha_snand_supports_op(desc->mem, &desc->info.op_tmpl)) + return -EOPNOTSUPP; + + return 0; +} + +static ssize_t airoha_snand_dirmap_read(struct spi_mem_dirmap_desc *desc, + u64 offs, size_t len, void *buf) +{ + struct spi_device *spi = desc->mem->spi; + struct airoha_snand_dev *as_dev = spi_get_ctldata(spi); + struct 
spi_mem_op *op = &desc->info.op_tmpl; + struct airoha_snand_ctrl *as_ctrl; + u32 val, rd_mode; + int err; + + if (!as_dev->data_need_update) + return len; + + as_dev->data_need_update = false; + + switch (op->cmd.opcode) { + case SPI_NAND_OP_READ_FROM_CACHE_DUAL: + rd_mode = 1; + break; + case SPI_NAND_OP_READ_FROM_CACHE_QUAD: + rd_mode = 2; + break; + default: + rd_mode = 0; + break; + } + + as_ctrl = spi_controller_get_devdata(spi->controller); + err = airoha_snand_set_mode(as_ctrl, SPI_MODE_DMA); + if (err < 0) + return err; + + err = airoha_snand_nfi_config(as_ctrl); + if (err) + return err; + + dma_sync_single_for_device(as_ctrl->dev, as_dev->dma_addr, + as_dev->buf_len, DMA_BIDIRECTIONAL); + + /* set dma addr */ + err = regmap_write(as_ctrl->regmap_nfi, REG_SPI_NFI_STRADDR, + as_dev->dma_addr); + if (err) + return err; + + /* set cust sec size */ + val = as_ctrl->nfi_cfg.sec_size * as_ctrl->nfi_cfg.sec_num; + val = FIELD_PREP(SPI_NFI_READ_DATA_BYTE_NUM, val); + err = regmap_update_bits(as_ctrl->regmap_nfi, + REG_SPI_NFI_SNF_MISC_CTL2, + SPI_NFI_READ_DATA_BYTE_NUM, val); + if (err) + return err; + + /* set read command */ + err = regmap_write(as_ctrl->regmap_nfi, REG_SPI_NFI_RD_CTL2, + op->cmd.opcode); + if (err) + return err; + + /* set read mode */ + err = regmap_write(as_ctrl->regmap_nfi, REG_SPI_NFI_SNF_MISC_CTL, + FIELD_PREP(SPI_NFI_DATA_READ_WR_MODE, rd_mode)); + if (err) + return err; + + /* set read addr */ + err = regmap_write(as_ctrl->regmap_nfi, REG_SPI_NFI_RD_CTL3, 0x0); + if (err) + return err; + + /* set nfi read */ + err = regmap_update_bits(as_ctrl->regmap_nfi, REG_SPI_NFI_CNFG, + SPI_NFI_OPMODE, + FIELD_PREP(SPI_NFI_OPMODE, 6)); + if (err) + return err; + + err = regmap_set_bits(as_ctrl->regmap_nfi, REG_SPI_NFI_CNFG, + SPI_NFI_READ_MODE | SPI_NFI_DMA_MODE); + if (err) + return err; + + err = regmap_write(as_ctrl->regmap_nfi, REG_SPI_NFI_CMD, 0x0); + if (err) + return err; + + /* trigger dma start read */ + err = regmap_clear_bits(as_ctrl->regmap_nfi, REG_SPI_NFI_CON, + SPI_NFI_RD_TRIG); + if (err) + return err; + + err = regmap_set_bits(as_ctrl->regmap_nfi, REG_SPI_NFI_CON, + SPI_NFI_RD_TRIG); + if (err) + return err; + + err = regmap_read_poll_timeout(as_ctrl->regmap_nfi, + REG_SPI_NFI_SNF_STA_CTL1, val, + (val & SPI_NFI_READ_FROM_CACHE_DONE), + 0, 1 * USEC_PER_SEC); + if (err) + return err; + + err = regmap_set_bits(as_ctrl->regmap_nfi, REG_SPI_NFI_SNF_STA_CTL1, + SPI_NFI_READ_FROM_CACHE_DONE); + if (err) + return err; + + err = regmap_read_poll_timeout(as_ctrl->regmap_nfi, REG_SPI_NFI_INTR, + val, (val & SPI_NFI_AHB_DONE), 0, + 1 * USEC_PER_SEC); + if (err) + return err; + + /* DMA read need delay for data ready from controller to DRAM */ + udelay(1); + + dma_sync_single_for_cpu(as_ctrl->dev, as_dev->dma_addr, + as_dev->buf_len, DMA_BIDIRECTIONAL); + err = airoha_snand_set_mode(as_ctrl, SPI_MODE_MANUAL); + if (err < 0) + return err; + + memcpy(buf, as_dev->txrx_buf + offs, len); + + return len; +} + +static ssize_t airoha_snand_dirmap_write(struct spi_mem_dirmap_desc *desc, + u64 offs, size_t len, const void *buf) +{ + struct spi_device *spi = desc->mem->spi; + struct airoha_snand_dev *as_dev = spi_get_ctldata(spi); + struct spi_mem_op *op = &desc->info.op_tmpl; + struct airoha_snand_ctrl *as_ctrl; + u32 wr_mode, val; + int err; + + as_ctrl = spi_controller_get_devdata(spi->controller); + err = airoha_snand_set_mode(as_ctrl, SPI_MODE_MANUAL); + if (err < 0) + return err; + + dma_sync_single_for_cpu(as_ctrl->dev, as_dev->dma_addr, + as_dev->buf_len, 
DMA_BIDIRECTIONAL); + memcpy(as_dev->txrx_buf + offs, buf, len); + dma_sync_single_for_device(as_ctrl->dev, as_dev->dma_addr, + as_dev->buf_len, DMA_BIDIRECTIONAL); + + err = airoha_snand_set_mode(as_ctrl, SPI_MODE_DMA); + if (err < 0) + return err; + + err = airoha_snand_nfi_config(as_ctrl); + if (err) + return err; + + if (op->cmd.opcode == SPI_NAND_OP_PROGRAM_LOAD_QUAD || + op->cmd.opcode == SPI_NAND_OP_PROGRAM_LOAD_RAMDON_QUAD) + wr_mode = BIT(1); + else + wr_mode = 0; + + err = regmap_write(as_ctrl->regmap_nfi, REG_SPI_NFI_STRADDR, + as_dev->dma_addr); + if (err) + return err; + + val = FIELD_PREP(SPI_NFI_PROG_LOAD_BYTE_NUM, + as_ctrl->nfi_cfg.sec_size * as_ctrl->nfi_cfg.sec_num); + err = regmap_update_bits(as_ctrl->regmap_nfi, + REG_SPI_NFI_SNF_MISC_CTL2, + SPI_NFI_PROG_LOAD_BYTE_NUM, val); + if (err) + return err; + + err = regmap_write(as_ctrl->regmap_nfi, REG_SPI_NFI_PG_CTL1, + FIELD_PREP(SPI_NFI_PG_LOAD_CMD, + op->cmd.opcode)); + if (err) + return err; + + err = regmap_write(as_ctrl->regmap_nfi, REG_SPI_NFI_SNF_MISC_CTL, + FIELD_PREP(SPI_NFI_DATA_READ_WR_MODE, wr_mode)); + if (err) + return err; + + err = regmap_write(as_ctrl->regmap_nfi, REG_SPI_NFI_PG_CTL2, 0x0); + if (err) + return err; + + err = regmap_clear_bits(as_ctrl->regmap_nfi, REG_SPI_NFI_CNFG, + SPI_NFI_READ_MODE); + if (err) + return err; + + err = regmap_update_bits(as_ctrl->regmap_nfi, REG_SPI_NFI_CNFG, + SPI_NFI_OPMODE, + FIELD_PREP(SPI_NFI_OPMODE, 3)); + if (err) + return err; + + err = regmap_set_bits(as_ctrl->regmap_nfi, REG_SPI_NFI_CNFG, + SPI_NFI_DMA_MODE); + if (err) + return err; + + err = regmap_write(as_ctrl->regmap_nfi, REG_SPI_NFI_CMD, 0x80); + if (err) + return err; + + err = regmap_clear_bits(as_ctrl->regmap_nfi, REG_SPI_NFI_CON, + SPI_NFI_WR_TRIG); + if (err) + return err; + + err = regmap_set_bits(as_ctrl->regmap_nfi, REG_SPI_NFI_CON, + SPI_NFI_WR_TRIG); + if (err) + return err; + + err = regmap_read_poll_timeout(as_ctrl->regmap_nfi, REG_SPI_NFI_INTR, + val, (val & SPI_NFI_AHB_DONE), 0, + 1 * USEC_PER_SEC); + if (err) + return err; + + err = regmap_read_poll_timeout(as_ctrl->regmap_nfi, + REG_SPI_NFI_SNF_STA_CTL1, val, + (val & SPI_NFI_LOAD_TO_CACHE_DONE), + 0, 1 * USEC_PER_SEC); + if (err) + return err; + + err = regmap_set_bits(as_ctrl->regmap_nfi, REG_SPI_NFI_SNF_STA_CTL1, + SPI_NFI_LOAD_TO_CACHE_DONE); + if (err) + return err; + + err = airoha_snand_set_mode(as_ctrl, SPI_MODE_MANUAL); + if (err < 0) + return err; + + return len; +} + +static int airoha_snand_exec_op(struct spi_mem *mem, + const struct spi_mem_op *op) +{ + struct airoha_snand_dev *as_dev = spi_get_ctldata(mem->spi); + u8 data[8], cmd, opcode = op->cmd.opcode; + struct airoha_snand_ctrl *as_ctrl; + int i, err; + + as_ctrl = spi_controller_get_devdata(mem->spi->controller); + if (opcode == SPI_NAND_OP_PROGRAM_EXECUTE && + op->addr.val == as_dev->cur_page_num) { + as_dev->data_need_update = true; + } else if (opcode == SPI_NAND_OP_PAGE_READ) { + if (!as_dev->data_need_update && + op->addr.val == as_dev->cur_page_num) + return 0; + + as_dev->data_need_update = true; + as_dev->cur_page_num = op->addr.val; + } + + /* switch to manual mode */ + err = airoha_snand_set_mode(as_ctrl, SPI_MODE_MANUAL); + if (err < 0) + return err; + + err = airoha_snand_set_cs(as_ctrl, SPI_CHIP_SEL_LOW); + if (err < 0) + return err; + + /* opcode */ + err = airoha_snand_write_data(as_ctrl, 0x8, &opcode, sizeof(opcode)); + if (err) + return err; + + /* addr part */ + cmd = opcode == SPI_NAND_OP_GET_FEATURE ? 
0x11 : 0x8; + put_unaligned_be64(op->addr.val, data); + + for (i = ARRAY_SIZE(data) - op->addr.nbytes; + i < ARRAY_SIZE(data); i++) { + err = airoha_snand_write_data(as_ctrl, cmd, &data[i], + sizeof(data[0])); + if (err) + return err; + } + + /* dummy */ + data[0] = 0xff; + for (i = 0; i < op->dummy.nbytes; i++) { + err = airoha_snand_write_data(as_ctrl, 0x8, &data[0], + sizeof(data[0])); + if (err) + return err; + } + + /* data */ + if (op->data.dir == SPI_MEM_DATA_IN) { + err = airoha_snand_read_data(as_ctrl, op->data.buf.in, + op->data.nbytes); + if (err) + return err; + } else { + err = airoha_snand_write_data(as_ctrl, 0x8, op->data.buf.out, + op->data.nbytes); + if (err) + return err; + } + + return airoha_snand_set_cs(as_ctrl, SPI_CHIP_SEL_HIGH); +} + +static const struct spi_controller_mem_ops airoha_snand_mem_ops = { + .adjust_op_size = airoha_snand_adjust_op_size, + .supports_op = airoha_snand_supports_op, + .exec_op = airoha_snand_exec_op, + .dirmap_create = airoha_snand_dirmap_create, + .dirmap_read = airoha_snand_dirmap_read, + .dirmap_write = airoha_snand_dirmap_write, +}; + +static int airoha_snand_setup(struct spi_device *spi) +{ + struct airoha_snand_ctrl *as_ctrl; + struct airoha_snand_dev *as_dev; + + as_ctrl = spi_controller_get_devdata(spi->controller); + + as_dev = devm_kzalloc(as_ctrl->dev, sizeof(*as_dev), GFP_KERNEL); + if (!as_dev) + return -ENOMEM; + + /* prepare device buffer */ + as_dev->buf_len = SPI_NAND_CACHE_SIZE; + as_dev->txrx_buf = devm_kzalloc(as_ctrl->dev, as_dev->buf_len, + GFP_KERNEL); + if (!as_dev->txrx_buf) + return -ENOMEM; + + as_dev->dma_addr = dma_map_single(as_ctrl->dev, as_dev->txrx_buf, + as_dev->buf_len, DMA_BIDIRECTIONAL); + if (dma_mapping_error(as_ctrl->dev, as_dev->dma_addr)) + return -ENOMEM; + + as_dev->data_need_update = true; + spi_set_ctldata(spi, as_dev); + + return 0; +} + +static void airoha_snand_cleanup(struct spi_device *spi) +{ + struct airoha_snand_dev *as_dev = spi_get_ctldata(spi); + struct airoha_snand_ctrl *as_ctrl; + + as_ctrl = spi_controller_get_devdata(spi->controller); + dma_unmap_single(as_ctrl->dev, as_dev->dma_addr, + as_dev->buf_len, DMA_BIDIRECTIONAL); + spi_set_ctldata(spi, NULL); +} + +static int airoha_snand_nfi_setup(struct airoha_snand_ctrl *as_ctrl) +{ + u32 val, sec_size, sec_num; + int err; + + err = regmap_read(as_ctrl->regmap_nfi, REG_SPI_NFI_CON, &val); + if (err) + return err; + + sec_num = FIELD_GET(SPI_NFI_SEC_NUM, val); + + err = regmap_read(as_ctrl->regmap_nfi, REG_SPI_NFI_SECCUS_SIZE, &val); + if (err) + return err; + + sec_size = FIELD_GET(SPI_NFI_CUS_SEC_SIZE, val); + + /* init default value */ + as_ctrl->nfi_cfg.sec_size = sec_size; + as_ctrl->nfi_cfg.sec_num = sec_num; + as_ctrl->nfi_cfg.page_size = round_down(sec_size * sec_num, 1024); + as_ctrl->nfi_cfg.spare_size = 16; + + err = airoha_snand_nfi_init(as_ctrl); + if (err) + return err; + + return airoha_snand_nfi_config(as_ctrl); +} + +static const struct regmap_config spi_ctrl_regmap_config = { + .name = "ctrl", + .reg_bits = 32, + .val_bits = 32, + .reg_stride = 4, + .max_register = REG_SPI_CTRL_NFI2SPI_EN, +}; + +static const struct regmap_config spi_nfi_regmap_config = { + .name = "nfi", + .reg_bits = 32, + .val_bits = 32, + .reg_stride = 4, + .max_register = REG_SPI_NFI_SNF_NFI_CNFG, +}; + +static const struct of_device_id airoha_snand_ids[] = { + { .compatible = "airoha,en7581-snand" }, + { /* sentinel */ } +}; +MODULE_DEVICE_TABLE(of, airoha_snand_ids); + +static int airoha_snand_probe(struct platform_device *pdev) +{ + struct 
airoha_snand_ctrl *as_ctrl; + struct device *dev = &pdev->dev; + struct spi_controller *ctrl; + void __iomem *base; + int err; + + ctrl = devm_spi_alloc_host(dev, sizeof(*as_ctrl)); + if (!ctrl) + return -ENOMEM; + + as_ctrl = spi_controller_get_devdata(ctrl); + as_ctrl->dev = dev; + + base = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(base)) + return PTR_ERR(base); + + as_ctrl->regmap_ctrl = devm_regmap_init_mmio(dev, base, + &spi_ctrl_regmap_config); + if (IS_ERR(as_ctrl->regmap_ctrl)) + return dev_err_probe(dev, PTR_ERR(as_ctrl->regmap_ctrl), + "failed to init spi ctrl regmap\n"); + + base = devm_platform_ioremap_resource(pdev, 1); + if (IS_ERR(base)) + return PTR_ERR(base); + + as_ctrl->regmap_nfi = devm_regmap_init_mmio(dev, base, + &spi_nfi_regmap_config); + if (IS_ERR(as_ctrl->regmap_nfi)) + return dev_err_probe(dev, PTR_ERR(as_ctrl->regmap_nfi), + "failed to init spi nfi regmap\n"); + + as_ctrl->spi_clk = devm_clk_get_enabled(dev, "spi"); + if (IS_ERR(as_ctrl->spi_clk)) + return dev_err_probe(dev, PTR_ERR(as_ctrl->spi_clk), + "unable to get spi clk\n"); + + err = dma_set_mask(as_ctrl->dev, DMA_BIT_MASK(32)); + if (err) + return err; + + ctrl->num_chipselect = 2; + ctrl->mem_ops = &airoha_snand_mem_ops; + ctrl->bits_per_word_mask = SPI_BPW_MASK(8); + ctrl->mode_bits = SPI_RX_DUAL; + ctrl->setup = airoha_snand_setup; + ctrl->cleanup = airoha_snand_cleanup; + device_set_node(&ctrl->dev, dev_fwnode(dev)); + + err = airoha_snand_nfi_setup(as_ctrl); + if (err) + return err; + + return devm_spi_register_controller(dev, ctrl); +} + +static struct platform_driver airoha_snand_driver = { + .driver = { + .name = "airoha-spi", + .of_match_table = airoha_snand_ids, + }, + .probe = airoha_snand_probe, +}; +module_platform_driver(airoha_snand_driver); + +MODULE_DESCRIPTION("Airoha SPI-NAND Flash Controller Driver"); +MODULE_AUTHOR("Lorenzo Bianconi <lorenzo@kernel.org>"); +MODULE_AUTHOR("Ray Liu <ray.liu@airoha.com>"); +MODULE_LICENSE("GPL"); diff --git a/drivers/spi/spi-altera-platform.c b/drivers/spi/spi-altera-platform.c index 72e7a0f217..585393802e 100644 --- a/drivers/spi/spi-altera-platform.c +++ b/drivers/spi/spi-altera-platform.c @@ -169,4 +169,3 @@ module_platform_driver(altera_spi_driver); MODULE_DESCRIPTION("Altera SPI driver"); MODULE_AUTHOR("Thomas Chou <thomas@wytron.com.tw>"); MODULE_LICENSE("GPL"); -MODULE_ALIAS("platform:" DRV_NAME); diff --git a/drivers/spi/spi-amd.c b/drivers/spi/spi-amd.c index 5d9b246b69..2245ad54b0 100644 --- a/drivers/spi/spi-amd.c +++ b/drivers/spi/spi-amd.c @@ -13,6 +13,7 @@ #include <linux/delay.h> #include <linux/spi/spi.h> #include <linux/iopoll.h> +#include <linux/spi/spi-mem.h> #define AMD_SPI_CTRL0_REG 0x00 #define AMD_SPI_EXEC_CMD BIT(16) @@ -35,6 +36,7 @@ #define AMD_SPI_FIFO_SIZE 70 #define AMD_SPI_MEM_SIZE 200 +#define AMD_SPI_MAX_DATA 64 #define AMD_SPI_ENA_REG 0x20 #define AMD_SPI_ALT_SPD_SHIFT 20 @@ -358,6 +360,115 @@ fin_msg: return message->status; } +static bool amd_spi_supports_op(struct spi_mem *mem, + const struct spi_mem_op *op) +{ + /* bus width is number of IO lines used to transmit */ + if (op->cmd.buswidth > 1 || op->addr.buswidth > 1 || + op->data.buswidth > 1 || op->data.nbytes > AMD_SPI_MAX_DATA) + return false; + + return spi_mem_default_supports_op(mem, op); +} + +static int amd_spi_adjust_op_size(struct spi_mem *mem, struct spi_mem_op *op) +{ + op->data.nbytes = clamp_val(op->data.nbytes, 0, AMD_SPI_MAX_DATA); + return 0; +} + +static void amd_spi_set_addr(struct amd_spi *amd_spi, + const struct spi_mem_op *op) +{ + 
u8 nbytes = op->addr.nbytes; + u64 addr_val = op->addr.val; + int base_addr, i; + + base_addr = AMD_SPI_FIFO_BASE + nbytes; + + for (i = 0; i < nbytes; i++) { + amd_spi_writereg8(amd_spi, base_addr - i - 1, addr_val & + GENMASK(7, 0)); + addr_val >>= 8; + } +} + +static void amd_spi_mem_data_out(struct amd_spi *amd_spi, + const struct spi_mem_op *op) +{ + int base_addr = AMD_SPI_FIFO_BASE + op->addr.nbytes; + u8 *buf = (u8 *)op->data.buf.out; + u32 nbytes = op->data.nbytes; + int i; + + amd_spi_set_opcode(amd_spi, op->cmd.opcode); + amd_spi_set_addr(amd_spi, op); + + for (i = 0; i < nbytes; i++) + amd_spi_writereg8(amd_spi, (base_addr + i), buf[i]); + + amd_spi_set_tx_count(amd_spi, op->addr.nbytes + op->data.nbytes); + amd_spi_set_rx_count(amd_spi, 0); + amd_spi_clear_fifo_ptr(amd_spi); + amd_spi_execute_opcode(amd_spi); +} + +static void amd_spi_mem_data_in(struct amd_spi *amd_spi, + const struct spi_mem_op *op) +{ + int offset = (op->addr.nbytes == 0) ? 0 : 1; + u8 *buf = (u8 *)op->data.buf.in; + u32 nbytes = op->data.nbytes; + int base_addr, i; + + base_addr = AMD_SPI_FIFO_BASE + op->addr.nbytes + offset; + + amd_spi_set_opcode(amd_spi, op->cmd.opcode); + amd_spi_set_addr(amd_spi, op); + amd_spi_set_tx_count(amd_spi, op->addr.nbytes); + amd_spi_set_rx_count(amd_spi, op->data.nbytes + 1); + amd_spi_clear_fifo_ptr(amd_spi); + amd_spi_execute_opcode(amd_spi); + amd_spi_busy_wait(amd_spi); + + for (i = 0; i < nbytes; i++) + buf[i] = amd_spi_readreg8(amd_spi, base_addr + i); +} + +static int amd_spi_exec_mem_op(struct spi_mem *mem, + const struct spi_mem_op *op) +{ + struct amd_spi *amd_spi; + int ret; + + amd_spi = spi_controller_get_devdata(mem->spi->controller); + + ret = amd_set_spi_freq(amd_spi, mem->spi->max_speed_hz); + if (ret) + return ret; + + switch (op->data.dir) { + case SPI_MEM_DATA_IN: + amd_spi_mem_data_in(amd_spi, op); + break; + case SPI_MEM_DATA_OUT: + fallthrough; + case SPI_MEM_NO_DATA: + amd_spi_mem_data_out(amd_spi, op); + break; + default: + ret = -EOPNOTSUPP; + } + + return ret; +} + +static const struct spi_controller_mem_ops amd_spi_mem_ops = { + .exec_op = amd_spi_exec_mem_op, + .adjust_op_size = amd_spi_adjust_op_size, + .supports_op = amd_spi_supports_op, +}; + static int amd_spi_host_transfer(struct spi_controller *host, struct spi_message *msg) { @@ -409,6 +520,7 @@ static int amd_spi_probe(struct platform_device *pdev) host->min_speed_hz = AMD_SPI_MIN_HZ; host->setup = amd_spi_host_setup; host->transfer_one_message = amd_spi_host_transfer; + host->mem_ops = &amd_spi_mem_ops; host->max_transfer_size = amd_spi_max_transfer_size; host->max_message_size = amd_spi_max_transfer_size; diff --git a/drivers/spi/spi-armada-3700.c b/drivers/spi/spi-armada-3700.c index 3c9ed41293..02c1e62574 100644 --- a/drivers/spi/spi-armada-3700.c +++ b/drivers/spi/spi-armada-3700.c @@ -339,7 +339,7 @@ static irqreturn_t a3700_spi_interrupt(int irq, void *dev_id) static bool a3700_spi_wait_completion(struct spi_device *spi) { struct a3700_spi *a3700_spi; - unsigned int timeout; + unsigned long time_left; unsigned int ctrl_reg; unsigned long timeout_jiffies; @@ -361,12 +361,12 @@ static bool a3700_spi_wait_completion(struct spi_device *spi) a3700_spi->wait_mask); timeout_jiffies = msecs_to_jiffies(A3700_SPI_TIMEOUT); - timeout = wait_for_completion_timeout(&a3700_spi->done, - timeout_jiffies); + time_left = wait_for_completion_timeout(&a3700_spi->done, + timeout_jiffies); a3700_spi->wait_mask = 0; - if (timeout) + if (time_left) return true; /* there might be the case that right after 
we checked the diff --git a/drivers/spi/spi-atmel.c b/drivers/spi/spi-atmel.c index bad3499845..b62f57390d 100644 --- a/drivers/spi/spi-atmel.c +++ b/drivers/spi/spi-atmel.c @@ -987,8 +987,6 @@ static void atmel_spi_pdc_next_xfer(struct spi_controller *host, * For DMA, tx_buf/tx_dma have the same relationship as rx_buf/rx_dma: * - The buffer is either valid for CPU access, else NULL * - If the buffer is valid, so is its DMA address - * - * This driver manages the dma address unless message->is_dma_mapped. */ static int atmel_spi_dma_map_xfer(struct atmel_spi *as, struct spi_transfer *xfer) @@ -1374,8 +1372,7 @@ static int atmel_spi_one_transfer(struct spi_controller *host, * DMA map early, for performance (empties dcache ASAP) and * better fault reporting. */ - if ((!host->cur_msg->is_dma_mapped) - && as->use_pdc) { + if (as->use_pdc) { if (atmel_spi_dma_map_xfer(as, xfer) < 0) return -ENOMEM; } @@ -1454,8 +1451,7 @@ static int atmel_spi_one_transfer(struct spi_controller *host, } } - if (!host->cur_msg->is_dma_mapped - && as->use_pdc) + if (as->use_pdc) atmel_spi_dma_unmap_xfer(host, xfer); if (as->use_pdc) diff --git a/drivers/spi/spi-au1550.c b/drivers/spi/spi-au1550.c index 825d2f1cdf..16f200bb3d 100644 --- a/drivers/spi/spi-au1550.c +++ b/drivers/spi/spi-au1550.c @@ -314,11 +314,8 @@ static int au1550_spi_dma_txrxb(struct spi_device *spi, struct spi_transfer *t) hw->tx = t->tx_buf; hw->rx = t->rx_buf; - dma_tx_addr = t->tx_dma; - dma_rx_addr = t->rx_dma; /* - * check if buffers are already dma mapped, map them otherwise: * - first map the TX buffer, so cache data gets written to memory * - then map the RX buffer, so that cache entries (with * soon-to-be-stale data) get removed @@ -326,23 +323,17 @@ static int au1550_spi_dma_txrxb(struct spi_device *spi, struct spi_transfer *t) * use temp rx buffer (preallocated or realloc to fit) for rx dma */ if (t->tx_buf) { - if (t->tx_dma == 0) { /* if DMA_ADDR_INVALID, map it */ - dma_tx_addr = dma_map_single(hw->dev, - (void *)t->tx_buf, - t->len, DMA_TO_DEVICE); - if (dma_mapping_error(hw->dev, dma_tx_addr)) - dev_err(hw->dev, "tx dma map error\n"); - } + dma_tx_addr = dma_map_single(hw->dev, (void *)t->tx_buf, + t->len, DMA_TO_DEVICE); + if (dma_mapping_error(hw->dev, dma_tx_addr)) + dev_err(hw->dev, "tx dma map error\n"); } if (t->rx_buf) { - if (t->rx_dma == 0) { /* if DMA_ADDR_INVALID, map it */ - dma_rx_addr = dma_map_single(hw->dev, - (void *)t->rx_buf, - t->len, DMA_FROM_DEVICE); - if (dma_mapping_error(hw->dev, dma_rx_addr)) - dev_err(hw->dev, "rx dma map error\n"); - } + dma_rx_addr = dma_map_single(hw->dev, (void *)t->rx_buf, + t->len, DMA_FROM_DEVICE); + if (dma_mapping_error(hw->dev, dma_rx_addr)) + dev_err(hw->dev, "rx dma map error\n"); } else { if (t->len > hw->dma_rx_tmpbuf_size) { int ret; @@ -398,10 +389,10 @@ static int au1550_spi_dma_txrxb(struct spi_device *spi, struct spi_transfer *t) DMA_FROM_DEVICE); } /* unmap buffers if mapped above */ - if (t->rx_buf && t->rx_dma == 0) + if (t->rx_buf) dma_unmap_single(hw->dev, dma_rx_addr, t->len, DMA_FROM_DEVICE); - if (t->tx_buf && t->tx_dma == 0) + if (t->tx_buf) dma_unmap_single(hw->dev, dma_tx_addr, t->len, DMA_TO_DEVICE); diff --git a/drivers/spi/spi-bitbang.c b/drivers/spi/spi-bitbang.c index a0e2204fc0..ca5cc67555 100644 --- a/drivers/spi/spi-bitbang.c +++ b/drivers/spi/spi-bitbang.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-or-later /* - * polling/bitbanging SPI master controller driver utilities + * Polling/bitbanging SPI host controller controller driver 
utilities */ #include <linux/spinlock.h> @@ -11,6 +11,7 @@ #include <linux/errno.h> #include <linux/platform_device.h> #include <linux/slab.h> +#include <linux/time64.h> #include <linux/spi/spi.h> #include <linux/spi/spi_bitbang.h> @@ -168,8 +169,8 @@ int spi_bitbang_setup_transfer(struct spi_device *spi, struct spi_transfer *t) if (!hz) hz = spi->max_speed_hz; if (hz) { - cs->nsecs = (1000000000/2) / hz; - if (cs->nsecs > (MAX_UDELAY_MS * 1000 * 1000)) + cs->nsecs = (NSEC_PER_SEC / 2) / hz; + if (cs->nsecs > (MAX_UDELAY_MS * NSEC_PER_MSEC)) return -EINVAL; } @@ -393,12 +394,12 @@ int spi_bitbang_init(struct spi_bitbang *bitbang) EXPORT_SYMBOL_GPL(spi_bitbang_init); /** - * spi_bitbang_start - start up a polled/bitbanging SPI master driver + * spi_bitbang_start - start up a polled/bitbanging SPI host controller driver * @bitbang: driver handle * * Caller should have zero-initialized all parts of the structure, and then - * provided callbacks for chip selection and I/O loops. If the master has - * a transfer method, its final step should call spi_bitbang_transfer; or, + * provided callbacks for chip selection and I/O loops. If the host controller has + * a transfer method, its final step should call spi_bitbang_transfer(); or, * that's the default if the transfer routine is not initialized. It should * also set up the bus number and number of chipselects. * @@ -406,9 +407,9 @@ EXPORT_SYMBOL_GPL(spi_bitbang_init); * hardware that basically exposes a shift register) or per-spi_transfer * (which takes better advantage of hardware like fifos or DMA engines). * - * Drivers using per-word I/O loops should use (or call) spi_bitbang_setup, - * spi_bitbang_cleanup and spi_bitbang_setup_transfer to handle those spi - * master methods. Those methods are the defaults if the bitbang->txrx_bufs + * Drivers using per-word I/O loops should use (or call) spi_bitbang_setup(), + * spi_bitbang_cleanup() and spi_bitbang_setup_transfer() to handle those SPI + * host controller methods. Those methods are the defaults if the bitbang->txrx_bufs * routine isn't initialized. * * This routine registers the spi_controller, which will process requests in a @@ -417,7 +418,7 @@ EXPORT_SYMBOL_GPL(spi_bitbang_init); * * On success, this routine will take a reference to the controller. The caller * is responsible for calling spi_bitbang_stop() to decrement the reference and - * spi_controller_put() as counterpart of spi_alloc_master() to prevent a memory + * spi_controller_put() as counterpart of spi_alloc_host() to prevent a memory * leak. 
*/ int spi_bitbang_start(struct spi_bitbang *bitbang) @@ -450,4 +451,4 @@ void spi_bitbang_stop(struct spi_bitbang *bitbang) EXPORT_SYMBOL_GPL(spi_bitbang_stop); MODULE_LICENSE("GPL"); - +MODULE_DESCRIPTION("Utilities for Bitbanging SPI host controllers"); diff --git a/drivers/spi/spi-cadence-quadspi.c b/drivers/spi/spi-cadence-quadspi.c index 350b3dab3a..05ebb03d31 100644 --- a/drivers/spi/spi-cadence-quadspi.c +++ b/drivers/spi/spi-cadence-quadspi.c @@ -42,6 +42,7 @@ static_assert(CQSPI_MAX_CHIPSELECT <= SPI_CS_CNT_MAX); #define CQSPI_NO_SUPPORT_WR_COMPLETION BIT(3) #define CQSPI_SLOW_SRAM BIT(4) #define CQSPI_NEEDS_APB_AHB_HAZARD_WAR BIT(5) +#define CQSPI_RD_NO_IRQ BIT(6) /* Capabilities */ #define CQSPI_SUPPORTS_OCTAL BIT(0) @@ -102,6 +103,8 @@ struct cqspi_st { bool apb_ahb_hazard; bool is_jh7110; /* Flag for StarFive JH7110 SoC */ + + const struct cqspi_driver_platdata *ddata; }; struct cqspi_driver_platdata { @@ -117,6 +120,7 @@ struct cqspi_driver_platdata { /* Operation timeout value */ #define CQSPI_TIMEOUT_MS 500 #define CQSPI_READ_TIMEOUT_MS 10 +#define CQSPI_BUSYWAIT_TIMEOUT_US 500 /* Runtime_pm autosuspend delay */ #define CQSPI_AUTOSUSPEND_TIMEOUT 2000 @@ -295,13 +299,27 @@ struct cqspi_driver_platdata { #define CQSPI_REG_VERSAL_DMA_VAL 0x602 -static int cqspi_wait_for_bit(void __iomem *reg, const u32 mask, bool clr) +static int cqspi_wait_for_bit(const struct cqspi_driver_platdata *ddata, + void __iomem *reg, const u32 mask, bool clr, + bool busywait) { + u64 timeout_us = CQSPI_TIMEOUT_MS * USEC_PER_MSEC; u32 val; + if (busywait) { + int ret = readl_relaxed_poll_timeout(reg, val, + (((clr ? ~val : val) & mask) == mask), + 0, CQSPI_BUSYWAIT_TIMEOUT_US); + + if (ret != -ETIMEDOUT) + return ret; + + timeout_us -= CQSPI_BUSYWAIT_TIMEOUT_US; + } + return readl_relaxed_poll_timeout(reg, val, (((clr ? ~val : val) & mask) == mask), - 10, CQSPI_TIMEOUT_MS * 1000); + 10, timeout_us); } static bool cqspi_is_idle(struct cqspi_st *cqspi) @@ -334,11 +352,8 @@ static u32 cqspi_get_versal_dma_status(struct cqspi_st *cqspi) static irqreturn_t cqspi_irq_handler(int this_irq, void *dev) { struct cqspi_st *cqspi = dev; + const struct cqspi_driver_platdata *ddata = cqspi->ddata; unsigned int irq_status; - struct device *device = &cqspi->pdev->dev; - const struct cqspi_driver_platdata *ddata; - - ddata = of_device_get_match_data(device); /* Read interrupt status */ irq_status = readl(cqspi->iobase + CQSPI_REG_IRQSTATUS); @@ -434,8 +449,8 @@ static int cqspi_exec_flash_cmd(struct cqspi_st *cqspi, unsigned int reg) writel(reg, reg_base + CQSPI_REG_CMDCTRL); /* Polling for completion. */ - ret = cqspi_wait_for_bit(reg_base + CQSPI_REG_CMDCTRL, - CQSPI_REG_CMDCTRL_INPROGRESS_MASK, 1); + ret = cqspi_wait_for_bit(cqspi->ddata, reg_base + CQSPI_REG_CMDCTRL, + CQSPI_REG_CMDCTRL_INPROGRESS_MASK, 1, true); if (ret) { dev_err(&cqspi->pdev->dev, "Flash command execution timed out.\n"); @@ -492,8 +507,11 @@ static int cqspi_enable_dtr(struct cqspi_flash_pdata *f_pdata, if (ret) return ret; } else { - reg &= ~CQSPI_REG_CONFIG_DTR_PROTO; - reg &= ~CQSPI_REG_CONFIG_DUAL_OPCODE; + unsigned int mask = CQSPI_REG_CONFIG_DTR_PROTO | CQSPI_REG_CONFIG_DUAL_OPCODE; + /* Shortcut if DTR is already disabled. 
*/ + if ((reg & mask) == 0) + return 0; + reg &= ~mask; } writel(reg, reg_base + CQSPI_REG_CONFIG); @@ -700,6 +718,7 @@ static int cqspi_indirect_read_execute(struct cqspi_flash_pdata *f_pdata, const size_t n_rx) { struct cqspi_st *cqspi = f_pdata->cqspi; + bool use_irq = !(cqspi->ddata && cqspi->ddata->quirks & CQSPI_RD_NO_IRQ); struct device *dev = &cqspi->pdev->dev; void __iomem *reg_base = cqspi->iobase; void __iomem *ahb_base = cqspi->ahb_base; @@ -723,17 +742,20 @@ static int cqspi_indirect_read_execute(struct cqspi_flash_pdata *f_pdata, * all the read interrupts disabled for max performance. */ - if (!cqspi->slow_sram) + if (use_irq && cqspi->slow_sram) + writel(CQSPI_REG_IRQ_WATERMARK, reg_base + CQSPI_REG_IRQMASK); + else if (use_irq) writel(CQSPI_IRQ_MASK_RD, reg_base + CQSPI_REG_IRQMASK); else - writel(CQSPI_REG_IRQ_WATERMARK, reg_base + CQSPI_REG_IRQMASK); + writel(0, reg_base + CQSPI_REG_IRQMASK); reinit_completion(&cqspi->transfer_complete); writel(CQSPI_REG_INDIRECTRD_START_MASK, reg_base + CQSPI_REG_INDIRECTRD); while (remaining > 0) { - if (!wait_for_completion_timeout(&cqspi->transfer_complete, + if (use_irq && + !wait_for_completion_timeout(&cqspi->transfer_complete, msecs_to_jiffies(CQSPI_READ_TIMEOUT_MS))) ret = -ETIMEDOUT; @@ -775,7 +797,7 @@ static int cqspi_indirect_read_execute(struct cqspi_flash_pdata *f_pdata, bytes_to_read = cqspi_get_rd_sram_level(cqspi); } - if (remaining > 0) { + if (use_irq && remaining > 0) { reinit_completion(&cqspi->transfer_complete); if (cqspi->slow_sram) writel(CQSPI_REG_IRQ_WATERMARK, reg_base + CQSPI_REG_IRQMASK); @@ -783,8 +805,8 @@ static int cqspi_indirect_read_execute(struct cqspi_flash_pdata *f_pdata, } /* Check indirect done status */ - ret = cqspi_wait_for_bit(reg_base + CQSPI_REG_INDIRECTRD, - CQSPI_REG_INDIRECTRD_DONE_MASK, 0); + ret = cqspi_wait_for_bit(cqspi->ddata, reg_base + CQSPI_REG_INDIRECTRD, + CQSPI_REG_INDIRECTRD_DONE_MASK, 0, true); if (ret) { dev_err(dev, "Indirect read completion error (%i)\n", ret); goto failrd; @@ -1084,8 +1106,8 @@ static int cqspi_indirect_write_execute(struct cqspi_flash_pdata *f_pdata, } /* Check indirect done status */ - ret = cqspi_wait_for_bit(reg_base + CQSPI_REG_INDIRECTWR, - CQSPI_REG_INDIRECTWR_DONE_MASK, 0); + ret = cqspi_wait_for_bit(cqspi->ddata, reg_base + CQSPI_REG_INDIRECTWR, + CQSPI_REG_INDIRECTWR_DONE_MASK, 0, false); if (ret) { dev_err(dev, "Indirect write completion error (%i)\n", ret); goto failwr; @@ -1358,16 +1380,13 @@ static ssize_t cqspi_read(struct cqspi_flash_pdata *f_pdata, const struct spi_mem_op *op) { struct cqspi_st *cqspi = f_pdata->cqspi; - struct device *dev = &cqspi->pdev->dev; - const struct cqspi_driver_platdata *ddata; + const struct cqspi_driver_platdata *ddata = cqspi->ddata; loff_t from = op->addr.val; size_t len = op->data.nbytes; u_char *buf = op->data.buf.in; u64 dma_align = (u64)(uintptr_t)buf; int ret; - ddata = of_device_get_match_data(dev); - ret = cqspi_read_setup(f_pdata, op); if (ret) return ret; @@ -1511,8 +1530,8 @@ static int cqspi_of_get_pdata(struct cqspi_st *cqspi) cqspi->is_decoded_cs = of_property_read_bool(np, "cdns,is-decoded-cs"); if (of_property_read_u32(np, "cdns,fifo-depth", &cqspi->fifo_depth)) { - dev_err(dev, "couldn't determine fifo-depth\n"); - return -ENXIO; + /* Zero signals FIFO depth should be runtime detected. 
*/ + cqspi->fifo_depth = 0; } if (of_property_read_u32(np, "cdns,fifo-width", &cqspi->fifo_width)) { @@ -1542,8 +1561,6 @@ static void cqspi_controller_init(struct cqspi_st *cqspi) { u32 reg; - cqspi_controller_enable(cqspi, 0); - /* Configure the remap address register, no remap */ writel(0, cqspi->iobase + CQSPI_REG_REMAP); @@ -1577,8 +1594,29 @@ static void cqspi_controller_init(struct cqspi_st *cqspi) reg |= CQSPI_REG_CONFIG_DMA_MASK; writel(reg, cqspi->iobase + CQSPI_REG_CONFIG); } +} - cqspi_controller_enable(cqspi, 1); +static void cqspi_controller_detect_fifo_depth(struct cqspi_st *cqspi) +{ + struct device *dev = &cqspi->pdev->dev; + u32 reg, fifo_depth; + + /* + * Bits N-1:0 are writable while bits 31:N are read as zero, with 2^N + * the FIFO depth. + */ + writel(U32_MAX, cqspi->iobase + CQSPI_REG_SRAMPARTITION); + reg = readl(cqspi->iobase + CQSPI_REG_SRAMPARTITION); + fifo_depth = reg + 1; + + /* FIFO depth of zero means no value from devicetree was provided. */ + if (cqspi->fifo_depth == 0) { + cqspi->fifo_depth = fifo_depth; + dev_dbg(dev, "using FIFO depth of %u\n", fifo_depth); + } else if (fifo_depth != cqspi->fifo_depth) { + dev_warn(dev, "detected FIFO depth (%u) different from config (%u)\n", + fifo_depth, cqspi->fifo_depth); + } } static int cqspi_request_mmap_dma(struct cqspi_st *cqspi) @@ -1731,6 +1769,7 @@ static int cqspi_probe(struct platform_device *pdev) cqspi->pdev = pdev; cqspi->host = host; cqspi->is_jh7110 = false; + cqspi->ddata = ddata = of_device_get_match_data(dev); platform_set_drvdata(pdev, cqspi); /* Obtain configuration from OF. */ @@ -1822,7 +1861,6 @@ static int cqspi_probe(struct platform_device *pdev) /* write completion is supported by default */ cqspi->wr_completion = true; - ddata = of_device_get_match_data(dev); if (ddata) { if (ddata->quirks & CQSPI_NEEDS_WR_DELAY) cqspi->wr_delay = 50 * DIV_ROUND_UP(NSEC_PER_SEC, @@ -1864,7 +1902,10 @@ static int cqspi_probe(struct platform_device *pdev) } cqspi_wait_idle(cqspi); + cqspi_controller_enable(cqspi, 0); + cqspi_controller_detect_fifo_depth(cqspi); cqspi_controller_init(cqspi); + cqspi_controller_enable(cqspi, 1); cqspi->current_cs = -1; cqspi->sclk = 0; @@ -1947,7 +1988,9 @@ static int cqspi_runtime_resume(struct device *dev) clk_prepare_enable(cqspi->clk); cqspi_wait_idle(cqspi); + cqspi_controller_enable(cqspi, 0); cqspi_controller_init(cqspi); + cqspi_controller_enable(cqspi, 1); cqspi->current_cs = -1; cqspi->sclk = 0; @@ -2012,6 +2055,12 @@ static const struct cqspi_driver_platdata pensando_cdns_qspi = { .quirks = CQSPI_NEEDS_APB_AHB_HAZARD_WAR | CQSPI_DISABLE_DAC_MODE, }; +static const struct cqspi_driver_platdata mobileye_eyeq5_ospi = { + .hwcaps_mask = CQSPI_SUPPORTS_OCTAL, + .quirks = CQSPI_DISABLE_DAC_MODE | CQSPI_NO_SUPPORT_WR_COMPLETION | + CQSPI_RD_NO_IRQ, +}; + static const struct of_device_id cqspi_dt_ids[] = { { .compatible = "cdns,qspi-nor", @@ -2045,6 +2094,10 @@ static const struct of_device_id cqspi_dt_ids[] = { .compatible = "amd,pensando-elba-qspi", .data = &pensando_cdns_qspi, }, + { + .compatible = "mobileye,eyeq5-ospi", + .data = &mobileye_eyeq5_ospi, + }, { /* end of table */ } }; diff --git a/drivers/spi/spi-cadence-xspi.c b/drivers/spi/spi-cadence-xspi.c index cdce2e280f..2e3eacd46b 100644 --- a/drivers/spi/spi-cadence-xspi.c +++ b/drivers/spi/spi-cadence-xspi.c @@ -496,20 +496,14 @@ static irqreturn_t cdns_xspi_irq_handler(int this_irq, void *dev) static int cdns_xspi_of_get_plat_data(struct platform_device *pdev) { struct device_node *node_prop = pdev->dev.of_node; 
- struct device_node *node_child; unsigned int cs; - for_each_child_of_node(node_prop, node_child) { - if (!of_device_is_available(node_child)) - continue; - + for_each_available_child_of_node_scoped(node_prop, node_child) { if (of_property_read_u32(node_child, "reg", &cs)) { dev_err(&pdev->dev, "Couldn't get memory chip select\n"); - of_node_put(node_child); return -ENXIO; } else if (cs >= CDNS_XSPI_MAX_BANKS) { dev_err(&pdev->dev, "reg (cs) parameter value too large\n"); - of_node_put(node_child); return -ENXIO; } } diff --git a/drivers/spi/spi-coldfire-qspi.c b/drivers/spi/spi-coldfire-qspi.c index b341b6908d..e83cd0510f 100644 --- a/drivers/spi/spi-coldfire-qspi.c +++ b/drivers/spi/spi-coldfire-qspi.c @@ -500,7 +500,6 @@ static const struct dev_pm_ops mcfqspi_pm = { static struct platform_driver mcfqspi_driver = { .driver.name = DRIVER_NAME, - .driver.owner = THIS_MODULE, .driver.pm = &mcfqspi_pm, .probe = mcfqspi_probe, .remove_new = mcfqspi_remove, diff --git a/drivers/spi/spi-cs42l43.c b/drivers/spi/spi-cs42l43.c index 0d9c948e11..8b618ef0f7 100644 --- a/drivers/spi/spi-cs42l43.c +++ b/drivers/spi/spi-cs42l43.c @@ -5,10 +5,14 @@ // Copyright (C) 2022-2023 Cirrus Logic, Inc. and // Cirrus Logic International Semiconductor Ltd. +#include <linux/acpi.h> +#include <linux/array_size.h> #include <linux/bits.h> #include <linux/bitfield.h> #include <linux/device.h> #include <linux/errno.h> +#include <linux/gpio/machine.h> +#include <linux/gpio/property.h> #include <linux/mfd/cs42l43.h> #include <linux/mfd/cs42l43-regs.h> #include <linux/mod_devicetable.h> @@ -16,6 +20,7 @@ #include <linux/of.h> #include <linux/platform_device.h> #include <linux/pm_runtime.h> +#include <linux/property.h> #include <linux/regmap.h> #include <linux/spi/spi.h> #include <linux/units.h> @@ -39,6 +44,44 @@ static const unsigned int cs42l43_clock_divs[] = { 2, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30 }; +static const struct software_node ampl = { + .name = "cs35l56-left", +}; + +static const struct software_node ampr = { + .name = "cs35l56-right", +}; + +static struct spi_board_info ampl_info = { + .modalias = "cs35l56", + .max_speed_hz = 11 * HZ_PER_MHZ, + .chip_select = 0, + .mode = SPI_MODE_0, + .swnode = &ampl, +}; + +static struct spi_board_info ampr_info = { + .modalias = "cs35l56", + .max_speed_hz = 11 * HZ_PER_MHZ, + .chip_select = 1, + .mode = SPI_MODE_0, + .swnode = &ampr, +}; + +static const struct software_node cs42l43_gpiochip_swnode = { + .name = "cs42l43-pinctrl", +}; + +static const struct software_node_ref_args cs42l43_cs_refs[] = { + SOFTWARE_NODE_REFERENCE(&cs42l43_gpiochip_swnode, 0, GPIO_ACTIVE_LOW), + SOFTWARE_NODE_REFERENCE(&swnode_gpio_undefined), +}; + +static const struct property_entry cs42l43_cs_props[] = { + PROPERTY_ENTRY_REF_ARRAY("cs-gpios", cs42l43_cs_refs), + {} +}; + static int cs42l43_spi_tx(struct regmap *regmap, const u8 *buf, unsigned int len) { const u8 *end = buf + len; @@ -203,16 +246,59 @@ static size_t cs42l43_spi_max_length(struct spi_device *spi) return CS42L43_SPI_MAX_LENGTH; } +static bool cs42l43_has_sidecar(struct fwnode_handle *fwnode) +{ + static const u32 func_smart_amp = 0x1; + struct fwnode_handle *child_fwnode, *ext_fwnode; + unsigned int val; + u32 function; + int ret; + + fwnode_for_each_child_node(fwnode, child_fwnode) { + acpi_handle handle = ACPI_HANDLE_FWNODE(child_fwnode); + + ret = acpi_get_local_address(handle, &function); + if (ret || function != func_smart_amp) + continue; + + ext_fwnode = fwnode_get_named_child_node(child_fwnode, +
"mipi-sdca-function-expansion-subproperties"); + if (!ext_fwnode) + continue; + + ret = fwnode_property_read_u32(ext_fwnode, + "01fa-sidecar-instances", + &val); + + fwnode_handle_put(ext_fwnode); + + if (ret) + continue; + + fwnode_handle_put(child_fwnode); + + return !!val; + } + + return false; +} + static void cs42l43_release_of_node(void *data) { fwnode_handle_put(data); } +static void cs42l43_release_sw_node(void *data) +{ + software_node_unregister(&cs42l43_gpiochip_swnode); +} + static int cs42l43_spi_probe(struct platform_device *pdev) { struct cs42l43 *cs42l43 = dev_get_drvdata(pdev->dev.parent); struct cs42l43_spi *priv; struct fwnode_handle *fwnode = dev_fwnode(cs42l43->dev); + bool has_sidecar = cs42l43_has_sidecar(fwnode); int ret; priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL); @@ -259,21 +345,45 @@ static int cs42l43_spi_probe(struct platform_device *pdev) if (is_of_node(fwnode)) { fwnode = fwnode_get_named_child_node(fwnode, "spi"); - ret = devm_add_action(priv->dev, cs42l43_release_of_node, fwnode); - if (ret) { - fwnode_handle_put(fwnode); + ret = devm_add_action_or_reset(priv->dev, cs42l43_release_of_node, fwnode); + if (ret) return ret; - } } - device_set_node(&priv->ctlr->dev, fwnode); + if (has_sidecar) { + ret = software_node_register(&cs42l43_gpiochip_swnode); + if (ret) + return dev_err_probe(priv->dev, ret, + "Failed to register gpio swnode\n"); + + ret = devm_add_action_or_reset(priv->dev, cs42l43_release_sw_node, NULL); + if (ret) + return ret; + + ret = device_create_managed_software_node(&priv->ctlr->dev, + cs42l43_cs_props, NULL); + if (ret) + return dev_err_probe(priv->dev, ret, "Failed to add swnode\n"); + } else { + device_set_node(&priv->ctlr->dev, fwnode); + } ret = devm_spi_register_controller(priv->dev, priv->ctlr); - if (ret) { - dev_err(priv->dev, "Failed to register SPI controller: %d\n", ret); + if (ret) + return dev_err_probe(priv->dev, ret, + "Failed to register SPI controller\n"); + + if (has_sidecar) { + if (!spi_new_device(priv->ctlr, &ampl_info)) + return dev_err_probe(priv->dev, -ENODEV, + "Failed to create left amp slave\n"); + + if (!spi_new_device(priv->ctlr, &ampr_info)) + return dev_err_probe(priv->dev, -ENODEV, + "Failed to create right amp slave\n"); } - return ret; + return 0; } static const struct platform_device_id cs42l43_spi_id_table[] = { @@ -291,6 +401,7 @@ static struct platform_driver cs42l43_spi_driver = { }; module_platform_driver(cs42l43_spi_driver); +MODULE_IMPORT_NS(GPIO_SWNODE); MODULE_DESCRIPTION("CS42L43 SPI Driver"); MODULE_AUTHOR("Lucas Tanure <tanureal@opensource.cirrus.com>"); MODULE_AUTHOR("Maciej Strozek <mstrozek@opensource.cirrus.com>"); diff --git a/drivers/spi/spi-dw-core.c b/drivers/spi/spi-dw-core.c index 0274c92955..ddfdb90304 100644 --- a/drivers/spi/spi-dw-core.c +++ b/drivers/spi/spi-dw-core.c @@ -6,6 +6,7 @@ */ #include <linux/bitfield.h> +#include <linux/bitops.h> #include <linux/dma-mapping.h> #include <linux/interrupt.h> #include <linux/module.h> @@ -421,10 +422,7 @@ static int dw_spi_transfer_one(struct spi_controller *host, int ret; dws->dma_mapped = 0; - dws->n_bytes = - roundup_pow_of_two(DIV_ROUND_UP(transfer->bits_per_word, - BITS_PER_BYTE)); - + dws->n_bytes = roundup_pow_of_two(BITS_TO_BYTES(transfer->bits_per_word)); dws->tx = (void *)transfer->tx_buf; dws->tx_len = transfer->len / dws->n_bytes; dws->rx = transfer->rx_buf; @@ -837,6 +835,20 @@ static void dw_spi_hw_init(struct device *dev, struct dw_spi *dws) } /* + * Try to detect the number of native chip-selects if the platform +
* driver didn't set it up. There can be up to 16 lines configured. + */ + if (!dws->num_cs) { + u32 ser; + + dw_writel(dws, DW_SPI_SER, 0xffff); + ser = dw_readl(dws, DW_SPI_SER); + dw_writel(dws, DW_SPI_SER, 0); + + dws->num_cs = hweight16(ser); + } + + /* * Try to detect the FIFO depth if not set by interface driver, * the depth could be from 2 to 256 from HW spec */ diff --git a/drivers/spi/spi-dw-mmio.c b/drivers/spi/spi-dw-mmio.c index cc74cbe034..819907e332 100644 --- a/drivers/spi/spi-dw-mmio.c +++ b/drivers/spi/spi-dw-mmio.c @@ -320,7 +320,11 @@ static int dw_spi_mmio_probe(struct platform_device *pdev) struct resource *mem; struct dw_spi *dws; int ret; - int num_cs; + + if (device_property_read_bool(&pdev->dev, "spi-slave")) { + dev_warn(&pdev->dev, "spi-slave is not yet supported\n"); + return -ENODEV; + } dwsmmio = devm_kzalloc(&pdev->dev, sizeof(struct dw_spi_mmio), GFP_KERNEL); @@ -364,11 +368,8 @@ static int dw_spi_mmio_probe(struct platform_device *pdev) &dws->reg_io_width)) dws->reg_io_width = 4; - num_cs = 4; - - device_property_read_u32(&pdev->dev, "num-cs", &num_cs); - - dws->num_cs = num_cs; + /* Rely on the auto-detection if no property specified */ + device_property_read_u32(&pdev->dev, "num-cs", &dws->num_cs); init_func = device_get_match_data(&pdev->dev); if (init_func) { diff --git a/drivers/spi/spi-dw.h b/drivers/spi/spi-dw.h index 6cafeee8ee..fc267c6437 100644 --- a/drivers/spi/spi-dw.h +++ b/drivers/spi/spi-dw.h @@ -164,8 +164,8 @@ struct dw_spi { u32 max_freq; /* max bus freq supported */ u32 reg_io_width; /* DR I/O width in bytes */ + u32 num_cs; /* chip select lines */ u16 bus_num; - u16 num_cs; /* supported slave numbers */ void (*set_cs)(struct spi_device *spi, bool enable); /* Current message transfer state info */ diff --git a/drivers/spi/spi-fsl-cpm.c b/drivers/spi/spi-fsl-cpm.c index 47c7a5c625..e335132080 100644 --- a/drivers/spi/spi-fsl-cpm.c +++ b/drivers/spi/spi-fsl-cpm.c @@ -98,19 +98,13 @@ static void fsl_spi_cpm_bufs_start(struct mpc8xxx_spi *mspi) mpc8xxx_spi_write_reg(&reg_base->command, SPCOM_STR); } -int fsl_spi_cpm_bufs(struct mpc8xxx_spi *mspi, - struct spi_transfer *t, bool is_dma_mapped) +int fsl_spi_cpm_bufs(struct mpc8xxx_spi *mspi, struct spi_transfer *t) { struct device *dev = mspi->dev; struct fsl_spi_reg __iomem *reg_base = mspi->reg_base; - if (is_dma_mapped) { - mspi->map_tx_dma = 0; - mspi->map_rx_dma = 0; - } else { - mspi->map_tx_dma = 1; - mspi->map_rx_dma = 1; - } + mspi->map_tx_dma = 1; + mspi->map_rx_dma = 1; if (!t->tx_buf) { mspi->tx_dma = mspi->dma_dummy_tx; @@ -147,7 +141,7 @@ int fsl_spi_cpm_bufs(struct mpc8xxx_spi *mspi, return -ENOMEM; } } else if (t->tx_buf) { - mspi->tx_dma = t->tx_dma; + mspi->tx_dma = 0; } if (mspi->map_rx_dma) { diff --git a/drivers/spi/spi-fsl-cpm.h b/drivers/spi/spi-fsl-cpm.h index 160f999708..e012abba05 100644 --- a/drivers/spi/spi-fsl-cpm.h +++ b/drivers/spi/spi-fsl-cpm.h @@ -20,7 +20,7 @@ #ifdef CONFIG_FSL_SOC extern void fsl_spi_cpm_reinit_txrx(struct mpc8xxx_spi *mspi); extern int fsl_spi_cpm_bufs(struct mpc8xxx_spi *mspi, - struct spi_transfer *t, bool is_dma_mapped); + struct spi_transfer *t); extern void fsl_spi_cpm_bufs_complete(struct mpc8xxx_spi *mspi); extern void fsl_spi_cpm_irq(struct mpc8xxx_spi *mspi, u32 events); extern int fsl_spi_cpm_init(struct mpc8xxx_spi *mspi); @@ -28,8 +28,7 @@ extern void fsl_spi_cpm_free(struct mpc8xxx_spi *mspi); #else static inline void fsl_spi_cpm_reinit_txrx(struct mpc8xxx_spi *mspi) { } static inline int fsl_spi_cpm_bufs(struct mpc8xxx_spi *mspi, -
struct spi_transfer *t, - bool is_dma_mapped) { return 0; } + struct spi_transfer *t) { return 0; } static inline void fsl_spi_cpm_bufs_complete(struct mpc8xxx_spi *mspi) { } static inline void fsl_spi_cpm_irq(struct mpc8xxx_spi *mspi, u32 events) { } static inline int fsl_spi_cpm_init(struct mpc8xxx_spi *mspi) { return 0; } diff --git a/drivers/spi/spi-fsl-dspi.c b/drivers/spi/spi-fsl-dspi.c index 38defdcf93..0a2730cd07 100644 --- a/drivers/spi/spi-fsl-dspi.c +++ b/drivers/spi/spi-fsl-dspi.c @@ -1458,7 +1458,6 @@ static void dspi_shutdown(struct platform_device *pdev) static struct platform_driver fsl_dspi_driver = { .driver.name = DRIVER_NAME, .driver.of_match_table = fsl_dspi_dt_ids, - .driver.owner = THIS_MODULE, .driver.pm = &dspi_pm, .probe = dspi_probe, .remove_new = dspi_remove, diff --git a/drivers/spi/spi-fsl-lpspi.c b/drivers/spi/spi-fsl-lpspi.c index 92a662d1b5..aa5ed254be 100644 --- a/drivers/spi/spi-fsl-lpspi.c +++ b/drivers/spi/spi-fsl-lpspi.c @@ -553,7 +553,7 @@ static int fsl_lpspi_dma_transfer(struct spi_controller *controller, { struct dma_async_tx_descriptor *desc_tx, *desc_rx; unsigned long transfer_timeout; - unsigned long timeout; + unsigned long time_left; struct sg_table *tx = &transfer->tx_sg, *rx = &transfer->rx_sg; int ret; @@ -594,9 +594,9 @@ static int fsl_lpspi_dma_transfer(struct spi_controller *controller, transfer->len); /* Wait eDMA to finish the data transfer.*/ - timeout = wait_for_completion_timeout(&fsl_lpspi->dma_tx_completion, - transfer_timeout); - if (!timeout) { + time_left = wait_for_completion_timeout(&fsl_lpspi->dma_tx_completion, + transfer_timeout); + if (!time_left) { dev_err(fsl_lpspi->dev, "I/O Error in DMA TX\n"); dmaengine_terminate_all(controller->dma_tx); dmaengine_terminate_all(controller->dma_rx); @@ -604,9 +604,9 @@ static int fsl_lpspi_dma_transfer(struct spi_controller *controller, return -ETIMEDOUT; } - timeout = wait_for_completion_timeout(&fsl_lpspi->dma_rx_completion, - transfer_timeout); - if (!timeout) { + time_left = wait_for_completion_timeout(&fsl_lpspi->dma_rx_completion, + transfer_timeout); + if (!time_left) { dev_err(fsl_lpspi->dev, "I/O Error in DMA RX\n"); dmaengine_terminate_all(controller->dma_tx); dmaengine_terminate_all(controller->dma_rx); diff --git a/drivers/spi/spi-fsl-spi.c b/drivers/spi/spi-fsl-spi.c index 97faf98480..997e07c0a2 100644 --- a/drivers/spi/spi-fsl-spi.c +++ b/drivers/spi/spi-fsl-spi.c @@ -249,8 +249,7 @@ static int fsl_spi_cpu_bufs(struct mpc8xxx_spi *mspi, return 0; } -static int fsl_spi_bufs(struct spi_device *spi, struct spi_transfer *t, - bool is_dma_mapped) +static int fsl_spi_bufs(struct spi_device *spi, struct spi_transfer *t) { struct mpc8xxx_spi *mpc8xxx_spi = spi_controller_get_devdata(spi->controller); struct fsl_spi_reg __iomem *reg_base; @@ -274,7 +273,7 @@ static int fsl_spi_bufs(struct spi_device *spi, struct spi_transfer *t, reinit_completion(&mpc8xxx_spi->done); if (mpc8xxx_spi->flags & SPI_CPM_MODE) - ret = fsl_spi_cpm_bufs(mpc8xxx_spi, t, is_dma_mapped); + ret = fsl_spi_cpm_bufs(mpc8xxx_spi, t); else ret = fsl_spi_cpu_bufs(mpc8xxx_spi, t, len); if (ret) @@ -353,7 +352,7 @@ static int fsl_spi_transfer_one(struct spi_controller *controller, if (status < 0) return status; if (t->len) - status = fsl_spi_bufs(spi, t, !!t->tx_dma || !!t->rx_dma); + status = fsl_spi_bufs(spi, t); if (status > 0) return -EMSGSIZE; diff --git a/drivers/spi/spi-imx.c b/drivers/spi/spi-imx.c index 09c676e50f..1439883326 100644 --- a/drivers/spi/spi-imx.c +++ b/drivers/spi/spi-imx.c @@ -1395,7 +1395,7 
@@ static int spi_imx_dma_transfer(struct spi_imx_data *spi_imx, { struct dma_async_tx_descriptor *desc_tx, *desc_rx; unsigned long transfer_timeout; - unsigned long timeout; + unsigned long time_left; struct spi_controller *controller = spi_imx->controller; struct sg_table *tx = &transfer->tx_sg, *rx = &transfer->rx_sg; struct scatterlist *last_sg = sg_last(rx->sgl, rx->nents); @@ -1461,18 +1461,18 @@ static int spi_imx_dma_transfer(struct spi_imx_data *spi_imx, transfer_timeout = spi_imx_calculate_timeout(spi_imx, transfer->len); /* Wait SDMA to finish the data transfer.*/ - timeout = wait_for_completion_timeout(&spi_imx->dma_tx_completion, + time_left = wait_for_completion_timeout(&spi_imx->dma_tx_completion, transfer_timeout); - if (!timeout) { + if (!time_left) { dev_err(spi_imx->dev, "I/O Error in DMA TX\n"); dmaengine_terminate_all(controller->dma_tx); dmaengine_terminate_all(controller->dma_rx); return -ETIMEDOUT; } - timeout = wait_for_completion_timeout(&spi_imx->dma_rx_completion, - transfer_timeout); - if (!timeout) { + time_left = wait_for_completion_timeout(&spi_imx->dma_rx_completion, + transfer_timeout); + if (!time_left) { dev_err(&controller->dev, "I/O Error in DMA RX\n"); spi_imx->devtype_data->reset(spi_imx); dmaengine_terminate_all(controller->dma_rx); @@ -1491,7 +1491,7 @@ static int spi_imx_pio_transfer(struct spi_device *spi, { struct spi_imx_data *spi_imx = spi_controller_get_devdata(spi->controller); unsigned long transfer_timeout; - unsigned long timeout; + unsigned long time_left; spi_imx->tx_buf = transfer->tx_buf; spi_imx->rx_buf = transfer->rx_buf; @@ -1507,9 +1507,9 @@ static int spi_imx_pio_transfer(struct spi_device *spi, transfer_timeout = spi_imx_calculate_timeout(spi_imx, transfer->len); - timeout = wait_for_completion_timeout(&spi_imx->xfer_done, - transfer_timeout); - if (!timeout) { + time_left = wait_for_completion_timeout(&spi_imx->xfer_done, + transfer_timeout); + if (!time_left) { dev_err(&spi->dev, "I/O Error in PIO\n"); spi_imx->devtype_data->reset(spi_imx); return -ETIMEDOUT; diff --git a/drivers/spi/spi-loopback-test.c b/drivers/spi/spi-loopback-test.c index fee8893d27..31a878d945 100644 --- a/drivers/spi/spi-loopback-test.c +++ b/drivers/spi/spi-loopback-test.c @@ -396,7 +396,6 @@ MODULE_DEVICE_TABLE(of, spi_loopback_test_of_match); static struct spi_driver spi_loopback_test_driver = { .driver = { .name = "spi-loopback-test", - .owner = THIS_MODULE, .of_match_table = spi_loopback_test_of_match, }, .probe = spi_loopback_test_probe, diff --git a/drivers/spi/spi-microchip-core.c b/drivers/spi/spi-microchip-core.c index 634364c7cf..99c25e6a93 100644 --- a/drivers/spi/spi-microchip-core.c +++ b/drivers/spi/spi-microchip-core.c @@ -21,7 +21,7 @@ #include <linux/spi/spi.h> #define MAX_LEN (0xffff) -#define MAX_CS (8) +#define MAX_CS (1) #define DEFAULT_FRAMESIZE (8) #define FIFO_DEPTH (32) #define CLK_GEN_MODE1_MAX (255) @@ -75,6 +75,7 @@ #define REG_CONTROL (0x00) #define REG_FRAME_SIZE (0x04) +#define FRAME_SIZE_MASK GENMASK(5, 0) #define REG_STATUS (0x08) #define REG_INT_CLEAR (0x0c) #define REG_RX_DATA (0x10) @@ -89,6 +90,9 @@ #define REG_RIS (0x24) #define REG_CONTROL2 (0x28) #define REG_COMMAND (0x2c) +#define COMMAND_CLRFRAMECNT BIT(4) +#define COMMAND_TXFIFORST BIT(3) +#define COMMAND_RXFIFORST BIT(2) #define REG_PKTSIZE (0x30) #define REG_CMD_SIZE (0x34) #define REG_HWSTATUS (0x38) @@ -103,6 +107,7 @@ struct mchp_corespi { u8 *rx_buf; u32 clk_gen; /* divider for spi output clock generated by the controller */ u32 clk_mode; + u32 
pending_slave_select; int irq; int tx_len; int rx_len; @@ -148,62 +153,59 @@ static inline void mchp_corespi_read_fifo(struct mchp_corespi *spi) static void mchp_corespi_enable_ints(struct mchp_corespi *spi) { - u32 control, mask = INT_ENABLE_MASK; - - mchp_corespi_disable(spi); - - control = mchp_corespi_read(spi, REG_CONTROL); - - control |= mask; - mchp_corespi_write(spi, REG_CONTROL, control); + u32 control = mchp_corespi_read(spi, REG_CONTROL); - control |= CONTROL_ENABLE; + control |= INT_ENABLE_MASK; mchp_corespi_write(spi, REG_CONTROL, control); } static void mchp_corespi_disable_ints(struct mchp_corespi *spi) { - u32 control, mask = INT_ENABLE_MASK; - - mchp_corespi_disable(spi); - - control = mchp_corespi_read(spi, REG_CONTROL); - control &= ~mask; - mchp_corespi_write(spi, REG_CONTROL, control); + u32 control = mchp_corespi_read(spi, REG_CONTROL); - control |= CONTROL_ENABLE; + control &= ~INT_ENABLE_MASK; mchp_corespi_write(spi, REG_CONTROL, control); } static inline void mchp_corespi_set_xfer_size(struct mchp_corespi *spi, int len) { u32 control; - u16 lenpart; + u32 lenpart; + u32 frames = mchp_corespi_read(spi, REG_FRAMESUP); /* - * Disable the SPI controller. Writes to transfer length have - * no effect when the controller is enabled. + * Writing to FRAMECNT in REG_CONTROL will reset the frame count, taking + * a shortcut requires an explicit clear. */ - mchp_corespi_disable(spi); + if (frames == len) { + mchp_corespi_write(spi, REG_COMMAND, COMMAND_CLRFRAMECNT); + return; + } /* * The lower 16 bits of the frame count are stored in the control reg * for legacy reasons, but the upper 16 written to a different register: * FRAMESUP. While both the upper and lower bits can be *READ* from the - * FRAMESUP register, writing to the lower 16 bits is a NOP + * FRAMESUP register, writing to the lower 16 bits is (supposedly) a NOP. + * + * The driver used to disable the controller while modifying the frame + * count, and mask off the lower 16 bits of len while writing to + * FRAMES_UP. When the driver was changed to disable the controller as + * infrequently as possible, it was discovered that the logic of + * lenpart = len & 0xffff_0000 + * write(REG_FRAMESUP, lenpart) + * would actually write zeros into the lower 16 bits on an mpfs250t-es, + * despite documentation stating these bits were read-only. + * Writing len unmasked into FRAMES_UP ensures those bits aren't zeroed + * on an mpfs250t-es and will be a NOP for the lower 16 bits on hardware + * that matches the documentation. */ lenpart = len & 0xffff; - control = mchp_corespi_read(spi, REG_CONTROL); control &= ~CONTROL_FRAMECNT_MASK; control |= lenpart << CONTROL_FRAMECNT_SHIFT; mchp_corespi_write(spi, REG_CONTROL, control); - - lenpart = len & 0xffff0000; - mchp_corespi_write(spi, REG_FRAMESUP, lenpart); - - control |= CONTROL_ENABLE; - mchp_corespi_write(spi, REG_CONTROL, control); + mchp_corespi_write(spi, REG_FRAMESUP, len); } static inline void mchp_corespi_write_fifo(struct mchp_corespi *spi) @@ -226,17 +228,22 @@ static inline void mchp_corespi_write_fifo(struct mchp_corespi *spi) static inline void mchp_corespi_set_framesize(struct mchp_corespi *spi, int bt) { + u32 frame_size = mchp_corespi_read(spi, REG_FRAME_SIZE); u32 control; + if ((frame_size & FRAME_SIZE_MASK) == bt) + return; + /* * Disable the SPI controller. Writes to the frame size have * no effect when the controller is enabled. 
*/ - mchp_corespi_disable(spi); + control = mchp_corespi_read(spi, REG_CONTROL); + control &= ~CONTROL_ENABLE; + mchp_corespi_write(spi, REG_CONTROL, control); mchp_corespi_write(spi, REG_FRAME_SIZE, bt); - control = mchp_corespi_read(spi, REG_CONTROL); control |= CONTROL_ENABLE; mchp_corespi_write(spi, REG_CONTROL, control); } @@ -249,8 +256,18 @@ static void mchp_corespi_set_cs(struct spi_device *spi, bool disable) reg = mchp_corespi_read(corespi, REG_SLAVE_SELECT); reg &= ~BIT(spi_get_chipselect(spi, 0)); reg |= !disable << spi_get_chipselect(spi, 0); + corespi->pending_slave_select = reg; - mchp_corespi_write(corespi, REG_SLAVE_SELECT, reg); + /* + * Only deassert chip select immediately. Writing to some registers + * requires the controller to be disabled, which results in the + * output pins being tristated and can cause the SCLK and MOSI lines + * to transition. Therefore asserting the chip select is deferred + * until just before writing to the TX FIFO, to ensure the device + * doesn't see any spurious clock transitions whilst CS is enabled. + */ + if (((spi->mode & SPI_CS_HIGH) == 0) == disable) + mchp_corespi_write(corespi, REG_SLAVE_SELECT, reg); } static int mchp_corespi_setup(struct spi_device *spi) @@ -266,6 +283,7 @@ static int mchp_corespi_setup(struct spi_device *spi) if (spi->mode & SPI_CS_HIGH) { reg = mchp_corespi_read(corespi, REG_SLAVE_SELECT); reg |= BIT(spi_get_chipselect(spi, 0)); + corespi->pending_slave_select = reg; mchp_corespi_write(corespi, REG_SLAVE_SELECT, reg); } return 0; @@ -276,17 +294,13 @@ static void mchp_corespi_init(struct spi_controller *host, struct mchp_corespi * unsigned long clk_hz; u32 control = mchp_corespi_read(spi, REG_CONTROL); - control |= CONTROL_MASTER; + control &= ~CONTROL_ENABLE; + mchp_corespi_write(spi, REG_CONTROL, control); + control |= CONTROL_MASTER; control &= ~CONTROL_MODE_MASK; control |= MOTOROLA_MODE; - mchp_corespi_set_framesize(spi, DEFAULT_FRAMESIZE); - - /* max. possible spi clock rate is the apb clock rate */ - clk_hz = clk_get_rate(spi->clk); - host->max_speed_hz = clk_hz; - /* * The controller must be configured so that it doesn't remove Chip * Select until the entire message has been transferred, even if at @@ -295,11 +309,16 @@ static void mchp_corespi_init(struct spi_controller *host, struct mchp_corespi * * BIGFIFO mode is also enabled, which sets the fifo depth to 32 frames * for the 8 bit transfers that this driver uses. */ - control = mchp_corespi_read(spi, REG_CONTROL); control |= CONTROL_SPS | CONTROL_BIGFIFO; mchp_corespi_write(spi, REG_CONTROL, control); + mchp_corespi_set_framesize(spi, DEFAULT_FRAMESIZE); + + /* max. possible spi clock rate is the apb clock rate */ + clk_hz = clk_get_rate(spi->clk); + host->max_speed_hz = clk_hz; + mchp_corespi_enable_ints(spi); /* @@ -307,7 +326,8 @@ static void mchp_corespi_init(struct spi_controller *host, struct mchp_corespi * * select is relinquished to the hardware. SSELOUT is enabled too so we * can deal with active high targets. 
*/ - mchp_corespi_write(spi, REG_SLAVE_SELECT, SSELOUT | SSEL_DIRECT); + spi->pending_slave_select = SSELOUT | SSEL_DIRECT; + mchp_corespi_write(spi, REG_SLAVE_SELECT, spi->pending_slave_select); control = mchp_corespi_read(spi, REG_CONTROL); @@ -321,8 +341,6 @@ static inline void mchp_corespi_set_clk_gen(struct mchp_corespi *spi) { u32 control; - mchp_corespi_disable(spi); - control = mchp_corespi_read(spi, REG_CONTROL); if (spi->clk_mode) control |= CONTROL_CLKMODE; @@ -331,12 +349,12 @@ static inline void mchp_corespi_set_clk_gen(struct mchp_corespi *spi) mchp_corespi_write(spi, REG_CLK_GEN, spi->clk_gen); mchp_corespi_write(spi, REG_CONTROL, control); - mchp_corespi_write(spi, REG_CONTROL, control | CONTROL_ENABLE); } static inline void mchp_corespi_set_mode(struct mchp_corespi *spi, unsigned int mode) { - u32 control, mode_val; + u32 mode_val; + u32 control = mchp_corespi_read(spi, REG_CONTROL); switch (mode & SPI_MODE_X_MASK) { case SPI_MODE_0: @@ -354,12 +372,13 @@ static inline void mchp_corespi_set_mode(struct mchp_corespi *spi, unsigned int } /* - * Disable the SPI controller. Writes to the frame size have + * Disable the SPI controller. Writes to the frame protocol have * no effect when the controller is enabled. */ - mchp_corespi_disable(spi); - control = mchp_corespi_read(spi, REG_CONTROL); + control &= ~CONTROL_ENABLE; + mchp_corespi_write(spi, REG_CONTROL, control); + control &= ~(SPI_MODE_X_MASK << MODE_X_MASK_SHIFT); control |= mode_val; @@ -380,21 +399,18 @@ static irqreturn_t mchp_corespi_interrupt(int irq, void *dev_id) if (intfield == 0) return IRQ_NONE; - if (intfield & INT_TXDONE) { + if (intfield & INT_TXDONE) mchp_corespi_write(spi, REG_INT_CLEAR, INT_TXDONE); + if (intfield & INT_RXRDY) { + mchp_corespi_write(spi, REG_INT_CLEAR, INT_RXRDY); + if (spi->rx_len) mchp_corespi_read_fifo(spi); - - if (spi->tx_len) - mchp_corespi_write_fifo(spi); - - if (!spi->rx_len) - finalise = true; } - if (intfield & INT_RXRDY) - mchp_corespi_write(spi, REG_INT_CLEAR, INT_RXRDY); + if (!spi->rx_len && !spi->tx_len) + finalise = true; if (intfield & INT_RX_CHANNEL_OVERFLOW) { mchp_corespi_write(spi, REG_INT_CLEAR, INT_RX_CHANNEL_OVERFLOW); @@ -479,8 +495,13 @@ static int mchp_corespi_transfer_one(struct spi_controller *host, mchp_corespi_set_xfer_size(spi, (spi->tx_len > FIFO_DEPTH) ? 
FIFO_DEPTH : spi->tx_len); - if (spi->tx_len) + mchp_corespi_write(spi, REG_COMMAND, COMMAND_RXFIFORST | COMMAND_TXFIFORST); + + mchp_corespi_write(spi, REG_SLAVE_SELECT, spi->pending_slave_select); + + while (spi->tx_len) mchp_corespi_write_fifo(spi); + return 1; } diff --git a/drivers/spi/spi-mt65xx.c b/drivers/spi/spi-mt65xx.c index e4cb22fe00..36c2f52cd6 100644 --- a/drivers/spi/spi-mt65xx.c +++ b/drivers/spi/spi-mt65xx.c @@ -748,7 +748,7 @@ static irqreturn_t mtk_spi_interrupt(int irq, void *dev_id) u32 cmd, reg_val, cnt, remainder, len; struct spi_controller *host = dev_id; struct mtk_spi *mdata = spi_controller_get_devdata(host); - struct spi_transfer *trans = mdata->cur_transfer; + struct spi_transfer *xfer = mdata->cur_transfer; reg_val = readl(mdata->base + SPI_STATUS0_REG); if (reg_val & MTK_SPI_PAUSE_INT_STATUS) @@ -762,42 +762,40 @@ static irqreturn_t mtk_spi_interrupt(int irq, void *dev_id) return IRQ_HANDLED; } - if (!host->can_dma(host, NULL, trans)) { - if (trans->rx_buf) { + if (!host->can_dma(host, NULL, xfer)) { + if (xfer->rx_buf) { cnt = mdata->xfer_len / 4; ioread32_rep(mdata->base + SPI_RX_DATA_REG, - trans->rx_buf + mdata->num_xfered, cnt); + xfer->rx_buf + mdata->num_xfered, cnt); remainder = mdata->xfer_len % 4; if (remainder > 0) { reg_val = readl(mdata->base + SPI_RX_DATA_REG); - memcpy(trans->rx_buf + - mdata->num_xfered + - (cnt * 4), + memcpy(xfer->rx_buf + (cnt * 4) + mdata->num_xfered, &reg_val, remainder); } } mdata->num_xfered += mdata->xfer_len; - if (mdata->num_xfered == trans->len) { + if (mdata->num_xfered == xfer->len) { spi_finalize_current_transfer(host); return IRQ_HANDLED; } - len = trans->len - mdata->num_xfered; + len = xfer->len - mdata->num_xfered; mdata->xfer_len = min(MTK_SPI_MAX_FIFO_SIZE, len); mtk_spi_setup_packet(host); - if (trans->tx_buf) { + if (xfer->tx_buf) { cnt = mdata->xfer_len / 4; iowrite32_rep(mdata->base + SPI_TX_DATA_REG, - trans->tx_buf + mdata->num_xfered, cnt); + xfer->tx_buf + mdata->num_xfered, cnt); remainder = mdata->xfer_len % 4; if (remainder > 0) { reg_val = 0; memcpy(&reg_val, - trans->tx_buf + (cnt * 4) + mdata->num_xfered, + xfer->tx_buf + (cnt * 4) + mdata->num_xfered, remainder); writel(reg_val, mdata->base + SPI_TX_DATA_REG); } @@ -809,21 +807,21 @@ static irqreturn_t mtk_spi_interrupt(int irq, void *dev_id) } if (mdata->tx_sgl) - trans->tx_dma += mdata->xfer_len; + xfer->tx_dma += mdata->xfer_len; if (mdata->rx_sgl) - trans->rx_dma += mdata->xfer_len; + xfer->rx_dma += mdata->xfer_len; if (mdata->tx_sgl && (mdata->tx_sgl_len == 0)) { mdata->tx_sgl = sg_next(mdata->tx_sgl); if (mdata->tx_sgl) { - trans->tx_dma = sg_dma_address(mdata->tx_sgl); + xfer->tx_dma = sg_dma_address(mdata->tx_sgl); mdata->tx_sgl_len = sg_dma_len(mdata->tx_sgl); } } if (mdata->rx_sgl && (mdata->rx_sgl_len == 0)) { mdata->rx_sgl = sg_next(mdata->rx_sgl); if (mdata->rx_sgl) { - trans->rx_dma = sg_dma_address(mdata->rx_sgl); + xfer->rx_dma = sg_dma_address(mdata->rx_sgl); mdata->rx_sgl_len = sg_dma_len(mdata->rx_sgl); } } @@ -841,7 +839,7 @@ static irqreturn_t mtk_spi_interrupt(int irq, void *dev_id) mtk_spi_update_mdata_len(host); mtk_spi_setup_packet(host); - mtk_spi_setup_dma_addr(host, trans); + mtk_spi_setup_dma_addr(host, xfer); mtk_spi_enable_transfer(host); return IRQ_HANDLED; diff --git a/drivers/spi/spi-mt7621.c b/drivers/spi/spi-mt7621.c index 4e9053d03d..3770b8e096 100644 --- a/drivers/spi/spi-mt7621.c +++ b/drivers/spi/spi-mt7621.c @@ -52,6 +52,8 @@ #define MT7621_CPOL BIT(4) #define MT7621_LSB_FIRST BIT(3) +#define
MT7621_NATIVE_CS_COUNT 2 + struct mt7621_spi { struct spi_controller *host; void __iomem *base; @@ -75,10 +77,11 @@ static inline void mt7621_spi_write(struct mt7621_spi *rs, u32 reg, u32 val) iowrite32(val, rs->base + reg); } -static void mt7621_spi_set_cs(struct spi_device *spi, int enable) +static void mt7621_spi_set_native_cs(struct spi_device *spi, bool enable) { struct mt7621_spi *rs = spidev_to_mt7621_spi(spi); int cs = spi_get_chipselect(spi, 0); + bool active = spi->mode & SPI_CS_HIGH ? enable : !enable; u32 polar = 0; u32 host; @@ -94,7 +97,7 @@ static void mt7621_spi_set_cs(struct spi_device *spi, int enable) rs->pending_write = 0; - if (enable) + if (active) polar = BIT(cs); mt7621_spi_write(rs, MT7621_SPI_POLAR, polar); } @@ -154,6 +157,23 @@ static inline int mt7621_spi_wait_till_ready(struct mt7621_spi *rs) return -ETIMEDOUT; } +static int mt7621_spi_prepare_message(struct spi_controller *host, + struct spi_message *m) +{ + struct mt7621_spi *rs = spi_controller_get_devdata(host); + struct spi_device *spi = m->spi; + unsigned int speed = spi->max_speed_hz; + struct spi_transfer *t = NULL; + + mt7621_spi_wait_till_ready(rs); + + list_for_each_entry(t, &m->transfers, transfer_list) + if (t->speed_hz < speed) + speed = t->speed_hz; + + return mt7621_spi_prepare(spi, speed); +} + static void mt7621_spi_read_half_duplex(struct mt7621_spi *rs, int rx_len, u8 *buf) { @@ -243,59 +263,30 @@ static void mt7621_spi_write_half_duplex(struct mt7621_spi *rs, } rs->pending_write = len; + mt7621_spi_flush(rs); } -static int mt7621_spi_transfer_one_message(struct spi_controller *host, - struct spi_message *m) +static int mt7621_spi_transfer_one(struct spi_controller *host, + struct spi_device *spi, + struct spi_transfer *t) { struct mt7621_spi *rs = spi_controller_get_devdata(host); - struct spi_device *spi = m->spi; - unsigned int speed = spi->max_speed_hz; - struct spi_transfer *t = NULL; - int status = 0; - - mt7621_spi_wait_till_ready(rs); - list_for_each_entry(t, &m->transfers, transfer_list) - if (t->speed_hz < speed) - speed = t->speed_hz; - - if (mt7621_spi_prepare(spi, speed)) { - status = -EIO; - goto msg_done; - } - - /* Assert CS */ - mt7621_spi_set_cs(spi, 1); - - m->actual_length = 0; - list_for_each_entry(t, &m->transfers, transfer_list) { - if ((t->rx_buf) && (t->tx_buf)) { - /* - * This controller will shift some extra data out - * of spi_opcode if (mosi_bit_cnt > 0) && - * (cmd_bit_cnt == 0). So the claimed full-duplex - * support is broken since we have no way to read - * the MISO value during that bit. - */ - status = -EIO; - goto msg_done; - } else if (t->rx_buf) { - mt7621_spi_read_half_duplex(rs, t->len, t->rx_buf); - } else if (t->tx_buf) { - mt7621_spi_write_half_duplex(rs, t->len, t->tx_buf); - } - m->actual_length += t->len; + if ((t->rx_buf) && (t->tx_buf)) { + /* + * This controller will shift some extra data out + * of spi_opcode if (mosi_bit_cnt > 0) && + * (cmd_bit_cnt == 0). So the claimed full-duplex + * support is broken since we have no way to read + * the MISO value during that bit. 
+ */ + return -EIO; + } else if (t->rx_buf) { + mt7621_spi_read_half_duplex(rs, t->len, t->rx_buf); + } else if (t->tx_buf) { + mt7621_spi_write_half_duplex(rs, t->len, t->tx_buf); } - /* Flush data and deassert CS */ - mt7621_spi_flush(rs); - mt7621_spi_set_cs(spi, 0); - -msg_done: - m->status = status; - spi_finalize_current_message(host); - return 0; } @@ -353,10 +344,14 @@ static int mt7621_spi_probe(struct platform_device *pdev) host->mode_bits = SPI_LSB_FIRST; host->flags = SPI_CONTROLLER_HALF_DUPLEX; host->setup = mt7621_spi_setup; - host->transfer_one_message = mt7621_spi_transfer_one_message; + host->prepare_message = mt7621_spi_prepare_message; + host->set_cs = mt7621_spi_set_native_cs; + host->transfer_one = mt7621_spi_transfer_one; host->bits_per_word_mask = SPI_BPW_MASK(8); host->dev.of_node = pdev->dev.of_node; - host->num_chipselect = 2; + host->max_native_cs = MT7621_NATIVE_CS_COUNT; + host->num_chipselect = MT7621_NATIVE_CS_COUNT; + host->use_gpio_descriptors = true; dev_set_drvdata(&pdev->dev, host); diff --git a/drivers/spi/spi-mux.c b/drivers/spi/spi-mux.c index a8bb07b38e..c02c420444 100644 --- a/drivers/spi/spi-mux.c +++ b/drivers/spi/spi-mux.c @@ -68,6 +68,8 @@ static int spi_mux_select(struct spi_device *spi) priv->current_cs = spi_get_chipselect(spi, 0); + spi_setup(priv->spi); + return 0; } diff --git a/drivers/spi/spi-oc-tiny.c b/drivers/spi/spi-oc-tiny.c index 6ea38f5e7d..7d8c5cd680 100644 --- a/drivers/spi/spi-oc-tiny.c +++ b/drivers/spi/spi-oc-tiny.c @@ -184,8 +184,6 @@ static irqreturn_t tiny_spi_irq(int irq, void *dev) } #ifdef CONFIG_OF -#include <linux/of_gpio.h> - static int tiny_spi_of_probe(struct platform_device *pdev) { struct tiny_spi *hw = platform_get_drvdata(pdev); diff --git a/drivers/spi/spi-omap2-mcspi.c b/drivers/spi/spi-omap2-mcspi.c index ddf1c684bc..002f29dbce 100644 --- a/drivers/spi/spi-omap2-mcspi.c +++ b/drivers/spi/spi-omap2-mcspi.c @@ -131,6 +131,7 @@ struct omap2_mcspi { unsigned int pin_dir:1; size_t max_xfer_len; u32 ref_clk_hz; + bool use_multi_mode; }; struct omap2_mcspi_cs { @@ -256,10 +257,15 @@ static void omap2_mcspi_set_cs(struct spi_device *spi, bool enable) l = mcspi_cached_chconf0(spi); - if (enable) + /* Only enable chip select manually if single mode is used */ + if (mcspi->use_multi_mode) { l &= ~OMAP2_MCSPI_CHCONF_FORCE; - else - l |= OMAP2_MCSPI_CHCONF_FORCE; + } else { + if (enable) + l &= ~OMAP2_MCSPI_CHCONF_FORCE; + else + l |= OMAP2_MCSPI_CHCONF_FORCE; + } mcspi_write_chconf0(spi, l); @@ -283,7 +289,12 @@ static void omap2_mcspi_set_mode(struct spi_controller *ctlr) l |= (OMAP2_MCSPI_MODULCTRL_MS); } else { l &= ~(OMAP2_MCSPI_MODULCTRL_MS); - l |= OMAP2_MCSPI_MODULCTRL_SINGLE; + + /* Enable single mode if needed */ + if (mcspi->use_multi_mode) + l &= ~OMAP2_MCSPI_MODULCTRL_SINGLE; + else + l |= OMAP2_MCSPI_MODULCTRL_SINGLE; } mcspi_write_reg(ctlr, OMAP2_MCSPI_MODULCTRL, l); @@ -1175,13 +1186,6 @@ static int omap2_mcspi_transfer_one(struct spi_controller *ctlr, t->bits_per_word == spi->bits_per_word) par_override = 0; } - if (cd && cd->cs_per_word) { - chconf = mcspi->ctx.modulctrl; - chconf &= ~OMAP2_MCSPI_MODULCTRL_SINGLE; - mcspi_write_reg(ctlr, OMAP2_MCSPI_MODULCTRL, chconf); - mcspi->ctx.modulctrl = - mcspi_read_cs_reg(spi, OMAP2_MCSPI_MODULCTRL); - } chconf = mcspi_cached_chconf0(spi); chconf &= ~OMAP2_MCSPI_CHCONF_TRM_MASK; @@ -1240,14 +1244,6 @@ out: status = omap2_mcspi_setup_transfer(spi, NULL); } - if (cd && cd->cs_per_word) { - chconf = mcspi->ctx.modulctrl; - chconf |= OMAP2_MCSPI_MODULCTRL_SINGLE; - 
mcspi_write_reg(ctlr, OMAP2_MCSPI_MODULCTRL, chconf); - mcspi->ctx.modulctrl = - mcspi_read_cs_reg(spi, OMAP2_MCSPI_MODULCTRL); - } - omap2_mcspi_set_enable(spi, 0); if (spi_get_csgpiod(spi, 0)) @@ -1265,15 +1261,59 @@ static int omap2_mcspi_prepare_message(struct spi_controller *ctlr, struct omap2_mcspi *mcspi = spi_controller_get_devdata(ctlr); struct omap2_mcspi_regs *ctx = &mcspi->ctx; struct omap2_mcspi_cs *cs; + struct spi_transfer *tr; + u8 bits_per_word; + + /* + * The conditions are strict, it is mandatory to check each transfer of the list to see if + * multi-mode is applicable. + */ + mcspi->use_multi_mode = true; + list_for_each_entry(tr, &msg->transfers, transfer_list) { + if (!tr->bits_per_word) + bits_per_word = msg->spi->bits_per_word; + else + bits_per_word = tr->bits_per_word; - /* Only a single channel can have the FORCE bit enabled + /* + * Check if this transfer contains only one word; + */ + if (bits_per_word < 8 && tr->len == 1) { + /* multi-mode is applicable, only one word (1..7 bits) */ + } else if (bits_per_word >= 8 && tr->len == bits_per_word / 8) { + /* multi-mode is applicable, only one word (8..32 bits) */ + } else { + /* multi-mode is not applicable: more than one word in the transfer */ + mcspi->use_multi_mode = false; + } + + /* Check if transfer asks to change the CS status after the transfer */ + if (!tr->cs_change) + mcspi->use_multi_mode = false; + + /* + * If at least one message is not compatible, switch back to single mode + * + * The bits_per_word of certain transfer can be different, but it will have no + * impact on the signal itself. + */ + if (!mcspi->use_multi_mode) + break; + } + + omap2_mcspi_set_mode(ctlr); + + /* In single mode only a single channel can have the FORCE bit enabled * in its chconf0 register. * Scan all channels and disable them except the current one. * A FORCE can remain from a last transfer having cs_change enabled + * + * In multi mode all FORCE bits must be disabled. 
*/ list_for_each_entry(cs, &ctx->cs, node) { - if (msg->spi->controller_state == cs) + if (msg->spi->controller_state == cs && !mcspi->use_multi_mode) { continue; + } if ((cs->chconf0 & OMAP2_MCSPI_CHCONF_FORCE)) { cs->chconf0 &= ~OMAP2_MCSPI_CHCONF_FORCE; diff --git a/drivers/spi/spi-pic32-sqi.c b/drivers/spi/spi-pic32-sqi.c index 3f1e5b2777..0031063a7e 100644 --- a/drivers/spi/spi-pic32-sqi.c +++ b/drivers/spi/spi-pic32-sqi.c @@ -344,7 +344,7 @@ static int pic32_sqi_one_message(struct spi_controller *host, struct spi_transfer *xfer; struct pic32_sqi *sqi; int ret = 0, mode; - unsigned long timeout; + unsigned long time_left; u32 val; sqi = spi_controller_get_devdata(host); @@ -410,8 +410,8 @@ static int pic32_sqi_one_message(struct spi_controller *host, writel(val, sqi->regs + PESQI_BD_CTRL_REG); /* wait for xfer completion */ - timeout = wait_for_completion_timeout(&sqi->xfer_done, 5 * HZ); - if (timeout == 0) { + time_left = wait_for_completion_timeout(&sqi->xfer_done, 5 * HZ); + if (time_left == 0) { dev_err(&sqi->host->dev, "wait timedout/interrupted\n"); ret = -ETIMEDOUT; msg->status = ret; diff --git a/drivers/spi/spi-pic32.c b/drivers/spi/spi-pic32.c index 709edb70ad..b8bcc220e9 100644 --- a/drivers/spi/spi-pic32.c +++ b/drivers/spi/spi-pic32.c @@ -498,7 +498,7 @@ static int pic32_spi_one_transfer(struct spi_controller *host, { struct pic32_spi *pic32s; bool dma_issued = false; - unsigned long timeout; + unsigned long time_left; int ret; pic32s = spi_controller_get_devdata(host); @@ -545,8 +545,8 @@ static int pic32_spi_one_transfer(struct spi_controller *host, } /* wait for completion */ - timeout = wait_for_completion_timeout(&pic32s->xfer_done, 2 * HZ); - if (timeout == 0) { + time_left = wait_for_completion_timeout(&pic32s->xfer_done, 2 * HZ); + if (time_left == 0) { dev_err(&spi->dev, "wait error/timedout\n"); if (dma_issued) { dmaengine_terminate_all(host->dma_rx); diff --git a/drivers/spi/spi-pxa2xx-dma.c b/drivers/spi/spi-pxa2xx-dma.c index be563f0dd0..08cb6e96ac 100644 --- a/drivers/spi/spi-pxa2xx-dma.c +++ b/drivers/spi/spi-pxa2xx-dma.c @@ -6,17 +6,22 @@ * Author: Mika Westerberg <mika.westerberg@linux.intel.com> */ -#include <linux/device.h> +#include <linux/atomic.h> +#include <linux/dev_printk.h> #include <linux/dma-mapping.h> #include <linux/dmaengine.h> +#include <linux/errno.h> +#include <linux/irqreturn.h> #include <linux/scatterlist.h> -#include <linux/sizes.h> +#include <linux/string.h> +#include <linux/types.h> -#include <linux/spi/pxa2xx_spi.h> #include <linux/spi/spi.h> #include "spi-pxa2xx.h" +struct device; + static void pxa2xx_spi_dma_transfer_complete(struct driver_data *drv_data, bool error) { @@ -63,8 +68,6 @@ pxa2xx_spi_dma_prepare_one(struct driver_data *drv_data, enum dma_transfer_direction dir, struct spi_transfer *xfer) { - struct chip_data *chip = - spi_get_ctldata(drv_data->controller->cur_msg->spi); enum dma_slave_buswidth width; struct dma_slave_config cfg; struct dma_chan *chan; @@ -89,14 +92,14 @@ pxa2xx_spi_dma_prepare_one(struct driver_data *drv_data, if (dir == DMA_MEM_TO_DEV) { cfg.dst_addr = drv_data->ssp->phys_base + SSDR; cfg.dst_addr_width = width; - cfg.dst_maxburst = chip->dma_burst_size; + cfg.dst_maxburst = drv_data->controller_info->dma_burst_size; sgt = &xfer->tx_sg; chan = drv_data->controller->dma_tx; } else { cfg.src_addr = drv_data->ssp->phys_base + SSDR; cfg.src_addr_width = width; - cfg.src_maxburst = chip->dma_burst_size; + cfg.src_maxburst = drv_data->controller_info->dma_burst_size; sgt = &xfer->rx_sg; chan = 
drv_data->controller->dma_rx; @@ -220,24 +223,3 @@ void pxa2xx_spi_dma_release(struct driver_data *drv_data) controller->dma_tx = NULL; } } - -int pxa2xx_spi_set_dma_burst_and_threshold(struct chip_data *chip, - struct spi_device *spi, - u8 bits_per_word, u32 *burst_code, - u32 *threshold) -{ - struct pxa2xx_spi_chip *chip_info = spi->controller_data; - struct driver_data *drv_data = spi_controller_get_devdata(spi->controller); - u32 dma_burst_size = drv_data->controller_info->dma_burst_size; - - /* - * If the DMA burst size is given in chip_info we use that, - * otherwise we use the default. Also we use the default FIFO - * thresholds for now. - */ - *burst_code = chip_info ? chip_info->dma_burst_size : dma_burst_size; - *threshold = SSCR1_RxTresh(RX_THRESH_DFLT) - | SSCR1_TxTresh(TX_THRESH_DFLT); - - return 0; -} diff --git a/drivers/spi/spi-pxa2xx-pci.c b/drivers/spi/spi-pxa2xx-pci.c index 861b21c635..6d2efdb0e9 100644 --- a/drivers/spi/spi-pxa2xx-pci.c +++ b/drivers/spi/spi-pxa2xx-pci.c @@ -6,15 +6,21 @@ * Copyright (C) 2016, 2021 Intel Corporation */ #include <linux/clk-provider.h> +#include <linux/device.h> +#include <linux/err.h> #include <linux/module.h> #include <linux/pci.h> #include <linux/platform_device.h> - -#include <linux/spi/pxa2xx_spi.h> +#include <linux/property.h> +#include <linux/sprintf.h> +#include <linux/string.h> +#include <linux/types.h> #include <linux/dmaengine.h> #include <linux/platform_data/dma-dw.h> +#include "spi-pxa2xx.h" + #define PCI_DEVICE_ID_INTEL_QUARK_X1000 0x0935 #define PCI_DEVICE_ID_INTEL_BYT 0x0f0e #define PCI_DEVICE_ID_INTEL_MRFLD 0x1194 diff --git a/drivers/spi/spi-pxa2xx.c b/drivers/spi/spi-pxa2xx.c index f2a856f6a9..efe76d0c21 100644 --- a/drivers/spi/spi-pxa2xx.c +++ b/drivers/spi/spi-pxa2xx.c @@ -5,27 +5,29 @@ */ #include <linux/acpi.h> +#include <linux/atomic.h> #include <linux/bitops.h> +#include <linux/bug.h> #include <linux/clk.h> #include <linux/delay.h> #include <linux/device.h> #include <linux/dmaengine.h> #include <linux/err.h> -#include <linux/errno.h> #include <linux/gpio/consumer.h> #include <linux/init.h> #include <linux/interrupt.h> +#include <linux/io.h> #include <linux/ioport.h> -#include <linux/kernel.h> -#include <linux/module.h> +#include <linux/math64.h> +#include <linux/minmax.h> #include <linux/mod_devicetable.h> -#include <linux/of.h> +#include <linux/module.h> #include <linux/platform_device.h> #include <linux/pm_runtime.h> #include <linux/property.h> #include <linux/slab.h> +#include <linux/types.h> -#include <linux/spi/pxa2xx_spi.h> #include <linux/spi/spi.h> #include "spi-pxa2xx.h" @@ -64,6 +66,14 @@ MODULE_ALIAS("platform:pxa2xx-spi"); | CE4100_SSCR1_RFT | CE4100_SSCR1_TFT | SSCR1_MWDS \ | SSCR1_SPH | SSCR1_SPO | SSCR1_LBM) +struct chip_data { + u32 cr1; + u32 dds_rate; + u32 threshold; + u16 lpss_rx_threshold; + u16 lpss_tx_threshold; +}; + #define LPSS_GENERAL_REG_RXTO_HOLDOFF_DISABLE BIT(24) #define LPSS_CS_CONTROL_SW_MODE BIT(0) #define LPSS_CS_CONTROL_CS_HIGH BIT(1) @@ -932,11 +942,11 @@ static bool pxa2xx_spi_can_dma(struct spi_controller *controller, struct spi_device *spi, struct spi_transfer *xfer) { - struct chip_data *chip = spi_get_ctldata(spi); + struct driver_data *drv_data = spi_controller_get_devdata(controller); - return chip->enable_dma && + return drv_data->controller_info->enable_dma && xfer->len <= MAX_DMA_LEN && - xfer->len >= chip->dma_burst_size; + xfer->len >= drv_data->controller_info->dma_burst_size; } static int pxa2xx_spi_transfer_one(struct spi_controller *controller, @@ -944,11 
+954,9 @@ static int pxa2xx_spi_transfer_one(struct spi_controller *controller, struct spi_transfer *transfer) { struct driver_data *drv_data = spi_controller_get_devdata(controller); - struct spi_message *message = controller->cur_msg; struct chip_data *chip = spi_get_ctldata(spi); - u32 dma_thresh = chip->dma_threshold; - u32 dma_burst = chip->dma_burst_size; u32 change_mask = pxa2xx_spi_get_ssrc1_change_mask(drv_data); + u32 dma_thresh; u32 clk_div; u8 bits; u32 speed; @@ -958,17 +966,7 @@ static int pxa2xx_spi_transfer_one(struct spi_controller *controller, int dma_mapped; /* Check if we can DMA this transfer */ - if (transfer->len > MAX_DMA_LEN && chip->enable_dma) { - - /* Reject already-mapped transfers; PIO won't always work */ - if (message->is_dma_mapped - || transfer->rx_dma || transfer->tx_dma) { - dev_err(&spi->dev, - "Mapped transfer length of %u is greater than %d\n", - transfer->len, MAX_DMA_LEN); - return -EINVAL; - } - + if (transfer->len > MAX_DMA_LEN && drv_data->controller_info->enable_dma) { /* Warn ... we force this to PIO mode */ dev_warn_ratelimited(&spi->dev, "DMA disabled for transfer length %u greater than %d\n", @@ -1004,19 +1002,8 @@ static int pxa2xx_spi_transfer_one(struct spi_controller *controller, drv_data->read = drv_data->rx ? u32_reader : null_reader; drv_data->write = drv_data->tx ? u32_writer : null_writer; } - /* - * If bits per word is changed in DMA mode, then must check - * the thresholds and burst also. - */ - if (chip->enable_dma) { - if (pxa2xx_spi_set_dma_burst_and_threshold(chip, - spi, - bits, &dma_burst, - &dma_thresh)) - dev_warn_ratelimited(&spi->dev, - "DMA burst size reduced to match bits_per_word\n"); - } + dma_thresh = SSCR1_RxTresh(RX_THRESH_DFLT) | SSCR1_TxTresh(TX_THRESH_DFLT); dma_mapped = controller->can_dma && controller->can_dma(controller, spi, transfer) && controller->cur_msg_mapped; @@ -1079,7 +1066,7 @@ static int pxa2xx_spi_transfer_one(struct spi_controller *controller, pxa_ssp_disable(drv_data->ssp); if (!pxa25x_ssp_comp(drv_data)) - pxa2xx_spi_write(drv_data, SSTO, chip->timeout); + pxa2xx_spi_write(drv_data, SSTO, TIMOUT_DFLT); /* First set CR1 without interrupt and service enables */ pxa2xx_spi_update(drv_data, SSCR1, change_mask, cr1); @@ -1163,7 +1150,6 @@ static int pxa2xx_spi_unprepare_transfer(struct spi_controller *controller) static int setup(struct spi_device *spi) { - struct pxa2xx_spi_chip *chip_info; struct chip_data *chip; const struct lpss_config *config; struct driver_data *drv_data = @@ -1209,42 +1195,19 @@ static int setup(struct spi_device *spi) break; } + if (drv_data->ssp_type == CE4100_SSP) { + if (spi_get_chipselect(spi, 0) > 4) { + dev_err(&spi->dev, "failed setup: cs number must not be > 4.\n"); + return -EINVAL; + } + } + /* Only allocate on the first setup */ chip = spi_get_ctldata(spi); if (!chip) { chip = kzalloc(sizeof(struct chip_data), GFP_KERNEL); if (!chip) return -ENOMEM; - - if (drv_data->ssp_type == CE4100_SSP) { - if (spi_get_chipselect(spi, 0) > 4) { - dev_err(&spi->dev, - "failed setup: cs number must not be > 4.\n"); - kfree(chip); - return -EINVAL; - } - } - chip->enable_dma = drv_data->controller_info->enable_dma; - chip->timeout = TIMOUT_DFLT; - } - - /* - * Protocol drivers may change the chip settings, so... - * if chip_info exists, use it. 
- */ - chip_info = spi->controller_data; - - /* chip_info isn't always needed */ - if (chip_info) { - if (chip_info->timeout) - chip->timeout = chip_info->timeout; - if (chip_info->tx_threshold) - tx_thres = chip_info->tx_threshold; - if (chip_info->tx_hi_threshold) - tx_hi_thres = chip_info->tx_hi_threshold; - if (chip_info->rx_threshold) - rx_thres = chip_info->rx_threshold; - chip->dma_threshold = 0; } chip->cr1 = 0; @@ -1266,25 +1229,6 @@ static int setup(struct spi_device *spi) chip->lpss_tx_threshold = tx_thres; } - /* - * Set DMA burst and threshold outside of chip_info path so that if - * chip_info goes away after setting chip->enable_dma, the burst and - * threshold can still respond to changes in bits_per_word. - */ - if (chip->enable_dma) { - /* Set up legal burst and threshold for DMA */ - if (pxa2xx_spi_set_dma_burst_and_threshold(chip, spi, - spi->bits_per_word, - &chip->dma_burst_size, - &chip->dma_threshold)) { - dev_warn(&spi->dev, - "in setup: DMA burst size reduced to match bits_per_word\n"); - } - dev_dbg(&spi->dev, - "in setup: DMA burst size set to %u\n", - chip->dma_burst_size); - } - switch (drv_data->ssp_type) { case QUARK_X1000_SSP: chip->threshold = (QUARK_X1000_SSCR1_RxTresh(rx_thres) @@ -1326,19 +1270,52 @@ static bool pxa2xx_spi_idma_filter(struct dma_chan *chan, void *param) return param == chan->device->dev; } +static int +pxa2xx_spi_init_ssp(struct platform_device *pdev, struct ssp_device *ssp, enum pxa_ssp_type type) +{ + struct device *dev = &pdev->dev; + struct resource *res; + int status; + u64 uid; + + ssp->mmio_base = devm_platform_get_and_ioremap_resource(pdev, 0, &res); + if (IS_ERR(ssp->mmio_base)) + return PTR_ERR(ssp->mmio_base); + + ssp->phys_base = res->start; + + ssp->clk = devm_clk_get(dev, NULL); + if (IS_ERR(ssp->clk)) + return PTR_ERR(ssp->clk); + + ssp->irq = platform_get_irq(pdev, 0); + if (ssp->irq < 0) + return ssp->irq; + + ssp->type = type; + ssp->dev = dev; + + status = acpi_dev_uid_to_integer(ACPI_COMPANION(dev), &uid); + if (status) + ssp->port_id = -1; + else + ssp->port_id = uid; + + return 0; +} + static struct pxa2xx_spi_controller * pxa2xx_spi_init_pdata(struct platform_device *pdev) { struct pxa2xx_spi_controller *pdata; struct device *dev = &pdev->dev; struct device *parent = dev->parent; - struct ssp_device *ssp; - struct resource *res; enum pxa_ssp_type type = SSP_UNDEFINED; + struct ssp_device *ssp = NULL; const void *match; bool is_lpss_priv; + u32 num_cs = 1; int status; - u64 uid; is_lpss_priv = platform_get_resource_byname(pdev, IORESOURCE_MEM, "lpss_priv"); @@ -1353,6 +1330,12 @@ pxa2xx_spi_init_pdata(struct platform_device *pdev) return ERR_PTR(status); type = (enum pxa_ssp_type)value; + } else { + ssp = pxa_ssp_request(pdev->id, pdev->name); + if (ssp) { + type = ssp->type; + pxa_ssp_free(ssp); + } } /* Validate the SSP type correctness */ @@ -1363,14 +1346,6 @@ pxa2xx_spi_init_pdata(struct platform_device *pdev) if (!pdata) return ERR_PTR(-ENOMEM); - ssp = &pdata->ssp; - - ssp->mmio_base = devm_platform_get_and_ioremap_resource(pdev, 0, &res); - if (IS_ERR(ssp->mmio_base)) - return ERR_CAST(ssp->mmio_base); - - ssp->phys_base = res->start; - /* Platforms with iDMA 64-bit */ if (is_lpss_priv) { pdata->tx_param = parent; @@ -1378,28 +1353,22 @@ pxa2xx_spi_init_pdata(struct platform_device *pdev) pdata->dma_filter = pxa2xx_spi_idma_filter; } - ssp->clk = devm_clk_get(dev, NULL); - if (IS_ERR(ssp->clk)) - return ERR_CAST(ssp->clk); - - ssp->irq = platform_get_irq(pdev, 0); - if (ssp->irq < 0) - return 
ERR_PTR(ssp->irq); - - ssp->type = type; - ssp->dev = dev; - - status = acpi_dev_uid_to_integer(ACPI_COMPANION(dev), &uid); - if (status) - ssp->port_id = -1; - else - ssp->port_id = uid; + /* Read number of chip select pins, if provided */ + device_property_read_u32(dev, "num-cs", &num_cs); + pdata->num_chipselect = num_cs; pdata->is_target = device_property_read_bool(dev, "spi-slave"); - pdata->num_chipselect = 1; pdata->enable_dma = true; pdata->dma_burst_size = 1; + /* If SSP has been already enumerated, use it */ + if (ssp) + return pdata; + + status = pxa2xx_spi_init_ssp(pdev, &pdata->ssp, type); + if (status) + return ERR_PTR(status); + return pdata; } @@ -1446,20 +1415,17 @@ static int pxa2xx_spi_probe(struct platform_device *pdev) platform_info = dev_get_platdata(dev); if (!platform_info) { platform_info = pxa2xx_spi_init_pdata(pdev); - if (IS_ERR(platform_info)) { - dev_err(&pdev->dev, "missing platform data\n"); - return PTR_ERR(platform_info); - } + if (IS_ERR(platform_info)) + return dev_err_probe(dev, PTR_ERR(platform_info), "missing platform data\n"); } + dev_dbg(dev, "DMA burst size set to %u\n", platform_info->dma_burst_size); ssp = pxa_ssp_request(pdev->id, pdev->name); if (!ssp) ssp = &platform_info->ssp; - if (!ssp->mmio_base) { - dev_err(&pdev->dev, "failed to get SSP\n"); - return -ENODEV; - } + if (!ssp->mmio_base) + return dev_err_probe(dev, -ENODEV, "failed to get SSP\n"); if (platform_info->is_target) controller = devm_spi_alloc_target(dev, sizeof(*drv_data)); @@ -1467,8 +1433,7 @@ static int pxa2xx_spi_probe(struct platform_device *pdev) controller = devm_spi_alloc_host(dev, sizeof(*drv_data)); if (!controller) { - dev_err(&pdev->dev, "cannot alloc spi_controller\n"); - status = -ENOMEM; + status = dev_err_probe(dev, -ENOMEM, "cannot alloc spi_controller\n"); goto out_error_controller_alloc; } drv_data = spi_controller_get_devdata(controller); @@ -1522,7 +1487,7 @@ static int pxa2xx_spi_probe(struct platform_device *pdev) status = request_irq(ssp->irq, ssp_int, IRQF_SHARED, dev_name(dev), drv_data); if (status < 0) { - dev_err(&pdev->dev, "cannot get IRQ %d\n", ssp->irq); + dev_err_probe(dev, status, "cannot get IRQ %d\n", ssp->irq); goto out_error_controller_alloc; } @@ -1638,7 +1603,7 @@ static int pxa2xx_spi_probe(struct platform_device *pdev) platform_set_drvdata(pdev, drv_data); status = spi_register_controller(controller); if (status) { - dev_err(&pdev->dev, "problem registering SPI controller\n"); + dev_err_probe(dev, status, "problem registering SPI controller\n"); goto out_error_pm_runtime_enabled; } @@ -1741,7 +1706,6 @@ static const struct dev_pm_ops pxa2xx_spi_pm_ops = { RUNTIME_PM_OPS(pxa2xx_spi_runtime_suspend, pxa2xx_spi_runtime_resume, NULL) }; -#ifdef CONFIG_ACPI static const struct acpi_device_id pxa2xx_spi_acpi_match[] = { { "80860F0E", LPSS_BYT_SSP }, { "8086228E", LPSS_BSW_SSP }, @@ -1752,9 +1716,8 @@ static const struct acpi_device_id pxa2xx_spi_acpi_match[] = { {} }; MODULE_DEVICE_TABLE(acpi, pxa2xx_spi_acpi_match); -#endif -static const struct of_device_id pxa2xx_spi_of_match[] __maybe_unused = { +static const struct of_device_id pxa2xx_spi_of_match[] = { { .compatible = "marvell,mmp2-ssp", .data = (void *)MMP2_SSP }, {} }; @@ -1764,8 +1727,8 @@ static struct platform_driver driver = { .driver = { .name = "pxa2xx-spi", .pm = pm_ptr(&pxa2xx_spi_pm_ops), - .acpi_match_table = ACPI_PTR(pxa2xx_spi_acpi_match), - .of_match_table = of_match_ptr(pxa2xx_spi_of_match), + .acpi_match_table = pxa2xx_spi_acpi_match, + .of_match_table = 
pxa2xx_spi_of_match, }, .probe = pxa2xx_spi_probe, .remove_new = pxa2xx_spi_remove, diff --git a/drivers/spi/spi-pxa2xx.h b/drivers/spi/spi-pxa2xx.h index 45cdbbc71c..93e1e471e1 100644 --- a/drivers/spi/spi-pxa2xx.h +++ b/drivers/spi/spi-pxa2xx.h @@ -7,15 +7,34 @@ #ifndef SPI_PXA2XX_H #define SPI_PXA2XX_H -#include <linux/interrupt.h> -#include <linux/io.h> +#include <linux/dmaengine.h> +#include <linux/irqreturn.h> #include <linux/types.h> #include <linux/sizes.h> #include <linux/pxa2xx_ssp.h> struct gpio_desc; -struct pxa2xx_spi_controller; + +/* + * The platform data for SSP controller devices + * (resides in device.platform_data). + */ +struct pxa2xx_spi_controller { + u8 num_chipselect; + u8 enable_dma; + u8 dma_burst_size; + bool is_target; + + /* DMA engine specific config */ + dma_filter_fn dma_filter; + void *tx_param; + void *rx_param; + + /* For non-PXA arches */ + struct ssp_device ssp; +}; + struct spi_controller; struct spi_device; struct spi_transfer; @@ -56,18 +75,6 @@ struct driver_data { struct gpio_desc *gpiod_ready; }; -struct chip_data { - u32 cr1; - u32 dds_rate; - u32 timeout; - u8 enable_dma; - u32 dma_burst_size; - u32 dma_threshold; - u32 threshold; - u16 lpss_rx_threshold; - u16 lpss_tx_threshold; -}; - static inline u32 pxa2xx_spi_read(const struct driver_data *drv_data, u32 reg) { return pxa_ssp_read_reg(drv_data->ssp, reg); @@ -123,10 +130,5 @@ extern void pxa2xx_spi_dma_start(struct driver_data *drv_data); extern void pxa2xx_spi_dma_stop(struct driver_data *drv_data); extern int pxa2xx_spi_dma_setup(struct driver_data *drv_data); extern void pxa2xx_spi_dma_release(struct driver_data *drv_data); -extern int pxa2xx_spi_set_dma_burst_and_threshold(struct chip_data *chip, - struct spi_device *spi, - u8 bits_per_word, - u32 *burst_code, - u32 *threshold); #endif /* SPI_PXA2XX_H */ diff --git a/drivers/spi/spi-rspi.c b/drivers/spi/spi-rspi.c index 8e81f1a862..7f95d22fb1 100644 --- a/drivers/spi/spi-rspi.c +++ b/drivers/spi/spi-rspi.c @@ -24,7 +24,6 @@ #include <linux/reset.h> #include <linux/sh_dma.h> #include <linux/spi/spi.h> -#include <linux/spi/rspi.h> #include <linux/spinlock.h> #define RSPI_SPCR 0x00 /* Control Register */ @@ -1131,16 +1130,12 @@ static struct dma_chan *rspi_request_dma_chan(struct device *dev, static int rspi_request_dma(struct device *dev, struct spi_controller *ctlr, const struct resource *res) { - const struct rspi_plat_data *rspi_pd = dev_get_platdata(dev); unsigned int dma_tx_id, dma_rx_id; if (dev->of_node) { /* In the OF case we will get the slave IDs from the DT */ dma_tx_id = 0; dma_rx_id = 0; - } else if (rspi_pd && rspi_pd->dma_tx_id && rspi_pd->dma_rx_id) { - dma_tx_id = rspi_pd->dma_tx_id; - dma_rx_id = rspi_pd->dma_rx_id; } else { /* The driver assumes no error. 
*/ return 0; @@ -1290,7 +1285,6 @@ static int rspi_probe(struct platform_device *pdev) struct spi_controller *ctlr; struct rspi_data *rspi; int ret; - const struct rspi_plat_data *rspi_pd; const struct spi_ops *ops; unsigned long clksrc; @@ -1305,11 +1299,7 @@ static int rspi_probe(struct platform_device *pdev) goto error1; } else { ops = (struct spi_ops *)pdev->id_entry->driver_data; - rspi_pd = dev_get_platdata(&pdev->dev); - if (rspi_pd && rspi_pd->num_chipselect) - ctlr->num_chipselect = rspi_pd->num_chipselect; - else - ctlr->num_chipselect = 2; /* default */ + ctlr->num_chipselect = 2; /* default */ } rspi = spi_controller_get_devdata(ctlr); diff --git a/drivers/spi/spi-s3c64xx.c b/drivers/spi/spi-s3c64xx.c index f726d86704..833c58c88e 100644 --- a/drivers/spi/spi-s3c64xx.c +++ b/drivers/spi/spi-s3c64xx.c @@ -950,7 +950,7 @@ static struct s3c64xx_spi_csinfo *s3c64xx_get_target_ctrldata( struct spi_device *spi) { struct s3c64xx_spi_csinfo *cs; - struct device_node *target_np, *data_np = NULL; + struct device_node *target_np; u32 fb_delay = 0; target_np = spi->dev.of_node; @@ -963,7 +963,8 @@ static struct s3c64xx_spi_csinfo *s3c64xx_get_target_ctrldata( if (!cs) return ERR_PTR(-ENOMEM); - data_np = of_get_child_by_name(target_np, "controller-data"); + struct device_node *data_np __free(device_node) = + of_get_child_by_name(target_np, "controller-data"); if (!data_np) { dev_info(&spi->dev, "feedback delay set to default (0)\n"); return cs; @@ -971,7 +972,6 @@ static struct s3c64xx_spi_csinfo *s3c64xx_get_target_ctrldata( of_property_read_u32(data_np, "samsung,spi-feedback-delay", &fb_delay); cs->fb_delay = fb_delay; - of_node_put(data_np); return cs; } diff --git a/drivers/spi/spi-sun4i.c b/drivers/spi/spi-sun4i.c index 11d8bd27b3..2ee6755b43 100644 --- a/drivers/spi/spi-sun4i.c +++ b/drivers/spi/spi-sun4i.c @@ -206,7 +206,8 @@ static int sun4i_spi_transfer_one(struct spi_controller *host, struct spi_transfer *tfr) { struct sun4i_spi *sspi = spi_controller_get_devdata(host); - unsigned int mclk_rate, div, timeout; + unsigned int mclk_rate, div; + unsigned long time_left; unsigned int start, end, tx_time; unsigned int tx_len = 0; int ret = 0; @@ -327,10 +328,10 @@ static int sun4i_spi_transfer_one(struct spi_controller *host, tx_time = max(tfr->len * 8 * 2 / (tfr->speed_hz / 1000), 100U); start = jiffies; - timeout = wait_for_completion_timeout(&sspi->done, - msecs_to_jiffies(tx_time)); + time_left = wait_for_completion_timeout(&sspi->done, + msecs_to_jiffies(tx_time)); end = jiffies; - if (!timeout) { + if (!time_left) { dev_warn(&host->dev, "%s: timeout transferring %u bytes@%iHz for %i(%i)ms", dev_name(&spi->dev), tfr->len, tfr->speed_hz, diff --git a/drivers/spi/spi-sun6i.c b/drivers/spi/spi-sun6i.c index cd018ea1ab..5c26bf0562 100644 --- a/drivers/spi/spi-sun6i.c +++ b/drivers/spi/spi-sun6i.c @@ -277,7 +277,8 @@ static int sun6i_spi_transfer_one(struct spi_controller *host, struct spi_transfer *tfr) { struct sun6i_spi *sspi = spi_controller_get_devdata(host); - unsigned int div, div_cdr1, div_cdr2, timeout; + unsigned int div, div_cdr1, div_cdr2; + unsigned long time_left; unsigned int start, end, tx_time; unsigned int trig_level; unsigned int tx_len = 0, rx_len = 0, nbits = 0; @@ -488,26 +489,26 @@ static int sun6i_spi_transfer_one(struct spi_controller *host, tx_time = spi_controller_xfer_timeout(host, tfr); start = jiffies; - timeout = wait_for_completion_timeout(&sspi->done, - msecs_to_jiffies(tx_time)); + time_left = wait_for_completion_timeout(&sspi->done, + 
msecs_to_jiffies(tx_time)); if (!use_dma) { sun6i_spi_drain_fifo(sspi); } else { - if (timeout && rx_len) { + if (time_left && rx_len) { /* * Even though RX on the peripheral side has finished * RX DMA might still be in flight */ - timeout = wait_for_completion_timeout(&sspi->dma_rx_done, - timeout); - if (!timeout) + time_left = wait_for_completion_timeout(&sspi->dma_rx_done, + time_left); + if (!time_left) dev_warn(&host->dev, "RX DMA timeout\n"); } } end = jiffies; - if (!timeout) { + if (!time_left) { dev_warn(&host->dev, "%s: timeout transferring %u bytes@%iHz for %i(%i)ms", dev_name(&spi->dev), tfr->len, tfr->speed_hz, diff --git a/drivers/spi/spi-xlp.c b/drivers/spi/spi-xlp.c index 49302364b7..2fec18b684 100644 --- a/drivers/spi/spi-xlp.c +++ b/drivers/spi/spi-xlp.c @@ -270,7 +270,7 @@ static int xlp_spi_xfer_block(struct xlp_spi_priv *xs, const unsigned char *tx_buf, unsigned char *rx_buf, int xfer_len, int cmd_cont) { - int timeout; + unsigned long time_left; u32 intr_mask = 0; xs->tx_buf = tx_buf; @@ -299,11 +299,11 @@ static int xlp_spi_xfer_block(struct xlp_spi_priv *xs, intr_mask |= XLP_SPI_INTR_DONE; xlp_spi_reg_write(xs, xs->cs, XLP_SPI_INTR_EN, intr_mask); - timeout = wait_for_completion_timeout(&xs->done, - msecs_to_jiffies(1000)); + time_left = wait_for_completion_timeout(&xs->done, + msecs_to_jiffies(1000)); /* Disable interrupts */ xlp_spi_reg_write(xs, xs->cs, XLP_SPI_INTR_EN, 0x0); - if (!timeout) { + if (!time_left) { dev_err(&xs->dev, "xfer timedout!\n"); goto out; } diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c index fcc39523d6..0f04e832f9 100644 --- a/drivers/spi/spi.c +++ b/drivers/spi/spi.c @@ -312,7 +312,7 @@ static const struct attribute_group *spi_master_groups[] = { static void spi_statistics_add_transfer_stats(struct spi_statistics __percpu *pcpu_stats, struct spi_transfer *xfer, - struct spi_controller *ctlr) + struct spi_message *msg) { int l2len = min(fls(xfer->len), SPI_STATISTICS_HISTO_SIZE) - 1; struct spi_statistics *stats; @@ -328,11 +328,9 @@ static void spi_statistics_add_transfer_stats(struct spi_statistics __percpu *pc u64_stats_inc(&stats->transfer_bytes_histo[l2len]); u64_stats_add(&stats->bytes, xfer->len); - if ((xfer->tx_buf) && - (xfer->tx_buf != ctlr->dummy_tx)) + if (spi_valid_txbuf(msg, xfer)) u64_stats_add(&stats->bytes_tx, xfer->len); - if ((xfer->rx_buf) && - (xfer->rx_buf != ctlr->dummy_rx)) + if (spi_valid_rxbuf(msg, xfer)) u64_stats_add(&stats->bytes_rx, xfer->len); u64_stats_update_end(&stats->syncp); @@ -597,10 +595,16 @@ EXPORT_SYMBOL_GPL(spi_alloc_device); static void spi_dev_set_name(struct spi_device *spi) { - struct acpi_device *adev = ACPI_COMPANION(&spi->dev); + struct device *dev = &spi->dev; + struct fwnode_handle *fwnode = dev_fwnode(dev); - if (adev) { - dev_set_name(&spi->dev, "spi-%s", acpi_dev_name(adev)); + if (is_acpi_device_node(fwnode)) { + dev_set_name(dev, "spi-%s", acpi_dev_name(to_acpi_device_node(fwnode))); + return; + } + + if (is_software_node(fwnode)) { + dev_set_name(dev, "spi-%pfwP", fwnode); return; } @@ -824,14 +828,10 @@ struct spi_device *spi_new_device(struct spi_controller *ctlr, proxy->controller_data = chip->controller_data; proxy->controller_state = NULL; /* - * spi->chip_select[i] gives the corresponding physical CS for logical CS i - * logical CS number is represented by setting the ith bit in spi->cs_index_mask - * So, for example, if spi->cs_index_mask = 0x01 then logical CS number is 0 and - * spi->chip_select[0] will give the physical CS. 
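[Editor's note, not part of the patch] The sun6i and xlp hunks above replace "int timeout" with "unsigned long time_left", matching the return type of wait_for_completion_timeout(), which yields 0 on timeout and otherwise the number of jiffies left. A minimal sketch of the resulting idiom (hypothetical helper, not taken from either driver):

/*
 * Illustrative sketch only. wait_for_completion_timeout() returns an
 * unsigned long; storing it in an int (as the old code did) truncates the
 * value, while this form keeps the full range and makes the 0 == timeout
 * check explicit.
 */
#include <linux/completion.h>
#include <linux/errno.h>
#include <linux/jiffies.h>

static int my_wait_xfer_done(struct completion *done, unsigned int timeout_ms)
{
	unsigned long time_left;

	time_left = wait_for_completion_timeout(done,
						msecs_to_jiffies(timeout_ms));
	if (!time_left)
		return -ETIMEDOUT;

	/* time_left holds the unused jiffies; could be logged if useful */
	return 0;
}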
- * By default spi->chip_select[0] will hold the physical CS number so, set - * spi->cs_index_mask as 0x01. + * By default spi->chip_select[0] will hold the physical CS number, + * so set bit 0 in spi->cs_index_mask. */ - proxy->cs_index_mask = 0x01; + proxy->cs_index_mask = BIT(0); if (chip->swnode) { status = device_add_software_node(&proxy->dev, chip->swnode); @@ -1024,20 +1024,45 @@ static void spi_res_release(struct spi_controller *ctlr, struct spi_message *mes } /*-------------------------------------------------------------------------*/ +#define spi_for_each_valid_cs(spi, idx) \ + for (idx = 0; idx < SPI_CS_CNT_MAX; idx++) \ + if (!(spi->cs_index_mask & BIT(idx))) {} else + static inline bool spi_is_last_cs(struct spi_device *spi) { u8 idx; bool last = false; - for (idx = 0; idx < SPI_CS_CNT_MAX; idx++) { - if (spi->cs_index_mask & BIT(idx)) { - if (spi->controller->last_cs[idx] == spi_get_chipselect(spi, idx)) - last = true; - } + spi_for_each_valid_cs(spi, idx) { + if (spi->controller->last_cs[idx] == spi_get_chipselect(spi, idx)) + last = true; } return last; } +static void spi_toggle_csgpiod(struct spi_device *spi, u8 idx, bool enable, bool activate) +{ + /* + * Historically ACPI has no means of the GPIO polarity and + * thus the SPISerialBus() resource defines it on the per-chip + * basis. In order to avoid a chain of negations, the GPIO + * polarity is considered being Active High. Even for the cases + * when _DSD() is involved (in the updated versions of ACPI) + * the GPIO CS polarity must be defined Active High to avoid + * ambiguity. That's why we use enable, that takes SPI_CS_HIGH + * into account. + */ + if (has_acpi_companion(&spi->dev)) + gpiod_set_value_cansleep(spi_get_csgpiod(spi, idx), !enable); + else + /* Polarity handled by GPIO library */ + gpiod_set_value_cansleep(spi_get_csgpiod(spi, idx), activate); + + if (activate) + spi_delay_exec(&spi->cs_setup, NULL); + else + spi_delay_exec(&spi->cs_inactive, NULL); +} static void spi_set_cs(struct spi_device *spi, bool enable, bool force) { @@ -1074,31 +1099,9 @@ static void spi_set_cs(struct spi_device *spi, bool enable, bool force) if (spi_is_csgpiod(spi)) { if (!(spi->mode & SPI_NO_CS)) { - /* - * Historically ACPI has no means of the GPIO polarity and - * thus the SPISerialBus() resource defines it on the per-chip - * basis. In order to avoid a chain of negations, the GPIO - * polarity is considered being Active High. Even for the cases - * when _DSD() is involved (in the updated versions of ACPI) - * the GPIO CS polarity must be defined Active High to avoid - * ambiguity. That's why we use enable, that takes SPI_CS_HIGH - * into account. 
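[Editor's note, not part of the patch] The new spi_for_each_valid_cs() helper above relies on the empty-if/else macro trick: indices missing from cs_index_mask fall into the empty block, and the caller's statement becomes the else branch, so it cannot mis-bind to the macro's own if. A standalone sketch of the same shape, with a made-up macro name, compiles as ordinary C:

/*
 * Illustrative sketch only -- not from the kernel tree. Same "filtering
 * for-loop" idiom as spi_for_each_valid_cs(): the body supplied by the
 * caller runs only for bits that are set in the mask.
 */
#include <stdio.h>

#define for_each_set_bit_u8(mask, idx)			\
	for (idx = 0; idx < 8; idx++)			\
		if (!((mask) & (1u << (idx)))) {} else

int main(void)
{
	unsigned int mask = 0x15;	/* bits 0, 2 and 4 set */
	unsigned int idx;

	for_each_set_bit_u8(mask, idx)
		printf("bit %u is set\n", idx);

	return 0;
}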
- */ - for (idx = 0; idx < SPI_CS_CNT_MAX; idx++) { - if ((spi->cs_index_mask & BIT(idx)) && spi_get_csgpiod(spi, idx)) { - if (has_acpi_companion(&spi->dev)) - gpiod_set_value_cansleep(spi_get_csgpiod(spi, idx), - !enable); - else - /* Polarity handled by GPIO library */ - gpiod_set_value_cansleep(spi_get_csgpiod(spi, idx), - activate); - - if (activate) - spi_delay_exec(&spi->cs_setup, NULL); - else - spi_delay_exec(&spi->cs_inactive, NULL); - } + spi_for_each_valid_cs(spi, idx) { + if (spi_get_csgpiod(spi, idx)) + spi_toggle_csgpiod(spi, idx, enable, activate); } } /* Some SPI masters need both GPIO CS & slave_select */ @@ -1207,12 +1210,10 @@ static void spi_unmap_buf_attrs(struct spi_controller *ctlr, enum dma_data_direction dir, unsigned long attrs) { - if (sgt->orig_nents) { - dma_unmap_sgtable(dev, sgt, dir, attrs); - sg_free_table(sgt); - sgt->orig_nents = 0; - sgt->nents = 0; - } + dma_unmap_sgtable(dev, sgt, dir, attrs); + sg_free_table(sgt); + sgt->orig_nents = 0; + sgt->nents = 0; } void spi_unmap_buf(struct spi_controller *ctlr, struct device *dev, @@ -1221,6 +1222,11 @@ void spi_unmap_buf(struct spi_controller *ctlr, struct device *dev, spi_unmap_buf_attrs(ctlr, dev, sgt, dir, 0); } +/* Dummy SG for unidirect transfers */ +static struct scatterlist dummy_sg = { + .page_link = SG_END, +}; + static int __spi_map_msg(struct spi_controller *ctlr, struct spi_message *msg) { struct device *tx_dev, *rx_dev; @@ -1259,6 +1265,8 @@ static int __spi_map_msg(struct spi_controller *ctlr, struct spi_message *msg) attrs); if (ret != 0) return ret; + } else { + xfer->tx_sg.sgl = &dummy_sg; } if (xfer->rx_buf != NULL) { @@ -1272,6 +1280,8 @@ static int __spi_map_msg(struct spi_controller *ctlr, struct spi_message *msg) return ret; } + } else { + xfer->rx_sg.sgl = &dummy_sg; } } /* No transfer has been mapped, bail out with success */ @@ -1312,7 +1322,7 @@ static int __spi_unmap_msg(struct spi_controller *ctlr, struct spi_message *msg) return 0; } -static void spi_dma_sync_for_device(struct spi_controller *ctlr, +static void spi_dma_sync_for_device(struct spi_controller *ctlr, struct spi_message *msg, struct spi_transfer *xfer) { struct device *rx_dev = ctlr->cur_rx_dma_dev; @@ -1321,13 +1331,14 @@ static void spi_dma_sync_for_device(struct spi_controller *ctlr, if (!ctlr->cur_msg_mapped) return; - if (xfer->tx_sg.orig_nents) - dma_sync_sgtable_for_device(tx_dev, &xfer->tx_sg, DMA_TO_DEVICE); - if (xfer->rx_sg.orig_nents) - dma_sync_sgtable_for_device(rx_dev, &xfer->rx_sg, DMA_FROM_DEVICE); + if (!ctlr->can_dma(ctlr, msg->spi, xfer)) + return; + + dma_sync_sgtable_for_device(tx_dev, &xfer->tx_sg, DMA_TO_DEVICE); + dma_sync_sgtable_for_device(rx_dev, &xfer->rx_sg, DMA_FROM_DEVICE); } -static void spi_dma_sync_for_cpu(struct spi_controller *ctlr, +static void spi_dma_sync_for_cpu(struct spi_controller *ctlr, struct spi_message *msg, struct spi_transfer *xfer) { struct device *rx_dev = ctlr->cur_rx_dma_dev; @@ -1336,10 +1347,11 @@ static void spi_dma_sync_for_cpu(struct spi_controller *ctlr, if (!ctlr->cur_msg_mapped) return; - if (xfer->rx_sg.orig_nents) - dma_sync_sgtable_for_cpu(rx_dev, &xfer->rx_sg, DMA_FROM_DEVICE); - if (xfer->tx_sg.orig_nents) - dma_sync_sgtable_for_cpu(tx_dev, &xfer->tx_sg, DMA_TO_DEVICE); + if (!ctlr->can_dma(ctlr, msg->spi, xfer)) + return; + + dma_sync_sgtable_for_cpu(rx_dev, &xfer->rx_sg, DMA_FROM_DEVICE); + dma_sync_sgtable_for_cpu(tx_dev, &xfer->tx_sg, DMA_TO_DEVICE); } #else /* !CONFIG_HAS_DMA */ static inline int __spi_map_msg(struct spi_controller *ctlr, @@ 
-1355,11 +1367,13 @@ static inline int __spi_unmap_msg(struct spi_controller *ctlr, } static void spi_dma_sync_for_device(struct spi_controller *ctrl, + struct spi_message *msg, struct spi_transfer *xfer) { } static void spi_dma_sync_for_cpu(struct spi_controller *ctrl, + struct spi_message *msg, struct spi_transfer *xfer) { } @@ -1619,8 +1633,8 @@ static int spi_transfer_one_message(struct spi_controller *ctlr, list_for_each_entry(xfer, &msg->transfers, transfer_list) { trace_spi_transfer_start(msg, xfer); - spi_statistics_add_transfer_stats(statm, xfer, ctlr); - spi_statistics_add_transfer_stats(stats, xfer, ctlr); + spi_statistics_add_transfer_stats(statm, xfer, msg); + spi_statistics_add_transfer_stats(stats, xfer, msg); if (!ctlr->ptp_sts_supported) { xfer->ptp_sts_word_pre = 0; @@ -1631,10 +1645,10 @@ static int spi_transfer_one_message(struct spi_controller *ctlr, reinit_completion(&ctlr->xfer_completion); fallback_pio: - spi_dma_sync_for_device(ctlr, xfer); + spi_dma_sync_for_device(ctlr, msg, xfer); ret = ctlr->transfer_one(ctlr, msg->spi, xfer); if (ret < 0) { - spi_dma_sync_for_cpu(ctlr, xfer); + spi_dma_sync_for_cpu(ctlr, msg, xfer); if (ctlr->cur_msg_mapped && (xfer->error & SPI_TRANS_FAIL_NO_START)) { @@ -1659,7 +1673,7 @@ fallback_pio: msg->status = ret; } - spi_dma_sync_for_cpu(ctlr, xfer); + spi_dma_sync_for_cpu(ctlr, msg, xfer); } else { if (xfer->len) dev_err(&msg->spi->dev, @@ -3716,9 +3730,6 @@ static int __spi_split_transfer_maxsize(struct spi_controller *ctlr, * to the same values as *xferp, so tx_buf, rx_buf and len * are all identical (as well as most others) * so we just have to fix up len and the pointers. - * - * This also includes support for the depreciated - * spi_message.is_dma_mapped interface. */ /* @@ -3732,12 +3743,8 @@ static int __spi_split_transfer_maxsize(struct spi_controller *ctlr, /* Update rx_buf, tx_buf and DMA */ if (xfers[i].rx_buf) xfers[i].rx_buf += offset; - if (xfers[i].rx_dma) - xfers[i].rx_dma += offset; if (xfers[i].tx_buf) xfers[i].tx_buf += offset; - if (xfers[i].tx_dma) - xfers[i].tx_dma += offset; /* Update length */ xfers[i].len = min(maxsize, xfers[i].len - offset); diff --git a/drivers/spi/spidev.c b/drivers/spi/spidev.c index 95fb5f1c91..05e6d007f9 100644 --- a/drivers/spi/spidev.c +++ b/drivers/spi/spidev.c @@ -734,6 +734,7 @@ static const struct of_device_id spidev_dt_ids[] = { { .compatible = "lwn,bk4", .data = &spidev_of_check }, { .compatible = "menlo,m53cpld", .data = &spidev_of_check }, { .compatible = "micron,spi-authenta", .data = &spidev_of_check }, + { .compatible = "rohm,bh2228fv", .data = &spidev_of_check }, { .compatible = "rohm,dh2228fv", .data = &spidev_of_check }, { .compatible = "semtech,sx1301", .data = &spidev_of_check }, { .compatible = "silabs,em3581", .data = &spidev_of_check }, |
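[Editor's note, not part of the patch] The spidev hunk above only adds "rohm,bh2228fv" to the compatible table; for completeness, a hedged userspace sketch of exercising such a spidev-bound device through the character device (the node path, SPI mode, clock rate and command byte are assumptions, not taken from any datasheet):

/*
 * Illustrative sketch only: open a spidev node, set mode and speed, and
 * run one full-duplex 4-byte transfer.
 */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/spi/spidev.h>

int main(void)
{
	uint8_t tx[4] = { 0x9f, 0x00, 0x00, 0x00 };	/* example command byte */
	uint8_t rx[4] = { 0 };
	uint32_t speed = 1000000;			/* assumed 1 MHz */
	uint8_t mode = SPI_MODE_0;			/* assumed mode */
	struct spi_ioc_transfer xfer = {
		.tx_buf = (unsigned long)tx,
		.rx_buf = (unsigned long)rx,
		.len = sizeof(tx),
		.speed_hz = speed,
		.bits_per_word = 8,
	};
	int fd = open("/dev/spidev0.0", O_RDWR);	/* assumed bus/CS */

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (ioctl(fd, SPI_IOC_WR_MODE, &mode) < 0 ||
	    ioctl(fd, SPI_IOC_WR_MAX_SPEED_HZ, &speed) < 0 ||
	    ioctl(fd, SPI_IOC_MESSAGE(1), &xfer) < 0) {
		perror("ioctl");
		close(fd);
		return 1;
	}
	printf("rx: %02x %02x %02x %02x\n", rx[0], rx[1], rx[2], rx[3]);
	close(fd);
	return 0;
}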