From ace9429bb58fd418f0c81d4c2835699bddf6bde6 Mon Sep 17 00:00:00 2001
From: Daniel Baumann <daniel.baumann@progress-linux.org>
Date: Thu, 11 Apr 2024 10:27:49 +0200
Subject: Adding upstream version 6.6.15.

Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
---
 drivers/net/can/m_can/Kconfig           |   35 +
 drivers/net/can/m_can/Makefile          |   13 +
 drivers/net/can/m_can/m_can.c           | 2169 +++++++++++++++++++++++++++++++
 drivers/net/can/m_can/m_can.h           |  110 ++
 drivers/net/can/m_can/m_can_pci.c       |  206 +++
 drivers/net/can/m_can/m_can_platform.c  |  245 ++++
 drivers/net/can/m_can/tcan4x5x-core.c   |  522 ++++++++
 drivers/net/can/m_can/tcan4x5x-regmap.c |  165 +++
 drivers/net/can/m_can/tcan4x5x.h        |   56 +
 9 files changed, 3521 insertions(+)
 create mode 100644 drivers/net/can/m_can/Kconfig
 create mode 100644 drivers/net/can/m_can/Makefile
 create mode 100644 drivers/net/can/m_can/m_can.c
 create mode 100644 drivers/net/can/m_can/m_can.h
 create mode 100644 drivers/net/can/m_can/m_can_pci.c
 create mode 100644 drivers/net/can/m_can/m_can_platform.c
 create mode 100644 drivers/net/can/m_can/tcan4x5x-core.c
 create mode 100644 drivers/net/can/m_can/tcan4x5x-regmap.c
 create mode 100644 drivers/net/can/m_can/tcan4x5x.h
(limited to 'drivers/net/can/m_can')

diff --git a/drivers/net/can/m_can/Kconfig b/drivers/net/can/m_can/Kconfig
new file mode 100644
index 0000000000..fc2afab362
--- /dev/null
+++ b/drivers/net/can/m_can/Kconfig
@@ -0,0 +1,35 @@
+# SPDX-License-Identifier: GPL-2.0-only
+menuconfig CAN_M_CAN
+	tristate "Bosch M_CAN support"
+	select CAN_RX_OFFLOAD
+	help
+	  Say Y here if you want support for the Bosch M_CAN controller framework.
+	  This is common support for devices that embed the Bosch M_CAN IP.
+
+if CAN_M_CAN
+
+config CAN_M_CAN_PCI
+	tristate "Generic PCI Bus based M_CAN driver"
+	depends on PCI
+	help
+	  Say Y here if you want support for the Bosch M_CAN controller
+	  connected to the PCI bus.
+
+config CAN_M_CAN_PLATFORM
+	tristate "Bosch M_CAN support for io-mapped devices"
+	depends on HAS_IOMEM
+	help
+	  Say Y here if you want support for an IO-mapped Bosch M_CAN controller.
+	  This support is for devices that have the Bosch M_CAN controller
+	  IP embedded into the device and the IP is IO-mapped to the processor.
+
+config CAN_M_CAN_TCAN4X5X
+	depends on SPI
+	select REGMAP_SPI
+	tristate "TCAN4X5X M_CAN device"
+	help
+	  Say Y here if you want support for the Texas Instruments TCAN4x5x
+	  M_CAN controller. This device is a peripheral device that uses the
+	  SPI bus for communication.
+
+endif
diff --git a/drivers/net/can/m_can/Makefile b/drivers/net/can/m_can/Makefile
new file mode 100644
index 0000000000..d717bbc9e0
--- /dev/null
+++ b/drivers/net/can/m_can/Makefile
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Makefile for the Bosch M_CAN controller driver.
+#
+
+obj-$(CONFIG_CAN_M_CAN) += m_can.o
+obj-$(CONFIG_CAN_M_CAN_PCI) += m_can_pci.o
+obj-$(CONFIG_CAN_M_CAN_PLATFORM) += m_can_platform.o
+obj-$(CONFIG_CAN_M_CAN_TCAN4X5X) += tcan4x5x.o
+
+tcan4x5x-objs :=
+tcan4x5x-objs += tcan4x5x-core.o
+tcan4x5x-objs += tcan4x5x-regmap.o
diff --git a/drivers/net/can/m_can/m_can.c b/drivers/net/can/m_can/m_can.c
new file mode 100644
index 0000000000..16ecc11c7f
--- /dev/null
+++ b/drivers/net/can/m_can/m_can.c
@@ -0,0 +1,2169 @@
+// SPDX-License-Identifier: GPL-2.0
+// CAN bus driver for Bosch M_CAN controller
+// Copyright (C) 2014 Freescale Semiconductor, Inc.
+//	Dong Aisheng <b29396@freescale.com>
+// Copyright (C) 2018-19 Texas Instruments Incorporated - http://www.ti.com/
+
+/* Bosch M_CAN user manual can be obtained from:
+ * https://github.com/linux-can/can-doc/tree/master/m_can
+ */
+
+#include <linux/bitfield.h>
+#include <linux/can/dev.h>
+#include <linux/ethtool.h>
+#include <linux/hrtimer.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/of.h>
+#include <linux/phy/phy.h>
+#include <linux/pinctrl/consumer.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+
+#include "m_can.h"
+
+/* registers definition */
+enum m_can_reg {
+	M_CAN_CREL	= 0x0,
+	M_CAN_ENDN	= 0x4,
+	M_CAN_CUST	= 0x8,
+	M_CAN_DBTP	= 0xc,
+	M_CAN_TEST	= 0x10,
+	M_CAN_RWD	= 0x14,
+	M_CAN_CCCR	= 0x18,
+	M_CAN_NBTP	= 0x1c,
+	M_CAN_TSCC	= 0x20,
+	M_CAN_TSCV	= 0x24,
+	M_CAN_TOCC	= 0x28,
+	M_CAN_TOCV	= 0x2c,
+	M_CAN_ECR	= 0x40,
+	M_CAN_PSR	= 0x44,
+	/* TDCR Register only available for version >=3.1.x */
+	M_CAN_TDCR	= 0x48,
+	M_CAN_IR	= 0x50,
+	M_CAN_IE	= 0x54,
+	M_CAN_ILS	= 0x58,
+	M_CAN_ILE	= 0x5c,
+	M_CAN_GFC	= 0x80,
+	M_CAN_SIDFC	= 0x84,
+	M_CAN_XIDFC	= 0x88,
+	M_CAN_XIDAM	= 0x90,
+	M_CAN_HPMS	= 0x94,
+	M_CAN_NDAT1	= 0x98,
+	M_CAN_NDAT2	= 0x9c,
+	M_CAN_RXF0C	= 0xa0,
+	M_CAN_RXF0S	= 0xa4,
+	M_CAN_RXF0A	= 0xa8,
+	M_CAN_RXBC	= 0xac,
+	M_CAN_RXF1C	= 0xb0,
+	M_CAN_RXF1S	= 0xb4,
+	M_CAN_RXF1A	= 0xb8,
+	M_CAN_RXESC	= 0xbc,
+	M_CAN_TXBC	= 0xc0,
+	M_CAN_TXFQS	= 0xc4,
+	M_CAN_TXESC	= 0xc8,
+	M_CAN_TXBRP	= 0xcc,
+	M_CAN_TXBAR	= 0xd0,
+	M_CAN_TXBCR	= 0xd4,
+	M_CAN_TXBTO	= 0xd8,
+	M_CAN_TXBCF	= 0xdc,
+	M_CAN_TXBTIE	= 0xe0,
+	M_CAN_TXBCIE	= 0xe4,
+	M_CAN_TXEFC	= 0xf0,
+	M_CAN_TXEFS	= 0xf4,
+	M_CAN_TXEFA	= 0xf8,
+};
+
+/* message ram configuration data length */
+#define MRAM_CFG_LEN 8
+
+/* Core Release Register (CREL) */
+#define CREL_REL_MASK		GENMASK(31, 28)
+#define CREL_STEP_MASK		GENMASK(27, 24)
+#define CREL_SUBSTEP_MASK	GENMASK(23, 20)
+
+/* Data Bit Timing & Prescaler Register (DBTP) */
+#define DBTP_TDC		BIT(23)
+#define DBTP_DBRP_MASK		GENMASK(20, 16)
+#define DBTP_DTSEG1_MASK	GENMASK(12, 8)
+#define DBTP_DTSEG2_MASK	GENMASK(7, 4)
+#define DBTP_DSJW_MASK		GENMASK(3, 0)
+
+/* Transmitter Delay Compensation Register (TDCR) */
+#define TDCR_TDCO_MASK		GENMASK(14, 8)
+#define TDCR_TDCF_MASK		GENMASK(6, 0)
+
+/* Test Register (TEST) */
+#define TEST_LBCK		BIT(4)
+
+/* CC Control Register (CCCR) */
+#define CCCR_TXP		BIT(14)
+#define CCCR_TEST		BIT(7)
+#define CCCR_DAR		BIT(6)
+#define CCCR_MON		BIT(5)
+#define CCCR_CSR		BIT(4)
+#define CCCR_CSA		BIT(3)
+#define CCCR_ASM		BIT(2)
+#define CCCR_CCE		BIT(1)
+#define CCCR_INIT		BIT(0)
+/* for version 3.0.x */
+#define CCCR_CMR_MASK		GENMASK(11, 10)
+#define CCCR_CMR_CANFD		0x1
+#define CCCR_CMR_CANFD_BRS	0x2
+#define CCCR_CMR_CAN		0x3
+#define CCCR_CME_MASK		GENMASK(9, 8)
+#define CCCR_CME_CAN		0
+#define CCCR_CME_CANFD		0x1
+#define CCCR_CME_CANFD_BRS	0x2
+/* for version >=3.1.x */
+#define CCCR_EFBI		BIT(13)
+#define CCCR_PXHD		BIT(12)
+#define CCCR_BRSE		BIT(9)
+#define CCCR_FDOE		BIT(8)
+/* for version >=3.2.x */
+#define CCCR_NISO		BIT(15)
+/* for version >=3.3.x */
+#define CCCR_WMM		BIT(11)
+#define CCCR_UTSU		BIT(10)
+
+/* Nominal Bit Timing & Prescaler Register (NBTP) */
+#define NBTP_NSJW_MASK		GENMASK(31, 25)
+#define NBTP_NBRP_MASK		GENMASK(24, 16)
+#define NBTP_NTSEG1_MASK	GENMASK(15, 8)
+#define NBTP_NTSEG2_MASK	GENMASK(6, 0)
+
+/* Timestamp Counter Configuration Register (TSCC) */
+#define TSCC_TCP_MASK		GENMASK(19, 16)
+#define TSCC_TSS_MASK		GENMASK(1, 0)
+#define TSCC_TSS_DISABLE	0x0
+#define TSCC_TSS_INTERNAL	0x1
+#define TSCC_TSS_EXTERNAL	0x2
+
+/* Timestamp Counter Value Register (TSCV) */
+#define TSCV_TSC_MASK		GENMASK(15, 0)
+
+/* Error Counter Register (ECR) */
+#define ECR_RP			BIT(15)
+#define 
ECR_REC_MASK GENMASK(14, 8) +#define ECR_TEC_MASK GENMASK(7, 0) + +/* Protocol Status Register (PSR) */ +#define PSR_BO BIT(7) +#define PSR_EW BIT(6) +#define PSR_EP BIT(5) +#define PSR_LEC_MASK GENMASK(2, 0) +#define PSR_DLEC_MASK GENMASK(10, 8) + +/* Interrupt Register (IR) */ +#define IR_ALL_INT 0xffffffff + +/* Renamed bits for versions > 3.1.x */ +#define IR_ARA BIT(29) +#define IR_PED BIT(28) +#define IR_PEA BIT(27) + +/* Bits for version 3.0.x */ +#define IR_STE BIT(31) +#define IR_FOE BIT(30) +#define IR_ACKE BIT(29) +#define IR_BE BIT(28) +#define IR_CRCE BIT(27) +#define IR_WDI BIT(26) +#define IR_BO BIT(25) +#define IR_EW BIT(24) +#define IR_EP BIT(23) +#define IR_ELO BIT(22) +#define IR_BEU BIT(21) +#define IR_BEC BIT(20) +#define IR_DRX BIT(19) +#define IR_TOO BIT(18) +#define IR_MRAF BIT(17) +#define IR_TSW BIT(16) +#define IR_TEFL BIT(15) +#define IR_TEFF BIT(14) +#define IR_TEFW BIT(13) +#define IR_TEFN BIT(12) +#define IR_TFE BIT(11) +#define IR_TCF BIT(10) +#define IR_TC BIT(9) +#define IR_HPM BIT(8) +#define IR_RF1L BIT(7) +#define IR_RF1F BIT(6) +#define IR_RF1W BIT(5) +#define IR_RF1N BIT(4) +#define IR_RF0L BIT(3) +#define IR_RF0F BIT(2) +#define IR_RF0W BIT(1) +#define IR_RF0N BIT(0) +#define IR_ERR_STATE (IR_BO | IR_EW | IR_EP) + +/* Interrupts for version 3.0.x */ +#define IR_ERR_LEC_30X (IR_STE | IR_FOE | IR_ACKE | IR_BE | IR_CRCE) +#define IR_ERR_BUS_30X (IR_ERR_LEC_30X | IR_WDI | IR_BEU | IR_BEC | \ + IR_TOO | IR_MRAF | IR_TSW | IR_TEFL | IR_RF1L | \ + IR_RF0L) +#define IR_ERR_ALL_30X (IR_ERR_STATE | IR_ERR_BUS_30X) + +/* Interrupts for version >= 3.1.x */ +#define IR_ERR_LEC_31X (IR_PED | IR_PEA) +#define IR_ERR_BUS_31X (IR_ERR_LEC_31X | IR_WDI | IR_BEU | IR_BEC | \ + IR_TOO | IR_MRAF | IR_TSW | IR_TEFL | IR_RF1L | \ + IR_RF0L) +#define IR_ERR_ALL_31X (IR_ERR_STATE | IR_ERR_BUS_31X) + +/* Interrupt Line Select (ILS) */ +#define ILS_ALL_INT0 0x0 +#define ILS_ALL_INT1 0xFFFFFFFF + +/* Interrupt Line Enable (ILE) */ +#define ILE_EINT1 BIT(1) +#define ILE_EINT0 BIT(0) + +/* Rx FIFO 0/1 Configuration (RXF0C/RXF1C) */ +#define RXFC_FWM_MASK GENMASK(30, 24) +#define RXFC_FS_MASK GENMASK(22, 16) + +/* Rx FIFO 0/1 Status (RXF0S/RXF1S) */ +#define RXFS_RFL BIT(25) +#define RXFS_FF BIT(24) +#define RXFS_FPI_MASK GENMASK(21, 16) +#define RXFS_FGI_MASK GENMASK(13, 8) +#define RXFS_FFL_MASK GENMASK(6, 0) + +/* Rx Buffer / FIFO Element Size Configuration (RXESC) */ +#define RXESC_RBDS_MASK GENMASK(10, 8) +#define RXESC_F1DS_MASK GENMASK(6, 4) +#define RXESC_F0DS_MASK GENMASK(2, 0) +#define RXESC_64B 0x7 + +/* Tx Buffer Configuration (TXBC) */ +#define TXBC_TFQS_MASK GENMASK(29, 24) +#define TXBC_NDTB_MASK GENMASK(21, 16) + +/* Tx FIFO/Queue Status (TXFQS) */ +#define TXFQS_TFQF BIT(21) +#define TXFQS_TFQPI_MASK GENMASK(20, 16) +#define TXFQS_TFGI_MASK GENMASK(12, 8) +#define TXFQS_TFFL_MASK GENMASK(5, 0) + +/* Tx Buffer Element Size Configuration (TXESC) */ +#define TXESC_TBDS_MASK GENMASK(2, 0) +#define TXESC_TBDS_64B 0x7 + +/* Tx Event FIFO Configuration (TXEFC) */ +#define TXEFC_EFS_MASK GENMASK(21, 16) + +/* Tx Event FIFO Status (TXEFS) */ +#define TXEFS_TEFL BIT(25) +#define TXEFS_EFF BIT(24) +#define TXEFS_EFGI_MASK GENMASK(12, 8) +#define TXEFS_EFFL_MASK GENMASK(5, 0) + +/* Tx Event FIFO Acknowledge (TXEFA) */ +#define TXEFA_EFAI_MASK GENMASK(4, 0) + +/* Message RAM Configuration (in bytes) */ +#define SIDF_ELEMENT_SIZE 4 +#define XIDF_ELEMENT_SIZE 8 +#define RXF0_ELEMENT_SIZE 72 +#define RXF1_ELEMENT_SIZE 72 +#define RXB_ELEMENT_SIZE 72 +#define TXE_ELEMENT_SIZE 8 
+#define TXB_ELEMENT_SIZE 72 + +/* Message RAM Elements */ +#define M_CAN_FIFO_ID 0x0 +#define M_CAN_FIFO_DLC 0x4 +#define M_CAN_FIFO_DATA 0x8 + +/* Rx Buffer Element */ +/* R0 */ +#define RX_BUF_ESI BIT(31) +#define RX_BUF_XTD BIT(30) +#define RX_BUF_RTR BIT(29) +/* R1 */ +#define RX_BUF_ANMF BIT(31) +#define RX_BUF_FDF BIT(21) +#define RX_BUF_BRS BIT(20) +#define RX_BUF_RXTS_MASK GENMASK(15, 0) + +/* Tx Buffer Element */ +/* T0 */ +#define TX_BUF_ESI BIT(31) +#define TX_BUF_XTD BIT(30) +#define TX_BUF_RTR BIT(29) +/* T1 */ +#define TX_BUF_EFC BIT(23) +#define TX_BUF_FDF BIT(21) +#define TX_BUF_BRS BIT(20) +#define TX_BUF_MM_MASK GENMASK(31, 24) +#define TX_BUF_DLC_MASK GENMASK(19, 16) + +/* Tx event FIFO Element */ +/* E1 */ +#define TX_EVENT_MM_MASK GENMASK(31, 24) +#define TX_EVENT_TXTS_MASK GENMASK(15, 0) + +/* Hrtimer polling interval */ +#define HRTIMER_POLL_INTERVAL_MS 1 + +/* The ID and DLC registers are adjacent in M_CAN FIFO memory, + * and we can save a (potentially slow) bus round trip by combining + * reads and writes to them. + */ +struct id_and_dlc { + u32 id; + u32 dlc; +}; + +static inline u32 m_can_read(struct m_can_classdev *cdev, enum m_can_reg reg) +{ + return cdev->ops->read_reg(cdev, reg); +} + +static inline void m_can_write(struct m_can_classdev *cdev, enum m_can_reg reg, + u32 val) +{ + cdev->ops->write_reg(cdev, reg, val); +} + +static int +m_can_fifo_read(struct m_can_classdev *cdev, + u32 fgi, unsigned int offset, void *val, size_t val_count) +{ + u32 addr_offset = cdev->mcfg[MRAM_RXF0].off + fgi * RXF0_ELEMENT_SIZE + + offset; + + if (val_count == 0) + return 0; + + return cdev->ops->read_fifo(cdev, addr_offset, val, val_count); +} + +static int +m_can_fifo_write(struct m_can_classdev *cdev, + u32 fpi, unsigned int offset, const void *val, size_t val_count) +{ + u32 addr_offset = cdev->mcfg[MRAM_TXB].off + fpi * TXB_ELEMENT_SIZE + + offset; + + if (val_count == 0) + return 0; + + return cdev->ops->write_fifo(cdev, addr_offset, val, val_count); +} + +static inline int m_can_fifo_write_no_off(struct m_can_classdev *cdev, + u32 fpi, u32 val) +{ + return cdev->ops->write_fifo(cdev, fpi, &val, 1); +} + +static int +m_can_txe_fifo_read(struct m_can_classdev *cdev, u32 fgi, u32 offset, u32 *val) +{ + u32 addr_offset = cdev->mcfg[MRAM_TXE].off + fgi * TXE_ELEMENT_SIZE + + offset; + + return cdev->ops->read_fifo(cdev, addr_offset, val, 1); +} + +static inline bool _m_can_tx_fifo_full(u32 txfqs) +{ + return !!(txfqs & TXFQS_TFQF); +} + +static inline bool m_can_tx_fifo_full(struct m_can_classdev *cdev) +{ + return _m_can_tx_fifo_full(m_can_read(cdev, M_CAN_TXFQS)); +} + +static void m_can_config_endisable(struct m_can_classdev *cdev, bool enable) +{ + u32 cccr = m_can_read(cdev, M_CAN_CCCR); + u32 timeout = 10; + u32 val = 0; + + /* Clear the Clock stop request if it was set */ + if (cccr & CCCR_CSR) + cccr &= ~CCCR_CSR; + + if (enable) { + /* enable m_can configuration */ + m_can_write(cdev, M_CAN_CCCR, cccr | CCCR_INIT); + udelay(5); + /* CCCR.CCE can only be set/reset while CCCR.INIT = '1' */ + m_can_write(cdev, M_CAN_CCCR, cccr | CCCR_INIT | CCCR_CCE); + } else { + m_can_write(cdev, M_CAN_CCCR, cccr & ~(CCCR_INIT | CCCR_CCE)); + } + + /* there's a delay for module initialization */ + if (enable) + val = CCCR_INIT | CCCR_CCE; + + while ((m_can_read(cdev, M_CAN_CCCR) & (CCCR_INIT | CCCR_CCE)) != val) { + if (timeout == 0) { + netdev_warn(cdev->net, "Failed to init module\n"); + return; + } + timeout--; + udelay(1); + } +} + +static inline void 
m_can_enable_all_interrupts(struct m_can_classdev *cdev) +{ + /* Only interrupt line 0 is used in this driver */ + m_can_write(cdev, M_CAN_ILE, ILE_EINT0); +} + +static inline void m_can_disable_all_interrupts(struct m_can_classdev *cdev) +{ + m_can_write(cdev, M_CAN_ILE, 0x0); +} + +/* Retrieve internal timestamp counter from TSCV.TSC, and shift it to 32-bit + * width. + */ +static u32 m_can_get_timestamp(struct m_can_classdev *cdev) +{ + u32 tscv; + u32 tsc; + + tscv = m_can_read(cdev, M_CAN_TSCV); + tsc = FIELD_GET(TSCV_TSC_MASK, tscv); + + return (tsc << 16); +} + +static void m_can_clean(struct net_device *net) +{ + struct m_can_classdev *cdev = netdev_priv(net); + + if (cdev->tx_skb) { + int putidx = 0; + + net->stats.tx_errors++; + if (cdev->version > 30) + putidx = FIELD_GET(TXFQS_TFQPI_MASK, + m_can_read(cdev, M_CAN_TXFQS)); + + can_free_echo_skb(cdev->net, putidx, NULL); + cdev->tx_skb = NULL; + } +} + +/* For peripherals, pass skb to rx-offload, which will push skb from + * napi. For non-peripherals, RX is done in napi already, so push + * directly. timestamp is used to ensure good skb ordering in + * rx-offload and is ignored for non-peripherals. + */ +static void m_can_receive_skb(struct m_can_classdev *cdev, + struct sk_buff *skb, + u32 timestamp) +{ + if (cdev->is_peripheral) { + struct net_device_stats *stats = &cdev->net->stats; + int err; + + err = can_rx_offload_queue_timestamp(&cdev->offload, skb, + timestamp); + if (err) + stats->rx_fifo_errors++; + } else { + netif_receive_skb(skb); + } +} + +static int m_can_read_fifo(struct net_device *dev, u32 fgi) +{ + struct net_device_stats *stats = &dev->stats; + struct m_can_classdev *cdev = netdev_priv(dev); + struct canfd_frame *cf; + struct sk_buff *skb; + struct id_and_dlc fifo_header; + u32 timestamp = 0; + int err; + + err = m_can_fifo_read(cdev, fgi, M_CAN_FIFO_ID, &fifo_header, 2); + if (err) + goto out_fail; + + if (fifo_header.dlc & RX_BUF_FDF) + skb = alloc_canfd_skb(dev, &cf); + else + skb = alloc_can_skb(dev, (struct can_frame **)&cf); + if (!skb) { + stats->rx_dropped++; + return 0; + } + + if (fifo_header.dlc & RX_BUF_FDF) + cf->len = can_fd_dlc2len((fifo_header.dlc >> 16) & 0x0F); + else + cf->len = can_cc_dlc2len((fifo_header.dlc >> 16) & 0x0F); + + if (fifo_header.id & RX_BUF_XTD) + cf->can_id = (fifo_header.id & CAN_EFF_MASK) | CAN_EFF_FLAG; + else + cf->can_id = (fifo_header.id >> 18) & CAN_SFF_MASK; + + if (fifo_header.id & RX_BUF_ESI) { + cf->flags |= CANFD_ESI; + netdev_dbg(dev, "ESI Error\n"); + } + + if (!(fifo_header.dlc & RX_BUF_FDF) && (fifo_header.id & RX_BUF_RTR)) { + cf->can_id |= CAN_RTR_FLAG; + } else { + if (fifo_header.dlc & RX_BUF_BRS) + cf->flags |= CANFD_BRS; + + err = m_can_fifo_read(cdev, fgi, M_CAN_FIFO_DATA, + cf->data, DIV_ROUND_UP(cf->len, 4)); + if (err) + goto out_free_skb; + + stats->rx_bytes += cf->len; + } + stats->rx_packets++; + + timestamp = FIELD_GET(RX_BUF_RXTS_MASK, fifo_header.dlc) << 16; + + m_can_receive_skb(cdev, skb, timestamp); + + return 0; + +out_free_skb: + kfree_skb(skb); +out_fail: + netdev_err(dev, "FIFO read returned %d\n", err); + return err; +} + +static int m_can_do_rx_poll(struct net_device *dev, int quota) +{ + struct m_can_classdev *cdev = netdev_priv(dev); + u32 pkts = 0; + u32 rxfs; + u32 rx_count; + u32 fgi; + int ack_fgi = -1; + int i; + int err = 0; + + rxfs = m_can_read(cdev, M_CAN_RXF0S); + if (!(rxfs & RXFS_FFL_MASK)) { + netdev_dbg(dev, "no messages in fifo0\n"); + return 0; + } + + rx_count = FIELD_GET(RXFS_FFL_MASK, rxfs); + fgi = 
FIELD_GET(RXFS_FGI_MASK, rxfs); + + for (i = 0; i < rx_count && quota > 0; ++i) { + err = m_can_read_fifo(dev, fgi); + if (err) + break; + + quota--; + pkts++; + ack_fgi = fgi; + fgi = (++fgi >= cdev->mcfg[MRAM_RXF0].num ? 0 : fgi); + } + + if (ack_fgi != -1) + m_can_write(cdev, M_CAN_RXF0A, ack_fgi); + + if (err) + return err; + + return pkts; +} + +static int m_can_handle_lost_msg(struct net_device *dev) +{ + struct m_can_classdev *cdev = netdev_priv(dev); + struct net_device_stats *stats = &dev->stats; + struct sk_buff *skb; + struct can_frame *frame; + u32 timestamp = 0; + + netdev_err(dev, "msg lost in rxf0\n"); + + stats->rx_errors++; + stats->rx_over_errors++; + + skb = alloc_can_err_skb(dev, &frame); + if (unlikely(!skb)) + return 0; + + frame->can_id |= CAN_ERR_CRTL; + frame->data[1] = CAN_ERR_CRTL_RX_OVERFLOW; + + if (cdev->is_peripheral) + timestamp = m_can_get_timestamp(cdev); + + m_can_receive_skb(cdev, skb, timestamp); + + return 1; +} + +static int m_can_handle_lec_err(struct net_device *dev, + enum m_can_lec_type lec_type) +{ + struct m_can_classdev *cdev = netdev_priv(dev); + struct net_device_stats *stats = &dev->stats; + struct can_frame *cf; + struct sk_buff *skb; + u32 timestamp = 0; + + cdev->can.can_stats.bus_error++; + stats->rx_errors++; + + /* propagate the error condition to the CAN stack */ + skb = alloc_can_err_skb(dev, &cf); + if (unlikely(!skb)) + return 0; + + /* check for 'last error code' which tells us the + * type of the last error to occur on the CAN bus + */ + cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR; + + switch (lec_type) { + case LEC_STUFF_ERROR: + netdev_dbg(dev, "stuff error\n"); + cf->data[2] |= CAN_ERR_PROT_STUFF; + break; + case LEC_FORM_ERROR: + netdev_dbg(dev, "form error\n"); + cf->data[2] |= CAN_ERR_PROT_FORM; + break; + case LEC_ACK_ERROR: + netdev_dbg(dev, "ack error\n"); + cf->data[3] = CAN_ERR_PROT_LOC_ACK; + break; + case LEC_BIT1_ERROR: + netdev_dbg(dev, "bit1 error\n"); + cf->data[2] |= CAN_ERR_PROT_BIT1; + break; + case LEC_BIT0_ERROR: + netdev_dbg(dev, "bit0 error\n"); + cf->data[2] |= CAN_ERR_PROT_BIT0; + break; + case LEC_CRC_ERROR: + netdev_dbg(dev, "CRC error\n"); + cf->data[3] = CAN_ERR_PROT_LOC_CRC_SEQ; + break; + default: + break; + } + + if (cdev->is_peripheral) + timestamp = m_can_get_timestamp(cdev); + + m_can_receive_skb(cdev, skb, timestamp); + + return 1; +} + +static int __m_can_get_berr_counter(const struct net_device *dev, + struct can_berr_counter *bec) +{ + struct m_can_classdev *cdev = netdev_priv(dev); + unsigned int ecr; + + ecr = m_can_read(cdev, M_CAN_ECR); + bec->rxerr = FIELD_GET(ECR_REC_MASK, ecr); + bec->txerr = FIELD_GET(ECR_TEC_MASK, ecr); + + return 0; +} + +static int m_can_clk_start(struct m_can_classdev *cdev) +{ + if (cdev->pm_clock_support == 0) + return 0; + + return pm_runtime_resume_and_get(cdev->dev); +} + +static void m_can_clk_stop(struct m_can_classdev *cdev) +{ + if (cdev->pm_clock_support) + pm_runtime_put_sync(cdev->dev); +} + +static int m_can_get_berr_counter(const struct net_device *dev, + struct can_berr_counter *bec) +{ + struct m_can_classdev *cdev = netdev_priv(dev); + int err; + + err = m_can_clk_start(cdev); + if (err) + return err; + + __m_can_get_berr_counter(dev, bec); + + m_can_clk_stop(cdev); + + return 0; +} + +static int m_can_handle_state_change(struct net_device *dev, + enum can_state new_state) +{ + struct m_can_classdev *cdev = netdev_priv(dev); + struct can_frame *cf; + struct sk_buff *skb; + struct can_berr_counter bec; + unsigned int ecr; + u32 timestamp = 0; + 
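+	/* Update the device state and the state statistics first; the
+	 * error frame below is only allocated best-effort afterwards.
+	 */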
+ switch (new_state) { + case CAN_STATE_ERROR_WARNING: + /* error warning state */ + cdev->can.can_stats.error_warning++; + cdev->can.state = CAN_STATE_ERROR_WARNING; + break; + case CAN_STATE_ERROR_PASSIVE: + /* error passive state */ + cdev->can.can_stats.error_passive++; + cdev->can.state = CAN_STATE_ERROR_PASSIVE; + break; + case CAN_STATE_BUS_OFF: + /* bus-off state */ + cdev->can.state = CAN_STATE_BUS_OFF; + m_can_disable_all_interrupts(cdev); + cdev->can.can_stats.bus_off++; + can_bus_off(dev); + break; + default: + break; + } + + /* propagate the error condition to the CAN stack */ + skb = alloc_can_err_skb(dev, &cf); + if (unlikely(!skb)) + return 0; + + __m_can_get_berr_counter(dev, &bec); + + switch (new_state) { + case CAN_STATE_ERROR_WARNING: + /* error warning state */ + cf->can_id |= CAN_ERR_CRTL | CAN_ERR_CNT; + cf->data[1] = (bec.txerr > bec.rxerr) ? + CAN_ERR_CRTL_TX_WARNING : + CAN_ERR_CRTL_RX_WARNING; + cf->data[6] = bec.txerr; + cf->data[7] = bec.rxerr; + break; + case CAN_STATE_ERROR_PASSIVE: + /* error passive state */ + cf->can_id |= CAN_ERR_CRTL | CAN_ERR_CNT; + ecr = m_can_read(cdev, M_CAN_ECR); + if (ecr & ECR_RP) + cf->data[1] |= CAN_ERR_CRTL_RX_PASSIVE; + if (bec.txerr > 127) + cf->data[1] |= CAN_ERR_CRTL_TX_PASSIVE; + cf->data[6] = bec.txerr; + cf->data[7] = bec.rxerr; + break; + case CAN_STATE_BUS_OFF: + /* bus-off state */ + cf->can_id |= CAN_ERR_BUSOFF; + break; + default: + break; + } + + if (cdev->is_peripheral) + timestamp = m_can_get_timestamp(cdev); + + m_can_receive_skb(cdev, skb, timestamp); + + return 1; +} + +static int m_can_handle_state_errors(struct net_device *dev, u32 psr) +{ + struct m_can_classdev *cdev = netdev_priv(dev); + int work_done = 0; + + if (psr & PSR_EW && cdev->can.state != CAN_STATE_ERROR_WARNING) { + netdev_dbg(dev, "entered error warning state\n"); + work_done += m_can_handle_state_change(dev, + CAN_STATE_ERROR_WARNING); + } + + if (psr & PSR_EP && cdev->can.state != CAN_STATE_ERROR_PASSIVE) { + netdev_dbg(dev, "entered error passive state\n"); + work_done += m_can_handle_state_change(dev, + CAN_STATE_ERROR_PASSIVE); + } + + if (psr & PSR_BO && cdev->can.state != CAN_STATE_BUS_OFF) { + netdev_dbg(dev, "entered error bus off state\n"); + work_done += m_can_handle_state_change(dev, + CAN_STATE_BUS_OFF); + } + + return work_done; +} + +static void m_can_handle_other_err(struct net_device *dev, u32 irqstatus) +{ + if (irqstatus & IR_WDI) + netdev_err(dev, "Message RAM Watchdog event due to missing READY\n"); + if (irqstatus & IR_BEU) + netdev_err(dev, "Bit Error Uncorrected\n"); + if (irqstatus & IR_BEC) + netdev_err(dev, "Bit Error Corrected\n"); + if (irqstatus & IR_TOO) + netdev_err(dev, "Timeout reached\n"); + if (irqstatus & IR_MRAF) + netdev_err(dev, "Message RAM access failure occurred\n"); +} + +static inline bool is_lec_err(u8 lec) +{ + return lec != LEC_NO_ERROR && lec != LEC_NO_CHANGE; +} + +static inline bool m_can_is_protocol_err(u32 irqstatus) +{ + return irqstatus & IR_ERR_LEC_31X; +} + +static int m_can_handle_protocol_error(struct net_device *dev, u32 irqstatus) +{ + struct net_device_stats *stats = &dev->stats; + struct m_can_classdev *cdev = netdev_priv(dev); + struct can_frame *cf; + struct sk_buff *skb; + u32 timestamp = 0; + + /* propagate the error condition to the CAN stack */ + skb = alloc_can_err_skb(dev, &cf); + + /* update tx error stats since there is protocol error */ + stats->tx_errors++; + + /* update arbitration lost status */ + if (cdev->version >= 31 && (irqstatus & IR_PEA)) { + netdev_dbg(dev, 
"Protocol error in Arbitration fail\n"); + cdev->can.can_stats.arbitration_lost++; + if (skb) { + cf->can_id |= CAN_ERR_LOSTARB; + cf->data[0] |= CAN_ERR_LOSTARB_UNSPEC; + } + } + + if (unlikely(!skb)) { + netdev_dbg(dev, "allocation of skb failed\n"); + return 0; + } + + if (cdev->is_peripheral) + timestamp = m_can_get_timestamp(cdev); + + m_can_receive_skb(cdev, skb, timestamp); + + return 1; +} + +static int m_can_handle_bus_errors(struct net_device *dev, u32 irqstatus, + u32 psr) +{ + struct m_can_classdev *cdev = netdev_priv(dev); + int work_done = 0; + + if (irqstatus & IR_RF0L) + work_done += m_can_handle_lost_msg(dev); + + /* handle lec errors on the bus */ + if (cdev->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING) { + u8 lec = FIELD_GET(PSR_LEC_MASK, psr); + u8 dlec = FIELD_GET(PSR_DLEC_MASK, psr); + + if (is_lec_err(lec)) { + netdev_dbg(dev, "Arbitration phase error detected\n"); + work_done += m_can_handle_lec_err(dev, lec); + } + + if (is_lec_err(dlec)) { + netdev_dbg(dev, "Data phase error detected\n"); + work_done += m_can_handle_lec_err(dev, dlec); + } + } + + /* handle protocol errors in arbitration phase */ + if ((cdev->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING) && + m_can_is_protocol_err(irqstatus)) + work_done += m_can_handle_protocol_error(dev, irqstatus); + + /* other unproccessed error interrupts */ + m_can_handle_other_err(dev, irqstatus); + + return work_done; +} + +static int m_can_rx_handler(struct net_device *dev, int quota, u32 irqstatus) +{ + struct m_can_classdev *cdev = netdev_priv(dev); + int rx_work_or_err; + int work_done = 0; + + if (!irqstatus) + goto end; + + /* Errata workaround for issue "Needless activation of MRAF irq" + * During frame reception while the MCAN is in Error Passive state + * and the Receive Error Counter has the value MCAN_ECR.REC = 127, + * it may happen that MCAN_IR.MRAF is set although there was no + * Message RAM access failure. + * If MCAN_IR.MRAF is enabled, an interrupt to the Host CPU is generated + * The Message RAM Access Failure interrupt routine needs to check + * whether MCAN_ECR.RP = ’1’ and MCAN_ECR.REC = 127. + * In this case, reset MCAN_IR.MRAF. No further action is required. + */ + if (cdev->version <= 31 && irqstatus & IR_MRAF && + m_can_read(cdev, M_CAN_ECR) & ECR_RP) { + struct can_berr_counter bec; + + __m_can_get_berr_counter(dev, &bec); + if (bec.rxerr == 127) { + m_can_write(cdev, M_CAN_IR, IR_MRAF); + irqstatus &= ~IR_MRAF; + } + } + + if (irqstatus & IR_ERR_STATE) + work_done += m_can_handle_state_errors(dev, + m_can_read(cdev, M_CAN_PSR)); + + if (irqstatus & IR_ERR_BUS_30X) + work_done += m_can_handle_bus_errors(dev, irqstatus, + m_can_read(cdev, M_CAN_PSR)); + + if (irqstatus & IR_RF0N) { + rx_work_or_err = m_can_do_rx_poll(dev, (quota - work_done)); + if (rx_work_or_err < 0) + return rx_work_or_err; + + work_done += rx_work_or_err; + } +end: + return work_done; +} + +static int m_can_rx_peripheral(struct net_device *dev, u32 irqstatus) +{ + struct m_can_classdev *cdev = netdev_priv(dev); + int work_done; + + work_done = m_can_rx_handler(dev, NAPI_POLL_WEIGHT, irqstatus); + + /* Don't re-enable interrupts if the driver had a fatal error + * (e.g., FIFO read failure). 
+ */ + if (work_done < 0) + m_can_disable_all_interrupts(cdev); + + return work_done; +} + +static int m_can_poll(struct napi_struct *napi, int quota) +{ + struct net_device *dev = napi->dev; + struct m_can_classdev *cdev = netdev_priv(dev); + int work_done; + u32 irqstatus; + + irqstatus = cdev->irqstatus | m_can_read(cdev, M_CAN_IR); + + work_done = m_can_rx_handler(dev, quota, irqstatus); + + /* Don't re-enable interrupts if the driver had a fatal error + * (e.g., FIFO read failure). + */ + if (work_done >= 0 && work_done < quota) { + napi_complete_done(napi, work_done); + m_can_enable_all_interrupts(cdev); + } + + return work_done; +} + +/* Echo tx skb and update net stats. Peripherals use rx-offload for + * echo. timestamp is used for peripherals to ensure correct ordering + * by rx-offload, and is ignored for non-peripherals. + */ +static void m_can_tx_update_stats(struct m_can_classdev *cdev, + unsigned int msg_mark, + u32 timestamp) +{ + struct net_device *dev = cdev->net; + struct net_device_stats *stats = &dev->stats; + + if (cdev->is_peripheral) + stats->tx_bytes += + can_rx_offload_get_echo_skb_queue_timestamp(&cdev->offload, + msg_mark, + timestamp, + NULL); + else + stats->tx_bytes += can_get_echo_skb(dev, msg_mark, NULL); + + stats->tx_packets++; +} + +static int m_can_echo_tx_event(struct net_device *dev) +{ + u32 txe_count = 0; + u32 m_can_txefs; + u32 fgi = 0; + int ack_fgi = -1; + int i = 0; + int err = 0; + unsigned int msg_mark; + + struct m_can_classdev *cdev = netdev_priv(dev); + + /* read tx event fifo status */ + m_can_txefs = m_can_read(cdev, M_CAN_TXEFS); + + /* Get Tx Event fifo element count */ + txe_count = FIELD_GET(TXEFS_EFFL_MASK, m_can_txefs); + fgi = FIELD_GET(TXEFS_EFGI_MASK, m_can_txefs); + + /* Get and process all sent elements */ + for (i = 0; i < txe_count; i++) { + u32 txe, timestamp = 0; + + /* get message marker, timestamp */ + err = m_can_txe_fifo_read(cdev, fgi, 4, &txe); + if (err) { + netdev_err(dev, "TXE FIFO read returned %d\n", err); + break; + } + + msg_mark = FIELD_GET(TX_EVENT_MM_MASK, txe); + timestamp = FIELD_GET(TX_EVENT_TXTS_MASK, txe) << 16; + + ack_fgi = fgi; + fgi = (++fgi >= cdev->mcfg[MRAM_TXE].num ? 
0 : fgi); + + /* update stats */ + m_can_tx_update_stats(cdev, msg_mark, timestamp); + } + + if (ack_fgi != -1) + m_can_write(cdev, M_CAN_TXEFA, FIELD_PREP(TXEFA_EFAI_MASK, + ack_fgi)); + + return err; +} + +static irqreturn_t m_can_isr(int irq, void *dev_id) +{ + struct net_device *dev = (struct net_device *)dev_id; + struct m_can_classdev *cdev = netdev_priv(dev); + u32 ir; + + if (pm_runtime_suspended(cdev->dev)) + return IRQ_NONE; + ir = m_can_read(cdev, M_CAN_IR); + if (!ir) + return IRQ_NONE; + + /* ACK all irqs */ + m_can_write(cdev, M_CAN_IR, ir); + + if (cdev->ops->clear_interrupts) + cdev->ops->clear_interrupts(cdev); + + /* schedule NAPI in case of + * - rx IRQ + * - state change IRQ + * - bus error IRQ and bus error reporting + */ + if ((ir & IR_RF0N) || (ir & IR_ERR_ALL_30X)) { + cdev->irqstatus = ir; + if (!cdev->is_peripheral) { + m_can_disable_all_interrupts(cdev); + napi_schedule(&cdev->napi); + } else if (m_can_rx_peripheral(dev, ir) < 0) { + goto out_fail; + } + } + + if (cdev->version == 30) { + if (ir & IR_TC) { + /* Transmission Complete Interrupt*/ + u32 timestamp = 0; + + if (cdev->is_peripheral) + timestamp = m_can_get_timestamp(cdev); + m_can_tx_update_stats(cdev, 0, timestamp); + netif_wake_queue(dev); + } + } else { + if (ir & IR_TEFN) { + /* New TX FIFO Element arrived */ + if (m_can_echo_tx_event(dev) != 0) + goto out_fail; + + if (netif_queue_stopped(dev) && + !m_can_tx_fifo_full(cdev)) + netif_wake_queue(dev); + } + } + + if (cdev->is_peripheral) + can_rx_offload_threaded_irq_finish(&cdev->offload); + + return IRQ_HANDLED; + +out_fail: + m_can_disable_all_interrupts(cdev); + return IRQ_HANDLED; +} + +static const struct can_bittiming_const m_can_bittiming_const_30X = { + .name = KBUILD_MODNAME, + .tseg1_min = 2, /* Time segment 1 = prop_seg + phase_seg1 */ + .tseg1_max = 64, + .tseg2_min = 1, /* Time segment 2 = phase_seg2 */ + .tseg2_max = 16, + .sjw_max = 16, + .brp_min = 1, + .brp_max = 1024, + .brp_inc = 1, +}; + +static const struct can_bittiming_const m_can_data_bittiming_const_30X = { + .name = KBUILD_MODNAME, + .tseg1_min = 2, /* Time segment 1 = prop_seg + phase_seg1 */ + .tseg1_max = 16, + .tseg2_min = 1, /* Time segment 2 = phase_seg2 */ + .tseg2_max = 8, + .sjw_max = 4, + .brp_min = 1, + .brp_max = 32, + .brp_inc = 1, +}; + +static const struct can_bittiming_const m_can_bittiming_const_31X = { + .name = KBUILD_MODNAME, + .tseg1_min = 2, /* Time segment 1 = prop_seg + phase_seg1 */ + .tseg1_max = 256, + .tseg2_min = 2, /* Time segment 2 = phase_seg2 */ + .tseg2_max = 128, + .sjw_max = 128, + .brp_min = 1, + .brp_max = 512, + .brp_inc = 1, +}; + +static const struct can_bittiming_const m_can_data_bittiming_const_31X = { + .name = KBUILD_MODNAME, + .tseg1_min = 1, /* Time segment 1 = prop_seg + phase_seg1 */ + .tseg1_max = 32, + .tseg2_min = 1, /* Time segment 2 = phase_seg2 */ + .tseg2_max = 16, + .sjw_max = 16, + .brp_min = 1, + .brp_max = 32, + .brp_inc = 1, +}; + +static int m_can_set_bittiming(struct net_device *dev) +{ + struct m_can_classdev *cdev = netdev_priv(dev); + const struct can_bittiming *bt = &cdev->can.bittiming; + const struct can_bittiming *dbt = &cdev->can.data_bittiming; + u16 brp, sjw, tseg1, tseg2; + u32 reg_btp; + + brp = bt->brp - 1; + sjw = bt->sjw - 1; + tseg1 = bt->prop_seg + bt->phase_seg1 - 1; + tseg2 = bt->phase_seg2 - 1; + reg_btp = FIELD_PREP(NBTP_NBRP_MASK, brp) | + FIELD_PREP(NBTP_NSJW_MASK, sjw) | + FIELD_PREP(NBTP_NTSEG1_MASK, tseg1) | + FIELD_PREP(NBTP_NTSEG2_MASK, tseg2); + m_can_write(cdev, M_CAN_NBTP, 
reg_btp); + + if (cdev->can.ctrlmode & CAN_CTRLMODE_FD) { + reg_btp = 0; + brp = dbt->brp - 1; + sjw = dbt->sjw - 1; + tseg1 = dbt->prop_seg + dbt->phase_seg1 - 1; + tseg2 = dbt->phase_seg2 - 1; + + /* TDC is only needed for bitrates beyond 2.5 MBit/s. + * This is mentioned in the "Bit Time Requirements for CAN FD" + * paper presented at the International CAN Conference 2013 + */ + if (dbt->bitrate > 2500000) { + u32 tdco, ssp; + + /* Use the same value of secondary sampling point + * as the data sampling point + */ + ssp = dbt->sample_point; + + /* Equation based on Bosch's M_CAN User Manual's + * Transmitter Delay Compensation Section + */ + tdco = (cdev->can.clock.freq / 1000) * + ssp / dbt->bitrate; + + /* Max valid TDCO value is 127 */ + if (tdco > 127) { + netdev_warn(dev, "TDCO value of %u is beyond maximum. Using maximum possible value\n", + tdco); + tdco = 127; + } + + reg_btp |= DBTP_TDC; + m_can_write(cdev, M_CAN_TDCR, + FIELD_PREP(TDCR_TDCO_MASK, tdco)); + } + + reg_btp |= FIELD_PREP(DBTP_DBRP_MASK, brp) | + FIELD_PREP(DBTP_DSJW_MASK, sjw) | + FIELD_PREP(DBTP_DTSEG1_MASK, tseg1) | + FIELD_PREP(DBTP_DTSEG2_MASK, tseg2); + + m_can_write(cdev, M_CAN_DBTP, reg_btp); + } + + return 0; +} + +/* Configure M_CAN chip: + * - set rx buffer/fifo element size + * - configure rx fifo + * - accept non-matching frame into fifo 0 + * - configure tx buffer + * - >= v3.1.x: TX FIFO is used + * - configure mode + * - setup bittiming + * - configure timestamp generation + */ +static int m_can_chip_config(struct net_device *dev) +{ + struct m_can_classdev *cdev = netdev_priv(dev); + u32 interrupts = IR_ALL_INT; + u32 cccr, test; + int err; + + err = m_can_init_ram(cdev); + if (err) { + dev_err(cdev->dev, "Message RAM configuration failed\n"); + return err; + } + + /* Disable unused interrupts */ + interrupts &= ~(IR_ARA | IR_ELO | IR_DRX | IR_TEFF | IR_TEFW | IR_TFE | + IR_TCF | IR_HPM | IR_RF1F | IR_RF1W | IR_RF1N | + IR_RF0F | IR_RF0W); + + m_can_config_endisable(cdev, true); + + /* RX Buffer/FIFO Element Size 64 bytes data field */ + m_can_write(cdev, M_CAN_RXESC, + FIELD_PREP(RXESC_RBDS_MASK, RXESC_64B) | + FIELD_PREP(RXESC_F1DS_MASK, RXESC_64B) | + FIELD_PREP(RXESC_F0DS_MASK, RXESC_64B)); + + /* Accept Non-matching Frames Into FIFO 0 */ + m_can_write(cdev, M_CAN_GFC, 0x0); + + if (cdev->version == 30) { + /* only support one Tx Buffer currently */ + m_can_write(cdev, M_CAN_TXBC, FIELD_PREP(TXBC_NDTB_MASK, 1) | + cdev->mcfg[MRAM_TXB].off); + } else { + /* TX FIFO is used for newer IP Core versions */ + m_can_write(cdev, M_CAN_TXBC, + FIELD_PREP(TXBC_TFQS_MASK, + cdev->mcfg[MRAM_TXB].num) | + cdev->mcfg[MRAM_TXB].off); + } + + /* support 64 bytes payload */ + m_can_write(cdev, M_CAN_TXESC, + FIELD_PREP(TXESC_TBDS_MASK, TXESC_TBDS_64B)); + + /* TX Event FIFO */ + if (cdev->version == 30) { + m_can_write(cdev, M_CAN_TXEFC, + FIELD_PREP(TXEFC_EFS_MASK, 1) | + cdev->mcfg[MRAM_TXE].off); + } else { + /* Full TX Event FIFO is used */ + m_can_write(cdev, M_CAN_TXEFC, + FIELD_PREP(TXEFC_EFS_MASK, + cdev->mcfg[MRAM_TXE].num) | + cdev->mcfg[MRAM_TXE].off); + } + + /* rx fifo configuration, blocking mode, fifo size 1 */ + m_can_write(cdev, M_CAN_RXF0C, + FIELD_PREP(RXFC_FS_MASK, cdev->mcfg[MRAM_RXF0].num) | + cdev->mcfg[MRAM_RXF0].off); + + m_can_write(cdev, M_CAN_RXF1C, + FIELD_PREP(RXFC_FS_MASK, cdev->mcfg[MRAM_RXF1].num) | + cdev->mcfg[MRAM_RXF1].off); + + cccr = m_can_read(cdev, M_CAN_CCCR); + test = m_can_read(cdev, M_CAN_TEST); + test &= ~TEST_LBCK; + if (cdev->version == 30) { + /* Version 3.0.x */ 
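+		/* Clear TEST, MON, DAR and the CAN(-FD) mode request/enable
+		 * fields; the requested mode is configured again below.
+		 */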
+
+		cccr &= ~(CCCR_TEST | CCCR_MON | CCCR_DAR |
+			  FIELD_PREP(CCCR_CMR_MASK, FIELD_MAX(CCCR_CMR_MASK)) |
+			  FIELD_PREP(CCCR_CME_MASK, FIELD_MAX(CCCR_CME_MASK)));
+
+		if (cdev->can.ctrlmode & CAN_CTRLMODE_FD)
+			cccr |= FIELD_PREP(CCCR_CME_MASK, CCCR_CME_CANFD_BRS);
+
+	} else {
+		/* Version 3.1.x or 3.2.x */
+		cccr &= ~(CCCR_TEST | CCCR_MON | CCCR_BRSE | CCCR_FDOE |
+			  CCCR_NISO | CCCR_DAR);
+
+		/* Only 3.2.x has NISO Bit implemented */
+		if (cdev->can.ctrlmode & CAN_CTRLMODE_FD_NON_ISO)
+			cccr |= CCCR_NISO;
+
+		if (cdev->can.ctrlmode & CAN_CTRLMODE_FD)
+			cccr |= (CCCR_BRSE | CCCR_FDOE);
+	}
+
+	/* Loopback Mode */
+	if (cdev->can.ctrlmode & CAN_CTRLMODE_LOOPBACK) {
+		cccr |= CCCR_TEST | CCCR_MON;
+		test |= TEST_LBCK;
+	}
+
+	/* Enable Monitoring (all versions) */
+	if (cdev->can.ctrlmode & CAN_CTRLMODE_LISTENONLY)
+		cccr |= CCCR_MON;
+
+	/* Disable Auto Retransmission (all versions) */
+	if (cdev->can.ctrlmode & CAN_CTRLMODE_ONE_SHOT)
+		cccr |= CCCR_DAR;
+
+	/* Write config */
+	m_can_write(cdev, M_CAN_CCCR, cccr);
+	m_can_write(cdev, M_CAN_TEST, test);
+
+	/* Enable interrupts */
+	if (!(cdev->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING)) {
+		if (cdev->version == 30)
+			interrupts &= ~(IR_ERR_LEC_30X);
+		else
+			interrupts &= ~(IR_ERR_LEC_31X);
+	}
+	m_can_write(cdev, M_CAN_IE, interrupts);
+
+	/* route all interrupts to INT0 */
+	m_can_write(cdev, M_CAN_ILS, ILS_ALL_INT0);
+
+	/* set bittiming params */
+	m_can_set_bittiming(dev);
+
+	/* enable internal timestamp generation, with a prescaler of 16. The
+	 * prescaler is applied to the nominal bit timing.
+	 */
+	m_can_write(cdev, M_CAN_TSCC,
+		    FIELD_PREP(TSCC_TCP_MASK, 0xf) |
+		    FIELD_PREP(TSCC_TSS_MASK, TSCC_TSS_INTERNAL));
+
+	m_can_config_endisable(cdev, false);
+
+	if (cdev->ops->init)
+		cdev->ops->init(cdev);
+
+	return 0;
+}
+
+static int m_can_start(struct net_device *dev)
+{
+	struct m_can_classdev *cdev = netdev_priv(dev);
+	int ret;
+
+	/* basic m_can configuration */
+	ret = m_can_chip_config(dev);
+	if (ret)
+		return ret;
+
+	cdev->can.state = CAN_STATE_ERROR_ACTIVE;
+
+	m_can_enable_all_interrupts(cdev);
+
+	if (!dev->irq) {
+		dev_dbg(cdev->dev, "Start hrtimer\n");
+		hrtimer_start(&cdev->hrtimer, ms_to_ktime(HRTIMER_POLL_INTERVAL_MS),
+			      HRTIMER_MODE_REL_PINNED);
+	}
+
+	return 0;
+}
+
+static int m_can_set_mode(struct net_device *dev, enum can_mode mode)
+{
+	switch (mode) {
+	case CAN_MODE_START:
+		m_can_clean(dev);
+		m_can_start(dev);
+		netif_wake_queue(dev);
+		break;
+	default:
+		return -EOPNOTSUPP;
+	}
+
+	return 0;
+}
+
+/* Checks core release number of M_CAN
+ * returns 0 if an unsupported device is detected
+ * else it returns the release and step coded as:
+ * return value = 10 * <rel> + 1 * <step>
+ */
+static int m_can_check_core_release(struct m_can_classdev *cdev)
+{
+	u32 crel_reg;
+	u8 rel;
+	u8 step;
+	int res;
+
+	/* Read Core Release Version and split into version number
+	 * Example: Version 3.2.1 => rel = 3; step = 2; substep = 1;
+	 */
+	crel_reg = m_can_read(cdev, M_CAN_CREL);
+	rel = (u8)FIELD_GET(CREL_REL_MASK, crel_reg);
+	step = (u8)FIELD_GET(CREL_STEP_MASK, crel_reg);
+
+	if (rel == 3) {
+		/* M_CAN v3.x.y: create return value */
+		res = 30 + step;
+	} else {
+		/* Unsupported M_CAN version */
+		res = 0;
+	}
+
+	return res;
+}
+
+/* Selectable Non ISO support only in version 3.2.x
+ * This function checks if the bit is writable.
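+ * The bit is set and read back; if the write sticks, NISO is implemented.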
+ */ +static bool m_can_niso_supported(struct m_can_classdev *cdev) +{ + u32 cccr_reg, cccr_poll = 0; + int niso_timeout = -ETIMEDOUT; + int i; + + m_can_config_endisable(cdev, true); + cccr_reg = m_can_read(cdev, M_CAN_CCCR); + cccr_reg |= CCCR_NISO; + m_can_write(cdev, M_CAN_CCCR, cccr_reg); + + for (i = 0; i <= 10; i++) { + cccr_poll = m_can_read(cdev, M_CAN_CCCR); + if (cccr_poll == cccr_reg) { + niso_timeout = 0; + break; + } + + usleep_range(1, 5); + } + + /* Clear NISO */ + cccr_reg &= ~(CCCR_NISO); + m_can_write(cdev, M_CAN_CCCR, cccr_reg); + + m_can_config_endisable(cdev, false); + + /* return false if time out (-ETIMEDOUT), else return true */ + return !niso_timeout; +} + +static int m_can_dev_setup(struct m_can_classdev *cdev) +{ + struct net_device *dev = cdev->net; + int m_can_version, err; + + m_can_version = m_can_check_core_release(cdev); + /* return if unsupported version */ + if (!m_can_version) { + dev_err(cdev->dev, "Unsupported version number: %2d", + m_can_version); + return -EINVAL; + } + + if (!cdev->is_peripheral) + netif_napi_add(dev, &cdev->napi, m_can_poll); + + /* Shared properties of all M_CAN versions */ + cdev->version = m_can_version; + cdev->can.do_set_mode = m_can_set_mode; + cdev->can.do_get_berr_counter = m_can_get_berr_counter; + + /* Set M_CAN supported operations */ + cdev->can.ctrlmode_supported = CAN_CTRLMODE_LOOPBACK | + CAN_CTRLMODE_LISTENONLY | + CAN_CTRLMODE_BERR_REPORTING | + CAN_CTRLMODE_FD | + CAN_CTRLMODE_ONE_SHOT; + + /* Set properties depending on M_CAN version */ + switch (cdev->version) { + case 30: + /* CAN_CTRLMODE_FD_NON_ISO is fixed with M_CAN IP v3.0.x */ + err = can_set_static_ctrlmode(dev, CAN_CTRLMODE_FD_NON_ISO); + if (err) + return err; + cdev->can.bittiming_const = &m_can_bittiming_const_30X; + cdev->can.data_bittiming_const = &m_can_data_bittiming_const_30X; + break; + case 31: + /* CAN_CTRLMODE_FD_NON_ISO is fixed with M_CAN IP v3.1.x */ + err = can_set_static_ctrlmode(dev, CAN_CTRLMODE_FD_NON_ISO); + if (err) + return err; + cdev->can.bittiming_const = &m_can_bittiming_const_31X; + cdev->can.data_bittiming_const = &m_can_data_bittiming_const_31X; + break; + case 32: + case 33: + /* Support both MCAN version v3.2.x and v3.3.0 */ + cdev->can.bittiming_const = &m_can_bittiming_const_31X; + cdev->can.data_bittiming_const = &m_can_data_bittiming_const_31X; + + cdev->can.ctrlmode_supported |= + (m_can_niso_supported(cdev) ? 
+ CAN_CTRLMODE_FD_NON_ISO : 0); + break; + default: + dev_err(cdev->dev, "Unsupported version number: %2d", + cdev->version); + return -EINVAL; + } + + if (cdev->ops->init) + cdev->ops->init(cdev); + + return 0; +} + +static void m_can_stop(struct net_device *dev) +{ + struct m_can_classdev *cdev = netdev_priv(dev); + + if (!dev->irq) { + dev_dbg(cdev->dev, "Stop hrtimer\n"); + hrtimer_cancel(&cdev->hrtimer); + } + + /* disable all interrupts */ + m_can_disable_all_interrupts(cdev); + + /* Set init mode to disengage from the network */ + m_can_config_endisable(cdev, true); + + /* set the state as STOPPED */ + cdev->can.state = CAN_STATE_STOPPED; +} + +static int m_can_close(struct net_device *dev) +{ + struct m_can_classdev *cdev = netdev_priv(dev); + + netif_stop_queue(dev); + + if (!cdev->is_peripheral) + napi_disable(&cdev->napi); + + m_can_stop(dev); + m_can_clk_stop(cdev); + free_irq(dev->irq, dev); + + if (cdev->is_peripheral) { + cdev->tx_skb = NULL; + destroy_workqueue(cdev->tx_wq); + cdev->tx_wq = NULL; + can_rx_offload_disable(&cdev->offload); + } + + close_candev(dev); + + phy_power_off(cdev->transceiver); + + return 0; +} + +static int m_can_next_echo_skb_occupied(struct net_device *dev, int putidx) +{ + struct m_can_classdev *cdev = netdev_priv(dev); + /*get wrap around for loopback skb index */ + unsigned int wrap = cdev->can.echo_skb_max; + int next_idx; + + /* calculate next index */ + next_idx = (++putidx >= wrap ? 0 : putidx); + + /* check if occupied */ + return !!cdev->can.echo_skb[next_idx]; +} + +static netdev_tx_t m_can_tx_handler(struct m_can_classdev *cdev) +{ + struct canfd_frame *cf = (struct canfd_frame *)cdev->tx_skb->data; + struct net_device *dev = cdev->net; + struct sk_buff *skb = cdev->tx_skb; + struct id_and_dlc fifo_header; + u32 cccr, fdflags; + u32 txfqs; + int err; + int putidx; + + cdev->tx_skb = NULL; + + /* Generate ID field for TX buffer Element */ + /* Common to all supported M_CAN versions */ + if (cf->can_id & CAN_EFF_FLAG) { + fifo_header.id = cf->can_id & CAN_EFF_MASK; + fifo_header.id |= TX_BUF_XTD; + } else { + fifo_header.id = ((cf->can_id & CAN_SFF_MASK) << 18); + } + + if (cf->can_id & CAN_RTR_FLAG) + fifo_header.id |= TX_BUF_RTR; + + if (cdev->version == 30) { + netif_stop_queue(dev); + + fifo_header.dlc = can_fd_len2dlc(cf->len) << 16; + + /* Write the frame ID, DLC, and payload to the FIFO element. 
*/ + err = m_can_fifo_write(cdev, 0, M_CAN_FIFO_ID, &fifo_header, 2); + if (err) + goto out_fail; + + err = m_can_fifo_write(cdev, 0, M_CAN_FIFO_DATA, + cf->data, DIV_ROUND_UP(cf->len, 4)); + if (err) + goto out_fail; + + if (cdev->can.ctrlmode & CAN_CTRLMODE_FD) { + cccr = m_can_read(cdev, M_CAN_CCCR); + cccr &= ~CCCR_CMR_MASK; + if (can_is_canfd_skb(skb)) { + if (cf->flags & CANFD_BRS) + cccr |= FIELD_PREP(CCCR_CMR_MASK, + CCCR_CMR_CANFD_BRS); + else + cccr |= FIELD_PREP(CCCR_CMR_MASK, + CCCR_CMR_CANFD); + } else { + cccr |= FIELD_PREP(CCCR_CMR_MASK, CCCR_CMR_CAN); + } + m_can_write(cdev, M_CAN_CCCR, cccr); + } + m_can_write(cdev, M_CAN_TXBTIE, 0x1); + + can_put_echo_skb(skb, dev, 0, 0); + + m_can_write(cdev, M_CAN_TXBAR, 0x1); + /* End of xmit function for version 3.0.x */ + } else { + /* Transmit routine for version >= v3.1.x */ + + txfqs = m_can_read(cdev, M_CAN_TXFQS); + + /* Check if FIFO full */ + if (_m_can_tx_fifo_full(txfqs)) { + /* This shouldn't happen */ + netif_stop_queue(dev); + netdev_warn(dev, + "TX queue active although FIFO is full."); + + if (cdev->is_peripheral) { + kfree_skb(skb); + dev->stats.tx_dropped++; + return NETDEV_TX_OK; + } else { + return NETDEV_TX_BUSY; + } + } + + /* get put index for frame */ + putidx = FIELD_GET(TXFQS_TFQPI_MASK, txfqs); + + /* Construct DLC Field, with CAN-FD configuration. + * Use the put index of the fifo as the message marker, + * used in the TX interrupt for sending the correct echo frame. + */ + + /* get CAN FD configuration of frame */ + fdflags = 0; + if (can_is_canfd_skb(skb)) { + fdflags |= TX_BUF_FDF; + if (cf->flags & CANFD_BRS) + fdflags |= TX_BUF_BRS; + } + + fifo_header.dlc = FIELD_PREP(TX_BUF_MM_MASK, putidx) | + FIELD_PREP(TX_BUF_DLC_MASK, can_fd_len2dlc(cf->len)) | + fdflags | TX_BUF_EFC; + err = m_can_fifo_write(cdev, putidx, M_CAN_FIFO_ID, &fifo_header, 2); + if (err) + goto out_fail; + + err = m_can_fifo_write(cdev, putidx, M_CAN_FIFO_DATA, + cf->data, DIV_ROUND_UP(cf->len, 4)); + if (err) + goto out_fail; + + /* Push loopback echo. + * Will be looped back on TX interrupt based on message marker + */ + can_put_echo_skb(skb, dev, putidx, 0); + + /* Enable TX FIFO element to start transfer */ + m_can_write(cdev, M_CAN_TXBAR, (1 << putidx)); + + /* stop network queue if fifo full */ + if (m_can_tx_fifo_full(cdev) || + m_can_next_echo_skb_occupied(dev, putidx)) + netif_stop_queue(dev); + } + + return NETDEV_TX_OK; + +out_fail: + netdev_err(dev, "FIFO write returned %d\n", err); + m_can_disable_all_interrupts(cdev); + return NETDEV_TX_BUSY; +} + +static void m_can_tx_work_queue(struct work_struct *ws) +{ + struct m_can_classdev *cdev = container_of(ws, struct m_can_classdev, + tx_work); + + m_can_tx_handler(cdev); +} + +static netdev_tx_t m_can_start_xmit(struct sk_buff *skb, + struct net_device *dev) +{ + struct m_can_classdev *cdev = netdev_priv(dev); + + if (can_dev_dropped_skb(dev, skb)) + return NETDEV_TX_OK; + + if (cdev->is_peripheral) { + if (cdev->tx_skb) { + netdev_err(dev, "hard_xmit called while tx busy\n"); + return NETDEV_TX_BUSY; + } + + if (cdev->can.state == CAN_STATE_BUS_OFF) { + m_can_clean(dev); + } else { + /* Need to stop the queue to avoid numerous requests + * from being sent. Suggested improvement is to create + * a queueing mechanism that will queue the skbs and + * process them in order. 
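+			 * (The queued work item runs m_can_tx_work_queue(),
+			 * which hands the stored skb to m_can_tx_handler().)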
+ */ + cdev->tx_skb = skb; + netif_stop_queue(cdev->net); + queue_work(cdev->tx_wq, &cdev->tx_work); + } + } else { + cdev->tx_skb = skb; + return m_can_tx_handler(cdev); + } + + return NETDEV_TX_OK; +} + +static enum hrtimer_restart hrtimer_callback(struct hrtimer *timer) +{ + struct m_can_classdev *cdev = container_of(timer, struct + m_can_classdev, hrtimer); + + m_can_isr(0, cdev->net); + + hrtimer_forward_now(timer, ms_to_ktime(HRTIMER_POLL_INTERVAL_MS)); + + return HRTIMER_RESTART; +} + +static int m_can_open(struct net_device *dev) +{ + struct m_can_classdev *cdev = netdev_priv(dev); + int err; + + err = phy_power_on(cdev->transceiver); + if (err) + return err; + + err = m_can_clk_start(cdev); + if (err) + goto out_phy_power_off; + + /* open the can device */ + err = open_candev(dev); + if (err) { + netdev_err(dev, "failed to open can device\n"); + goto exit_disable_clks; + } + + if (cdev->is_peripheral) + can_rx_offload_enable(&cdev->offload); + + /* register interrupt handler */ + if (cdev->is_peripheral) { + cdev->tx_skb = NULL; + cdev->tx_wq = alloc_workqueue("mcan_wq", + WQ_FREEZABLE | WQ_MEM_RECLAIM, 0); + if (!cdev->tx_wq) { + err = -ENOMEM; + goto out_wq_fail; + } + + INIT_WORK(&cdev->tx_work, m_can_tx_work_queue); + + err = request_threaded_irq(dev->irq, NULL, m_can_isr, + IRQF_ONESHOT, + dev->name, dev); + } else if (dev->irq) { + err = request_irq(dev->irq, m_can_isr, IRQF_SHARED, dev->name, + dev); + } + + if (err < 0) { + netdev_err(dev, "failed to request interrupt\n"); + goto exit_irq_fail; + } + + /* start the m_can controller */ + err = m_can_start(dev); + if (err) + goto exit_irq_fail; + + if (!cdev->is_peripheral) + napi_enable(&cdev->napi); + + netif_start_queue(dev); + + return 0; + +exit_irq_fail: + if (cdev->is_peripheral) + destroy_workqueue(cdev->tx_wq); +out_wq_fail: + if (cdev->is_peripheral) + can_rx_offload_disable(&cdev->offload); + close_candev(dev); +exit_disable_clks: + m_can_clk_stop(cdev); +out_phy_power_off: + phy_power_off(cdev->transceiver); + return err; +} + +static const struct net_device_ops m_can_netdev_ops = { + .ndo_open = m_can_open, + .ndo_stop = m_can_close, + .ndo_start_xmit = m_can_start_xmit, + .ndo_change_mtu = can_change_mtu, +}; + +static const struct ethtool_ops m_can_ethtool_ops = { + .get_ts_info = ethtool_op_get_ts_info, +}; + +static int register_m_can_dev(struct net_device *dev) +{ + dev->flags |= IFF_ECHO; /* we support local echo */ + dev->netdev_ops = &m_can_netdev_ops; + dev->ethtool_ops = &m_can_ethtool_ops; + + return register_candev(dev); +} + +int m_can_check_mram_cfg(struct m_can_classdev *cdev, u32 mram_max_size) +{ + u32 total_size; + + total_size = cdev->mcfg[MRAM_TXB].off - cdev->mcfg[MRAM_SIDF].off + + cdev->mcfg[MRAM_TXB].num * TXB_ELEMENT_SIZE; + if (total_size > mram_max_size) { + dev_err(cdev->dev, "Total size of mram config(%u) exceeds mram(%u)\n", + total_size, mram_max_size); + return -EINVAL; + } + + return 0; +} +EXPORT_SYMBOL_GPL(m_can_check_mram_cfg); + +static void m_can_of_parse_mram(struct m_can_classdev *cdev, + const u32 *mram_config_vals) +{ + cdev->mcfg[MRAM_SIDF].off = mram_config_vals[0]; + cdev->mcfg[MRAM_SIDF].num = mram_config_vals[1]; + cdev->mcfg[MRAM_XIDF].off = cdev->mcfg[MRAM_SIDF].off + + cdev->mcfg[MRAM_SIDF].num * SIDF_ELEMENT_SIZE; + cdev->mcfg[MRAM_XIDF].num = mram_config_vals[2]; + cdev->mcfg[MRAM_RXF0].off = cdev->mcfg[MRAM_XIDF].off + + cdev->mcfg[MRAM_XIDF].num * XIDF_ELEMENT_SIZE; + cdev->mcfg[MRAM_RXF0].num = mram_config_vals[3] & + FIELD_MAX(RXFC_FS_MASK); + 
cdev->mcfg[MRAM_RXF1].off = cdev->mcfg[MRAM_RXF0].off + + cdev->mcfg[MRAM_RXF0].num * RXF0_ELEMENT_SIZE; + cdev->mcfg[MRAM_RXF1].num = mram_config_vals[4] & + FIELD_MAX(RXFC_FS_MASK); + cdev->mcfg[MRAM_RXB].off = cdev->mcfg[MRAM_RXF1].off + + cdev->mcfg[MRAM_RXF1].num * RXF1_ELEMENT_SIZE; + cdev->mcfg[MRAM_RXB].num = mram_config_vals[5]; + cdev->mcfg[MRAM_TXE].off = cdev->mcfg[MRAM_RXB].off + + cdev->mcfg[MRAM_RXB].num * RXB_ELEMENT_SIZE; + cdev->mcfg[MRAM_TXE].num = mram_config_vals[6]; + cdev->mcfg[MRAM_TXB].off = cdev->mcfg[MRAM_TXE].off + + cdev->mcfg[MRAM_TXE].num * TXE_ELEMENT_SIZE; + cdev->mcfg[MRAM_TXB].num = mram_config_vals[7] & + FIELD_MAX(TXBC_NDTB_MASK); + + dev_dbg(cdev->dev, + "sidf 0x%x %d xidf 0x%x %d rxf0 0x%x %d rxf1 0x%x %d rxb 0x%x %d txe 0x%x %d txb 0x%x %d\n", + cdev->mcfg[MRAM_SIDF].off, cdev->mcfg[MRAM_SIDF].num, + cdev->mcfg[MRAM_XIDF].off, cdev->mcfg[MRAM_XIDF].num, + cdev->mcfg[MRAM_RXF0].off, cdev->mcfg[MRAM_RXF0].num, + cdev->mcfg[MRAM_RXF1].off, cdev->mcfg[MRAM_RXF1].num, + cdev->mcfg[MRAM_RXB].off, cdev->mcfg[MRAM_RXB].num, + cdev->mcfg[MRAM_TXE].off, cdev->mcfg[MRAM_TXE].num, + cdev->mcfg[MRAM_TXB].off, cdev->mcfg[MRAM_TXB].num); +} + +int m_can_init_ram(struct m_can_classdev *cdev) +{ + int end, i, start; + int err = 0; + + /* initialize the entire Message RAM in use to avoid possible + * ECC/parity checksum errors when reading an uninitialized buffer + */ + start = cdev->mcfg[MRAM_SIDF].off; + end = cdev->mcfg[MRAM_TXB].off + + cdev->mcfg[MRAM_TXB].num * TXB_ELEMENT_SIZE; + + for (i = start; i < end; i += 4) { + err = m_can_fifo_write_no_off(cdev, i, 0x0); + if (err) + break; + } + + return err; +} +EXPORT_SYMBOL_GPL(m_can_init_ram); + +int m_can_class_get_clocks(struct m_can_classdev *cdev) +{ + int ret = 0; + + cdev->hclk = devm_clk_get(cdev->dev, "hclk"); + cdev->cclk = devm_clk_get(cdev->dev, "cclk"); + + if (IS_ERR(cdev->hclk) || IS_ERR(cdev->cclk)) { + dev_err(cdev->dev, "no clock found\n"); + ret = -ENODEV; + } + + return ret; +} +EXPORT_SYMBOL_GPL(m_can_class_get_clocks); + +struct m_can_classdev *m_can_class_allocate_dev(struct device *dev, + int sizeof_priv) +{ + struct m_can_classdev *class_dev = NULL; + u32 mram_config_vals[MRAM_CFG_LEN]; + struct net_device *net_dev; + u32 tx_fifo_size; + int ret; + + ret = fwnode_property_read_u32_array(dev_fwnode(dev), + "bosch,mram-cfg", + mram_config_vals, + sizeof(mram_config_vals) / 4); + if (ret) { + dev_err(dev, "Could not get Message RAM configuration."); + goto out; + } + + /* Get TX FIFO size + * Defines the total amount of echo buffers for loopback + */ + tx_fifo_size = mram_config_vals[7]; + + /* allocate the m_can device */ + net_dev = alloc_candev(sizeof_priv, tx_fifo_size); + if (!net_dev) { + dev_err(dev, "Failed to allocate CAN device"); + goto out; + } + + class_dev = netdev_priv(net_dev); + class_dev->net = net_dev; + class_dev->dev = dev; + SET_NETDEV_DEV(net_dev, dev); + + m_can_of_parse_mram(class_dev, mram_config_vals); +out: + return class_dev; +} +EXPORT_SYMBOL_GPL(m_can_class_allocate_dev); + +void m_can_class_free_dev(struct net_device *net) +{ + free_candev(net); +} +EXPORT_SYMBOL_GPL(m_can_class_free_dev); + +int m_can_class_register(struct m_can_classdev *cdev) +{ + int ret; + + if (cdev->pm_clock_support) { + ret = m_can_clk_start(cdev); + if (ret) + return ret; + } + + if (cdev->is_peripheral) { + ret = can_rx_offload_add_manual(cdev->net, &cdev->offload, + NAPI_POLL_WEIGHT); + if (ret) + goto clk_disable; + } + + if (!cdev->net->irq) + cdev->hrtimer.function = 
&hrtimer_callback;
+
+	ret = m_can_dev_setup(cdev);
+	if (ret)
+		goto rx_offload_del;
+
+	ret = register_m_can_dev(cdev->net);
+	if (ret) {
+		dev_err(cdev->dev, "registering %s failed (err=%d)\n",
+			cdev->net->name, ret);
+		goto rx_offload_del;
+	}
+
+	of_can_transceiver(cdev->net);
+
+	dev_info(cdev->dev, "%s device registered (irq=%d, version=%d)\n",
+		 KBUILD_MODNAME, cdev->net->irq, cdev->version);
+
+	/* Probe finished
+	 * Stop clocks. They will be reactivated once the M_CAN device is opened
+	 */
+	m_can_clk_stop(cdev);
+
+	return 0;
+
+rx_offload_del:
+	if (cdev->is_peripheral)
+		can_rx_offload_del(&cdev->offload);
+clk_disable:
+	m_can_clk_stop(cdev);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(m_can_class_register);
+
+void m_can_class_unregister(struct m_can_classdev *cdev)
+{
+	if (cdev->is_peripheral)
+		can_rx_offload_del(&cdev->offload);
+	unregister_candev(cdev->net);
+}
+EXPORT_SYMBOL_GPL(m_can_class_unregister);
+
+int m_can_class_suspend(struct device *dev)
+{
+	struct m_can_classdev *cdev = dev_get_drvdata(dev);
+	struct net_device *ndev = cdev->net;
+
+	if (netif_running(ndev)) {
+		netif_stop_queue(ndev);
+		netif_device_detach(ndev);
+		m_can_stop(ndev);
+		m_can_clk_stop(cdev);
+	}
+
+	pinctrl_pm_select_sleep_state(dev);
+
+	cdev->can.state = CAN_STATE_SLEEPING;
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(m_can_class_suspend);
+
+int m_can_class_resume(struct device *dev)
+{
+	struct m_can_classdev *cdev = dev_get_drvdata(dev);
+	struct net_device *ndev = cdev->net;
+
+	pinctrl_pm_select_default_state(dev);
+
+	cdev->can.state = CAN_STATE_ERROR_ACTIVE;
+
+	if (netif_running(ndev)) {
+		int ret;
+
+		ret = m_can_clk_start(cdev);
+		if (ret)
+			return ret;
+		ret = m_can_start(ndev);
+		if (ret) {
+			m_can_clk_stop(cdev);
+
+			return ret;
+		}
+
+		netif_device_attach(ndev);
+		netif_start_queue(ndev);
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(m_can_class_resume);
+
+MODULE_AUTHOR("Dong Aisheng <b29396@freescale.com>");
+MODULE_AUTHOR("Dan Murphy <dmurphy@ti.com>");
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("CAN bus driver for Bosch M_CAN controller");
diff --git a/drivers/net/can/m_can/m_can.h b/drivers/net/can/m_can/m_can.h
new file mode 100644
index 0000000000..520e14277d
--- /dev/null
+++ b/drivers/net/can/m_can/m_can.h
@@ -0,0 +1,110 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* CAN bus driver for Bosch M_CAN controller
+ * Copyright (C) 2018 Texas Instruments Incorporated - http://www.ti.com/
+ */
+
+#ifndef _CAN_M_CAN_H_
+#define _CAN_M_CAN_H_
+
+#include <linux/can/core.h>
+#include <linux/can/dev.h>
+#include <linux/can/rx-offload.h>
+#include <linux/clk.h>
+#include <linux/completion.h>
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/freezer.h>
+#include <linux/hrtimer.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/phy/phy.h>
+#include <linux/pinctrl/consumer.h>
+#include <linux/pm_runtime.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+
+/* m_can lec values */
+enum m_can_lec_type {
+	LEC_NO_ERROR = 0,
+	LEC_STUFF_ERROR,
+	LEC_FORM_ERROR,
+	LEC_ACK_ERROR,
+	LEC_BIT1_ERROR,
+	LEC_BIT0_ERROR,
+	LEC_CRC_ERROR,
+	LEC_NO_CHANGE,
+};
+
+enum m_can_mram_cfg {
+	MRAM_SIDF = 0,
+	MRAM_XIDF,
+	MRAM_RXF0,
+	MRAM_RXF1,
+	MRAM_RXB,
+	MRAM_TXE,
+	MRAM_TXB,
+	MRAM_CFG_NUM,
+};
+
+/* address offset and element number for each FIFO/Buffer in the Message RAM */
+struct mram_cfg {
+	u16 off;
+	u8 num;
+};
+
+struct m_can_classdev;
+struct m_can_ops {
+	/* Device specific callbacks */
+	int (*clear_interrupts)(struct m_can_classdev *cdev);
+	u32 (*read_reg)(struct m_can_classdev *cdev, int reg);
+	int (*write_reg)(struct m_can_classdev *cdev, int reg, int val);
+	int (*read_fifo)(struct m_can_classdev *cdev, int addr_offset, void *val, size_t val_count);
+	int (*write_fifo)(struct m_can_classdev *cdev, int addr_offset,
+ const void *val, size_t val_count); + int (*init)(struct m_can_classdev *cdev); +}; + +struct m_can_classdev { + struct can_priv can; + struct can_rx_offload offload; + struct napi_struct napi; + struct net_device *net; + struct device *dev; + struct clk *hclk; + struct clk *cclk; + + struct workqueue_struct *tx_wq; + struct work_struct tx_work; + struct sk_buff *tx_skb; + struct phy *transceiver; + + struct m_can_ops *ops; + + int version; + u32 irqstatus; + + int pm_clock_support; + int is_peripheral; + + struct mram_cfg mcfg[MRAM_CFG_NUM]; + + struct hrtimer hrtimer; +}; + +struct m_can_classdev *m_can_class_allocate_dev(struct device *dev, int sizeof_priv); +void m_can_class_free_dev(struct net_device *net); +int m_can_class_register(struct m_can_classdev *cdev); +void m_can_class_unregister(struct m_can_classdev *cdev); +int m_can_class_get_clocks(struct m_can_classdev *cdev); +int m_can_init_ram(struct m_can_classdev *priv); +int m_can_check_mram_cfg(struct m_can_classdev *cdev, u32 mram_max_size); + +int m_can_class_suspend(struct device *dev); +int m_can_class_resume(struct device *dev); +#endif /* _CAN_M_H_ */ diff --git a/drivers/net/can/m_can/m_can_pci.c b/drivers/net/can/m_can/m_can_pci.c new file mode 100644 index 0000000000..f2219aa282 --- /dev/null +++ b/drivers/net/can/m_can/m_can_pci.c @@ -0,0 +1,206 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * PCI Specific M_CAN Glue + * + * Copyright (C) 2018-2020 Intel Corporation + * Author: Felipe Balbi (Intel) + * Author: Jarkko Nikula + * Author: Raymond Tan + */ + +#include +#include +#include +#include +#include + +#include "m_can.h" + +#define M_CAN_PCI_MMIO_BAR 0 + +#define M_CAN_CLOCK_FREQ_EHL 200000000 +#define CTL_CSR_INT_CTL_OFFSET 0x508 + +struct m_can_pci_priv { + struct m_can_classdev cdev; + + void __iomem *base; +}; + +static inline struct m_can_pci_priv *cdev_to_priv(struct m_can_classdev *cdev) +{ + return container_of(cdev, struct m_can_pci_priv, cdev); +} + +static u32 iomap_read_reg(struct m_can_classdev *cdev, int reg) +{ + struct m_can_pci_priv *priv = cdev_to_priv(cdev); + + return readl(priv->base + reg); +} + +static int iomap_read_fifo(struct m_can_classdev *cdev, int offset, void *val, size_t val_count) +{ + struct m_can_pci_priv *priv = cdev_to_priv(cdev); + void __iomem *src = priv->base + offset; + + while (val_count--) { + *(unsigned int *)val = ioread32(src); + val += 4; + src += 4; + } + + return 0; +} + +static int iomap_write_reg(struct m_can_classdev *cdev, int reg, int val) +{ + struct m_can_pci_priv *priv = cdev_to_priv(cdev); + + writel(val, priv->base + reg); + + return 0; +} + +static int iomap_write_fifo(struct m_can_classdev *cdev, int offset, + const void *val, size_t val_count) +{ + struct m_can_pci_priv *priv = cdev_to_priv(cdev); + void __iomem *dst = priv->base + offset; + + while (val_count--) { + iowrite32(*(unsigned int *)val, dst); + val += 4; + dst += 4; + } + + return 0; +} + +static struct m_can_ops m_can_pci_ops = { + .read_reg = iomap_read_reg, + .write_reg = iomap_write_reg, + .write_fifo = iomap_write_fifo, + .read_fifo = iomap_read_fifo, +}; + +static int m_can_pci_probe(struct pci_dev *pci, const struct pci_device_id *id) +{ + struct device *dev = &pci->dev; + struct m_can_classdev *mcan_class; + struct m_can_pci_priv *priv; + void __iomem *base; + int ret; + + ret = pcim_enable_device(pci); + if (ret) + return ret; + + pci_set_master(pci); + + ret = pcim_iomap_regions(pci, BIT(M_CAN_PCI_MMIO_BAR), pci_name(pci)); + if (ret) + return ret; + + base = 
pcim_iomap_table(pci)[M_CAN_PCI_MMIO_BAR]; + + if (!base) { + dev_err(dev, "failed to map BARs\n"); + return -ENOMEM; + } + + mcan_class = m_can_class_allocate_dev(&pci->dev, + sizeof(struct m_can_pci_priv)); + if (!mcan_class) + return -ENOMEM; + + priv = cdev_to_priv(mcan_class); + + priv->base = base; + + ret = pci_alloc_irq_vectors(pci, 1, 1, PCI_IRQ_ALL_TYPES); + if (ret < 0) + goto err_free_dev; + + mcan_class->dev = &pci->dev; + mcan_class->net->irq = pci_irq_vector(pci, 0); + mcan_class->pm_clock_support = 1; + mcan_class->can.clock.freq = id->driver_data; + mcan_class->ops = &m_can_pci_ops; + + pci_set_drvdata(pci, mcan_class); + + ret = m_can_class_register(mcan_class); + if (ret) + goto err_free_irq; + + /* Enable interrupt control at CAN wrapper IP */ + writel(0x1, base + CTL_CSR_INT_CTL_OFFSET); + + pm_runtime_set_autosuspend_delay(dev, 1000); + pm_runtime_use_autosuspend(dev); + pm_runtime_put_noidle(dev); + pm_runtime_allow(dev); + + return 0; + +err_free_irq: + pci_free_irq_vectors(pci); +err_free_dev: + m_can_class_free_dev(mcan_class->net); + return ret; +} + +static void m_can_pci_remove(struct pci_dev *pci) +{ + struct m_can_classdev *mcan_class = pci_get_drvdata(pci); + struct m_can_pci_priv *priv = cdev_to_priv(mcan_class); + + pm_runtime_forbid(&pci->dev); + pm_runtime_get_noresume(&pci->dev); + + /* Disable interrupt control at CAN wrapper IP */ + writel(0x0, priv->base + CTL_CSR_INT_CTL_OFFSET); + + m_can_class_unregister(mcan_class); + m_can_class_free_dev(mcan_class->net); + pci_free_irq_vectors(pci); +} + +static __maybe_unused int m_can_pci_suspend(struct device *dev) +{ + return m_can_class_suspend(dev); +} + +static __maybe_unused int m_can_pci_resume(struct device *dev) +{ + return m_can_class_resume(dev); +} + +static SIMPLE_DEV_PM_OPS(m_can_pci_pm_ops, + m_can_pci_suspend, m_can_pci_resume); + +static const struct pci_device_id m_can_pci_id_table[] = { + { PCI_VDEVICE(INTEL, 0x4bc1), M_CAN_CLOCK_FREQ_EHL, }, + { PCI_VDEVICE(INTEL, 0x4bc2), M_CAN_CLOCK_FREQ_EHL, }, + { } /* Terminating Entry */ +}; +MODULE_DEVICE_TABLE(pci, m_can_pci_id_table); + +static struct pci_driver m_can_pci_driver = { + .name = "m_can_pci", + .probe = m_can_pci_probe, + .remove = m_can_pci_remove, + .id_table = m_can_pci_id_table, + .driver = { + .pm = &m_can_pci_pm_ops, + }, +}; + +module_pci_driver(m_can_pci_driver); + +MODULE_AUTHOR("Felipe Balbi (Intel)"); +MODULE_AUTHOR("Jarkko Nikula "); +MODULE_AUTHOR("Raymond Tan "); +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("CAN bus driver for Bosch M_CAN controller on PCI bus"); diff --git a/drivers/net/can/m_can/m_can_platform.c b/drivers/net/can/m_can/m_can_platform.c new file mode 100644 index 0000000000..cdb28d6a09 --- /dev/null +++ b/drivers/net/can/m_can/m_can_platform.c @@ -0,0 +1,245 @@ +// SPDX-License-Identifier: GPL-2.0 +// IOMapped CAN bus driver for Bosch M_CAN controller +// Copyright (C) 2014 Freescale Semiconductor, Inc. 
+// Dong Aisheng +// +// Copyright (C) 2018-19 Texas Instruments Incorporated - http://www.ti.com/ + +#include +#include +#include + +#include "m_can.h" + +struct m_can_plat_priv { + struct m_can_classdev cdev; + + void __iomem *base; + void __iomem *mram_base; +}; + +static inline struct m_can_plat_priv *cdev_to_priv(struct m_can_classdev *cdev) +{ + return container_of(cdev, struct m_can_plat_priv, cdev); +} + +static u32 iomap_read_reg(struct m_can_classdev *cdev, int reg) +{ + struct m_can_plat_priv *priv = cdev_to_priv(cdev); + + return readl(priv->base + reg); +} + +static int iomap_read_fifo(struct m_can_classdev *cdev, int offset, void *val, size_t val_count) +{ + struct m_can_plat_priv *priv = cdev_to_priv(cdev); + void __iomem *src = priv->mram_base + offset; + + while (val_count--) { + *(unsigned int *)val = ioread32(src); + val += 4; + src += 4; + } + + return 0; +} + +static int iomap_write_reg(struct m_can_classdev *cdev, int reg, int val) +{ + struct m_can_plat_priv *priv = cdev_to_priv(cdev); + + writel(val, priv->base + reg); + + return 0; +} + +static int iomap_write_fifo(struct m_can_classdev *cdev, int offset, + const void *val, size_t val_count) +{ + struct m_can_plat_priv *priv = cdev_to_priv(cdev); + void __iomem *dst = priv->mram_base + offset; + + while (val_count--) { + iowrite32(*(unsigned int *)val, dst); + val += 4; + dst += 4; + } + + return 0; +} + +static struct m_can_ops m_can_plat_ops = { + .read_reg = iomap_read_reg, + .write_reg = iomap_write_reg, + .write_fifo = iomap_write_fifo, + .read_fifo = iomap_read_fifo, +}; + +static int m_can_plat_probe(struct platform_device *pdev) +{ + struct m_can_classdev *mcan_class; + struct m_can_plat_priv *priv; + struct resource *res; + void __iomem *addr; + void __iomem *mram_addr; + struct phy *transceiver; + int irq = 0, ret = 0; + + mcan_class = m_can_class_allocate_dev(&pdev->dev, + sizeof(struct m_can_plat_priv)); + if (!mcan_class) + return -ENOMEM; + + priv = cdev_to_priv(mcan_class); + + ret = m_can_class_get_clocks(mcan_class); + if (ret) + goto probe_fail; + + addr = devm_platform_ioremap_resource_byname(pdev, "m_can"); + if (IS_ERR(addr)) { + ret = PTR_ERR(addr); + goto probe_fail; + } + + if (device_property_present(mcan_class->dev, "interrupts") || + device_property_present(mcan_class->dev, "interrupt-names")) { + irq = platform_get_irq_byname(pdev, "int0"); + if (irq < 0) { + ret = irq; + goto probe_fail; + } + } else { + dev_dbg(mcan_class->dev, "Polling enabled, initialize hrtimer"); + hrtimer_init(&mcan_class->hrtimer, CLOCK_MONOTONIC, + HRTIMER_MODE_REL_PINNED); + } + + /* message ram could be shared */ + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "message_ram"); + if (!res) { + ret = -ENODEV; + goto probe_fail; + } + + mram_addr = devm_ioremap(&pdev->dev, res->start, resource_size(res)); + if (!mram_addr) { + ret = -ENOMEM; + goto probe_fail; + } + + transceiver = devm_phy_optional_get(&pdev->dev, NULL); + if (IS_ERR(transceiver)) { + ret = PTR_ERR(transceiver); + dev_err_probe(&pdev->dev, ret, "failed to get phy\n"); + goto probe_fail; + } + + if (transceiver) + mcan_class->can.bitrate_max = transceiver->attrs.max_link_rate; + + priv->base = addr; + priv->mram_base = mram_addr; + + mcan_class->net->irq = irq; + mcan_class->pm_clock_support = 1; + mcan_class->can.clock.freq = clk_get_rate(mcan_class->cclk); + mcan_class->dev = &pdev->dev; + mcan_class->transceiver = transceiver; + + mcan_class->ops = &m_can_plat_ops; + + mcan_class->is_peripheral = false; + + platform_set_drvdata(pdev, 
mcan_class); + + pm_runtime_enable(mcan_class->dev); + ret = m_can_class_register(mcan_class); + if (ret) + goto out_runtime_disable; + + return ret; + +out_runtime_disable: + pm_runtime_disable(mcan_class->dev); +probe_fail: + m_can_class_free_dev(mcan_class->net); + return ret; +} + +static __maybe_unused int m_can_suspend(struct device *dev) +{ + return m_can_class_suspend(dev); +} + +static __maybe_unused int m_can_resume(struct device *dev) +{ + return m_can_class_resume(dev); +} + +static void m_can_plat_remove(struct platform_device *pdev) +{ + struct m_can_plat_priv *priv = platform_get_drvdata(pdev); + struct m_can_classdev *mcan_class = &priv->cdev; + + m_can_class_unregister(mcan_class); + + m_can_class_free_dev(mcan_class->net); +} + +static int __maybe_unused m_can_runtime_suspend(struct device *dev) +{ + struct m_can_plat_priv *priv = dev_get_drvdata(dev); + struct m_can_classdev *mcan_class = &priv->cdev; + + clk_disable_unprepare(mcan_class->cclk); + clk_disable_unprepare(mcan_class->hclk); + + return 0; +} + +static int __maybe_unused m_can_runtime_resume(struct device *dev) +{ + struct m_can_plat_priv *priv = dev_get_drvdata(dev); + struct m_can_classdev *mcan_class = &priv->cdev; + int err; + + err = clk_prepare_enable(mcan_class->hclk); + if (err) + return err; + + err = clk_prepare_enable(mcan_class->cclk); + if (err) + clk_disable_unprepare(mcan_class->hclk); + + return err; +} + +static const struct dev_pm_ops m_can_pmops = { + SET_RUNTIME_PM_OPS(m_can_runtime_suspend, + m_can_runtime_resume, NULL) + SET_SYSTEM_SLEEP_PM_OPS(m_can_suspend, m_can_resume) +}; + +static const struct of_device_id m_can_of_table[] = { + { .compatible = "bosch,m_can", .data = NULL }, + { /* sentinel */ }, +}; +MODULE_DEVICE_TABLE(of, m_can_of_table); + +static struct platform_driver m_can_plat_driver = { + .driver = { + .name = KBUILD_MODNAME, + .of_match_table = m_can_of_table, + .pm = &m_can_pmops, + }, + .probe = m_can_plat_probe, + .remove_new = m_can_plat_remove, +}; + +module_platform_driver(m_can_plat_driver); + +MODULE_AUTHOR("Dong Aisheng "); +MODULE_AUTHOR("Dan Murphy "); +MODULE_LICENSE("GPL v2"); +MODULE_DESCRIPTION("M_CAN driver for IO Mapped Bosch controllers"); diff --git a/drivers/net/can/m_can/tcan4x5x-core.c b/drivers/net/can/m_can/tcan4x5x-core.c new file mode 100644 index 0000000000..ae8c42f5de --- /dev/null +++ b/drivers/net/can/m_can/tcan4x5x-core.c @@ -0,0 +1,522 @@ +// SPDX-License-Identifier: GPL-2.0 +// SPI to CAN driver for the Texas Instruments TCAN4x5x +// Copyright (C) 2018-19 Texas Instruments Incorporated - http://www.ti.com/ + +#include "tcan4x5x.h" + +#define TCAN4X5X_EXT_CLK_DEF 40000000 + +#define TCAN4X5X_DEV_ID1 0x00 +#define TCAN4X5X_DEV_ID1_TCAN 0x4e414354 /* ASCII TCAN */ +#define TCAN4X5X_DEV_ID2 0x04 +#define TCAN4X5X_REV 0x08 +#define TCAN4X5X_STATUS 0x0C +#define TCAN4X5X_ERROR_STATUS_MASK 0x10 +#define TCAN4X5X_CONTROL 0x14 + +#define TCAN4X5X_CONFIG 0x800 +#define TCAN4X5X_TS_PRESCALE 0x804 +#define TCAN4X5X_TEST_REG 0x808 +#define TCAN4X5X_INT_FLAGS 0x820 +#define TCAN4X5X_MCAN_INT_REG 0x824 +#define TCAN4X5X_INT_EN 0x830 + +/* Interrupt bits */ +#define TCAN4X5X_CANBUSTERMOPEN_INT_EN BIT(30) +#define TCAN4X5X_CANHCANL_INT_EN BIT(29) +#define TCAN4X5X_CANHBAT_INT_EN BIT(28) +#define TCAN4X5X_CANLGND_INT_EN BIT(27) +#define TCAN4X5X_CANBUSOPEN_INT_EN BIT(26) +#define TCAN4X5X_CANBUSGND_INT_EN BIT(25) +#define TCAN4X5X_CANBUSBAT_INT_EN BIT(24) +#define TCAN4X5X_UVSUP_INT_EN BIT(22) +#define TCAN4X5X_UVIO_INT_EN BIT(21) +#define 
TCAN4X5X_TSD_INT_EN BIT(19) +#define TCAN4X5X_ECCERR_INT_EN BIT(16) +#define TCAN4X5X_CANINT_INT_EN BIT(15) +#define TCAN4X5X_LWU_INT_EN BIT(14) +#define TCAN4X5X_CANSLNT_INT_EN BIT(10) +#define TCAN4X5X_CANDOM_INT_EN BIT(8) +#define TCAN4X5X_CANBUS_ERR_INT_EN BIT(5) +#define TCAN4X5X_BUS_FAULT BIT(4) +#define TCAN4X5X_MCAN_INT BIT(1) +#define TCAN4X5X_ENABLE_TCAN_INT \ + (TCAN4X5X_MCAN_INT | TCAN4X5X_BUS_FAULT | \ + TCAN4X5X_CANBUS_ERR_INT_EN | TCAN4X5X_CANINT_INT_EN) + +/* MCAN Interrupt bits */ +#define TCAN4X5X_MCAN_IR_ARA BIT(29) +#define TCAN4X5X_MCAN_IR_PED BIT(28) +#define TCAN4X5X_MCAN_IR_PEA BIT(27) +#define TCAN4X5X_MCAN_IR_WD BIT(26) +#define TCAN4X5X_MCAN_IR_BO BIT(25) +#define TCAN4X5X_MCAN_IR_EW BIT(24) +#define TCAN4X5X_MCAN_IR_EP BIT(23) +#define TCAN4X5X_MCAN_IR_ELO BIT(22) +#define TCAN4X5X_MCAN_IR_BEU BIT(21) +#define TCAN4X5X_MCAN_IR_BEC BIT(20) +#define TCAN4X5X_MCAN_IR_DRX BIT(19) +#define TCAN4X5X_MCAN_IR_TOO BIT(18) +#define TCAN4X5X_MCAN_IR_MRAF BIT(17) +#define TCAN4X5X_MCAN_IR_TSW BIT(16) +#define TCAN4X5X_MCAN_IR_TEFL BIT(15) +#define TCAN4X5X_MCAN_IR_TEFF BIT(14) +#define TCAN4X5X_MCAN_IR_TEFW BIT(13) +#define TCAN4X5X_MCAN_IR_TEFN BIT(12) +#define TCAN4X5X_MCAN_IR_TFE BIT(11) +#define TCAN4X5X_MCAN_IR_TCF BIT(10) +#define TCAN4X5X_MCAN_IR_TC BIT(9) +#define TCAN4X5X_MCAN_IR_HPM BIT(8) +#define TCAN4X5X_MCAN_IR_RF1L BIT(7) +#define TCAN4X5X_MCAN_IR_RF1F BIT(6) +#define TCAN4X5X_MCAN_IR_RF1W BIT(5) +#define TCAN4X5X_MCAN_IR_RF1N BIT(4) +#define TCAN4X5X_MCAN_IR_RF0L BIT(3) +#define TCAN4X5X_MCAN_IR_RF0F BIT(2) +#define TCAN4X5X_MCAN_IR_RF0W BIT(1) +#define TCAN4X5X_MCAN_IR_RF0N BIT(0) +#define TCAN4X5X_ENABLE_MCAN_INT \ + (TCAN4X5X_MCAN_IR_TC | TCAN4X5X_MCAN_IR_RF0N | \ + TCAN4X5X_MCAN_IR_RF1N | TCAN4X5X_MCAN_IR_RF0F | \ + TCAN4X5X_MCAN_IR_RF1F) + +#define TCAN4X5X_MRAM_START 0x8000 +#define TCAN4X5X_MRAM_SIZE 0x800 +#define TCAN4X5X_MCAN_OFFSET 0x1000 + +#define TCAN4X5X_CLEAR_ALL_INT 0xffffffff +#define TCAN4X5X_SET_ALL_INT 0xffffffff + +#define TCAN4X5X_MODE_SEL_MASK (BIT(7) | BIT(6)) +#define TCAN4X5X_MODE_SLEEP 0x00 +#define TCAN4X5X_MODE_STANDBY BIT(6) +#define TCAN4X5X_MODE_NORMAL BIT(7) + +#define TCAN4X5X_DISABLE_WAKE_MSK (BIT(31) | BIT(30)) +#define TCAN4X5X_DISABLE_INH_MSK BIT(9) + +#define TCAN4X5X_SW_RESET BIT(2) + +#define TCAN4X5X_MCAN_CONFIGURED BIT(5) +#define TCAN4X5X_WATCHDOG_EN BIT(3) +#define TCAN4X5X_WD_60_MS_TIMER 0 +#define TCAN4X5X_WD_600_MS_TIMER BIT(28) +#define TCAN4X5X_WD_3_S_TIMER BIT(29) +#define TCAN4X5X_WD_6_S_TIMER (BIT(28) | BIT(29)) + +struct tcan4x5x_version_info { + const char *name; + u32 id2_register; + + bool has_wake_pin; + bool has_state_pin; +}; + +enum { + TCAN4552 = 0, + TCAN4553, + TCAN4X5X, +}; + +static const struct tcan4x5x_version_info tcan4x5x_versions[] = { + [TCAN4552] = { + .name = "4552", + .id2_register = 0x32353534, + }, + [TCAN4553] = { + .name = "4553", + .id2_register = 0x33353534, + }, + /* generic version with no id2_register at the end */ + [TCAN4X5X] = { + .name = "generic", + .has_wake_pin = true, + .has_state_pin = true, + }, +}; + +static inline struct tcan4x5x_priv *cdev_to_priv(struct m_can_classdev *cdev) +{ + return container_of(cdev, struct tcan4x5x_priv, cdev); +} + +static void tcan4x5x_check_wake(struct tcan4x5x_priv *priv) +{ + int wake_state = 0; + + if (priv->device_state_gpio) + wake_state = gpiod_get_value(priv->device_state_gpio); + + if (priv->device_wake_gpio && wake_state) { + gpiod_set_value(priv->device_wake_gpio, 0); + usleep_range(5, 50); + 
gpiod_set_value(priv->device_wake_gpio, 1); + } +} + +static int tcan4x5x_reset(struct tcan4x5x_priv *priv) +{ + int ret = 0; + + if (priv->reset_gpio) { + gpiod_set_value(priv->reset_gpio, 1); + + /* tpulse_width minimum 30us */ + usleep_range(30, 100); + gpiod_set_value(priv->reset_gpio, 0); + } else { + ret = regmap_write(priv->regmap, TCAN4X5X_CONFIG, + TCAN4X5X_SW_RESET); + if (ret) + return ret; + } + + usleep_range(700, 1000); + + return ret; +} + +static u32 tcan4x5x_read_reg(struct m_can_classdev *cdev, int reg) +{ + struct tcan4x5x_priv *priv = cdev_to_priv(cdev); + u32 val; + + regmap_read(priv->regmap, TCAN4X5X_MCAN_OFFSET + reg, &val); + + return val; +} + +static int tcan4x5x_read_fifo(struct m_can_classdev *cdev, int addr_offset, + void *val, size_t val_count) +{ + struct tcan4x5x_priv *priv = cdev_to_priv(cdev); + + return regmap_bulk_read(priv->regmap, TCAN4X5X_MRAM_START + addr_offset, val, val_count); +} + +static int tcan4x5x_write_reg(struct m_can_classdev *cdev, int reg, int val) +{ + struct tcan4x5x_priv *priv = cdev_to_priv(cdev); + + return regmap_write(priv->regmap, TCAN4X5X_MCAN_OFFSET + reg, val); +} + +static int tcan4x5x_write_fifo(struct m_can_classdev *cdev, + int addr_offset, const void *val, size_t val_count) +{ + struct tcan4x5x_priv *priv = cdev_to_priv(cdev); + + return regmap_bulk_write(priv->regmap, TCAN4X5X_MRAM_START + addr_offset, val, val_count); +} + +static int tcan4x5x_power_enable(struct regulator *reg, int enable) +{ + if (IS_ERR_OR_NULL(reg)) + return 0; + + if (enable) + return regulator_enable(reg); + else + return regulator_disable(reg); +} + +static int tcan4x5x_write_tcan_reg(struct m_can_classdev *cdev, + int reg, int val) +{ + struct tcan4x5x_priv *priv = cdev_to_priv(cdev); + + return regmap_write(priv->regmap, reg, val); +} + +static int tcan4x5x_clear_interrupts(struct m_can_classdev *cdev) +{ + int ret; + + ret = tcan4x5x_write_tcan_reg(cdev, TCAN4X5X_STATUS, + TCAN4X5X_CLEAR_ALL_INT); + if (ret) + return ret; + + return tcan4x5x_write_tcan_reg(cdev, TCAN4X5X_INT_FLAGS, + TCAN4X5X_CLEAR_ALL_INT); +} + +static int tcan4x5x_init(struct m_can_classdev *cdev) +{ + struct tcan4x5x_priv *tcan4x5x = cdev_to_priv(cdev); + int ret; + + tcan4x5x_check_wake(tcan4x5x); + + ret = tcan4x5x_clear_interrupts(cdev); + if (ret) + return ret; + + ret = tcan4x5x_write_tcan_reg(cdev, TCAN4X5X_INT_EN, + TCAN4X5X_ENABLE_TCAN_INT); + if (ret) + return ret; + + ret = tcan4x5x_write_tcan_reg(cdev, TCAN4X5X_ERROR_STATUS_MASK, + TCAN4X5X_CLEAR_ALL_INT); + if (ret) + return ret; + + ret = regmap_update_bits(tcan4x5x->regmap, TCAN4X5X_CONFIG, + TCAN4X5X_MODE_SEL_MASK, TCAN4X5X_MODE_NORMAL); + if (ret) + return ret; + + return ret; +} + +static int tcan4x5x_disable_wake(struct m_can_classdev *cdev) +{ + struct tcan4x5x_priv *tcan4x5x = cdev_to_priv(cdev); + + return regmap_update_bits(tcan4x5x->regmap, TCAN4X5X_CONFIG, + TCAN4X5X_DISABLE_WAKE_MSK, 0x00); +} + +static int tcan4x5x_disable_state(struct m_can_classdev *cdev) +{ + struct tcan4x5x_priv *tcan4x5x = cdev_to_priv(cdev); + + return regmap_update_bits(tcan4x5x->regmap, TCAN4X5X_CONFIG, + TCAN4X5X_DISABLE_INH_MSK, 0x01); +} + +static const struct tcan4x5x_version_info +*tcan4x5x_find_version(struct tcan4x5x_priv *priv) +{ + u32 val; + int ret; + + ret = regmap_read(priv->regmap, TCAN4X5X_DEV_ID1, &val); + if (ret) + return ERR_PTR(ret); + + if (val != TCAN4X5X_DEV_ID1_TCAN) { + dev_err(&priv->spi->dev, "Not a tcan device %x\n", val); + return ERR_PTR(-ENODEV); + } + + ret = regmap_read(priv->regmap, 
TCAN4X5X_DEV_ID2, &val); + if (ret) + return ERR_PTR(ret); + + for (int i = 0; i != ARRAY_SIZE(tcan4x5x_versions); ++i) { + const struct tcan4x5x_version_info *vinfo = &tcan4x5x_versions[i]; + + if (!vinfo->id2_register || val == vinfo->id2_register) { + dev_info(&priv->spi->dev, "Detected TCAN device version %s\n", + vinfo->name); + return vinfo; + } + } + + return &tcan4x5x_versions[TCAN4X5X]; +} + +static int tcan4x5x_get_gpios(struct m_can_classdev *cdev, + const struct tcan4x5x_version_info *version_info) +{ + struct tcan4x5x_priv *tcan4x5x = cdev_to_priv(cdev); + int ret; + + if (version_info->has_wake_pin) { + tcan4x5x->device_wake_gpio = devm_gpiod_get(cdev->dev, "device-wake", + GPIOD_OUT_HIGH); + if (IS_ERR(tcan4x5x->device_wake_gpio)) { + if (PTR_ERR(tcan4x5x->device_wake_gpio) == -EPROBE_DEFER) + return -EPROBE_DEFER; + + tcan4x5x_disable_wake(cdev); + } + } + + tcan4x5x->reset_gpio = devm_gpiod_get_optional(cdev->dev, "reset", + GPIOD_OUT_LOW); + if (IS_ERR(tcan4x5x->reset_gpio)) + tcan4x5x->reset_gpio = NULL; + + ret = tcan4x5x_reset(tcan4x5x); + if (ret) + return ret; + + if (version_info->has_state_pin) { + tcan4x5x->device_state_gpio = devm_gpiod_get_optional(cdev->dev, + "device-state", + GPIOD_IN); + if (IS_ERR(tcan4x5x->device_state_gpio)) { + tcan4x5x->device_state_gpio = NULL; + tcan4x5x_disable_state(cdev); + } + } + + return 0; +} + +static struct m_can_ops tcan4x5x_ops = { + .init = tcan4x5x_init, + .read_reg = tcan4x5x_read_reg, + .write_reg = tcan4x5x_write_reg, + .write_fifo = tcan4x5x_write_fifo, + .read_fifo = tcan4x5x_read_fifo, + .clear_interrupts = tcan4x5x_clear_interrupts, +}; + +static int tcan4x5x_can_probe(struct spi_device *spi) +{ + const struct tcan4x5x_version_info *version_info; + struct tcan4x5x_priv *priv; + struct m_can_classdev *mcan_class; + int freq, ret; + + mcan_class = m_can_class_allocate_dev(&spi->dev, + sizeof(struct tcan4x5x_priv)); + if (!mcan_class) + return -ENOMEM; + + ret = m_can_check_mram_cfg(mcan_class, TCAN4X5X_MRAM_SIZE); + if (ret) + goto out_m_can_class_free_dev; + + priv = cdev_to_priv(mcan_class); + + priv->power = devm_regulator_get_optional(&spi->dev, "vsup"); + if (PTR_ERR(priv->power) == -EPROBE_DEFER) { + ret = -EPROBE_DEFER; + goto out_m_can_class_free_dev; + } else { + priv->power = NULL; + } + + m_can_class_get_clocks(mcan_class); + if (IS_ERR(mcan_class->cclk)) { + dev_err(&spi->dev, "no CAN clock source defined\n"); + freq = TCAN4X5X_EXT_CLK_DEF; + } else { + freq = clk_get_rate(mcan_class->cclk); + } + + /* Sanity check */ + if (freq < 20000000 || freq > TCAN4X5X_EXT_CLK_DEF) { + dev_err(&spi->dev, "Clock frequency is out of supported range %d\n", + freq); + ret = -ERANGE; + goto out_m_can_class_free_dev; + } + + priv->spi = spi; + + mcan_class->pm_clock_support = 0; + mcan_class->can.clock.freq = freq; + mcan_class->dev = &spi->dev; + mcan_class->ops = &tcan4x5x_ops; + mcan_class->is_peripheral = true; + mcan_class->net->irq = spi->irq; + + spi_set_drvdata(spi, priv); + + /* Configure the SPI bus */ + spi->bits_per_word = 8; + ret = spi_setup(spi); + if (ret) { + dev_err(&spi->dev, "SPI setup failed %pe\n", ERR_PTR(ret)); + goto out_m_can_class_free_dev; + } + + ret = tcan4x5x_regmap_init(priv); + if (ret) { + dev_err(&spi->dev, "regmap init failed %pe\n", ERR_PTR(ret)); + goto out_m_can_class_free_dev; + } + + ret = tcan4x5x_power_enable(priv->power, 1); + if (ret) { + dev_err(&spi->dev, "Enabling regulator failed %pe\n", + ERR_PTR(ret)); + goto out_m_can_class_free_dev; + } + + version_info = 
tcan4x5x_find_version(priv); + if (IS_ERR(version_info)) { + ret = PTR_ERR(version_info); + goto out_power; + } + + ret = tcan4x5x_get_gpios(mcan_class, version_info); + if (ret) { + dev_err(&spi->dev, "Getting gpios failed %pe\n", ERR_PTR(ret)); + goto out_power; + } + + ret = tcan4x5x_init(mcan_class); + if (ret) { + dev_err(&spi->dev, "tcan initialization failed %pe\n", + ERR_PTR(ret)); + goto out_power; + } + + ret = m_can_class_register(mcan_class); + if (ret) { + dev_err(&spi->dev, "Failed registering m_can device %pe\n", + ERR_PTR(ret)); + goto out_power; + } + + netdev_info(mcan_class->net, "TCAN4X5X successfully initialized.\n"); + return 0; + +out_power: + tcan4x5x_power_enable(priv->power, 0); + out_m_can_class_free_dev: + m_can_class_free_dev(mcan_class->net); + return ret; +} + +static void tcan4x5x_can_remove(struct spi_device *spi) +{ + struct tcan4x5x_priv *priv = spi_get_drvdata(spi); + + m_can_class_unregister(&priv->cdev); + + tcan4x5x_power_enable(priv->power, 0); + + m_can_class_free_dev(priv->cdev.net); +} + +static const struct of_device_id tcan4x5x_of_match[] = { + { + .compatible = "ti,tcan4x5x", + }, { + /* sentinel */ + }, +}; +MODULE_DEVICE_TABLE(of, tcan4x5x_of_match); + +static const struct spi_device_id tcan4x5x_id_table[] = { + { + .name = "tcan4x5x", + }, { + /* sentinel */ + }, +}; +MODULE_DEVICE_TABLE(spi, tcan4x5x_id_table); + +static struct spi_driver tcan4x5x_can_driver = { + .driver = { + .name = KBUILD_MODNAME, + .of_match_table = tcan4x5x_of_match, + .pm = NULL, + }, + .id_table = tcan4x5x_id_table, + .probe = tcan4x5x_can_probe, + .remove = tcan4x5x_can_remove, +}; +module_spi_driver(tcan4x5x_can_driver); + +MODULE_AUTHOR("Dan Murphy "); +MODULE_DESCRIPTION("Texas Instruments TCAN4x5x CAN driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/net/can/m_can/tcan4x5x-regmap.c b/drivers/net/can/m_can/tcan4x5x-regmap.c new file mode 100644 index 0000000000..fafa6daa67 --- /dev/null +++ b/drivers/net/can/m_can/tcan4x5x-regmap.c @@ -0,0 +1,165 @@ +// SPDX-License-Identifier: GPL-2.0 +// +// tcan4x5x - Texas Instruments TCAN4x5x Family CAN controller driver +// +// Copyright (c) 2020 Pengutronix, +// Marc Kleine-Budde +// Copyright (c) 2018-2019 Texas Instruments Incorporated +// http://www.ti.com/ + +#include "tcan4x5x.h" + +#define TCAN4X5X_SPI_INSTRUCTION_WRITE (0x61 << 24) +#define TCAN4X5X_SPI_INSTRUCTION_READ (0x41 << 24) + +#define TCAN4X5X_MAX_REGISTER 0x87fc + +static int tcan4x5x_regmap_gather_write(void *context, + const void *reg, size_t reg_len, + const void *val, size_t val_len) +{ + struct spi_device *spi = context; + struct tcan4x5x_priv *priv = spi_get_drvdata(spi); + struct tcan4x5x_map_buf *buf_tx = &priv->map_buf_tx; + struct spi_transfer xfer[] = { + { + .tx_buf = buf_tx, + .len = sizeof(buf_tx->cmd) + val_len, + }, + }; + + memcpy(&buf_tx->cmd, reg, sizeof(buf_tx->cmd.cmd) + + sizeof(buf_tx->cmd.addr)); + tcan4x5x_spi_cmd_set_len(&buf_tx->cmd, val_len); + memcpy(buf_tx->data, val, val_len); + + return spi_sync_transfer(spi, xfer, ARRAY_SIZE(xfer)); +} + +static int tcan4x5x_regmap_write(void *context, const void *data, size_t count) +{ + return tcan4x5x_regmap_gather_write(context, data, sizeof(__be32), + data + sizeof(__be32), + count - sizeof(__be32)); +} + +static int tcan4x5x_regmap_read(void *context, + const void *reg_buf, size_t reg_len, + void *val_buf, size_t val_len) +{ + struct spi_device *spi = context; + struct tcan4x5x_priv *priv = spi_get_drvdata(spi); + struct tcan4x5x_map_buf *buf_rx = &priv->map_buf_rx; + struct 
tcan4x5x_map_buf *buf_tx = &priv->map_buf_tx;
+	struct spi_transfer xfer[2] = {
+		{
+			.tx_buf = buf_tx,
+		}
+	};
+	struct spi_message msg;
+	int err;
+
+	spi_message_init(&msg);
+	spi_message_add_tail(&xfer[0], &msg);
+
+	memcpy(&buf_tx->cmd, reg_buf, sizeof(buf_tx->cmd.cmd) +
+	       sizeof(buf_tx->cmd.addr));
+	tcan4x5x_spi_cmd_set_len(&buf_tx->cmd, val_len);
+
+	if (spi->controller->flags & SPI_CONTROLLER_HALF_DUPLEX) {
+		xfer[0].len = sizeof(buf_tx->cmd);
+
+		xfer[1].rx_buf = val_buf;
+		xfer[1].len = val_len;
+		spi_message_add_tail(&xfer[1], &msg);
+	} else {
+		xfer[0].rx_buf = buf_rx;
+		xfer[0].len = sizeof(buf_tx->cmd) + val_len;
+
+		if (TCAN4X5X_SANITIZE_SPI)
+			memset(buf_tx->data, 0x0, val_len);
+	}
+
+	err = spi_sync(spi, &msg);
+	if (err)
+		return err;
+
+	if (!(spi->controller->flags & SPI_CONTROLLER_HALF_DUPLEX))
+		memcpy(val_buf, buf_rx->data, val_len);
+
+	return 0;
+}
+
+static const struct regmap_range tcan4x5x_reg_table_wr_range[] = {
+	/* Device ID and SPI Registers */
+	regmap_reg_range(0x000c, 0x0010),
+	/* Device configuration registers and Interrupt Flags*/
+	regmap_reg_range(0x0800, 0x080c),
+	regmap_reg_range(0x0820, 0x0820),
+	regmap_reg_range(0x0830, 0x0830),
+	/* M_CAN */
+	regmap_reg_range(0x100c, 0x102c),
+	regmap_reg_range(0x1048, 0x1048),
+	regmap_reg_range(0x1050, 0x105c),
+	regmap_reg_range(0x1080, 0x1088),
+	regmap_reg_range(0x1090, 0x1090),
+	regmap_reg_range(0x1098, 0x10a0),
+	regmap_reg_range(0x10a8, 0x10b0),
+	regmap_reg_range(0x10b8, 0x10c0),
+	regmap_reg_range(0x10c8, 0x10c8),
+	regmap_reg_range(0x10d0, 0x10d4),
+	regmap_reg_range(0x10e0, 0x10e4),
+	regmap_reg_range(0x10f0, 0x10f0),
+	regmap_reg_range(0x10f8, 0x10f8),
+	/* MRAM */
+	regmap_reg_range(0x8000, 0x87fc),
+};
+
+static const struct regmap_range tcan4x5x_reg_table_rd_range[] = {
+	regmap_reg_range(0x0000, 0x0010),	/* Device ID and SPI Registers */
+	regmap_reg_range(0x0800, 0x0830),	/* Device configuration registers and Interrupt Flags*/
+	regmap_reg_range(0x1000, 0x10fc),	/* M_CAN */
+	regmap_reg_range(0x8000, 0x87fc),	/* MRAM */
+};
+
+static const struct regmap_access_table tcan4x5x_reg_table_wr = {
+	.yes_ranges = tcan4x5x_reg_table_wr_range,
+	.n_yes_ranges = ARRAY_SIZE(tcan4x5x_reg_table_wr_range),
+};
+
+static const struct regmap_access_table tcan4x5x_reg_table_rd = {
+	.yes_ranges = tcan4x5x_reg_table_rd_range,
+	.n_yes_ranges = ARRAY_SIZE(tcan4x5x_reg_table_rd_range),
+};
+
+static const struct regmap_config tcan4x5x_regmap = {
+	.reg_bits = 24,
+	.reg_stride = 4,
+	.pad_bits = 8,
+	.val_bits = 32,
+	.wr_table = &tcan4x5x_reg_table_wr,
+	.rd_table = &tcan4x5x_reg_table_rd,
+	.max_register = TCAN4X5X_MAX_REGISTER,
+	.cache_type = REGCACHE_NONE,
+	.read_flag_mask = (__force unsigned long)
+		cpu_to_be32(TCAN4X5X_SPI_INSTRUCTION_READ),
+	.write_flag_mask = (__force unsigned long)
+		cpu_to_be32(TCAN4X5X_SPI_INSTRUCTION_WRITE),
+};
+
+static const struct regmap_bus tcan4x5x_bus = {
+	.write = tcan4x5x_regmap_write,
+	.gather_write = tcan4x5x_regmap_gather_write,
+	.read = tcan4x5x_regmap_read,
+	.reg_format_endian_default = REGMAP_ENDIAN_BIG,
+	.val_format_endian_default = REGMAP_ENDIAN_BIG,
+	.max_raw_read = 256,
+	.max_raw_write = 256,
+};
+
+int tcan4x5x_regmap_init(struct tcan4x5x_priv *priv)
+{
+	priv->regmap = devm_regmap_init(&priv->spi->dev, &tcan4x5x_bus,
+					priv->spi, &tcan4x5x_regmap);
+	return PTR_ERR_OR_ZERO(priv->regmap);
+}
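For reference, the 4-byte SPI header that tcan4x5x_regmap_read() assembles via struct tcan4x5x_buf_cmd can be sketched in isolation. The following standalone userspace program is illustrative only and not part of the patch; the target register 0x0004 (TCAN4X5X_DEV_ID2) and the 16-byte burst length are arbitrary example values, and the opcode and length encoding follow TCAN4X5X_SPI_INSTRUCTION_READ and tcan4x5x_spi_cmd_set_len() above.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Header layout per struct tcan4x5x_buf_cmd: one opcode byte, a
	 * big-endian 16-bit register address, and the burst length counted
	 * in 32-bit words. */
	uint16_t addr = 0x0004;		/* TCAN4X5X_DEV_ID2 */
	size_t val_len = 16;		/* bytes to read (example value) */
	uint8_t hdr[4];

	hdr[0] = 0x41;			/* TCAN4X5X_SPI_INSTRUCTION_READ >> 24 */
	hdr[1] = addr >> 8;		/* address high byte, big-endian on the wire */
	hdr[2] = addr & 0xff;		/* address low byte */
	hdr[3] = val_len >> 2;		/* length in words, cf. tcan4x5x_spi_cmd_set_len() */

	for (unsigned int i = 0; i < sizeof(hdr); i++)
		printf("%02x ", hdr[i]);
	printf("\n");			/* prints: 41 00 04 04 */

	return 0;
}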
diff --git a/drivers/net/can/m_can/tcan4x5x.h b/drivers/net/can/m_can/tcan4x5x.h
new file mode 100644
index 0000000000..e62c030d3e
--- /dev/null
+++ b/drivers/net/can/m_can/tcan4x5x.h
@@ -0,0 +1,56 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * tcan4x5x - Texas Instruments TCAN4x5x Family CAN controller driver
+ *
+ * Copyright (c) 2020 Pengutronix,
+ *                    Marc Kleine-Budde
+ */
+
+#ifndef _TCAN4X5X_H
+#define _TCAN4X5X_H
+
+#include <linux/gpio/consumer.h>
+#include <linux/regmap.h>
+#include <linux/regulator/consumer.h>
+#include <linux/spi/spi.h>
+
+#include "m_can.h"
+
+#define TCAN4X5X_SANITIZE_SPI 1
+
+struct __packed tcan4x5x_buf_cmd {
+	u8 cmd;
+	__be16 addr;
+	u8 len;
+};
+
+struct tcan4x5x_map_buf {
+	struct tcan4x5x_buf_cmd cmd;
+	u8 data[256 * sizeof(u32)];
+} ____cacheline_aligned;
+
+struct tcan4x5x_priv {
+	struct m_can_classdev cdev;
+
+	struct regmap *regmap;
+	struct spi_device *spi;
+
+	struct gpio_desc *reset_gpio;
+	struct gpio_desc *device_wake_gpio;
+	struct gpio_desc *device_state_gpio;
+	struct regulator *power;
+
+	struct tcan4x5x_map_buf map_buf_rx;
+	struct tcan4x5x_map_buf map_buf_tx;
+};
+
+static inline void
+tcan4x5x_spi_cmd_set_len(struct tcan4x5x_buf_cmd *cmd, u8 len)
+{
+	/* number of u32 */
+	cmd->len = len >> 2;
+}
+
+int tcan4x5x_regmap_init(struct tcan4x5x_priv *priv);
+
+#endif
-- 
cgit v1.2.3
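As a quick way to exercise any of the M_CAN back ends above once the patch is applied, the following standalone program is a minimal SocketCAN smoke test. It is a sketch, not part of the patch: it assumes recent kernel headers, that an M_CAN interface named "can0" exists and has been brought up (e.g. "ip link set can0 up type can bitrate 500000"), and the CAN ID and payload are arbitrary example values.

/* Minimal SocketCAN TX sketch: sends one classic CAN frame on "can0". */
#include <linux/can.h>
#include <linux/can/raw.h>
#include <net/if.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
	struct sockaddr_can addr = { .can_family = AF_CAN };
	struct can_frame frame = {
		.can_id = 0x123,		/* arbitrary example ID */
		.len = 2,
		.data = { 0xde, 0xad },
	};
	struct ifreq ifr;
	int s;

	s = socket(PF_CAN, SOCK_RAW, CAN_RAW);
	if (s < 0) {
		perror("socket");
		return 1;
	}

	/* Resolve "can0" to an interface index and bind to it */
	strncpy(ifr.ifr_name, "can0", sizeof(ifr.ifr_name));
	if (ioctl(s, SIOCGIFINDEX, &ifr) < 0) {
		perror("SIOCGIFINDEX");
		return 1;
	}
	addr.can_ifindex = ifr.ifr_ifindex;

	if (bind(s, (struct sockaddr *)&addr, sizeof(addr)) < 0) {
		perror("bind");
		return 1;
	}

	/* CAN_RAW sockets take one struct can_frame per write() */
	if (write(s, &frame, sizeof(frame)) != sizeof(frame)) {
		perror("write");
		return 1;
	}

	close(s);
	return 0;
}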