author     Daniel Baumann <daniel.baumann@progress-linux.org>    2024-04-07 18:49:45 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>    2024-04-07 18:49:45 +0000
commit     2c3c1048746a4622d8c89a29670120dc8fab93c4 (patch)
tree       848558de17fb3008cdf4d861b01ac7781903ce39 /drivers/bus/mhi
parent     Initial commit. (diff)

Adding upstream version 6.1.76.

Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'drivers/bus/mhi')

 -rw-r--r--  drivers/bus/mhi/Kconfig            |    9
 -rw-r--r--  drivers/bus/mhi/Makefile           |    5
 -rw-r--r--  drivers/bus/mhi/common.h           |  326
 -rw-r--r--  drivers/bus/mhi/ep/Kconfig         |   10
 -rw-r--r--  drivers/bus/mhi/ep/Makefile        |    2
 -rw-r--r--  drivers/bus/mhi/ep/internal.h      |  218
 -rw-r--r--  drivers/bus/mhi/ep/main.c          | 1630
 -rw-r--r--  drivers/bus/mhi/ep/mmio.c          |  273
 -rw-r--r--  drivers/bus/mhi/ep/ring.c          |  207
 -rw-r--r--  drivers/bus/mhi/ep/sm.c            |  154
 -rw-r--r--  drivers/bus/mhi/host/Kconfig       |   31
 -rw-r--r--  drivers/bus/mhi/host/Makefile      |    6
 -rw-r--r--  drivers/bus/mhi/host/boot.c        |  544
 -rw-r--r--  drivers/bus/mhi/host/debugfs.c     |  413
 -rw-r--r--  drivers/bus/mhi/host/init.c        | 1464
 -rw-r--r--  drivers/bus/mhi/host/internal.h    |  383
 -rw-r--r--  drivers/bus/mhi/host/main.c        | 1705
 -rw-r--r--  drivers/bus/mhi/host/pci_generic.c | 1226
 -rw-r--r--  drivers/bus/mhi/host/pm.c          | 1283

 19 files changed, 9889 insertions(+), 0 deletions(-)
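The common.h header added below is largely bitfield plumbing: every ring element (TRE) is a 64-bit pointer plus two 32-bit dwords, and the MHI_TRE_* macros pack and unpack those dwords with FIELD_PREP()/FIELD_GET(). As a minimal standalone sketch of that round trip, the snippet below builds a START_CHAN command element and decodes it again. GENMASK(), FIELD_PREP() and FIELD_GET() are re-implemented here purely for illustration (the kernel versions live in linux/bits.h and linux/bitfield.h), the channel ID 20 is arbitrary, and the cpu_to_le32() byte-swapping performed by the driver is omitted:

#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-ins for the kernel's bitfield helpers */
#define GENMASK(h, l)         ((~0u << (l)) & (~0u >> (31 - (h))))
#define FIELD_PREP(mask, val) (((uint32_t)(val) << __builtin_ctz(mask)) & (mask))
#define FIELD_GET(mask, reg)  (((uint32_t)(reg) & (mask)) >> __builtin_ctz(mask))

#define MHI_CMD_START_CHAN 18

struct mhi_ring_element {
	uint64_t ptr;
	uint32_t dword[2];
};

int main(void)
{
	/* Pack as MHI_TRE_CMD_START_DWORD1(): chid in bits 31:24, type in 23:16 */
	struct mhi_ring_element el = {
		.ptr = 0,
		.dword = { 0, FIELD_PREP(GENMASK(31, 24), 20) |
			      FIELD_PREP(GENMASK(23, 16), MHI_CMD_START_CHAN) },
	};

	/* Unpack, as MHI_TRE_GET_CMD_CHID()/MHI_TRE_GET_CMD_TYPE() do */
	printf("chid=%u type=%u\n",
	       FIELD_GET(GENMASK(31, 24), el.dword[1]),
	       FIELD_GET(GENMASK(23, 16), el.dword[1]));
	return 0;
}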
diff --git a/drivers/bus/mhi/Kconfig b/drivers/bus/mhi/Kconfig new file mode 100644 index 000000000..b39a11e6c --- /dev/null +++ b/drivers/bus/mhi/Kconfig @@ -0,0 +1,9 @@ +# SPDX-License-Identifier: GPL-2.0 +# +# MHI bus +# +# Copyright (c) 2021, Linaro Ltd. +# + +source "drivers/bus/mhi/host/Kconfig" +source "drivers/bus/mhi/ep/Kconfig" diff --git a/drivers/bus/mhi/Makefile b/drivers/bus/mhi/Makefile new file mode 100644 index 000000000..46981331b --- /dev/null +++ b/drivers/bus/mhi/Makefile @@ -0,0 +1,5 @@ +# Host MHI stack +obj-y += host/ + +# Endpoint MHI stack +obj-y += ep/ diff --git a/drivers/bus/mhi/common.h b/drivers/bus/mhi/common.h new file mode 100644 index 000000000..f794b9c80 --- /dev/null +++ b/drivers/bus/mhi/common.h @@ -0,0 +1,326 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2022, Linaro Ltd. + * + */ + +#ifndef _MHI_COMMON_H +#define _MHI_COMMON_H + +#include <linux/bitfield.h> +#include <linux/mhi.h> + +/* MHI registers */ +#define MHIREGLEN 0x00 +#define MHIVER 0x08 +#define MHICFG 0x10 +#define CHDBOFF 0x18 +#define ERDBOFF 0x20 +#define BHIOFF 0x28 +#define BHIEOFF 0x2c +#define DEBUGOFF 0x30 +#define MHICTRL 0x38 +#define MHISTATUS 0x48 +#define CCABAP_LOWER 0x58 +#define CCABAP_HIGHER 0x5c +#define ECABAP_LOWER 0x60 +#define ECABAP_HIGHER 0x64 +#define CRCBAP_LOWER 0x68 +#define CRCBAP_HIGHER 0x6c +#define CRDB_LOWER 0x70 +#define CRDB_HIGHER 0x74 +#define MHICTRLBASE_LOWER 0x80 +#define MHICTRLBASE_HIGHER 0x84 +#define MHICTRLLIMIT_LOWER 0x88 +#define MHICTRLLIMIT_HIGHER 0x8c +#define MHIDATABASE_LOWER 0x98 +#define MHIDATABASE_HIGHER 0x9c +#define MHIDATALIMIT_LOWER 0xa0 +#define MHIDATALIMIT_HIGHER 0xa4 + +/* MHI BHI registers */ +#define BHI_BHIVERSION_MINOR 0x00 +#define BHI_BHIVERSION_MAJOR 0x04 +#define BHI_IMGADDR_LOW 0x08 +#define BHI_IMGADDR_HIGH 0x0c +#define BHI_IMGSIZE 0x10 +#define BHI_RSVD1 0x14 +#define BHI_IMGTXDB 0x18 +#define BHI_RSVD2 0x1c +#define BHI_INTVEC 0x20 +#define BHI_RSVD3 0x24 +#define BHI_EXECENV 0x28 +#define BHI_STATUS 0x2c +#define BHI_ERRCODE 0x30 +#define BHI_ERRDBG1 0x34 +#define BHI_ERRDBG2 0x38 +#define BHI_ERRDBG3 0x3c +#define BHI_SERIALNU 0x40 +#define BHI_SBLANTIROLLVER 0x44 +#define BHI_NUMSEG 0x48 +#define BHI_MSMHWID(n) (0x4c + (0x4 * (n))) +#define BHI_OEMPKHASH(n) (0x64 + (0x4 * (n))) +#define BHI_RSVD5 0xc4 + +/* BHI register bits */ +#define BHI_TXDB_SEQNUM_BMSK GENMASK(29, 0) +#define BHI_TXDB_SEQNUM_SHFT 0 +#define BHI_STATUS_MASK GENMASK(31, 30) +#define BHI_STATUS_ERROR 0x03 +#define BHI_STATUS_SUCCESS 0x02 +#define BHI_STATUS_RESET 0x00 + +/* MHI BHIE registers */ +#define BHIE_MSMSOCID_OFFS 0x00 +#define BHIE_TXVECADDR_LOW_OFFS 0x2c +#define BHIE_TXVECADDR_HIGH_OFFS 0x30 +#define BHIE_TXVECSIZE_OFFS 0x34 +#define BHIE_TXVECDB_OFFS 0x3c +#define BHIE_TXVECSTATUS_OFFS 0x44 +#define BHIE_RXVECADDR_LOW_OFFS 0x60 +#define BHIE_RXVECADDR_HIGH_OFFS 0x64 +#define BHIE_RXVECSIZE_OFFS 0x68 +#define BHIE_RXVECDB_OFFS 0x70 +#define BHIE_RXVECSTATUS_OFFS 0x78 + +/* BHIE register bits */ +#define BHIE_TXVECDB_SEQNUM_BMSK GENMASK(29, 0) +#define BHIE_TXVECDB_SEQNUM_SHFT 0 +#define BHIE_TXVECSTATUS_SEQNUM_BMSK GENMASK(29, 0) +#define BHIE_TXVECSTATUS_SEQNUM_SHFT 0 +#define BHIE_TXVECSTATUS_STATUS_BMSK GENMASK(31, 30) +#define BHIE_TXVECSTATUS_STATUS_SHFT 30 +#define BHIE_TXVECSTATUS_STATUS_RESET 0x00 +#define BHIE_TXVECSTATUS_STATUS_XFER_COMPL 0x02 +#define BHIE_TXVECSTATUS_STATUS_ERROR 0x03 +#define BHIE_RXVECDB_SEQNUM_BMSK GENMASK(29, 0) +#define BHIE_RXVECDB_SEQNUM_SHFT 0 +#define 
BHIE_RXVECSTATUS_SEQNUM_BMSK GENMASK(29, 0) +#define BHIE_RXVECSTATUS_SEQNUM_SHFT 0 +#define BHIE_RXVECSTATUS_STATUS_BMSK GENMASK(31, 30) +#define BHIE_RXVECSTATUS_STATUS_SHFT 30 +#define BHIE_RXVECSTATUS_STATUS_RESET 0x00 +#define BHIE_RXVECSTATUS_STATUS_XFER_COMPL 0x02 +#define BHIE_RXVECSTATUS_STATUS_ERROR 0x03 + +/* MHI register bits */ +#define MHICFG_NHWER_MASK GENMASK(31, 24) +#define MHICFG_NER_MASK GENMASK(23, 16) +#define MHICFG_NHWCH_MASK GENMASK(15, 8) +#define MHICFG_NCH_MASK GENMASK(7, 0) +#define MHICTRL_MHISTATE_MASK GENMASK(15, 8) +#define MHICTRL_RESET_MASK BIT(1) +#define MHISTATUS_MHISTATE_MASK GENMASK(15, 8) +#define MHISTATUS_SYSERR_MASK BIT(2) +#define MHISTATUS_READY_MASK BIT(0) + +/* Command Ring Element macros */ +/* No operation command */ +#define MHI_TRE_CMD_NOOP_PTR 0 +#define MHI_TRE_CMD_NOOP_DWORD0 0 +#define MHI_TRE_CMD_NOOP_DWORD1 cpu_to_le32(FIELD_PREP(GENMASK(23, 16), MHI_CMD_NOP)) + +/* Channel reset command */ +#define MHI_TRE_CMD_RESET_PTR 0 +#define MHI_TRE_CMD_RESET_DWORD0 0 +#define MHI_TRE_CMD_RESET_DWORD1(chid) cpu_to_le32(FIELD_PREP(GENMASK(31, 24), chid) | \ + FIELD_PREP(GENMASK(23, 16), \ + MHI_CMD_RESET_CHAN)) + +/* Channel stop command */ +#define MHI_TRE_CMD_STOP_PTR 0 +#define MHI_TRE_CMD_STOP_DWORD0 0 +#define MHI_TRE_CMD_STOP_DWORD1(chid) cpu_to_le32(FIELD_PREP(GENMASK(31, 24), chid) | \ + FIELD_PREP(GENMASK(23, 16), \ + MHI_CMD_STOP_CHAN)) + +/* Channel start command */ +#define MHI_TRE_CMD_START_PTR 0 +#define MHI_TRE_CMD_START_DWORD0 0 +#define MHI_TRE_CMD_START_DWORD1(chid) cpu_to_le32(FIELD_PREP(GENMASK(31, 24), chid) | \ + FIELD_PREP(GENMASK(23, 16), \ + MHI_CMD_START_CHAN)) + +#define MHI_TRE_GET_DWORD(tre, word) le32_to_cpu((tre)->dword[(word)]) +#define MHI_TRE_GET_CMD_CHID(tre) FIELD_GET(GENMASK(31, 24), MHI_TRE_GET_DWORD(tre, 1)) +#define MHI_TRE_GET_CMD_TYPE(tre) FIELD_GET(GENMASK(23, 16), MHI_TRE_GET_DWORD(tre, 1)) + +/* Event descriptor macros */ +#define MHI_TRE_EV_PTR(ptr) cpu_to_le64(ptr) +#define MHI_TRE_EV_DWORD0(code, len) cpu_to_le32(FIELD_PREP(GENMASK(31, 24), code) | \ + FIELD_PREP(GENMASK(15, 0), len)) +#define MHI_TRE_EV_DWORD1(chid, type) cpu_to_le32(FIELD_PREP(GENMASK(31, 24), chid) | \ + FIELD_PREP(GENMASK(23, 16), type)) +#define MHI_TRE_GET_EV_PTR(tre) le64_to_cpu((tre)->ptr) +#define MHI_TRE_GET_EV_CODE(tre) FIELD_GET(GENMASK(31, 24), (MHI_TRE_GET_DWORD(tre, 0))) +#define MHI_TRE_GET_EV_LEN(tre) FIELD_GET(GENMASK(15, 0), (MHI_TRE_GET_DWORD(tre, 0))) +#define MHI_TRE_GET_EV_CHID(tre) FIELD_GET(GENMASK(31, 24), (MHI_TRE_GET_DWORD(tre, 1))) +#define MHI_TRE_GET_EV_TYPE(tre) FIELD_GET(GENMASK(23, 16), (MHI_TRE_GET_DWORD(tre, 1))) +#define MHI_TRE_GET_EV_STATE(tre) FIELD_GET(GENMASK(31, 24), (MHI_TRE_GET_DWORD(tre, 0))) +#define MHI_TRE_GET_EV_EXECENV(tre) FIELD_GET(GENMASK(31, 24), (MHI_TRE_GET_DWORD(tre, 0))) +#define MHI_TRE_GET_EV_SEQ(tre) MHI_TRE_GET_DWORD(tre, 0) +#define MHI_TRE_GET_EV_TIME(tre) MHI_TRE_GET_EV_PTR(tre) +#define MHI_TRE_GET_EV_COOKIE(tre) lower_32_bits(MHI_TRE_GET_EV_PTR(tre)) +#define MHI_TRE_GET_EV_VEID(tre) FIELD_GET(GENMASK(23, 16), (MHI_TRE_GET_DWORD(tre, 0))) +#define MHI_TRE_GET_EV_LINKSPEED(tre) FIELD_GET(GENMASK(31, 24), (MHI_TRE_GET_DWORD(tre, 1))) +#define MHI_TRE_GET_EV_LINKWIDTH(tre) FIELD_GET(GENMASK(7, 0), (MHI_TRE_GET_DWORD(tre, 0))) + +/* State change event */ +#define MHI_SC_EV_PTR 0 +#define MHI_SC_EV_DWORD0(state) cpu_to_le32(FIELD_PREP(GENMASK(31, 24), state)) +#define MHI_SC_EV_DWORD1(type) cpu_to_le32(FIELD_PREP(GENMASK(23, 16), type)) + +/* EE event */ +#define 
MHI_EE_EV_PTR 0 +#define MHI_EE_EV_DWORD0(ee) cpu_to_le32(FIELD_PREP(GENMASK(31, 24), ee)) +#define MHI_EE_EV_DWORD1(type) cpu_to_le32(FIELD_PREP(GENMASK(23, 16), type)) + + +/* Command Completion event */ +#define MHI_CC_EV_PTR(ptr) cpu_to_le64(ptr) +#define MHI_CC_EV_DWORD0(code) cpu_to_le32(FIELD_PREP(GENMASK(31, 24), code)) +#define MHI_CC_EV_DWORD1(type) cpu_to_le32(FIELD_PREP(GENMASK(23, 16), type)) + +/* Transfer descriptor macros */ +#define MHI_TRE_DATA_PTR(ptr) cpu_to_le64(ptr) +#define MHI_TRE_DATA_DWORD0(len) cpu_to_le32(FIELD_PREP(GENMASK(15, 0), len)) +#define MHI_TRE_TYPE_TRANSFER 2 +#define MHI_TRE_DATA_DWORD1(bei, ieot, ieob, chain) cpu_to_le32(FIELD_PREP(GENMASK(23, 16), \ + MHI_TRE_TYPE_TRANSFER) | \ + FIELD_PREP(BIT(10), bei) | \ + FIELD_PREP(BIT(9), ieot) | \ + FIELD_PREP(BIT(8), ieob) | \ + FIELD_PREP(BIT(0), chain)) +#define MHI_TRE_DATA_GET_PTR(tre) le64_to_cpu((tre)->ptr) +#define MHI_TRE_DATA_GET_LEN(tre) FIELD_GET(GENMASK(15, 0), MHI_TRE_GET_DWORD(tre, 0)) +#define MHI_TRE_DATA_GET_CHAIN(tre) (!!(FIELD_GET(BIT(0), MHI_TRE_GET_DWORD(tre, 1)))) +#define MHI_TRE_DATA_GET_IEOB(tre) (!!(FIELD_GET(BIT(8), MHI_TRE_GET_DWORD(tre, 1)))) +#define MHI_TRE_DATA_GET_IEOT(tre) (!!(FIELD_GET(BIT(9), MHI_TRE_GET_DWORD(tre, 1)))) +#define MHI_TRE_DATA_GET_BEI(tre) (!!(FIELD_GET(BIT(10), MHI_TRE_GET_DWORD(tre, 1)))) + +/* RSC transfer descriptor macros */ +#define MHI_RSCTRE_DATA_PTR(ptr, len) cpu_to_le64(FIELD_PREP(GENMASK(64, 48), len) | ptr) +#define MHI_RSCTRE_DATA_DWORD0(cookie) cpu_to_le32(cookie) +#define MHI_RSCTRE_DATA_DWORD1 cpu_to_le32(FIELD_PREP(GENMASK(23, 16), \ + MHI_PKT_TYPE_COALESCING)) + +enum mhi_pkt_type { + MHI_PKT_TYPE_INVALID = 0x0, + MHI_PKT_TYPE_NOOP_CMD = 0x1, + MHI_PKT_TYPE_TRANSFER = 0x2, + MHI_PKT_TYPE_COALESCING = 0x8, + MHI_PKT_TYPE_RESET_CHAN_CMD = 0x10, + MHI_PKT_TYPE_STOP_CHAN_CMD = 0x11, + MHI_PKT_TYPE_START_CHAN_CMD = 0x12, + MHI_PKT_TYPE_STATE_CHANGE_EVENT = 0x20, + MHI_PKT_TYPE_CMD_COMPLETION_EVENT = 0x21, + MHI_PKT_TYPE_TX_EVENT = 0x22, + MHI_PKT_TYPE_RSC_TX_EVENT = 0x28, + MHI_PKT_TYPE_EE_EVENT = 0x40, + MHI_PKT_TYPE_TSYNC_EVENT = 0x48, + MHI_PKT_TYPE_BW_REQ_EVENT = 0x50, + MHI_PKT_TYPE_STALE_EVENT, /* internal event */ +}; + +/* MHI transfer completion events */ +enum mhi_ev_ccs { + MHI_EV_CC_INVALID = 0x0, + MHI_EV_CC_SUCCESS = 0x1, + MHI_EV_CC_EOT = 0x2, /* End of transfer event */ + MHI_EV_CC_OVERFLOW = 0x3, + MHI_EV_CC_EOB = 0x4, /* End of block event */ + MHI_EV_CC_OOB = 0x5, /* Out of block event */ + MHI_EV_CC_DB_MODE = 0x6, + MHI_EV_CC_UNDEFINED_ERR = 0x10, + MHI_EV_CC_BAD_TRE = 0x11, +}; + +/* Channel state */ +enum mhi_ch_state { + MHI_CH_STATE_DISABLED, + MHI_CH_STATE_ENABLED, + MHI_CH_STATE_RUNNING, + MHI_CH_STATE_SUSPENDED, + MHI_CH_STATE_STOP, + MHI_CH_STATE_ERROR, +}; + +enum mhi_cmd_type { + MHI_CMD_NOP = 1, + MHI_CMD_RESET_CHAN = 16, + MHI_CMD_STOP_CHAN = 17, + MHI_CMD_START_CHAN = 18, +}; + +#define EV_CTX_RESERVED_MASK GENMASK(7, 0) +#define EV_CTX_INTMODC_MASK GENMASK(15, 8) +#define EV_CTX_INTMODT_MASK GENMASK(31, 16) +struct mhi_event_ctxt { + __le32 intmod; + __le32 ertype; + __le32 msivec; + + __le64 rbase __packed __aligned(4); + __le64 rlen __packed __aligned(4); + __le64 rp __packed __aligned(4); + __le64 wp __packed __aligned(4); +}; + +#define CHAN_CTX_CHSTATE_MASK GENMASK(7, 0) +#define CHAN_CTX_BRSTMODE_MASK GENMASK(9, 8) +#define CHAN_CTX_POLLCFG_MASK GENMASK(15, 10) +#define CHAN_CTX_RESERVED_MASK GENMASK(31, 16) +struct mhi_chan_ctxt { + __le32 chcfg; + __le32 chtype; + __le32 erindex; + + __le64 rbase 
__packed __aligned(4); + __le64 rlen __packed __aligned(4); + __le64 rp __packed __aligned(4); + __le64 wp __packed __aligned(4); +}; + +struct mhi_cmd_ctxt { + __le32 reserved0; + __le32 reserved1; + __le32 reserved2; + + __le64 rbase __packed __aligned(4); + __le64 rlen __packed __aligned(4); + __le64 rp __packed __aligned(4); + __le64 wp __packed __aligned(4); +}; + +struct mhi_ring_element { + __le64 ptr; + __le32 dword[2]; +}; + +static inline const char *mhi_state_str(enum mhi_state state) +{ + switch (state) { + case MHI_STATE_RESET: + return "RESET"; + case MHI_STATE_READY: + return "READY"; + case MHI_STATE_M0: + return "M0"; + case MHI_STATE_M1: + return "M1"; + case MHI_STATE_M2: + return "M2"; + case MHI_STATE_M3: + return "M3"; + case MHI_STATE_M3_FAST: + return "M3 FAST"; + case MHI_STATE_BHI: + return "BHI"; + case MHI_STATE_SYS_ERR: + return "SYS ERROR"; + default: + return "Unknown state"; + } +}; + +#endif /* _MHI_COMMON_H */ diff --git a/drivers/bus/mhi/ep/Kconfig b/drivers/bus/mhi/ep/Kconfig new file mode 100644 index 000000000..90ab3b040 --- /dev/null +++ b/drivers/bus/mhi/ep/Kconfig @@ -0,0 +1,10 @@ +config MHI_BUS_EP + tristate "Modem Host Interface (MHI) bus Endpoint implementation" + help + Bus driver for MHI protocol. Modem Host Interface (MHI) is a + communication protocol used by a host processor to control + and communicate a modem device over a high speed peripheral + bus or shared memory. + + MHI_BUS_EP implements the MHI protocol for the endpoint devices, + such as SDX55 modem connected to the host machine over PCIe. diff --git a/drivers/bus/mhi/ep/Makefile b/drivers/bus/mhi/ep/Makefile new file mode 100644 index 000000000..aad85f180 --- /dev/null +++ b/drivers/bus/mhi/ep/Makefile @@ -0,0 +1,2 @@ +obj-$(CONFIG_MHI_BUS_EP) += mhi_ep.o +mhi_ep-y := main.o mmio.o ring.o sm.o diff --git a/drivers/bus/mhi/ep/internal.h b/drivers/bus/mhi/ep/internal.h new file mode 100644 index 000000000..a2125fa5f --- /dev/null +++ b/drivers/bus/mhi/ep/internal.h @@ -0,0 +1,218 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2022, Linaro Ltd. 
+ * + */ + +#ifndef _MHI_EP_INTERNAL_ +#define _MHI_EP_INTERNAL_ + +#include <linux/bitfield.h> + +#include "../common.h" + +extern struct bus_type mhi_ep_bus_type; + +#define MHI_REG_OFFSET 0x100 +#define BHI_REG_OFFSET 0x200 + +/* MHI registers */ +#define EP_MHIREGLEN (MHI_REG_OFFSET + MHIREGLEN) +#define EP_MHIVER (MHI_REG_OFFSET + MHIVER) +#define EP_MHICFG (MHI_REG_OFFSET + MHICFG) +#define EP_CHDBOFF (MHI_REG_OFFSET + CHDBOFF) +#define EP_ERDBOFF (MHI_REG_OFFSET + ERDBOFF) +#define EP_BHIOFF (MHI_REG_OFFSET + BHIOFF) +#define EP_BHIEOFF (MHI_REG_OFFSET + BHIEOFF) +#define EP_DEBUGOFF (MHI_REG_OFFSET + DEBUGOFF) +#define EP_MHICTRL (MHI_REG_OFFSET + MHICTRL) +#define EP_MHISTATUS (MHI_REG_OFFSET + MHISTATUS) +#define EP_CCABAP_LOWER (MHI_REG_OFFSET + CCABAP_LOWER) +#define EP_CCABAP_HIGHER (MHI_REG_OFFSET + CCABAP_HIGHER) +#define EP_ECABAP_LOWER (MHI_REG_OFFSET + ECABAP_LOWER) +#define EP_ECABAP_HIGHER (MHI_REG_OFFSET + ECABAP_HIGHER) +#define EP_CRCBAP_LOWER (MHI_REG_OFFSET + CRCBAP_LOWER) +#define EP_CRCBAP_HIGHER (MHI_REG_OFFSET + CRCBAP_HIGHER) +#define EP_CRDB_LOWER (MHI_REG_OFFSET + CRDB_LOWER) +#define EP_CRDB_HIGHER (MHI_REG_OFFSET + CRDB_HIGHER) +#define EP_MHICTRLBASE_LOWER (MHI_REG_OFFSET + MHICTRLBASE_LOWER) +#define EP_MHICTRLBASE_HIGHER (MHI_REG_OFFSET + MHICTRLBASE_HIGHER) +#define EP_MHICTRLLIMIT_LOWER (MHI_REG_OFFSET + MHICTRLLIMIT_LOWER) +#define EP_MHICTRLLIMIT_HIGHER (MHI_REG_OFFSET + MHICTRLLIMIT_HIGHER) +#define EP_MHIDATABASE_LOWER (MHI_REG_OFFSET + MHIDATABASE_LOWER) +#define EP_MHIDATABASE_HIGHER (MHI_REG_OFFSET + MHIDATABASE_HIGHER) +#define EP_MHIDATALIMIT_LOWER (MHI_REG_OFFSET + MHIDATALIMIT_LOWER) +#define EP_MHIDATALIMIT_HIGHER (MHI_REG_OFFSET + MHIDATALIMIT_HIGHER) + +/* MHI BHI registers */ +#define EP_BHI_INTVEC (BHI_REG_OFFSET + BHI_INTVEC) +#define EP_BHI_EXECENV (BHI_REG_OFFSET + BHI_EXECENV) + +/* MHI Doorbell registers */ +#define CHDB_LOWER_n(n) (0x400 + 0x8 * (n)) +#define CHDB_HIGHER_n(n) (0x404 + 0x8 * (n)) +#define ERDB_LOWER_n(n) (0x800 + 0x8 * (n)) +#define ERDB_HIGHER_n(n) (0x804 + 0x8 * (n)) + +#define MHI_CTRL_INT_STATUS 0x4 +#define MHI_CTRL_INT_STATUS_MSK BIT(0) +#define MHI_CTRL_INT_STATUS_CRDB_MSK BIT(1) +#define MHI_CHDB_INT_STATUS_n(n) (0x28 + 0x4 * (n)) +#define MHI_ERDB_INT_STATUS_n(n) (0x38 + 0x4 * (n)) + +#define MHI_CTRL_INT_CLEAR 0x4c +#define MHI_CTRL_INT_MMIO_WR_CLEAR BIT(2) +#define MHI_CTRL_INT_CRDB_CLEAR BIT(1) +#define MHI_CTRL_INT_CRDB_MHICTRL_CLEAR BIT(0) + +#define MHI_CHDB_INT_CLEAR_n(n) (0x70 + 0x4 * (n)) +#define MHI_CHDB_INT_CLEAR_n_CLEAR_ALL GENMASK(31, 0) +#define MHI_ERDB_INT_CLEAR_n(n) (0x80 + 0x4 * (n)) +#define MHI_ERDB_INT_CLEAR_n_CLEAR_ALL GENMASK(31, 0) + +/* + * Unlike the usual "masking" convention, writing "1" to a bit in this register + * enables the interrupt and writing "0" will disable it.. 
+ */ +#define MHI_CTRL_INT_MASK 0x94 +#define MHI_CTRL_INT_MASK_MASK GENMASK(1, 0) +#define MHI_CTRL_MHICTRL_MASK BIT(0) +#define MHI_CTRL_CRDB_MASK BIT(1) + +#define MHI_CHDB_INT_MASK_n(n) (0xb8 + 0x4 * (n)) +#define MHI_CHDB_INT_MASK_n_EN_ALL GENMASK(31, 0) +#define MHI_ERDB_INT_MASK_n(n) (0xc8 + 0x4 * (n)) +#define MHI_ERDB_INT_MASK_n_EN_ALL GENMASK(31, 0) + +#define NR_OF_CMD_RINGS 1 +#define MHI_MASK_ROWS_CH_DB 4 +#define MHI_MASK_ROWS_EV_DB 4 +#define MHI_MASK_CH_LEN 32 +#define MHI_MASK_EV_LEN 32 + +/* Generic context */ +struct mhi_generic_ctx { + __le32 reserved0; + __le32 reserved1; + __le32 reserved2; + + __le64 rbase __packed __aligned(4); + __le64 rlen __packed __aligned(4); + __le64 rp __packed __aligned(4); + __le64 wp __packed __aligned(4); +}; + +enum mhi_ep_ring_type { + RING_TYPE_CMD, + RING_TYPE_ER, + RING_TYPE_CH, +}; + +/* Ring element */ +union mhi_ep_ring_ctx { + struct mhi_cmd_ctxt cmd; + struct mhi_event_ctxt ev; + struct mhi_chan_ctxt ch; + struct mhi_generic_ctx generic; +}; + +struct mhi_ep_ring_item { + struct list_head node; + struct mhi_ep_ring *ring; +}; + +struct mhi_ep_ring { + struct mhi_ep_cntrl *mhi_cntrl; + union mhi_ep_ring_ctx *ring_ctx; + struct mhi_ring_element *ring_cache; + enum mhi_ep_ring_type type; + u64 rbase; + size_t rd_offset; + size_t wr_offset; + size_t ring_size; + u32 db_offset_h; + u32 db_offset_l; + u32 ch_id; + u32 er_index; + u32 irq_vector; + bool started; +}; + +struct mhi_ep_cmd { + struct mhi_ep_ring ring; +}; + +struct mhi_ep_event { + struct mhi_ep_ring ring; +}; + +struct mhi_ep_state_transition { + struct list_head node; + enum mhi_state state; +}; + +struct mhi_ep_chan { + char *name; + struct mhi_ep_device *mhi_dev; + struct mhi_ep_ring ring; + struct mutex lock; + void (*xfer_cb)(struct mhi_ep_device *mhi_dev, struct mhi_result *result); + enum mhi_ch_state state; + enum dma_data_direction dir; + u64 tre_loc; + u32 tre_size; + u32 tre_bytes_left; + u32 chan; + bool skip_td; +}; + +/* MHI Ring related functions */ +void mhi_ep_ring_init(struct mhi_ep_ring *ring, enum mhi_ep_ring_type type, u32 id); +void mhi_ep_ring_reset(struct mhi_ep_cntrl *mhi_cntrl, struct mhi_ep_ring *ring); +int mhi_ep_ring_start(struct mhi_ep_cntrl *mhi_cntrl, struct mhi_ep_ring *ring, + union mhi_ep_ring_ctx *ctx); +size_t mhi_ep_ring_addr2offset(struct mhi_ep_ring *ring, u64 ptr); +int mhi_ep_ring_add_element(struct mhi_ep_ring *ring, struct mhi_ring_element *element); +void mhi_ep_ring_inc_index(struct mhi_ep_ring *ring); +int mhi_ep_update_wr_offset(struct mhi_ep_ring *ring); + +/* MMIO related functions */ +u32 mhi_ep_mmio_read(struct mhi_ep_cntrl *mhi_cntrl, u32 offset); +void mhi_ep_mmio_write(struct mhi_ep_cntrl *mhi_cntrl, u32 offset, u32 val); +void mhi_ep_mmio_masked_write(struct mhi_ep_cntrl *mhi_cntrl, u32 offset, u32 mask, u32 val); +u32 mhi_ep_mmio_masked_read(struct mhi_ep_cntrl *dev, u32 offset, u32 mask); +void mhi_ep_mmio_enable_ctrl_interrupt(struct mhi_ep_cntrl *mhi_cntrl); +void mhi_ep_mmio_disable_ctrl_interrupt(struct mhi_ep_cntrl *mhi_cntrl); +void mhi_ep_mmio_enable_cmdb_interrupt(struct mhi_ep_cntrl *mhi_cntrl); +void mhi_ep_mmio_disable_cmdb_interrupt(struct mhi_ep_cntrl *mhi_cntrl); +void mhi_ep_mmio_enable_chdb(struct mhi_ep_cntrl *mhi_cntrl, u32 ch_id); +void mhi_ep_mmio_disable_chdb(struct mhi_ep_cntrl *mhi_cntrl, u32 ch_id); +void mhi_ep_mmio_enable_chdb_interrupts(struct mhi_ep_cntrl *mhi_cntrl); +bool mhi_ep_mmio_read_chdb_status_interrupts(struct mhi_ep_cntrl *mhi_cntrl); +void mhi_ep_mmio_mask_interrupts(struct 
mhi_ep_cntrl *mhi_cntrl); +void mhi_ep_mmio_get_chc_base(struct mhi_ep_cntrl *mhi_cntrl); +void mhi_ep_mmio_get_erc_base(struct mhi_ep_cntrl *mhi_cntrl); +void mhi_ep_mmio_get_crc_base(struct mhi_ep_cntrl *mhi_cntrl); +u64 mhi_ep_mmio_get_db(struct mhi_ep_ring *ring); +void mhi_ep_mmio_set_env(struct mhi_ep_cntrl *mhi_cntrl, u32 value); +void mhi_ep_mmio_clear_reset(struct mhi_ep_cntrl *mhi_cntrl); +void mhi_ep_mmio_reset(struct mhi_ep_cntrl *mhi_cntrl); +void mhi_ep_mmio_get_mhi_state(struct mhi_ep_cntrl *mhi_cntrl, enum mhi_state *state, + bool *mhi_reset); +void mhi_ep_mmio_init(struct mhi_ep_cntrl *mhi_cntrl); +void mhi_ep_mmio_update_ner(struct mhi_ep_cntrl *mhi_cntrl); + +/* MHI EP core functions */ +int mhi_ep_send_state_change_event(struct mhi_ep_cntrl *mhi_cntrl, enum mhi_state state); +int mhi_ep_send_ee_event(struct mhi_ep_cntrl *mhi_cntrl, enum mhi_ee_type exec_env); +bool mhi_ep_check_mhi_state(struct mhi_ep_cntrl *mhi_cntrl, enum mhi_state cur_mhi_state, + enum mhi_state mhi_state); +int mhi_ep_set_mhi_state(struct mhi_ep_cntrl *mhi_cntrl, enum mhi_state mhi_state); +int mhi_ep_set_m0_state(struct mhi_ep_cntrl *mhi_cntrl); +int mhi_ep_set_m3_state(struct mhi_ep_cntrl *mhi_cntrl); +int mhi_ep_set_ready_state(struct mhi_ep_cntrl *mhi_cntrl); +void mhi_ep_handle_syserr(struct mhi_ep_cntrl *mhi_cntrl); +void mhi_ep_resume_channels(struct mhi_ep_cntrl *mhi_cntrl); +void mhi_ep_suspend_channels(struct mhi_ep_cntrl *mhi_cntrl); + +#endif diff --git a/drivers/bus/mhi/ep/main.c b/drivers/bus/mhi/ep/main.c new file mode 100644 index 000000000..34e0ba6f5 --- /dev/null +++ b/drivers/bus/mhi/ep/main.c @@ -0,0 +1,1630 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * MHI Endpoint bus stack + * + * Copyright (C) 2022 Linaro Ltd. + * Author: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org> + */ + +#include <linux/bitfield.h> +#include <linux/delay.h> +#include <linux/dma-direction.h> +#include <linux/interrupt.h> +#include <linux/io.h> +#include <linux/irq.h> +#include <linux/mhi_ep.h> +#include <linux/mod_devicetable.h> +#include <linux/module.h> +#include "internal.h" + +#define M0_WAIT_DELAY_MS 100 +#define M0_WAIT_COUNT 100 + +static DEFINE_IDA(mhi_ep_cntrl_ida); + +static int mhi_ep_create_device(struct mhi_ep_cntrl *mhi_cntrl, u32 ch_id); +static int mhi_ep_destroy_device(struct device *dev, void *data); + +static int mhi_ep_send_event(struct mhi_ep_cntrl *mhi_cntrl, u32 ring_idx, + struct mhi_ring_element *el, bool bei) +{ + struct device *dev = &mhi_cntrl->mhi_dev->dev; + union mhi_ep_ring_ctx *ctx; + struct mhi_ep_ring *ring; + int ret; + + mutex_lock(&mhi_cntrl->event_lock); + ring = &mhi_cntrl->mhi_event[ring_idx].ring; + ctx = (union mhi_ep_ring_ctx *)&mhi_cntrl->ev_ctx_cache[ring_idx]; + if (!ring->started) { + ret = mhi_ep_ring_start(mhi_cntrl, ring, ctx); + if (ret) { + dev_err(dev, "Error starting event ring (%u)\n", ring_idx); + goto err_unlock; + } + } + + /* Add element to the event ring */ + ret = mhi_ep_ring_add_element(ring, el); + if (ret) { + dev_err(dev, "Error adding element to event ring (%u)\n", ring_idx); + goto err_unlock; + } + + mutex_unlock(&mhi_cntrl->event_lock); + + /* + * Raise IRQ to host only if the BEI flag is not set in TRE. Host might + * set this flag for interrupt moderation as per MHI protocol. 
+ */ + if (!bei) + mhi_cntrl->raise_irq(mhi_cntrl, ring->irq_vector); + + return 0; + +err_unlock: + mutex_unlock(&mhi_cntrl->event_lock); + + return ret; +} + +static int mhi_ep_send_completion_event(struct mhi_ep_cntrl *mhi_cntrl, struct mhi_ep_ring *ring, + struct mhi_ring_element *tre, u32 len, enum mhi_ev_ccs code) +{ + struct mhi_ring_element *event; + int ret; + + event = kzalloc(sizeof(struct mhi_ring_element), GFP_KERNEL); + if (!event) + return -ENOMEM; + + event->ptr = cpu_to_le64(ring->rbase + ring->rd_offset * sizeof(*tre)); + event->dword[0] = MHI_TRE_EV_DWORD0(code, len); + event->dword[1] = MHI_TRE_EV_DWORD1(ring->ch_id, MHI_PKT_TYPE_TX_EVENT); + + ret = mhi_ep_send_event(mhi_cntrl, ring->er_index, event, MHI_TRE_DATA_GET_BEI(tre)); + kfree(event); + + return ret; +} + +int mhi_ep_send_state_change_event(struct mhi_ep_cntrl *mhi_cntrl, enum mhi_state state) +{ + struct mhi_ring_element *event; + int ret; + + event = kzalloc(sizeof(struct mhi_ring_element), GFP_KERNEL); + if (!event) + return -ENOMEM; + + event->dword[0] = MHI_SC_EV_DWORD0(state); + event->dword[1] = MHI_SC_EV_DWORD1(MHI_PKT_TYPE_STATE_CHANGE_EVENT); + + ret = mhi_ep_send_event(mhi_cntrl, 0, event, 0); + kfree(event); + + return ret; +} + +int mhi_ep_send_ee_event(struct mhi_ep_cntrl *mhi_cntrl, enum mhi_ee_type exec_env) +{ + struct mhi_ring_element *event; + int ret; + + event = kzalloc(sizeof(struct mhi_ring_element), GFP_KERNEL); + if (!event) + return -ENOMEM; + + event->dword[0] = MHI_EE_EV_DWORD0(exec_env); + event->dword[1] = MHI_SC_EV_DWORD1(MHI_PKT_TYPE_EE_EVENT); + + ret = mhi_ep_send_event(mhi_cntrl, 0, event, 0); + kfree(event); + + return ret; +} + +static int mhi_ep_send_cmd_comp_event(struct mhi_ep_cntrl *mhi_cntrl, enum mhi_ev_ccs code) +{ + struct mhi_ep_ring *ring = &mhi_cntrl->mhi_cmd->ring; + struct mhi_ring_element *event; + int ret; + + event = kzalloc(sizeof(struct mhi_ring_element), GFP_KERNEL); + if (!event) + return -ENOMEM; + + event->ptr = cpu_to_le64(ring->rbase + ring->rd_offset * sizeof(struct mhi_ring_element)); + event->dword[0] = MHI_CC_EV_DWORD0(code); + event->dword[1] = MHI_CC_EV_DWORD1(MHI_PKT_TYPE_CMD_COMPLETION_EVENT); + + ret = mhi_ep_send_event(mhi_cntrl, 0, event, 0); + kfree(event); + + return ret; +} + +static int mhi_ep_process_cmd_ring(struct mhi_ep_ring *ring, struct mhi_ring_element *el) +{ + struct mhi_ep_cntrl *mhi_cntrl = ring->mhi_cntrl; + struct device *dev = &mhi_cntrl->mhi_dev->dev; + struct mhi_result result = {}; + struct mhi_ep_chan *mhi_chan; + struct mhi_ep_ring *ch_ring; + u32 tmp, ch_id; + int ret; + + ch_id = MHI_TRE_GET_CMD_CHID(el); + mhi_chan = &mhi_cntrl->mhi_chan[ch_id]; + ch_ring = &mhi_cntrl->mhi_chan[ch_id].ring; + + switch (MHI_TRE_GET_CMD_TYPE(el)) { + case MHI_PKT_TYPE_START_CHAN_CMD: + dev_dbg(dev, "Received START command for channel (%u)\n", ch_id); + + mutex_lock(&mhi_chan->lock); + /* Initialize and configure the corresponding channel ring */ + if (!ch_ring->started) { + ret = mhi_ep_ring_start(mhi_cntrl, ch_ring, + (union mhi_ep_ring_ctx *)&mhi_cntrl->ch_ctx_cache[ch_id]); + if (ret) { + dev_err(dev, "Failed to start ring for channel (%u)\n", ch_id); + ret = mhi_ep_send_cmd_comp_event(mhi_cntrl, + MHI_EV_CC_UNDEFINED_ERR); + if (ret) + dev_err(dev, "Error sending completion event: %d\n", ret); + + goto err_unlock; + } + } + + /* Set channel state to RUNNING */ + mhi_chan->state = MHI_CH_STATE_RUNNING; + tmp = le32_to_cpu(mhi_cntrl->ch_ctx_cache[ch_id].chcfg); + tmp &= ~CHAN_CTX_CHSTATE_MASK; + tmp |= 
FIELD_PREP(CHAN_CTX_CHSTATE_MASK, MHI_CH_STATE_RUNNING); + mhi_cntrl->ch_ctx_cache[ch_id].chcfg = cpu_to_le32(tmp); + + ret = mhi_ep_send_cmd_comp_event(mhi_cntrl, MHI_EV_CC_SUCCESS); + if (ret) { + dev_err(dev, "Error sending command completion event (%u)\n", + MHI_EV_CC_SUCCESS); + goto err_unlock; + } + + mutex_unlock(&mhi_chan->lock); + + /* + * Create MHI device only during UL channel start. Since the MHI + * channels operate in a pair, we'll associate both UL and DL + * channels to the same device. + * + * We also need to check for mhi_dev != NULL because, the host + * will issue START_CHAN command during resume and we don't + * destroy the device during suspend. + */ + if (!(ch_id % 2) && !mhi_chan->mhi_dev) { + ret = mhi_ep_create_device(mhi_cntrl, ch_id); + if (ret) { + dev_err(dev, "Error creating device for channel (%u)\n", ch_id); + mhi_ep_handle_syserr(mhi_cntrl); + return ret; + } + } + + /* Finally, enable DB for the channel */ + mhi_ep_mmio_enable_chdb(mhi_cntrl, ch_id); + + break; + case MHI_PKT_TYPE_STOP_CHAN_CMD: + dev_dbg(dev, "Received STOP command for channel (%u)\n", ch_id); + if (!ch_ring->started) { + dev_err(dev, "Channel (%u) not opened\n", ch_id); + return -ENODEV; + } + + mutex_lock(&mhi_chan->lock); + /* Disable DB for the channel */ + mhi_ep_mmio_disable_chdb(mhi_cntrl, ch_id); + + /* Send channel disconnect status to client drivers */ + if (mhi_chan->xfer_cb) { + result.transaction_status = -ENOTCONN; + result.bytes_xferd = 0; + mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result); + } + + /* Set channel state to STOP */ + mhi_chan->state = MHI_CH_STATE_STOP; + tmp = le32_to_cpu(mhi_cntrl->ch_ctx_cache[ch_id].chcfg); + tmp &= ~CHAN_CTX_CHSTATE_MASK; + tmp |= FIELD_PREP(CHAN_CTX_CHSTATE_MASK, MHI_CH_STATE_STOP); + mhi_cntrl->ch_ctx_cache[ch_id].chcfg = cpu_to_le32(tmp); + + ret = mhi_ep_send_cmd_comp_event(mhi_cntrl, MHI_EV_CC_SUCCESS); + if (ret) { + dev_err(dev, "Error sending command completion event (%u)\n", + MHI_EV_CC_SUCCESS); + goto err_unlock; + } + + mutex_unlock(&mhi_chan->lock); + break; + case MHI_PKT_TYPE_RESET_CHAN_CMD: + dev_dbg(dev, "Received RESET command for channel (%u)\n", ch_id); + if (!ch_ring->started) { + dev_err(dev, "Channel (%u) not opened\n", ch_id); + return -ENODEV; + } + + mutex_lock(&mhi_chan->lock); + /* Stop and reset the transfer ring */ + mhi_ep_ring_reset(mhi_cntrl, ch_ring); + + /* Send channel disconnect status to client driver */ + if (mhi_chan->xfer_cb) { + result.transaction_status = -ENOTCONN; + result.bytes_xferd = 0; + mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result); + } + + /* Set channel state to DISABLED */ + mhi_chan->state = MHI_CH_STATE_DISABLED; + tmp = le32_to_cpu(mhi_cntrl->ch_ctx_cache[ch_id].chcfg); + tmp &= ~CHAN_CTX_CHSTATE_MASK; + tmp |= FIELD_PREP(CHAN_CTX_CHSTATE_MASK, MHI_CH_STATE_DISABLED); + mhi_cntrl->ch_ctx_cache[ch_id].chcfg = cpu_to_le32(tmp); + + ret = mhi_ep_send_cmd_comp_event(mhi_cntrl, MHI_EV_CC_SUCCESS); + if (ret) { + dev_err(dev, "Error sending command completion event (%u)\n", + MHI_EV_CC_SUCCESS); + goto err_unlock; + } + + mutex_unlock(&mhi_chan->lock); + break; + default: + dev_err(dev, "Invalid command received: %lu for channel (%u)\n", + MHI_TRE_GET_CMD_TYPE(el), ch_id); + return -EINVAL; + } + + return 0; + +err_unlock: + mutex_unlock(&mhi_chan->lock); + + return ret; +} + +bool mhi_ep_queue_is_empty(struct mhi_ep_device *mhi_dev, enum dma_data_direction dir) +{ + struct mhi_ep_chan *mhi_chan = (dir == DMA_FROM_DEVICE) ? 
mhi_dev->dl_chan : + mhi_dev->ul_chan; + struct mhi_ep_cntrl *mhi_cntrl = mhi_dev->mhi_cntrl; + struct mhi_ep_ring *ring = &mhi_cntrl->mhi_chan[mhi_chan->chan].ring; + + return !!(ring->rd_offset == ring->wr_offset); +} +EXPORT_SYMBOL_GPL(mhi_ep_queue_is_empty); + +static int mhi_ep_read_channel(struct mhi_ep_cntrl *mhi_cntrl, + struct mhi_ep_ring *ring, + struct mhi_result *result, + u32 len) +{ + struct mhi_ep_chan *mhi_chan = &mhi_cntrl->mhi_chan[ring->ch_id]; + struct device *dev = &mhi_cntrl->mhi_dev->dev; + size_t tr_len, read_offset, write_offset; + struct mhi_ring_element *el; + bool tr_done = false; + void *write_addr; + u64 read_addr; + u32 buf_left; + int ret; + + buf_left = len; + + do { + /* Don't process the transfer ring if the channel is not in RUNNING state */ + if (mhi_chan->state != MHI_CH_STATE_RUNNING) { + dev_err(dev, "Channel not available\n"); + return -ENODEV; + } + + el = &ring->ring_cache[ring->rd_offset]; + + /* Check if there is data pending to be read from previous read operation */ + if (mhi_chan->tre_bytes_left) { + dev_dbg(dev, "TRE bytes remaining: %u\n", mhi_chan->tre_bytes_left); + tr_len = min(buf_left, mhi_chan->tre_bytes_left); + } else { + mhi_chan->tre_loc = MHI_TRE_DATA_GET_PTR(el); + mhi_chan->tre_size = MHI_TRE_DATA_GET_LEN(el); + mhi_chan->tre_bytes_left = mhi_chan->tre_size; + + tr_len = min(buf_left, mhi_chan->tre_size); + } + + read_offset = mhi_chan->tre_size - mhi_chan->tre_bytes_left; + write_offset = len - buf_left; + read_addr = mhi_chan->tre_loc + read_offset; + write_addr = result->buf_addr + write_offset; + + dev_dbg(dev, "Reading %zd bytes from channel (%u)\n", tr_len, ring->ch_id); + ret = mhi_cntrl->read_from_host(mhi_cntrl, read_addr, write_addr, tr_len); + if (ret < 0) { + dev_err(&mhi_chan->mhi_dev->dev, "Error reading from channel\n"); + return ret; + } + + buf_left -= tr_len; + mhi_chan->tre_bytes_left -= tr_len; + + /* + * Once the TRE (Transfer Ring Element) of a TD (Transfer Descriptor) has been + * read completely: + * + * 1. Send completion event to the host based on the flags set in TRE. + * 2. Increment the local read offset of the transfer ring. + */ + if (!mhi_chan->tre_bytes_left) { + /* + * The host will split the data packet into multiple TREs if it can't fit + * the packet in a single TRE. In that case, CHAIN flag will be set by the + * host for all TREs except the last one. + */ + if (MHI_TRE_DATA_GET_CHAIN(el)) { + /* + * IEOB (Interrupt on End of Block) flag will be set by the host if + * it expects the completion event for all TREs of a TD. + */ + if (MHI_TRE_DATA_GET_IEOB(el)) { + ret = mhi_ep_send_completion_event(mhi_cntrl, ring, el, + MHI_TRE_DATA_GET_LEN(el), + MHI_EV_CC_EOB); + if (ret < 0) { + dev_err(&mhi_chan->mhi_dev->dev, + "Error sending transfer compl. event\n"); + return ret; + } + } + } else { + /* + * IEOT (Interrupt on End of Transfer) flag will be set by the host + * for the last TRE of the TD and expects the completion event for + * the same. + */ + if (MHI_TRE_DATA_GET_IEOT(el)) { + ret = mhi_ep_send_completion_event(mhi_cntrl, ring, el, + MHI_TRE_DATA_GET_LEN(el), + MHI_EV_CC_EOT); + if (ret < 0) { + dev_err(&mhi_chan->mhi_dev->dev, + "Error sending transfer compl. 
event\n"); + return ret; + } + } + + tr_done = true; + } + + mhi_ep_ring_inc_index(ring); + } + + result->bytes_xferd += tr_len; + } while (buf_left && !tr_done); + + return 0; +} + +static int mhi_ep_process_ch_ring(struct mhi_ep_ring *ring, struct mhi_ring_element *el) +{ + struct mhi_ep_cntrl *mhi_cntrl = ring->mhi_cntrl; + struct mhi_result result = {}; + u32 len = MHI_EP_DEFAULT_MTU; + struct mhi_ep_chan *mhi_chan; + int ret; + + mhi_chan = &mhi_cntrl->mhi_chan[ring->ch_id]; + + /* + * Bail out if transfer callback is not registered for the channel. + * This is most likely due to the client driver not loaded at this point. + */ + if (!mhi_chan->xfer_cb) { + dev_err(&mhi_chan->mhi_dev->dev, "Client driver not available\n"); + return -ENODEV; + } + + if (ring->ch_id % 2) { + /* DL channel */ + result.dir = mhi_chan->dir; + mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result); + } else { + /* UL channel */ + result.buf_addr = kzalloc(len, GFP_KERNEL); + if (!result.buf_addr) + return -ENOMEM; + + do { + ret = mhi_ep_read_channel(mhi_cntrl, ring, &result, len); + if (ret < 0) { + dev_err(&mhi_chan->mhi_dev->dev, "Failed to read channel\n"); + kfree(result.buf_addr); + return ret; + } + + result.dir = mhi_chan->dir; + mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result); + result.bytes_xferd = 0; + memset(result.buf_addr, 0, len); + + /* Read until the ring becomes empty */ + } while (!mhi_ep_queue_is_empty(mhi_chan->mhi_dev, DMA_TO_DEVICE)); + + kfree(result.buf_addr); + } + + return 0; +} + +/* TODO: Handle partially formed TDs */ +int mhi_ep_queue_skb(struct mhi_ep_device *mhi_dev, struct sk_buff *skb) +{ + struct mhi_ep_cntrl *mhi_cntrl = mhi_dev->mhi_cntrl; + struct mhi_ep_chan *mhi_chan = mhi_dev->dl_chan; + struct device *dev = &mhi_chan->mhi_dev->dev; + struct mhi_ring_element *el; + u32 buf_left, read_offset; + struct mhi_ep_ring *ring; + enum mhi_ev_ccs code; + void *read_addr; + u64 write_addr; + size_t tr_len; + u32 tre_len; + int ret; + + buf_left = skb->len; + ring = &mhi_cntrl->mhi_chan[mhi_chan->chan].ring; + + mutex_lock(&mhi_chan->lock); + + do { + /* Don't process the transfer ring if the channel is not in RUNNING state */ + if (mhi_chan->state != MHI_CH_STATE_RUNNING) { + dev_err(dev, "Channel not available\n"); + ret = -ENODEV; + goto err_exit; + } + + if (mhi_ep_queue_is_empty(mhi_dev, DMA_FROM_DEVICE)) { + dev_err(dev, "TRE not available!\n"); + ret = -ENOSPC; + goto err_exit; + } + + el = &ring->ring_cache[ring->rd_offset]; + tre_len = MHI_TRE_DATA_GET_LEN(el); + + tr_len = min(buf_left, tre_len); + read_offset = skb->len - buf_left; + read_addr = skb->data + read_offset; + write_addr = MHI_TRE_DATA_GET_PTR(el); + + dev_dbg(dev, "Writing %zd bytes to channel (%u)\n", tr_len, ring->ch_id); + ret = mhi_cntrl->write_to_host(mhi_cntrl, read_addr, write_addr, tr_len); + if (ret < 0) { + dev_err(dev, "Error writing to the channel\n"); + goto err_exit; + } + + buf_left -= tr_len; + /* + * For all TREs queued by the host for DL channel, only the EOT flag will be set. + * If the packet doesn't fit into a single TRE, send the OVERFLOW event to + * the host so that the host can adjust the packet boundary to next TREs. Else send + * the EOT event to the host indicating the packet boundary. 
+ */ + if (buf_left) + code = MHI_EV_CC_OVERFLOW; + else + code = MHI_EV_CC_EOT; + + ret = mhi_ep_send_completion_event(mhi_cntrl, ring, el, tr_len, code); + if (ret) { + dev_err(dev, "Error sending transfer completion event\n"); + goto err_exit; + } + + mhi_ep_ring_inc_index(ring); + } while (buf_left); + + mutex_unlock(&mhi_chan->lock); + + return 0; + +err_exit: + mutex_unlock(&mhi_chan->lock); + + return ret; +} +EXPORT_SYMBOL_GPL(mhi_ep_queue_skb); + +static int mhi_ep_cache_host_cfg(struct mhi_ep_cntrl *mhi_cntrl) +{ + size_t cmd_ctx_host_size, ch_ctx_host_size, ev_ctx_host_size; + struct device *dev = &mhi_cntrl->mhi_dev->dev; + int ret; + + /* Update the number of event rings (NER) programmed by the host */ + mhi_ep_mmio_update_ner(mhi_cntrl); + + dev_dbg(dev, "Number of Event rings: %u, HW Event rings: %u\n", + mhi_cntrl->event_rings, mhi_cntrl->hw_event_rings); + + ch_ctx_host_size = sizeof(struct mhi_chan_ctxt) * mhi_cntrl->max_chan; + ev_ctx_host_size = sizeof(struct mhi_event_ctxt) * mhi_cntrl->event_rings; + cmd_ctx_host_size = sizeof(struct mhi_cmd_ctxt) * NR_OF_CMD_RINGS; + + /* Get the channel context base pointer from host */ + mhi_ep_mmio_get_chc_base(mhi_cntrl); + + /* Allocate and map memory for caching host channel context */ + ret = mhi_cntrl->alloc_map(mhi_cntrl, mhi_cntrl->ch_ctx_host_pa, + &mhi_cntrl->ch_ctx_cache_phys, + (void __iomem **) &mhi_cntrl->ch_ctx_cache, + ch_ctx_host_size); + if (ret) { + dev_err(dev, "Failed to allocate and map ch_ctx_cache\n"); + return ret; + } + + /* Get the event context base pointer from host */ + mhi_ep_mmio_get_erc_base(mhi_cntrl); + + /* Allocate and map memory for caching host event context */ + ret = mhi_cntrl->alloc_map(mhi_cntrl, mhi_cntrl->ev_ctx_host_pa, + &mhi_cntrl->ev_ctx_cache_phys, + (void __iomem **) &mhi_cntrl->ev_ctx_cache, + ev_ctx_host_size); + if (ret) { + dev_err(dev, "Failed to allocate and map ev_ctx_cache\n"); + goto err_ch_ctx; + } + + /* Get the command context base pointer from host */ + mhi_ep_mmio_get_crc_base(mhi_cntrl); + + /* Allocate and map memory for caching host command context */ + ret = mhi_cntrl->alloc_map(mhi_cntrl, mhi_cntrl->cmd_ctx_host_pa, + &mhi_cntrl->cmd_ctx_cache_phys, + (void __iomem **) &mhi_cntrl->cmd_ctx_cache, + cmd_ctx_host_size); + if (ret) { + dev_err(dev, "Failed to allocate and map cmd_ctx_cache\n"); + goto err_ev_ctx; + } + + /* Initialize command ring */ + ret = mhi_ep_ring_start(mhi_cntrl, &mhi_cntrl->mhi_cmd->ring, + (union mhi_ep_ring_ctx *)mhi_cntrl->cmd_ctx_cache); + if (ret) { + dev_err(dev, "Failed to start the command ring\n"); + goto err_cmd_ctx; + } + + return ret; + +err_cmd_ctx: + mhi_cntrl->unmap_free(mhi_cntrl, mhi_cntrl->cmd_ctx_host_pa, mhi_cntrl->cmd_ctx_cache_phys, + (void __iomem *) mhi_cntrl->cmd_ctx_cache, cmd_ctx_host_size); + +err_ev_ctx: + mhi_cntrl->unmap_free(mhi_cntrl, mhi_cntrl->ev_ctx_host_pa, mhi_cntrl->ev_ctx_cache_phys, + (void __iomem *) mhi_cntrl->ev_ctx_cache, ev_ctx_host_size); + +err_ch_ctx: + mhi_cntrl->unmap_free(mhi_cntrl, mhi_cntrl->ch_ctx_host_pa, mhi_cntrl->ch_ctx_cache_phys, + (void __iomem *) mhi_cntrl->ch_ctx_cache, ch_ctx_host_size); + + return ret; +} + +static void mhi_ep_free_host_cfg(struct mhi_ep_cntrl *mhi_cntrl) +{ + size_t cmd_ctx_host_size, ch_ctx_host_size, ev_ctx_host_size; + + ch_ctx_host_size = sizeof(struct mhi_chan_ctxt) * mhi_cntrl->max_chan; + ev_ctx_host_size = sizeof(struct mhi_event_ctxt) * mhi_cntrl->event_rings; + cmd_ctx_host_size = sizeof(struct mhi_cmd_ctxt) * NR_OF_CMD_RINGS; + + 
mhi_cntrl->unmap_free(mhi_cntrl, mhi_cntrl->cmd_ctx_host_pa, mhi_cntrl->cmd_ctx_cache_phys, + (void __iomem *) mhi_cntrl->cmd_ctx_cache, cmd_ctx_host_size); + + mhi_cntrl->unmap_free(mhi_cntrl, mhi_cntrl->ev_ctx_host_pa, mhi_cntrl->ev_ctx_cache_phys, + (void __iomem *) mhi_cntrl->ev_ctx_cache, ev_ctx_host_size); + + mhi_cntrl->unmap_free(mhi_cntrl, mhi_cntrl->ch_ctx_host_pa, mhi_cntrl->ch_ctx_cache_phys, + (void __iomem *) mhi_cntrl->ch_ctx_cache, ch_ctx_host_size); +} + +static void mhi_ep_enable_int(struct mhi_ep_cntrl *mhi_cntrl) +{ + /* + * Doorbell interrupts are enabled when the corresponding channel gets started. + * Enabling all interrupts here triggers spurious irqs as some of the interrupts + * associated with hw channels always get triggered. + */ + mhi_ep_mmio_enable_ctrl_interrupt(mhi_cntrl); + mhi_ep_mmio_enable_cmdb_interrupt(mhi_cntrl); +} + +static int mhi_ep_enable(struct mhi_ep_cntrl *mhi_cntrl) +{ + struct device *dev = &mhi_cntrl->mhi_dev->dev; + enum mhi_state state; + bool mhi_reset; + u32 count = 0; + int ret; + + /* Wait for Host to set the M0 state */ + do { + msleep(M0_WAIT_DELAY_MS); + mhi_ep_mmio_get_mhi_state(mhi_cntrl, &state, &mhi_reset); + if (mhi_reset) { + /* Clear the MHI reset if host is in reset state */ + mhi_ep_mmio_clear_reset(mhi_cntrl); + dev_info(dev, "Detected Host reset while waiting for M0\n"); + } + count++; + } while (state != MHI_STATE_M0 && count < M0_WAIT_COUNT); + + if (state != MHI_STATE_M0) { + dev_err(dev, "Host failed to enter M0\n"); + return -ETIMEDOUT; + } + + ret = mhi_ep_cache_host_cfg(mhi_cntrl); + if (ret) { + dev_err(dev, "Failed to cache host config\n"); + return ret; + } + + mhi_ep_mmio_set_env(mhi_cntrl, MHI_EE_AMSS); + + /* Enable all interrupts now */ + mhi_ep_enable_int(mhi_cntrl); + + return 0; +} + +static void mhi_ep_cmd_ring_worker(struct work_struct *work) +{ + struct mhi_ep_cntrl *mhi_cntrl = container_of(work, struct mhi_ep_cntrl, cmd_ring_work); + struct mhi_ep_ring *ring = &mhi_cntrl->mhi_cmd->ring; + struct device *dev = &mhi_cntrl->mhi_dev->dev; + struct mhi_ring_element *el; + int ret; + + /* Update the write offset for the ring */ + ret = mhi_ep_update_wr_offset(ring); + if (ret) { + dev_err(dev, "Error updating write offset for ring\n"); + return; + } + + /* Sanity check to make sure there are elements in the ring */ + if (ring->rd_offset == ring->wr_offset) + return; + + /* + * Process command ring element till write offset. In case of an error, just try to + * process next element. + */ + while (ring->rd_offset != ring->wr_offset) { + el = &ring->ring_cache[ring->rd_offset]; + + ret = mhi_ep_process_cmd_ring(ring, el); + if (ret) + dev_err(dev, "Error processing cmd ring element: %zu\n", ring->rd_offset); + + mhi_ep_ring_inc_index(ring); + } +} + +static void mhi_ep_ch_ring_worker(struct work_struct *work) +{ + struct mhi_ep_cntrl *mhi_cntrl = container_of(work, struct mhi_ep_cntrl, ch_ring_work); + struct device *dev = &mhi_cntrl->mhi_dev->dev; + struct mhi_ep_ring_item *itr, *tmp; + struct mhi_ring_element *el; + struct mhi_ep_ring *ring; + struct mhi_ep_chan *chan; + unsigned long flags; + LIST_HEAD(head); + int ret; + + spin_lock_irqsave(&mhi_cntrl->list_lock, flags); + list_splice_tail_init(&mhi_cntrl->ch_db_list, &head); + spin_unlock_irqrestore(&mhi_cntrl->list_lock, flags); + + /* Process each queued channel ring. In case of an error, just process next element. 
*/ + list_for_each_entry_safe(itr, tmp, &head, node) { + list_del(&itr->node); + ring = itr->ring; + + chan = &mhi_cntrl->mhi_chan[ring->ch_id]; + mutex_lock(&chan->lock); + + /* + * The ring could've stopped while we waited to grab the (chan->lock), so do + * a sanity check before going further. + */ + if (!ring->started) { + mutex_unlock(&chan->lock); + kfree(itr); + continue; + } + + /* Update the write offset for the ring */ + ret = mhi_ep_update_wr_offset(ring); + if (ret) { + dev_err(dev, "Error updating write offset for ring\n"); + mutex_unlock(&chan->lock); + kfree(itr); + continue; + } + + /* Sanity check to make sure there are elements in the ring */ + if (ring->rd_offset == ring->wr_offset) { + mutex_unlock(&chan->lock); + kfree(itr); + continue; + } + + el = &ring->ring_cache[ring->rd_offset]; + + dev_dbg(dev, "Processing the ring for channel (%u)\n", ring->ch_id); + ret = mhi_ep_process_ch_ring(ring, el); + if (ret) { + dev_err(dev, "Error processing ring for channel (%u): %d\n", + ring->ch_id, ret); + mutex_unlock(&chan->lock); + kfree(itr); + continue; + } + + mutex_unlock(&chan->lock); + kfree(itr); + } +} + +static void mhi_ep_state_worker(struct work_struct *work) +{ + struct mhi_ep_cntrl *mhi_cntrl = container_of(work, struct mhi_ep_cntrl, state_work); + struct device *dev = &mhi_cntrl->mhi_dev->dev; + struct mhi_ep_state_transition *itr, *tmp; + unsigned long flags; + LIST_HEAD(head); + int ret; + + spin_lock_irqsave(&mhi_cntrl->list_lock, flags); + list_splice_tail_init(&mhi_cntrl->st_transition_list, &head); + spin_unlock_irqrestore(&mhi_cntrl->list_lock, flags); + + list_for_each_entry_safe(itr, tmp, &head, node) { + list_del(&itr->node); + dev_dbg(dev, "Handling MHI state transition to %s\n", + mhi_state_str(itr->state)); + + switch (itr->state) { + case MHI_STATE_M0: + ret = mhi_ep_set_m0_state(mhi_cntrl); + if (ret) + dev_err(dev, "Failed to transition to M0 state\n"); + break; + case MHI_STATE_M3: + ret = mhi_ep_set_m3_state(mhi_cntrl); + if (ret) + dev_err(dev, "Failed to transition to M3 state\n"); + break; + default: + dev_err(dev, "Invalid MHI state transition: %d\n", itr->state); + break; + } + kfree(itr); + } +} + +static void mhi_ep_queue_channel_db(struct mhi_ep_cntrl *mhi_cntrl, unsigned long ch_int, + u32 ch_idx) +{ + struct mhi_ep_ring_item *item; + struct mhi_ep_ring *ring; + bool work = !!ch_int; + LIST_HEAD(head); + u32 i; + + /* First add the ring items to a local list */ + for_each_set_bit(i, &ch_int, 32) { + /* Channel index varies for each register: 0, 32, 64, 96 */ + u32 ch_id = ch_idx + i; + + ring = &mhi_cntrl->mhi_chan[ch_id].ring; + item = kzalloc(sizeof(*item), GFP_ATOMIC); + if (!item) + return; + + item->ring = ring; + list_add_tail(&item->node, &head); + } + + /* Now, splice the local list into ch_db_list and queue the work item */ + if (work) { + spin_lock(&mhi_cntrl->list_lock); + list_splice_tail_init(&head, &mhi_cntrl->ch_db_list); + spin_unlock(&mhi_cntrl->list_lock); + + queue_work(mhi_cntrl->wq, &mhi_cntrl->ch_ring_work); + } +} + +/* + * Channel interrupt statuses are contained in 4 registers each of 32bit length. + * For checking all interrupts, we need to loop through each registers and then + * check for bits set. 
+ */ +static void mhi_ep_check_channel_interrupt(struct mhi_ep_cntrl *mhi_cntrl) +{ + u32 ch_int, ch_idx, i; + + /* Bail out if there is no channel doorbell interrupt */ + if (!mhi_ep_mmio_read_chdb_status_interrupts(mhi_cntrl)) + return; + + for (i = 0; i < MHI_MASK_ROWS_CH_DB; i++) { + ch_idx = i * MHI_MASK_CH_LEN; + + /* Only process channel interrupt if the mask is enabled */ + ch_int = mhi_cntrl->chdb[i].status & mhi_cntrl->chdb[i].mask; + if (ch_int) { + mhi_ep_queue_channel_db(mhi_cntrl, ch_int, ch_idx); + mhi_ep_mmio_write(mhi_cntrl, MHI_CHDB_INT_CLEAR_n(i), + mhi_cntrl->chdb[i].status); + } + } +} + +static void mhi_ep_process_ctrl_interrupt(struct mhi_ep_cntrl *mhi_cntrl, + enum mhi_state state) +{ + struct mhi_ep_state_transition *item; + + item = kzalloc(sizeof(*item), GFP_ATOMIC); + if (!item) + return; + + item->state = state; + spin_lock(&mhi_cntrl->list_lock); + list_add_tail(&item->node, &mhi_cntrl->st_transition_list); + spin_unlock(&mhi_cntrl->list_lock); + + queue_work(mhi_cntrl->wq, &mhi_cntrl->state_work); +} + +/* + * Interrupt handler that services interrupts raised by the host writing to + * MHICTRL and Command ring doorbell (CRDB) registers for state change and + * channel interrupts. + */ +static irqreturn_t mhi_ep_irq(int irq, void *data) +{ + struct mhi_ep_cntrl *mhi_cntrl = data; + struct device *dev = &mhi_cntrl->mhi_dev->dev; + enum mhi_state state; + u32 int_value; + bool mhi_reset; + + /* Acknowledge the ctrl interrupt */ + int_value = mhi_ep_mmio_read(mhi_cntrl, MHI_CTRL_INT_STATUS); + mhi_ep_mmio_write(mhi_cntrl, MHI_CTRL_INT_CLEAR, int_value); + + /* Check for ctrl interrupt */ + if (FIELD_GET(MHI_CTRL_INT_STATUS_MSK, int_value)) { + dev_dbg(dev, "Processing ctrl interrupt\n"); + mhi_ep_mmio_get_mhi_state(mhi_cntrl, &state, &mhi_reset); + if (mhi_reset) { + dev_info(dev, "Host triggered MHI reset!\n"); + disable_irq_nosync(mhi_cntrl->irq); + schedule_work(&mhi_cntrl->reset_work); + return IRQ_HANDLED; + } + + mhi_ep_process_ctrl_interrupt(mhi_cntrl, state); + } + + /* Check for command doorbell interrupt */ + if (FIELD_GET(MHI_CTRL_INT_STATUS_CRDB_MSK, int_value)) { + dev_dbg(dev, "Processing command doorbell interrupt\n"); + queue_work(mhi_cntrl->wq, &mhi_cntrl->cmd_ring_work); + } + + /* Check for channel interrupts */ + mhi_ep_check_channel_interrupt(mhi_cntrl); + + return IRQ_HANDLED; +} + +static void mhi_ep_abort_transfer(struct mhi_ep_cntrl *mhi_cntrl) +{ + struct mhi_ep_ring *ch_ring, *ev_ring; + struct mhi_result result = {}; + struct mhi_ep_chan *mhi_chan; + int i; + + /* Stop all the channels */ + for (i = 0; i < mhi_cntrl->max_chan; i++) { + mhi_chan = &mhi_cntrl->mhi_chan[i]; + if (!mhi_chan->ring.started) + continue; + + mutex_lock(&mhi_chan->lock); + /* Send channel disconnect status to client drivers */ + if (mhi_chan->xfer_cb) { + result.transaction_status = -ENOTCONN; + result.bytes_xferd = 0; + mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result); + } + + mhi_chan->state = MHI_CH_STATE_DISABLED; + mutex_unlock(&mhi_chan->lock); + } + + flush_workqueue(mhi_cntrl->wq); + + /* Destroy devices associated with all channels */ + device_for_each_child(&mhi_cntrl->mhi_dev->dev, NULL, mhi_ep_destroy_device); + + /* Stop and reset the transfer rings */ + for (i = 0; i < mhi_cntrl->max_chan; i++) { + mhi_chan = &mhi_cntrl->mhi_chan[i]; + if (!mhi_chan->ring.started) + continue; + + ch_ring = &mhi_cntrl->mhi_chan[i].ring; + mutex_lock(&mhi_chan->lock); + mhi_ep_ring_reset(mhi_cntrl, ch_ring); + mutex_unlock(&mhi_chan->lock); + } + + /* Stop and reset 
the event rings */ + for (i = 0; i < mhi_cntrl->event_rings; i++) { + ev_ring = &mhi_cntrl->mhi_event[i].ring; + if (!ev_ring->started) + continue; + + mutex_lock(&mhi_cntrl->event_lock); + mhi_ep_ring_reset(mhi_cntrl, ev_ring); + mutex_unlock(&mhi_cntrl->event_lock); + } + + /* Stop and reset the command ring */ + mhi_ep_ring_reset(mhi_cntrl, &mhi_cntrl->mhi_cmd->ring); + + mhi_ep_free_host_cfg(mhi_cntrl); + mhi_ep_mmio_mask_interrupts(mhi_cntrl); + + mhi_cntrl->enabled = false; +} + +static void mhi_ep_reset_worker(struct work_struct *work) +{ + struct mhi_ep_cntrl *mhi_cntrl = container_of(work, struct mhi_ep_cntrl, reset_work); + enum mhi_state cur_state; + + mhi_ep_power_down(mhi_cntrl); + + mutex_lock(&mhi_cntrl->state_lock); + + /* Reset MMIO to signal host that the MHI_RESET is completed in endpoint */ + mhi_ep_mmio_reset(mhi_cntrl); + cur_state = mhi_cntrl->mhi_state; + + /* + * Only proceed further if the reset is due to SYS_ERR. The host will + * issue reset during shutdown also and we don't need to do re-init in + * that case. + */ + if (cur_state == MHI_STATE_SYS_ERR) + mhi_ep_power_up(mhi_cntrl); + + mutex_unlock(&mhi_cntrl->state_lock); +} + +/* + * We don't need to do anything special other than setting the MHI SYS_ERR + * state. The host will reset all contexts and issue MHI RESET so that we + * could also recover from error state. + */ +void mhi_ep_handle_syserr(struct mhi_ep_cntrl *mhi_cntrl) +{ + struct device *dev = &mhi_cntrl->mhi_dev->dev; + int ret; + + ret = mhi_ep_set_mhi_state(mhi_cntrl, MHI_STATE_SYS_ERR); + if (ret) + return; + + /* Signal host that the device went to SYS_ERR state */ + ret = mhi_ep_send_state_change_event(mhi_cntrl, MHI_STATE_SYS_ERR); + if (ret) + dev_err(dev, "Failed sending SYS_ERR state change event: %d\n", ret); +} + +int mhi_ep_power_up(struct mhi_ep_cntrl *mhi_cntrl) +{ + struct device *dev = &mhi_cntrl->mhi_dev->dev; + int ret, i; + + /* + * Mask all interrupts until the state machine is ready. Interrupts will + * be enabled later with mhi_ep_enable(). 
+ */ + mhi_ep_mmio_mask_interrupts(mhi_cntrl); + mhi_ep_mmio_init(mhi_cntrl); + + mhi_cntrl->mhi_event = kzalloc(mhi_cntrl->event_rings * (sizeof(*mhi_cntrl->mhi_event)), + GFP_KERNEL); + if (!mhi_cntrl->mhi_event) + return -ENOMEM; + + /* Initialize command, channel and event rings */ + mhi_ep_ring_init(&mhi_cntrl->mhi_cmd->ring, RING_TYPE_CMD, 0); + for (i = 0; i < mhi_cntrl->max_chan; i++) + mhi_ep_ring_init(&mhi_cntrl->mhi_chan[i].ring, RING_TYPE_CH, i); + for (i = 0; i < mhi_cntrl->event_rings; i++) + mhi_ep_ring_init(&mhi_cntrl->mhi_event[i].ring, RING_TYPE_ER, i); + + mhi_cntrl->mhi_state = MHI_STATE_RESET; + + /* Set AMSS EE before signaling ready state */ + mhi_ep_mmio_set_env(mhi_cntrl, MHI_EE_AMSS); + + /* All set, notify the host that we are ready */ + ret = mhi_ep_set_ready_state(mhi_cntrl); + if (ret) + goto err_free_event; + + dev_dbg(dev, "READY state notification sent to the host\n"); + + ret = mhi_ep_enable(mhi_cntrl); + if (ret) { + dev_err(dev, "Failed to enable MHI endpoint\n"); + goto err_free_event; + } + + enable_irq(mhi_cntrl->irq); + mhi_cntrl->enabled = true; + + return 0; + +err_free_event: + kfree(mhi_cntrl->mhi_event); + + return ret; +} +EXPORT_SYMBOL_GPL(mhi_ep_power_up); + +void mhi_ep_power_down(struct mhi_ep_cntrl *mhi_cntrl) +{ + if (mhi_cntrl->enabled) { + mhi_ep_abort_transfer(mhi_cntrl); + kfree(mhi_cntrl->mhi_event); + disable_irq(mhi_cntrl->irq); + } +} +EXPORT_SYMBOL_GPL(mhi_ep_power_down); + +void mhi_ep_suspend_channels(struct mhi_ep_cntrl *mhi_cntrl) +{ + struct mhi_ep_chan *mhi_chan; + u32 tmp; + int i; + + for (i = 0; i < mhi_cntrl->max_chan; i++) { + mhi_chan = &mhi_cntrl->mhi_chan[i]; + + if (!mhi_chan->mhi_dev) + continue; + + mutex_lock(&mhi_chan->lock); + /* Skip if the channel is not currently running */ + tmp = le32_to_cpu(mhi_cntrl->ch_ctx_cache[i].chcfg); + if (FIELD_GET(CHAN_CTX_CHSTATE_MASK, tmp) != MHI_CH_STATE_RUNNING) { + mutex_unlock(&mhi_chan->lock); + continue; + } + + dev_dbg(&mhi_chan->mhi_dev->dev, "Suspending channel\n"); + /* Set channel state to SUSPENDED */ + mhi_chan->state = MHI_CH_STATE_SUSPENDED; + tmp &= ~CHAN_CTX_CHSTATE_MASK; + tmp |= FIELD_PREP(CHAN_CTX_CHSTATE_MASK, MHI_CH_STATE_SUSPENDED); + mhi_cntrl->ch_ctx_cache[i].chcfg = cpu_to_le32(tmp); + mutex_unlock(&mhi_chan->lock); + } +} + +void mhi_ep_resume_channels(struct mhi_ep_cntrl *mhi_cntrl) +{ + struct mhi_ep_chan *mhi_chan; + u32 tmp; + int i; + + for (i = 0; i < mhi_cntrl->max_chan; i++) { + mhi_chan = &mhi_cntrl->mhi_chan[i]; + + if (!mhi_chan->mhi_dev) + continue; + + mutex_lock(&mhi_chan->lock); + /* Skip if the channel is not currently suspended */ + tmp = le32_to_cpu(mhi_cntrl->ch_ctx_cache[i].chcfg); + if (FIELD_GET(CHAN_CTX_CHSTATE_MASK, tmp) != MHI_CH_STATE_SUSPENDED) { + mutex_unlock(&mhi_chan->lock); + continue; + } + + dev_dbg(&mhi_chan->mhi_dev->dev, "Resuming channel\n"); + /* Set channel state to RUNNING */ + mhi_chan->state = MHI_CH_STATE_RUNNING; + tmp &= ~CHAN_CTX_CHSTATE_MASK; + tmp |= FIELD_PREP(CHAN_CTX_CHSTATE_MASK, MHI_CH_STATE_RUNNING); + mhi_cntrl->ch_ctx_cache[i].chcfg = cpu_to_le32(tmp); + mutex_unlock(&mhi_chan->lock); + } +} + +static void mhi_ep_release_device(struct device *dev) +{ + struct mhi_ep_device *mhi_dev = to_mhi_ep_device(dev); + + if (mhi_dev->dev_type == MHI_DEVICE_CONTROLLER) + mhi_dev->mhi_cntrl->mhi_dev = NULL; + + /* + * We need to set the mhi_chan->mhi_dev to NULL here since the MHI + * devices for the channels will only get created in mhi_ep_create_device() + * if the mhi_dev associated with it is NULL. 
+ */ + if (mhi_dev->ul_chan) + mhi_dev->ul_chan->mhi_dev = NULL; + + if (mhi_dev->dl_chan) + mhi_dev->dl_chan->mhi_dev = NULL; + + kfree(mhi_dev); +} + +static struct mhi_ep_device *mhi_ep_alloc_device(struct mhi_ep_cntrl *mhi_cntrl, + enum mhi_device_type dev_type) +{ + struct mhi_ep_device *mhi_dev; + struct device *dev; + + mhi_dev = kzalloc(sizeof(*mhi_dev), GFP_KERNEL); + if (!mhi_dev) + return ERR_PTR(-ENOMEM); + + dev = &mhi_dev->dev; + device_initialize(dev); + dev->bus = &mhi_ep_bus_type; + dev->release = mhi_ep_release_device; + + /* Controller device is always allocated first */ + if (dev_type == MHI_DEVICE_CONTROLLER) + /* for MHI controller device, parent is the bus device (e.g. PCI EPF) */ + dev->parent = mhi_cntrl->cntrl_dev; + else + /* for MHI client devices, parent is the MHI controller device */ + dev->parent = &mhi_cntrl->mhi_dev->dev; + + mhi_dev->mhi_cntrl = mhi_cntrl; + mhi_dev->dev_type = dev_type; + + return mhi_dev; +} + +/* + * MHI channels are always defined in pairs with UL as the even numbered + * channel and DL as odd numbered one. This function gets UL channel (primary) + * as the ch_id and always looks after the next entry in channel list for + * the corresponding DL channel (secondary). + */ +static int mhi_ep_create_device(struct mhi_ep_cntrl *mhi_cntrl, u32 ch_id) +{ + struct mhi_ep_chan *mhi_chan = &mhi_cntrl->mhi_chan[ch_id]; + struct device *dev = mhi_cntrl->cntrl_dev; + struct mhi_ep_device *mhi_dev; + int ret; + + /* Check if the channel name is same for both UL and DL */ + if (strcmp(mhi_chan->name, mhi_chan[1].name)) { + dev_err(dev, "UL and DL channel names are not same: (%s) != (%s)\n", + mhi_chan->name, mhi_chan[1].name); + return -EINVAL; + } + + mhi_dev = mhi_ep_alloc_device(mhi_cntrl, MHI_DEVICE_XFER); + if (IS_ERR(mhi_dev)) + return PTR_ERR(mhi_dev); + + /* Configure primary channel */ + mhi_dev->ul_chan = mhi_chan; + get_device(&mhi_dev->dev); + mhi_chan->mhi_dev = mhi_dev; + + /* Configure secondary channel as well */ + mhi_chan++; + mhi_dev->dl_chan = mhi_chan; + get_device(&mhi_dev->dev); + mhi_chan->mhi_dev = mhi_dev; + + /* Channel name is same for both UL and DL */ + mhi_dev->name = mhi_chan->name; + ret = dev_set_name(&mhi_dev->dev, "%s_%s", + dev_name(&mhi_cntrl->mhi_dev->dev), + mhi_dev->name); + if (ret) { + put_device(&mhi_dev->dev); + return ret; + } + + ret = device_add(&mhi_dev->dev); + if (ret) + put_device(&mhi_dev->dev); + + return ret; +} + +static int mhi_ep_destroy_device(struct device *dev, void *data) +{ + struct mhi_ep_device *mhi_dev; + struct mhi_ep_cntrl *mhi_cntrl; + struct mhi_ep_chan *ul_chan, *dl_chan; + + if (dev->bus != &mhi_ep_bus_type) + return 0; + + mhi_dev = to_mhi_ep_device(dev); + mhi_cntrl = mhi_dev->mhi_cntrl; + + /* Only destroy devices created for channels */ + if (mhi_dev->dev_type == MHI_DEVICE_CONTROLLER) + return 0; + + ul_chan = mhi_dev->ul_chan; + dl_chan = mhi_dev->dl_chan; + + if (ul_chan) + put_device(&ul_chan->mhi_dev->dev); + + if (dl_chan) + put_device(&dl_chan->mhi_dev->dev); + + dev_dbg(&mhi_cntrl->mhi_dev->dev, "Destroying device for chan:%s\n", + mhi_dev->name); + + /* Notify the client and remove the device from MHI bus */ + device_del(dev); + put_device(dev); + + return 0; +} + +static int mhi_ep_chan_init(struct mhi_ep_cntrl *mhi_cntrl, + const struct mhi_ep_cntrl_config *config) +{ + const struct mhi_ep_channel_config *ch_cfg; + struct device *dev = mhi_cntrl->cntrl_dev; + u32 chan, i; + int ret = -EINVAL; + + mhi_cntrl->max_chan = config->max_channels; + + /* + * Allocate 
max_channels supported by the MHI endpoint and populate + * only the defined channels + */ + mhi_cntrl->mhi_chan = kcalloc(mhi_cntrl->max_chan, sizeof(*mhi_cntrl->mhi_chan), + GFP_KERNEL); + if (!mhi_cntrl->mhi_chan) + return -ENOMEM; + + for (i = 0; i < config->num_channels; i++) { + struct mhi_ep_chan *mhi_chan; + + ch_cfg = &config->ch_cfg[i]; + + chan = ch_cfg->num; + if (chan >= mhi_cntrl->max_chan) { + dev_err(dev, "Channel (%u) exceeds maximum available channels (%u)\n", + chan, mhi_cntrl->max_chan); + goto error_chan_cfg; + } + + /* Bi-directional and direction less channels are not supported */ + if (ch_cfg->dir == DMA_BIDIRECTIONAL || ch_cfg->dir == DMA_NONE) { + dev_err(dev, "Invalid direction (%u) for channel (%u)\n", + ch_cfg->dir, chan); + goto error_chan_cfg; + } + + mhi_chan = &mhi_cntrl->mhi_chan[chan]; + mhi_chan->name = ch_cfg->name; + mhi_chan->chan = chan; + mhi_chan->dir = ch_cfg->dir; + mutex_init(&mhi_chan->lock); + } + + return 0; + +error_chan_cfg: + kfree(mhi_cntrl->mhi_chan); + + return ret; +} + +/* + * Allocate channel and command rings here. Event rings will be allocated + * in mhi_ep_power_up() as the config comes from the host. + */ +int mhi_ep_register_controller(struct mhi_ep_cntrl *mhi_cntrl, + const struct mhi_ep_cntrl_config *config) +{ + struct mhi_ep_device *mhi_dev; + int ret; + + if (!mhi_cntrl || !mhi_cntrl->cntrl_dev || !mhi_cntrl->mmio || !mhi_cntrl->irq) + return -EINVAL; + + ret = mhi_ep_chan_init(mhi_cntrl, config); + if (ret) + return ret; + + mhi_cntrl->mhi_cmd = kcalloc(NR_OF_CMD_RINGS, sizeof(*mhi_cntrl->mhi_cmd), GFP_KERNEL); + if (!mhi_cntrl->mhi_cmd) { + ret = -ENOMEM; + goto err_free_ch; + } + + INIT_WORK(&mhi_cntrl->state_work, mhi_ep_state_worker); + INIT_WORK(&mhi_cntrl->reset_work, mhi_ep_reset_worker); + INIT_WORK(&mhi_cntrl->cmd_ring_work, mhi_ep_cmd_ring_worker); + INIT_WORK(&mhi_cntrl->ch_ring_work, mhi_ep_ch_ring_worker); + + mhi_cntrl->wq = alloc_workqueue("mhi_ep_wq", 0, 0); + if (!mhi_cntrl->wq) { + ret = -ENOMEM; + goto err_free_cmd; + } + + INIT_LIST_HEAD(&mhi_cntrl->st_transition_list); + INIT_LIST_HEAD(&mhi_cntrl->ch_db_list); + spin_lock_init(&mhi_cntrl->list_lock); + mutex_init(&mhi_cntrl->state_lock); + mutex_init(&mhi_cntrl->event_lock); + + /* Set MHI version and AMSS EE before enumeration */ + mhi_ep_mmio_write(mhi_cntrl, EP_MHIVER, config->mhi_version); + mhi_ep_mmio_set_env(mhi_cntrl, MHI_EE_AMSS); + + /* Set controller index */ + ret = ida_alloc(&mhi_ep_cntrl_ida, GFP_KERNEL); + if (ret < 0) + goto err_destroy_wq; + + mhi_cntrl->index = ret; + + irq_set_status_flags(mhi_cntrl->irq, IRQ_NOAUTOEN); + ret = request_irq(mhi_cntrl->irq, mhi_ep_irq, IRQF_TRIGGER_HIGH, + "doorbell_irq", mhi_cntrl); + if (ret) { + dev_err(mhi_cntrl->cntrl_dev, "Failed to request Doorbell IRQ\n"); + goto err_ida_free; + } + + /* Allocate the controller device */ + mhi_dev = mhi_ep_alloc_device(mhi_cntrl, MHI_DEVICE_CONTROLLER); + if (IS_ERR(mhi_dev)) { + dev_err(mhi_cntrl->cntrl_dev, "Failed to allocate controller device\n"); + ret = PTR_ERR(mhi_dev); + goto err_free_irq; + } + + ret = dev_set_name(&mhi_dev->dev, "mhi_ep%u", mhi_cntrl->index); + if (ret) + goto err_put_dev; + + mhi_dev->name = dev_name(&mhi_dev->dev); + mhi_cntrl->mhi_dev = mhi_dev; + + ret = device_add(&mhi_dev->dev); + if (ret) + goto err_put_dev; + + dev_dbg(&mhi_dev->dev, "MHI EP Controller registered\n"); + + return 0; + +err_put_dev: + put_device(&mhi_dev->dev); +err_free_irq: + free_irq(mhi_cntrl->irq, mhi_cntrl); +err_ida_free: + ida_free(&mhi_ep_cntrl_ida, 
mhi_cntrl->index); +err_destroy_wq: + destroy_workqueue(mhi_cntrl->wq); +err_free_cmd: + kfree(mhi_cntrl->mhi_cmd); +err_free_ch: + kfree(mhi_cntrl->mhi_chan); + + return ret; +} +EXPORT_SYMBOL_GPL(mhi_ep_register_controller); + +/* + * It is expected that the controller drivers will power down the MHI EP stack + * using "mhi_ep_power_down()" before calling this function to unregister themselves. + */ +void mhi_ep_unregister_controller(struct mhi_ep_cntrl *mhi_cntrl) +{ + struct mhi_ep_device *mhi_dev = mhi_cntrl->mhi_dev; + + destroy_workqueue(mhi_cntrl->wq); + + free_irq(mhi_cntrl->irq, mhi_cntrl); + + kfree(mhi_cntrl->mhi_cmd); + kfree(mhi_cntrl->mhi_chan); + + device_del(&mhi_dev->dev); + put_device(&mhi_dev->dev); + + ida_free(&mhi_ep_cntrl_ida, mhi_cntrl->index); +} +EXPORT_SYMBOL_GPL(mhi_ep_unregister_controller); + +static int mhi_ep_driver_probe(struct device *dev) +{ + struct mhi_ep_device *mhi_dev = to_mhi_ep_device(dev); + struct mhi_ep_driver *mhi_drv = to_mhi_ep_driver(dev->driver); + struct mhi_ep_chan *ul_chan = mhi_dev->ul_chan; + struct mhi_ep_chan *dl_chan = mhi_dev->dl_chan; + + ul_chan->xfer_cb = mhi_drv->ul_xfer_cb; + dl_chan->xfer_cb = mhi_drv->dl_xfer_cb; + + return mhi_drv->probe(mhi_dev, mhi_dev->id); +} + +static int mhi_ep_driver_remove(struct device *dev) +{ + struct mhi_ep_device *mhi_dev = to_mhi_ep_device(dev); + struct mhi_ep_driver *mhi_drv = to_mhi_ep_driver(dev->driver); + struct mhi_result result = {}; + struct mhi_ep_chan *mhi_chan; + int dir; + + /* Skip if it is a controller device */ + if (mhi_dev->dev_type == MHI_DEVICE_CONTROLLER) + return 0; + + /* Disconnect the channels associated with the driver */ + for (dir = 0; dir < 2; dir++) { + mhi_chan = dir ? mhi_dev->ul_chan : mhi_dev->dl_chan; + + if (!mhi_chan) + continue; + + mutex_lock(&mhi_chan->lock); + /* Send channel disconnect status to the client driver */ + if (mhi_chan->xfer_cb) { + result.transaction_status = -ENOTCONN; + result.bytes_xferd = 0; + mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result); + } + + mhi_chan->state = MHI_CH_STATE_DISABLED; + mhi_chan->xfer_cb = NULL; + mutex_unlock(&mhi_chan->lock); + } + + /* Remove the client driver now */ + mhi_drv->remove(mhi_dev); + + return 0; +} + +int __mhi_ep_driver_register(struct mhi_ep_driver *mhi_drv, struct module *owner) +{ + struct device_driver *driver = &mhi_drv->driver; + + if (!mhi_drv->probe || !mhi_drv->remove) + return -EINVAL; + + /* Client drivers should have callbacks defined for both channels */ + if (!mhi_drv->ul_xfer_cb || !mhi_drv->dl_xfer_cb) + return -EINVAL; + + driver->bus = &mhi_ep_bus_type; + driver->owner = owner; + driver->probe = mhi_ep_driver_probe; + driver->remove = mhi_ep_driver_remove; + + return driver_register(driver); +} +EXPORT_SYMBOL_GPL(__mhi_ep_driver_register); + +void mhi_ep_driver_unregister(struct mhi_ep_driver *mhi_drv) +{ + driver_unregister(&mhi_drv->driver); +} +EXPORT_SYMBOL_GPL(mhi_ep_driver_unregister); + +static int mhi_ep_uevent(struct device *dev, struct kobj_uevent_env *env) +{ + struct mhi_ep_device *mhi_dev = to_mhi_ep_device(dev); + + return add_uevent_var(env, "MODALIAS=" MHI_EP_DEVICE_MODALIAS_FMT, + mhi_dev->name); +} + +static int mhi_ep_match(struct device *dev, struct device_driver *drv) +{ + struct mhi_ep_device *mhi_dev = to_mhi_ep_device(dev); + struct mhi_ep_driver *mhi_drv = to_mhi_ep_driver(drv); + const struct mhi_device_id *id; + + /* + * If the device is a controller type then there is no client driver + * associated with it + */ + if (mhi_dev->dev_type == 
MHI_DEVICE_CONTROLLER) + return 0; + + for (id = mhi_drv->id_table; id->chan[0]; id++) + if (!strcmp(mhi_dev->name, id->chan)) { + mhi_dev->id = id; + return 1; + } + + return 0; +}; + +struct bus_type mhi_ep_bus_type = { + .name = "mhi_ep", + .dev_name = "mhi_ep", + .match = mhi_ep_match, + .uevent = mhi_ep_uevent, +}; + +static int __init mhi_ep_init(void) +{ + return bus_register(&mhi_ep_bus_type); +} + +static void __exit mhi_ep_exit(void) +{ + bus_unregister(&mhi_ep_bus_type); +} + +postcore_initcall(mhi_ep_init); +module_exit(mhi_ep_exit); + +MODULE_LICENSE("GPL v2"); +MODULE_DESCRIPTION("MHI Bus Endpoint stack"); +MODULE_AUTHOR("Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>"); diff --git a/drivers/bus/mhi/ep/mmio.c b/drivers/bus/mhi/ep/mmio.c new file mode 100644 index 000000000..b5bfd22f2 --- /dev/null +++ b/drivers/bus/mhi/ep/mmio.c @@ -0,0 +1,273 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2022 Linaro Ltd. + * Author: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org> + */ + +#include <linux/bitfield.h> +#include <linux/io.h> +#include <linux/mhi_ep.h> + +#include "internal.h" + +u32 mhi_ep_mmio_read(struct mhi_ep_cntrl *mhi_cntrl, u32 offset) +{ + return readl(mhi_cntrl->mmio + offset); +} + +void mhi_ep_mmio_write(struct mhi_ep_cntrl *mhi_cntrl, u32 offset, u32 val) +{ + writel(val, mhi_cntrl->mmio + offset); +} + +void mhi_ep_mmio_masked_write(struct mhi_ep_cntrl *mhi_cntrl, u32 offset, u32 mask, u32 val) +{ + u32 regval; + + regval = mhi_ep_mmio_read(mhi_cntrl, offset); + regval &= ~mask; + regval |= (val << __ffs(mask)) & mask; + mhi_ep_mmio_write(mhi_cntrl, offset, regval); +} + +u32 mhi_ep_mmio_masked_read(struct mhi_ep_cntrl *dev, u32 offset, u32 mask) +{ + u32 regval; + + regval = mhi_ep_mmio_read(dev, offset); + regval &= mask; + regval >>= __ffs(mask); + + return regval; +} + +void mhi_ep_mmio_get_mhi_state(struct mhi_ep_cntrl *mhi_cntrl, enum mhi_state *state, + bool *mhi_reset) +{ + u32 regval; + + regval = mhi_ep_mmio_read(mhi_cntrl, EP_MHICTRL); + *state = FIELD_GET(MHICTRL_MHISTATE_MASK, regval); + *mhi_reset = !!FIELD_GET(MHICTRL_RESET_MASK, regval); +} + +static void mhi_ep_mmio_set_chdb(struct mhi_ep_cntrl *mhi_cntrl, u32 ch_id, bool enable) +{ + u32 chid_mask, chid_shift, chdb_idx, val; + + chid_shift = ch_id % 32; + chid_mask = BIT(chid_shift); + chdb_idx = ch_id / 32; + + val = enable ? 1 : 0; + + mhi_ep_mmio_masked_write(mhi_cntrl, MHI_CHDB_INT_MASK_n(chdb_idx), chid_mask, val); + + /* Update the local copy of the channel mask */ + mhi_cntrl->chdb[chdb_idx].mask &= ~chid_mask; + mhi_cntrl->chdb[chdb_idx].mask |= val << chid_shift; +} + +void mhi_ep_mmio_enable_chdb(struct mhi_ep_cntrl *mhi_cntrl, u32 ch_id) +{ + mhi_ep_mmio_set_chdb(mhi_cntrl, ch_id, true); +} + +void mhi_ep_mmio_disable_chdb(struct mhi_ep_cntrl *mhi_cntrl, u32 ch_id) +{ + mhi_ep_mmio_set_chdb(mhi_cntrl, ch_id, false); +} + +static void mhi_ep_mmio_set_chdb_interrupts(struct mhi_ep_cntrl *mhi_cntrl, bool enable) +{ + u32 val, i; + + val = enable ? 
MHI_CHDB_INT_MASK_n_EN_ALL : 0; + + for (i = 0; i < MHI_MASK_ROWS_CH_DB; i++) { + mhi_ep_mmio_write(mhi_cntrl, MHI_CHDB_INT_MASK_n(i), val); + mhi_cntrl->chdb[i].mask = val; + } +} + +void mhi_ep_mmio_enable_chdb_interrupts(struct mhi_ep_cntrl *mhi_cntrl) +{ + mhi_ep_mmio_set_chdb_interrupts(mhi_cntrl, true); +} + +static void mhi_ep_mmio_mask_chdb_interrupts(struct mhi_ep_cntrl *mhi_cntrl) +{ + mhi_ep_mmio_set_chdb_interrupts(mhi_cntrl, false); +} + +bool mhi_ep_mmio_read_chdb_status_interrupts(struct mhi_ep_cntrl *mhi_cntrl) +{ + bool chdb = false; + u32 i; + + for (i = 0; i < MHI_MASK_ROWS_CH_DB; i++) { + mhi_cntrl->chdb[i].status = mhi_ep_mmio_read(mhi_cntrl, MHI_CHDB_INT_STATUS_n(i)); + if (mhi_cntrl->chdb[i].status) + chdb = true; + } + + /* Return whether a channel doorbell interrupt occurred or not */ + return chdb; +} + +static void mhi_ep_mmio_set_erdb_interrupts(struct mhi_ep_cntrl *mhi_cntrl, bool enable) +{ + u32 val, i; + + val = enable ? MHI_ERDB_INT_MASK_n_EN_ALL : 0; + + for (i = 0; i < MHI_MASK_ROWS_EV_DB; i++) + mhi_ep_mmio_write(mhi_cntrl, MHI_ERDB_INT_MASK_n(i), val); +} + +static void mhi_ep_mmio_mask_erdb_interrupts(struct mhi_ep_cntrl *mhi_cntrl) +{ + mhi_ep_mmio_set_erdb_interrupts(mhi_cntrl, false); +} + +void mhi_ep_mmio_enable_ctrl_interrupt(struct mhi_ep_cntrl *mhi_cntrl) +{ + mhi_ep_mmio_masked_write(mhi_cntrl, MHI_CTRL_INT_MASK, + MHI_CTRL_MHICTRL_MASK, 1); +} + +void mhi_ep_mmio_disable_ctrl_interrupt(struct mhi_ep_cntrl *mhi_cntrl) +{ + mhi_ep_mmio_masked_write(mhi_cntrl, MHI_CTRL_INT_MASK, + MHI_CTRL_MHICTRL_MASK, 0); +} + +void mhi_ep_mmio_enable_cmdb_interrupt(struct mhi_ep_cntrl *mhi_cntrl) +{ + mhi_ep_mmio_masked_write(mhi_cntrl, MHI_CTRL_INT_MASK, + MHI_CTRL_CRDB_MASK, 1); +} + +void mhi_ep_mmio_disable_cmdb_interrupt(struct mhi_ep_cntrl *mhi_cntrl) +{ + mhi_ep_mmio_masked_write(mhi_cntrl, MHI_CTRL_INT_MASK, + MHI_CTRL_CRDB_MASK, 0); +} + +void mhi_ep_mmio_mask_interrupts(struct mhi_ep_cntrl *mhi_cntrl) +{ + mhi_ep_mmio_disable_ctrl_interrupt(mhi_cntrl); + mhi_ep_mmio_disable_cmdb_interrupt(mhi_cntrl); + mhi_ep_mmio_mask_chdb_interrupts(mhi_cntrl); + mhi_ep_mmio_mask_erdb_interrupts(mhi_cntrl); +} + +static void mhi_ep_mmio_clear_interrupts(struct mhi_ep_cntrl *mhi_cntrl) +{ + u32 i; + + for (i = 0; i < MHI_MASK_ROWS_CH_DB; i++) + mhi_ep_mmio_write(mhi_cntrl, MHI_CHDB_INT_CLEAR_n(i), + MHI_CHDB_INT_CLEAR_n_CLEAR_ALL); + + for (i = 0; i < MHI_MASK_ROWS_EV_DB; i++) + mhi_ep_mmio_write(mhi_cntrl, MHI_ERDB_INT_CLEAR_n(i), + MHI_ERDB_INT_CLEAR_n_CLEAR_ALL); + + mhi_ep_mmio_write(mhi_cntrl, MHI_CTRL_INT_CLEAR, + MHI_CTRL_INT_MMIO_WR_CLEAR | + MHI_CTRL_INT_CRDB_CLEAR | + MHI_CTRL_INT_CRDB_MHICTRL_CLEAR); +} + +void mhi_ep_mmio_get_chc_base(struct mhi_ep_cntrl *mhi_cntrl) +{ + u32 regval; + + regval = mhi_ep_mmio_read(mhi_cntrl, EP_CCABAP_HIGHER); + mhi_cntrl->ch_ctx_host_pa = regval; + mhi_cntrl->ch_ctx_host_pa <<= 32; + + regval = mhi_ep_mmio_read(mhi_cntrl, EP_CCABAP_LOWER); + mhi_cntrl->ch_ctx_host_pa |= regval; +} + +void mhi_ep_mmio_get_erc_base(struct mhi_ep_cntrl *mhi_cntrl) +{ + u32 regval; + + regval = mhi_ep_mmio_read(mhi_cntrl, EP_ECABAP_HIGHER); + mhi_cntrl->ev_ctx_host_pa = regval; + mhi_cntrl->ev_ctx_host_pa <<= 32; + + regval = mhi_ep_mmio_read(mhi_cntrl, EP_ECABAP_LOWER); + mhi_cntrl->ev_ctx_host_pa |= regval; +} + +void mhi_ep_mmio_get_crc_base(struct mhi_ep_cntrl *mhi_cntrl) +{ + u32 regval; + + regval = mhi_ep_mmio_read(mhi_cntrl, EP_CRCBAP_HIGHER); + mhi_cntrl->cmd_ctx_host_pa = regval; + mhi_cntrl->cmd_ctx_host_pa <<= 32; + + regval = 
mhi_ep_mmio_read(mhi_cntrl, EP_CRCBAP_LOWER); + mhi_cntrl->cmd_ctx_host_pa |= regval; +} + +u64 mhi_ep_mmio_get_db(struct mhi_ep_ring *ring) +{ + struct mhi_ep_cntrl *mhi_cntrl = ring->mhi_cntrl; + u64 db_offset; + u32 regval; + + regval = mhi_ep_mmio_read(mhi_cntrl, ring->db_offset_h); + db_offset = regval; + db_offset <<= 32; + + regval = mhi_ep_mmio_read(mhi_cntrl, ring->db_offset_l); + db_offset |= regval; + + return db_offset; +} + +void mhi_ep_mmio_set_env(struct mhi_ep_cntrl *mhi_cntrl, u32 value) +{ + mhi_ep_mmio_write(mhi_cntrl, EP_BHI_EXECENV, value); +} + +void mhi_ep_mmio_clear_reset(struct mhi_ep_cntrl *mhi_cntrl) +{ + mhi_ep_mmio_masked_write(mhi_cntrl, EP_MHICTRL, MHICTRL_RESET_MASK, 0); +} + +void mhi_ep_mmio_reset(struct mhi_ep_cntrl *mhi_cntrl) +{ + mhi_ep_mmio_write(mhi_cntrl, EP_MHICTRL, 0); + mhi_ep_mmio_write(mhi_cntrl, EP_MHISTATUS, 0); + mhi_ep_mmio_clear_interrupts(mhi_cntrl); +} + +void mhi_ep_mmio_init(struct mhi_ep_cntrl *mhi_cntrl) +{ + u32 regval; + + mhi_cntrl->chdb_offset = mhi_ep_mmio_read(mhi_cntrl, EP_CHDBOFF); + mhi_cntrl->erdb_offset = mhi_ep_mmio_read(mhi_cntrl, EP_ERDBOFF); + + regval = mhi_ep_mmio_read(mhi_cntrl, EP_MHICFG); + mhi_cntrl->event_rings = FIELD_GET(MHICFG_NER_MASK, regval); + mhi_cntrl->hw_event_rings = FIELD_GET(MHICFG_NHWER_MASK, regval); + + mhi_ep_mmio_reset(mhi_cntrl); +} + +void mhi_ep_mmio_update_ner(struct mhi_ep_cntrl *mhi_cntrl) +{ + u32 regval; + + regval = mhi_ep_mmio_read(mhi_cntrl, EP_MHICFG); + mhi_cntrl->event_rings = FIELD_GET(MHICFG_NER_MASK, regval); + mhi_cntrl->hw_event_rings = FIELD_GET(MHICFG_NHWER_MASK, regval); +} diff --git a/drivers/bus/mhi/ep/ring.c b/drivers/bus/mhi/ep/ring.c new file mode 100644 index 000000000..115518ec7 --- /dev/null +++ b/drivers/bus/mhi/ep/ring.c @@ -0,0 +1,207 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2022 Linaro Ltd. + * Author: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org> + */ + +#include <linux/mhi_ep.h> +#include "internal.h" + +size_t mhi_ep_ring_addr2offset(struct mhi_ep_ring *ring, u64 ptr) +{ + return (ptr - ring->rbase) / sizeof(struct mhi_ring_element); +} + +static u32 mhi_ep_ring_num_elems(struct mhi_ep_ring *ring) +{ + __le64 rlen; + + memcpy_fromio(&rlen, (void __iomem *) &ring->ring_ctx->generic.rlen, sizeof(u64)); + + return le64_to_cpu(rlen) / sizeof(struct mhi_ring_element); +} + +void mhi_ep_ring_inc_index(struct mhi_ep_ring *ring) +{ + ring->rd_offset = (ring->rd_offset + 1) % ring->ring_size; +} + +static int __mhi_ep_cache_ring(struct mhi_ep_ring *ring, size_t end) +{ + struct mhi_ep_cntrl *mhi_cntrl = ring->mhi_cntrl; + struct device *dev = &mhi_cntrl->mhi_dev->dev; + size_t start, copy_size; + int ret; + + /* Don't proceed in the case of event ring. This happens during mhi_ep_ring_start(). 
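 *
 * An example of the wraparound handling below: with ring_size = 8, a
 * cached wr_offset (start) of 6 and a new host write pointer at offset 2,
 * elements 6..7 are fetched into the local cache first, and elements 0..1
 * are then fetched with a second read_from_host() call.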
*/ + if (ring->type == RING_TYPE_ER) + return 0; + + /* No need to cache the ring if write pointer is unmodified */ + if (ring->wr_offset == end) + return 0; + + start = ring->wr_offset; + if (start < end) { + copy_size = (end - start) * sizeof(struct mhi_ring_element); + ret = mhi_cntrl->read_from_host(mhi_cntrl, ring->rbase + + (start * sizeof(struct mhi_ring_element)), + &ring->ring_cache[start], copy_size); + if (ret < 0) + return ret; + } else { + copy_size = (ring->ring_size - start) * sizeof(struct mhi_ring_element); + ret = mhi_cntrl->read_from_host(mhi_cntrl, ring->rbase + + (start * sizeof(struct mhi_ring_element)), + &ring->ring_cache[start], copy_size); + if (ret < 0) + return ret; + + if (end) { + ret = mhi_cntrl->read_from_host(mhi_cntrl, ring->rbase, + &ring->ring_cache[0], + end * sizeof(struct mhi_ring_element)); + if (ret < 0) + return ret; + } + } + + dev_dbg(dev, "Cached ring: start %zu end %zu size %zu\n", start, end, copy_size); + + return 0; +} + +static int mhi_ep_cache_ring(struct mhi_ep_ring *ring, u64 wr_ptr) +{ + size_t wr_offset; + int ret; + + wr_offset = mhi_ep_ring_addr2offset(ring, wr_ptr); + + /* Cache the host ring till write offset */ + ret = __mhi_ep_cache_ring(ring, wr_offset); + if (ret) + return ret; + + ring->wr_offset = wr_offset; + + return 0; +} + +int mhi_ep_update_wr_offset(struct mhi_ep_ring *ring) +{ + u64 wr_ptr; + + wr_ptr = mhi_ep_mmio_get_db(ring); + + return mhi_ep_cache_ring(ring, wr_ptr); +} + +/* TODO: Support for adding multiple ring elements to the ring */ +int mhi_ep_ring_add_element(struct mhi_ep_ring *ring, struct mhi_ring_element *el) +{ + struct mhi_ep_cntrl *mhi_cntrl = ring->mhi_cntrl; + struct device *dev = &mhi_cntrl->mhi_dev->dev; + size_t old_offset = 0; + u32 num_free_elem; + __le64 rp; + int ret; + + ret = mhi_ep_update_wr_offset(ring); + if (ret) { + dev_err(dev, "Error updating write pointer\n"); + return ret; + } + + if (ring->rd_offset < ring->wr_offset) + num_free_elem = (ring->wr_offset - ring->rd_offset) - 1; + else + num_free_elem = ((ring->ring_size - ring->rd_offset) + ring->wr_offset) - 1; + + /* Check if there is space in ring for adding at least an element */ + if (!num_free_elem) { + dev_err(dev, "No space left in the ring\n"); + return -ENOSPC; + } + + old_offset = ring->rd_offset; + mhi_ep_ring_inc_index(ring); + + dev_dbg(dev, "Adding an element to ring at offset (%zu)\n", ring->rd_offset); + + /* Update rp in ring context */ + rp = cpu_to_le64(ring->rd_offset * sizeof(*el) + ring->rbase); + memcpy_toio((void __iomem *) &ring->ring_ctx->generic.rp, &rp, sizeof(u64)); + + ret = mhi_cntrl->write_to_host(mhi_cntrl, el, ring->rbase + (old_offset * sizeof(*el)), + sizeof(*el)); + if (ret < 0) + return ret; + + return 0; +} + +void mhi_ep_ring_init(struct mhi_ep_ring *ring, enum mhi_ep_ring_type type, u32 id) +{ + ring->type = type; + if (ring->type == RING_TYPE_CMD) { + ring->db_offset_h = EP_CRDB_HIGHER; + ring->db_offset_l = EP_CRDB_LOWER; + } else if (ring->type == RING_TYPE_CH) { + ring->db_offset_h = CHDB_HIGHER_n(id); + ring->db_offset_l = CHDB_LOWER_n(id); + ring->ch_id = id; + } else { + ring->db_offset_h = ERDB_HIGHER_n(id); + ring->db_offset_l = ERDB_LOWER_n(id); + } +} + +int mhi_ep_ring_start(struct mhi_ep_cntrl *mhi_cntrl, struct mhi_ep_ring *ring, + union mhi_ep_ring_ctx *ctx) +{ + struct device *dev = &mhi_cntrl->mhi_dev->dev; + __le64 val; + int ret; + + ring->mhi_cntrl = mhi_cntrl; + ring->ring_ctx = ctx; + ring->ring_size = mhi_ep_ring_num_elems(ring); + memcpy_fromio(&val, (void __iomem *) 
&ring->ring_ctx->generic.rbase, sizeof(u64)); + ring->rbase = le64_to_cpu(val); + + if (ring->type == RING_TYPE_CH) + ring->er_index = le32_to_cpu(ring->ring_ctx->ch.erindex); + + if (ring->type == RING_TYPE_ER) + ring->irq_vector = le32_to_cpu(ring->ring_ctx->ev.msivec); + + /* During ring init, both rp and wp are equal */ + memcpy_fromio(&val, (void __iomem *) &ring->ring_ctx->generic.rp, sizeof(u64)); + ring->rd_offset = mhi_ep_ring_addr2offset(ring, le64_to_cpu(val)); + ring->wr_offset = mhi_ep_ring_addr2offset(ring, le64_to_cpu(val)); + + /* Allocate ring cache memory for holding the copy of host ring */ + ring->ring_cache = kcalloc(ring->ring_size, sizeof(struct mhi_ring_element), GFP_KERNEL); + if (!ring->ring_cache) + return -ENOMEM; + + memcpy_fromio(&val, (void __iomem *) &ring->ring_ctx->generic.wp, sizeof(u64)); + ret = mhi_ep_cache_ring(ring, le64_to_cpu(val)); + if (ret) { + dev_err(dev, "Failed to cache ring\n"); + kfree(ring->ring_cache); + return ret; + } + + ring->started = true; + + return 0; +} + +void mhi_ep_ring_reset(struct mhi_ep_cntrl *mhi_cntrl, struct mhi_ep_ring *ring) +{ + ring->started = false; + kfree(ring->ring_cache); + ring->ring_cache = NULL; +} diff --git a/drivers/bus/mhi/ep/sm.c b/drivers/bus/mhi/ep/sm.c new file mode 100644 index 000000000..fd200b2ac --- /dev/null +++ b/drivers/bus/mhi/ep/sm.c @@ -0,0 +1,154 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2022 Linaro Ltd. + * Author: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org> + */ + +#include <linux/errno.h> +#include <linux/mhi_ep.h> +#include "internal.h" + +bool __must_check mhi_ep_check_mhi_state(struct mhi_ep_cntrl *mhi_cntrl, + enum mhi_state cur_mhi_state, + enum mhi_state mhi_state) +{ + if (mhi_state == MHI_STATE_SYS_ERR) + return true; /* Allowed in any state */ + + if (mhi_state == MHI_STATE_READY) + return cur_mhi_state == MHI_STATE_RESET; + + if (mhi_state == MHI_STATE_M0) + return cur_mhi_state == MHI_STATE_M3 || cur_mhi_state == MHI_STATE_READY; + + if (mhi_state == MHI_STATE_M3) + return cur_mhi_state == MHI_STATE_M0; + + return false; +} + +int mhi_ep_set_mhi_state(struct mhi_ep_cntrl *mhi_cntrl, enum mhi_state mhi_state) +{ + struct device *dev = &mhi_cntrl->mhi_dev->dev; + + if (!mhi_ep_check_mhi_state(mhi_cntrl, mhi_cntrl->mhi_state, mhi_state)) { + dev_err(dev, "MHI state change to %s from %s is not allowed!\n", + mhi_state_str(mhi_state), + mhi_state_str(mhi_cntrl->mhi_state)); + return -EACCES; + } + + /* TODO: Add support for M1 and M2 states */ + if (mhi_state == MHI_STATE_M1 || mhi_state == MHI_STATE_M2) { + dev_err(dev, "MHI state (%s) not supported\n", mhi_state_str(mhi_state)); + return -EOPNOTSUPP; + } + + mhi_ep_mmio_masked_write(mhi_cntrl, EP_MHISTATUS, MHISTATUS_MHISTATE_MASK, mhi_state); + mhi_cntrl->mhi_state = mhi_state; + + if (mhi_state == MHI_STATE_READY) + mhi_ep_mmio_masked_write(mhi_cntrl, EP_MHISTATUS, MHISTATUS_READY_MASK, 1); + + if (mhi_state == MHI_STATE_SYS_ERR) + mhi_ep_mmio_masked_write(mhi_cntrl, EP_MHISTATUS, MHISTATUS_SYSERR_MASK, 1); + + return 0; +} + +int mhi_ep_set_m0_state(struct mhi_ep_cntrl *mhi_cntrl) +{ + struct device *dev = &mhi_cntrl->mhi_dev->dev; + enum mhi_state old_state; + int ret; + + /* If MHI is in M3, resume suspended channels */ + mutex_lock(&mhi_cntrl->state_lock); + + old_state = mhi_cntrl->mhi_state; + if (old_state == MHI_STATE_M3) + mhi_ep_resume_channels(mhi_cntrl); + + ret = mhi_ep_set_mhi_state(mhi_cntrl, MHI_STATE_M0); + if (ret) { + mhi_ep_handle_syserr(mhi_cntrl); + goto err_unlock; + } + 
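	/*
	 * (Summary of the two notifications below: a state change event for
	 * M0 is always sent to the host; if the previous state was READY,
	 * an execution environment event advertising AMSS is sent as well,
	 * since this is the first M0 entry after boot.)
	 */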
+ /* Signal host that the device moved to M0 */ + ret = mhi_ep_send_state_change_event(mhi_cntrl, MHI_STATE_M0); + if (ret) { + dev_err(dev, "Failed sending M0 state change event\n"); + goto err_unlock; + } + + if (old_state == MHI_STATE_READY) { + /* Send AMSS EE event to host */ + ret = mhi_ep_send_ee_event(mhi_cntrl, MHI_EE_AMSS); + if (ret) { + dev_err(dev, "Failed sending AMSS EE event\n"); + goto err_unlock; + } + } + +err_unlock: + mutex_unlock(&mhi_cntrl->state_lock); + + return ret; +} + +int mhi_ep_set_m3_state(struct mhi_ep_cntrl *mhi_cntrl) +{ + struct device *dev = &mhi_cntrl->mhi_dev->dev; + int ret; + + mutex_lock(&mhi_cntrl->state_lock); + + ret = mhi_ep_set_mhi_state(mhi_cntrl, MHI_STATE_M3); + if (ret) { + mhi_ep_handle_syserr(mhi_cntrl); + goto err_unlock; + } + + mhi_ep_suspend_channels(mhi_cntrl); + + /* Signal host that the device moved to M3 */ + ret = mhi_ep_send_state_change_event(mhi_cntrl, MHI_STATE_M3); + if (ret) { + dev_err(dev, "Failed sending M3 state change event\n"); + goto err_unlock; + } + +err_unlock: + mutex_unlock(&mhi_cntrl->state_lock); + + return ret; +} + +int mhi_ep_set_ready_state(struct mhi_ep_cntrl *mhi_cntrl) +{ + struct device *dev = &mhi_cntrl->mhi_dev->dev; + enum mhi_state mhi_state; + int ret, is_ready; + + mutex_lock(&mhi_cntrl->state_lock); + + /* Ensure that the MHISTATUS is set to RESET by host */ + mhi_state = mhi_ep_mmio_masked_read(mhi_cntrl, EP_MHISTATUS, MHISTATUS_MHISTATE_MASK); + is_ready = mhi_ep_mmio_masked_read(mhi_cntrl, EP_MHISTATUS, MHISTATUS_READY_MASK); + + if (mhi_state != MHI_STATE_RESET || is_ready) { + dev_err(dev, "READY state transition failed. MHI host not in RESET state\n"); + ret = -EIO; + goto err_unlock; + } + + ret = mhi_ep_set_mhi_state(mhi_cntrl, MHI_STATE_READY); + if (ret) + mhi_ep_handle_syserr(mhi_cntrl); + +err_unlock: + mutex_unlock(&mhi_cntrl->state_lock); + + return ret; +} diff --git a/drivers/bus/mhi/host/Kconfig b/drivers/bus/mhi/host/Kconfig new file mode 100644 index 000000000..da5cd0c9f --- /dev/null +++ b/drivers/bus/mhi/host/Kconfig @@ -0,0 +1,31 @@ +# SPDX-License-Identifier: GPL-2.0 +# +# MHI bus +# +# Copyright (c) 2018-2020, The Linux Foundation. All rights reserved. +# + +config MHI_BUS + tristate "Modem Host Interface (MHI) bus" + help + Bus driver for MHI protocol. Modem Host Interface (MHI) is a + communication protocol used by the host processors to control + and communicate with modem devices over a high speed peripheral + bus or shared memory. + +config MHI_BUS_DEBUG + bool "Debugfs support for the MHI bus" + depends on MHI_BUS && DEBUG_FS + help + Enable debugfs support for use with the MHI transport. Allows + reading and/or modifying some values within the MHI controller + for debug and test purposes. + +config MHI_BUS_PCI_GENERIC + tristate "MHI PCI controller driver" + depends on MHI_BUS + depends on PCI + help + This driver provides MHI PCI controller driver for devices such as + Qualcomm SDX55 based PCIe modems. 
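
The host-side options above are consumed by MHI client drivers that bind to named channels on the mhi bus. Below is a minimal, illustrative sketch of such a client — it is not part of this commit. The driver name "mhi_demo", the choice of the "LOOPBACK" channel, the 4 KiB buffer size and the simplified error/teardown handling are editorial assumptions; the structures and helpers used (struct mhi_driver, mhi_prepare_for_transfer(), mhi_queue_buf(), module_mhi_driver()) come from <linux/mhi.h>.

// SPDX-License-Identifier: GPL-2.0
/* Hypothetical MHI client driver sketch, not part of this commit. */
#include <linux/mhi.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/sizes.h>
#include <linux/slab.h>

static int demo_probe(struct mhi_device *mhi_dev,
		      const struct mhi_device_id *id)
{
	void *buf;
	int ret;

	/* Move both channels of this device to the running state */
	ret = mhi_prepare_for_transfer(mhi_dev);
	if (ret)
		return ret;

	/* Pre-queue one receive buffer on the DL channel */
	buf = kmalloc(SZ_4K, GFP_KERNEL);
	if (!buf) {
		mhi_unprepare_from_transfer(mhi_dev);
		return -ENOMEM;
	}

	ret = mhi_queue_buf(mhi_dev, DMA_FROM_DEVICE, buf, SZ_4K, MHI_EOT);
	if (ret) {
		kfree(buf);
		mhi_unprepare_from_transfer(mhi_dev);
	}

	return ret;
}

static void demo_remove(struct mhi_device *mhi_dev)
{
	mhi_unprepare_from_transfer(mhi_dev);
}

/* Completion of a device-bound (UL) buffer queued with mhi_queue_buf() */
static void demo_ul_xfer_cb(struct mhi_device *mhi_dev,
			    struct mhi_result *result)
{
	dev_dbg(&mhi_dev->dev, "UL done: %zu bytes, status %d\n",
		result->bytes_xferd, result->transaction_status);
}

/* Host-bound (DL) data has landed in the buffer queued in probe */
static void demo_dl_xfer_cb(struct mhi_device *mhi_dev,
			    struct mhi_result *result)
{
	dev_dbg(&mhi_dev->dev, "DL done: %zu bytes, status %d\n",
		result->bytes_xferd, result->transaction_status);
	kfree(result->buf_addr);
}

static const struct mhi_device_id demo_id_table[] = {
	{ .chan = "LOOPBACK" },
	{}
};
MODULE_DEVICE_TABLE(mhi, demo_id_table);

static struct mhi_driver demo_driver = {
	.id_table = demo_id_table,
	.probe = demo_probe,
	.remove = demo_remove,
	.ul_xfer_cb = demo_ul_xfer_cb,
	.dl_xfer_cb = demo_dl_xfer_cb,
	.driver = {
		.name = "mhi_demo",
	},
};
module_mhi_driver(demo_driver);

MODULE_DESCRIPTION("Illustrative MHI client driver sketch");
MODULE_LICENSE("GPL");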
+ diff --git a/drivers/bus/mhi/host/Makefile b/drivers/bus/mhi/host/Makefile new file mode 100644 index 000000000..859c2f384 --- /dev/null +++ b/drivers/bus/mhi/host/Makefile @@ -0,0 +1,6 @@ +obj-$(CONFIG_MHI_BUS) += mhi.o +mhi-y := init.o main.o pm.o boot.o +mhi-$(CONFIG_MHI_BUS_DEBUG) += debugfs.o + +obj-$(CONFIG_MHI_BUS_PCI_GENERIC) += mhi_pci_generic.o +mhi_pci_generic-y += pci_generic.o diff --git a/drivers/bus/mhi/host/boot.c b/drivers/bus/mhi/host/boot.c new file mode 100644 index 000000000..55e909f8c --- /dev/null +++ b/drivers/bus/mhi/host/boot.c @@ -0,0 +1,544 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2018-2020, The Linux Foundation. All rights reserved. + * + */ + +#include <linux/delay.h> +#include <linux/device.h> +#include <linux/dma-direction.h> +#include <linux/dma-mapping.h> +#include <linux/firmware.h> +#include <linux/interrupt.h> +#include <linux/list.h> +#include <linux/mhi.h> +#include <linux/module.h> +#include <linux/random.h> +#include <linux/slab.h> +#include <linux/wait.h> +#include "internal.h" + +/* Setup RDDM vector table for RDDM transfer and program RXVEC */ +int mhi_rddm_prepare(struct mhi_controller *mhi_cntrl, + struct image_info *img_info) +{ + struct mhi_buf *mhi_buf = img_info->mhi_buf; + struct bhi_vec_entry *bhi_vec = img_info->bhi_vec; + void __iomem *base = mhi_cntrl->bhie; + struct device *dev = &mhi_cntrl->mhi_dev->dev; + u32 sequence_id; + unsigned int i; + int ret; + + for (i = 0; i < img_info->entries - 1; i++, mhi_buf++, bhi_vec++) { + bhi_vec->dma_addr = mhi_buf->dma_addr; + bhi_vec->size = mhi_buf->len; + } + + dev_dbg(dev, "BHIe programming for RDDM\n"); + + mhi_write_reg(mhi_cntrl, base, BHIE_RXVECADDR_HIGH_OFFS, + upper_32_bits(mhi_buf->dma_addr)); + + mhi_write_reg(mhi_cntrl, base, BHIE_RXVECADDR_LOW_OFFS, + lower_32_bits(mhi_buf->dma_addr)); + + mhi_write_reg(mhi_cntrl, base, BHIE_RXVECSIZE_OFFS, mhi_buf->len); + sequence_id = MHI_RANDOM_U32_NONZERO(BHIE_RXVECSTATUS_SEQNUM_BMSK); + + ret = mhi_write_reg_field(mhi_cntrl, base, BHIE_RXVECDB_OFFS, + BHIE_RXVECDB_SEQNUM_BMSK, sequence_id); + if (ret) { + dev_err(dev, "Failed to write sequence ID for BHIE_RXVECDB\n"); + return ret; + } + + dev_dbg(dev, "Address: %p and len: 0x%zx sequence: %u\n", + &mhi_buf->dma_addr, mhi_buf->len, sequence_id); + + return 0; +} + +/* Collect RDDM buffer during kernel panic */ +static int __mhi_download_rddm_in_panic(struct mhi_controller *mhi_cntrl) +{ + int ret; + u32 rx_status; + enum mhi_ee_type ee; + const u32 delayus = 2000; + u32 retry = (mhi_cntrl->timeout_ms * 1000) / delayus; + const u32 rddm_timeout_us = 200000; + int rddm_retry = rddm_timeout_us / delayus; + void __iomem *base = mhi_cntrl->bhie; + struct device *dev = &mhi_cntrl->mhi_dev->dev; + + dev_dbg(dev, "Entered with pm_state:%s dev_state:%s ee:%s\n", + to_mhi_pm_state_str(mhi_cntrl->pm_state), + mhi_state_str(mhi_cntrl->dev_state), + TO_MHI_EXEC_STR(mhi_cntrl->ee)); + + /* + * This should only be executing during a kernel panic, we expect all + * other cores to shutdown while we're collecting RDDM buffer. After + * returning from this function, we expect the device to reset. + * + * Normaly, we read/write pm_state only after grabbing the + * pm_lock, since we're in a panic, skipping it. 
Also there is no + * gurantee that this state change would take effect since + * we're setting it w/o grabbing pm_lock + */ + mhi_cntrl->pm_state = MHI_PM_LD_ERR_FATAL_DETECT; + /* update should take the effect immediately */ + smp_wmb(); + + /* + * Make sure device is not already in RDDM. In case the device asserts + * and a kernel panic follows, device will already be in RDDM. + * Do not trigger SYS ERR again and proceed with waiting for + * image download completion. + */ + ee = mhi_get_exec_env(mhi_cntrl); + if (ee == MHI_EE_MAX) + goto error_exit_rddm; + + if (ee != MHI_EE_RDDM) { + dev_dbg(dev, "Trigger device into RDDM mode using SYS ERR\n"); + mhi_set_mhi_state(mhi_cntrl, MHI_STATE_SYS_ERR); + + dev_dbg(dev, "Waiting for device to enter RDDM\n"); + while (rddm_retry--) { + ee = mhi_get_exec_env(mhi_cntrl); + if (ee == MHI_EE_RDDM) + break; + + udelay(delayus); + } + + if (rddm_retry <= 0) { + /* Hardware reset so force device to enter RDDM */ + dev_dbg(dev, + "Did not enter RDDM, do a host req reset\n"); + mhi_write_reg(mhi_cntrl, mhi_cntrl->regs, + MHI_SOC_RESET_REQ_OFFSET, + MHI_SOC_RESET_REQ); + udelay(delayus); + } + + ee = mhi_get_exec_env(mhi_cntrl); + } + + dev_dbg(dev, + "Waiting for RDDM image download via BHIe, current EE:%s\n", + TO_MHI_EXEC_STR(ee)); + + while (retry--) { + ret = mhi_read_reg_field(mhi_cntrl, base, BHIE_RXVECSTATUS_OFFS, + BHIE_RXVECSTATUS_STATUS_BMSK, &rx_status); + if (ret) + return -EIO; + + if (rx_status == BHIE_RXVECSTATUS_STATUS_XFER_COMPL) + return 0; + + udelay(delayus); + } + + ee = mhi_get_exec_env(mhi_cntrl); + ret = mhi_read_reg(mhi_cntrl, base, BHIE_RXVECSTATUS_OFFS, &rx_status); + + dev_err(dev, "RXVEC_STATUS: 0x%x\n", rx_status); + +error_exit_rddm: + dev_err(dev, "RDDM transfer failed. Current EE: %s\n", + TO_MHI_EXEC_STR(ee)); + + return -EIO; +} + +/* Download RDDM image from device */ +int mhi_download_rddm_image(struct mhi_controller *mhi_cntrl, bool in_panic) +{ + void __iomem *base = mhi_cntrl->bhie; + struct device *dev = &mhi_cntrl->mhi_dev->dev; + u32 rx_status; + + if (in_panic) + return __mhi_download_rddm_in_panic(mhi_cntrl); + + dev_dbg(dev, "Waiting for RDDM image download via BHIe\n"); + + /* Wait for the image download to complete */ + wait_event_timeout(mhi_cntrl->state_event, + mhi_read_reg_field(mhi_cntrl, base, + BHIE_RXVECSTATUS_OFFS, + BHIE_RXVECSTATUS_STATUS_BMSK, + &rx_status) || rx_status, + msecs_to_jiffies(mhi_cntrl->timeout_ms)); + + return (rx_status == BHIE_RXVECSTATUS_STATUS_XFER_COMPL) ? 0 : -EIO; +} +EXPORT_SYMBOL_GPL(mhi_download_rddm_image); + +static int mhi_fw_load_bhie(struct mhi_controller *mhi_cntrl, + const struct mhi_buf *mhi_buf) +{ + void __iomem *base = mhi_cntrl->bhie; + struct device *dev = &mhi_cntrl->mhi_dev->dev; + rwlock_t *pm_lock = &mhi_cntrl->pm_lock; + u32 tx_status, sequence_id; + int ret; + + read_lock_bh(pm_lock); + if (!MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) { + read_unlock_bh(pm_lock); + return -EIO; + } + + sequence_id = MHI_RANDOM_U32_NONZERO(BHIE_TXVECSTATUS_SEQNUM_BMSK); + dev_dbg(dev, "Starting image download via BHIe. 
Sequence ID: %u\n", + sequence_id); + mhi_write_reg(mhi_cntrl, base, BHIE_TXVECADDR_HIGH_OFFS, + upper_32_bits(mhi_buf->dma_addr)); + + mhi_write_reg(mhi_cntrl, base, BHIE_TXVECADDR_LOW_OFFS, + lower_32_bits(mhi_buf->dma_addr)); + + mhi_write_reg(mhi_cntrl, base, BHIE_TXVECSIZE_OFFS, mhi_buf->len); + + ret = mhi_write_reg_field(mhi_cntrl, base, BHIE_TXVECDB_OFFS, + BHIE_TXVECDB_SEQNUM_BMSK, sequence_id); + read_unlock_bh(pm_lock); + + if (ret) + return ret; + + /* Wait for the image download to complete */ + ret = wait_event_timeout(mhi_cntrl->state_event, + MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state) || + mhi_read_reg_field(mhi_cntrl, base, + BHIE_TXVECSTATUS_OFFS, + BHIE_TXVECSTATUS_STATUS_BMSK, + &tx_status) || tx_status, + msecs_to_jiffies(mhi_cntrl->timeout_ms)); + if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state) || + tx_status != BHIE_TXVECSTATUS_STATUS_XFER_COMPL) + return -EIO; + + return (!ret) ? -ETIMEDOUT : 0; +} + +static int mhi_fw_load_bhi(struct mhi_controller *mhi_cntrl, + dma_addr_t dma_addr, + size_t size) +{ + u32 tx_status, val, session_id; + int i, ret; + void __iomem *base = mhi_cntrl->bhi; + rwlock_t *pm_lock = &mhi_cntrl->pm_lock; + struct device *dev = &mhi_cntrl->mhi_dev->dev; + struct { + char *name; + u32 offset; + } error_reg[] = { + { "ERROR_CODE", BHI_ERRCODE }, + { "ERROR_DBG1", BHI_ERRDBG1 }, + { "ERROR_DBG2", BHI_ERRDBG2 }, + { "ERROR_DBG3", BHI_ERRDBG3 }, + { NULL }, + }; + + read_lock_bh(pm_lock); + if (!MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) { + read_unlock_bh(pm_lock); + goto invalid_pm_state; + } + + session_id = MHI_RANDOM_U32_NONZERO(BHI_TXDB_SEQNUM_BMSK); + dev_dbg(dev, "Starting image download via BHI. Session ID: %u\n", + session_id); + mhi_write_reg(mhi_cntrl, base, BHI_STATUS, 0); + mhi_write_reg(mhi_cntrl, base, BHI_IMGADDR_HIGH, + upper_32_bits(dma_addr)); + mhi_write_reg(mhi_cntrl, base, BHI_IMGADDR_LOW, + lower_32_bits(dma_addr)); + mhi_write_reg(mhi_cntrl, base, BHI_IMGSIZE, size); + mhi_write_reg(mhi_cntrl, base, BHI_IMGTXDB, session_id); + read_unlock_bh(pm_lock); + + /* Wait for the image download to complete */ + ret = wait_event_timeout(mhi_cntrl->state_event, + MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state) || + mhi_read_reg_field(mhi_cntrl, base, BHI_STATUS, + BHI_STATUS_MASK, &tx_status) || tx_status, + msecs_to_jiffies(mhi_cntrl->timeout_ms)); + if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) + goto invalid_pm_state; + + if (tx_status == BHI_STATUS_ERROR) { + dev_err(dev, "Image transfer failed\n"); + read_lock_bh(pm_lock); + if (MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) { + for (i = 0; error_reg[i].name; i++) { + ret = mhi_read_reg(mhi_cntrl, base, + error_reg[i].offset, &val); + if (ret) + break; + dev_err(dev, "Reg: %s value: 0x%x\n", + error_reg[i].name, val); + } + } + read_unlock_bh(pm_lock); + goto invalid_pm_state; + } + + return (!ret) ? 
-ETIMEDOUT : 0; + +invalid_pm_state: + + return -EIO; +} + +void mhi_free_bhie_table(struct mhi_controller *mhi_cntrl, + struct image_info *image_info) +{ + int i; + struct mhi_buf *mhi_buf = image_info->mhi_buf; + + for (i = 0; i < image_info->entries; i++, mhi_buf++) + dma_free_coherent(mhi_cntrl->cntrl_dev, mhi_buf->len, + mhi_buf->buf, mhi_buf->dma_addr); + + kfree(image_info->mhi_buf); + kfree(image_info); +} + +int mhi_alloc_bhie_table(struct mhi_controller *mhi_cntrl, + struct image_info **image_info, + size_t alloc_size) +{ + size_t seg_size = mhi_cntrl->seg_len; + int segments = DIV_ROUND_UP(alloc_size, seg_size) + 1; + int i; + struct image_info *img_info; + struct mhi_buf *mhi_buf; + + img_info = kzalloc(sizeof(*img_info), GFP_KERNEL); + if (!img_info) + return -ENOMEM; + + /* Allocate memory for entries */ + img_info->mhi_buf = kcalloc(segments, sizeof(*img_info->mhi_buf), + GFP_KERNEL); + if (!img_info->mhi_buf) + goto error_alloc_mhi_buf; + + /* Allocate and populate vector table */ + mhi_buf = img_info->mhi_buf; + for (i = 0; i < segments; i++, mhi_buf++) { + size_t vec_size = seg_size; + + /* Vector table is the last entry */ + if (i == segments - 1) + vec_size = sizeof(struct bhi_vec_entry) * i; + + mhi_buf->len = vec_size; + mhi_buf->buf = dma_alloc_coherent(mhi_cntrl->cntrl_dev, + vec_size, &mhi_buf->dma_addr, + GFP_KERNEL); + if (!mhi_buf->buf) + goto error_alloc_segment; + } + + img_info->bhi_vec = img_info->mhi_buf[segments - 1].buf; + img_info->entries = segments; + *image_info = img_info; + + return 0; + +error_alloc_segment: + for (--i, --mhi_buf; i >= 0; i--, mhi_buf--) + dma_free_coherent(mhi_cntrl->cntrl_dev, mhi_buf->len, + mhi_buf->buf, mhi_buf->dma_addr); + +error_alloc_mhi_buf: + kfree(img_info); + + return -ENOMEM; +} + +static void mhi_firmware_copy(struct mhi_controller *mhi_cntrl, + const struct firmware *firmware, + struct image_info *img_info) +{ + size_t remainder = firmware->size; + size_t to_cpy; + const u8 *buf = firmware->data; + struct mhi_buf *mhi_buf = img_info->mhi_buf; + struct bhi_vec_entry *bhi_vec = img_info->bhi_vec; + + while (remainder) { + to_cpy = min(remainder, mhi_buf->len); + memcpy(mhi_buf->buf, buf, to_cpy); + bhi_vec->dma_addr = mhi_buf->dma_addr; + bhi_vec->size = to_cpy; + + buf += to_cpy; + remainder -= to_cpy; + bhi_vec++; + mhi_buf++; + } +} + +void mhi_fw_load_handler(struct mhi_controller *mhi_cntrl) +{ + const struct firmware *firmware = NULL; + struct device *dev = &mhi_cntrl->mhi_dev->dev; + enum mhi_pm_state new_state; + const char *fw_name; + void *buf; + dma_addr_t dma_addr; + size_t size; + int i, ret; + + if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) { + dev_err(dev, "Device MHI is not in valid state\n"); + return; + } + + /* save hardware info from BHI */ + ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->bhi, BHI_SERIALNU, + &mhi_cntrl->serial_number); + if (ret) + dev_err(dev, "Could not capture serial number via BHI\n"); + + for (i = 0; i < ARRAY_SIZE(mhi_cntrl->oem_pk_hash); i++) { + ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->bhi, BHI_OEMPKHASH(i), + &mhi_cntrl->oem_pk_hash[i]); + if (ret) { + dev_err(dev, "Could not capture OEM PK HASH via BHI\n"); + break; + } + } + + /* wait for ready on pass through or any other execution environment */ + if (!MHI_FW_LOAD_CAPABLE(mhi_cntrl->ee)) + goto fw_load_ready_state; + + fw_name = (mhi_cntrl->ee == MHI_EE_EDL) ? 
+ mhi_cntrl->edl_image : mhi_cntrl->fw_image; + + if (!fw_name || (mhi_cntrl->fbc_download && (!mhi_cntrl->sbl_size || + !mhi_cntrl->seg_len))) { + dev_err(dev, + "No firmware image defined or !sbl_size || !seg_len\n"); + goto error_fw_load; + } + + ret = request_firmware(&firmware, fw_name, dev); + if (ret) { + dev_err(dev, "Error loading firmware: %d\n", ret); + goto error_fw_load; + } + + size = (mhi_cntrl->fbc_download) ? mhi_cntrl->sbl_size : firmware->size; + + /* SBL size provided is maximum size, not necessarily the image size */ + if (size > firmware->size) + size = firmware->size; + + buf = dma_alloc_coherent(mhi_cntrl->cntrl_dev, size, &dma_addr, + GFP_KERNEL); + if (!buf) { + release_firmware(firmware); + goto error_fw_load; + } + + /* Download image using BHI */ + memcpy(buf, firmware->data, size); + ret = mhi_fw_load_bhi(mhi_cntrl, dma_addr, size); + dma_free_coherent(mhi_cntrl->cntrl_dev, size, buf, dma_addr); + + /* Error or in EDL mode, we're done */ + if (ret) { + dev_err(dev, "MHI did not load image over BHI, ret: %d\n", ret); + release_firmware(firmware); + goto error_fw_load; + } + + /* Wait for ready since EDL image was loaded */ + if (fw_name == mhi_cntrl->edl_image) { + release_firmware(firmware); + goto fw_load_ready_state; + } + + write_lock_irq(&mhi_cntrl->pm_lock); + mhi_cntrl->dev_state = MHI_STATE_RESET; + write_unlock_irq(&mhi_cntrl->pm_lock); + + /* + * If we're doing fbc, populate vector tables while + * device transitioning into MHI READY state + */ + if (mhi_cntrl->fbc_download) { + ret = mhi_alloc_bhie_table(mhi_cntrl, &mhi_cntrl->fbc_image, + firmware->size); + if (ret) { + release_firmware(firmware); + goto error_fw_load; + } + + /* Load the firmware into BHIE vec table */ + mhi_firmware_copy(mhi_cntrl, firmware, mhi_cntrl->fbc_image); + } + + release_firmware(firmware); + +fw_load_ready_state: + /* Transitioning into MHI RESET->READY state */ + ret = mhi_ready_state_transition(mhi_cntrl); + if (ret) { + dev_err(dev, "MHI did not enter READY state\n"); + goto error_ready_state; + } + + dev_info(dev, "Wait for device to enter SBL or Mission mode\n"); + return; + +error_ready_state: + if (mhi_cntrl->fbc_download) { + mhi_free_bhie_table(mhi_cntrl, mhi_cntrl->fbc_image); + mhi_cntrl->fbc_image = NULL; + } + +error_fw_load: + write_lock_irq(&mhi_cntrl->pm_lock); + new_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_FW_DL_ERR); + write_unlock_irq(&mhi_cntrl->pm_lock); + if (new_state == MHI_PM_FW_DL_ERR) + wake_up_all(&mhi_cntrl->state_event); +} + +int mhi_download_amss_image(struct mhi_controller *mhi_cntrl) +{ + struct image_info *image_info = mhi_cntrl->fbc_image; + struct device *dev = &mhi_cntrl->mhi_dev->dev; + enum mhi_pm_state new_state; + int ret; + + if (!image_info) + return -EIO; + + ret = mhi_fw_load_bhie(mhi_cntrl, + /* Vector table is the last entry */ + &image_info->mhi_buf[image_info->entries - 1]); + if (ret) { + dev_err(dev, "MHI did not load AMSS, ret:%d\n", ret); + write_lock_irq(&mhi_cntrl->pm_lock); + new_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_FW_DL_ERR); + write_unlock_irq(&mhi_cntrl->pm_lock); + if (new_state == MHI_PM_FW_DL_ERR) + wake_up_all(&mhi_cntrl->state_event); + } + + return ret; +} diff --git a/drivers/bus/mhi/host/debugfs.c b/drivers/bus/mhi/host/debugfs.c new file mode 100644 index 000000000..cfec7811d --- /dev/null +++ b/drivers/bus/mhi/host/debugfs.c @@ -0,0 +1,413 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2020, The Linux Foundation. All rights reserved. 
+ * + */ + +#include <linux/debugfs.h> +#include <linux/device.h> +#include <linux/interrupt.h> +#include <linux/list.h> +#include <linux/mhi.h> +#include <linux/module.h> +#include "internal.h" + +static int mhi_debugfs_states_show(struct seq_file *m, void *d) +{ + struct mhi_controller *mhi_cntrl = m->private; + + /* states */ + seq_printf(m, "PM state: %s Device: %s MHI state: %s EE: %s wake: %s\n", + to_mhi_pm_state_str(mhi_cntrl->pm_state), + mhi_is_active(mhi_cntrl) ? "Active" : "Inactive", + mhi_state_str(mhi_cntrl->dev_state), + TO_MHI_EXEC_STR(mhi_cntrl->ee), + mhi_cntrl->wake_set ? "true" : "false"); + + /* counters */ + seq_printf(m, "M0: %u M2: %u M3: %u", mhi_cntrl->M0, mhi_cntrl->M2, + mhi_cntrl->M3); + + seq_printf(m, " device wake: %u pending packets: %u\n", + atomic_read(&mhi_cntrl->dev_wake), + atomic_read(&mhi_cntrl->pending_pkts)); + + return 0; +} + +static int mhi_debugfs_events_show(struct seq_file *m, void *d) +{ + struct mhi_controller *mhi_cntrl = m->private; + struct mhi_event *mhi_event; + struct mhi_event_ctxt *er_ctxt; + int i; + + if (!mhi_is_active(mhi_cntrl)) { + seq_puts(m, "Device not ready\n"); + return -ENODEV; + } + + er_ctxt = mhi_cntrl->mhi_ctxt->er_ctxt; + mhi_event = mhi_cntrl->mhi_event; + for (i = 0; i < mhi_cntrl->total_ev_rings; + i++, er_ctxt++, mhi_event++) { + struct mhi_ring *ring = &mhi_event->ring; + + if (mhi_event->offload_ev) { + seq_printf(m, "Index: %d is an offload event ring\n", + i); + continue; + } + + seq_printf(m, "Index: %d intmod count: %lu time: %lu", + i, (le32_to_cpu(er_ctxt->intmod) & EV_CTX_INTMODC_MASK) >> + __ffs(EV_CTX_INTMODC_MASK), + (le32_to_cpu(er_ctxt->intmod) & EV_CTX_INTMODT_MASK) >> + __ffs(EV_CTX_INTMODT_MASK)); + + seq_printf(m, " base: 0x%0llx len: 0x%llx", le64_to_cpu(er_ctxt->rbase), + le64_to_cpu(er_ctxt->rlen)); + + seq_printf(m, " rp: 0x%llx wp: 0x%llx", le64_to_cpu(er_ctxt->rp), + le64_to_cpu(er_ctxt->wp)); + + seq_printf(m, " local rp: 0x%pK db: 0x%pad\n", ring->rp, + &mhi_event->db_cfg.db_val); + } + + return 0; +} + +static int mhi_debugfs_channels_show(struct seq_file *m, void *d) +{ + struct mhi_controller *mhi_cntrl = m->private; + struct mhi_chan *mhi_chan; + struct mhi_chan_ctxt *chan_ctxt; + int i; + + if (!mhi_is_active(mhi_cntrl)) { + seq_puts(m, "Device not ready\n"); + return -ENODEV; + } + + mhi_chan = mhi_cntrl->mhi_chan; + chan_ctxt = mhi_cntrl->mhi_ctxt->chan_ctxt; + for (i = 0; i < mhi_cntrl->max_chan; i++, chan_ctxt++, mhi_chan++) { + struct mhi_ring *ring = &mhi_chan->tre_ring; + + if (mhi_chan->offload_ch) { + seq_printf(m, "%s(%u) is an offload channel\n", + mhi_chan->name, mhi_chan->chan); + continue; + } + + if (!mhi_chan->mhi_dev) + continue; + + seq_printf(m, + "%s(%u) state: 0x%lx brstmode: 0x%lx pollcfg: 0x%lx", + mhi_chan->name, mhi_chan->chan, (le32_to_cpu(chan_ctxt->chcfg) & + CHAN_CTX_CHSTATE_MASK) >> __ffs(CHAN_CTX_CHSTATE_MASK), + (le32_to_cpu(chan_ctxt->chcfg) & CHAN_CTX_BRSTMODE_MASK) >> + __ffs(CHAN_CTX_BRSTMODE_MASK), (le32_to_cpu(chan_ctxt->chcfg) & + CHAN_CTX_POLLCFG_MASK) >> __ffs(CHAN_CTX_POLLCFG_MASK)); + + seq_printf(m, " type: 0x%x event ring: %u", le32_to_cpu(chan_ctxt->chtype), + le32_to_cpu(chan_ctxt->erindex)); + + seq_printf(m, " base: 0x%llx len: 0x%llx rp: 0x%llx wp: 0x%llx", + le64_to_cpu(chan_ctxt->rbase), le64_to_cpu(chan_ctxt->rlen), + le64_to_cpu(chan_ctxt->rp), le64_to_cpu(chan_ctxt->wp)); + + seq_printf(m, " local rp: 0x%pK local wp: 0x%pK db: 0x%pad\n", + ring->rp, ring->wp, + &mhi_chan->db_cfg.db_val); + } + + return 0; +} + +static int 
mhi_device_info_show(struct device *dev, void *data) +{ + struct mhi_device *mhi_dev; + + if (dev->bus != &mhi_bus_type) + return 0; + + mhi_dev = to_mhi_device(dev); + + seq_printf((struct seq_file *)data, "%s: type: %s dev_wake: %u", + mhi_dev->name, mhi_dev->dev_type ? "Controller" : "Transfer", + mhi_dev->dev_wake); + + /* for transfer device types only */ + if (mhi_dev->dev_type == MHI_DEVICE_XFER) + seq_printf((struct seq_file *)data, " channels: %u(UL)/%u(DL)", + mhi_dev->ul_chan_id, mhi_dev->dl_chan_id); + + seq_puts((struct seq_file *)data, "\n"); + + return 0; +} + +static int mhi_debugfs_devices_show(struct seq_file *m, void *d) +{ + struct mhi_controller *mhi_cntrl = m->private; + + if (!mhi_is_active(mhi_cntrl)) { + seq_puts(m, "Device not ready\n"); + return -ENODEV; + } + + /* Show controller and client(s) info */ + mhi_device_info_show(&mhi_cntrl->mhi_dev->dev, m); + device_for_each_child(&mhi_cntrl->mhi_dev->dev, m, mhi_device_info_show); + + return 0; +} + +static int mhi_debugfs_regdump_show(struct seq_file *m, void *d) +{ + struct mhi_controller *mhi_cntrl = m->private; + enum mhi_state state; + enum mhi_ee_type ee; + int i, ret = -EIO; + u32 val; + void __iomem *mhi_base = mhi_cntrl->regs; + void __iomem *bhi_base = mhi_cntrl->bhi; + void __iomem *bhie_base = mhi_cntrl->bhie; + void __iomem *wake_db = mhi_cntrl->wake_db; + struct { + const char *name; + int offset; + void __iomem *base; + } regs[] = { + { "MHI_REGLEN", MHIREGLEN, mhi_base}, + { "MHI_VER", MHIVER, mhi_base}, + { "MHI_CFG", MHICFG, mhi_base}, + { "MHI_CTRL", MHICTRL, mhi_base}, + { "MHI_STATUS", MHISTATUS, mhi_base}, + { "MHI_WAKE_DB", 0, wake_db}, + { "BHI_EXECENV", BHI_EXECENV, bhi_base}, + { "BHI_STATUS", BHI_STATUS, bhi_base}, + { "BHI_ERRCODE", BHI_ERRCODE, bhi_base}, + { "BHI_ERRDBG1", BHI_ERRDBG1, bhi_base}, + { "BHI_ERRDBG2", BHI_ERRDBG2, bhi_base}, + { "BHI_ERRDBG3", BHI_ERRDBG3, bhi_base}, + { "BHIE_TXVEC_DB", BHIE_TXVECDB_OFFS, bhie_base}, + { "BHIE_TXVEC_STATUS", BHIE_TXVECSTATUS_OFFS, bhie_base}, + { "BHIE_RXVEC_DB", BHIE_RXVECDB_OFFS, bhie_base}, + { "BHIE_RXVEC_STATUS", BHIE_RXVECSTATUS_OFFS, bhie_base}, + { NULL }, + }; + + if (!MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) + return ret; + + seq_printf(m, "Host PM state: %s Device state: %s EE: %s\n", + to_mhi_pm_state_str(mhi_cntrl->pm_state), + mhi_state_str(mhi_cntrl->dev_state), + TO_MHI_EXEC_STR(mhi_cntrl->ee)); + + state = mhi_get_mhi_state(mhi_cntrl); + ee = mhi_get_exec_env(mhi_cntrl); + seq_printf(m, "Device EE: %s state: %s\n", TO_MHI_EXEC_STR(ee), + mhi_state_str(state)); + + for (i = 0; regs[i].name; i++) { + if (!regs[i].base) + continue; + ret = mhi_read_reg(mhi_cntrl, regs[i].base, regs[i].offset, + &val); + if (ret) + continue; + + seq_printf(m, "%s: 0x%x\n", regs[i].name, val); + } + + return 0; +} + +static int mhi_debugfs_device_wake_show(struct seq_file *m, void *d) +{ + struct mhi_controller *mhi_cntrl = m->private; + struct mhi_device *mhi_dev = mhi_cntrl->mhi_dev; + + if (!mhi_is_active(mhi_cntrl)) { + seq_puts(m, "Device not ready\n"); + return -ENODEV; + } + + seq_printf(m, + "Wake count: %d\n%s\n", mhi_dev->dev_wake, + "Usage: echo get/put > device_wake to vote/unvote for M0"); + + return 0; +} + +static ssize_t mhi_debugfs_device_wake_write(struct file *file, + const char __user *ubuf, + size_t count, loff_t *ppos) +{ + struct seq_file *m = file->private_data; + struct mhi_controller *mhi_cntrl = m->private; + struct mhi_device *mhi_dev = mhi_cntrl->mhi_dev; + char buf[16]; + int ret = -EINVAL; + + if 
(copy_from_user(&buf, ubuf, min_t(size_t, sizeof(buf) - 1, count))) + return -EFAULT; + + if (!strncmp(buf, "get", 3)) { + ret = mhi_device_get_sync(mhi_dev); + } else if (!strncmp(buf, "put", 3)) { + mhi_device_put(mhi_dev); + ret = 0; + } + + return ret ? ret : count; +} + +static int mhi_debugfs_timeout_ms_show(struct seq_file *m, void *d) +{ + struct mhi_controller *mhi_cntrl = m->private; + + seq_printf(m, "%u ms\n", mhi_cntrl->timeout_ms); + + return 0; +} + +static ssize_t mhi_debugfs_timeout_ms_write(struct file *file, + const char __user *ubuf, + size_t count, loff_t *ppos) +{ + struct seq_file *m = file->private_data; + struct mhi_controller *mhi_cntrl = m->private; + u32 timeout_ms; + + if (kstrtou32_from_user(ubuf, count, 0, &timeout_ms)) + return -EINVAL; + + mhi_cntrl->timeout_ms = timeout_ms; + + return count; +} + +static int mhi_debugfs_states_open(struct inode *inode, struct file *fp) +{ + return single_open(fp, mhi_debugfs_states_show, inode->i_private); +} + +static int mhi_debugfs_events_open(struct inode *inode, struct file *fp) +{ + return single_open(fp, mhi_debugfs_events_show, inode->i_private); +} + +static int mhi_debugfs_channels_open(struct inode *inode, struct file *fp) +{ + return single_open(fp, mhi_debugfs_channels_show, inode->i_private); +} + +static int mhi_debugfs_devices_open(struct inode *inode, struct file *fp) +{ + return single_open(fp, mhi_debugfs_devices_show, inode->i_private); +} + +static int mhi_debugfs_regdump_open(struct inode *inode, struct file *fp) +{ + return single_open(fp, mhi_debugfs_regdump_show, inode->i_private); +} + +static int mhi_debugfs_device_wake_open(struct inode *inode, struct file *fp) +{ + return single_open(fp, mhi_debugfs_device_wake_show, inode->i_private); +} + +static int mhi_debugfs_timeout_ms_open(struct inode *inode, struct file *fp) +{ + return single_open(fp, mhi_debugfs_timeout_ms_show, inode->i_private); +} + +static const struct file_operations debugfs_states_fops = { + .open = mhi_debugfs_states_open, + .release = single_release, + .read = seq_read, +}; + +static const struct file_operations debugfs_events_fops = { + .open = mhi_debugfs_events_open, + .release = single_release, + .read = seq_read, +}; + +static const struct file_operations debugfs_channels_fops = { + .open = mhi_debugfs_channels_open, + .release = single_release, + .read = seq_read, +}; + +static const struct file_operations debugfs_devices_fops = { + .open = mhi_debugfs_devices_open, + .release = single_release, + .read = seq_read, +}; + +static const struct file_operations debugfs_regdump_fops = { + .open = mhi_debugfs_regdump_open, + .release = single_release, + .read = seq_read, +}; + +static const struct file_operations debugfs_device_wake_fops = { + .open = mhi_debugfs_device_wake_open, + .write = mhi_debugfs_device_wake_write, + .release = single_release, + .read = seq_read, +}; + +static const struct file_operations debugfs_timeout_ms_fops = { + .open = mhi_debugfs_timeout_ms_open, + .write = mhi_debugfs_timeout_ms_write, + .release = single_release, + .read = seq_read, +}; + +static struct dentry *mhi_debugfs_root; + +void mhi_create_debugfs(struct mhi_controller *mhi_cntrl) +{ + mhi_cntrl->debugfs_dentry = + debugfs_create_dir(dev_name(&mhi_cntrl->mhi_dev->dev), + mhi_debugfs_root); + + debugfs_create_file("states", 0444, mhi_cntrl->debugfs_dentry, + mhi_cntrl, &debugfs_states_fops); + debugfs_create_file("events", 0444, mhi_cntrl->debugfs_dentry, + mhi_cntrl, &debugfs_events_fops); + debugfs_create_file("channels", 0444, 
mhi_cntrl->debugfs_dentry, + mhi_cntrl, &debugfs_channels_fops); + debugfs_create_file("devices", 0444, mhi_cntrl->debugfs_dentry, + mhi_cntrl, &debugfs_devices_fops); + debugfs_create_file("regdump", 0444, mhi_cntrl->debugfs_dentry, + mhi_cntrl, &debugfs_regdump_fops); + debugfs_create_file("device_wake", 0644, mhi_cntrl->debugfs_dentry, + mhi_cntrl, &debugfs_device_wake_fops); + debugfs_create_file("timeout_ms", 0644, mhi_cntrl->debugfs_dentry, + mhi_cntrl, &debugfs_timeout_ms_fops); +} + +void mhi_destroy_debugfs(struct mhi_controller *mhi_cntrl) +{ + debugfs_remove_recursive(mhi_cntrl->debugfs_dentry); + mhi_cntrl->debugfs_dentry = NULL; +} + +void mhi_debugfs_init(void) +{ + mhi_debugfs_root = debugfs_create_dir(mhi_bus_type.name, NULL); +} + +void mhi_debugfs_exit(void) +{ + debugfs_remove_recursive(mhi_debugfs_root); +} diff --git a/drivers/bus/mhi/host/init.c b/drivers/bus/mhi/host/init.c new file mode 100644 index 000000000..04fbccff6 --- /dev/null +++ b/drivers/bus/mhi/host/init.c @@ -0,0 +1,1464 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2018-2020, The Linux Foundation. All rights reserved. + * + */ + +#include <linux/bitfield.h> +#include <linux/debugfs.h> +#include <linux/device.h> +#include <linux/dma-direction.h> +#include <linux/dma-mapping.h> +#include <linux/idr.h> +#include <linux/interrupt.h> +#include <linux/list.h> +#include <linux/mhi.h> +#include <linux/mod_devicetable.h> +#include <linux/module.h> +#include <linux/slab.h> +#include <linux/vmalloc.h> +#include <linux/wait.h> +#include "internal.h" + +static DEFINE_IDA(mhi_controller_ida); + +const char * const mhi_ee_str[MHI_EE_MAX] = { + [MHI_EE_PBL] = "PRIMARY BOOTLOADER", + [MHI_EE_SBL] = "SECONDARY BOOTLOADER", + [MHI_EE_AMSS] = "MISSION MODE", + [MHI_EE_RDDM] = "RAMDUMP DOWNLOAD MODE", + [MHI_EE_WFW] = "WLAN FIRMWARE", + [MHI_EE_PTHRU] = "PASS THROUGH", + [MHI_EE_EDL] = "EMERGENCY DOWNLOAD", + [MHI_EE_FP] = "FLASH PROGRAMMER", + [MHI_EE_DISABLE_TRANSITION] = "DISABLE", + [MHI_EE_NOT_SUPPORTED] = "NOT SUPPORTED", +}; + +const char * const dev_state_tran_str[DEV_ST_TRANSITION_MAX] = { + [DEV_ST_TRANSITION_PBL] = "PBL", + [DEV_ST_TRANSITION_READY] = "READY", + [DEV_ST_TRANSITION_SBL] = "SBL", + [DEV_ST_TRANSITION_MISSION_MODE] = "MISSION MODE", + [DEV_ST_TRANSITION_FP] = "FLASH PROGRAMMER", + [DEV_ST_TRANSITION_SYS_ERR] = "SYS ERROR", + [DEV_ST_TRANSITION_DISABLE] = "DISABLE", +}; + +const char * const mhi_ch_state_type_str[MHI_CH_STATE_TYPE_MAX] = { + [MHI_CH_STATE_TYPE_RESET] = "RESET", + [MHI_CH_STATE_TYPE_STOP] = "STOP", + [MHI_CH_STATE_TYPE_START] = "START", +}; + +static const char * const mhi_pm_state_str[] = { + [MHI_PM_STATE_DISABLE] = "DISABLE", + [MHI_PM_STATE_POR] = "POWER ON RESET", + [MHI_PM_STATE_M0] = "M0", + [MHI_PM_STATE_M2] = "M2", + [MHI_PM_STATE_M3_ENTER] = "M?->M3", + [MHI_PM_STATE_M3] = "M3", + [MHI_PM_STATE_M3_EXIT] = "M3->M0", + [MHI_PM_STATE_FW_DL_ERR] = "Firmware Download Error", + [MHI_PM_STATE_SYS_ERR_DETECT] = "SYS ERROR Detect", + [MHI_PM_STATE_SYS_ERR_PROCESS] = "SYS ERROR Process", + [MHI_PM_STATE_SHUTDOWN_PROCESS] = "SHUTDOWN Process", + [MHI_PM_STATE_LD_ERR_FATAL_DETECT] = "Linkdown or Error Fatal Detect", +}; + +const char *to_mhi_pm_state_str(u32 state) +{ + int index; + + if (state) + index = __fls(state); + + if (!state || index >= ARRAY_SIZE(mhi_pm_state_str)) + return "Invalid State"; + + return mhi_pm_state_str[index]; +} + +static ssize_t serial_number_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct mhi_device *mhi_dev 
= to_mhi_device(dev); + struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl; + + return sysfs_emit(buf, "Serial Number: %u\n", + mhi_cntrl->serial_number); +} +static DEVICE_ATTR_RO(serial_number); + +static ssize_t oem_pk_hash_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct mhi_device *mhi_dev = to_mhi_device(dev); + struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl; + int i, cnt = 0; + + for (i = 0; i < ARRAY_SIZE(mhi_cntrl->oem_pk_hash); i++) + cnt += sysfs_emit_at(buf, cnt, "OEMPKHASH[%d]: 0x%x\n", + i, mhi_cntrl->oem_pk_hash[i]); + + return cnt; +} +static DEVICE_ATTR_RO(oem_pk_hash); + +static ssize_t soc_reset_store(struct device *dev, + struct device_attribute *attr, + const char *buf, + size_t count) +{ + struct mhi_device *mhi_dev = to_mhi_device(dev); + struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl; + + mhi_soc_reset(mhi_cntrl); + return count; +} +static DEVICE_ATTR_WO(soc_reset); + +static struct attribute *mhi_dev_attrs[] = { + &dev_attr_serial_number.attr, + &dev_attr_oem_pk_hash.attr, + &dev_attr_soc_reset.attr, + NULL, +}; +ATTRIBUTE_GROUPS(mhi_dev); + +/* MHI protocol requires the transfer ring to be aligned with ring length */ +static int mhi_alloc_aligned_ring(struct mhi_controller *mhi_cntrl, + struct mhi_ring *ring, + u64 len) +{ + ring->alloc_size = len + (len - 1); + ring->pre_aligned = dma_alloc_coherent(mhi_cntrl->cntrl_dev, ring->alloc_size, + &ring->dma_handle, GFP_KERNEL); + if (!ring->pre_aligned) + return -ENOMEM; + + ring->iommu_base = (ring->dma_handle + (len - 1)) & ~(len - 1); + ring->base = ring->pre_aligned + (ring->iommu_base - ring->dma_handle); + + return 0; +} + +void mhi_deinit_free_irq(struct mhi_controller *mhi_cntrl) +{ + int i; + struct mhi_event *mhi_event = mhi_cntrl->mhi_event; + + for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) { + if (mhi_event->offload_ev) + continue; + + free_irq(mhi_cntrl->irq[mhi_event->irq], mhi_event); + } + + free_irq(mhi_cntrl->irq[0], mhi_cntrl); +} + +int mhi_init_irq_setup(struct mhi_controller *mhi_cntrl) +{ + struct mhi_event *mhi_event = mhi_cntrl->mhi_event; + struct device *dev = &mhi_cntrl->mhi_dev->dev; + unsigned long irq_flags = IRQF_SHARED | IRQF_NO_SUSPEND; + int i, ret; + + /* if controller driver has set irq_flags, use it */ + if (mhi_cntrl->irq_flags) + irq_flags = mhi_cntrl->irq_flags; + + /* Setup BHI_INTVEC IRQ */ + ret = request_threaded_irq(mhi_cntrl->irq[0], mhi_intvec_handler, + mhi_intvec_threaded_handler, + irq_flags, + "bhi", mhi_cntrl); + if (ret) + return ret; + /* + * IRQs should be enabled during mhi_async_power_up(), so disable them explicitly here. + * Due to the use of IRQF_SHARED flag as default while requesting IRQs, we assume that + * IRQ_NOAUTOEN is not applicable. 
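+ * (For instance, a controller driver that leaves irq_flags unset gets the + * IRQF_SHARED default above, and the genirq core warns when IRQ_NOAUTOEN + * is combined with a shared IRQ, hence the explicit disable_irq() calls + * that follow.)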
+ */ + disable_irq(mhi_cntrl->irq[0]); + + for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) { + if (mhi_event->offload_ev) + continue; + + if (mhi_event->irq >= mhi_cntrl->nr_irqs) { + dev_err(dev, "irq %d not available for event ring\n", + mhi_event->irq); + ret = -EINVAL; + goto error_request; + } + + ret = request_irq(mhi_cntrl->irq[mhi_event->irq], + mhi_irq_handler, + irq_flags, + "mhi", mhi_event); + if (ret) { + dev_err(dev, "Error requesting irq:%d for ev:%d\n", + mhi_cntrl->irq[mhi_event->irq], i); + goto error_request; + } + + disable_irq(mhi_cntrl->irq[mhi_event->irq]); + } + + return 0; + +error_request: + for (--i, --mhi_event; i >= 0; i--, mhi_event--) { + if (mhi_event->offload_ev) + continue; + + free_irq(mhi_cntrl->irq[mhi_event->irq], mhi_event); + } + free_irq(mhi_cntrl->irq[0], mhi_cntrl); + + return ret; +} + +void mhi_deinit_dev_ctxt(struct mhi_controller *mhi_cntrl) +{ + int i; + struct mhi_ctxt *mhi_ctxt = mhi_cntrl->mhi_ctxt; + struct mhi_cmd *mhi_cmd; + struct mhi_event *mhi_event; + struct mhi_ring *ring; + + mhi_cmd = mhi_cntrl->mhi_cmd; + for (i = 0; i < NR_OF_CMD_RINGS; i++, mhi_cmd++) { + ring = &mhi_cmd->ring; + dma_free_coherent(mhi_cntrl->cntrl_dev, ring->alloc_size, + ring->pre_aligned, ring->dma_handle); + ring->base = NULL; + ring->iommu_base = 0; + } + + dma_free_coherent(mhi_cntrl->cntrl_dev, + sizeof(*mhi_ctxt->cmd_ctxt) * NR_OF_CMD_RINGS, + mhi_ctxt->cmd_ctxt, mhi_ctxt->cmd_ctxt_addr); + + mhi_event = mhi_cntrl->mhi_event; + for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) { + if (mhi_event->offload_ev) + continue; + + ring = &mhi_event->ring; + dma_free_coherent(mhi_cntrl->cntrl_dev, ring->alloc_size, + ring->pre_aligned, ring->dma_handle); + ring->base = NULL; + ring->iommu_base = 0; + } + + dma_free_coherent(mhi_cntrl->cntrl_dev, sizeof(*mhi_ctxt->er_ctxt) * + mhi_cntrl->total_ev_rings, mhi_ctxt->er_ctxt, + mhi_ctxt->er_ctxt_addr); + + dma_free_coherent(mhi_cntrl->cntrl_dev, sizeof(*mhi_ctxt->chan_ctxt) * + mhi_cntrl->max_chan, mhi_ctxt->chan_ctxt, + mhi_ctxt->chan_ctxt_addr); + + kfree(mhi_ctxt); + mhi_cntrl->mhi_ctxt = NULL; +} + +int mhi_init_dev_ctxt(struct mhi_controller *mhi_cntrl) +{ + struct mhi_ctxt *mhi_ctxt; + struct mhi_chan_ctxt *chan_ctxt; + struct mhi_event_ctxt *er_ctxt; + struct mhi_cmd_ctxt *cmd_ctxt; + struct mhi_chan *mhi_chan; + struct mhi_event *mhi_event; + struct mhi_cmd *mhi_cmd; + u32 tmp; + int ret = -ENOMEM, i; + + atomic_set(&mhi_cntrl->dev_wake, 0); + atomic_set(&mhi_cntrl->pending_pkts, 0); + + mhi_ctxt = kzalloc(sizeof(*mhi_ctxt), GFP_KERNEL); + if (!mhi_ctxt) + return -ENOMEM; + + /* Setup channel ctxt */ + mhi_ctxt->chan_ctxt = dma_alloc_coherent(mhi_cntrl->cntrl_dev, + sizeof(*mhi_ctxt->chan_ctxt) * + mhi_cntrl->max_chan, + &mhi_ctxt->chan_ctxt_addr, + GFP_KERNEL); + if (!mhi_ctxt->chan_ctxt) + goto error_alloc_chan_ctxt; + + mhi_chan = mhi_cntrl->mhi_chan; + chan_ctxt = mhi_ctxt->chan_ctxt; + for (i = 0; i < mhi_cntrl->max_chan; i++, chan_ctxt++, mhi_chan++) { + /* Skip if it is an offload channel */ + if (mhi_chan->offload_ch) + continue; + + tmp = le32_to_cpu(chan_ctxt->chcfg); + tmp &= ~CHAN_CTX_CHSTATE_MASK; + tmp |= FIELD_PREP(CHAN_CTX_CHSTATE_MASK, MHI_CH_STATE_DISABLED); + tmp &= ~CHAN_CTX_BRSTMODE_MASK; + tmp |= FIELD_PREP(CHAN_CTX_BRSTMODE_MASK, mhi_chan->db_cfg.brstmode); + tmp &= ~CHAN_CTX_POLLCFG_MASK; + tmp |= FIELD_PREP(CHAN_CTX_POLLCFG_MASK, mhi_chan->db_cfg.pollcfg); + chan_ctxt->chcfg = cpu_to_le32(tmp); + + chan_ctxt->chtype = cpu_to_le32(mhi_chan->type); + 
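/* + * Hypothetical worked example (values assumed): for a channel with + * db_cfg.brstmode = MHI_DB_BRST_DISABLE and db_cfg.pollcfg = 0, the + * read-modify-write above reduces to + * tmp = FIELD_PREP(CHAN_CTX_CHSTATE_MASK, MHI_CH_STATE_DISABLED) | + * FIELD_PREP(CHAN_CTX_BRSTMODE_MASK, MHI_DB_BRST_DISABLE); + * since the context memory from dma_alloc_coherent() starts out zeroed. + */ +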
chan_ctxt->erindex = cpu_to_le32(mhi_chan->er_index); + + mhi_chan->ch_state = MHI_CH_STATE_DISABLED; + mhi_chan->tre_ring.db_addr = (void __iomem *)&chan_ctxt->wp; + } + + /* Setup event context */ + mhi_ctxt->er_ctxt = dma_alloc_coherent(mhi_cntrl->cntrl_dev, + sizeof(*mhi_ctxt->er_ctxt) * + mhi_cntrl->total_ev_rings, + &mhi_ctxt->er_ctxt_addr, + GFP_KERNEL); + if (!mhi_ctxt->er_ctxt) + goto error_alloc_er_ctxt; + + er_ctxt = mhi_ctxt->er_ctxt; + mhi_event = mhi_cntrl->mhi_event; + for (i = 0; i < mhi_cntrl->total_ev_rings; i++, er_ctxt++, + mhi_event++) { + struct mhi_ring *ring = &mhi_event->ring; + + /* Skip if it is an offload event */ + if (mhi_event->offload_ev) + continue; + + tmp = le32_to_cpu(er_ctxt->intmod); + tmp &= ~EV_CTX_INTMODC_MASK; + tmp &= ~EV_CTX_INTMODT_MASK; + tmp |= FIELD_PREP(EV_CTX_INTMODT_MASK, mhi_event->intmod); + er_ctxt->intmod = cpu_to_le32(tmp); + + er_ctxt->ertype = cpu_to_le32(MHI_ER_TYPE_VALID); + er_ctxt->msivec = cpu_to_le32(mhi_event->irq); + mhi_event->db_cfg.db_mode = true; + + ring->el_size = sizeof(struct mhi_ring_element); + ring->len = ring->el_size * ring->elements; + ret = mhi_alloc_aligned_ring(mhi_cntrl, ring, ring->len); + if (ret) + goto error_alloc_er; + + /* + * If the read pointer equals the write pointer, then the + * ring is empty + */ + ring->rp = ring->wp = ring->base; + er_ctxt->rbase = cpu_to_le64(ring->iommu_base); + er_ctxt->rp = er_ctxt->wp = er_ctxt->rbase; + er_ctxt->rlen = cpu_to_le64(ring->len); + ring->ctxt_wp = &er_ctxt->wp; + } + + /* Setup cmd context */ + ret = -ENOMEM; + mhi_ctxt->cmd_ctxt = dma_alloc_coherent(mhi_cntrl->cntrl_dev, + sizeof(*mhi_ctxt->cmd_ctxt) * + NR_OF_CMD_RINGS, + &mhi_ctxt->cmd_ctxt_addr, + GFP_KERNEL); + if (!mhi_ctxt->cmd_ctxt) + goto error_alloc_er; + + mhi_cmd = mhi_cntrl->mhi_cmd; + cmd_ctxt = mhi_ctxt->cmd_ctxt; + for (i = 0; i < NR_OF_CMD_RINGS; i++, mhi_cmd++, cmd_ctxt++) { + struct mhi_ring *ring = &mhi_cmd->ring; + + ring->el_size = sizeof(struct mhi_ring_element); + ring->elements = CMD_EL_PER_RING; + ring->len = ring->el_size * ring->elements; + ret = mhi_alloc_aligned_ring(mhi_cntrl, ring, ring->len); + if (ret) + goto error_alloc_cmd; + + ring->rp = ring->wp = ring->base; + cmd_ctxt->rbase = cpu_to_le64(ring->iommu_base); + cmd_ctxt->rp = cmd_ctxt->wp = cmd_ctxt->rbase; + cmd_ctxt->rlen = cpu_to_le64(ring->len); + ring->ctxt_wp = &cmd_ctxt->wp; + } + + mhi_cntrl->mhi_ctxt = mhi_ctxt; + + return 0; + +error_alloc_cmd: + for (--i, --mhi_cmd; i >= 0; i--, mhi_cmd--) { + struct mhi_ring *ring = &mhi_cmd->ring; + + dma_free_coherent(mhi_cntrl->cntrl_dev, ring->alloc_size, + ring->pre_aligned, ring->dma_handle); + } + dma_free_coherent(mhi_cntrl->cntrl_dev, + sizeof(*mhi_ctxt->cmd_ctxt) * NR_OF_CMD_RINGS, + mhi_ctxt->cmd_ctxt, mhi_ctxt->cmd_ctxt_addr); + i = mhi_cntrl->total_ev_rings; + mhi_event = mhi_cntrl->mhi_event + i; + +error_alloc_er: + for (--i, --mhi_event; i >= 0; i--, mhi_event--) { + struct mhi_ring *ring = &mhi_event->ring; + + if (mhi_event->offload_ev) + continue; + + dma_free_coherent(mhi_cntrl->cntrl_dev, ring->alloc_size, + ring->pre_aligned, ring->dma_handle); + } + dma_free_coherent(mhi_cntrl->cntrl_dev, sizeof(*mhi_ctxt->er_ctxt) * + mhi_cntrl->total_ev_rings, mhi_ctxt->er_ctxt, + mhi_ctxt->er_ctxt_addr); + +error_alloc_er_ctxt: + dma_free_coherent(mhi_cntrl->cntrl_dev, sizeof(*mhi_ctxt->chan_ctxt) * + mhi_cntrl->max_chan, mhi_ctxt->chan_ctxt, + mhi_ctxt->chan_ctxt_addr); + +error_alloc_chan_ctxt: + kfree(mhi_ctxt); + + return ret; +} + +int mhi_init_mmio(struct 
mhi_controller *mhi_cntrl) +{ + u32 val; + int i, ret; + struct mhi_chan *mhi_chan; + struct mhi_event *mhi_event; + void __iomem *base = mhi_cntrl->regs; + struct device *dev = &mhi_cntrl->mhi_dev->dev; + struct { + u32 offset; + u32 val; + } reg_info[] = { + { + CCABAP_HIGHER, + upper_32_bits(mhi_cntrl->mhi_ctxt->chan_ctxt_addr), + }, + { + CCABAP_LOWER, + lower_32_bits(mhi_cntrl->mhi_ctxt->chan_ctxt_addr), + }, + { + ECABAP_HIGHER, + upper_32_bits(mhi_cntrl->mhi_ctxt->er_ctxt_addr), + }, + { + ECABAP_LOWER, + lower_32_bits(mhi_cntrl->mhi_ctxt->er_ctxt_addr), + }, + { + CRCBAP_HIGHER, + upper_32_bits(mhi_cntrl->mhi_ctxt->cmd_ctxt_addr), + }, + { + CRCBAP_LOWER, + lower_32_bits(mhi_cntrl->mhi_ctxt->cmd_ctxt_addr), + }, + { + MHICTRLBASE_HIGHER, + upper_32_bits(mhi_cntrl->iova_start), + }, + { + MHICTRLBASE_LOWER, + lower_32_bits(mhi_cntrl->iova_start), + }, + { + MHIDATABASE_HIGHER, + upper_32_bits(mhi_cntrl->iova_start), + }, + { + MHIDATABASE_LOWER, + lower_32_bits(mhi_cntrl->iova_start), + }, + { + MHICTRLLIMIT_HIGHER, + upper_32_bits(mhi_cntrl->iova_stop), + }, + { + MHICTRLLIMIT_LOWER, + lower_32_bits(mhi_cntrl->iova_stop), + }, + { + MHIDATALIMIT_HIGHER, + upper_32_bits(mhi_cntrl->iova_stop), + }, + { + MHIDATALIMIT_LOWER, + lower_32_bits(mhi_cntrl->iova_stop), + }, + {0, 0} + }; + + dev_dbg(dev, "Initializing MHI registers\n"); + + /* Read channel db offset */ + ret = mhi_read_reg(mhi_cntrl, base, CHDBOFF, &val); + if (ret) { + dev_err(dev, "Unable to read CHDBOFF register\n"); + return -EIO; + } + + if (val >= mhi_cntrl->reg_len - (8 * MHI_DEV_WAKE_DB)) { + dev_err(dev, "CHDB offset: 0x%x is out of range: 0x%zx\n", + val, mhi_cntrl->reg_len - (8 * MHI_DEV_WAKE_DB)); + return -ERANGE; + } + + /* Setup wake db */ + mhi_cntrl->wake_db = base + val + (8 * MHI_DEV_WAKE_DB); + mhi_cntrl->wake_set = false; + + /* Setup channel db address for each channel in tre_ring */ + mhi_chan = mhi_cntrl->mhi_chan; + for (i = 0; i < mhi_cntrl->max_chan; i++, val += 8, mhi_chan++) + mhi_chan->tre_ring.db_addr = base + val; + + /* Read event ring db offset */ + ret = mhi_read_reg(mhi_cntrl, base, ERDBOFF, &val); + if (ret) { + dev_err(dev, "Unable to read ERDBOFF register\n"); + return -EIO; + } + + if (val >= mhi_cntrl->reg_len - (8 * mhi_cntrl->total_ev_rings)) { + dev_err(dev, "ERDB offset: 0x%x is out of range: 0x%zx\n", + val, mhi_cntrl->reg_len - (8 * mhi_cntrl->total_ev_rings)); + return -ERANGE; + } + + /* Setup event db address for each ev_ring */ + mhi_event = mhi_cntrl->mhi_event; + for (i = 0; i < mhi_cntrl->total_ev_rings; i++, val += 8, mhi_event++) { + if (mhi_event->offload_ev) + continue; + + mhi_event->ring.db_addr = base + val; + } + + /* Setup DB register for primary CMD rings */ + mhi_cntrl->mhi_cmd[PRIMARY_CMD_RING].ring.db_addr = base + CRDB_LOWER; + + /* Write to MMIO registers */ + for (i = 0; reg_info[i].offset; i++) + mhi_write_reg(mhi_cntrl, base, reg_info[i].offset, + reg_info[i].val); + + ret = mhi_write_reg_field(mhi_cntrl, base, MHICFG, MHICFG_NER_MASK, + mhi_cntrl->total_ev_rings); + if (ret) { + dev_err(dev, "Unable to write MHICFG register\n"); + return ret; + } + + ret = mhi_write_reg_field(mhi_cntrl, base, MHICFG, MHICFG_NHWER_MASK, + mhi_cntrl->hw_ev_rings); + if (ret) { + dev_err(dev, "Unable to write MHICFG register\n"); + return ret; + } + + return 0; +} + +void mhi_deinit_chan_ctxt(struct mhi_controller *mhi_cntrl, + struct mhi_chan *mhi_chan) +{ + struct mhi_ring *buf_ring; + struct mhi_ring *tre_ring; + struct mhi_chan_ctxt *chan_ctxt; + u32 tmp; + + buf_ring 
= &mhi_chan->buf_ring; + tre_ring = &mhi_chan->tre_ring; + chan_ctxt = &mhi_cntrl->mhi_ctxt->chan_ctxt[mhi_chan->chan]; + + if (!chan_ctxt->rbase) /* Already uninitialized */ + return; + + dma_free_coherent(mhi_cntrl->cntrl_dev, tre_ring->alloc_size, + tre_ring->pre_aligned, tre_ring->dma_handle); + vfree(buf_ring->base); + + buf_ring->base = tre_ring->base = NULL; + tre_ring->ctxt_wp = NULL; + chan_ctxt->rbase = 0; + chan_ctxt->rlen = 0; + chan_ctxt->rp = 0; + chan_ctxt->wp = 0; + + tmp = le32_to_cpu(chan_ctxt->chcfg); + tmp &= ~CHAN_CTX_CHSTATE_MASK; + tmp |= FIELD_PREP(CHAN_CTX_CHSTATE_MASK, MHI_CH_STATE_DISABLED); + chan_ctxt->chcfg = cpu_to_le32(tmp); + + /* Update to all cores */ + smp_wmb(); +} + +int mhi_init_chan_ctxt(struct mhi_controller *mhi_cntrl, + struct mhi_chan *mhi_chan) +{ + struct mhi_ring *buf_ring; + struct mhi_ring *tre_ring; + struct mhi_chan_ctxt *chan_ctxt; + u32 tmp; + int ret; + + buf_ring = &mhi_chan->buf_ring; + tre_ring = &mhi_chan->tre_ring; + tre_ring->el_size = sizeof(struct mhi_ring_element); + tre_ring->len = tre_ring->el_size * tre_ring->elements; + chan_ctxt = &mhi_cntrl->mhi_ctxt->chan_ctxt[mhi_chan->chan]; + ret = mhi_alloc_aligned_ring(mhi_cntrl, tre_ring, tre_ring->len); + if (ret) + return -ENOMEM; + + buf_ring->el_size = sizeof(struct mhi_buf_info); + buf_ring->len = buf_ring->el_size * buf_ring->elements; + buf_ring->base = vzalloc(buf_ring->len); + + if (!buf_ring->base) { + dma_free_coherent(mhi_cntrl->cntrl_dev, tre_ring->alloc_size, + tre_ring->pre_aligned, tre_ring->dma_handle); + return -ENOMEM; + } + + tmp = le32_to_cpu(chan_ctxt->chcfg); + tmp &= ~CHAN_CTX_CHSTATE_MASK; + tmp |= FIELD_PREP(CHAN_CTX_CHSTATE_MASK, MHI_CH_STATE_ENABLED); + chan_ctxt->chcfg = cpu_to_le32(tmp); + + chan_ctxt->rbase = cpu_to_le64(tre_ring->iommu_base); + chan_ctxt->rp = chan_ctxt->wp = chan_ctxt->rbase; + chan_ctxt->rlen = cpu_to_le64(tre_ring->len); + tre_ring->ctxt_wp = &chan_ctxt->wp; + + tre_ring->rp = tre_ring->wp = tre_ring->base; + buf_ring->rp = buf_ring->wp = buf_ring->base; + mhi_chan->db_cfg.db_mode = 1; + + /* Update to all cores */ + smp_wmb(); + + return 0; +} + +static int parse_ev_cfg(struct mhi_controller *mhi_cntrl, + const struct mhi_controller_config *config) +{ + struct mhi_event *mhi_event; + const struct mhi_event_config *event_cfg; + struct device *dev = mhi_cntrl->cntrl_dev; + int i, num; + + num = config->num_events; + mhi_cntrl->total_ev_rings = num; + mhi_cntrl->mhi_event = kcalloc(num, sizeof(*mhi_cntrl->mhi_event), + GFP_KERNEL); + if (!mhi_cntrl->mhi_event) + return -ENOMEM; + + /* Populate event ring */ + mhi_event = mhi_cntrl->mhi_event; + for (i = 0; i < num; i++) { + event_cfg = &config->event_cfg[i]; + + mhi_event->er_index = i; + mhi_event->ring.elements = event_cfg->num_elements; + mhi_event->intmod = event_cfg->irq_moderation_ms; + mhi_event->irq = event_cfg->irq; + + if (event_cfg->channel != U32_MAX) { + /* This event ring has a dedicated channel */ + mhi_event->chan = event_cfg->channel; + if (mhi_event->chan >= mhi_cntrl->max_chan) { + dev_err(dev, + "Event Ring channel not available\n"); + goto error_ev_cfg; + } + + mhi_event->mhi_chan = + &mhi_cntrl->mhi_chan[mhi_event->chan]; + } + + /* Priority is fixed to 1 for now */ + mhi_event->priority = 1; + + mhi_event->db_cfg.brstmode = event_cfg->mode; + if (MHI_INVALID_BRSTMODE(mhi_event->db_cfg.brstmode)) + goto error_ev_cfg; + + if (mhi_event->db_cfg.brstmode == MHI_DB_BRST_ENABLE) + mhi_event->db_cfg.process_db = mhi_db_brstmode; + else + mhi_event->db_cfg.process_db 
= mhi_db_brstmode_disable; + + mhi_event->data_type = event_cfg->data_type; + + switch (mhi_event->data_type) { + case MHI_ER_DATA: + mhi_event->process_event = mhi_process_data_event_ring; + break; + case MHI_ER_CTRL: + mhi_event->process_event = mhi_process_ctrl_ev_ring; + break; + default: + dev_err(dev, "Event Ring type not supported\n"); + goto error_ev_cfg; + } + + mhi_event->hw_ring = event_cfg->hardware_event; + if (mhi_event->hw_ring) + mhi_cntrl->hw_ev_rings++; + else + mhi_cntrl->sw_ev_rings++; + + mhi_event->cl_manage = event_cfg->client_managed; + mhi_event->offload_ev = event_cfg->offload_channel; + mhi_event++; + } + + return 0; + +error_ev_cfg: + + kfree(mhi_cntrl->mhi_event); + return -EINVAL; +} + +static int parse_ch_cfg(struct mhi_controller *mhi_cntrl, + const struct mhi_controller_config *config) +{ + const struct mhi_channel_config *ch_cfg; + struct device *dev = mhi_cntrl->cntrl_dev; + int i; + u32 chan; + + mhi_cntrl->max_chan = config->max_channels; + + /* + * The allocation of MHI channels can exceed 32KB in some scenarios, + * so to avoid any possible memory allocation failures, vzalloc is + * used here + */ + mhi_cntrl->mhi_chan = vzalloc(mhi_cntrl->max_chan * + sizeof(*mhi_cntrl->mhi_chan)); + if (!mhi_cntrl->mhi_chan) + return -ENOMEM; + + INIT_LIST_HEAD(&mhi_cntrl->lpm_chans); + + /* Populate channel configurations */ + for (i = 0; i < config->num_channels; i++) { + struct mhi_chan *mhi_chan; + + ch_cfg = &config->ch_cfg[i]; + + chan = ch_cfg->num; + if (chan >= mhi_cntrl->max_chan) { + dev_err(dev, "Channel %d not available\n", chan); + goto error_chan_cfg; + } + + mhi_chan = &mhi_cntrl->mhi_chan[chan]; + mhi_chan->name = ch_cfg->name; + mhi_chan->chan = chan; + + mhi_chan->tre_ring.elements = ch_cfg->num_elements; + if (!mhi_chan->tre_ring.elements) + goto error_chan_cfg; + + /* + * For some channels, the local ring length should be larger + * than the transfer ring length due to internal logical + * channels in the device, so the host can queue many more + * buffers than the transfer ring can hold. For example, RSC + * channels should have a larger local ring length than the + * transfer ring length. + */ + mhi_chan->buf_ring.elements = ch_cfg->local_elements; + if (!mhi_chan->buf_ring.elements) + mhi_chan->buf_ring.elements = mhi_chan->tre_ring.elements; + mhi_chan->er_index = ch_cfg->event_ring; + mhi_chan->dir = ch_cfg->dir; + + /* + * For most channels, chtype is identical to the channel direction. 
+ * If it is not defined, assign the channel direction to + * chtype. + */ + mhi_chan->type = ch_cfg->type; + if (!mhi_chan->type) + mhi_chan->type = (enum mhi_ch_type)mhi_chan->dir; + + mhi_chan->ee_mask = ch_cfg->ee_mask; + mhi_chan->db_cfg.pollcfg = ch_cfg->pollcfg; + mhi_chan->lpm_notify = ch_cfg->lpm_notify; + mhi_chan->offload_ch = ch_cfg->offload_channel; + mhi_chan->db_cfg.reset_req = ch_cfg->doorbell_mode_switch; + mhi_chan->pre_alloc = ch_cfg->auto_queue; + mhi_chan->wake_capable = ch_cfg->wake_capable; + + /* + * If the MHI host allocates buffers, then the channel direction + * should be DMA_FROM_DEVICE + */ + if (mhi_chan->pre_alloc && mhi_chan->dir != DMA_FROM_DEVICE) { + dev_err(dev, "Invalid channel configuration\n"); + goto error_chan_cfg; + } + + /* + * Bi-directional and directionless channels must be + * offload channels + */ + if ((mhi_chan->dir == DMA_BIDIRECTIONAL || + mhi_chan->dir == DMA_NONE) && !mhi_chan->offload_ch) { + dev_err(dev, "Invalid channel configuration\n"); + goto error_chan_cfg; + } + + if (!mhi_chan->offload_ch) { + mhi_chan->db_cfg.brstmode = ch_cfg->doorbell; + if (MHI_INVALID_BRSTMODE(mhi_chan->db_cfg.brstmode)) { + dev_err(dev, "Invalid doorbell mode\n"); + goto error_chan_cfg; + } + } + + if (mhi_chan->db_cfg.brstmode == MHI_DB_BRST_ENABLE) + mhi_chan->db_cfg.process_db = mhi_db_brstmode; + else + mhi_chan->db_cfg.process_db = mhi_db_brstmode_disable; + + mhi_chan->configured = true; + + if (mhi_chan->lpm_notify) + list_add_tail(&mhi_chan->node, &mhi_cntrl->lpm_chans); + } + + return 0; + +error_chan_cfg: + vfree(mhi_cntrl->mhi_chan); + + return -EINVAL; +} + +static int parse_config(struct mhi_controller *mhi_cntrl, + const struct mhi_controller_config *config) +{ + int ret; + + /* Parse MHI channel configuration */ + ret = parse_ch_cfg(mhi_cntrl, config); + if (ret) + return ret; + + /* Parse MHI event configuration */ + ret = parse_ev_cfg(mhi_cntrl, config); + if (ret) + goto error_ev_cfg; + + mhi_cntrl->timeout_ms = config->timeout_ms; + if (!mhi_cntrl->timeout_ms) + mhi_cntrl->timeout_ms = MHI_TIMEOUT_MS; + + mhi_cntrl->bounce_buf = config->use_bounce_buf; + mhi_cntrl->buffer_len = config->buf_len; + if (!mhi_cntrl->buffer_len) + mhi_cntrl->buffer_len = MHI_MAX_MTU; + + /* By default, host is allowed to ring DB in both M0 and M2 states */ + mhi_cntrl->db_access = MHI_PM_M0 | MHI_PM_M2; + if (config->m2_no_db) + mhi_cntrl->db_access &= ~MHI_PM_M2; + + return 0; + +error_ev_cfg: + vfree(mhi_cntrl->mhi_chan); + + return ret; +} + +int mhi_register_controller(struct mhi_controller *mhi_cntrl, + const struct mhi_controller_config *config) +{ + struct mhi_event *mhi_event; + struct mhi_chan *mhi_chan; + struct mhi_cmd *mhi_cmd; + struct mhi_device *mhi_dev; + u32 soc_info; + int ret, i; + + if (!mhi_cntrl || !mhi_cntrl->cntrl_dev || !mhi_cntrl->regs || + !mhi_cntrl->runtime_get || !mhi_cntrl->runtime_put || + !mhi_cntrl->status_cb || !mhi_cntrl->read_reg || + !mhi_cntrl->write_reg || !mhi_cntrl->nr_irqs || + !mhi_cntrl->irq || !mhi_cntrl->reg_len) + return -EINVAL; + + ret = parse_config(mhi_cntrl, config); + if (ret) + return -EINVAL; + + mhi_cntrl->mhi_cmd = kcalloc(NR_OF_CMD_RINGS, + sizeof(*mhi_cntrl->mhi_cmd), GFP_KERNEL); + if (!mhi_cntrl->mhi_cmd) { + ret = -ENOMEM; + goto err_free_event; + } + + INIT_LIST_HEAD(&mhi_cntrl->transition_list); + mutex_init(&mhi_cntrl->pm_mutex); + rwlock_init(&mhi_cntrl->pm_lock); + spin_lock_init(&mhi_cntrl->transition_lock); + spin_lock_init(&mhi_cntrl->wlock); + INIT_WORK(&mhi_cntrl->st_worker, 
mhi_pm_st_worker); + init_waitqueue_head(&mhi_cntrl->state_event); + + mhi_cntrl->hiprio_wq = alloc_ordered_workqueue("mhi_hiprio_wq", WQ_HIGHPRI); + if (!mhi_cntrl->hiprio_wq) { + dev_err(mhi_cntrl->cntrl_dev, "Failed to allocate workqueue\n"); + ret = -ENOMEM; + goto err_free_cmd; + } + + mhi_cmd = mhi_cntrl->mhi_cmd; + for (i = 0; i < NR_OF_CMD_RINGS; i++, mhi_cmd++) + spin_lock_init(&mhi_cmd->lock); + + mhi_event = mhi_cntrl->mhi_event; + for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) { + /* Skip for offload events */ + if (mhi_event->offload_ev) + continue; + + mhi_event->mhi_cntrl = mhi_cntrl; + spin_lock_init(&mhi_event->lock); + if (mhi_event->data_type == MHI_ER_CTRL) + tasklet_init(&mhi_event->task, mhi_ctrl_ev_task, + (ulong)mhi_event); + else + tasklet_init(&mhi_event->task, mhi_ev_task, + (ulong)mhi_event); + } + + mhi_chan = mhi_cntrl->mhi_chan; + for (i = 0; i < mhi_cntrl->max_chan; i++, mhi_chan++) { + mutex_init(&mhi_chan->mutex); + init_completion(&mhi_chan->completion); + rwlock_init(&mhi_chan->lock); + + /* used in setting bei field of TRE */ + mhi_event = &mhi_cntrl->mhi_event[mhi_chan->er_index]; + mhi_chan->intmod = mhi_event->intmod; + } + + if (mhi_cntrl->bounce_buf) { + mhi_cntrl->map_single = mhi_map_single_use_bb; + mhi_cntrl->unmap_single = mhi_unmap_single_use_bb; + } else { + mhi_cntrl->map_single = mhi_map_single_no_bb; + mhi_cntrl->unmap_single = mhi_unmap_single_no_bb; + } + + /* Read the MHI device info */ + ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->regs, + SOC_HW_VERSION_OFFS, &soc_info); + if (ret) + goto err_destroy_wq; + + mhi_cntrl->family_number = FIELD_GET(SOC_HW_VERSION_FAM_NUM_BMSK, soc_info); + mhi_cntrl->device_number = FIELD_GET(SOC_HW_VERSION_DEV_NUM_BMSK, soc_info); + mhi_cntrl->major_version = FIELD_GET(SOC_HW_VERSION_MAJOR_VER_BMSK, soc_info); + mhi_cntrl->minor_version = FIELD_GET(SOC_HW_VERSION_MINOR_VER_BMSK, soc_info); + + mhi_cntrl->index = ida_alloc(&mhi_controller_ida, GFP_KERNEL); + if (mhi_cntrl->index < 0) { + ret = mhi_cntrl->index; + goto err_destroy_wq; + } + + ret = mhi_init_irq_setup(mhi_cntrl); + if (ret) + goto err_ida_free; + + /* Register controller with MHI bus */ + mhi_dev = mhi_alloc_device(mhi_cntrl); + if (IS_ERR(mhi_dev)) { + dev_err(mhi_cntrl->cntrl_dev, "Failed to allocate MHI device\n"); + ret = PTR_ERR(mhi_dev); + goto error_setup_irq; + } + + mhi_dev->dev_type = MHI_DEVICE_CONTROLLER; + mhi_dev->mhi_cntrl = mhi_cntrl; + dev_set_name(&mhi_dev->dev, "mhi%d", mhi_cntrl->index); + mhi_dev->name = dev_name(&mhi_dev->dev); + + /* Init wakeup source */ + device_init_wakeup(&mhi_dev->dev, true); + + ret = device_add(&mhi_dev->dev); + if (ret) + goto err_release_dev; + + mhi_cntrl->mhi_dev = mhi_dev; + + mhi_create_debugfs(mhi_cntrl); + + return 0; + +err_release_dev: + put_device(&mhi_dev->dev); +error_setup_irq: + mhi_deinit_free_irq(mhi_cntrl); +err_ida_free: + ida_free(&mhi_controller_ida, mhi_cntrl->index); +err_destroy_wq: + destroy_workqueue(mhi_cntrl->hiprio_wq); +err_free_cmd: + kfree(mhi_cntrl->mhi_cmd); +err_free_event: + kfree(mhi_cntrl->mhi_event); + vfree(mhi_cntrl->mhi_chan); + + return ret; +} +EXPORT_SYMBOL_GPL(mhi_register_controller); + +void mhi_unregister_controller(struct mhi_controller *mhi_cntrl) +{ + struct mhi_device *mhi_dev = mhi_cntrl->mhi_dev; + struct mhi_chan *mhi_chan = mhi_cntrl->mhi_chan; + unsigned int i; + + mhi_deinit_free_irq(mhi_cntrl); + mhi_destroy_debugfs(mhi_cntrl); + + destroy_workqueue(mhi_cntrl->hiprio_wq); + kfree(mhi_cntrl->mhi_cmd); + 
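/* + * Teardown runs in roughly the reverse order of mhi_register_controller(): + * IRQs and debugfs first, then the workqueue and the command structures, + * with the event and channel memory released below. + */ +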
kfree(mhi_cntrl->mhi_event); + + /* Drop the references to MHI devices created for channels */ + for (i = 0; i < mhi_cntrl->max_chan; i++, mhi_chan++) { + if (!mhi_chan->mhi_dev) + continue; + + put_device(&mhi_chan->mhi_dev->dev); + } + vfree(mhi_cntrl->mhi_chan); + + device_del(&mhi_dev->dev); + put_device(&mhi_dev->dev); + + ida_free(&mhi_controller_ida, mhi_cntrl->index); +} +EXPORT_SYMBOL_GPL(mhi_unregister_controller); + +struct mhi_controller *mhi_alloc_controller(void) +{ + struct mhi_controller *mhi_cntrl; + + mhi_cntrl = kzalloc(sizeof(*mhi_cntrl), GFP_KERNEL); + + return mhi_cntrl; +} +EXPORT_SYMBOL_GPL(mhi_alloc_controller); + +void mhi_free_controller(struct mhi_controller *mhi_cntrl) +{ + kfree(mhi_cntrl); +} +EXPORT_SYMBOL_GPL(mhi_free_controller); + +int mhi_prepare_for_power_up(struct mhi_controller *mhi_cntrl) +{ + struct device *dev = &mhi_cntrl->mhi_dev->dev; + u32 bhi_off, bhie_off; + int ret; + + mutex_lock(&mhi_cntrl->pm_mutex); + + ret = mhi_init_dev_ctxt(mhi_cntrl); + if (ret) + goto error_dev_ctxt; + + ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->regs, BHIOFF, &bhi_off); + if (ret) { + dev_err(dev, "Error getting BHI offset\n"); + goto error_reg_offset; + } + + if (bhi_off >= mhi_cntrl->reg_len) { + dev_err(dev, "BHI offset: 0x%x is out of range: 0x%zx\n", + bhi_off, mhi_cntrl->reg_len); + ret = -EINVAL; + goto error_reg_offset; + } + mhi_cntrl->bhi = mhi_cntrl->regs + bhi_off; + + if (mhi_cntrl->fbc_download || mhi_cntrl->rddm_size) { + ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->regs, BHIEOFF, + &bhie_off); + if (ret) { + dev_err(dev, "Error getting BHIE offset\n"); + goto error_reg_offset; + } + + if (bhie_off >= mhi_cntrl->reg_len) { + dev_err(dev, + "BHIe offset: 0x%x is out of range: 0x%zx\n", + bhie_off, mhi_cntrl->reg_len); + ret = -EINVAL; + goto error_reg_offset; + } + mhi_cntrl->bhie = mhi_cntrl->regs + bhie_off; + } + + if (mhi_cntrl->rddm_size) { + /* + * This controller supports RDDM, so we need to manually clear + * BHIE RX registers since POR values are undefined. + */ + memset_io(mhi_cntrl->bhie + BHIE_RXVECADDR_LOW_OFFS, + 0, BHIE_RXVECSTATUS_OFFS - BHIE_RXVECADDR_LOW_OFFS + + 4); + /* + * Allocate RDDM table for debugging purpose if specified + */ + mhi_alloc_bhie_table(mhi_cntrl, &mhi_cntrl->rddm_image, + mhi_cntrl->rddm_size); + if (mhi_cntrl->rddm_image) { + ret = mhi_rddm_prepare(mhi_cntrl, + mhi_cntrl->rddm_image); + if (ret) { + mhi_free_bhie_table(mhi_cntrl, + mhi_cntrl->rddm_image); + goto error_reg_offset; + } + } + } + + mutex_unlock(&mhi_cntrl->pm_mutex); + + return 0; + +error_reg_offset: + mhi_deinit_dev_ctxt(mhi_cntrl); + +error_dev_ctxt: + mutex_unlock(&mhi_cntrl->pm_mutex); + + return ret; +} +EXPORT_SYMBOL_GPL(mhi_prepare_for_power_up); + +void mhi_unprepare_after_power_down(struct mhi_controller *mhi_cntrl) +{ + if (mhi_cntrl->fbc_image) { + mhi_free_bhie_table(mhi_cntrl, mhi_cntrl->fbc_image); + mhi_cntrl->fbc_image = NULL; + } + + if (mhi_cntrl->rddm_image) { + mhi_free_bhie_table(mhi_cntrl, mhi_cntrl->rddm_image); + mhi_cntrl->rddm_image = NULL; + } + + mhi_cntrl->bhi = NULL; + mhi_cntrl->bhie = NULL; + + mhi_deinit_dev_ctxt(mhi_cntrl); +} +EXPORT_SYMBOL_GPL(mhi_unprepare_after_power_down); + +static void mhi_release_device(struct device *dev) +{ + struct mhi_device *mhi_dev = to_mhi_device(dev); + + /* + * We need to set the mhi_chan->mhi_dev to NULL here since the MHI + * devices for the channels will only get created if the mhi_dev + * associated with it is NULL. 
This scenario will happen during the + * controller suspend and resume. + */ + if (mhi_dev->ul_chan) + mhi_dev->ul_chan->mhi_dev = NULL; + + if (mhi_dev->dl_chan) + mhi_dev->dl_chan->mhi_dev = NULL; + + kfree(mhi_dev); +} + +struct mhi_device *mhi_alloc_device(struct mhi_controller *mhi_cntrl) +{ + struct mhi_device *mhi_dev; + struct device *dev; + + mhi_dev = kzalloc(sizeof(*mhi_dev), GFP_KERNEL); + if (!mhi_dev) + return ERR_PTR(-ENOMEM); + + dev = &mhi_dev->dev; + device_initialize(dev); + dev->bus = &mhi_bus_type; + dev->release = mhi_release_device; + + if (mhi_cntrl->mhi_dev) { + /* for MHI client devices, parent is the MHI controller device */ + dev->parent = &mhi_cntrl->mhi_dev->dev; + } else { + /* for the MHI controller device, parent is the bus device (e.g. PCI device) */ + dev->parent = mhi_cntrl->cntrl_dev; + } + + mhi_dev->mhi_cntrl = mhi_cntrl; + mhi_dev->dev_wake = 0; + + return mhi_dev; +} + +static int mhi_driver_probe(struct device *dev) +{ + struct mhi_device *mhi_dev = to_mhi_device(dev); + struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl; + struct device_driver *drv = dev->driver; + struct mhi_driver *mhi_drv = to_mhi_driver(drv); + struct mhi_event *mhi_event; + struct mhi_chan *ul_chan = mhi_dev->ul_chan; + struct mhi_chan *dl_chan = mhi_dev->dl_chan; + int ret; + + /* Bring device out of LPM */ + ret = mhi_device_get_sync(mhi_dev); + if (ret) + return ret; + + ret = -EINVAL; + + if (ul_chan) { + /* + * If the channel supports LPM notifications, then status_cb + * should be provided + */ + if (ul_chan->lpm_notify && !mhi_drv->status_cb) + goto exit_probe; + + /* For non-offload channels, xfer_cb should be provided */ + if (!ul_chan->offload_ch && !mhi_drv->ul_xfer_cb) + goto exit_probe; + + ul_chan->xfer_cb = mhi_drv->ul_xfer_cb; + } + + ret = -EINVAL; + if (dl_chan) { + /* + * If the channel supports LPM notifications, then status_cb + * should be provided + */ + if (dl_chan->lpm_notify && !mhi_drv->status_cb) + goto exit_probe; + + /* For non-offload channels, xfer_cb should be provided */ + if (!dl_chan->offload_ch && !mhi_drv->dl_xfer_cb) + goto exit_probe; + + mhi_event = &mhi_cntrl->mhi_event[dl_chan->er_index]; + + /* + * If the channel event ring is managed by the client, then + * status_cb must be provided so that the framework can + * notify pending data + */ + if (mhi_event->cl_manage && !mhi_drv->status_cb) + goto exit_probe; + + dl_chan->xfer_cb = mhi_drv->dl_xfer_cb; + } + + /* Call the user-provided probe function */ + ret = mhi_drv->probe(mhi_dev, mhi_dev->id); + if (ret) + goto exit_probe; + + mhi_device_put(mhi_dev); + + return ret; + +exit_probe: + mhi_unprepare_from_transfer(mhi_dev); + + mhi_device_put(mhi_dev); + + return ret; +} + +static int mhi_driver_remove(struct device *dev) +{ + struct mhi_device *mhi_dev = to_mhi_device(dev); + struct mhi_driver *mhi_drv = to_mhi_driver(dev->driver); + struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl; + struct mhi_chan *mhi_chan; + enum mhi_ch_state ch_state[] = { + MHI_CH_STATE_DISABLED, + MHI_CH_STATE_DISABLED + }; + int dir; + + /* Skip if it is a controller device */ + if (mhi_dev->dev_type == MHI_DEVICE_CONTROLLER) + return 0; + + /* Reset both channels */ + for (dir = 0; dir < 2; dir++) { + mhi_chan = dir ? 
mhi_dev->ul_chan : mhi_dev->dl_chan; + + if (!mhi_chan) + continue; + + /* Wake all threads waiting for completion */ + write_lock_irq(&mhi_chan->lock); + mhi_chan->ccs = MHI_EV_CC_INVALID; + complete_all(&mhi_chan->completion); + write_unlock_irq(&mhi_chan->lock); + + /* Set the channel state to disabled */ + mutex_lock(&mhi_chan->mutex); + write_lock_irq(&mhi_chan->lock); + ch_state[dir] = mhi_chan->ch_state; + mhi_chan->ch_state = MHI_CH_STATE_SUSPENDED; + write_unlock_irq(&mhi_chan->lock); + + /* Reset the non-offload channel */ + if (!mhi_chan->offload_ch) + mhi_reset_chan(mhi_cntrl, mhi_chan); + + mutex_unlock(&mhi_chan->mutex); + } + + mhi_drv->remove(mhi_dev); + + /* De-init channel if it was enabled */ + for (dir = 0; dir < 2; dir++) { + mhi_chan = dir ? mhi_dev->ul_chan : mhi_dev->dl_chan; + + if (!mhi_chan) + continue; + + mutex_lock(&mhi_chan->mutex); + + if ((ch_state[dir] == MHI_CH_STATE_ENABLED || + ch_state[dir] == MHI_CH_STATE_STOP) && + !mhi_chan->offload_ch) + mhi_deinit_chan_ctxt(mhi_cntrl, mhi_chan); + + mhi_chan->ch_state = MHI_CH_STATE_DISABLED; + + mutex_unlock(&mhi_chan->mutex); + } + + while (mhi_dev->dev_wake) + mhi_device_put(mhi_dev); + + return 0; +} + +int __mhi_driver_register(struct mhi_driver *mhi_drv, struct module *owner) +{ + struct device_driver *driver = &mhi_drv->driver; + + if (!mhi_drv->probe || !mhi_drv->remove) + return -EINVAL; + + driver->bus = &mhi_bus_type; + driver->owner = owner; + driver->probe = mhi_driver_probe; + driver->remove = mhi_driver_remove; + + return driver_register(driver); +} +EXPORT_SYMBOL_GPL(__mhi_driver_register); + +void mhi_driver_unregister(struct mhi_driver *mhi_drv) +{ + driver_unregister(&mhi_drv->driver); +} +EXPORT_SYMBOL_GPL(mhi_driver_unregister); + +static int mhi_uevent(struct device *dev, struct kobj_uevent_env *env) +{ + struct mhi_device *mhi_dev = to_mhi_device(dev); + + return add_uevent_var(env, "MODALIAS=" MHI_DEVICE_MODALIAS_FMT, + mhi_dev->name); +} + +static int mhi_match(struct device *dev, struct device_driver *drv) +{ + struct mhi_device *mhi_dev = to_mhi_device(dev); + struct mhi_driver *mhi_drv = to_mhi_driver(drv); + const struct mhi_device_id *id; + + /* + * If the device is a controller type then there is no client driver + * associated with it + */ + if (mhi_dev->dev_type == MHI_DEVICE_CONTROLLER) + return 0; + + for (id = mhi_drv->id_table; id->chan[0]; id++) + if (!strcmp(mhi_dev->name, id->chan)) { + mhi_dev->id = id; + return 1; + } + + return 0; +}; + +struct bus_type mhi_bus_type = { + .name = "mhi", + .dev_name = "mhi", + .match = mhi_match, + .uevent = mhi_uevent, + .dev_groups = mhi_dev_groups, +}; + +static int __init mhi_init(void) +{ + mhi_debugfs_init(); + return bus_register(&mhi_bus_type); +} + +static void __exit mhi_exit(void) +{ + mhi_debugfs_exit(); + bus_unregister(&mhi_bus_type); +} + +postcore_initcall(mhi_init); +module_exit(mhi_exit); + +MODULE_LICENSE("GPL v2"); +MODULE_DESCRIPTION("MHI Host Interface"); diff --git a/drivers/bus/mhi/host/internal.h b/drivers/bus/mhi/host/internal.h new file mode 100644 index 000000000..01fd10a39 --- /dev/null +++ b/drivers/bus/mhi/host/internal.h @@ -0,0 +1,383 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2018-2020, The Linux Foundation. All rights reserved. 
+ * + */ + +#ifndef _MHI_INT_H +#define _MHI_INT_H + +#include "../common.h" + +extern struct bus_type mhi_bus_type; + +/* Host request register */ +#define MHI_SOC_RESET_REQ_OFFSET 0xb0 +#define MHI_SOC_RESET_REQ BIT(0) + +#define SOC_HW_VERSION_OFFS 0x224 +#define SOC_HW_VERSION_FAM_NUM_BMSK GENMASK(31, 28) +#define SOC_HW_VERSION_DEV_NUM_BMSK GENMASK(27, 16) +#define SOC_HW_VERSION_MAJOR_VER_BMSK GENMASK(15, 8) +#define SOC_HW_VERSION_MINOR_VER_BMSK GENMASK(7, 0) + +struct mhi_ctxt { + struct mhi_event_ctxt *er_ctxt; + struct mhi_chan_ctxt *chan_ctxt; + struct mhi_cmd_ctxt *cmd_ctxt; + dma_addr_t er_ctxt_addr; + dma_addr_t chan_ctxt_addr; + dma_addr_t cmd_ctxt_addr; +}; + +struct bhi_vec_entry { + u64 dma_addr; + u64 size; +}; + +enum mhi_ch_state_type { + MHI_CH_STATE_TYPE_RESET, + MHI_CH_STATE_TYPE_STOP, + MHI_CH_STATE_TYPE_START, + MHI_CH_STATE_TYPE_MAX, +}; + +extern const char * const mhi_ch_state_type_str[MHI_CH_STATE_TYPE_MAX]; +#define TO_CH_STATE_TYPE_STR(state) (((state) >= MHI_CH_STATE_TYPE_MAX) ? \ + "INVALID_STATE" : \ + mhi_ch_state_type_str[(state)]) + +#define MHI_INVALID_BRSTMODE(mode) (mode != MHI_DB_BRST_DISABLE && \ + mode != MHI_DB_BRST_ENABLE) + +extern const char * const mhi_ee_str[MHI_EE_MAX]; +#define TO_MHI_EXEC_STR(ee) (((ee) >= MHI_EE_MAX) ? \ + "INVALID_EE" : mhi_ee_str[ee]) + +#define MHI_IN_PBL(ee) (ee == MHI_EE_PBL || ee == MHI_EE_PTHRU || \ + ee == MHI_EE_EDL) +#define MHI_POWER_UP_CAPABLE(ee) (MHI_IN_PBL(ee) || ee == MHI_EE_AMSS) +#define MHI_FW_LOAD_CAPABLE(ee) (ee == MHI_EE_PBL || ee == MHI_EE_EDL) +#define MHI_IN_MISSION_MODE(ee) (ee == MHI_EE_AMSS || ee == MHI_EE_WFW || \ + ee == MHI_EE_FP) + +enum dev_st_transition { + DEV_ST_TRANSITION_PBL, + DEV_ST_TRANSITION_READY, + DEV_ST_TRANSITION_SBL, + DEV_ST_TRANSITION_MISSION_MODE, + DEV_ST_TRANSITION_FP, + DEV_ST_TRANSITION_SYS_ERR, + DEV_ST_TRANSITION_DISABLE, + DEV_ST_TRANSITION_MAX, +}; + +extern const char * const dev_state_tran_str[DEV_ST_TRANSITION_MAX]; +#define TO_DEV_STATE_TRANS_STR(state) (((state) >= DEV_ST_TRANSITION_MAX) ? 
\ + "INVALID_STATE" : dev_state_tran_str[state]) + +/* internal power states */ +enum mhi_pm_state { + MHI_PM_STATE_DISABLE, + MHI_PM_STATE_POR, + MHI_PM_STATE_M0, + MHI_PM_STATE_M2, + MHI_PM_STATE_M3_ENTER, + MHI_PM_STATE_M3, + MHI_PM_STATE_M3_EXIT, + MHI_PM_STATE_FW_DL_ERR, + MHI_PM_STATE_SYS_ERR_DETECT, + MHI_PM_STATE_SYS_ERR_PROCESS, + MHI_PM_STATE_SHUTDOWN_PROCESS, + MHI_PM_STATE_LD_ERR_FATAL_DETECT, + MHI_PM_STATE_MAX +}; + +#define MHI_PM_DISABLE BIT(0) +#define MHI_PM_POR BIT(1) +#define MHI_PM_M0 BIT(2) +#define MHI_PM_M2 BIT(3) +#define MHI_PM_M3_ENTER BIT(4) +#define MHI_PM_M3 BIT(5) +#define MHI_PM_M3_EXIT BIT(6) +/* firmware download failure state */ +#define MHI_PM_FW_DL_ERR BIT(7) +#define MHI_PM_SYS_ERR_DETECT BIT(8) +#define MHI_PM_SYS_ERR_PROCESS BIT(9) +#define MHI_PM_SHUTDOWN_PROCESS BIT(10) +/* link not accessible */ +#define MHI_PM_LD_ERR_FATAL_DETECT BIT(11) + +#define MHI_REG_ACCESS_VALID(pm_state) ((pm_state & (MHI_PM_POR | MHI_PM_M0 | \ + MHI_PM_M2 | MHI_PM_M3_ENTER | MHI_PM_M3_EXIT | \ + MHI_PM_SYS_ERR_DETECT | MHI_PM_SYS_ERR_PROCESS | \ + MHI_PM_SHUTDOWN_PROCESS | MHI_PM_FW_DL_ERR))) +#define MHI_PM_IN_ERROR_STATE(pm_state) (pm_state >= MHI_PM_FW_DL_ERR) +#define MHI_PM_IN_FATAL_STATE(pm_state) (pm_state == MHI_PM_LD_ERR_FATAL_DETECT) +#define MHI_DB_ACCESS_VALID(mhi_cntrl) (mhi_cntrl->pm_state & mhi_cntrl->db_access) +#define MHI_WAKE_DB_CLEAR_VALID(pm_state) (pm_state & (MHI_PM_M0 | \ + MHI_PM_M2 | MHI_PM_M3_EXIT)) +#define MHI_WAKE_DB_SET_VALID(pm_state) (pm_state & MHI_PM_M2) +#define MHI_WAKE_DB_FORCE_SET_VALID(pm_state) MHI_WAKE_DB_CLEAR_VALID(pm_state) +#define MHI_EVENT_ACCESS_INVALID(pm_state) (pm_state == MHI_PM_DISABLE || \ + MHI_PM_IN_ERROR_STATE(pm_state)) +#define MHI_PM_IN_SUSPEND_STATE(pm_state) (pm_state & \ + (MHI_PM_M3_ENTER | MHI_PM_M3)) + +#define NR_OF_CMD_RINGS 1 +#define CMD_EL_PER_RING 128 +#define PRIMARY_CMD_RING 0 +#define MHI_DEV_WAKE_DB 127 +#define MHI_MAX_MTU 0xffff +#define MHI_RANDOM_U32_NONZERO(bmsk) (prandom_u32_max(bmsk) + 1) + +enum mhi_er_type { + MHI_ER_TYPE_INVALID = 0x0, + MHI_ER_TYPE_VALID = 0x1, +}; + +struct db_cfg { + bool reset_req; + bool db_mode; + u32 pollcfg; + enum mhi_db_brst_mode brstmode; + dma_addr_t db_val; + void (*process_db)(struct mhi_controller *mhi_cntrl, + struct db_cfg *db_cfg, void __iomem *io_addr, + dma_addr_t db_val); +}; + +struct mhi_pm_transitions { + enum mhi_pm_state from_state; + u32 to_states; +}; + +struct state_transition { + struct list_head node; + enum dev_st_transition state; +}; + +struct mhi_ring { + dma_addr_t dma_handle; + dma_addr_t iommu_base; + __le64 *ctxt_wp; /* point to ctxt wp */ + void *pre_aligned; + void *base; + void *rp; + void *wp; + size_t el_size; + size_t len; + size_t elements; + size_t alloc_size; + void __iomem *db_addr; +}; + +struct mhi_cmd { + struct mhi_ring ring; + spinlock_t lock; +}; + +struct mhi_buf_info { + void *v_addr; + void *bb_addr; + void *wp; + void *cb_buf; + dma_addr_t p_addr; + size_t len; + enum dma_data_direction dir; + bool used; /* Indicates whether the buffer is used or not */ + bool pre_mapped; /* Already pre-mapped by client */ +}; + +struct mhi_event { + struct mhi_controller *mhi_cntrl; + struct mhi_chan *mhi_chan; /* dedicated to channel */ + u32 er_index; + u32 intmod; + u32 irq; + int chan; /* this event ring is dedicated to a channel (optional) */ + u32 priority; + enum mhi_er_data_type data_type; + struct mhi_ring ring; + struct db_cfg db_cfg; + struct tasklet_struct task; + spinlock_t lock; + int (*process_event)(struct 
mhi_controller *mhi_cntrl, + struct mhi_event *mhi_event, + u32 event_quota); + bool hw_ring; + bool cl_manage; + bool offload_ev; /* managed by a device driver */ +}; + +struct mhi_chan { + const char *name; + /* + * Important: When consuming, increment tre_ring first and when + * releasing, decrement buf_ring first. If tre_ring has space, buf_ring + * is guaranteed to have space, so we do not need to check both rings. + */ + struct mhi_ring buf_ring; + struct mhi_ring tre_ring; + u32 chan; + u32 er_index; + u32 intmod; + enum mhi_ch_type type; + enum dma_data_direction dir; + struct db_cfg db_cfg; + enum mhi_ch_ee_mask ee_mask; + enum mhi_ch_state ch_state; + enum mhi_ev_ccs ccs; + struct mhi_device *mhi_dev; + void (*xfer_cb)(struct mhi_device *mhi_dev, struct mhi_result *result); + struct mutex mutex; + struct completion completion; + rwlock_t lock; + struct list_head node; + bool lpm_notify; + bool configured; + bool offload_ch; + bool pre_alloc; + bool wake_capable; +}; + +/* Default MHI timeout */ +#define MHI_TIMEOUT_MS (1000) + +/* debugfs related functions */ +#ifdef CONFIG_MHI_BUS_DEBUG +void mhi_create_debugfs(struct mhi_controller *mhi_cntrl); +void mhi_destroy_debugfs(struct mhi_controller *mhi_cntrl); +void mhi_debugfs_init(void); +void mhi_debugfs_exit(void); +#else +static inline void mhi_create_debugfs(struct mhi_controller *mhi_cntrl) +{ +} + +static inline void mhi_destroy_debugfs(struct mhi_controller *mhi_cntrl) +{ +} + +static inline void mhi_debugfs_init(void) +{ +} + +static inline void mhi_debugfs_exit(void) +{ +} +#endif + +struct mhi_device *mhi_alloc_device(struct mhi_controller *mhi_cntrl); + +int mhi_destroy_device(struct device *dev, void *data); +void mhi_create_devices(struct mhi_controller *mhi_cntrl); + +int mhi_alloc_bhie_table(struct mhi_controller *mhi_cntrl, + struct image_info **image_info, size_t alloc_size); +void mhi_free_bhie_table(struct mhi_controller *mhi_cntrl, + struct image_info *image_info); + +/* Power management APIs */ +enum mhi_pm_state __must_check mhi_tryset_pm_state( + struct mhi_controller *mhi_cntrl, + enum mhi_pm_state state); +const char *to_mhi_pm_state_str(u32 state); +int mhi_queue_state_transition(struct mhi_controller *mhi_cntrl, + enum dev_st_transition state); +void mhi_pm_st_worker(struct work_struct *work); +void mhi_pm_sys_err_handler(struct mhi_controller *mhi_cntrl); +int mhi_ready_state_transition(struct mhi_controller *mhi_cntrl); +int mhi_pm_m0_transition(struct mhi_controller *mhi_cntrl); +void mhi_pm_m1_transition(struct mhi_controller *mhi_cntrl); +int mhi_pm_m3_transition(struct mhi_controller *mhi_cntrl); +int __mhi_device_get_sync(struct mhi_controller *mhi_cntrl); +int mhi_send_cmd(struct mhi_controller *mhi_cntrl, struct mhi_chan *mhi_chan, + enum mhi_cmd_type cmd); +int mhi_download_amss_image(struct mhi_controller *mhi_cntrl); +static inline bool mhi_is_active(struct mhi_controller *mhi_cntrl) +{ + return (mhi_cntrl->dev_state >= MHI_STATE_M0 && + mhi_cntrl->dev_state <= MHI_STATE_M3_FAST); +} + +static inline void mhi_trigger_resume(struct mhi_controller *mhi_cntrl) +{ + pm_wakeup_event(&mhi_cntrl->mhi_dev->dev, 0); + mhi_cntrl->runtime_get(mhi_cntrl); + mhi_cntrl->runtime_put(mhi_cntrl); +} + +/* Register access methods */ +void mhi_db_brstmode(struct mhi_controller *mhi_cntrl, struct db_cfg *db_cfg, + void __iomem *db_addr, dma_addr_t db_val); +void mhi_db_brstmode_disable(struct mhi_controller *mhi_cntrl, + struct db_cfg *db_mode, void __iomem *db_addr, + dma_addr_t db_val); +int __must_check 
mhi_read_reg(struct mhi_controller *mhi_cntrl, + void __iomem *base, u32 offset, u32 *out); +int __must_check mhi_read_reg_field(struct mhi_controller *mhi_cntrl, + void __iomem *base, u32 offset, u32 mask, + u32 *out); +int __must_check mhi_poll_reg_field(struct mhi_controller *mhi_cntrl, + void __iomem *base, u32 offset, u32 mask, + u32 val, u32 delayus); +void mhi_write_reg(struct mhi_controller *mhi_cntrl, void __iomem *base, + u32 offset, u32 val); +int __must_check mhi_write_reg_field(struct mhi_controller *mhi_cntrl, + void __iomem *base, u32 offset, u32 mask, + u32 val); +void mhi_ring_er_db(struct mhi_event *mhi_event); +void mhi_write_db(struct mhi_controller *mhi_cntrl, void __iomem *db_addr, + dma_addr_t db_val); +void mhi_ring_cmd_db(struct mhi_controller *mhi_cntrl, struct mhi_cmd *mhi_cmd); +void mhi_ring_chan_db(struct mhi_controller *mhi_cntrl, + struct mhi_chan *mhi_chan); + +/* Initialization methods */ +int mhi_init_mmio(struct mhi_controller *mhi_cntrl); +int mhi_init_dev_ctxt(struct mhi_controller *mhi_cntrl); +void mhi_deinit_dev_ctxt(struct mhi_controller *mhi_cntrl); +int mhi_init_irq_setup(struct mhi_controller *mhi_cntrl); +void mhi_deinit_free_irq(struct mhi_controller *mhi_cntrl); +int mhi_rddm_prepare(struct mhi_controller *mhi_cntrl, + struct image_info *img_info); +void mhi_fw_load_handler(struct mhi_controller *mhi_cntrl); + +/* Automatically allocate and queue inbound buffers */ +#define MHI_CH_INBOUND_ALLOC_BUFS BIT(0) +int mhi_prepare_channel(struct mhi_controller *mhi_cntrl, + struct mhi_chan *mhi_chan, unsigned int flags); + +int mhi_init_chan_ctxt(struct mhi_controller *mhi_cntrl, + struct mhi_chan *mhi_chan); +void mhi_deinit_chan_ctxt(struct mhi_controller *mhi_cntrl, + struct mhi_chan *mhi_chan); +void mhi_reset_chan(struct mhi_controller *mhi_cntrl, + struct mhi_chan *mhi_chan); + +/* Event processing methods */ +void mhi_ctrl_ev_task(unsigned long data); +void mhi_ev_task(unsigned long data); +int mhi_process_data_event_ring(struct mhi_controller *mhi_cntrl, + struct mhi_event *mhi_event, u32 event_quota); +int mhi_process_ctrl_ev_ring(struct mhi_controller *mhi_cntrl, + struct mhi_event *mhi_event, u32 event_quota); + +/* ISR handlers */ +irqreturn_t mhi_irq_handler(int irq_number, void *dev); +irqreturn_t mhi_intvec_threaded_handler(int irq_number, void *dev); +irqreturn_t mhi_intvec_handler(int irq_number, void *dev); + +int mhi_gen_tre(struct mhi_controller *mhi_cntrl, struct mhi_chan *mhi_chan, + struct mhi_buf_info *info, enum mhi_flags flags); +int mhi_map_single_no_bb(struct mhi_controller *mhi_cntrl, + struct mhi_buf_info *buf_info); +int mhi_map_single_use_bb(struct mhi_controller *mhi_cntrl, + struct mhi_buf_info *buf_info); +void mhi_unmap_single_no_bb(struct mhi_controller *mhi_cntrl, + struct mhi_buf_info *buf_info); +void mhi_unmap_single_use_bb(struct mhi_controller *mhi_cntrl, + struct mhi_buf_info *buf_info); + +#endif /* _MHI_INT_H */ diff --git a/drivers/bus/mhi/host/main.c b/drivers/bus/mhi/host/main.c new file mode 100644 index 000000000..8378c3319 --- /dev/null +++ b/drivers/bus/mhi/host/main.c @@ -0,0 +1,1705 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2018-2020, The Linux Foundation. All rights reserved. 
+ * + */ + +#include <linux/delay.h> +#include <linux/device.h> +#include <linux/dma-direction.h> +#include <linux/dma-mapping.h> +#include <linux/interrupt.h> +#include <linux/list.h> +#include <linux/mhi.h> +#include <linux/module.h> +#include <linux/skbuff.h> +#include <linux/slab.h> +#include "internal.h" + +int __must_check mhi_read_reg(struct mhi_controller *mhi_cntrl, + void __iomem *base, u32 offset, u32 *out) +{ + return mhi_cntrl->read_reg(mhi_cntrl, base + offset, out); +} + +int __must_check mhi_read_reg_field(struct mhi_controller *mhi_cntrl, + void __iomem *base, u32 offset, + u32 mask, u32 *out) +{ + u32 tmp; + int ret; + + ret = mhi_read_reg(mhi_cntrl, base, offset, &tmp); + if (ret) + return ret; + + *out = (tmp & mask) >> __ffs(mask); + + return 0; +} + +int __must_check mhi_poll_reg_field(struct mhi_controller *mhi_cntrl, + void __iomem *base, u32 offset, + u32 mask, u32 val, u32 delayus) +{ + int ret; + u32 out, retry = (mhi_cntrl->timeout_ms * 1000) / delayus; + + while (retry--) { + ret = mhi_read_reg_field(mhi_cntrl, base, offset, mask, &out); + if (ret) + return ret; + + if (out == val) + return 0; + + fsleep(delayus); + } + + return -ETIMEDOUT; +} + +void mhi_write_reg(struct mhi_controller *mhi_cntrl, void __iomem *base, + u32 offset, u32 val) +{ + mhi_cntrl->write_reg(mhi_cntrl, base + offset, val); +} + +int __must_check mhi_write_reg_field(struct mhi_controller *mhi_cntrl, + void __iomem *base, u32 offset, u32 mask, + u32 val) +{ + int ret; + u32 tmp; + + ret = mhi_read_reg(mhi_cntrl, base, offset, &tmp); + if (ret) + return ret; + + tmp &= ~mask; + tmp |= (val << __ffs(mask)); + mhi_write_reg(mhi_cntrl, base, offset, tmp); + + return 0; +} + +void mhi_write_db(struct mhi_controller *mhi_cntrl, void __iomem *db_addr, + dma_addr_t db_val) +{ + mhi_write_reg(mhi_cntrl, db_addr, 4, upper_32_bits(db_val)); + mhi_write_reg(mhi_cntrl, db_addr, 0, lower_32_bits(db_val)); +} + +void mhi_db_brstmode(struct mhi_controller *mhi_cntrl, + struct db_cfg *db_cfg, + void __iomem *db_addr, + dma_addr_t db_val) +{ + if (db_cfg->db_mode) { + db_cfg->db_val = db_val; + mhi_write_db(mhi_cntrl, db_addr, db_val); + db_cfg->db_mode = 0; + } +} + +void mhi_db_brstmode_disable(struct mhi_controller *mhi_cntrl, + struct db_cfg *db_cfg, + void __iomem *db_addr, + dma_addr_t db_val) +{ + db_cfg->db_val = db_val; + mhi_write_db(mhi_cntrl, db_addr, db_val); +} + +void mhi_ring_er_db(struct mhi_event *mhi_event) +{ + struct mhi_ring *ring = &mhi_event->ring; + + mhi_event->db_cfg.process_db(mhi_event->mhi_cntrl, &mhi_event->db_cfg, + ring->db_addr, le64_to_cpu(*ring->ctxt_wp)); +} + +void mhi_ring_cmd_db(struct mhi_controller *mhi_cntrl, struct mhi_cmd *mhi_cmd) +{ + dma_addr_t db; + struct mhi_ring *ring = &mhi_cmd->ring; + + db = ring->iommu_base + (ring->wp - ring->base); + *ring->ctxt_wp = cpu_to_le64(db); + mhi_write_db(mhi_cntrl, ring->db_addr, db); +} + +void mhi_ring_chan_db(struct mhi_controller *mhi_cntrl, + struct mhi_chan *mhi_chan) +{ + struct mhi_ring *ring = &mhi_chan->tre_ring; + dma_addr_t db; + + db = ring->iommu_base + (ring->wp - ring->base); + + /* + * Writes to the new ring element must be visible to the hardware + * before letting h/w know there is new element to fetch. 
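+ * Without the barrier, the device could observe the updated context + * write pointer before the TRE contents are visible and fetch a stale + * element.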
+ */ + dma_wmb(); + *ring->ctxt_wp = cpu_to_le64(db); + + mhi_chan->db_cfg.process_db(mhi_cntrl, &mhi_chan->db_cfg, + ring->db_addr, db); +} + +enum mhi_ee_type mhi_get_exec_env(struct mhi_controller *mhi_cntrl) +{ + u32 exec; + int ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->bhi, BHI_EXECENV, &exec); + + return (ret) ? MHI_EE_MAX : exec; +} +EXPORT_SYMBOL_GPL(mhi_get_exec_env); + +enum mhi_state mhi_get_mhi_state(struct mhi_controller *mhi_cntrl) +{ + u32 state; + int ret = mhi_read_reg_field(mhi_cntrl, mhi_cntrl->regs, MHISTATUS, + MHISTATUS_MHISTATE_MASK, &state); + return ret ? MHI_STATE_MAX : state; +} +EXPORT_SYMBOL_GPL(mhi_get_mhi_state); + +void mhi_soc_reset(struct mhi_controller *mhi_cntrl) +{ + if (mhi_cntrl->reset) { + mhi_cntrl->reset(mhi_cntrl); + return; + } + + /* Generic MHI SoC reset */ + mhi_write_reg(mhi_cntrl, mhi_cntrl->regs, MHI_SOC_RESET_REQ_OFFSET, + MHI_SOC_RESET_REQ); +} +EXPORT_SYMBOL_GPL(mhi_soc_reset); + +int mhi_map_single_no_bb(struct mhi_controller *mhi_cntrl, + struct mhi_buf_info *buf_info) +{ + buf_info->p_addr = dma_map_single(mhi_cntrl->cntrl_dev, + buf_info->v_addr, buf_info->len, + buf_info->dir); + if (dma_mapping_error(mhi_cntrl->cntrl_dev, buf_info->p_addr)) + return -ENOMEM; + + return 0; +} + +int mhi_map_single_use_bb(struct mhi_controller *mhi_cntrl, + struct mhi_buf_info *buf_info) +{ + void *buf = dma_alloc_coherent(mhi_cntrl->cntrl_dev, buf_info->len, + &buf_info->p_addr, GFP_ATOMIC); + + if (!buf) + return -ENOMEM; + + if (buf_info->dir == DMA_TO_DEVICE) + memcpy(buf, buf_info->v_addr, buf_info->len); + + buf_info->bb_addr = buf; + + return 0; +} + +void mhi_unmap_single_no_bb(struct mhi_controller *mhi_cntrl, + struct mhi_buf_info *buf_info) +{ + dma_unmap_single(mhi_cntrl->cntrl_dev, buf_info->p_addr, buf_info->len, + buf_info->dir); +} + +void mhi_unmap_single_use_bb(struct mhi_controller *mhi_cntrl, + struct mhi_buf_info *buf_info) +{ + if (buf_info->dir == DMA_FROM_DEVICE) + memcpy(buf_info->v_addr, buf_info->bb_addr, buf_info->len); + + dma_free_coherent(mhi_cntrl->cntrl_dev, buf_info->len, + buf_info->bb_addr, buf_info->p_addr); +} + +static int get_nr_avail_ring_elements(struct mhi_controller *mhi_cntrl, + struct mhi_ring *ring) +{ + int nr_el; + + if (ring->wp < ring->rp) { + nr_el = ((ring->rp - ring->wp) / ring->el_size) - 1; + } else { + nr_el = (ring->rp - ring->base) / ring->el_size; + nr_el += ((ring->base + ring->len - ring->wp) / + ring->el_size) - 1; + } + + return nr_el; +} + +static void *mhi_to_virtual(struct mhi_ring *ring, dma_addr_t addr) +{ + return (addr - ring->iommu_base) + ring->base; +} + +static void mhi_add_ring_element(struct mhi_controller *mhi_cntrl, + struct mhi_ring *ring) +{ + ring->wp += ring->el_size; + if (ring->wp >= (ring->base + ring->len)) + ring->wp = ring->base; + /* smp update */ + smp_wmb(); +} + +static void mhi_del_ring_element(struct mhi_controller *mhi_cntrl, + struct mhi_ring *ring) +{ + ring->rp += ring->el_size; + if (ring->rp >= (ring->base + ring->len)) + ring->rp = ring->base; + /* smp update */ + smp_wmb(); +} + +static bool is_valid_ring_ptr(struct mhi_ring *ring, dma_addr_t addr) +{ + return addr >= ring->iommu_base && addr < ring->iommu_base + ring->len && + !(addr & (sizeof(struct mhi_ring_element) - 1)); +} + +int mhi_destroy_device(struct device *dev, void *data) +{ + struct mhi_chan *ul_chan, *dl_chan; + struct mhi_device *mhi_dev; + struct mhi_controller *mhi_cntrl; + enum mhi_ee_type ee = MHI_EE_MAX; + + if (dev->bus != &mhi_bus_type) + return 0; + + mhi_dev = 
to_mhi_device(dev);
+	mhi_cntrl = mhi_dev->mhi_cntrl;
+
+	/* Only destroy virtual devices that are attached to the bus */
+	if (mhi_dev->dev_type == MHI_DEVICE_CONTROLLER)
+		return 0;
+
+	ul_chan = mhi_dev->ul_chan;
+	dl_chan = mhi_dev->dl_chan;
+
+	/*
+	 * If an execution environment is specified, remove only those devices
+	 * that were created in it (based on the ee_mask of the channels), as
+	 * we move on to a different execution environment.
+	 */
+	if (data)
+		ee = *(enum mhi_ee_type *)data;
+
+	/*
+	 * For the suspend and resume case, this function will get called
+	 * without mhi_unregister_controller(). Hence, we need to drop the
+	 * references to mhi_dev created for ul and dl channels. We can
+	 * be sure that there will be no instances of mhi_dev left after
+	 * this.
+	 */
+	if (ul_chan) {
+		if (ee != MHI_EE_MAX && !(ul_chan->ee_mask & BIT(ee)))
+			return 0;
+
+		put_device(&ul_chan->mhi_dev->dev);
+	}
+
+	if (dl_chan) {
+		if (ee != MHI_EE_MAX && !(dl_chan->ee_mask & BIT(ee)))
+			return 0;
+
+		put_device(&dl_chan->mhi_dev->dev);
+	}
+
+	dev_dbg(&mhi_cntrl->mhi_dev->dev, "destroy device for chan:%s\n",
+		mhi_dev->name);
+
+	/* Notify the client and remove the device from the MHI bus */
+	device_del(dev);
+	put_device(dev);
+
+	return 0;
+}
+
+int mhi_get_free_desc_count(struct mhi_device *mhi_dev,
+			    enum dma_data_direction dir)
+{
+	struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
+	struct mhi_chan *mhi_chan = (dir == DMA_TO_DEVICE) ?
+		mhi_dev->ul_chan : mhi_dev->dl_chan;
+	struct mhi_ring *tre_ring = &mhi_chan->tre_ring;
+
+	return get_nr_avail_ring_elements(mhi_cntrl, tre_ring);
+}
+EXPORT_SYMBOL_GPL(mhi_get_free_desc_count);
+
+void mhi_notify(struct mhi_device *mhi_dev, enum mhi_callback cb_reason)
+{
+	struct mhi_driver *mhi_drv;
+
+	if (!mhi_dev->dev.driver)
+		return;
+
+	mhi_drv = to_mhi_driver(mhi_dev->dev.driver);
+
+	if (mhi_drv->status_cb)
+		mhi_drv->status_cb(mhi_dev, cb_reason);
+}
+EXPORT_SYMBOL_GPL(mhi_notify);
+
+/* Bind MHI channels to MHI devices */
+void mhi_create_devices(struct mhi_controller *mhi_cntrl)
+{
+	struct mhi_chan *mhi_chan;
+	struct mhi_device *mhi_dev;
+	struct device *dev = &mhi_cntrl->mhi_dev->dev;
+	int i, ret;
+
+	mhi_chan = mhi_cntrl->mhi_chan;
+	for (i = 0; i < mhi_cntrl->max_chan; i++, mhi_chan++) {
+		if (!mhi_chan->configured || mhi_chan->mhi_dev ||
+		    !(mhi_chan->ee_mask & BIT(mhi_cntrl->ee)))
+			continue;
+		mhi_dev = mhi_alloc_device(mhi_cntrl);
+		if (IS_ERR(mhi_dev))
+			return;
+
+		mhi_dev->dev_type = MHI_DEVICE_XFER;
+		switch (mhi_chan->dir) {
+		case DMA_TO_DEVICE:
+			mhi_dev->ul_chan = mhi_chan;
+			mhi_dev->ul_chan_id = mhi_chan->chan;
+			break;
+		case DMA_FROM_DEVICE:
+			/* We use dl_chan as offload channels */
+			mhi_dev->dl_chan = mhi_chan;
+			mhi_dev->dl_chan_id = mhi_chan->chan;
+			break;
+		default:
+			dev_err(dev, "Direction not supported\n");
+			put_device(&mhi_dev->dev);
+			return;
+		}
+
+		get_device(&mhi_dev->dev);
+		mhi_chan->mhi_dev = mhi_dev;
+
+		/* Check whether the next channel matches this one */
+		if ((i + 1) < mhi_cntrl->max_chan && mhi_chan[1].configured) {
+			if (!strcmp(mhi_chan[1].name, mhi_chan->name)) {
+				i++;
+				mhi_chan++;
+				if (mhi_chan->dir == DMA_TO_DEVICE) {
+					mhi_dev->ul_chan = mhi_chan;
+					mhi_dev->ul_chan_id = mhi_chan->chan;
+				} else {
+					mhi_dev->dl_chan = mhi_chan;
+					mhi_dev->dl_chan_id = mhi_chan->chan;
+				}
+				get_device(&mhi_dev->dev);
+				mhi_chan->mhi_dev = mhi_dev;
+			}
+		}
+
+		/* Channel name is the same for both UL and DL */
+		mhi_dev->name = mhi_chan->name;
+		dev_set_name(&mhi_dev->dev, "%s_%s",
+			     dev_name(&mhi_cntrl->mhi_dev->dev),
+			     mhi_dev->name);
+
+		/* Init wakeup source if available */
+		if (mhi_dev->dl_chan && mhi_dev->dl_chan->wake_capable)
+			device_init_wakeup(&mhi_dev->dev, true);
+
+		ret = device_add(&mhi_dev->dev);
+		if (ret)
+			put_device(&mhi_dev->dev);
+	}
+}
+
+irqreturn_t mhi_irq_handler(int irq_number, void *dev)
+{
+	struct mhi_event *mhi_event = dev;
+	struct mhi_controller *mhi_cntrl = mhi_event->mhi_cntrl;
+	struct mhi_event_ctxt *er_ctxt;
+	struct mhi_ring *ev_ring = &mhi_event->ring;
+	dma_addr_t ptr;
+	void *dev_rp;
+
+	/*
+	 * If CONFIG_DEBUG_SHIRQ is set, the IRQ handler will get invoked
+	 * during __free_irq() and by that time mhi_ctxt would already have
+	 * been freed. So check for the existence of mhi_ctxt before handling
+	 * the IRQs.
+	 */
+	if (!mhi_cntrl->mhi_ctxt) {
+		dev_dbg(&mhi_cntrl->mhi_dev->dev,
+			"mhi_ctxt has been freed\n");
+		return IRQ_HANDLED;
+	}
+
+	er_ctxt = &mhi_cntrl->mhi_ctxt->er_ctxt[mhi_event->er_index];
+	ptr = le64_to_cpu(er_ctxt->rp);
+
+	if (!is_valid_ring_ptr(ev_ring, ptr)) {
+		dev_err(&mhi_cntrl->mhi_dev->dev,
+			"Event ring rp points outside of the event ring\n");
+		return IRQ_HANDLED;
+	}
+
+	dev_rp = mhi_to_virtual(ev_ring, ptr);
+
+	/* Only proceed if the event ring has pending events */
+	if (ev_ring->rp == dev_rp)
+		return IRQ_HANDLED;
+
+	/* For client-managed event rings, notify pending data */
+	if (mhi_event->cl_manage) {
+		struct mhi_chan *mhi_chan = mhi_event->mhi_chan;
+		struct mhi_device *mhi_dev = mhi_chan->mhi_dev;
+
+		if (mhi_dev)
+			mhi_notify(mhi_dev, MHI_CB_PENDING_DATA);
+	} else {
+		tasklet_schedule(&mhi_event->task);
+	}
+
+	return IRQ_HANDLED;
+}
+
+irqreturn_t mhi_intvec_threaded_handler(int irq_number, void *priv)
+{
+	struct mhi_controller *mhi_cntrl = priv;
+	struct device *dev = &mhi_cntrl->mhi_dev->dev;
+	enum mhi_state state;
+	enum mhi_pm_state pm_state = 0;
+	enum mhi_ee_type ee;
+
+	write_lock_irq(&mhi_cntrl->pm_lock);
+	if (!MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) {
+		write_unlock_irq(&mhi_cntrl->pm_lock);
+		goto exit_intvec;
+	}
+
+	state = mhi_get_mhi_state(mhi_cntrl);
+	ee = mhi_get_exec_env(mhi_cntrl);
+	dev_dbg(dev, "local ee: %s state: %s device ee: %s state: %s\n",
+		TO_MHI_EXEC_STR(mhi_cntrl->ee),
+		mhi_state_str(mhi_cntrl->dev_state),
+		TO_MHI_EXEC_STR(ee), mhi_state_str(state));
+
+	if (state == MHI_STATE_SYS_ERR) {
+		dev_dbg(dev, "System error detected\n");
+		pm_state = mhi_tryset_pm_state(mhi_cntrl,
+					       MHI_PM_SYS_ERR_DETECT);
+	}
+	write_unlock_irq(&mhi_cntrl->pm_lock);
+
+	if (pm_state != MHI_PM_SYS_ERR_DETECT)
+		goto exit_intvec;
+
+	switch (ee) {
+	case MHI_EE_RDDM:
+		/* proceed only if power down is not already in progress */
+		if (mhi_cntrl->rddm_image && mhi_is_active(mhi_cntrl)) {
+			mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_EE_RDDM);
+			mhi_cntrl->ee = ee;
+			wake_up_all(&mhi_cntrl->state_event);
+		}
+		break;
+	case MHI_EE_PBL:
+	case MHI_EE_EDL:
+	case MHI_EE_PTHRU:
+		mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_FATAL_ERROR);
+		mhi_cntrl->ee = ee;
+		wake_up_all(&mhi_cntrl->state_event);
+		mhi_pm_sys_err_handler(mhi_cntrl);
+		break;
+	default:
+		wake_up_all(&mhi_cntrl->state_event);
+		mhi_pm_sys_err_handler(mhi_cntrl);
+		break;
+	}
+
+exit_intvec:
+
+	return IRQ_HANDLED;
+}
+
+irqreturn_t mhi_intvec_handler(int irq_number, void *dev)
+{
+	struct mhi_controller *mhi_cntrl = dev;
+
+	/* Wake up any tasks waiting for a state change */
+	wake_up_all(&mhi_cntrl->state_event);
+
+	return IRQ_WAKE_THREAD;
+}
+
+static void mhi_recycle_ev_ring_element(struct mhi_controller *mhi_cntrl,
+					struct mhi_ring *ring)
+{
+	/* Update the WP */
+	ring->wp += ring->el_size;
+
+	if (ring->wp >=
(ring->base + ring->len)) + ring->wp = ring->base; + + *ring->ctxt_wp = cpu_to_le64(ring->iommu_base + (ring->wp - ring->base)); + + /* Update the RP */ + ring->rp += ring->el_size; + if (ring->rp >= (ring->base + ring->len)) + ring->rp = ring->base; + + /* Update to all cores */ + smp_wmb(); +} + +static int parse_xfer_event(struct mhi_controller *mhi_cntrl, + struct mhi_ring_element *event, + struct mhi_chan *mhi_chan) +{ + struct mhi_ring *buf_ring, *tre_ring; + struct device *dev = &mhi_cntrl->mhi_dev->dev; + struct mhi_result result; + unsigned long flags = 0; + u32 ev_code; + + ev_code = MHI_TRE_GET_EV_CODE(event); + buf_ring = &mhi_chan->buf_ring; + tre_ring = &mhi_chan->tre_ring; + + result.transaction_status = (ev_code == MHI_EV_CC_OVERFLOW) ? + -EOVERFLOW : 0; + + /* + * If it's a DB Event then we need to grab the lock + * with preemption disabled and as a write because we + * have to update db register and there are chances that + * another thread could be doing the same. + */ + if (ev_code >= MHI_EV_CC_OOB) + write_lock_irqsave(&mhi_chan->lock, flags); + else + read_lock_bh(&mhi_chan->lock); + + if (mhi_chan->ch_state != MHI_CH_STATE_ENABLED) + goto end_process_tx_event; + + switch (ev_code) { + case MHI_EV_CC_OVERFLOW: + case MHI_EV_CC_EOB: + case MHI_EV_CC_EOT: + { + dma_addr_t ptr = MHI_TRE_GET_EV_PTR(event); + struct mhi_ring_element *local_rp, *ev_tre; + void *dev_rp; + struct mhi_buf_info *buf_info; + u16 xfer_len; + + if (!is_valid_ring_ptr(tre_ring, ptr)) { + dev_err(&mhi_cntrl->mhi_dev->dev, + "Event element points outside of the tre ring\n"); + break; + } + /* Get the TRB this event points to */ + ev_tre = mhi_to_virtual(tre_ring, ptr); + + dev_rp = ev_tre + 1; + if (dev_rp >= (tre_ring->base + tre_ring->len)) + dev_rp = tre_ring->base; + + result.dir = mhi_chan->dir; + + local_rp = tre_ring->rp; + while (local_rp != dev_rp) { + buf_info = buf_ring->rp; + /* If it's the last TRE, get length from the event */ + if (local_rp == ev_tre) + xfer_len = MHI_TRE_GET_EV_LEN(event); + else + xfer_len = buf_info->len; + + /* Unmap if it's not pre-mapped by client */ + if (likely(!buf_info->pre_mapped)) + mhi_cntrl->unmap_single(mhi_cntrl, buf_info); + + result.buf_addr = buf_info->cb_buf; + + /* truncate to buf len if xfer_len is larger */ + result.bytes_xferd = + min_t(u16, xfer_len, buf_info->len); + mhi_del_ring_element(mhi_cntrl, buf_ring); + mhi_del_ring_element(mhi_cntrl, tre_ring); + local_rp = tre_ring->rp; + + read_unlock_bh(&mhi_chan->lock); + + /* notify client */ + mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result); + + if (mhi_chan->dir == DMA_TO_DEVICE) { + atomic_dec(&mhi_cntrl->pending_pkts); + /* Release the reference got from mhi_queue() */ + mhi_cntrl->runtime_put(mhi_cntrl); + } + + /* + * Recycle the buffer if buffer is pre-allocated, + * if there is an error, not much we can do apart + * from dropping the packet + */ + if (mhi_chan->pre_alloc) { + if (mhi_queue_buf(mhi_chan->mhi_dev, + mhi_chan->dir, + buf_info->cb_buf, + buf_info->len, MHI_EOT)) { + dev_err(dev, + "Error recycling buffer for chan:%d\n", + mhi_chan->chan); + kfree(buf_info->cb_buf); + } + } + + read_lock_bh(&mhi_chan->lock); + } + break; + } /* CC_EOT */ + case MHI_EV_CC_OOB: + case MHI_EV_CC_DB_MODE: + { + unsigned long pm_lock_flags; + + mhi_chan->db_cfg.db_mode = 1; + read_lock_irqsave(&mhi_cntrl->pm_lock, pm_lock_flags); + if (tre_ring->wp != tre_ring->rp && + MHI_DB_ACCESS_VALID(mhi_cntrl)) { + mhi_ring_chan_db(mhi_cntrl, mhi_chan); + } + read_unlock_irqrestore(&mhi_cntrl->pm_lock, 
pm_lock_flags);
+		break;
+	}
+	case MHI_EV_CC_BAD_TRE:
+	default:
+		dev_err(dev, "Unknown event 0x%x\n", ev_code);
+		break;
+	} /* switch(MHI_EV_READ_CODE(EV_TRB_CODE, event)) */
+
+end_process_tx_event:
+	if (ev_code >= MHI_EV_CC_OOB)
+		write_unlock_irqrestore(&mhi_chan->lock, flags);
+	else
+		read_unlock_bh(&mhi_chan->lock);
+
+	return 0;
+}
+
+static int parse_rsc_event(struct mhi_controller *mhi_cntrl,
+			   struct mhi_ring_element *event,
+			   struct mhi_chan *mhi_chan)
+{
+	struct mhi_ring *buf_ring, *tre_ring;
+	struct mhi_buf_info *buf_info;
+	struct mhi_result result;
+	int ev_code;
+	u32 cookie; /* offset to local descriptor */
+	u16 xfer_len;
+
+	buf_ring = &mhi_chan->buf_ring;
+	tre_ring = &mhi_chan->tre_ring;
+
+	ev_code = MHI_TRE_GET_EV_CODE(event);
+	cookie = MHI_TRE_GET_EV_COOKIE(event);
+	xfer_len = MHI_TRE_GET_EV_LEN(event);
+
+	/* Received an out-of-bounds cookie */
+	WARN_ON(cookie >= buf_ring->len);
+
+	buf_info = buf_ring->base + cookie;
+
+	result.transaction_status = (ev_code == MHI_EV_CC_OVERFLOW) ?
+		-EOVERFLOW : 0;
+
+	/* truncate to the buffer length if xfer_len is larger */
+	result.bytes_xferd = min_t(u16, xfer_len, buf_info->len);
+	result.buf_addr = buf_info->cb_buf;
+	result.dir = mhi_chan->dir;
+
+	read_lock_bh(&mhi_chan->lock);
+
+	if (mhi_chan->ch_state != MHI_CH_STATE_ENABLED)
+		goto end_process_rsc_event;
+
+	WARN_ON(!buf_info->used);
+
+	/* notify the client */
+	mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result);
+
+	/*
+	 * Note: we arbitrarily increment RP even though the completion packet
+	 * we just processed might not correspond to it. We can do this
+	 * because the device is guaranteed to cache descriptors in the order
+	 * it receives them, so even if the completion event is for a
+	 * different descriptor, all descriptors in between can be reused.
+	 * Example:
+	 * The transfer ring has descriptors: A, B, C, D.
+	 * The last descriptor the host queued is D (WP) and the first is
+	 * A (RP).
+	 * The completion event we just serviced is for descriptor C.
+	 * We can then safely queue replacements for A, B, and C even though
+	 * the host did not receive completions for A and B.
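+	 * In other words, completions on RSC channels may arrive out of
+	 * order (hence the cookie lookup above), while the transfer ring
+	 * itself is still reclaimed linearly.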
+ */ + mhi_del_ring_element(mhi_cntrl, tre_ring); + buf_info->used = false; + +end_process_rsc_event: + read_unlock_bh(&mhi_chan->lock); + + return 0; +} + +static void mhi_process_cmd_completion(struct mhi_controller *mhi_cntrl, + struct mhi_ring_element *tre) +{ + dma_addr_t ptr = MHI_TRE_GET_EV_PTR(tre); + struct mhi_cmd *cmd_ring = &mhi_cntrl->mhi_cmd[PRIMARY_CMD_RING]; + struct mhi_ring *mhi_ring = &cmd_ring->ring; + struct mhi_ring_element *cmd_pkt; + struct mhi_chan *mhi_chan; + u32 chan; + + if (!is_valid_ring_ptr(mhi_ring, ptr)) { + dev_err(&mhi_cntrl->mhi_dev->dev, + "Event element points outside of the cmd ring\n"); + return; + } + + cmd_pkt = mhi_to_virtual(mhi_ring, ptr); + + chan = MHI_TRE_GET_CMD_CHID(cmd_pkt); + + if (chan < mhi_cntrl->max_chan && + mhi_cntrl->mhi_chan[chan].configured) { + mhi_chan = &mhi_cntrl->mhi_chan[chan]; + write_lock_bh(&mhi_chan->lock); + mhi_chan->ccs = MHI_TRE_GET_EV_CODE(tre); + complete(&mhi_chan->completion); + write_unlock_bh(&mhi_chan->lock); + } else { + dev_err(&mhi_cntrl->mhi_dev->dev, + "Completion packet for invalid channel ID: %d\n", chan); + } + + mhi_del_ring_element(mhi_cntrl, mhi_ring); +} + +int mhi_process_ctrl_ev_ring(struct mhi_controller *mhi_cntrl, + struct mhi_event *mhi_event, + u32 event_quota) +{ + struct mhi_ring_element *dev_rp, *local_rp; + struct mhi_ring *ev_ring = &mhi_event->ring; + struct mhi_event_ctxt *er_ctxt = + &mhi_cntrl->mhi_ctxt->er_ctxt[mhi_event->er_index]; + struct mhi_chan *mhi_chan; + struct device *dev = &mhi_cntrl->mhi_dev->dev; + u32 chan; + int count = 0; + dma_addr_t ptr = le64_to_cpu(er_ctxt->rp); + + /* + * This is a quick check to avoid unnecessary event processing + * in case MHI is already in error state, but it's still possible + * to transition to error state while processing events + */ + if (unlikely(MHI_EVENT_ACCESS_INVALID(mhi_cntrl->pm_state))) + return -EIO; + + if (!is_valid_ring_ptr(ev_ring, ptr)) { + dev_err(&mhi_cntrl->mhi_dev->dev, + "Event ring rp points outside of the event ring\n"); + return -EIO; + } + + dev_rp = mhi_to_virtual(ev_ring, ptr); + local_rp = ev_ring->rp; + + while (dev_rp != local_rp) { + enum mhi_pkt_type type = MHI_TRE_GET_EV_TYPE(local_rp); + + switch (type) { + case MHI_PKT_TYPE_BW_REQ_EVENT: + { + struct mhi_link_info *link_info; + + link_info = &mhi_cntrl->mhi_link_info; + write_lock_irq(&mhi_cntrl->pm_lock); + link_info->target_link_speed = + MHI_TRE_GET_EV_LINKSPEED(local_rp); + link_info->target_link_width = + MHI_TRE_GET_EV_LINKWIDTH(local_rp); + write_unlock_irq(&mhi_cntrl->pm_lock); + dev_dbg(dev, "Received BW_REQ event\n"); + mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_BW_REQ); + break; + } + case MHI_PKT_TYPE_STATE_CHANGE_EVENT: + { + enum mhi_state new_state; + + new_state = MHI_TRE_GET_EV_STATE(local_rp); + + dev_dbg(dev, "State change event to state: %s\n", + mhi_state_str(new_state)); + + switch (new_state) { + case MHI_STATE_M0: + mhi_pm_m0_transition(mhi_cntrl); + break; + case MHI_STATE_M1: + mhi_pm_m1_transition(mhi_cntrl); + break; + case MHI_STATE_M3: + mhi_pm_m3_transition(mhi_cntrl); + break; + case MHI_STATE_SYS_ERR: + { + enum mhi_pm_state pm_state; + + dev_dbg(dev, "System error detected\n"); + write_lock_irq(&mhi_cntrl->pm_lock); + pm_state = mhi_tryset_pm_state(mhi_cntrl, + MHI_PM_SYS_ERR_DETECT); + write_unlock_irq(&mhi_cntrl->pm_lock); + if (pm_state == MHI_PM_SYS_ERR_DETECT) + mhi_pm_sys_err_handler(mhi_cntrl); + break; + } + default: + dev_err(dev, "Invalid state: %s\n", + mhi_state_str(new_state)); + } + + break; + } + case 
MHI_PKT_TYPE_CMD_COMPLETION_EVENT: + mhi_process_cmd_completion(mhi_cntrl, local_rp); + break; + case MHI_PKT_TYPE_EE_EVENT: + { + enum dev_st_transition st = DEV_ST_TRANSITION_MAX; + enum mhi_ee_type event = MHI_TRE_GET_EV_EXECENV(local_rp); + + dev_dbg(dev, "Received EE event: %s\n", + TO_MHI_EXEC_STR(event)); + switch (event) { + case MHI_EE_SBL: + st = DEV_ST_TRANSITION_SBL; + break; + case MHI_EE_WFW: + case MHI_EE_AMSS: + st = DEV_ST_TRANSITION_MISSION_MODE; + break; + case MHI_EE_FP: + st = DEV_ST_TRANSITION_FP; + break; + case MHI_EE_RDDM: + mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_EE_RDDM); + write_lock_irq(&mhi_cntrl->pm_lock); + mhi_cntrl->ee = event; + write_unlock_irq(&mhi_cntrl->pm_lock); + wake_up_all(&mhi_cntrl->state_event); + break; + default: + dev_err(dev, + "Unhandled EE event: 0x%x\n", type); + } + if (st != DEV_ST_TRANSITION_MAX) + mhi_queue_state_transition(mhi_cntrl, st); + + break; + } + case MHI_PKT_TYPE_TX_EVENT: + chan = MHI_TRE_GET_EV_CHID(local_rp); + + WARN_ON(chan >= mhi_cntrl->max_chan); + + /* + * Only process the event ring elements whose channel + * ID is within the maximum supported range. + */ + if (chan < mhi_cntrl->max_chan) { + mhi_chan = &mhi_cntrl->mhi_chan[chan]; + if (!mhi_chan->configured) + break; + parse_xfer_event(mhi_cntrl, local_rp, mhi_chan); + event_quota--; + } + break; + default: + dev_err(dev, "Unhandled event type: %d\n", type); + break; + } + + mhi_recycle_ev_ring_element(mhi_cntrl, ev_ring); + local_rp = ev_ring->rp; + + ptr = le64_to_cpu(er_ctxt->rp); + if (!is_valid_ring_ptr(ev_ring, ptr)) { + dev_err(&mhi_cntrl->mhi_dev->dev, + "Event ring rp points outside of the event ring\n"); + return -EIO; + } + + dev_rp = mhi_to_virtual(ev_ring, ptr); + count++; + } + + read_lock_bh(&mhi_cntrl->pm_lock); + if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl))) + mhi_ring_er_db(mhi_event); + read_unlock_bh(&mhi_cntrl->pm_lock); + + return count; +} + +int mhi_process_data_event_ring(struct mhi_controller *mhi_cntrl, + struct mhi_event *mhi_event, + u32 event_quota) +{ + struct mhi_ring_element *dev_rp, *local_rp; + struct mhi_ring *ev_ring = &mhi_event->ring; + struct mhi_event_ctxt *er_ctxt = + &mhi_cntrl->mhi_ctxt->er_ctxt[mhi_event->er_index]; + int count = 0; + u32 chan; + struct mhi_chan *mhi_chan; + dma_addr_t ptr = le64_to_cpu(er_ctxt->rp); + + if (unlikely(MHI_EVENT_ACCESS_INVALID(mhi_cntrl->pm_state))) + return -EIO; + + if (!is_valid_ring_ptr(ev_ring, ptr)) { + dev_err(&mhi_cntrl->mhi_dev->dev, + "Event ring rp points outside of the event ring\n"); + return -EIO; + } + + dev_rp = mhi_to_virtual(ev_ring, ptr); + local_rp = ev_ring->rp; + + while (dev_rp != local_rp && event_quota > 0) { + enum mhi_pkt_type type = MHI_TRE_GET_EV_TYPE(local_rp); + + chan = MHI_TRE_GET_EV_CHID(local_rp); + + WARN_ON(chan >= mhi_cntrl->max_chan); + + /* + * Only process the event ring elements whose channel + * ID is within the maximum supported range. 
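+		 * A misbehaving device could report an out-of-range channel
+		 * ID; using it to index mhi_cntrl->mhi_chan[] would read past
+		 * the array, so such events are skipped (after the WARN_ON
+		 * above) and merely recycled.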
+		 */
+		if (chan < mhi_cntrl->max_chan &&
+		    mhi_cntrl->mhi_chan[chan].configured) {
+			mhi_chan = &mhi_cntrl->mhi_chan[chan];
+
+			if (likely(type == MHI_PKT_TYPE_TX_EVENT)) {
+				parse_xfer_event(mhi_cntrl, local_rp, mhi_chan);
+				event_quota--;
+			} else if (type == MHI_PKT_TYPE_RSC_TX_EVENT) {
+				parse_rsc_event(mhi_cntrl, local_rp, mhi_chan);
+				event_quota--;
+			}
+		}
+
+		mhi_recycle_ev_ring_element(mhi_cntrl, ev_ring);
+		local_rp = ev_ring->rp;
+
+		ptr = le64_to_cpu(er_ctxt->rp);
+		if (!is_valid_ring_ptr(ev_ring, ptr)) {
+			dev_err(&mhi_cntrl->mhi_dev->dev,
+				"Event ring rp points outside of the event ring\n");
+			return -EIO;
+		}
+
+		dev_rp = mhi_to_virtual(ev_ring, ptr);
+		count++;
+	}
+	read_lock_bh(&mhi_cntrl->pm_lock);
+	if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl)))
+		mhi_ring_er_db(mhi_event);
+	read_unlock_bh(&mhi_cntrl->pm_lock);
+
+	return count;
+}
+
+void mhi_ev_task(unsigned long data)
+{
+	struct mhi_event *mhi_event = (struct mhi_event *)data;
+	struct mhi_controller *mhi_cntrl = mhi_event->mhi_cntrl;
+
+	/* process all pending events */
+	spin_lock_bh(&mhi_event->lock);
+	mhi_event->process_event(mhi_cntrl, mhi_event, U32_MAX);
+	spin_unlock_bh(&mhi_event->lock);
+}
+
+void mhi_ctrl_ev_task(unsigned long data)
+{
+	struct mhi_event *mhi_event = (struct mhi_event *)data;
+	struct mhi_controller *mhi_cntrl = mhi_event->mhi_cntrl;
+	struct device *dev = &mhi_cntrl->mhi_dev->dev;
+	enum mhi_state state;
+	enum mhi_pm_state pm_state = 0;
+	int ret;
+
+	/*
+	 * We can check the PM state without a lock here because there is no
+	 * way the PM state can change from reg access valid to no access
+	 * while this thread is executing.
+	 */
+	if (!MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) {
+		/*
+		 * We may have a pending event but are not allowed to process
+		 * it since we are probably in a suspended state, so trigger
+		 * a resume.
+		 */
+		mhi_trigger_resume(mhi_cntrl);
+
+		return;
+	}
+
+	/* Process ctrl events */
+	ret = mhi_event->process_event(mhi_cntrl, mhi_event, U32_MAX);
+
+	/*
+	 * We received an IRQ but there were no events to process. Maybe the
+	 * device went to SYS_ERR state? Check the state to confirm.
+	 */
+	if (!ret) {
+		write_lock_irq(&mhi_cntrl->pm_lock);
+		state = mhi_get_mhi_state(mhi_cntrl);
+		if (state == MHI_STATE_SYS_ERR) {
+			dev_dbg(dev, "System error detected\n");
+			pm_state = mhi_tryset_pm_state(mhi_cntrl,
+						       MHI_PM_SYS_ERR_DETECT);
+		}
+		write_unlock_irq(&mhi_cntrl->pm_lock);
+		if (pm_state == MHI_PM_SYS_ERR_DETECT)
+			mhi_pm_sys_err_handler(mhi_cntrl);
+	}
+}
+
+static bool mhi_is_ring_full(struct mhi_controller *mhi_cntrl,
+			     struct mhi_ring *ring)
+{
+	void *tmp = ring->wp + ring->el_size;
+
+	if (tmp >= (ring->base + ring->len))
+		tmp = ring->base;
+
+	return (tmp == ring->rp);
+}
+
+static int mhi_queue(struct mhi_device *mhi_dev, struct mhi_buf_info *buf_info,
+		     enum dma_data_direction dir, enum mhi_flags mflags)
+{
+	struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
+	struct mhi_chan *mhi_chan = (dir == DMA_TO_DEVICE) ?
mhi_dev->ul_chan : + mhi_dev->dl_chan; + struct mhi_ring *tre_ring = &mhi_chan->tre_ring; + unsigned long flags; + int ret; + + if (unlikely(MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state))) + return -EIO; + + ret = mhi_is_ring_full(mhi_cntrl, tre_ring); + if (unlikely(ret)) + return -EAGAIN; + + ret = mhi_gen_tre(mhi_cntrl, mhi_chan, buf_info, mflags); + if (unlikely(ret)) + return ret; + + read_lock_irqsave(&mhi_cntrl->pm_lock, flags); + + /* Packet is queued, take a usage ref to exit M3 if necessary + * for host->device buffer, balanced put is done on buffer completion + * for device->host buffer, balanced put is after ringing the DB + */ + mhi_cntrl->runtime_get(mhi_cntrl); + + /* Assert dev_wake (to exit/prevent M1/M2)*/ + mhi_cntrl->wake_toggle(mhi_cntrl); + + if (mhi_chan->dir == DMA_TO_DEVICE) + atomic_inc(&mhi_cntrl->pending_pkts); + + if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl))) + mhi_ring_chan_db(mhi_cntrl, mhi_chan); + + if (dir == DMA_FROM_DEVICE) + mhi_cntrl->runtime_put(mhi_cntrl); + + read_unlock_irqrestore(&mhi_cntrl->pm_lock, flags); + + return ret; +} + +int mhi_queue_skb(struct mhi_device *mhi_dev, enum dma_data_direction dir, + struct sk_buff *skb, size_t len, enum mhi_flags mflags) +{ + struct mhi_chan *mhi_chan = (dir == DMA_TO_DEVICE) ? mhi_dev->ul_chan : + mhi_dev->dl_chan; + struct mhi_buf_info buf_info = { }; + + buf_info.v_addr = skb->data; + buf_info.cb_buf = skb; + buf_info.len = len; + + if (unlikely(mhi_chan->pre_alloc)) + return -EINVAL; + + return mhi_queue(mhi_dev, &buf_info, dir, mflags); +} +EXPORT_SYMBOL_GPL(mhi_queue_skb); + +int mhi_queue_dma(struct mhi_device *mhi_dev, enum dma_data_direction dir, + struct mhi_buf *mhi_buf, size_t len, enum mhi_flags mflags) +{ + struct mhi_chan *mhi_chan = (dir == DMA_TO_DEVICE) ? 
mhi_dev->ul_chan : + mhi_dev->dl_chan; + struct mhi_buf_info buf_info = { }; + + buf_info.p_addr = mhi_buf->dma_addr; + buf_info.cb_buf = mhi_buf; + buf_info.pre_mapped = true; + buf_info.len = len; + + if (unlikely(mhi_chan->pre_alloc)) + return -EINVAL; + + return mhi_queue(mhi_dev, &buf_info, dir, mflags); +} +EXPORT_SYMBOL_GPL(mhi_queue_dma); + +int mhi_gen_tre(struct mhi_controller *mhi_cntrl, struct mhi_chan *mhi_chan, + struct mhi_buf_info *info, enum mhi_flags flags) +{ + struct mhi_ring *buf_ring, *tre_ring; + struct mhi_ring_element *mhi_tre; + struct mhi_buf_info *buf_info; + int eot, eob, chain, bei; + int ret; + + /* Protect accesses for reading and incrementing WP */ + write_lock_bh(&mhi_chan->lock); + + buf_ring = &mhi_chan->buf_ring; + tre_ring = &mhi_chan->tre_ring; + + buf_info = buf_ring->wp; + WARN_ON(buf_info->used); + buf_info->pre_mapped = info->pre_mapped; + if (info->pre_mapped) + buf_info->p_addr = info->p_addr; + else + buf_info->v_addr = info->v_addr; + buf_info->cb_buf = info->cb_buf; + buf_info->wp = tre_ring->wp; + buf_info->dir = mhi_chan->dir; + buf_info->len = info->len; + + if (!info->pre_mapped) { + ret = mhi_cntrl->map_single(mhi_cntrl, buf_info); + if (ret) { + write_unlock_bh(&mhi_chan->lock); + return ret; + } + } + + eob = !!(flags & MHI_EOB); + eot = !!(flags & MHI_EOT); + chain = !!(flags & MHI_CHAIN); + bei = !!(mhi_chan->intmod); + + mhi_tre = tre_ring->wp; + mhi_tre->ptr = MHI_TRE_DATA_PTR(buf_info->p_addr); + mhi_tre->dword[0] = MHI_TRE_DATA_DWORD0(info->len); + mhi_tre->dword[1] = MHI_TRE_DATA_DWORD1(bei, eot, eob, chain); + + /* increment WP */ + mhi_add_ring_element(mhi_cntrl, tre_ring); + mhi_add_ring_element(mhi_cntrl, buf_ring); + + write_unlock_bh(&mhi_chan->lock); + + return 0; +} + +int mhi_queue_buf(struct mhi_device *mhi_dev, enum dma_data_direction dir, + void *buf, size_t len, enum mhi_flags mflags) +{ + struct mhi_buf_info buf_info = { }; + + buf_info.v_addr = buf; + buf_info.cb_buf = buf; + buf_info.len = len; + + return mhi_queue(mhi_dev, &buf_info, dir, mflags); +} +EXPORT_SYMBOL_GPL(mhi_queue_buf); + +bool mhi_queue_is_full(struct mhi_device *mhi_dev, enum dma_data_direction dir) +{ + struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl; + struct mhi_chan *mhi_chan = (dir == DMA_TO_DEVICE) ? 
+ mhi_dev->ul_chan : mhi_dev->dl_chan; + struct mhi_ring *tre_ring = &mhi_chan->tre_ring; + + return mhi_is_ring_full(mhi_cntrl, tre_ring); +} +EXPORT_SYMBOL_GPL(mhi_queue_is_full); + +int mhi_send_cmd(struct mhi_controller *mhi_cntrl, + struct mhi_chan *mhi_chan, + enum mhi_cmd_type cmd) +{ + struct mhi_ring_element *cmd_tre = NULL; + struct mhi_cmd *mhi_cmd = &mhi_cntrl->mhi_cmd[PRIMARY_CMD_RING]; + struct mhi_ring *ring = &mhi_cmd->ring; + struct device *dev = &mhi_cntrl->mhi_dev->dev; + int chan = 0; + + if (mhi_chan) + chan = mhi_chan->chan; + + spin_lock_bh(&mhi_cmd->lock); + if (!get_nr_avail_ring_elements(mhi_cntrl, ring)) { + spin_unlock_bh(&mhi_cmd->lock); + return -ENOMEM; + } + + /* prepare the cmd tre */ + cmd_tre = ring->wp; + switch (cmd) { + case MHI_CMD_RESET_CHAN: + cmd_tre->ptr = MHI_TRE_CMD_RESET_PTR; + cmd_tre->dword[0] = MHI_TRE_CMD_RESET_DWORD0; + cmd_tre->dword[1] = MHI_TRE_CMD_RESET_DWORD1(chan); + break; + case MHI_CMD_STOP_CHAN: + cmd_tre->ptr = MHI_TRE_CMD_STOP_PTR; + cmd_tre->dword[0] = MHI_TRE_CMD_STOP_DWORD0; + cmd_tre->dword[1] = MHI_TRE_CMD_STOP_DWORD1(chan); + break; + case MHI_CMD_START_CHAN: + cmd_tre->ptr = MHI_TRE_CMD_START_PTR; + cmd_tre->dword[0] = MHI_TRE_CMD_START_DWORD0; + cmd_tre->dword[1] = MHI_TRE_CMD_START_DWORD1(chan); + break; + default: + dev_err(dev, "Command not supported\n"); + break; + } + + /* queue to hardware */ + mhi_add_ring_element(mhi_cntrl, ring); + read_lock_bh(&mhi_cntrl->pm_lock); + if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl))) + mhi_ring_cmd_db(mhi_cntrl, mhi_cmd); + read_unlock_bh(&mhi_cntrl->pm_lock); + spin_unlock_bh(&mhi_cmd->lock); + + return 0; +} + +static int mhi_update_channel_state(struct mhi_controller *mhi_cntrl, + struct mhi_chan *mhi_chan, + enum mhi_ch_state_type to_state) +{ + struct device *dev = &mhi_chan->mhi_dev->dev; + enum mhi_cmd_type cmd = MHI_CMD_NOP; + int ret; + + dev_dbg(dev, "%d: Updating channel state to: %s\n", mhi_chan->chan, + TO_CH_STATE_TYPE_STR(to_state)); + + switch (to_state) { + case MHI_CH_STATE_TYPE_RESET: + write_lock_irq(&mhi_chan->lock); + if (mhi_chan->ch_state != MHI_CH_STATE_STOP && + mhi_chan->ch_state != MHI_CH_STATE_ENABLED && + mhi_chan->ch_state != MHI_CH_STATE_SUSPENDED) { + write_unlock_irq(&mhi_chan->lock); + return -EINVAL; + } + mhi_chan->ch_state = MHI_CH_STATE_DISABLED; + write_unlock_irq(&mhi_chan->lock); + + cmd = MHI_CMD_RESET_CHAN; + break; + case MHI_CH_STATE_TYPE_STOP: + if (mhi_chan->ch_state != MHI_CH_STATE_ENABLED) + return -EINVAL; + + cmd = MHI_CMD_STOP_CHAN; + break; + case MHI_CH_STATE_TYPE_START: + if (mhi_chan->ch_state != MHI_CH_STATE_STOP && + mhi_chan->ch_state != MHI_CH_STATE_DISABLED) + return -EINVAL; + + cmd = MHI_CMD_START_CHAN; + break; + default: + dev_err(dev, "%d: Channel state update to %s not allowed\n", + mhi_chan->chan, TO_CH_STATE_TYPE_STR(to_state)); + return -EINVAL; + } + + /* bring host and device out of suspended states */ + ret = mhi_device_get_sync(mhi_cntrl->mhi_dev); + if (ret) + return ret; + mhi_cntrl->runtime_get(mhi_cntrl); + + reinit_completion(&mhi_chan->completion); + ret = mhi_send_cmd(mhi_cntrl, mhi_chan, cmd); + if (ret) { + dev_err(dev, "%d: Failed to send %s channel command\n", + mhi_chan->chan, TO_CH_STATE_TYPE_STR(to_state)); + goto exit_channel_update; + } + + ret = wait_for_completion_timeout(&mhi_chan->completion, + msecs_to_jiffies(mhi_cntrl->timeout_ms)); + if (!ret || mhi_chan->ccs != MHI_EV_CC_SUCCESS) { + dev_err(dev, + "%d: Failed to receive %s channel command completion\n", + mhi_chan->chan, 
TO_CH_STATE_TYPE_STR(to_state));
+		ret = -EIO;
+		goto exit_channel_update;
+	}
+
+	ret = 0;
+
+	if (to_state != MHI_CH_STATE_TYPE_RESET) {
+		write_lock_irq(&mhi_chan->lock);
+		mhi_chan->ch_state = (to_state == MHI_CH_STATE_TYPE_START) ?
+			MHI_CH_STATE_ENABLED : MHI_CH_STATE_STOP;
+		write_unlock_irq(&mhi_chan->lock);
+	}
+
+	dev_dbg(dev, "%d: Channel state change to %s successful\n",
+		mhi_chan->chan, TO_CH_STATE_TYPE_STR(to_state));
+
+exit_channel_update:
+	mhi_cntrl->runtime_put(mhi_cntrl);
+	mhi_device_put(mhi_cntrl->mhi_dev);
+
+	return ret;
+}
+
+static void mhi_unprepare_channel(struct mhi_controller *mhi_cntrl,
+				  struct mhi_chan *mhi_chan)
+{
+	int ret;
+	struct device *dev = &mhi_chan->mhi_dev->dev;
+
+	mutex_lock(&mhi_chan->mutex);
+
+	if (!(BIT(mhi_cntrl->ee) & mhi_chan->ee_mask)) {
+		dev_dbg(dev, "Current EE: %s Required EE Mask: 0x%x\n",
+			TO_MHI_EXEC_STR(mhi_cntrl->ee), mhi_chan->ee_mask);
+		goto exit_unprepare_channel;
+	}
+
+	/* No more event processing for this channel */
+	ret = mhi_update_channel_state(mhi_cntrl, mhi_chan,
+				       MHI_CH_STATE_TYPE_RESET);
+	if (ret)
+		dev_err(dev, "%d: Failed to reset channel, still resetting\n",
+			mhi_chan->chan);
+
+exit_unprepare_channel:
+	write_lock_irq(&mhi_chan->lock);
+	mhi_chan->ch_state = MHI_CH_STATE_DISABLED;
+	write_unlock_irq(&mhi_chan->lock);
+
+	if (!mhi_chan->offload_ch) {
+		mhi_reset_chan(mhi_cntrl, mhi_chan);
+		mhi_deinit_chan_ctxt(mhi_cntrl, mhi_chan);
+	}
+	dev_dbg(dev, "%d: successfully reset\n", mhi_chan->chan);
+
+	mutex_unlock(&mhi_chan->mutex);
+}
+
+int mhi_prepare_channel(struct mhi_controller *mhi_cntrl,
+			struct mhi_chan *mhi_chan, unsigned int flags)
+{
+	int ret = 0;
+	struct device *dev = &mhi_chan->mhi_dev->dev;
+
+	if (!(BIT(mhi_cntrl->ee) & mhi_chan->ee_mask)) {
+		dev_err(dev, "Current EE: %s Required EE Mask: 0x%x\n",
+			TO_MHI_EXEC_STR(mhi_cntrl->ee), mhi_chan->ee_mask);
+		return -ENOTCONN;
+	}
+
+	mutex_lock(&mhi_chan->mutex);
+
+	/* Check if the client manages the channel context for offload channels */
+	if (!mhi_chan->offload_ch) {
+		ret = mhi_init_chan_ctxt(mhi_cntrl, mhi_chan);
+		if (ret)
+			goto error_init_chan;
+	}
+
+	ret = mhi_update_channel_state(mhi_cntrl, mhi_chan,
+				       MHI_CH_STATE_TYPE_START);
+	if (ret)
+		goto error_pm_state;
+
+	if (mhi_chan->dir == DMA_FROM_DEVICE)
+		mhi_chan->pre_alloc = !!(flags & MHI_CH_INBOUND_ALLOC_BUFS);
+
+	/* Pre-allocate buffers for the xfer ring */
+	if (mhi_chan->pre_alloc) {
+		int nr_el = get_nr_avail_ring_elements(mhi_cntrl,
+						       &mhi_chan->tre_ring);
+		size_t len = mhi_cntrl->buffer_len;
+
+		while (nr_el--) {
+			void *buf;
+			struct mhi_buf_info info = { };
+
+			buf = kmalloc(len, GFP_KERNEL);
+			if (!buf) {
+				ret = -ENOMEM;
+				goto error_pre_alloc;
+			}
+
+			/* Prepare transfer descriptors */
+			info.v_addr = buf;
+			info.cb_buf = buf;
+			info.len = len;
+			ret = mhi_gen_tre(mhi_cntrl, mhi_chan, &info, MHI_EOT);
+			if (ret) {
+				kfree(buf);
+				goto error_pre_alloc;
+			}
+		}
+
+		read_lock_bh(&mhi_cntrl->pm_lock);
+		if (MHI_DB_ACCESS_VALID(mhi_cntrl)) {
+			read_lock_irq(&mhi_chan->lock);
+			mhi_ring_chan_db(mhi_cntrl, mhi_chan);
+			read_unlock_irq(&mhi_chan->lock);
+		}
+		read_unlock_bh(&mhi_cntrl->pm_lock);
+	}
+
+	mutex_unlock(&mhi_chan->mutex);
+
+	return 0;
+
+error_pm_state:
+	if (!mhi_chan->offload_ch)
+		mhi_deinit_chan_ctxt(mhi_cntrl, mhi_chan);
+
+error_init_chan:
+	mutex_unlock(&mhi_chan->mutex);
+
+	return ret;
+
+error_pre_alloc:
+	mutex_unlock(&mhi_chan->mutex);
+	mhi_unprepare_channel(mhi_cntrl, mhi_chan);
+
+	return ret;
+}
+
+static void mhi_mark_stale_events(struct mhi_controller
*mhi_cntrl, + struct mhi_event *mhi_event, + struct mhi_event_ctxt *er_ctxt, + int chan) + +{ + struct mhi_ring_element *dev_rp, *local_rp; + struct mhi_ring *ev_ring; + struct device *dev = &mhi_cntrl->mhi_dev->dev; + unsigned long flags; + dma_addr_t ptr; + + dev_dbg(dev, "Marking all events for chan: %d as stale\n", chan); + + ev_ring = &mhi_event->ring; + + /* mark all stale events related to channel as STALE event */ + spin_lock_irqsave(&mhi_event->lock, flags); + + ptr = le64_to_cpu(er_ctxt->rp); + if (!is_valid_ring_ptr(ev_ring, ptr)) { + dev_err(&mhi_cntrl->mhi_dev->dev, + "Event ring rp points outside of the event ring\n"); + dev_rp = ev_ring->rp; + } else { + dev_rp = mhi_to_virtual(ev_ring, ptr); + } + + local_rp = ev_ring->rp; + while (dev_rp != local_rp) { + if (MHI_TRE_GET_EV_TYPE(local_rp) == MHI_PKT_TYPE_TX_EVENT && + chan == MHI_TRE_GET_EV_CHID(local_rp)) + local_rp->dword[1] = MHI_TRE_EV_DWORD1(chan, + MHI_PKT_TYPE_STALE_EVENT); + local_rp++; + if (local_rp == (ev_ring->base + ev_ring->len)) + local_rp = ev_ring->base; + } + + dev_dbg(dev, "Finished marking events as stale events\n"); + spin_unlock_irqrestore(&mhi_event->lock, flags); +} + +static void mhi_reset_data_chan(struct mhi_controller *mhi_cntrl, + struct mhi_chan *mhi_chan) +{ + struct mhi_ring *buf_ring, *tre_ring; + struct mhi_result result; + + /* Reset any pending buffers */ + buf_ring = &mhi_chan->buf_ring; + tre_ring = &mhi_chan->tre_ring; + result.transaction_status = -ENOTCONN; + result.bytes_xferd = 0; + while (tre_ring->rp != tre_ring->wp) { + struct mhi_buf_info *buf_info = buf_ring->rp; + + if (mhi_chan->dir == DMA_TO_DEVICE) { + atomic_dec(&mhi_cntrl->pending_pkts); + /* Release the reference got from mhi_queue() */ + mhi_cntrl->runtime_put(mhi_cntrl); + } + + if (!buf_info->pre_mapped) + mhi_cntrl->unmap_single(mhi_cntrl, buf_info); + + mhi_del_ring_element(mhi_cntrl, buf_ring); + mhi_del_ring_element(mhi_cntrl, tre_ring); + + if (mhi_chan->pre_alloc) { + kfree(buf_info->cb_buf); + } else { + result.buf_addr = buf_info->cb_buf; + mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result); + } + } +} + +void mhi_reset_chan(struct mhi_controller *mhi_cntrl, struct mhi_chan *mhi_chan) +{ + struct mhi_event *mhi_event; + struct mhi_event_ctxt *er_ctxt; + int chan = mhi_chan->chan; + + /* Nothing to reset, client doesn't queue buffers */ + if (mhi_chan->offload_ch) + return; + + read_lock_bh(&mhi_cntrl->pm_lock); + mhi_event = &mhi_cntrl->mhi_event[mhi_chan->er_index]; + er_ctxt = &mhi_cntrl->mhi_ctxt->er_ctxt[mhi_chan->er_index]; + + mhi_mark_stale_events(mhi_cntrl, mhi_event, er_ctxt, chan); + + mhi_reset_data_chan(mhi_cntrl, mhi_chan); + + read_unlock_bh(&mhi_cntrl->pm_lock); +} + +static int __mhi_prepare_for_transfer(struct mhi_device *mhi_dev, unsigned int flags) +{ + int ret, dir; + struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl; + struct mhi_chan *mhi_chan; + + for (dir = 0; dir < 2; dir++) { + mhi_chan = dir ? mhi_dev->dl_chan : mhi_dev->ul_chan; + if (!mhi_chan) + continue; + + ret = mhi_prepare_channel(mhi_cntrl, mhi_chan, flags); + if (ret) + goto error_open_chan; + } + + return 0; + +error_open_chan: + for (--dir; dir >= 0; dir--) { + mhi_chan = dir ? 
mhi_dev->dl_chan : mhi_dev->ul_chan; + if (!mhi_chan) + continue; + + mhi_unprepare_channel(mhi_cntrl, mhi_chan); + } + + return ret; +} + +int mhi_prepare_for_transfer(struct mhi_device *mhi_dev) +{ + return __mhi_prepare_for_transfer(mhi_dev, 0); +} +EXPORT_SYMBOL_GPL(mhi_prepare_for_transfer); + +int mhi_prepare_for_transfer_autoqueue(struct mhi_device *mhi_dev) +{ + return __mhi_prepare_for_transfer(mhi_dev, MHI_CH_INBOUND_ALLOC_BUFS); +} +EXPORT_SYMBOL_GPL(mhi_prepare_for_transfer_autoqueue); + +void mhi_unprepare_from_transfer(struct mhi_device *mhi_dev) +{ + struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl; + struct mhi_chan *mhi_chan; + int dir; + + for (dir = 0; dir < 2; dir++) { + mhi_chan = dir ? mhi_dev->ul_chan : mhi_dev->dl_chan; + if (!mhi_chan) + continue; + + mhi_unprepare_channel(mhi_cntrl, mhi_chan); + } +} +EXPORT_SYMBOL_GPL(mhi_unprepare_from_transfer); + +int mhi_poll(struct mhi_device *mhi_dev, u32 budget) +{ + struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl; + struct mhi_chan *mhi_chan = mhi_dev->dl_chan; + struct mhi_event *mhi_event = &mhi_cntrl->mhi_event[mhi_chan->er_index]; + int ret; + + spin_lock_bh(&mhi_event->lock); + ret = mhi_event->process_event(mhi_cntrl, mhi_event, budget); + spin_unlock_bh(&mhi_event->lock); + + return ret; +} +EXPORT_SYMBOL_GPL(mhi_poll); diff --git a/drivers/bus/mhi/host/pci_generic.c b/drivers/bus/mhi/host/pci_generic.c new file mode 100644 index 000000000..caa4ce28c --- /dev/null +++ b/drivers/bus/mhi/host/pci_generic.c @@ -0,0 +1,1226 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * MHI PCI driver - MHI over PCI controller driver + * + * This module is a generic driver for registering MHI-over-PCI devices, + * such as PCIe QCOM modems. + * + * Copyright (C) 2020 Linaro Ltd <loic.poulain@linaro.org> + */ + +#include <linux/aer.h> +#include <linux/delay.h> +#include <linux/device.h> +#include <linux/mhi.h> +#include <linux/module.h> +#include <linux/pci.h> +#include <linux/pm_runtime.h> +#include <linux/timer.h> +#include <linux/workqueue.h> + +#define MHI_PCI_DEFAULT_BAR_NUM 0 + +#define MHI_POST_RESET_DELAY_MS 2000 + +#define HEALTH_CHECK_PERIOD (HZ * 2) + +/** + * struct mhi_pci_dev_info - MHI PCI device specific information + * @config: MHI controller configuration + * @name: name of the PCI module + * @fw: firmware path (if any) + * @edl: emergency download mode firmware path (if any) + * @bar_num: PCI base address register to use for MHI MMIO register space + * @dma_data_width: DMA transfer word size (32 or 64 bits) + * @mru_default: default MRU size for MBIM network packets + * @sideband_wake: Devices using dedicated sideband GPIO for wakeup instead + * of inband wake support (such as sdx24) + */ +struct mhi_pci_dev_info { + const struct mhi_controller_config *config; + const char *name; + const char *fw; + const char *edl; + unsigned int bar_num; + unsigned int dma_data_width; + unsigned int mru_default; + bool sideband_wake; +}; + +#define MHI_CHANNEL_CONFIG_UL(ch_num, ch_name, el_count, ev_ring) \ + { \ + .num = ch_num, \ + .name = ch_name, \ + .num_elements = el_count, \ + .event_ring = ev_ring, \ + .dir = DMA_TO_DEVICE, \ + .ee_mask = BIT(MHI_EE_AMSS), \ + .pollcfg = 0, \ + .doorbell = MHI_DB_BRST_DISABLE, \ + .lpm_notify = false, \ + .offload_channel = false, \ + .doorbell_mode_switch = false, \ + } \ + +#define MHI_CHANNEL_CONFIG_DL(ch_num, ch_name, el_count, ev_ring) \ + { \ + .num = ch_num, \ + .name = ch_name, \ + .num_elements = el_count, \ + .event_ring = ev_ring, \ + .dir = DMA_FROM_DEVICE, \ 
+ .ee_mask = BIT(MHI_EE_AMSS), \ + .pollcfg = 0, \ + .doorbell = MHI_DB_BRST_DISABLE, \ + .lpm_notify = false, \ + .offload_channel = false, \ + .doorbell_mode_switch = false, \ + } + +#define MHI_CHANNEL_CONFIG_DL_AUTOQUEUE(ch_num, ch_name, el_count, ev_ring) \ + { \ + .num = ch_num, \ + .name = ch_name, \ + .num_elements = el_count, \ + .event_ring = ev_ring, \ + .dir = DMA_FROM_DEVICE, \ + .ee_mask = BIT(MHI_EE_AMSS), \ + .pollcfg = 0, \ + .doorbell = MHI_DB_BRST_DISABLE, \ + .lpm_notify = false, \ + .offload_channel = false, \ + .doorbell_mode_switch = false, \ + .auto_queue = true, \ + } + +#define MHI_EVENT_CONFIG_CTRL(ev_ring, el_count) \ + { \ + .num_elements = el_count, \ + .irq_moderation_ms = 0, \ + .irq = (ev_ring) + 1, \ + .priority = 1, \ + .mode = MHI_DB_BRST_DISABLE, \ + .data_type = MHI_ER_CTRL, \ + .hardware_event = false, \ + .client_managed = false, \ + .offload_channel = false, \ + } + +#define MHI_CHANNEL_CONFIG_HW_UL(ch_num, ch_name, el_count, ev_ring) \ + { \ + .num = ch_num, \ + .name = ch_name, \ + .num_elements = el_count, \ + .event_ring = ev_ring, \ + .dir = DMA_TO_DEVICE, \ + .ee_mask = BIT(MHI_EE_AMSS), \ + .pollcfg = 0, \ + .doorbell = MHI_DB_BRST_ENABLE, \ + .lpm_notify = false, \ + .offload_channel = false, \ + .doorbell_mode_switch = true, \ + } \ + +#define MHI_CHANNEL_CONFIG_HW_DL(ch_num, ch_name, el_count, ev_ring) \ + { \ + .num = ch_num, \ + .name = ch_name, \ + .num_elements = el_count, \ + .event_ring = ev_ring, \ + .dir = DMA_FROM_DEVICE, \ + .ee_mask = BIT(MHI_EE_AMSS), \ + .pollcfg = 0, \ + .doorbell = MHI_DB_BRST_ENABLE, \ + .lpm_notify = false, \ + .offload_channel = false, \ + .doorbell_mode_switch = true, \ + } + +#define MHI_CHANNEL_CONFIG_UL_SBL(ch_num, ch_name, el_count, ev_ring) \ + { \ + .num = ch_num, \ + .name = ch_name, \ + .num_elements = el_count, \ + .event_ring = ev_ring, \ + .dir = DMA_TO_DEVICE, \ + .ee_mask = BIT(MHI_EE_SBL), \ + .pollcfg = 0, \ + .doorbell = MHI_DB_BRST_DISABLE, \ + .lpm_notify = false, \ + .offload_channel = false, \ + .doorbell_mode_switch = false, \ + } \ + +#define MHI_CHANNEL_CONFIG_DL_SBL(ch_num, ch_name, el_count, ev_ring) \ + { \ + .num = ch_num, \ + .name = ch_name, \ + .num_elements = el_count, \ + .event_ring = ev_ring, \ + .dir = DMA_FROM_DEVICE, \ + .ee_mask = BIT(MHI_EE_SBL), \ + .pollcfg = 0, \ + .doorbell = MHI_DB_BRST_DISABLE, \ + .lpm_notify = false, \ + .offload_channel = false, \ + .doorbell_mode_switch = false, \ + } + +#define MHI_CHANNEL_CONFIG_UL_FP(ch_num, ch_name, el_count, ev_ring) \ + { \ + .num = ch_num, \ + .name = ch_name, \ + .num_elements = el_count, \ + .event_ring = ev_ring, \ + .dir = DMA_TO_DEVICE, \ + .ee_mask = BIT(MHI_EE_FP), \ + .pollcfg = 0, \ + .doorbell = MHI_DB_BRST_DISABLE, \ + .lpm_notify = false, \ + .offload_channel = false, \ + .doorbell_mode_switch = false, \ + } \ + +#define MHI_CHANNEL_CONFIG_DL_FP(ch_num, ch_name, el_count, ev_ring) \ + { \ + .num = ch_num, \ + .name = ch_name, \ + .num_elements = el_count, \ + .event_ring = ev_ring, \ + .dir = DMA_FROM_DEVICE, \ + .ee_mask = BIT(MHI_EE_FP), \ + .pollcfg = 0, \ + .doorbell = MHI_DB_BRST_DISABLE, \ + .lpm_notify = false, \ + .offload_channel = false, \ + .doorbell_mode_switch = false, \ + } + +#define MHI_EVENT_CONFIG_DATA(ev_ring, el_count) \ + { \ + .num_elements = el_count, \ + .irq_moderation_ms = 5, \ + .irq = (ev_ring) + 1, \ + .priority = 1, \ + .mode = MHI_DB_BRST_DISABLE, \ + .data_type = MHI_ER_DATA, \ + .hardware_event = false, \ + .client_managed = false, \ + .offload_channel = false, \ + } + 
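+/*
+ * Hardware event rings differ from the generic data rings above: they are
+ * dedicated to a single channel (.hardware_event/.channel) and use tighter
+ * interrupt moderation (1 ms instead of 5 ms). As an illustration (not part
+ * of the driver), MHI_EVENT_CONFIG_HW_DATA(2, 1024, 100) declares event
+ * ring 2 with 1024 elements, serving hardware channel 100 and signalled on
+ * MSI vector 3 (.irq = ev_ring + 1).
+ */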
+#define MHI_EVENT_CONFIG_HW_DATA(ev_ring, el_count, ch_num) \ + { \ + .num_elements = el_count, \ + .irq_moderation_ms = 1, \ + .irq = (ev_ring) + 1, \ + .priority = 1, \ + .mode = MHI_DB_BRST_DISABLE, \ + .data_type = MHI_ER_DATA, \ + .hardware_event = true, \ + .client_managed = false, \ + .offload_channel = false, \ + .channel = ch_num, \ + } + +static const struct mhi_channel_config modem_qcom_v1_mhi_channels[] = { + MHI_CHANNEL_CONFIG_UL(4, "DIAG", 16, 1), + MHI_CHANNEL_CONFIG_DL(5, "DIAG", 16, 1), + MHI_CHANNEL_CONFIG_UL(12, "MBIM", 4, 0), + MHI_CHANNEL_CONFIG_DL(13, "MBIM", 4, 0), + MHI_CHANNEL_CONFIG_UL(14, "QMI", 4, 0), + MHI_CHANNEL_CONFIG_DL(15, "QMI", 4, 0), + MHI_CHANNEL_CONFIG_UL(20, "IPCR", 8, 0), + MHI_CHANNEL_CONFIG_DL_AUTOQUEUE(21, "IPCR", 8, 0), + MHI_CHANNEL_CONFIG_UL_FP(34, "FIREHOSE", 32, 0), + MHI_CHANNEL_CONFIG_DL_FP(35, "FIREHOSE", 32, 0), + MHI_CHANNEL_CONFIG_HW_UL(100, "IP_HW0", 128, 2), + MHI_CHANNEL_CONFIG_HW_DL(101, "IP_HW0", 128, 3), +}; + +static struct mhi_event_config modem_qcom_v1_mhi_events[] = { + /* first ring is control+data ring */ + MHI_EVENT_CONFIG_CTRL(0, 64), + /* DIAG dedicated event ring */ + MHI_EVENT_CONFIG_DATA(1, 128), + /* Hardware channels request dedicated hardware event rings */ + MHI_EVENT_CONFIG_HW_DATA(2, 1024, 100), + MHI_EVENT_CONFIG_HW_DATA(3, 2048, 101) +}; + +static const struct mhi_controller_config modem_qcom_v1_mhiv_config = { + .max_channels = 128, + .timeout_ms = 8000, + .num_channels = ARRAY_SIZE(modem_qcom_v1_mhi_channels), + .ch_cfg = modem_qcom_v1_mhi_channels, + .num_events = ARRAY_SIZE(modem_qcom_v1_mhi_events), + .event_cfg = modem_qcom_v1_mhi_events, +}; + +static const struct mhi_pci_dev_info mhi_qcom_sdx65_info = { + .name = "qcom-sdx65m", + .fw = "qcom/sdx65m/xbl.elf", + .edl = "qcom/sdx65m/edl.mbn", + .config = &modem_qcom_v1_mhiv_config, + .bar_num = MHI_PCI_DEFAULT_BAR_NUM, + .dma_data_width = 32, + .sideband_wake = false, +}; + +static const struct mhi_pci_dev_info mhi_qcom_sdx55_info = { + .name = "qcom-sdx55m", + .fw = "qcom/sdx55m/sbl1.mbn", + .edl = "qcom/sdx55m/edl.mbn", + .config = &modem_qcom_v1_mhiv_config, + .bar_num = MHI_PCI_DEFAULT_BAR_NUM, + .dma_data_width = 32, + .mru_default = 32768, + .sideband_wake = false, +}; + +static const struct mhi_pci_dev_info mhi_qcom_sdx24_info = { + .name = "qcom-sdx24", + .edl = "qcom/prog_firehose_sdx24.mbn", + .config = &modem_qcom_v1_mhiv_config, + .bar_num = MHI_PCI_DEFAULT_BAR_NUM, + .dma_data_width = 32, + .sideband_wake = true, +}; + +static const struct mhi_channel_config mhi_quectel_em1xx_channels[] = { + MHI_CHANNEL_CONFIG_UL(0, "NMEA", 32, 0), + MHI_CHANNEL_CONFIG_DL(1, "NMEA", 32, 0), + MHI_CHANNEL_CONFIG_UL_SBL(2, "SAHARA", 32, 0), + MHI_CHANNEL_CONFIG_DL_SBL(3, "SAHARA", 32, 0), + MHI_CHANNEL_CONFIG_UL(4, "DIAG", 32, 1), + MHI_CHANNEL_CONFIG_DL(5, "DIAG", 32, 1), + MHI_CHANNEL_CONFIG_UL(12, "MBIM", 32, 0), + MHI_CHANNEL_CONFIG_DL(13, "MBIM", 32, 0), + MHI_CHANNEL_CONFIG_UL(32, "DUN", 32, 0), + MHI_CHANNEL_CONFIG_DL(33, "DUN", 32, 0), + /* The EDL firmware is a flash-programmer exposing firehose protocol */ + MHI_CHANNEL_CONFIG_UL_FP(34, "FIREHOSE", 32, 0), + MHI_CHANNEL_CONFIG_DL_FP(35, "FIREHOSE", 32, 0), + MHI_CHANNEL_CONFIG_HW_UL(100, "IP_HW0_MBIM", 128, 2), + MHI_CHANNEL_CONFIG_HW_DL(101, "IP_HW0_MBIM", 128, 3), +}; + +static struct mhi_event_config mhi_quectel_em1xx_events[] = { + MHI_EVENT_CONFIG_CTRL(0, 128), + MHI_EVENT_CONFIG_DATA(1, 128), + MHI_EVENT_CONFIG_HW_DATA(2, 1024, 100), + MHI_EVENT_CONFIG_HW_DATA(3, 1024, 101) +}; + +static const 
struct mhi_controller_config modem_quectel_em1xx_config = { + .max_channels = 128, + .timeout_ms = 20000, + .num_channels = ARRAY_SIZE(mhi_quectel_em1xx_channels), + .ch_cfg = mhi_quectel_em1xx_channels, + .num_events = ARRAY_SIZE(mhi_quectel_em1xx_events), + .event_cfg = mhi_quectel_em1xx_events, +}; + +static const struct mhi_pci_dev_info mhi_quectel_em1xx_info = { + .name = "quectel-em1xx", + .edl = "qcom/prog_firehose_sdx24.mbn", + .config = &modem_quectel_em1xx_config, + .bar_num = MHI_PCI_DEFAULT_BAR_NUM, + .dma_data_width = 32, + .mru_default = 32768, + .sideband_wake = true, +}; + +static const struct mhi_channel_config mhi_foxconn_sdx55_channels[] = { + MHI_CHANNEL_CONFIG_UL(0, "LOOPBACK", 32, 0), + MHI_CHANNEL_CONFIG_DL(1, "LOOPBACK", 32, 0), + MHI_CHANNEL_CONFIG_UL(4, "DIAG", 32, 1), + MHI_CHANNEL_CONFIG_DL(5, "DIAG", 32, 1), + MHI_CHANNEL_CONFIG_UL(12, "MBIM", 32, 0), + MHI_CHANNEL_CONFIG_DL(13, "MBIM", 32, 0), + MHI_CHANNEL_CONFIG_UL(32, "DUN", 32, 0), + MHI_CHANNEL_CONFIG_DL(33, "DUN", 32, 0), + MHI_CHANNEL_CONFIG_HW_UL(100, "IP_HW0_MBIM", 128, 2), + MHI_CHANNEL_CONFIG_HW_DL(101, "IP_HW0_MBIM", 128, 3), +}; + +static struct mhi_event_config mhi_foxconn_sdx55_events[] = { + MHI_EVENT_CONFIG_CTRL(0, 128), + MHI_EVENT_CONFIG_DATA(1, 128), + MHI_EVENT_CONFIG_HW_DATA(2, 1024, 100), + MHI_EVENT_CONFIG_HW_DATA(3, 1024, 101) +}; + +static const struct mhi_controller_config modem_foxconn_sdx55_config = { + .max_channels = 128, + .timeout_ms = 20000, + .num_channels = ARRAY_SIZE(mhi_foxconn_sdx55_channels), + .ch_cfg = mhi_foxconn_sdx55_channels, + .num_events = ARRAY_SIZE(mhi_foxconn_sdx55_events), + .event_cfg = mhi_foxconn_sdx55_events, +}; + +static const struct mhi_pci_dev_info mhi_foxconn_sdx55_info = { + .name = "foxconn-sdx55", + .fw = "qcom/sdx55m/sbl1.mbn", + .edl = "qcom/sdx55m/edl.mbn", + .config = &modem_foxconn_sdx55_config, + .bar_num = MHI_PCI_DEFAULT_BAR_NUM, + .dma_data_width = 32, + .mru_default = 32768, + .sideband_wake = false, +}; + +static const struct mhi_pci_dev_info mhi_foxconn_sdx65_info = { + .name = "foxconn-sdx65", + .config = &modem_foxconn_sdx55_config, + .bar_num = MHI_PCI_DEFAULT_BAR_NUM, + .dma_data_width = 32, + .mru_default = 32768, + .sideband_wake = false, +}; + +static const struct mhi_channel_config mhi_mv3x_channels[] = { + MHI_CHANNEL_CONFIG_UL(0, "LOOPBACK", 64, 0), + MHI_CHANNEL_CONFIG_DL(1, "LOOPBACK", 64, 0), + /* MBIM Control Channel */ + MHI_CHANNEL_CONFIG_UL(12, "MBIM", 64, 0), + MHI_CHANNEL_CONFIG_DL(13, "MBIM", 64, 0), + /* MBIM Data Channel */ + MHI_CHANNEL_CONFIG_HW_UL(100, "IP_HW0_MBIM", 512, 2), + MHI_CHANNEL_CONFIG_HW_DL(101, "IP_HW0_MBIM", 512, 3), +}; + +static struct mhi_event_config mhi_mv3x_events[] = { + MHI_EVENT_CONFIG_CTRL(0, 256), + MHI_EVENT_CONFIG_DATA(1, 256), + MHI_EVENT_CONFIG_HW_DATA(2, 1024, 100), + MHI_EVENT_CONFIG_HW_DATA(3, 1024, 101), +}; + +static const struct mhi_controller_config modem_mv3x_config = { + .max_channels = 128, + .timeout_ms = 20000, + .num_channels = ARRAY_SIZE(mhi_mv3x_channels), + .ch_cfg = mhi_mv3x_channels, + .num_events = ARRAY_SIZE(mhi_mv3x_events), + .event_cfg = mhi_mv3x_events, +}; + +static const struct mhi_pci_dev_info mhi_mv31_info = { + .name = "cinterion-mv31", + .config = &modem_mv3x_config, + .bar_num = MHI_PCI_DEFAULT_BAR_NUM, + .dma_data_width = 32, + .mru_default = 32768, +}; + +static const struct mhi_pci_dev_info mhi_mv32_info = { + .name = "cinterion-mv32", + .config = &modem_mv3x_config, + .bar_num = MHI_PCI_DEFAULT_BAR_NUM, + .dma_data_width = 32, + .mru_default = 
32768, +}; + +static const struct mhi_channel_config mhi_sierra_em919x_channels[] = { + MHI_CHANNEL_CONFIG_UL_SBL(2, "SAHARA", 32, 0), + MHI_CHANNEL_CONFIG_DL_SBL(3, "SAHARA", 256, 0), + MHI_CHANNEL_CONFIG_UL(4, "DIAG", 32, 0), + MHI_CHANNEL_CONFIG_DL(5, "DIAG", 32, 0), + MHI_CHANNEL_CONFIG_UL(12, "MBIM", 128, 0), + MHI_CHANNEL_CONFIG_DL(13, "MBIM", 128, 0), + MHI_CHANNEL_CONFIG_UL(14, "QMI", 32, 0), + MHI_CHANNEL_CONFIG_DL(15, "QMI", 32, 0), + MHI_CHANNEL_CONFIG_UL(32, "DUN", 32, 0), + MHI_CHANNEL_CONFIG_DL(33, "DUN", 32, 0), + MHI_CHANNEL_CONFIG_HW_UL(100, "IP_HW0", 512, 1), + MHI_CHANNEL_CONFIG_HW_DL(101, "IP_HW0", 512, 2), +}; + +static struct mhi_event_config modem_sierra_em919x_mhi_events[] = { + /* first ring is control+data and DIAG ring */ + MHI_EVENT_CONFIG_CTRL(0, 2048), + /* Hardware channels request dedicated hardware event rings */ + MHI_EVENT_CONFIG_HW_DATA(1, 2048, 100), + MHI_EVENT_CONFIG_HW_DATA(2, 2048, 101) +}; + +static const struct mhi_controller_config modem_sierra_em919x_config = { + .max_channels = 128, + .timeout_ms = 24000, + .num_channels = ARRAY_SIZE(mhi_sierra_em919x_channels), + .ch_cfg = mhi_sierra_em919x_channels, + .num_events = ARRAY_SIZE(modem_sierra_em919x_mhi_events), + .event_cfg = modem_sierra_em919x_mhi_events, +}; + +static const struct mhi_pci_dev_info mhi_sierra_em919x_info = { + .name = "sierra-em919x", + .config = &modem_sierra_em919x_config, + .bar_num = MHI_PCI_DEFAULT_BAR_NUM, + .dma_data_width = 32, + .sideband_wake = false, +}; + +static const struct mhi_channel_config mhi_telit_fn980_hw_v1_channels[] = { + MHI_CHANNEL_CONFIG_UL(14, "QMI", 32, 0), + MHI_CHANNEL_CONFIG_DL(15, "QMI", 32, 0), + MHI_CHANNEL_CONFIG_UL(20, "IPCR", 16, 0), + MHI_CHANNEL_CONFIG_DL_AUTOQUEUE(21, "IPCR", 16, 0), + MHI_CHANNEL_CONFIG_HW_UL(100, "IP_HW0", 128, 1), + MHI_CHANNEL_CONFIG_HW_DL(101, "IP_HW0", 128, 2), +}; + +static struct mhi_event_config mhi_telit_fn980_hw_v1_events[] = { + MHI_EVENT_CONFIG_CTRL(0, 128), + MHI_EVENT_CONFIG_HW_DATA(1, 1024, 100), + MHI_EVENT_CONFIG_HW_DATA(2, 2048, 101) +}; + +static struct mhi_controller_config modem_telit_fn980_hw_v1_config = { + .max_channels = 128, + .timeout_ms = 20000, + .num_channels = ARRAY_SIZE(mhi_telit_fn980_hw_v1_channels), + .ch_cfg = mhi_telit_fn980_hw_v1_channels, + .num_events = ARRAY_SIZE(mhi_telit_fn980_hw_v1_events), + .event_cfg = mhi_telit_fn980_hw_v1_events, +}; + +static const struct mhi_pci_dev_info mhi_telit_fn980_hw_v1_info = { + .name = "telit-fn980-hwv1", + .fw = "qcom/sdx55m/sbl1.mbn", + .edl = "qcom/sdx55m/edl.mbn", + .config = &modem_telit_fn980_hw_v1_config, + .bar_num = MHI_PCI_DEFAULT_BAR_NUM, + .dma_data_width = 32, + .mru_default = 32768, + .sideband_wake = false, +}; + +static const struct mhi_channel_config mhi_telit_fn990_channels[] = { + MHI_CHANNEL_CONFIG_UL_SBL(2, "SAHARA", 32, 0), + MHI_CHANNEL_CONFIG_DL_SBL(3, "SAHARA", 32, 0), + MHI_CHANNEL_CONFIG_UL(4, "DIAG", 64, 1), + MHI_CHANNEL_CONFIG_DL(5, "DIAG", 64, 1), + MHI_CHANNEL_CONFIG_UL(12, "MBIM", 32, 0), + MHI_CHANNEL_CONFIG_DL(13, "MBIM", 32, 0), + MHI_CHANNEL_CONFIG_UL(32, "DUN", 32, 0), + MHI_CHANNEL_CONFIG_DL(33, "DUN", 32, 0), + MHI_CHANNEL_CONFIG_UL(92, "DUN2", 32, 1), + MHI_CHANNEL_CONFIG_DL(93, "DUN2", 32, 1), + MHI_CHANNEL_CONFIG_HW_UL(100, "IP_HW0_MBIM", 128, 2), + MHI_CHANNEL_CONFIG_HW_DL(101, "IP_HW0_MBIM", 128, 3), +}; + +static struct mhi_event_config mhi_telit_fn990_events[] = { + MHI_EVENT_CONFIG_CTRL(0, 128), + MHI_EVENT_CONFIG_DATA(1, 128), + MHI_EVENT_CONFIG_HW_DATA(2, 1024, 100), + 
MHI_EVENT_CONFIG_HW_DATA(3, 2048, 101) +}; + +static const struct mhi_controller_config modem_telit_fn990_config = { + .max_channels = 128, + .timeout_ms = 20000, + .num_channels = ARRAY_SIZE(mhi_telit_fn990_channels), + .ch_cfg = mhi_telit_fn990_channels, + .num_events = ARRAY_SIZE(mhi_telit_fn990_events), + .event_cfg = mhi_telit_fn990_events, +}; + +static const struct mhi_pci_dev_info mhi_telit_fn990_info = { + .name = "telit-fn990", + .config = &modem_telit_fn990_config, + .bar_num = MHI_PCI_DEFAULT_BAR_NUM, + .dma_data_width = 32, + .sideband_wake = false, + .mru_default = 32768, +}; + +/* Keep the list sorted based on the PID. New VID should be added as the last entry */ +static const struct pci_device_id mhi_pci_id_table[] = { + { PCI_DEVICE(PCI_VENDOR_ID_QCOM, 0x0304), + .driver_data = (kernel_ulong_t) &mhi_qcom_sdx24_info }, + /* EM919x (sdx55), use the same vid:pid as qcom-sdx55m */ + { PCI_DEVICE_SUB(PCI_VENDOR_ID_QCOM, 0x0306, 0x18d7, 0x0200), + .driver_data = (kernel_ulong_t) &mhi_sierra_em919x_info }, + /* Telit FN980 hardware revision v1 */ + { PCI_DEVICE_SUB(PCI_VENDOR_ID_QCOM, 0x0306, 0x1C5D, 0x2000), + .driver_data = (kernel_ulong_t) &mhi_telit_fn980_hw_v1_info }, + { PCI_DEVICE(PCI_VENDOR_ID_QCOM, 0x0306), + .driver_data = (kernel_ulong_t) &mhi_qcom_sdx55_info }, + /* Telit FN990 */ + { PCI_DEVICE_SUB(PCI_VENDOR_ID_QCOM, 0x0308, 0x1c5d, 0x2010), + .driver_data = (kernel_ulong_t) &mhi_telit_fn990_info }, + { PCI_DEVICE(PCI_VENDOR_ID_QCOM, 0x0308), + .driver_data = (kernel_ulong_t) &mhi_qcom_sdx65_info }, + { PCI_DEVICE(0x1eac, 0x1001), /* EM120R-GL (sdx24) */ + .driver_data = (kernel_ulong_t) &mhi_quectel_em1xx_info }, + { PCI_DEVICE(0x1eac, 0x1002), /* EM160R-GL (sdx24) */ + .driver_data = (kernel_ulong_t) &mhi_quectel_em1xx_info }, + { PCI_DEVICE(0x1eac, 0x2001), /* EM120R-GL for FCCL (sdx24) */ + .driver_data = (kernel_ulong_t) &mhi_quectel_em1xx_info }, + /* T99W175 (sdx55), Both for eSIM and Non-eSIM */ + { PCI_DEVICE(PCI_VENDOR_ID_FOXCONN, 0xe0ab), + .driver_data = (kernel_ulong_t) &mhi_foxconn_sdx55_info }, + /* DW5930e (sdx55), With eSIM, It's also T99W175 */ + { PCI_DEVICE(PCI_VENDOR_ID_FOXCONN, 0xe0b0), + .driver_data = (kernel_ulong_t) &mhi_foxconn_sdx55_info }, + /* DW5930e (sdx55), Non-eSIM, It's also T99W175 */ + { PCI_DEVICE(PCI_VENDOR_ID_FOXCONN, 0xe0b1), + .driver_data = (kernel_ulong_t) &mhi_foxconn_sdx55_info }, + /* T99W175 (sdx55), Based on Qualcomm new baseline */ + { PCI_DEVICE(PCI_VENDOR_ID_FOXCONN, 0xe0bf), + .driver_data = (kernel_ulong_t) &mhi_foxconn_sdx55_info }, + /* T99W175 (sdx55) */ + { PCI_DEVICE(PCI_VENDOR_ID_FOXCONN, 0xe0c3), + .driver_data = (kernel_ulong_t) &mhi_foxconn_sdx55_info }, + /* T99W368 (sdx65) */ + { PCI_DEVICE(PCI_VENDOR_ID_FOXCONN, 0xe0d8), + .driver_data = (kernel_ulong_t) &mhi_foxconn_sdx65_info }, + /* T99W373 (sdx62) */ + { PCI_DEVICE(PCI_VENDOR_ID_FOXCONN, 0xe0d9), + .driver_data = (kernel_ulong_t) &mhi_foxconn_sdx65_info }, + /* MV31-W (Cinterion) */ + { PCI_DEVICE(0x1269, 0x00b3), + .driver_data = (kernel_ulong_t) &mhi_mv31_info }, + /* MV31-W (Cinterion), based on new baseline */ + { PCI_DEVICE(0x1269, 0x00b4), + .driver_data = (kernel_ulong_t) &mhi_mv31_info }, + /* MV32-WA (Cinterion) */ + { PCI_DEVICE(0x1269, 0x00ba), + .driver_data = (kernel_ulong_t) &mhi_mv32_info }, + /* MV32-WB (Cinterion) */ + { PCI_DEVICE(0x1269, 0x00bb), + .driver_data = (kernel_ulong_t) &mhi_mv32_info }, + { } +}; +MODULE_DEVICE_TABLE(pci, mhi_pci_id_table); + +enum mhi_pci_device_status { + MHI_PCI_DEV_STARTED, + 
MHI_PCI_DEV_SUSPENDED,
+};
+
+struct mhi_pci_device {
+	struct mhi_controller mhi_cntrl;
+	struct pci_saved_state *pci_state;
+	struct work_struct recovery_work;
+	struct timer_list health_check_timer;
+	unsigned long status;
+};
+
+static int mhi_pci_read_reg(struct mhi_controller *mhi_cntrl,
+			    void __iomem *addr, u32 *out)
+{
+	*out = readl(addr);
+	return 0;
+}
+
+static void mhi_pci_write_reg(struct mhi_controller *mhi_cntrl,
+			      void __iomem *addr, u32 val)
+{
+	writel(val, addr);
+}
+
+static void mhi_pci_status_cb(struct mhi_controller *mhi_cntrl,
+			      enum mhi_callback cb)
+{
+	struct pci_dev *pdev = to_pci_dev(mhi_cntrl->cntrl_dev);
+
+	/* Adjust runtime PM policy based on the reported device state */
+	switch (cb) {
+	case MHI_CB_FATAL_ERROR:
+	case MHI_CB_SYS_ERROR:
+		dev_warn(&pdev->dev, "firmware crashed (%u)\n", cb);
+		pm_runtime_forbid(&pdev->dev);
+		break;
+	case MHI_CB_EE_MISSION_MODE:
+		pm_runtime_allow(&pdev->dev);
+		break;
+	default:
+		break;
+	}
+}
+
+static void mhi_pci_wake_get_nop(struct mhi_controller *mhi_cntrl, bool force)
+{
+	/* no-op */
+}
+
+static void mhi_pci_wake_put_nop(struct mhi_controller *mhi_cntrl, bool override)
+{
+	/* no-op */
+}
+
+static void mhi_pci_wake_toggle_nop(struct mhi_controller *mhi_cntrl)
+{
+	/* no-op */
+}
+
+static bool mhi_pci_is_alive(struct mhi_controller *mhi_cntrl)
+{
+	struct pci_dev *pdev = to_pci_dev(mhi_cntrl->cntrl_dev);
+	u16 vendor = 0;
+
+	if (pci_read_config_word(pdev, PCI_VENDOR_ID, &vendor))
+		return false;
+
+	if (vendor == (u16) ~0 || vendor == 0)
+		return false;
+
+	return true;
+}
+
+static int mhi_pci_claim(struct mhi_controller *mhi_cntrl,
+			 unsigned int bar_num, u64 dma_mask)
+{
+	struct pci_dev *pdev = to_pci_dev(mhi_cntrl->cntrl_dev);
+	int err;
+
+	err = pci_assign_resource(pdev, bar_num);
+	if (err)
+		return err;
+
+	err = pcim_enable_device(pdev);
+	if (err) {
+		dev_err(&pdev->dev, "failed to enable pci device: %d\n", err);
+		return err;
+	}
+
+	err = pcim_iomap_regions(pdev, 1 << bar_num, pci_name(pdev));
+	if (err) {
+		dev_err(&pdev->dev, "failed to map pci region: %d\n", err);
+		return err;
+	}
+	mhi_cntrl->regs = pcim_iomap_table(pdev)[bar_num];
+	mhi_cntrl->reg_len = pci_resource_len(pdev, bar_num);
+
+	err = dma_set_mask_and_coherent(&pdev->dev, dma_mask);
+	if (err) {
+		dev_err(&pdev->dev, "Cannot set proper DMA mask\n");
+		return err;
+	}
+
+	pci_set_master(pdev);
+
+	return 0;
+}
+
+static int mhi_pci_get_irqs(struct mhi_controller *mhi_cntrl,
+			    const struct mhi_controller_config *mhi_cntrl_config)
+{
+	struct pci_dev *pdev = to_pci_dev(mhi_cntrl->cntrl_dev);
+	int nr_vectors, i;
+	int *irq;
+
+	/*
+	 * Allocate one MSI vector for BHI + one vector per event ring, ideally...
+	 * No explicit pci_free_irq_vectors required, done by pcim_release.
+	 */
+	mhi_cntrl->nr_irqs = 1 + mhi_cntrl_config->num_events;
+
+	nr_vectors = pci_alloc_irq_vectors(pdev, 1, mhi_cntrl->nr_irqs, PCI_IRQ_MSI);
+	if (nr_vectors < 0) {
+		dev_err(&pdev->dev, "Error allocating MSI vectors %d\n",
+			nr_vectors);
+		return nr_vectors;
+	}
+
+	if (nr_vectors < mhi_cntrl->nr_irqs) {
+		dev_warn(&pdev->dev, "using shared MSI\n");
+
+		/* Patch MSI vectors, use only one (shared) */
+		for (i = 0; i < mhi_cntrl_config->num_events; i++)
+			mhi_cntrl_config->event_cfg[i].irq = 0;
+		mhi_cntrl->nr_irqs = 1;
+	}
+
+	irq = devm_kcalloc(&pdev->dev, mhi_cntrl->nr_irqs, sizeof(int), GFP_KERNEL);
+	if (!irq)
+		return -ENOMEM;
+
+	for (i = 0; i < mhi_cntrl->nr_irqs; i++) {
+		int vector = i >= nr_vectors ? (nr_vectors - 1) : i;
+
+		irq[i] = pci_irq_vector(pdev, vector);
+	}
+
+	mhi_cntrl->irq = irq;
+
+	return 0;
+}
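+/*
+ * Illustrative note, not part of the driver: when the platform grants fewer
+ * MSI vectors than requested, the fallback above collapses everything onto
+ * vector 0. For a hypothetical config with three event rings:
+ *
+ *	nr_vectors = pci_alloc_irq_vectors(pdev, 1, 4, PCI_IRQ_MSI);
+ *	// if nr_vectors == 1: event_cfg[i].irq = 0 for all i, and BHI plus
+ *	// all event rings end up sharing pci_irq_vector(pdev, 0)
+ */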
+
+static int mhi_pci_runtime_get(struct mhi_controller *mhi_cntrl)
+{
+	/* The runtime_get() MHI callback means:
+	 *	Do whatever is requested to leave M3.
+	 */
+	return pm_runtime_get(mhi_cntrl->cntrl_dev);
+}
+
+static void mhi_pci_runtime_put(struct mhi_controller *mhi_cntrl)
+{
+	/* The runtime_put() MHI callback means:
+	 *	Device can be moved into M3 state.
+	 */
+	pm_runtime_mark_last_busy(mhi_cntrl->cntrl_dev);
+	pm_runtime_put(mhi_cntrl->cntrl_dev);
+}
+
+static void mhi_pci_recovery_work(struct work_struct *work)
+{
+	struct mhi_pci_device *mhi_pdev = container_of(work, struct mhi_pci_device,
+						       recovery_work);
+	struct mhi_controller *mhi_cntrl = &mhi_pdev->mhi_cntrl;
+	struct pci_dev *pdev = to_pci_dev(mhi_cntrl->cntrl_dev);
+	int err;
+
+	dev_warn(&pdev->dev, "device recovery started\n");
+
+	del_timer(&mhi_pdev->health_check_timer);
+	pm_runtime_forbid(&pdev->dev);
+
+	/* Clean up MHI state */
+	if (test_and_clear_bit(MHI_PCI_DEV_STARTED, &mhi_pdev->status)) {
+		mhi_power_down(mhi_cntrl, false);
+		mhi_unprepare_after_power_down(mhi_cntrl);
+	}
+
+	pci_set_power_state(pdev, PCI_D0);
+	pci_load_saved_state(pdev, mhi_pdev->pci_state);
+	pci_restore_state(pdev);
+
+	if (!mhi_pci_is_alive(mhi_cntrl))
+		goto err_try_reset;
+
+	err = mhi_prepare_for_power_up(mhi_cntrl);
+	if (err)
+		goto err_try_reset;
+
+	err = mhi_sync_power_up(mhi_cntrl);
+	if (err)
+		goto err_unprepare;
+
+	dev_dbg(&pdev->dev, "Recovery completed\n");
+
+	set_bit(MHI_PCI_DEV_STARTED, &mhi_pdev->status);
+	mod_timer(&mhi_pdev->health_check_timer, jiffies + HEALTH_CHECK_PERIOD);
+	return;
+
+err_unprepare:
+	mhi_unprepare_after_power_down(mhi_cntrl);
+err_try_reset:
+	if (pci_reset_function(pdev))
+		dev_err(&pdev->dev, "Recovery failed\n");
+}
+
+static void health_check(struct timer_list *t)
+{
+	struct mhi_pci_device *mhi_pdev = from_timer(mhi_pdev, t, health_check_timer);
+	struct mhi_controller *mhi_cntrl = &mhi_pdev->mhi_cntrl;
+
+	if (!test_bit(MHI_PCI_DEV_STARTED, &mhi_pdev->status) ||
+	    test_bit(MHI_PCI_DEV_SUSPENDED, &mhi_pdev->status))
+		return;
+
+	if (!mhi_pci_is_alive(mhi_cntrl)) {
+		dev_err(mhi_cntrl->cntrl_dev, "Device died\n");
+		queue_work(system_long_wq, &mhi_pdev->recovery_work);
+		return;
+	}
+
+	/* reschedule in two seconds */
+	mod_timer(&mhi_pdev->health_check_timer, jiffies + HEALTH_CHECK_PERIOD);
+}
+
+static int mhi_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+	const struct mhi_pci_dev_info *info = (struct mhi_pci_dev_info *) id->driver_data;
+	const struct mhi_controller_config *mhi_cntrl_config;
+	struct mhi_pci_device *mhi_pdev;
+	struct mhi_controller *mhi_cntrl;
+	int err;
+
+	dev_info(&pdev->dev, "MHI PCI device found: %s\n", info->name);
+
+	/* mhi_pdev.mhi_cntrl must be zero-initialized */
+	mhi_pdev = devm_kzalloc(&pdev->dev, sizeof(*mhi_pdev), GFP_KERNEL);
+	if (!mhi_pdev)
+		return -ENOMEM;
+
+	INIT_WORK(&mhi_pdev->recovery_work, mhi_pci_recovery_work);
+	timer_setup(&mhi_pdev->health_check_timer, health_check, 0);
+
+	mhi_cntrl_config = info->config;
+	mhi_cntrl = &mhi_pdev->mhi_cntrl;
+
+	mhi_cntrl->cntrl_dev = &pdev->dev;
+	mhi_cntrl->iova_start = 0;
+	mhi_cntrl->iova_stop = (dma_addr_t)DMA_BIT_MASK(info->dma_data_width);
+	mhi_cntrl->fw_image = info->fw;
+	mhi_cntrl->edl_image = info->edl;
+
+	mhi_cntrl->read_reg = mhi_pci_read_reg;
+	mhi_cntrl->write_reg = mhi_pci_write_reg;
+	mhi_cntrl->status_cb = mhi_pci_status_cb;
+	mhi_cntrl->runtime_get = mhi_pci_runtime_get;
+	mhi_cntrl->runtime_put = mhi_pci_runtime_put;
+	mhi_cntrl->mru = info->mru_default;
+
+	if (info->sideband_wake) {
+		mhi_cntrl->wake_get = mhi_pci_wake_get_nop;
+		mhi_cntrl->wake_put = mhi_pci_wake_put_nop;
+		mhi_cntrl->wake_toggle = mhi_pci_wake_toggle_nop;
+	}
+
+	err = mhi_pci_claim(mhi_cntrl, info->bar_num, DMA_BIT_MASK(info->dma_data_width));
+	if (err)
+		return err;
+
+	err = mhi_pci_get_irqs(mhi_cntrl, mhi_cntrl_config);
+	if (err)
+		return err;
+
+	pci_set_drvdata(pdev, mhi_pdev);
+
+	/* Keep the saved PCI config space at hand for restore after a sudden
+	 * PCI error: cache the state locally and discard the PCI core one.
+	 */
+	pci_save_state(pdev);
+	mhi_pdev->pci_state = pci_store_saved_state(pdev);
+	pci_load_saved_state(pdev, NULL);
+
+	pci_enable_pcie_error_reporting(pdev);
+
+	err = mhi_register_controller(mhi_cntrl, mhi_cntrl_config);
+	if (err)
+		goto err_disable_reporting;
+
+	/* MHI bus does not power up the controller by default */
+	err = mhi_prepare_for_power_up(mhi_cntrl);
+	if (err) {
+		dev_err(&pdev->dev, "failed to prepare MHI controller\n");
+		goto err_unregister;
+	}
+
+	err = mhi_sync_power_up(mhi_cntrl);
+	if (err) {
+		dev_err(&pdev->dev, "failed to power up MHI controller\n");
+		goto err_unprepare;
+	}
+
+	set_bit(MHI_PCI_DEV_STARTED, &mhi_pdev->status);
+
+	/* start health check */
+	mod_timer(&mhi_pdev->health_check_timer, jiffies + HEALTH_CHECK_PERIOD);
+
+	/* Only allow runtime-suspend if PME capable (for wakeup) */
+	if (pci_pme_capable(pdev, PCI_D3hot)) {
+		pm_runtime_set_autosuspend_delay(&pdev->dev, 2000);
+		pm_runtime_use_autosuspend(&pdev->dev);
+		pm_runtime_mark_last_busy(&pdev->dev);
+		pm_runtime_put_noidle(&pdev->dev);
+	}
+
+	return 0;
+
+err_unprepare:
+	mhi_unprepare_after_power_down(mhi_cntrl);
+err_unregister:
+	mhi_unregister_controller(mhi_cntrl);
+err_disable_reporting:
+	pci_disable_pcie_error_reporting(pdev);
+
+	return err;
+}
+
+static void mhi_pci_remove(struct pci_dev *pdev)
+{
+	struct mhi_pci_device *mhi_pdev = pci_get_drvdata(pdev);
+	struct mhi_controller *mhi_cntrl = &mhi_pdev->mhi_cntrl;
+
+	del_timer_sync(&mhi_pdev->health_check_timer);
+	cancel_work_sync(&mhi_pdev->recovery_work);
+
+	if (test_and_clear_bit(MHI_PCI_DEV_STARTED, &mhi_pdev->status)) {
+		mhi_power_down(mhi_cntrl, true);
+		mhi_unprepare_after_power_down(mhi_cntrl);
+	}
+
+	/* balance the put_noidle() from probe */
+	if (pci_pme_capable(pdev, PCI_D3hot))
+		pm_runtime_get_noresume(&pdev->dev);
+
+	mhi_unregister_controller(mhi_cntrl);
+	pci_disable_pcie_error_reporting(pdev);
+}
+
+static void mhi_pci_shutdown(struct pci_dev *pdev)
+{
+	mhi_pci_remove(pdev);
+	pci_set_power_state(pdev, PCI_D3hot);
+}
+
+static void mhi_pci_reset_prepare(struct pci_dev *pdev)
+{
+	struct mhi_pci_device *mhi_pdev = pci_get_drvdata(pdev);
+	struct mhi_controller *mhi_cntrl = &mhi_pdev->mhi_cntrl;
+
+	dev_info(&pdev->dev, "reset\n");
+
+	del_timer(&mhi_pdev->health_check_timer);
+
+	/* Clean up MHI state */
+	if (test_and_clear_bit(MHI_PCI_DEV_STARTED, &mhi_pdev->status)) {
+		mhi_power_down(mhi_cntrl, false);
+		mhi_unprepare_after_power_down(mhi_cntrl);
+	}
+
+	/* cause internal device reset */
+	mhi_soc_reset(mhi_cntrl);
+
+	/* Make sure the device reset has been executed */
+	msleep(MHI_POST_RESET_DELAY_MS);
+}
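+/*
+ * Illustrative note, not part of the driver: the .reset_prepare()/.reset_done()
+ * handlers bracket a PCI function reset, so the recovery fallback that calls
+ * pci_reset_function(pdev) effectively runs:
+ *
+ *	mhi_pci_reset_prepare(pdev);	// MHI power down + SoC reset
+ *	...PCI-level function reset...
+ *	mhi_pci_reset_done(pdev);	// restore config space + MHI power up
+ */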
+
+static void mhi_pci_reset_done(struct pci_dev *pdev)
+{
+	struct mhi_pci_device *mhi_pdev = pci_get_drvdata(pdev);
+	struct mhi_controller *mhi_cntrl = &mhi_pdev->mhi_cntrl;
+	int err;
+
+	/* Restore initial known working PCI state */
+	pci_load_saved_state(pdev, mhi_pdev->pci_state);
+	pci_restore_state(pdev);
+
+	/* Is the device status available? */
+	if (!mhi_pci_is_alive(mhi_cntrl)) {
+		dev_err(&pdev->dev, "reset failed\n");
+		return;
+	}
+
+	err = mhi_prepare_for_power_up(mhi_cntrl);
+	if (err) {
+		dev_err(&pdev->dev, "failed to prepare MHI controller\n");
+		return;
+	}
+
+	err = mhi_sync_power_up(mhi_cntrl);
+	if (err) {
+		dev_err(&pdev->dev, "failed to power up MHI controller\n");
+		mhi_unprepare_after_power_down(mhi_cntrl);
+		return;
+	}
+
+	set_bit(MHI_PCI_DEV_STARTED, &mhi_pdev->status);
+	mod_timer(&mhi_pdev->health_check_timer, jiffies + HEALTH_CHECK_PERIOD);
+}
+
+static pci_ers_result_t mhi_pci_error_detected(struct pci_dev *pdev,
+					       pci_channel_state_t state)
+{
+	struct mhi_pci_device *mhi_pdev = pci_get_drvdata(pdev);
+	struct mhi_controller *mhi_cntrl = &mhi_pdev->mhi_cntrl;
+
+	dev_err(&pdev->dev, "PCI error detected, state = %u\n", state);
+
+	if (state == pci_channel_io_perm_failure)
+		return PCI_ERS_RESULT_DISCONNECT;
+
+	/* Clean up MHI state */
+	if (test_and_clear_bit(MHI_PCI_DEV_STARTED, &mhi_pdev->status)) {
+		mhi_power_down(mhi_cntrl, false);
+		mhi_unprepare_after_power_down(mhi_cntrl);
+	} else {
+		/* Nothing to do */
+		return PCI_ERS_RESULT_RECOVERED;
+	}
+
+	pci_disable_device(pdev);
+
+	return PCI_ERS_RESULT_NEED_RESET;
+}
+
+static pci_ers_result_t mhi_pci_slot_reset(struct pci_dev *pdev)
+{
+	if (pci_enable_device(pdev)) {
+		dev_err(&pdev->dev, "Cannot re-enable PCI device after reset.\n");
+		return PCI_ERS_RESULT_DISCONNECT;
+	}
+
+	return PCI_ERS_RESULT_RECOVERED;
+}
+
+static void mhi_pci_io_resume(struct pci_dev *pdev)
+{
+	struct mhi_pci_device *mhi_pdev = pci_get_drvdata(pdev);
+
+	dev_err(&pdev->dev, "PCI slot reset done\n");
+
+	queue_work(system_long_wq, &mhi_pdev->recovery_work);
+}
+
+static const struct pci_error_handlers mhi_pci_err_handler = {
+	.error_detected = mhi_pci_error_detected,
+	.slot_reset = mhi_pci_slot_reset,
+	.resume = mhi_pci_io_resume,
+	.reset_prepare = mhi_pci_reset_prepare,
+	.reset_done = mhi_pci_reset_done,
+};
+
+static int __maybe_unused mhi_pci_runtime_suspend(struct device *dev)
+{
+	struct pci_dev *pdev = to_pci_dev(dev);
+	struct mhi_pci_device *mhi_pdev = dev_get_drvdata(dev);
+	struct mhi_controller *mhi_cntrl = &mhi_pdev->mhi_cntrl;
+	int err;
+
+	if (test_and_set_bit(MHI_PCI_DEV_SUSPENDED, &mhi_pdev->status))
+		return 0;
+
+	del_timer(&mhi_pdev->health_check_timer);
+	cancel_work_sync(&mhi_pdev->recovery_work);
+
+	if (!test_bit(MHI_PCI_DEV_STARTED, &mhi_pdev->status) ||
+	    mhi_cntrl->ee != MHI_EE_AMSS)
+		goto pci_suspend; /* Nothing to do at MHI level */
+
+	/* Transition to M3 state */
+	err = mhi_pm_suspend(mhi_cntrl);
+	if (err) {
+		dev_err(&pdev->dev, "failed to suspend device: %d\n", err);
+		clear_bit(MHI_PCI_DEV_SUSPENDED, &mhi_pdev->status);
+		return -EBUSY;
+	}
+
+pci_suspend:
+	pci_disable_device(pdev);
+	pci_wake_from_d3(pdev, true);
+
+	return 0;
+}
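+/*
+ * Illustrative note, not part of the driver: the MHI core drives these
+ * handlers through the runtime_get()/runtime_put() ops registered in probe,
+ * so a typical client transfer ends up as:
+ *
+ *	mhi_cntrl->runtime_get(mhi_cntrl);	// pm_runtime_get() -> runtime resume -> M0
+ *	...ring doorbells, move data...
+ *	mhi_cntrl->runtime_put(mhi_cntrl);	// may autosuspend later -> M3
+ */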
+
+static int __maybe_unused mhi_pci_runtime_resume(struct device *dev)
+{
+	struct pci_dev *pdev = to_pci_dev(dev);
+	struct mhi_pci_device *mhi_pdev = dev_get_drvdata(dev);
+	struct mhi_controller *mhi_cntrl = &mhi_pdev->mhi_cntrl;
+	int err;
+
+	if (!test_and_clear_bit(MHI_PCI_DEV_SUSPENDED, &mhi_pdev->status))
+		return 0;
+
+	err = pci_enable_device(pdev);
+	if (err)
+		goto err_recovery;
+
+	pci_set_master(pdev);
+	pci_wake_from_d3(pdev, false);
+
+	if (!test_bit(MHI_PCI_DEV_STARTED, &mhi_pdev->status) ||
+	    mhi_cntrl->ee != MHI_EE_AMSS)
+		return 0; /* Nothing to do at MHI level */
+
+	/* Exit M3, transition to M0 state */
+	err = mhi_pm_resume(mhi_cntrl);
+	if (err) {
+		dev_err(&pdev->dev, "failed to resume device: %d\n", err);
+		goto err_recovery;
+	}
+
+	/* Resume health check */
+	mod_timer(&mhi_pdev->health_check_timer, jiffies + HEALTH_CHECK_PERIOD);
+
+	/* It can be a remote wakeup (no mhi runtime_get), update access time */
+	pm_runtime_mark_last_busy(dev);
+
+	return 0;
+
+err_recovery:
+	/* Do not fail here, to avoid messing up the PCI device state; the
+	 * device likely lost power (d3cold) and simply needs to be reset from
+	 * the recovery procedure. Trigger the recovery asynchronously to avoid
+	 * delaying the exit from system suspend.
+	 */
+	queue_work(system_long_wq, &mhi_pdev->recovery_work);
+	pm_runtime_mark_last_busy(dev);
+
+	return 0;
+}
+
+static int __maybe_unused mhi_pci_suspend(struct device *dev)
+{
+	pm_runtime_disable(dev);
+	return mhi_pci_runtime_suspend(dev);
+}
+
+static int __maybe_unused mhi_pci_resume(struct device *dev)
+{
+	int ret;
+
+	/* Depending on the platform, the device may have lost power (d3cold);
+	 * we need to resume it now to check its state and recover when
+	 * necessary.
+	 */
+	ret = mhi_pci_runtime_resume(dev);
+	pm_runtime_enable(dev);
+
+	return ret;
+}
+
+static int __maybe_unused mhi_pci_freeze(struct device *dev)
+{
+	struct mhi_pci_device *mhi_pdev = dev_get_drvdata(dev);
+	struct mhi_controller *mhi_cntrl = &mhi_pdev->mhi_cntrl;
+
+	/* We want to stop all operations: hibernation does not guarantee that
+	 * the device will be in the same state as before freezing, especially
+	 * if the intermediate restore kernel reinitializes the MHI device with
+	 * a new context.
+	 */
+	flush_work(&mhi_pdev->recovery_work);
+	if (test_and_clear_bit(MHI_PCI_DEV_STARTED, &mhi_pdev->status)) {
+		mhi_power_down(mhi_cntrl, true);
+		mhi_unprepare_after_power_down(mhi_cntrl);
+	}
+
+	return 0;
+}
+
+static int __maybe_unused mhi_pci_restore(struct device *dev)
+{
+	struct mhi_pci_device *mhi_pdev = dev_get_drvdata(dev);
+
+	/* Reinitialize the device */
+	queue_work(system_long_wq, &mhi_pdev->recovery_work);
+
+	return 0;
+}
+
+static const struct dev_pm_ops mhi_pci_pm_ops = {
+	SET_RUNTIME_PM_OPS(mhi_pci_runtime_suspend, mhi_pci_runtime_resume, NULL)
+#ifdef CONFIG_PM_SLEEP
+	.suspend = mhi_pci_suspend,
+	.resume = mhi_pci_resume,
+	.freeze = mhi_pci_freeze,
+	.thaw = mhi_pci_restore,
+	.poweroff = mhi_pci_freeze,
+	.restore = mhi_pci_restore,
+#endif
+};
+
+static struct pci_driver mhi_pci_driver = {
+	.name = "mhi-pci-generic",
+	.id_table = mhi_pci_id_table,
+	.probe = mhi_pci_probe,
+	.remove = mhi_pci_remove,
+	.shutdown = mhi_pci_shutdown,
+	.err_handler = &mhi_pci_err_handler,
+	.driver.pm = &mhi_pci_pm_ops
+};
+module_pci_driver(mhi_pci_driver);
+
+MODULE_AUTHOR("Loic Poulain <loic.poulain@linaro.org>");
+MODULE_DESCRIPTION("Modem Host Interface (MHI) PCI controller driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/bus/mhi/host/pm.c b/drivers/bus/mhi/host/pm.c
new file mode 100644
index 000000000..8a4362d75
--- /dev/null
+++ b/drivers/bus/mhi/host/pm.c
@@ -0,0 +1,1283 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2018-2020, The Linux Foundation. All rights reserved.
+ *
+ */
+
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/dma-direction.h>
+#include <linux/dma-mapping.h>
+#include <linux/interrupt.h>
+#include <linux/list.h>
+#include <linux/mhi.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/wait.h>
+#include "internal.h"
+
+/*
+ * Not all MHI state transitions are synchronous. 
Transitions like Linkdown, + * SYS_ERR, and shutdown can happen anytime asynchronously. This function will + * transition to a new state only if we're allowed to. + * + * Priority increases as we go down. For instance, from any state in L0, the + * transition can be made to states in L1, L2 and L3. A notable exception to + * this rule is state DISABLE. From DISABLE state we can only transition to + * POR state. Also, while in L2 state, user cannot jump back to previous + * L1 or L0 states. + * + * Valid transitions: + * L0: DISABLE <--> POR + * POR <--> POR + * POR -> M0 -> M2 --> M0 + * POR -> FW_DL_ERR + * FW_DL_ERR <--> FW_DL_ERR + * M0 <--> M0 + * M0 -> FW_DL_ERR + * M0 -> M3_ENTER -> M3 -> M3_EXIT --> M0 + * L1: SYS_ERR_DETECT -> SYS_ERR_PROCESS --> POR + * L2: SHUTDOWN_PROCESS -> LD_ERR_FATAL_DETECT + * SHUTDOWN_PROCESS -> DISABLE + * L3: LD_ERR_FATAL_DETECT <--> LD_ERR_FATAL_DETECT + * LD_ERR_FATAL_DETECT -> DISABLE + */ +static const struct mhi_pm_transitions dev_state_transitions[] = { + /* L0 States */ + { + MHI_PM_DISABLE, + MHI_PM_POR + }, + { + MHI_PM_POR, + MHI_PM_POR | MHI_PM_DISABLE | MHI_PM_M0 | + MHI_PM_SYS_ERR_DETECT | MHI_PM_SHUTDOWN_PROCESS | + MHI_PM_LD_ERR_FATAL_DETECT | MHI_PM_FW_DL_ERR + }, + { + MHI_PM_M0, + MHI_PM_M0 | MHI_PM_M2 | MHI_PM_M3_ENTER | + MHI_PM_SYS_ERR_DETECT | MHI_PM_SHUTDOWN_PROCESS | + MHI_PM_LD_ERR_FATAL_DETECT | MHI_PM_FW_DL_ERR + }, + { + MHI_PM_M2, + MHI_PM_M0 | MHI_PM_SYS_ERR_DETECT | MHI_PM_SHUTDOWN_PROCESS | + MHI_PM_LD_ERR_FATAL_DETECT + }, + { + MHI_PM_M3_ENTER, + MHI_PM_M3 | MHI_PM_SYS_ERR_DETECT | MHI_PM_SHUTDOWN_PROCESS | + MHI_PM_LD_ERR_FATAL_DETECT + }, + { + MHI_PM_M3, + MHI_PM_M3_EXIT | MHI_PM_SYS_ERR_DETECT | + MHI_PM_LD_ERR_FATAL_DETECT + }, + { + MHI_PM_M3_EXIT, + MHI_PM_M0 | MHI_PM_SYS_ERR_DETECT | MHI_PM_SHUTDOWN_PROCESS | + MHI_PM_LD_ERR_FATAL_DETECT + }, + { + MHI_PM_FW_DL_ERR, + MHI_PM_FW_DL_ERR | MHI_PM_SYS_ERR_DETECT | + MHI_PM_SHUTDOWN_PROCESS | MHI_PM_LD_ERR_FATAL_DETECT + }, + /* L1 States */ + { + MHI_PM_SYS_ERR_DETECT, + MHI_PM_SYS_ERR_PROCESS | MHI_PM_SHUTDOWN_PROCESS | + MHI_PM_LD_ERR_FATAL_DETECT + }, + { + MHI_PM_SYS_ERR_PROCESS, + MHI_PM_POR | MHI_PM_SHUTDOWN_PROCESS | + MHI_PM_LD_ERR_FATAL_DETECT + }, + /* L2 States */ + { + MHI_PM_SHUTDOWN_PROCESS, + MHI_PM_DISABLE | MHI_PM_LD_ERR_FATAL_DETECT + }, + /* L3 States */ + { + MHI_PM_LD_ERR_FATAL_DETECT, + MHI_PM_LD_ERR_FATAL_DETECT | MHI_PM_DISABLE + }, +}; + +enum mhi_pm_state __must_check mhi_tryset_pm_state(struct mhi_controller *mhi_cntrl, + enum mhi_pm_state state) +{ + unsigned long cur_state = mhi_cntrl->pm_state; + int index = find_last_bit(&cur_state, 32); + + if (unlikely(index >= ARRAY_SIZE(dev_state_transitions))) + return cur_state; + + if (unlikely(dev_state_transitions[index].from_state != cur_state)) + return cur_state; + + if (unlikely(!(dev_state_transitions[index].to_states & state))) + return cur_state; + + mhi_cntrl->pm_state = state; + return mhi_cntrl->pm_state; +} + +void mhi_set_mhi_state(struct mhi_controller *mhi_cntrl, enum mhi_state state) +{ + struct device *dev = &mhi_cntrl->mhi_dev->dev; + int ret; + + if (state == MHI_STATE_RESET) { + ret = mhi_write_reg_field(mhi_cntrl, mhi_cntrl->regs, MHICTRL, + MHICTRL_RESET_MASK, 1); + } else { + ret = mhi_write_reg_field(mhi_cntrl, mhi_cntrl->regs, MHICTRL, + MHICTRL_MHISTATE_MASK, state); + } + + if (ret) + dev_err(dev, "Failed to set MHI state to: %s\n", + mhi_state_str(state)); +} + +/* NOP for backward compatibility, host allowed to ring DB in M2 state */ +static void 
mhi_toggle_dev_wake_nop(struct mhi_controller *mhi_cntrl) +{ +} + +static void mhi_toggle_dev_wake(struct mhi_controller *mhi_cntrl) +{ + mhi_cntrl->wake_get(mhi_cntrl, false); + mhi_cntrl->wake_put(mhi_cntrl, true); +} + +/* Handle device ready state transition */ +int mhi_ready_state_transition(struct mhi_controller *mhi_cntrl) +{ + struct mhi_event *mhi_event; + enum mhi_pm_state cur_state; + struct device *dev = &mhi_cntrl->mhi_dev->dev; + u32 interval_us = 25000; /* poll register field every 25 milliseconds */ + int ret, i; + + /* Check if device entered error state */ + if (MHI_PM_IN_FATAL_STATE(mhi_cntrl->pm_state)) { + dev_err(dev, "Device link is not accessible\n"); + return -EIO; + } + + /* Wait for RESET to be cleared and READY bit to be set by the device */ + ret = mhi_poll_reg_field(mhi_cntrl, mhi_cntrl->regs, MHICTRL, + MHICTRL_RESET_MASK, 0, interval_us); + if (ret) { + dev_err(dev, "Device failed to clear MHI Reset\n"); + return ret; + } + + ret = mhi_poll_reg_field(mhi_cntrl, mhi_cntrl->regs, MHISTATUS, + MHISTATUS_READY_MASK, 1, interval_us); + if (ret) { + dev_err(dev, "Device failed to enter MHI Ready\n"); + return ret; + } + + dev_dbg(dev, "Device in READY State\n"); + write_lock_irq(&mhi_cntrl->pm_lock); + cur_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_POR); + mhi_cntrl->dev_state = MHI_STATE_READY; + write_unlock_irq(&mhi_cntrl->pm_lock); + + if (cur_state != MHI_PM_POR) { + dev_err(dev, "Error moving to state %s from %s\n", + to_mhi_pm_state_str(MHI_PM_POR), + to_mhi_pm_state_str(cur_state)); + return -EIO; + } + + read_lock_bh(&mhi_cntrl->pm_lock); + if (!MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) { + dev_err(dev, "Device registers not accessible\n"); + goto error_mmio; + } + + /* Configure MMIO registers */ + ret = mhi_init_mmio(mhi_cntrl); + if (ret) { + dev_err(dev, "Error configuring MMIO registers\n"); + goto error_mmio; + } + + /* Add elements to all SW event rings */ + mhi_event = mhi_cntrl->mhi_event; + for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) { + struct mhi_ring *ring = &mhi_event->ring; + + /* Skip if this is an offload or HW event */ + if (mhi_event->offload_ev || mhi_event->hw_ring) + continue; + + ring->wp = ring->base + ring->len - ring->el_size; + *ring->ctxt_wp = cpu_to_le64(ring->iommu_base + ring->len - ring->el_size); + /* Update all cores */ + smp_wmb(); + + /* Ring the event ring db */ + spin_lock_irq(&mhi_event->lock); + mhi_ring_er_db(mhi_event); + spin_unlock_irq(&mhi_event->lock); + } + + /* Set MHI to M0 state */ + mhi_set_mhi_state(mhi_cntrl, MHI_STATE_M0); + read_unlock_bh(&mhi_cntrl->pm_lock); + + return 0; + +error_mmio: + read_unlock_bh(&mhi_cntrl->pm_lock); + + return -EIO; +} + +int mhi_pm_m0_transition(struct mhi_controller *mhi_cntrl) +{ + enum mhi_pm_state cur_state; + struct mhi_chan *mhi_chan; + struct device *dev = &mhi_cntrl->mhi_dev->dev; + int i; + + write_lock_irq(&mhi_cntrl->pm_lock); + mhi_cntrl->dev_state = MHI_STATE_M0; + cur_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_M0); + write_unlock_irq(&mhi_cntrl->pm_lock); + if (unlikely(cur_state != MHI_PM_M0)) { + dev_err(dev, "Unable to transition to M0 state\n"); + return -EIO; + } + mhi_cntrl->M0++; + + /* Wake up the device */ + read_lock_bh(&mhi_cntrl->pm_lock); + mhi_cntrl->wake_get(mhi_cntrl, true); + + /* Ring all event rings and CMD ring only if we're in mission mode */ + if (MHI_IN_MISSION_MODE(mhi_cntrl->ee)) { + struct mhi_event *mhi_event = mhi_cntrl->mhi_event; + struct mhi_cmd *mhi_cmd = + &mhi_cntrl->mhi_cmd[PRIMARY_CMD_RING]; + + for 
(i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) {
+			if (mhi_event->offload_ev)
+				continue;
+
+			spin_lock_irq(&mhi_event->lock);
+			mhi_ring_er_db(mhi_event);
+			spin_unlock_irq(&mhi_event->lock);
+		}
+
+		/* Only ring primary cmd ring if ring is not empty */
+		spin_lock_irq(&mhi_cmd->lock);
+		if (mhi_cmd->ring.rp != mhi_cmd->ring.wp)
+			mhi_ring_cmd_db(mhi_cntrl, mhi_cmd);
+		spin_unlock_irq(&mhi_cmd->lock);
+	}
+
+	/* Ring channel DB registers */
+	mhi_chan = mhi_cntrl->mhi_chan;
+	for (i = 0; i < mhi_cntrl->max_chan; i++, mhi_chan++) {
+		struct mhi_ring *tre_ring = &mhi_chan->tre_ring;
+
+		if (mhi_chan->db_cfg.reset_req) {
+			write_lock_irq(&mhi_chan->lock);
+			mhi_chan->db_cfg.db_mode = true;
+			write_unlock_irq(&mhi_chan->lock);
+		}
+
+		read_lock_irq(&mhi_chan->lock);
+
+		/* Only ring DB if ring is not empty */
+		if (tre_ring->base && tre_ring->wp != tre_ring->rp &&
+		    mhi_chan->ch_state == MHI_CH_STATE_ENABLED)
+			mhi_ring_chan_db(mhi_cntrl, mhi_chan);
+		read_unlock_irq(&mhi_chan->lock);
+	}
+
+	mhi_cntrl->wake_put(mhi_cntrl, false);
+	read_unlock_bh(&mhi_cntrl->pm_lock);
+	wake_up_all(&mhi_cntrl->state_event);
+
+	return 0;
+}
+
+/*
+ * After receiving the MHI state change event from the device indicating the
+ * transition to M1 state, the host can transition the device to M2 state
+ * to keep it in a low power state.
+ */
+void mhi_pm_m1_transition(struct mhi_controller *mhi_cntrl)
+{
+	enum mhi_pm_state state;
+	struct device *dev = &mhi_cntrl->mhi_dev->dev;
+
+	write_lock_irq(&mhi_cntrl->pm_lock);
+	state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_M2);
+	if (state == MHI_PM_M2) {
+		mhi_set_mhi_state(mhi_cntrl, MHI_STATE_M2);
+		mhi_cntrl->dev_state = MHI_STATE_M2;
+
+		write_unlock_irq(&mhi_cntrl->pm_lock);
+
+		mhi_cntrl->M2++;
+		wake_up_all(&mhi_cntrl->state_event);
+
+		/* If there are any pending resources, exit M2 immediately */
+		if (unlikely(atomic_read(&mhi_cntrl->pending_pkts) ||
+			     atomic_read(&mhi_cntrl->dev_wake))) {
+			dev_dbg(dev,
+				"Exiting M2, pending_pkts: %d dev_wake: %d\n",
+				atomic_read(&mhi_cntrl->pending_pkts),
+				atomic_read(&mhi_cntrl->dev_wake));
+			read_lock_bh(&mhi_cntrl->pm_lock);
+			mhi_cntrl->wake_get(mhi_cntrl, true);
+			mhi_cntrl->wake_put(mhi_cntrl, true);
+			read_unlock_bh(&mhi_cntrl->pm_lock);
+		} else {
+			mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_IDLE);
+		}
+	} else {
+		write_unlock_irq(&mhi_cntrl->pm_lock);
+	}
+}
+
+/* MHI M3 completion handler */
+int mhi_pm_m3_transition(struct mhi_controller *mhi_cntrl)
+{
+	enum mhi_pm_state state;
+	struct device *dev = &mhi_cntrl->mhi_dev->dev;
+
+	write_lock_irq(&mhi_cntrl->pm_lock);
+	mhi_cntrl->dev_state = MHI_STATE_M3;
+	state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_M3);
+	write_unlock_irq(&mhi_cntrl->pm_lock);
+	if (state != MHI_PM_M3) {
+		dev_err(dev, "Unable to transition to M3 state\n");
+		return -EIO;
+	}
+
+	mhi_cntrl->M3++;
+	wake_up_all(&mhi_cntrl->state_event);
+
+	return 0;
+}
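+/*
+ * Illustrative note, not part of the driver: the M-state handlers above
+ * implement, schematically, the following low-power flow:
+ *
+ *	M0 --(device idle, M1 event)---> mhi_pm_m1_transition() --> M2
+ *	M2 --(pending pkts / dev_wake)-> wake_get() + wake_put() --> M0
+ *	M0 --(mhi_pm_suspend())--------> M3_ENTER --> M3
+ */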
+
+/* Handle device Mission Mode transition */
+static int mhi_pm_mission_mode_transition(struct mhi_controller *mhi_cntrl)
+{
+	struct mhi_event *mhi_event;
+	struct device *dev = &mhi_cntrl->mhi_dev->dev;
+	enum mhi_ee_type ee = MHI_EE_MAX, current_ee = mhi_cntrl->ee;
+	int i, ret;
+
+	dev_dbg(dev, "Processing Mission Mode transition\n");
+
+	write_lock_irq(&mhi_cntrl->pm_lock);
+	if (MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state))
+		ee = mhi_get_exec_env(mhi_cntrl);
+
+	if (!MHI_IN_MISSION_MODE(ee)) {
+		mhi_cntrl->pm_state = MHI_PM_LD_ERR_FATAL_DETECT;
+		write_unlock_irq(&mhi_cntrl->pm_lock);
+		wake_up_all(&mhi_cntrl->state_event);
+		return -EIO;
+	}
+	mhi_cntrl->ee = ee;
+	write_unlock_irq(&mhi_cntrl->pm_lock);
+
+	wake_up_all(&mhi_cntrl->state_event);
+
+	device_for_each_child(&mhi_cntrl->mhi_dev->dev, &current_ee,
+			      mhi_destroy_device);
+	mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_EE_MISSION_MODE);
+
+	/* Force MHI to be in M0 state before continuing */
+	ret = __mhi_device_get_sync(mhi_cntrl);
+	if (ret)
+		return ret;
+
+	read_lock_bh(&mhi_cntrl->pm_lock);
+
+	if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) {
+		ret = -EIO;
+		goto error_mission_mode;
+	}
+
+	/* Add elements to all HW event rings */
+	mhi_event = mhi_cntrl->mhi_event;
+	for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) {
+		struct mhi_ring *ring = &mhi_event->ring;
+
+		if (mhi_event->offload_ev || !mhi_event->hw_ring)
+			continue;
+
+		ring->wp = ring->base + ring->len - ring->el_size;
+		*ring->ctxt_wp = cpu_to_le64(ring->iommu_base + ring->len - ring->el_size);
+		/* Update to all cores */
+		smp_wmb();
+
+		spin_lock_irq(&mhi_event->lock);
+		if (MHI_DB_ACCESS_VALID(mhi_cntrl))
+			mhi_ring_er_db(mhi_event);
+		spin_unlock_irq(&mhi_event->lock);
+	}
+
+	read_unlock_bh(&mhi_cntrl->pm_lock);
+
+	/*
+	 * The MHI devices are only created when the client device switches its
+	 * Execution Environment (EE) to either SBL or AMSS states
+	 */
+	mhi_create_devices(mhi_cntrl);
+
+	read_lock_bh(&mhi_cntrl->pm_lock);
+
+error_mission_mode:
+	mhi_cntrl->wake_put(mhi_cntrl, false);
+	read_unlock_bh(&mhi_cntrl->pm_lock);
+
+	return ret;
+}
+
+/* Handle shutdown transitions */
+static void mhi_pm_disable_transition(struct mhi_controller *mhi_cntrl)
+{
+	enum mhi_pm_state cur_state;
+	struct mhi_event *mhi_event;
+	struct mhi_cmd_ctxt *cmd_ctxt;
+	struct mhi_cmd *mhi_cmd;
+	struct mhi_event_ctxt *er_ctxt;
+	struct device *dev = &mhi_cntrl->mhi_dev->dev;
+	int ret, i;
+
+	dev_dbg(dev, "Processing disable transition with PM state: %s\n",
+		to_mhi_pm_state_str(mhi_cntrl->pm_state));
+
+	mutex_lock(&mhi_cntrl->pm_mutex);
+
+	/* Trigger MHI RESET so that the device will not access host memory */
+	if (!MHI_PM_IN_FATAL_STATE(mhi_cntrl->pm_state)) {
+		/* Skip MHI RESET if in RDDM state */
+		if (mhi_cntrl->rddm_image && mhi_get_exec_env(mhi_cntrl) == MHI_EE_RDDM)
+			goto skip_mhi_reset;
+
+		dev_dbg(dev, "Triggering MHI Reset in device\n");
+		mhi_set_mhi_state(mhi_cntrl, MHI_STATE_RESET);
+
+		/* Wait for the reset bit to be cleared by the device */
+		ret = mhi_poll_reg_field(mhi_cntrl, mhi_cntrl->regs, MHICTRL,
+					 MHICTRL_RESET_MASK, 0, 25000);
+		if (ret)
+			dev_err(dev, "Device failed to clear MHI Reset\n");
+
+		/*
+		 * Device will clear BHI_INTVEC as a part of RESET processing,
+		 * hence re-program it
+		 */
+		mhi_write_reg(mhi_cntrl, mhi_cntrl->bhi, BHI_INTVEC, 0);
+
+		if (!MHI_IN_PBL(mhi_get_exec_env(mhi_cntrl))) {
+			/* wait for ready to be set */
+			ret = mhi_poll_reg_field(mhi_cntrl, mhi_cntrl->regs,
+						 MHISTATUS,
+						 MHISTATUS_READY_MASK, 1, 25000);
+			if (ret)
+				dev_err(dev, "Device failed to enter READY state\n");
+		}
+	}
+
+skip_mhi_reset:
+	dev_dbg(dev,
+		"Waiting for all pending event ring processing to complete\n");
+	mhi_event = mhi_cntrl->mhi_event;
+	for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) {
+		if (mhi_event->offload_ev)
+			continue;
+		disable_irq(mhi_cntrl->irq[mhi_event->irq]);
+		tasklet_kill(&mhi_event->task);
+	}
+
+	/* Release lock and wait for all pending threads to complete */
+	mutex_unlock(&mhi_cntrl->pm_mutex);
+	dev_dbg(dev, "Waiting for all pending threads to complete\n");
+	wake_up_all(&mhi_cntrl->state_event);
+
+	dev_dbg(dev, "Reset all active 
channels and remove MHI devices\n"); + device_for_each_child(&mhi_cntrl->mhi_dev->dev, NULL, mhi_destroy_device); + + mutex_lock(&mhi_cntrl->pm_mutex); + + WARN_ON(atomic_read(&mhi_cntrl->dev_wake)); + WARN_ON(atomic_read(&mhi_cntrl->pending_pkts)); + + /* Reset the ev rings and cmd rings */ + dev_dbg(dev, "Resetting EV CTXT and CMD CTXT\n"); + mhi_cmd = mhi_cntrl->mhi_cmd; + cmd_ctxt = mhi_cntrl->mhi_ctxt->cmd_ctxt; + for (i = 0; i < NR_OF_CMD_RINGS; i++, mhi_cmd++, cmd_ctxt++) { + struct mhi_ring *ring = &mhi_cmd->ring; + + ring->rp = ring->base; + ring->wp = ring->base; + cmd_ctxt->rp = cmd_ctxt->rbase; + cmd_ctxt->wp = cmd_ctxt->rbase; + } + + mhi_event = mhi_cntrl->mhi_event; + er_ctxt = mhi_cntrl->mhi_ctxt->er_ctxt; + for (i = 0; i < mhi_cntrl->total_ev_rings; i++, er_ctxt++, + mhi_event++) { + struct mhi_ring *ring = &mhi_event->ring; + + /* Skip offload events */ + if (mhi_event->offload_ev) + continue; + + ring->rp = ring->base; + ring->wp = ring->base; + er_ctxt->rp = er_ctxt->rbase; + er_ctxt->wp = er_ctxt->rbase; + } + + /* Move to disable state */ + write_lock_irq(&mhi_cntrl->pm_lock); + cur_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_DISABLE); + write_unlock_irq(&mhi_cntrl->pm_lock); + if (unlikely(cur_state != MHI_PM_DISABLE)) + dev_err(dev, "Error moving from PM state: %s to: %s\n", + to_mhi_pm_state_str(cur_state), + to_mhi_pm_state_str(MHI_PM_DISABLE)); + + dev_dbg(dev, "Exiting with PM state: %s, MHI state: %s\n", + to_mhi_pm_state_str(mhi_cntrl->pm_state), + mhi_state_str(mhi_cntrl->dev_state)); + + mutex_unlock(&mhi_cntrl->pm_mutex); +} + +/* Handle system error transitions */ +static void mhi_pm_sys_error_transition(struct mhi_controller *mhi_cntrl) +{ + enum mhi_pm_state cur_state, prev_state; + enum dev_st_transition next_state; + struct mhi_event *mhi_event; + struct mhi_cmd_ctxt *cmd_ctxt; + struct mhi_cmd *mhi_cmd; + struct mhi_event_ctxt *er_ctxt; + struct device *dev = &mhi_cntrl->mhi_dev->dev; + int ret, i; + + dev_dbg(dev, "Transitioning from PM state: %s to: %s\n", + to_mhi_pm_state_str(mhi_cntrl->pm_state), + to_mhi_pm_state_str(MHI_PM_SYS_ERR_PROCESS)); + + /* We must notify MHI control driver so it can clean up first */ + mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_SYS_ERROR); + + mutex_lock(&mhi_cntrl->pm_mutex); + write_lock_irq(&mhi_cntrl->pm_lock); + prev_state = mhi_cntrl->pm_state; + cur_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_SYS_ERR_PROCESS); + write_unlock_irq(&mhi_cntrl->pm_lock); + + if (cur_state != MHI_PM_SYS_ERR_PROCESS) { + dev_err(dev, "Failed to transition from PM state: %s to: %s\n", + to_mhi_pm_state_str(cur_state), + to_mhi_pm_state_str(MHI_PM_SYS_ERR_PROCESS)); + goto exit_sys_error_transition; + } + + mhi_cntrl->ee = MHI_EE_DISABLE_TRANSITION; + mhi_cntrl->dev_state = MHI_STATE_RESET; + + /* Wake up threads waiting for state transition */ + wake_up_all(&mhi_cntrl->state_event); + + /* Trigger MHI RESET so that the device will not access host memory */ + if (MHI_REG_ACCESS_VALID(prev_state)) { + u32 in_reset = -1; + unsigned long timeout = msecs_to_jiffies(mhi_cntrl->timeout_ms); + + dev_dbg(dev, "Triggering MHI Reset in device\n"); + mhi_set_mhi_state(mhi_cntrl, MHI_STATE_RESET); + + /* Wait for the reset bit to be cleared by the device */ + ret = wait_event_timeout(mhi_cntrl->state_event, + mhi_read_reg_field(mhi_cntrl, + mhi_cntrl->regs, + MHICTRL, + MHICTRL_RESET_MASK, + &in_reset) || + !in_reset, timeout); + if (!ret || in_reset) { + dev_err(dev, "Device failed to exit MHI Reset state\n"); + goto exit_sys_error_transition; + } 
+ + /* + * Device will clear BHI_INTVEC as a part of RESET processing, + * hence re-program it + */ + mhi_write_reg(mhi_cntrl, mhi_cntrl->bhi, BHI_INTVEC, 0); + } + + dev_dbg(dev, + "Waiting for all pending event ring processing to complete\n"); + mhi_event = mhi_cntrl->mhi_event; + for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) { + if (mhi_event->offload_ev) + continue; + tasklet_kill(&mhi_event->task); + } + + /* Release lock and wait for all pending threads to complete */ + mutex_unlock(&mhi_cntrl->pm_mutex); + dev_dbg(dev, "Waiting for all pending threads to complete\n"); + wake_up_all(&mhi_cntrl->state_event); + + dev_dbg(dev, "Reset all active channels and remove MHI devices\n"); + device_for_each_child(&mhi_cntrl->mhi_dev->dev, NULL, mhi_destroy_device); + + mutex_lock(&mhi_cntrl->pm_mutex); + + WARN_ON(atomic_read(&mhi_cntrl->dev_wake)); + WARN_ON(atomic_read(&mhi_cntrl->pending_pkts)); + + /* Reset the ev rings and cmd rings */ + dev_dbg(dev, "Resetting EV CTXT and CMD CTXT\n"); + mhi_cmd = mhi_cntrl->mhi_cmd; + cmd_ctxt = mhi_cntrl->mhi_ctxt->cmd_ctxt; + for (i = 0; i < NR_OF_CMD_RINGS; i++, mhi_cmd++, cmd_ctxt++) { + struct mhi_ring *ring = &mhi_cmd->ring; + + ring->rp = ring->base; + ring->wp = ring->base; + cmd_ctxt->rp = cmd_ctxt->rbase; + cmd_ctxt->wp = cmd_ctxt->rbase; + } + + mhi_event = mhi_cntrl->mhi_event; + er_ctxt = mhi_cntrl->mhi_ctxt->er_ctxt; + for (i = 0; i < mhi_cntrl->total_ev_rings; i++, er_ctxt++, + mhi_event++) { + struct mhi_ring *ring = &mhi_event->ring; + + /* Skip offload events */ + if (mhi_event->offload_ev) + continue; + + ring->rp = ring->base; + ring->wp = ring->base; + er_ctxt->rp = er_ctxt->rbase; + er_ctxt->wp = er_ctxt->rbase; + } + + /* Transition to next state */ + if (MHI_IN_PBL(mhi_get_exec_env(mhi_cntrl))) { + write_lock_irq(&mhi_cntrl->pm_lock); + cur_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_POR); + write_unlock_irq(&mhi_cntrl->pm_lock); + if (cur_state != MHI_PM_POR) { + dev_err(dev, "Error moving to state %s from %s\n", + to_mhi_pm_state_str(MHI_PM_POR), + to_mhi_pm_state_str(cur_state)); + goto exit_sys_error_transition; + } + next_state = DEV_ST_TRANSITION_PBL; + } else { + next_state = DEV_ST_TRANSITION_READY; + } + + mhi_queue_state_transition(mhi_cntrl, next_state); + +exit_sys_error_transition: + dev_dbg(dev, "Exiting with PM state: %s, MHI state: %s\n", + to_mhi_pm_state_str(mhi_cntrl->pm_state), + mhi_state_str(mhi_cntrl->dev_state)); + + mutex_unlock(&mhi_cntrl->pm_mutex); +} + +/* Queue a new work item and schedule work */ +int mhi_queue_state_transition(struct mhi_controller *mhi_cntrl, + enum dev_st_transition state) +{ + struct state_transition *item = kmalloc(sizeof(*item), GFP_ATOMIC); + unsigned long flags; + + if (!item) + return -ENOMEM; + + item->state = state; + spin_lock_irqsave(&mhi_cntrl->transition_lock, flags); + list_add_tail(&item->node, &mhi_cntrl->transition_list); + spin_unlock_irqrestore(&mhi_cntrl->transition_lock, flags); + + queue_work(mhi_cntrl->hiprio_wq, &mhi_cntrl->st_worker); + + return 0; +} + +/* SYS_ERR worker */ +void mhi_pm_sys_err_handler(struct mhi_controller *mhi_cntrl) +{ + struct device *dev = &mhi_cntrl->mhi_dev->dev; + + /* skip if controller supports RDDM */ + if (mhi_cntrl->rddm_image) { + dev_dbg(dev, "Controller supports RDDM, skip SYS_ERROR\n"); + return; + } + + mhi_queue_state_transition(mhi_cntrl, DEV_ST_TRANSITION_SYS_ERR); +} + +/* Device State Transition worker */ +void mhi_pm_st_worker(struct work_struct *work) +{ + struct state_transition *itr, *tmp; + 
LIST_HEAD(head);
+	struct mhi_controller *mhi_cntrl = container_of(work,
+							struct mhi_controller,
+							st_worker);
+	struct device *dev = &mhi_cntrl->mhi_dev->dev;
+
+	spin_lock_irq(&mhi_cntrl->transition_lock);
+	list_splice_tail_init(&mhi_cntrl->transition_list, &head);
+	spin_unlock_irq(&mhi_cntrl->transition_lock);
+
+	list_for_each_entry_safe(itr, tmp, &head, node) {
+		list_del(&itr->node);
+		dev_dbg(dev, "Handling state transition: %s\n",
+			TO_DEV_STATE_TRANS_STR(itr->state));
+
+		switch (itr->state) {
+		case DEV_ST_TRANSITION_PBL:
+			write_lock_irq(&mhi_cntrl->pm_lock);
+			if (MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state))
+				mhi_cntrl->ee = mhi_get_exec_env(mhi_cntrl);
+			write_unlock_irq(&mhi_cntrl->pm_lock);
+			mhi_fw_load_handler(mhi_cntrl);
+			break;
+		case DEV_ST_TRANSITION_SBL:
+			write_lock_irq(&mhi_cntrl->pm_lock);
+			mhi_cntrl->ee = MHI_EE_SBL;
+			write_unlock_irq(&mhi_cntrl->pm_lock);
+			/*
+			 * The MHI devices are only created when the client
+			 * device switches its Execution Environment (EE) to
+			 * either SBL or AMSS states
+			 */
+			mhi_create_devices(mhi_cntrl);
+			if (mhi_cntrl->fbc_download)
+				mhi_download_amss_image(mhi_cntrl);
+			break;
+		case DEV_ST_TRANSITION_MISSION_MODE:
+			mhi_pm_mission_mode_transition(mhi_cntrl);
+			break;
+		case DEV_ST_TRANSITION_FP:
+			write_lock_irq(&mhi_cntrl->pm_lock);
+			mhi_cntrl->ee = MHI_EE_FP;
+			write_unlock_irq(&mhi_cntrl->pm_lock);
+			mhi_create_devices(mhi_cntrl);
+			break;
+		case DEV_ST_TRANSITION_READY:
+			mhi_ready_state_transition(mhi_cntrl);
+			break;
+		case DEV_ST_TRANSITION_SYS_ERR:
+			mhi_pm_sys_error_transition(mhi_cntrl);
+			break;
+		case DEV_ST_TRANSITION_DISABLE:
+			mhi_pm_disable_transition(mhi_cntrl);
+			break;
+		default:
+			break;
+		}
+		kfree(itr);
+	}
+}
+
+int mhi_pm_suspend(struct mhi_controller *mhi_cntrl)
+{
+	struct mhi_chan *itr, *tmp;
+	struct device *dev = &mhi_cntrl->mhi_dev->dev;
+	enum mhi_pm_state new_state;
+	int ret;
+
+	if (mhi_cntrl->pm_state == MHI_PM_DISABLE)
+		return -EINVAL;
+
+	if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state))
+		return -EIO;
+
+	/* Return busy if there are any pending resources */
+	if (atomic_read(&mhi_cntrl->dev_wake) ||
+	    atomic_read(&mhi_cntrl->pending_pkts))
+		return -EBUSY;
+
+	/* Take MHI out of M2 state */
+	read_lock_bh(&mhi_cntrl->pm_lock);
+	mhi_cntrl->wake_get(mhi_cntrl, false);
+	read_unlock_bh(&mhi_cntrl->pm_lock);
+
+	ret = wait_event_timeout(mhi_cntrl->state_event,
+				 mhi_cntrl->dev_state == MHI_STATE_M0 ||
+				 mhi_cntrl->dev_state == MHI_STATE_M1 ||
+				 MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state),
+				 msecs_to_jiffies(mhi_cntrl->timeout_ms));
+
+	read_lock_bh(&mhi_cntrl->pm_lock);
+	mhi_cntrl->wake_put(mhi_cntrl, false);
+	read_unlock_bh(&mhi_cntrl->pm_lock);
+
+	if (!ret || MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) {
+		dev_err(dev,
+			"Could not enter M0/M1 state\n");
+		return -EIO;
+	}
+
+	write_lock_irq(&mhi_cntrl->pm_lock);
+
+	if (atomic_read(&mhi_cntrl->dev_wake) ||
+	    atomic_read(&mhi_cntrl->pending_pkts)) {
+		write_unlock_irq(&mhi_cntrl->pm_lock);
+		return -EBUSY;
+	}
+
+	dev_dbg(dev, "Allowing M3 transition\n");
+	new_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_M3_ENTER);
+	if (new_state != MHI_PM_M3_ENTER) {
+		write_unlock_irq(&mhi_cntrl->pm_lock);
+		dev_err(dev,
+			"Error setting to PM state: %s from: %s\n",
+			to_mhi_pm_state_str(MHI_PM_M3_ENTER),
+			to_mhi_pm_state_str(mhi_cntrl->pm_state));
+		return -EIO;
+	}
+
+	/* Set MHI to M3 and wait for completion */
+	mhi_set_mhi_state(mhi_cntrl, MHI_STATE_M3);
+	write_unlock_irq(&mhi_cntrl->pm_lock);
+	dev_dbg(dev, "Waiting for M3 completion\n");
+
+
ret = wait_event_timeout(mhi_cntrl->state_event, + mhi_cntrl->dev_state == MHI_STATE_M3 || + MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state), + msecs_to_jiffies(mhi_cntrl->timeout_ms)); + + if (!ret || MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) { + dev_err(dev, + "Did not enter M3 state, MHI state: %s, PM state: %s\n", + mhi_state_str(mhi_cntrl->dev_state), + to_mhi_pm_state_str(mhi_cntrl->pm_state)); + return -EIO; + } + + /* Notify clients about entering LPM */ + list_for_each_entry_safe(itr, tmp, &mhi_cntrl->lpm_chans, node) { + mutex_lock(&itr->mutex); + if (itr->mhi_dev) + mhi_notify(itr->mhi_dev, MHI_CB_LPM_ENTER); + mutex_unlock(&itr->mutex); + } + + return 0; +} +EXPORT_SYMBOL_GPL(mhi_pm_suspend); + +static int __mhi_pm_resume(struct mhi_controller *mhi_cntrl, bool force) +{ + struct mhi_chan *itr, *tmp; + struct device *dev = &mhi_cntrl->mhi_dev->dev; + enum mhi_pm_state cur_state; + int ret; + + dev_dbg(dev, "Entered with PM state: %s, MHI state: %s\n", + to_mhi_pm_state_str(mhi_cntrl->pm_state), + mhi_state_str(mhi_cntrl->dev_state)); + + if (mhi_cntrl->pm_state == MHI_PM_DISABLE) + return 0; + + if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) + return -EIO; + + if (mhi_get_mhi_state(mhi_cntrl) != MHI_STATE_M3) { + dev_warn(dev, "Resuming from non M3 state (%s)\n", + mhi_state_str(mhi_get_mhi_state(mhi_cntrl))); + if (!force) + return -EINVAL; + } + + /* Notify clients about exiting LPM */ + list_for_each_entry_safe(itr, tmp, &mhi_cntrl->lpm_chans, node) { + mutex_lock(&itr->mutex); + if (itr->mhi_dev) + mhi_notify(itr->mhi_dev, MHI_CB_LPM_EXIT); + mutex_unlock(&itr->mutex); + } + + write_lock_irq(&mhi_cntrl->pm_lock); + cur_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_M3_EXIT); + if (cur_state != MHI_PM_M3_EXIT) { + write_unlock_irq(&mhi_cntrl->pm_lock); + dev_info(dev, + "Error setting to PM state: %s from: %s\n", + to_mhi_pm_state_str(MHI_PM_M3_EXIT), + to_mhi_pm_state_str(mhi_cntrl->pm_state)); + return -EIO; + } + + /* Set MHI to M0 and wait for completion */ + mhi_set_mhi_state(mhi_cntrl, MHI_STATE_M0); + write_unlock_irq(&mhi_cntrl->pm_lock); + + ret = wait_event_timeout(mhi_cntrl->state_event, + mhi_cntrl->dev_state == MHI_STATE_M0 || + mhi_cntrl->dev_state == MHI_STATE_M2 || + MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state), + msecs_to_jiffies(mhi_cntrl->timeout_ms)); + + if (!ret || MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) { + dev_err(dev, + "Did not enter M0 state, MHI state: %s, PM state: %s\n", + mhi_state_str(mhi_cntrl->dev_state), + to_mhi_pm_state_str(mhi_cntrl->pm_state)); + return -EIO; + } + + return 0; +} + +int mhi_pm_resume(struct mhi_controller *mhi_cntrl) +{ + return __mhi_pm_resume(mhi_cntrl, false); +} +EXPORT_SYMBOL_GPL(mhi_pm_resume); + +int mhi_pm_resume_force(struct mhi_controller *mhi_cntrl) +{ + return __mhi_pm_resume(mhi_cntrl, true); +} +EXPORT_SYMBOL_GPL(mhi_pm_resume_force); + +int __mhi_device_get_sync(struct mhi_controller *mhi_cntrl) +{ + int ret; + + /* Wake up the device */ + read_lock_bh(&mhi_cntrl->pm_lock); + if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) { + read_unlock_bh(&mhi_cntrl->pm_lock); + return -EIO; + } + mhi_cntrl->wake_get(mhi_cntrl, true); + if (MHI_PM_IN_SUSPEND_STATE(mhi_cntrl->pm_state)) + mhi_trigger_resume(mhi_cntrl); + read_unlock_bh(&mhi_cntrl->pm_lock); + + ret = wait_event_timeout(mhi_cntrl->state_event, + mhi_cntrl->pm_state == MHI_PM_M0 || + MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state), + msecs_to_jiffies(mhi_cntrl->timeout_ms)); + + if (!ret || MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) { + 
read_lock_bh(&mhi_cntrl->pm_lock); + mhi_cntrl->wake_put(mhi_cntrl, false); + read_unlock_bh(&mhi_cntrl->pm_lock); + return -EIO; + } + + return 0; +} + +/* Assert device wake db */ +static void mhi_assert_dev_wake(struct mhi_controller *mhi_cntrl, bool force) +{ + unsigned long flags; + + /* + * If force flag is set, then increment the wake count value and + * ring wake db + */ + if (unlikely(force)) { + spin_lock_irqsave(&mhi_cntrl->wlock, flags); + atomic_inc(&mhi_cntrl->dev_wake); + if (MHI_WAKE_DB_FORCE_SET_VALID(mhi_cntrl->pm_state) && + !mhi_cntrl->wake_set) { + mhi_write_db(mhi_cntrl, mhi_cntrl->wake_db, 1); + mhi_cntrl->wake_set = true; + } + spin_unlock_irqrestore(&mhi_cntrl->wlock, flags); + } else { + /* + * If resources are already requested, then just increment + * the wake count value and return + */ + if (likely(atomic_add_unless(&mhi_cntrl->dev_wake, 1, 0))) + return; + + spin_lock_irqsave(&mhi_cntrl->wlock, flags); + if ((atomic_inc_return(&mhi_cntrl->dev_wake) == 1) && + MHI_WAKE_DB_SET_VALID(mhi_cntrl->pm_state) && + !mhi_cntrl->wake_set) { + mhi_write_db(mhi_cntrl, mhi_cntrl->wake_db, 1); + mhi_cntrl->wake_set = true; + } + spin_unlock_irqrestore(&mhi_cntrl->wlock, flags); + } +} + +/* De-assert device wake db */ +static void mhi_deassert_dev_wake(struct mhi_controller *mhi_cntrl, + bool override) +{ + unsigned long flags; + + /* + * Only continue if there is a single resource, else just decrement + * and return + */ + if (likely(atomic_add_unless(&mhi_cntrl->dev_wake, -1, 1))) + return; + + spin_lock_irqsave(&mhi_cntrl->wlock, flags); + if ((atomic_dec_return(&mhi_cntrl->dev_wake) == 0) && + MHI_WAKE_DB_CLEAR_VALID(mhi_cntrl->pm_state) && !override && + mhi_cntrl->wake_set) { + mhi_write_db(mhi_cntrl, mhi_cntrl->wake_db, 0); + mhi_cntrl->wake_set = false; + } + spin_unlock_irqrestore(&mhi_cntrl->wlock, flags); +} + +int mhi_async_power_up(struct mhi_controller *mhi_cntrl) +{ + struct mhi_event *mhi_event = mhi_cntrl->mhi_event; + enum mhi_state state; + enum mhi_ee_type current_ee; + enum dev_st_transition next_state; + struct device *dev = &mhi_cntrl->mhi_dev->dev; + u32 interval_us = 25000; /* poll register field every 25 milliseconds */ + int ret, i; + + dev_info(dev, "Requested to power ON\n"); + + /* Supply default wake routines if not provided by controller driver */ + if (!mhi_cntrl->wake_get || !mhi_cntrl->wake_put || + !mhi_cntrl->wake_toggle) { + mhi_cntrl->wake_get = mhi_assert_dev_wake; + mhi_cntrl->wake_put = mhi_deassert_dev_wake; + mhi_cntrl->wake_toggle = (mhi_cntrl->db_access & MHI_PM_M2) ? 
+			mhi_toggle_dev_wake_nop : mhi_toggle_dev_wake;
+	}
+
+	mutex_lock(&mhi_cntrl->pm_mutex);
+	mhi_cntrl->pm_state = MHI_PM_DISABLE;
+
+	/* Setup BHI INTVEC */
+	write_lock_irq(&mhi_cntrl->pm_lock);
+	mhi_write_reg(mhi_cntrl, mhi_cntrl->bhi, BHI_INTVEC, 0);
+	mhi_cntrl->pm_state = MHI_PM_POR;
+	mhi_cntrl->ee = MHI_EE_MAX;
+	current_ee = mhi_get_exec_env(mhi_cntrl);
+	write_unlock_irq(&mhi_cntrl->pm_lock);
+
+	/* Confirm that the device is in a valid exec env */
+	if (!MHI_POWER_UP_CAPABLE(current_ee)) {
+		dev_err(dev, "%s is not a valid EE for power on\n",
+			TO_MHI_EXEC_STR(current_ee));
+		ret = -EIO;
+		goto error_exit;
+	}
+
+	state = mhi_get_mhi_state(mhi_cntrl);
+	dev_dbg(dev, "Attempting power on with EE: %s, state: %s\n",
+		TO_MHI_EXEC_STR(current_ee), mhi_state_str(state));
+
+	if (state == MHI_STATE_SYS_ERR) {
+		mhi_set_mhi_state(mhi_cntrl, MHI_STATE_RESET);
+		ret = mhi_poll_reg_field(mhi_cntrl, mhi_cntrl->regs, MHICTRL,
+					 MHICTRL_RESET_MASK, 0, interval_us);
+		if (ret) {
+			dev_info(dev, "Failed to reset MHI due to syserr state\n");
+			goto error_exit;
+		}
+
+		/*
+		 * device clears INTVEC as part of RESET processing,
+		 * re-program it
+		 */
+		mhi_write_reg(mhi_cntrl, mhi_cntrl->bhi, BHI_INTVEC, 0);
+	}
+
+	/* IRQs have been requested during probe, so we just need to enable them. */
+	enable_irq(mhi_cntrl->irq[0]);
+
+	for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) {
+		if (mhi_event->offload_ev)
+			continue;
+
+		enable_irq(mhi_cntrl->irq[mhi_event->irq]);
+	}
+
+	/* Transition to next state */
+	next_state = MHI_IN_PBL(current_ee) ?
+		DEV_ST_TRANSITION_PBL : DEV_ST_TRANSITION_READY;
+
+	mhi_queue_state_transition(mhi_cntrl, next_state);
+
+	mutex_unlock(&mhi_cntrl->pm_mutex);
+
+	dev_info(dev, "Power on setup success\n");
+
+	return 0;
+
+error_exit:
+	mhi_cntrl->pm_state = MHI_PM_DISABLE;
+	mutex_unlock(&mhi_cntrl->pm_mutex);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(mhi_async_power_up);
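+/*
+ * Illustrative note, not part of the driver: a controller driver (such as
+ * pci_generic.c above) typically pairs these entry points as:
+ *
+ *	mhi_prepare_for_power_up(mhi_cntrl);
+ *	mhi_sync_power_up(mhi_cntrl);		// or mhi_async_power_up()
+ *	...device in mission mode...
+ *	mhi_power_down(mhi_cntrl, true);
+ *	mhi_unprepare_after_power_down(mhi_cntrl);
+ */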
+
+void mhi_power_down(struct mhi_controller *mhi_cntrl, bool graceful)
+{
+	enum mhi_pm_state cur_state, transition_state;
+	struct device *dev = &mhi_cntrl->mhi_dev->dev;
+
+	mutex_lock(&mhi_cntrl->pm_mutex);
+	write_lock_irq(&mhi_cntrl->pm_lock);
+	cur_state = mhi_cntrl->pm_state;
+	if (cur_state == MHI_PM_DISABLE) {
+		write_unlock_irq(&mhi_cntrl->pm_lock);
+		mutex_unlock(&mhi_cntrl->pm_mutex);
+		return; /* Already powered down */
+	}
+
+	/* If it's not a graceful shutdown, force MHI to linkdown state */
+	transition_state = (graceful) ? MHI_PM_SHUTDOWN_PROCESS :
+			   MHI_PM_LD_ERR_FATAL_DETECT;
+
+	cur_state = mhi_tryset_pm_state(mhi_cntrl, transition_state);
+	if (cur_state != transition_state) {
+		dev_err(dev, "Failed to move to state: %s from: %s\n",
+			to_mhi_pm_state_str(transition_state),
+			to_mhi_pm_state_str(mhi_cntrl->pm_state));
+		/* Force link down or error fatal detected state */
+		mhi_cntrl->pm_state = MHI_PM_LD_ERR_FATAL_DETECT;
+	}
+
+	/* mark device inactive to avoid any further host processing */
+	mhi_cntrl->ee = MHI_EE_DISABLE_TRANSITION;
+	mhi_cntrl->dev_state = MHI_STATE_RESET;
+
+	wake_up_all(&mhi_cntrl->state_event);
+
+	write_unlock_irq(&mhi_cntrl->pm_lock);
+	mutex_unlock(&mhi_cntrl->pm_mutex);
+
+	mhi_queue_state_transition(mhi_cntrl, DEV_ST_TRANSITION_DISABLE);
+
+	/* Wait for shutdown to complete */
+	flush_work(&mhi_cntrl->st_worker);
+
+	disable_irq(mhi_cntrl->irq[0]);
+}
+EXPORT_SYMBOL_GPL(mhi_power_down);
+
+int mhi_sync_power_up(struct mhi_controller *mhi_cntrl)
+{
+	int ret = mhi_async_power_up(mhi_cntrl);
+
+	if (ret)
+		return ret;
+
+	wait_event_timeout(mhi_cntrl->state_event,
+			   MHI_IN_MISSION_MODE(mhi_cntrl->ee) ||
+			   MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state),
+			   msecs_to_jiffies(mhi_cntrl->timeout_ms));
+
+	ret = (MHI_IN_MISSION_MODE(mhi_cntrl->ee)) ? 0 : -ETIMEDOUT;
+	if (ret)
+		mhi_power_down(mhi_cntrl, false);
+
+	return ret;
+}
+EXPORT_SYMBOL(mhi_sync_power_up);
+
+int mhi_force_rddm_mode(struct mhi_controller *mhi_cntrl)
+{
+	struct device *dev = &mhi_cntrl->mhi_dev->dev;
+	int ret;
+
+	/* Check if device is already in RDDM */
+	if (mhi_cntrl->ee == MHI_EE_RDDM)
+		return 0;
+
+	dev_dbg(dev, "Triggering SYS_ERR to force RDDM state\n");
+	mhi_set_mhi_state(mhi_cntrl, MHI_STATE_SYS_ERR);
+
+	/* Wait for RDDM event */
+	ret = wait_event_timeout(mhi_cntrl->state_event,
+				 mhi_cntrl->ee == MHI_EE_RDDM,
+				 msecs_to_jiffies(mhi_cntrl->timeout_ms));
+	ret = ret ? 0 : -EIO;
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(mhi_force_rddm_mode);
+
+void mhi_device_get(struct mhi_device *mhi_dev)
+{
+	struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
+
+	mhi_dev->dev_wake++;
+	read_lock_bh(&mhi_cntrl->pm_lock);
+	if (MHI_PM_IN_SUSPEND_STATE(mhi_cntrl->pm_state))
+		mhi_trigger_resume(mhi_cntrl);
+
+	mhi_cntrl->wake_get(mhi_cntrl, true);
+	read_unlock_bh(&mhi_cntrl->pm_lock);
+}
+EXPORT_SYMBOL_GPL(mhi_device_get);
+
+int mhi_device_get_sync(struct mhi_device *mhi_dev)
+{
+	struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
+	int ret;
+
+	ret = __mhi_device_get_sync(mhi_cntrl);
+	if (!ret)
+		mhi_dev->dev_wake++;
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(mhi_device_get_sync);
+
+void mhi_device_put(struct mhi_device *mhi_dev)
+{
+	struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
+
+	mhi_dev->dev_wake--;
+	read_lock_bh(&mhi_cntrl->pm_lock);
+	if (MHI_PM_IN_SUSPEND_STATE(mhi_cntrl->pm_state))
+		mhi_trigger_resume(mhi_cntrl);
+
+	mhi_cntrl->wake_put(mhi_cntrl, false);
+	read_unlock_bh(&mhi_cntrl->pm_lock);
+}
+EXPORT_SYMBOL_GPL(mhi_device_put);