From 50ba0232fd5312410f1b65247e774244f89a628e Mon Sep 17 00:00:00 2001 From: Daniel Baumann Date: Sat, 18 May 2024 20:50:36 +0200 Subject: Merging upstream version 6.8.9. Signed-off-by: Daniel Baumann --- drivers/bus/mhi/ep/internal.h | 4 + drivers/bus/mhi/ep/main.c | 262 +++++++++++++++++++++++-------------- drivers/bus/mhi/ep/ring.c | 27 +++- drivers/bus/mhi/host/init.c | 2 + drivers/bus/mhi/host/internal.h | 11 +- drivers/bus/mhi/host/main.c | 5 +- drivers/bus/mhi/host/pci_generic.c | 22 ++++ drivers/bus/mhi/host/pm.c | 44 +++++-- 8 files changed, 260 insertions(+), 117 deletions(-) (limited to 'drivers/bus/mhi') diff --git a/drivers/bus/mhi/ep/internal.h b/drivers/bus/mhi/ep/internal.h index a2125fa5fe..577965f95f 100644 --- a/drivers/bus/mhi/ep/internal.h +++ b/drivers/bus/mhi/ep/internal.h @@ -126,6 +126,7 @@ struct mhi_ep_ring { union mhi_ep_ring_ctx *ring_ctx; struct mhi_ring_element *ring_cache; enum mhi_ep_ring_type type; + struct delayed_work intmodt_work; u64 rbase; size_t rd_offset; size_t wr_offset; @@ -135,7 +136,9 @@ struct mhi_ep_ring { u32 ch_id; u32 er_index; u32 irq_vector; + u32 intmodt; bool started; + bool irq_pending; }; struct mhi_ep_cmd { @@ -159,6 +162,7 @@ struct mhi_ep_chan { void (*xfer_cb)(struct mhi_ep_device *mhi_dev, struct mhi_result *result); enum mhi_ch_state state; enum dma_data_direction dir; + size_t rd_offset; u64 tre_loc; u32 tre_size; u32 tre_bytes_left; diff --git a/drivers/bus/mhi/ep/main.c b/drivers/bus/mhi/ep/main.c index 934cdbca08..65f1f6b9b5 100644 --- a/drivers/bus/mhi/ep/main.c +++ b/drivers/bus/mhi/ep/main.c @@ -54,11 +54,27 @@ static int mhi_ep_send_event(struct mhi_ep_cntrl *mhi_cntrl, u32 ring_idx, mutex_unlock(&mhi_cntrl->event_lock); /* - * Raise IRQ to host only if the BEI flag is not set in TRE. Host might - * set this flag for interrupt moderation as per MHI protocol. + * As per the MHI specification, section 4.3, Interrupt moderation: + * + * 1. If BEI flag is not set, cancel any pending intmodt work if started + * for the event ring and raise IRQ immediately. + * + * 2. If both BEI and intmodt are set, and if no IRQ is pending for the + * same event ring, start the IRQ delayed work as per the value of + * intmodt. If previous IRQ is pending, then do nothing as the pending + * IRQ is enough for the host to process the current event ring element. + * + * 3. If BEI is set and intmodt is not set, no need to raise IRQ. 
*/ - if (!bei) + if (!bei) { + if (READ_ONCE(ring->irq_pending)) + cancel_delayed_work(&ring->intmodt_work); + mhi_cntrl->raise_irq(mhi_cntrl, ring->irq_vector); + } else if (ring->intmodt && !READ_ONCE(ring->irq_pending)) { + WRITE_ONCE(ring->irq_pending, true); + schedule_delayed_work(&ring->intmodt_work, msecs_to_jiffies(ring->intmodt)); + } return 0; @@ -183,6 +199,8 @@ static int mhi_ep_process_cmd_ring(struct mhi_ep_ring *ring, struct mhi_ring_ele goto err_unlock; } + + mhi_chan->rd_offset = ch_ring->rd_offset; } /* Set channel state to RUNNING */ @@ -312,21 +330,85 @@ bool mhi_ep_queue_is_empty(struct mhi_ep_device *mhi_dev, enum dma_data_directio struct mhi_ep_cntrl *mhi_cntrl = mhi_dev->mhi_cntrl; struct mhi_ep_ring *ring = &mhi_cntrl->mhi_chan[mhi_chan->chan].ring; - return !!(ring->rd_offset == ring->wr_offset); + return !!(mhi_chan->rd_offset == ring->wr_offset); } EXPORT_SYMBOL_GPL(mhi_ep_queue_is_empty); +static void mhi_ep_read_completion(struct mhi_ep_buf_info *buf_info) +{ + struct mhi_ep_device *mhi_dev = buf_info->mhi_dev; + struct mhi_ep_cntrl *mhi_cntrl = mhi_dev->mhi_cntrl; + struct mhi_ep_chan *mhi_chan = mhi_dev->ul_chan; + struct mhi_ep_ring *ring = &mhi_cntrl->mhi_chan[mhi_chan->chan].ring; + struct mhi_ring_element *el = &ring->ring_cache[ring->rd_offset]; + struct mhi_result result = {}; + int ret; + + if (mhi_chan->xfer_cb) { + result.buf_addr = buf_info->cb_buf; + result.dir = mhi_chan->dir; + result.bytes_xferd = buf_info->size; + + mhi_chan->xfer_cb(mhi_dev, &result); + } + + /* + * The host will split the data packet into multiple TREs if it can't fit + * the packet in a single TRE. In that case, CHAIN flag will be set by the + * host for all TREs except the last one. + */ + if (buf_info->code != MHI_EV_CC_OVERFLOW) { + if (MHI_TRE_DATA_GET_CHAIN(el)) { + /* + * IEOB (Interrupt on End of Block) flag will be set by the host if + * it expects the completion event for all TREs of a TD. + */ + if (MHI_TRE_DATA_GET_IEOB(el)) { + ret = mhi_ep_send_completion_event(mhi_cntrl, ring, el, + MHI_TRE_DATA_GET_LEN(el), + MHI_EV_CC_EOB); + if (ret < 0) { + dev_err(&mhi_chan->mhi_dev->dev, + "Error sending transfer compl. event\n"); + goto err_free_tre_buf; + } + } + } else { + /* + * IEOT (Interrupt on End of Transfer) flag will be set by the host + * for the last TRE of the TD and expects the completion event for + * the same. + */ + if (MHI_TRE_DATA_GET_IEOT(el)) { + ret = mhi_ep_send_completion_event(mhi_cntrl, ring, el, + MHI_TRE_DATA_GET_LEN(el), + MHI_EV_CC_EOT); + if (ret < 0) { + dev_err(&mhi_chan->mhi_dev->dev, + "Error sending transfer compl. 
event\n"); + goto err_free_tre_buf; + } + } + } + } + + mhi_ep_ring_inc_index(ring); + +err_free_tre_buf: + kmem_cache_free(mhi_cntrl->tre_buf_cache, buf_info->cb_buf); +} + static int mhi_ep_read_channel(struct mhi_ep_cntrl *mhi_cntrl, - struct mhi_ep_ring *ring, - struct mhi_result *result, - u32 len) + struct mhi_ep_ring *ring) { struct mhi_ep_chan *mhi_chan = &mhi_cntrl->mhi_chan[ring->ch_id]; struct device *dev = &mhi_cntrl->mhi_dev->dev; size_t tr_len, read_offset, write_offset; struct mhi_ep_buf_info buf_info = {}; + u32 len = MHI_EP_DEFAULT_MTU; struct mhi_ring_element *el; bool tr_done = false; + void *buf_addr; u32 buf_left; int ret; @@ -339,7 +421,7 @@ static int mhi_ep_read_channel(struct mhi_ep_cntrl *mhi_cntrl, return -ENODEV; } - el = &ring->ring_cache[ring->rd_offset]; + el = &ring->ring_cache[mhi_chan->rd_offset]; /* Check if there is data pending to be read from previous read operation */ if (mhi_chan->tre_bytes_left) { @@ -356,82 +438,50 @@ static int mhi_ep_read_channel(struct mhi_ep_cntrl *mhi_cntrl, read_offset = mhi_chan->tre_size - mhi_chan->tre_bytes_left; write_offset = len - buf_left; + buf_addr = kmem_cache_zalloc(mhi_cntrl->tre_buf_cache, GFP_KERNEL | GFP_DMA); + if (!buf_addr) + return -ENOMEM; + buf_info.host_addr = mhi_chan->tre_loc + read_offset; - buf_info.dev_addr = result->buf_addr + write_offset; + buf_info.dev_addr = buf_addr + write_offset; buf_info.size = tr_len; + buf_info.cb = mhi_ep_read_completion; + buf_info.cb_buf = buf_addr; + buf_info.mhi_dev = mhi_chan->mhi_dev; + + if (mhi_chan->tre_bytes_left - tr_len) + buf_info.code = MHI_EV_CC_OVERFLOW; dev_dbg(dev, "Reading %zd bytes from channel (%u)\n", tr_len, ring->ch_id); - ret = mhi_cntrl->read_from_host(mhi_cntrl, &buf_info); + ret = mhi_cntrl->read_async(mhi_cntrl, &buf_info); if (ret < 0) { dev_err(&mhi_chan->mhi_dev->dev, "Error reading from channel\n"); - return ret; + goto err_free_buf_addr; } buf_left -= tr_len; mhi_chan->tre_bytes_left -= tr_len; - /* - * Once the TRE (Transfer Ring Element) of a TD (Transfer Descriptor) has been - * read completely: - * - * 1. Send completion event to the host based on the flags set in TRE. - * 2. Increment the local read offset of the transfer ring. - */ if (!mhi_chan->tre_bytes_left) { - /* - * The host will split the data packet into multiple TREs if it can't fit - * the packet in a single TRE. In that case, CHAIN flag will be set by the - * host for all TREs except the last one. - */ - if (MHI_TRE_DATA_GET_CHAIN(el)) { - /* - * IEOB (Interrupt on End of Block) flag will be set by the host if - * it expects the completion event for all TREs of a TD. - */ - if (MHI_TRE_DATA_GET_IEOB(el)) { - ret = mhi_ep_send_completion_event(mhi_cntrl, ring, el, - MHI_TRE_DATA_GET_LEN(el), - MHI_EV_CC_EOB); - if (ret < 0) { - dev_err(&mhi_chan->mhi_dev->dev, - "Error sending transfer compl. event\n"); - return ret; - } - } - } else { - /* - * IEOT (Interrupt on End of Transfer) flag will be set by the host - * for the last TRE of the TD and expects the completion event for - * the same. - */ - if (MHI_TRE_DATA_GET_IEOT(el)) { - ret = mhi_ep_send_completion_event(mhi_cntrl, ring, el, - MHI_TRE_DATA_GET_LEN(el), - MHI_EV_CC_EOT); - if (ret < 0) { - dev_err(&mhi_chan->mhi_dev->dev, - "Error sending transfer compl. 
event\n"); - return ret; - } - } - + if (MHI_TRE_DATA_GET_IEOT(el)) tr_done = true; - } - mhi_ep_ring_inc_index(ring); + mhi_chan->rd_offset = (mhi_chan->rd_offset + 1) % ring->ring_size; } - - result->bytes_xferd += tr_len; } while (buf_left && !tr_done); return 0; + +err_free_buf_addr: + kmem_cache_free(mhi_cntrl->tre_buf_cache, buf_addr); + + return ret; } -static int mhi_ep_process_ch_ring(struct mhi_ep_ring *ring, struct mhi_ring_element *el) +static int mhi_ep_process_ch_ring(struct mhi_ep_ring *ring) { struct mhi_ep_cntrl *mhi_cntrl = ring->mhi_cntrl; struct mhi_result result = {}; - u32 len = MHI_EP_DEFAULT_MTU; struct mhi_ep_chan *mhi_chan; int ret; @@ -452,32 +502,49 @@ static int mhi_ep_process_ch_ring(struct mhi_ep_ring *ring, struct mhi_ring_elem mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result); } else { /* UL channel */ - result.buf_addr = kmem_cache_zalloc(mhi_cntrl->tre_buf_cache, GFP_KERNEL | GFP_DMA); - if (!result.buf_addr) - return -ENOMEM; - do { - ret = mhi_ep_read_channel(mhi_cntrl, ring, &result, len); + ret = mhi_ep_read_channel(mhi_cntrl, ring); if (ret < 0) { dev_err(&mhi_chan->mhi_dev->dev, "Failed to read channel\n"); - kmem_cache_free(mhi_cntrl->tre_buf_cache, result.buf_addr); return ret; } - result.dir = mhi_chan->dir; - mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result); - result.bytes_xferd = 0; - memset(result.buf_addr, 0, len); - /* Read until the ring becomes empty */ } while (!mhi_ep_queue_is_empty(mhi_chan->mhi_dev, DMA_TO_DEVICE)); - - kmem_cache_free(mhi_cntrl->tre_buf_cache, result.buf_addr); } return 0; } +static void mhi_ep_skb_completion(struct mhi_ep_buf_info *buf_info) +{ + struct mhi_ep_device *mhi_dev = buf_info->mhi_dev; + struct mhi_ep_cntrl *mhi_cntrl = mhi_dev->mhi_cntrl; + struct mhi_ep_chan *mhi_chan = mhi_dev->dl_chan; + struct mhi_ep_ring *ring = &mhi_cntrl->mhi_chan[mhi_chan->chan].ring; + struct mhi_ring_element *el = &ring->ring_cache[ring->rd_offset]; + struct device *dev = &mhi_dev->dev; + struct mhi_result result = {}; + int ret; + + if (mhi_chan->xfer_cb) { + result.buf_addr = buf_info->cb_buf; + result.dir = mhi_chan->dir; + result.bytes_xferd = buf_info->size; + + mhi_chan->xfer_cb(mhi_dev, &result); + } + + ret = mhi_ep_send_completion_event(mhi_cntrl, ring, el, buf_info->size, + buf_info->code); + if (ret) { + dev_err(dev, "Error sending transfer completion event\n"); + return; + } + + mhi_ep_ring_inc_index(ring); +} + /* TODO: Handle partially formed TDs */ int mhi_ep_queue_skb(struct mhi_ep_device *mhi_dev, struct sk_buff *skb) { @@ -488,7 +555,6 @@ int mhi_ep_queue_skb(struct mhi_ep_device *mhi_dev, struct sk_buff *skb) struct mhi_ring_element *el; u32 buf_left, read_offset; struct mhi_ep_ring *ring; - enum mhi_ev_ccs code; size_t tr_len; u32 tre_len; int ret; @@ -512,7 +578,7 @@ int mhi_ep_queue_skb(struct mhi_ep_device *mhi_dev, struct sk_buff *skb) goto err_exit; } - el = &ring->ring_cache[ring->rd_offset]; + el = &ring->ring_cache[mhi_chan->rd_offset]; tre_len = MHI_TRE_DATA_GET_LEN(el); tr_len = min(buf_left, tre_len); @@ -521,33 +587,35 @@ int mhi_ep_queue_skb(struct mhi_ep_device *mhi_dev, struct sk_buff *skb) buf_info.dev_addr = skb->data + read_offset; buf_info.host_addr = MHI_TRE_DATA_GET_PTR(el); buf_info.size = tr_len; + buf_info.cb = mhi_ep_skb_completion; + buf_info.cb_buf = skb; + buf_info.mhi_dev = mhi_dev; - dev_dbg(dev, "Writing %zd bytes to channel (%u)\n", tr_len, ring->ch_id); - ret = mhi_cntrl->write_to_host(mhi_cntrl, &buf_info); - if (ret < 0) { - dev_err(dev, "Error writing to the channel\n"); - goto 
err_exit; - } - - buf_left -= tr_len; /* * For all TREs queued by the host for DL channel, only the EOT flag will be set. * If the packet doesn't fit into a single TRE, send the OVERFLOW event to * the host so that the host can adjust the packet boundary to next TREs. Else send * the EOT event to the host indicating the packet boundary. */ - if (buf_left) - code = MHI_EV_CC_OVERFLOW; + if (buf_left - tr_len) + buf_info.code = MHI_EV_CC_OVERFLOW; else - code = MHI_EV_CC_EOT; + buf_info.code = MHI_EV_CC_EOT; - ret = mhi_ep_send_completion_event(mhi_cntrl, ring, el, tr_len, code); - if (ret) { - dev_err(dev, "Error sending transfer completion event\n"); + dev_dbg(dev, "Writing %zd bytes to channel (%u)\n", tr_len, ring->ch_id); + ret = mhi_cntrl->write_async(mhi_cntrl, &buf_info); + if (ret < 0) { + dev_err(dev, "Error writing to the channel\n"); goto err_exit; } - mhi_ep_ring_inc_index(ring); + buf_left -= tr_len; + + /* + * Update the read offset cached in mhi_chan. Actual read offset + * will be updated by the completion handler. + */ + mhi_chan->rd_offset = (mhi_chan->rd_offset + 1) % ring->ring_size; } while (buf_left); mutex_unlock(&mhi_chan->lock); @@ -748,7 +816,6 @@ static void mhi_ep_ch_ring_worker(struct work_struct *work) struct mhi_ep_cntrl *mhi_cntrl = container_of(work, struct mhi_ep_cntrl, ch_ring_work); struct device *dev = &mhi_cntrl->mhi_dev->dev; struct mhi_ep_ring_item *itr, *tmp; - struct mhi_ring_element *el; struct mhi_ep_ring *ring; struct mhi_ep_chan *chan; unsigned long flags; @@ -787,16 +854,14 @@ static void mhi_ep_ch_ring_worker(struct work_struct *work) } /* Sanity check to make sure there are elements in the ring */ - if (ring->rd_offset == ring->wr_offset) { + if (chan->rd_offset == ring->wr_offset) { mutex_unlock(&chan->lock); kmem_cache_free(mhi_cntrl->ring_item_cache, itr); continue; } - el = &ring->ring_cache[ring->rd_offset]; - dev_dbg(dev, "Processing the ring for channel (%u)\n", ring->ch_id); - ret = mhi_ep_process_ch_ring(ring, el); + ret = mhi_ep_process_ch_ring(ring); if (ret) { dev_err(dev, "Error processing ring for channel (%u): %d\n", ring->ch_id, ret); @@ -1399,6 +1464,10 @@ int mhi_ep_register_controller(struct mhi_ep_cntrl *mhi_cntrl, if (!mhi_cntrl || !mhi_cntrl->cntrl_dev || !mhi_cntrl->mmio || !mhi_cntrl->irq) return -EINVAL; + if (!mhi_cntrl->read_sync || !mhi_cntrl->write_sync || + !mhi_cntrl->read_async || !mhi_cntrl->write_async) + return -EINVAL; + ret = mhi_ep_chan_init(mhi_cntrl, config); if (ret) return ret; @@ -1431,6 +1500,7 @@ int mhi_ep_register_controller(struct mhi_ep_cntrl *mhi_cntrl, ret = -ENOMEM; goto err_destroy_tre_buf_cache; } + INIT_WORK(&mhi_cntrl->state_work, mhi_ep_state_worker); INIT_WORK(&mhi_cntrl->reset_work, mhi_ep_reset_worker); INIT_WORK(&mhi_cntrl->cmd_ring_work, mhi_ep_cmd_ring_worker); diff --git a/drivers/bus/mhi/ep/ring.c b/drivers/bus/mhi/ep/ring.c index c673d7200b..aeb53b2c34 100644 --- a/drivers/bus/mhi/ep/ring.c +++ b/drivers/bus/mhi/ep/ring.c @@ -48,7 +48,7 @@ static int __mhi_ep_cache_ring(struct mhi_ep_ring *ring, size_t end) buf_info.host_addr = ring->rbase + (start * sizeof(struct mhi_ring_element)); buf_info.dev_addr = &ring->ring_cache[start]; - ret = mhi_cntrl->read_from_host(mhi_cntrl, &buf_info); + ret = mhi_cntrl->read_sync(mhi_cntrl, &buf_info); if (ret < 0) return ret; } else { @@ -56,7 +56,7 @@ static int __mhi_ep_cache_ring(struct mhi_ep_ring *ring, size_t end) buf_info.host_addr = ring->rbase + (start * sizeof(struct mhi_ring_element)); buf_info.dev_addr = &ring->ring_cache[start]; - 
ret = mhi_cntrl->read_from_host(mhi_cntrl, &buf_info); + ret = mhi_cntrl->read_sync(mhi_cntrl, &buf_info); if (ret < 0) return ret; @@ -65,7 +65,7 @@ static int __mhi_ep_cache_ring(struct mhi_ep_ring *ring, size_t end) buf_info.dev_addr = &ring->ring_cache[0]; buf_info.size = end * sizeof(struct mhi_ring_element); - ret = mhi_cntrl->read_from_host(mhi_cntrl, &buf_info); + ret = mhi_cntrl->read_sync(mhi_cntrl, &buf_info); if (ret < 0) return ret; } @@ -143,7 +143,7 @@ int mhi_ep_ring_add_element(struct mhi_ep_ring *ring, struct mhi_ring_element *e buf_info.dev_addr = el; buf_info.size = sizeof(*el); - return mhi_cntrl->write_to_host(mhi_cntrl, &buf_info); + return mhi_cntrl->write_sync(mhi_cntrl, &buf_info); } void mhi_ep_ring_init(struct mhi_ep_ring *ring, enum mhi_ep_ring_type type, u32 id) @@ -162,6 +162,15 @@ void mhi_ep_ring_init(struct mhi_ep_ring *ring, enum mhi_ep_ring_type type, u32 } } +static void mhi_ep_raise_irq(struct work_struct *work) +{ + struct mhi_ep_ring *ring = container_of(work, struct mhi_ep_ring, intmodt_work.work); + struct mhi_ep_cntrl *mhi_cntrl = ring->mhi_cntrl; + + mhi_cntrl->raise_irq(mhi_cntrl, ring->irq_vector); + WRITE_ONCE(ring->irq_pending, false); +} + int mhi_ep_ring_start(struct mhi_ep_cntrl *mhi_cntrl, struct mhi_ep_ring *ring, union mhi_ep_ring_ctx *ctx) { @@ -178,8 +187,13 @@ int mhi_ep_ring_start(struct mhi_ep_cntrl *mhi_cntrl, struct mhi_ep_ring *ring, if (ring->type == RING_TYPE_CH) ring->er_index = le32_to_cpu(ring->ring_ctx->ch.erindex); - if (ring->type == RING_TYPE_ER) + if (ring->type == RING_TYPE_ER) { ring->irq_vector = le32_to_cpu(ring->ring_ctx->ev.msivec); + ring->intmodt = FIELD_GET(EV_CTX_INTMODT_MASK, + le32_to_cpu(ring->ring_ctx->ev.intmod)); + + INIT_DELAYED_WORK(&ring->intmodt_work, mhi_ep_raise_irq); + } /* During ring init, both rp and wp are equal */ memcpy_fromio(&val, (void __iomem *) &ring->ring_ctx->generic.rp, sizeof(u64)); @@ -206,6 +220,9 @@ int mhi_ep_ring_start(struct mhi_ep_cntrl *mhi_cntrl, struct mhi_ep_ring *ring, void mhi_ep_ring_reset(struct mhi_ep_cntrl *mhi_cntrl, struct mhi_ep_ring *ring) { + if (ring->type == RING_TYPE_ER) + cancel_delayed_work_sync(&ring->intmodt_work); + ring->started = false; kfree(ring->ring_cache); ring->ring_cache = NULL; diff --git a/drivers/bus/mhi/host/init.c b/drivers/bus/mhi/host/init.c index f78aefd2d7..8e5ec1a409 100644 --- a/drivers/bus/mhi/host/init.c +++ b/drivers/bus/mhi/host/init.c @@ -62,6 +62,7 @@ static const char * const mhi_pm_state_str[] = { [MHI_PM_STATE_FW_DL_ERR] = "Firmware Download Error", [MHI_PM_STATE_SYS_ERR_DETECT] = "SYS ERROR Detect", [MHI_PM_STATE_SYS_ERR_PROCESS] = "SYS ERROR Process", + [MHI_PM_STATE_SYS_ERR_FAIL] = "SYS ERROR Failure", [MHI_PM_STATE_SHUTDOWN_PROCESS] = "SHUTDOWN Process", [MHI_PM_STATE_LD_ERR_FATAL_DETECT] = "Linkdown or Error Fatal Detect", }; @@ -881,6 +882,7 @@ static int parse_config(struct mhi_controller *mhi_cntrl, if (!mhi_cntrl->timeout_ms) mhi_cntrl->timeout_ms = MHI_TIMEOUT_MS; + mhi_cntrl->ready_timeout_ms = config->ready_timeout_ms; mhi_cntrl->bounce_buf = config->use_bounce_buf; mhi_cntrl->buffer_len = config->buf_len; if (!mhi_cntrl->buffer_len) diff --git a/drivers/bus/mhi/host/internal.h b/drivers/bus/mhi/host/internal.h index 2e139e76de..4b6deea17b 100644 --- a/drivers/bus/mhi/host/internal.h +++ b/drivers/bus/mhi/host/internal.h @@ -88,6 +88,7 @@ enum mhi_pm_state { MHI_PM_STATE_FW_DL_ERR, MHI_PM_STATE_SYS_ERR_DETECT, MHI_PM_STATE_SYS_ERR_PROCESS, + MHI_PM_STATE_SYS_ERR_FAIL, MHI_PM_STATE_SHUTDOWN_PROCESS, 
MHI_PM_STATE_LD_ERR_FATAL_DETECT, MHI_PM_STATE_MAX @@ -104,14 +105,16 @@ enum mhi_pm_state { #define MHI_PM_FW_DL_ERR BIT(7) #define MHI_PM_SYS_ERR_DETECT BIT(8) #define MHI_PM_SYS_ERR_PROCESS BIT(9) -#define MHI_PM_SHUTDOWN_PROCESS BIT(10) +#define MHI_PM_SYS_ERR_FAIL BIT(10) +#define MHI_PM_SHUTDOWN_PROCESS BIT(11) /* link not accessible */ -#define MHI_PM_LD_ERR_FATAL_DETECT BIT(11) +#define MHI_PM_LD_ERR_FATAL_DETECT BIT(12) #define MHI_REG_ACCESS_VALID(pm_state) ((pm_state & (MHI_PM_POR | MHI_PM_M0 | \ MHI_PM_M2 | MHI_PM_M3_ENTER | MHI_PM_M3_EXIT | \ MHI_PM_SYS_ERR_DETECT | MHI_PM_SYS_ERR_PROCESS | \ - MHI_PM_SHUTDOWN_PROCESS | MHI_PM_FW_DL_ERR))) + MHI_PM_SYS_ERR_FAIL | MHI_PM_SHUTDOWN_PROCESS | \ + MHI_PM_FW_DL_ERR))) #define MHI_PM_IN_ERROR_STATE(pm_state) (pm_state >= MHI_PM_FW_DL_ERR) #define MHI_PM_IN_FATAL_STATE(pm_state) (pm_state == MHI_PM_LD_ERR_FATAL_DETECT) #define MHI_DB_ACCESS_VALID(mhi_cntrl) (mhi_cntrl->pm_state & mhi_cntrl->db_access) @@ -321,7 +324,7 @@ int __must_check mhi_read_reg_field(struct mhi_controller *mhi_cntrl, u32 *out); int __must_check mhi_poll_reg_field(struct mhi_controller *mhi_cntrl, void __iomem *base, u32 offset, u32 mask, - u32 val, u32 delayus); + u32 val, u32 delayus, u32 timeout_ms); void mhi_write_reg(struct mhi_controller *mhi_cntrl, void __iomem *base, u32 offset, u32 val); int __must_check mhi_write_reg_field(struct mhi_controller *mhi_cntrl, diff --git a/drivers/bus/mhi/host/main.c b/drivers/bus/mhi/host/main.c index d6653cbcf9..abb561db9a 100644 --- a/drivers/bus/mhi/host/main.c +++ b/drivers/bus/mhi/host/main.c @@ -40,10 +40,11 @@ int __must_check mhi_read_reg_field(struct mhi_controller *mhi_cntrl, int __must_check mhi_poll_reg_field(struct mhi_controller *mhi_cntrl, void __iomem *base, u32 offset, - u32 mask, u32 val, u32 delayus) + u32 mask, u32 val, u32 delayus, + u32 timeout_ms) { int ret; - u32 out, retry = (mhi_cntrl->timeout_ms * 1000) / delayus; + u32 out, retry = (timeout_ms * 1000) / delayus; while (retry--) { ret = mhi_read_reg_field(mhi_cntrl, base, offset, mask, &out); diff --git a/drivers/bus/mhi/host/pci_generic.c b/drivers/bus/mhi/host/pci_generic.c index 08f3f039db..cd6cd14b3d 100644 --- a/drivers/bus/mhi/host/pci_generic.c +++ b/drivers/bus/mhi/host/pci_generic.c @@ -269,6 +269,16 @@ static struct mhi_event_config modem_qcom_v1_mhi_events[] = { MHI_EVENT_CONFIG_HW_DATA(5, 2048, 101) }; +static const struct mhi_controller_config modem_qcom_v2_mhiv_config = { + .max_channels = 128, + .timeout_ms = 8000, + .ready_timeout_ms = 50000, + .num_channels = ARRAY_SIZE(modem_qcom_v1_mhi_channels), + .ch_cfg = modem_qcom_v1_mhi_channels, + .num_events = ARRAY_SIZE(modem_qcom_v1_mhi_events), + .event_cfg = modem_qcom_v1_mhi_events, +}; + static const struct mhi_controller_config modem_qcom_v1_mhiv_config = { .max_channels = 128, .timeout_ms = 8000, @@ -278,6 +288,16 @@ static const struct mhi_controller_config modem_qcom_v1_mhiv_config = { .event_cfg = modem_qcom_v1_mhi_events, }; +static const struct mhi_pci_dev_info mhi_qcom_sdx75_info = { + .name = "qcom-sdx75m", + .fw = "qcom/sdx75m/xbl.elf", + .edl = "qcom/sdx75m/edl.mbn", + .config = &modem_qcom_v2_mhiv_config, + .bar_num = MHI_PCI_DEFAULT_BAR_NUM, + .dma_data_width = 32, + .sideband_wake = false, +}; + static const struct mhi_pci_dev_info mhi_qcom_sdx65_info = { .name = "qcom-sdx65m", .fw = "qcom/sdx65m/xbl.elf", @@ -600,6 +620,8 @@ static const struct pci_device_id mhi_pci_id_table[] = { .driver_data = (kernel_ulong_t) &mhi_telit_fn990_info }, { 
PCI_DEVICE(PCI_VENDOR_ID_QCOM, 0x0308), .driver_data = (kernel_ulong_t) &mhi_qcom_sdx65_info }, + { PCI_DEVICE(PCI_VENDOR_ID_QCOM, 0x0309), + .driver_data = (kernel_ulong_t) &mhi_qcom_sdx75_info }, { PCI_DEVICE(PCI_VENDOR_ID_QUECTEL, 0x1001), /* EM120R-GL (sdx24) */ .driver_data = (kernel_ulong_t) &mhi_quectel_em1xx_info }, { PCI_DEVICE(PCI_VENDOR_ID_QUECTEL, 0x1002), /* EM160R-GL (sdx24) */ diff --git a/drivers/bus/mhi/host/pm.c b/drivers/bus/mhi/host/pm.c index 8a4362d75f..d0d033ce99 100644 --- a/drivers/bus/mhi/host/pm.c +++ b/drivers/bus/mhi/host/pm.c @@ -36,7 +36,10 @@ * M0 <--> M0 * M0 -> FW_DL_ERR * M0 -> M3_ENTER -> M3 -> M3_EXIT --> M0 - * L1: SYS_ERR_DETECT -> SYS_ERR_PROCESS --> POR + * L1: SYS_ERR_DETECT -> SYS_ERR_PROCESS + * SYS_ERR_PROCESS -> SYS_ERR_FAIL + * SYS_ERR_FAIL -> SYS_ERR_DETECT + * SYS_ERR_PROCESS --> POR * L2: SHUTDOWN_PROCESS -> LD_ERR_FATAL_DETECT * SHUTDOWN_PROCESS -> DISABLE * L3: LD_ERR_FATAL_DETECT <--> LD_ERR_FATAL_DETECT @@ -93,7 +96,12 @@ static const struct mhi_pm_transitions dev_state_transitions[] = { }, { MHI_PM_SYS_ERR_PROCESS, - MHI_PM_POR | MHI_PM_SHUTDOWN_PROCESS | + MHI_PM_POR | MHI_PM_SYS_ERR_FAIL | MHI_PM_SHUTDOWN_PROCESS | + MHI_PM_LD_ERR_FATAL_DETECT + }, + { + MHI_PM_SYS_ERR_FAIL, + MHI_PM_SYS_ERR_DETECT | MHI_PM_SHUTDOWN_PROCESS | MHI_PM_LD_ERR_FATAL_DETECT }, /* L2 States */ @@ -163,6 +171,7 @@ int mhi_ready_state_transition(struct mhi_controller *mhi_cntrl) enum mhi_pm_state cur_state; struct device *dev = &mhi_cntrl->mhi_dev->dev; u32 interval_us = 25000; /* poll register field every 25 milliseconds */ + u32 timeout_ms; int ret, i; /* Check if device entered error state */ @@ -173,14 +182,18 @@ int mhi_ready_state_transition(struct mhi_controller *mhi_cntrl) /* Wait for RESET to be cleared and READY bit to be set by the device */ ret = mhi_poll_reg_field(mhi_cntrl, mhi_cntrl->regs, MHICTRL, - MHICTRL_RESET_MASK, 0, interval_us); + MHICTRL_RESET_MASK, 0, interval_us, + mhi_cntrl->timeout_ms); if (ret) { dev_err(dev, "Device failed to clear MHI Reset\n"); return ret; } + timeout_ms = mhi_cntrl->ready_timeout_ms ? 
+ mhi_cntrl->ready_timeout_ms : mhi_cntrl->timeout_ms; ret = mhi_poll_reg_field(mhi_cntrl, mhi_cntrl->regs, MHISTATUS, - MHISTATUS_READY_MASK, 1, interval_us); + MHISTATUS_READY_MASK, 1, interval_us, + timeout_ms); if (ret) { dev_err(dev, "Device failed to enter MHI Ready\n"); return ret; @@ -479,7 +492,7 @@ static void mhi_pm_disable_transition(struct mhi_controller *mhi_cntrl) /* Wait for the reset bit to be cleared by the device */ ret = mhi_poll_reg_field(mhi_cntrl, mhi_cntrl->regs, MHICTRL, - MHICTRL_RESET_MASK, 0, 25000); + MHICTRL_RESET_MASK, 0, 25000, mhi_cntrl->timeout_ms); if (ret) dev_err(dev, "Device failed to clear MHI Reset\n"); @@ -492,8 +505,8 @@ static void mhi_pm_disable_transition(struct mhi_controller *mhi_cntrl) if (!MHI_IN_PBL(mhi_get_exec_env(mhi_cntrl))) { /* wait for ready to be set */ ret = mhi_poll_reg_field(mhi_cntrl, mhi_cntrl->regs, - MHISTATUS, - MHISTATUS_READY_MASK, 1, 25000); + MHISTATUS, MHISTATUS_READY_MASK, + 1, 25000, mhi_cntrl->timeout_ms); if (ret) dev_err(dev, "Device failed to enter READY state\n"); } @@ -624,7 +637,13 @@ static void mhi_pm_sys_error_transition(struct mhi_controller *mhi_cntrl) !in_reset, timeout); if (!ret || in_reset) { dev_err(dev, "Device failed to exit MHI Reset state\n"); - goto exit_sys_error_transition; + write_lock_irq(&mhi_cntrl->pm_lock); + cur_state = mhi_tryset_pm_state(mhi_cntrl, + MHI_PM_SYS_ERR_FAIL); + write_unlock_irq(&mhi_cntrl->pm_lock); + /* Shutdown may have occurred, otherwise cleanup now */ + if (cur_state != MHI_PM_SYS_ERR_FAIL) + goto exit_sys_error_transition; } /* @@ -1111,7 +1130,8 @@ int mhi_async_power_up(struct mhi_controller *mhi_cntrl) if (state == MHI_STATE_SYS_ERR) { mhi_set_mhi_state(mhi_cntrl, MHI_STATE_RESET); ret = mhi_poll_reg_field(mhi_cntrl, mhi_cntrl->regs, MHICTRL, - MHICTRL_RESET_MASK, 0, interval_us); + MHICTRL_RESET_MASK, 0, interval_us, + mhi_cntrl->timeout_ms); if (ret) { dev_info(dev, "Failed to reset MHI due to syserr state\n"); goto error_exit; @@ -1202,14 +1222,18 @@ EXPORT_SYMBOL_GPL(mhi_power_down); int mhi_sync_power_up(struct mhi_controller *mhi_cntrl) { int ret = mhi_async_power_up(mhi_cntrl); + u32 timeout_ms; if (ret) return ret; + /* Some devices need more time to set ready during power up */ + timeout_ms = mhi_cntrl->ready_timeout_ms ? + mhi_cntrl->ready_timeout_ms : mhi_cntrl->timeout_ms; wait_event_timeout(mhi_cntrl->state_event, MHI_IN_MISSION_MODE(mhi_cntrl->ee) || MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state), - msecs_to_jiffies(mhi_cntrl->timeout_ms)); + msecs_to_jiffies(timeout_ms)); ret = (MHI_IN_MISSION_MODE(mhi_cntrl->ee)) ? 0 : -ETIMEDOUT; if (ret) -- cgit v1.2.3
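
A minimal, standalone C sketch of the three interrupt-moderation rules spelled out in the comment added to mhi_ep_send_event() above. It only models the decision table: raise_irq_now(), start_intmodt_timer() and cancel_intmodt_timer() are hypothetical stand-ins for mhi_cntrl->raise_irq(), schedule_delayed_work() and cancel_delayed_work(), and this flat model clears the pending flag inline, whereas the driver clears it from its delayed-work handler (mhi_ep_raise_irq()).

#include <stdbool.h>
#include <stdio.h>

/* Stand-ins for mhi_cntrl->raise_irq(), schedule_delayed_work() and
 * cancel_delayed_work(); here they only report what would happen. */
static void raise_irq_now(void)
{
	puts("raise IRQ immediately");
}

static void start_intmodt_timer(unsigned int ms)
{
	printf("schedule IRQ after %u ms\n", ms);
}

static void cancel_intmodt_timer(void)
{
	puts("cancel pending delayed IRQ");
}

/*
 * Decision table from the "section 4.3, Interrupt moderation" comment:
 *  1. !BEI                 -> cancel any pending delayed IRQ, raise now
 *  2. BEI and intmodt != 0 -> start a delayed IRQ unless one is pending
 *  3. BEI and intmodt == 0 -> do nothing
 */
static void moderate_irq(bool bei, unsigned int intmodt, bool *irq_pending)
{
	if (!bei) {
		if (*irq_pending) {
			cancel_intmodt_timer();
			*irq_pending = false;
		}
		raise_irq_now();
	} else if (intmodt && !*irq_pending) {
		*irq_pending = true;
		start_intmodt_timer(intmodt);
	}
}

int main(void)
{
	bool pending = false;

	moderate_irq(true, 10, &pending);	/* rule 2: schedule after 10 ms */
	moderate_irq(true, 10, &pending);	/* rule 2: already pending, no-op */
	moderate_irq(false, 10, &pending);	/* rule 1: cancel, raise now */
	moderate_irq(true, 0, &pending);	/* rule 3: nothing to do */

	return 0;
}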
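
On the host side, the diff threads an explicit timeout through mhi_poll_reg_field() and lets slow-booting devices extend only the READY wait via the new ready_timeout_ms field (50000 ms for SDX75 versus the usual 8000 ms timeout_ms). The short C sketch below just illustrates that fallback choice and the retry-count arithmetic (timeout_ms * 1000 / delayus); pick_ready_timeout() is a hypothetical helper, not a function in the driver.

#include <stdio.h>

/* Mirrors the fallback used in mhi_ready_state_transition() and
 * mhi_sync_power_up(): use ready_timeout_ms when the controller sets it,
 * otherwise fall back to the regular timeout_ms. */
static unsigned int pick_ready_timeout(unsigned int timeout_ms,
				       unsigned int ready_timeout_ms)
{
	return ready_timeout_ms ? ready_timeout_ms : timeout_ms;
}

int main(void)
{
	unsigned int timeout_ms = 8000;		/* modem_qcom_v2_mhiv_config.timeout_ms */
	unsigned int ready_timeout_ms = 50000;	/* modem_qcom_v2_mhiv_config.ready_timeout_ms */
	unsigned int interval_us = 25000;	/* poll interval used for the READY wait */
	unsigned int effective = pick_ready_timeout(timeout_ms, ready_timeout_ms);

	/* Same arithmetic as mhi_poll_reg_field(): number of poll attempts */
	unsigned int retries = (effective * 1000) / interval_us;

	printf("poll every %u us for up to %u ms -> %u attempts\n",
	       interval_us, effective, retries);	/* 2000 attempts */

	return 0;
}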