author      Daniel Baumann <daniel.baumann@progress-linux.org>     2024-08-07 13:11:40 +0000
committer   Daniel Baumann <daniel.baumann@progress-linux.org>     2024-08-07 13:11:40 +0000
commit      8b0a8165cdad0f4133837d753649ef4682e42c3b (patch)
tree        5c58f869f31ddb1f7bd6e8bdea269b680b36c5b6 /drivers/net/wwan
parent      Releasing progress-linux version 6.8.12-1~progress7.99u1. (diff)
download    linux-8b0a8165cdad0f4133837d753649ef4682e42c3b.tar.xz
            linux-8b0a8165cdad0f4133837d753649ef4682e42c3b.zip
Merging upstream version 6.9.7.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'drivers/net/wwan')
-rw-r--r--   drivers/net/wwan/iosm/iosm_ipc_devlink.c    |   2
-rw-r--r--   drivers/net/wwan/t7xx/t7xx_hif_cldma.c      |  47
-rw-r--r--   drivers/net/wwan/t7xx/t7xx_hif_cldma.h      |  18
-rw-r--r--   drivers/net/wwan/t7xx/t7xx_modem_ops.c      |  14
-rw-r--r--   drivers/net/wwan/t7xx/t7xx_modem_ops.h      |   1
-rw-r--r--   drivers/net/wwan/t7xx/t7xx_pci.c            | 103
-rw-r--r--   drivers/net/wwan/t7xx/t7xx_pci.h            |  14
-rw-r--r--   drivers/net/wwan/t7xx/t7xx_port.h           |   4
-rw-r--r--   drivers/net/wwan/t7xx/t7xx_port_proxy.c     | 110
-rw-r--r--   drivers/net/wwan/t7xx/t7xx_port_proxy.h     |  10
-rw-r--r--   drivers/net/wwan/t7xx/t7xx_port_wwan.c      | 115
-rw-r--r--   drivers/net/wwan/t7xx/t7xx_reg.h            |  24
-rw-r--r--   drivers/net/wwan/t7xx/t7xx_state_monitor.c  | 132
-rw-r--r--   drivers/net/wwan/t7xx/t7xx_state_monitor.h  |   1
-rw-r--r--   drivers/net/wwan/wwan_core.c                |  36
-rw-r--r--   drivers/net/wwan/wwan_hwsim.c               |  16
16 files changed, 532 insertions(+), 115 deletions(-)
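The first hunk in the diff below (iosm_ipc_devlink.c) fixes an off-by-one in the devlink-region error path: when creating region i fails, that region was never allocated, so the unwind loop must start at i - 1 rather than i. A standalone sketch of the same unwind pattern, with illustrative names that are not taken from the driver:

```c
/* Illustration only (not driver code) of the error-unwind fix in the
 * iosm_ipc_devlink.c hunk below: on failure for region i, destroy only
 * the regions that were actually created, i.e. indices 0 .. i - 1.
 */
#include <stdio.h>

#define NUM_REGIONS 4

static int create_region(int i)
{
	return (i == 2) ? -1 : 0;	/* simulate a failure on the third region */
}

static void destroy_region(int i)
{
	printf("destroying region %d\n", i);
}

int main(void)
{
	int i;

	for (i = 0; i < NUM_REGIONS; i++) {
		if (create_region(i) < 0) {
			/* Unwind: region i itself was never created. */
			for (i--; i >= 0; i--)
				destroy_region(i);
			return 1;
		}
	}
	return 0;
}
```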
diff --git a/drivers/net/wwan/iosm/iosm_ipc_devlink.c b/drivers/net/wwan/iosm/iosm_ipc_devlink.c index 2fe724d623..33c5a46f1b 100644 --- a/drivers/net/wwan/iosm/iosm_ipc_devlink.c +++ b/drivers/net/wwan/iosm/iosm_ipc_devlink.c @@ -210,7 +210,7 @@ static int ipc_devlink_create_region(struct iosm_devlink *devlink) rc = PTR_ERR(devlink->cd_regions[i]); dev_err(devlink->dev, "Devlink region fail,err %d", rc); /* Delete previously created regions */ - for ( ; i >= 0; i--) + for (i--; i >= 0; i--) devlink_region_destroy(devlink->cd_regions[i]); goto region_create_fail; } diff --git a/drivers/net/wwan/t7xx/t7xx_hif_cldma.c b/drivers/net/wwan/t7xx/t7xx_hif_cldma.c index 554ba4669c..97163e1e57 100644 --- a/drivers/net/wwan/t7xx/t7xx_hif_cldma.c +++ b/drivers/net/wwan/t7xx/t7xx_hif_cldma.c @@ -57,8 +57,6 @@ #define CHECK_Q_STOP_TIMEOUT_US 1000000 #define CHECK_Q_STOP_STEP_US 10000 -#define CLDMA_JUMBO_BUFF_SZ (63 * 1024 + sizeof(struct ccci_header)) - static void md_cd_queue_struct_reset(struct cldma_queue *queue, struct cldma_ctrl *md_ctrl, enum mtk_txrx tx_rx, unsigned int index) { @@ -162,7 +160,7 @@ static int t7xx_cldma_gpd_rx_from_q(struct cldma_queue *queue, int budget, bool skb_reset_tail_pointer(skb); skb_put(skb, le16_to_cpu(gpd->data_buff_len)); - ret = md_ctrl->recv_skb(queue, skb); + ret = queue->recv_skb(queue, skb); /* Break processing, will try again later */ if (ret < 0) return ret; @@ -898,13 +896,13 @@ static void t7xx_cldma_hw_start_send(struct cldma_ctrl *md_ctrl, int qno, /** * t7xx_cldma_set_recv_skb() - Set the callback to handle RX packets. - * @md_ctrl: CLDMA context structure. + * @queue: CLDMA queue. * @recv_skb: Receiving skb callback. */ -void t7xx_cldma_set_recv_skb(struct cldma_ctrl *md_ctrl, +void t7xx_cldma_set_recv_skb(struct cldma_queue *queue, int (*recv_skb)(struct cldma_queue *queue, struct sk_buff *skb)) { - md_ctrl->recv_skb = recv_skb; + queue->recv_skb = recv_skb; } /** @@ -994,6 +992,28 @@ allow_sleep: return ret; } +static void t7xx_cldma_adjust_config(struct cldma_ctrl *md_ctrl, enum cldma_cfg cfg_id) +{ + int qno; + + for (qno = 0; qno < CLDMA_RXQ_NUM; qno++) { + md_ctrl->rx_ring[qno].pkt_size = CLDMA_SHARED_Q_BUFF_SZ; + t7xx_cldma_set_recv_skb(&md_ctrl->rxq[qno], t7xx_port_proxy_recv_skb); + } + + md_ctrl->rx_ring[CLDMA_RXQ_NUM - 1].pkt_size = CLDMA_JUMBO_BUFF_SZ; + + for (qno = 0; qno < CLDMA_TXQ_NUM; qno++) + md_ctrl->tx_ring[qno].pkt_size = CLDMA_SHARED_Q_BUFF_SZ; + + if (cfg_id == CLDMA_DEDICATED_Q_CFG) { + md_ctrl->tx_ring[CLDMA_Q_IDX_DUMP].pkt_size = CLDMA_DEDICATED_Q_BUFF_SZ; + md_ctrl->rx_ring[CLDMA_Q_IDX_DUMP].pkt_size = CLDMA_DEDICATED_Q_BUFF_SZ; + t7xx_cldma_set_recv_skb(&md_ctrl->rxq[CLDMA_Q_IDX_DUMP], + t7xx_port_proxy_recv_skb_from_dedicated_queue); + } +} + static int t7xx_cldma_late_init(struct cldma_ctrl *md_ctrl) { char dma_pool_name[32]; @@ -1019,16 +1039,9 @@ static int t7xx_cldma_late_init(struct cldma_ctrl *md_ctrl) dev_err(md_ctrl->dev, "control TX ring init fail\n"); goto err_free_tx_ring; } - - md_ctrl->tx_ring[i].pkt_size = CLDMA_MTU; } for (j = 0; j < CLDMA_RXQ_NUM; j++) { - md_ctrl->rx_ring[j].pkt_size = CLDMA_MTU; - - if (j == CLDMA_RXQ_NUM - 1) - md_ctrl->rx_ring[j].pkt_size = CLDMA_JUMBO_BUFF_SZ; - ret = t7xx_cldma_rx_ring_init(md_ctrl, &md_ctrl->rx_ring[j]); if (ret) { dev_err(md_ctrl->dev, "Control RX ring init fail\n"); @@ -1095,6 +1108,7 @@ int t7xx_cldma_alloc(enum cldma_id hif_id, struct t7xx_pci_dev *t7xx_dev) { struct device *dev = &t7xx_dev->pdev->dev; struct cldma_ctrl *md_ctrl; + int qno; md_ctrl = 
devm_kzalloc(dev, sizeof(*md_ctrl), GFP_KERNEL); if (!md_ctrl) @@ -1103,7 +1117,9 @@ int t7xx_cldma_alloc(enum cldma_id hif_id, struct t7xx_pci_dev *t7xx_dev) md_ctrl->t7xx_dev = t7xx_dev; md_ctrl->dev = dev; md_ctrl->hif_id = hif_id; - md_ctrl->recv_skb = t7xx_cldma_default_recv_skb; + for (qno = 0; qno < CLDMA_RXQ_NUM; qno++) + md_ctrl->rxq[qno].recv_skb = t7xx_cldma_default_recv_skb; + t7xx_hw_info_init(md_ctrl); t7xx_dev->md->md_ctrl[hif_id] = md_ctrl; return 0; @@ -1333,9 +1349,10 @@ err_workqueue: return -ENOMEM; } -void t7xx_cldma_switch_cfg(struct cldma_ctrl *md_ctrl) +void t7xx_cldma_switch_cfg(struct cldma_ctrl *md_ctrl, enum cldma_cfg cfg_id) { t7xx_cldma_late_release(md_ctrl); + t7xx_cldma_adjust_config(md_ctrl, cfg_id); t7xx_cldma_late_init(md_ctrl); } diff --git a/drivers/net/wwan/t7xx/t7xx_hif_cldma.h b/drivers/net/wwan/t7xx/t7xx_hif_cldma.h index 4410bac699..f2d9941be9 100644 --- a/drivers/net/wwan/t7xx/t7xx_hif_cldma.h +++ b/drivers/net/wwan/t7xx/t7xx_hif_cldma.h @@ -31,6 +31,10 @@ #include "t7xx_cldma.h" #include "t7xx_pci.h" +#define CLDMA_JUMBO_BUFF_SZ (63 * 1024 + sizeof(struct ccci_header)) +#define CLDMA_SHARED_Q_BUFF_SZ 3584 +#define CLDMA_DEDICATED_Q_BUFF_SZ 2048 + /** * enum cldma_id - Identifiers for CLDMA HW units. * @CLDMA_ID_MD: Modem control channel. @@ -55,6 +59,11 @@ struct cldma_gpd { __le16 not_used2; }; +enum cldma_cfg { + CLDMA_SHARED_Q_CFG, + CLDMA_DEDICATED_Q_CFG, +}; + struct cldma_request { struct cldma_gpd *gpd; /* Virtual address for CPU */ dma_addr_t gpd_addr; /* Physical address for DMA */ @@ -82,6 +91,7 @@ struct cldma_queue { wait_queue_head_t req_wq; /* Only for TX */ struct workqueue_struct *worker; struct work_struct cldma_work; + int (*recv_skb)(struct cldma_queue *queue, struct sk_buff *skb); }; struct cldma_ctrl { @@ -101,24 +111,22 @@ struct cldma_ctrl { struct md_pm_entity *pm_entity; struct t7xx_cldma_hw hw_info; bool is_late_init; - int (*recv_skb)(struct cldma_queue *queue, struct sk_buff *skb); }; +#define CLDMA_Q_IDX_DUMP 1 #define GPD_FLAGS_HWO BIT(0) #define GPD_FLAGS_IOC BIT(7) #define GPD_DMAPOOL_ALIGN 16 -#define CLDMA_MTU 3584 /* 3.5kB */ - int t7xx_cldma_alloc(enum cldma_id hif_id, struct t7xx_pci_dev *t7xx_dev); void t7xx_cldma_hif_hw_init(struct cldma_ctrl *md_ctrl); int t7xx_cldma_init(struct cldma_ctrl *md_ctrl); void t7xx_cldma_exit(struct cldma_ctrl *md_ctrl); -void t7xx_cldma_switch_cfg(struct cldma_ctrl *md_ctrl); +void t7xx_cldma_switch_cfg(struct cldma_ctrl *md_ctrl, enum cldma_cfg cfg_id); void t7xx_cldma_start(struct cldma_ctrl *md_ctrl); int t7xx_cldma_stop(struct cldma_ctrl *md_ctrl); void t7xx_cldma_reset(struct cldma_ctrl *md_ctrl); -void t7xx_cldma_set_recv_skb(struct cldma_ctrl *md_ctrl, +void t7xx_cldma_set_recv_skb(struct cldma_queue *queue, int (*recv_skb)(struct cldma_queue *queue, struct sk_buff *skb)); int t7xx_cldma_send_skb(struct cldma_ctrl *md_ctrl, int qno, struct sk_buff *skb); void t7xx_cldma_stop_all_qs(struct cldma_ctrl *md_ctrl, enum mtk_txrx tx_rx); diff --git a/drivers/net/wwan/t7xx/t7xx_modem_ops.c b/drivers/net/wwan/t7xx/t7xx_modem_ops.c index 24e7d49146..8d864d4ed7 100644 --- a/drivers/net/wwan/t7xx/t7xx_modem_ops.c +++ b/drivers/net/wwan/t7xx/t7xx_modem_ops.c @@ -177,6 +177,11 @@ int t7xx_acpi_fldr_func(struct t7xx_pci_dev *t7xx_dev) return t7xx_acpi_reset(t7xx_dev, "_RST"); } +int t7xx_acpi_pldr_func(struct t7xx_pci_dev *t7xx_dev) +{ + return t7xx_acpi_reset(t7xx_dev, "MRST._RST"); +} + static void t7xx_reset_device_via_pmic(struct t7xx_pci_dev *t7xx_dev) { u32 val; @@ -192,6 +197,7 
@@ static irqreturn_t t7xx_rgu_isr_thread(int irq, void *data) { struct t7xx_pci_dev *t7xx_dev = data; + t7xx_mode_update(t7xx_dev, T7XX_RESET); msleep(RGU_RESET_DELAY_MS); t7xx_reset_device_via_pmic(t7xx_dev); return IRQ_HANDLED; @@ -529,7 +535,7 @@ static void t7xx_md_hk_wq(struct work_struct *work) /* Clear the HS2 EXIT event appended in core_reset() */ t7xx_fsm_clr_event(ctl, FSM_EVENT_MD_HS2_EXIT); - t7xx_cldma_switch_cfg(md->md_ctrl[CLDMA_ID_MD]); + t7xx_cldma_switch_cfg(md->md_ctrl[CLDMA_ID_MD], CLDMA_SHARED_Q_CFG); t7xx_cldma_start(md->md_ctrl[CLDMA_ID_MD]); t7xx_fsm_broadcast_state(ctl, MD_STATE_WAITING_FOR_HS2); md->core_md.handshake_ongoing = true; @@ -544,7 +550,7 @@ static void t7xx_ap_hk_wq(struct work_struct *work) /* Clear the HS2 EXIT event appended in t7xx_core_reset(). */ t7xx_fsm_clr_event(ctl, FSM_EVENT_AP_HS2_EXIT); t7xx_cldma_stop(md->md_ctrl[CLDMA_ID_AP]); - t7xx_cldma_switch_cfg(md->md_ctrl[CLDMA_ID_AP]); + t7xx_cldma_switch_cfg(md->md_ctrl[CLDMA_ID_AP], CLDMA_SHARED_Q_CFG); t7xx_cldma_start(md->md_ctrl[CLDMA_ID_AP]); md->core_ap.handshake_ongoing = true; t7xx_core_hk_handler(md, &md->core_ap, ctl, FSM_EVENT_AP_HS2, FSM_EVENT_AP_HS2_EXIT); @@ -758,6 +764,7 @@ err_destroy_hswq: void t7xx_md_exit(struct t7xx_pci_dev *t7xx_dev) { + enum t7xx_mode mode = READ_ONCE(t7xx_dev->mode); struct t7xx_modem *md = t7xx_dev->md; t7xx_pcie_mac_clear_int(t7xx_dev, SAP_RGU_INT); @@ -765,7 +772,8 @@ void t7xx_md_exit(struct t7xx_pci_dev *t7xx_dev) if (!md->md_init_finish) return; - t7xx_fsm_append_cmd(md->fsm_ctl, FSM_CMD_PRE_STOP, FSM_CMD_FLAG_WAIT_FOR_COMPLETION); + if (mode != T7XX_RESET && mode != T7XX_UNKNOWN) + t7xx_fsm_append_cmd(md->fsm_ctl, FSM_CMD_PRE_STOP, FSM_CMD_FLAG_WAIT_FOR_COMPLETION); t7xx_port_proxy_uninit(md->port_prox); t7xx_cldma_exit(md->md_ctrl[CLDMA_ID_AP]); t7xx_cldma_exit(md->md_ctrl[CLDMA_ID_MD]); diff --git a/drivers/net/wwan/t7xx/t7xx_modem_ops.h b/drivers/net/wwan/t7xx/t7xx_modem_ops.h index abe633cf7a..b39e945a92 100644 --- a/drivers/net/wwan/t7xx/t7xx_modem_ops.h +++ b/drivers/net/wwan/t7xx/t7xx_modem_ops.h @@ -85,6 +85,7 @@ int t7xx_md_init(struct t7xx_pci_dev *t7xx_dev); void t7xx_md_exit(struct t7xx_pci_dev *t7xx_dev); void t7xx_clear_rgu_irq(struct t7xx_pci_dev *t7xx_dev); int t7xx_acpi_fldr_func(struct t7xx_pci_dev *t7xx_dev); +int t7xx_acpi_pldr_func(struct t7xx_pci_dev *t7xx_dev); int t7xx_pci_mhccif_isr(struct t7xx_pci_dev *t7xx_dev); #endif /* __T7XX_MODEM_OPS_H__ */ diff --git a/drivers/net/wwan/t7xx/t7xx_pci.c b/drivers/net/wwan/t7xx/t7xx_pci.c index 91256e005b..e0b1e7a616 100644 --- a/drivers/net/wwan/t7xx/t7xx_pci.c +++ b/drivers/net/wwan/t7xx/t7xx_pci.c @@ -52,6 +52,81 @@ #define PM_RESOURCE_POLL_TIMEOUT_US 10000 #define PM_RESOURCE_POLL_STEP_US 100 +static const char * const t7xx_mode_names[] = { + [T7XX_UNKNOWN] = "unknown", + [T7XX_READY] = "ready", + [T7XX_RESET] = "reset", + [T7XX_FASTBOOT_SWITCHING] = "fastboot_switching", + [T7XX_FASTBOOT_DOWNLOAD] = "fastboot_download", + [T7XX_FASTBOOT_DUMP] = "fastboot_dump", +}; + +static_assert(ARRAY_SIZE(t7xx_mode_names) == T7XX_MODE_LAST); + +static ssize_t t7xx_mode_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct t7xx_pci_dev *t7xx_dev; + struct pci_dev *pdev; + int index = 0; + + pdev = to_pci_dev(dev); + t7xx_dev = pci_get_drvdata(pdev); + if (!t7xx_dev) + return -ENODEV; + + index = sysfs_match_string(t7xx_mode_names, buf); + if (index == T7XX_FASTBOOT_SWITCHING) { + WRITE_ONCE(t7xx_dev->mode, T7XX_FASTBOOT_SWITCHING); + } else if 
(index == T7XX_RESET) { + WRITE_ONCE(t7xx_dev->mode, T7XX_RESET); + t7xx_acpi_pldr_func(t7xx_dev); + } + + return count; +}; + +static ssize_t t7xx_mode_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + enum t7xx_mode mode = T7XX_UNKNOWN; + struct t7xx_pci_dev *t7xx_dev; + struct pci_dev *pdev; + + pdev = to_pci_dev(dev); + t7xx_dev = pci_get_drvdata(pdev); + if (!t7xx_dev) + return -ENODEV; + + mode = READ_ONCE(t7xx_dev->mode); + if (mode < T7XX_MODE_LAST) + return sysfs_emit(buf, "%s\n", t7xx_mode_names[mode]); + + return sysfs_emit(buf, "%s\n", t7xx_mode_names[T7XX_UNKNOWN]); +} + +static DEVICE_ATTR_RW(t7xx_mode); + +static struct attribute *t7xx_mode_attr[] = { + &dev_attr_t7xx_mode.attr, + NULL +}; + +static const struct attribute_group t7xx_mode_attribute_group = { + .attrs = t7xx_mode_attr, +}; + +void t7xx_mode_update(struct t7xx_pci_dev *t7xx_dev, enum t7xx_mode mode) +{ + if (!t7xx_dev) + return; + + WRITE_ONCE(t7xx_dev->mode, mode); + sysfs_notify(&t7xx_dev->pdev->dev.kobj, NULL, "t7xx_mode"); +} + enum t7xx_pm_state { MTK_PM_EXCEPTION, MTK_PM_INIT, /* Device initialized, but handshake not completed */ @@ -108,7 +183,7 @@ static int t7xx_pci_pm_init(struct t7xx_pci_dev *t7xx_dev) pm_runtime_set_autosuspend_delay(&pdev->dev, PM_AUTOSUSPEND_MS); pm_runtime_use_autosuspend(&pdev->dev); - return t7xx_wait_pm_config(t7xx_dev); + return 0; } void t7xx_pci_pm_init_late(struct t7xx_pci_dev *t7xx_dev) @@ -279,7 +354,8 @@ static int __t7xx_pci_pm_suspend(struct pci_dev *pdev) int ret; t7xx_dev = pci_get_drvdata(pdev); - if (atomic_read(&t7xx_dev->md_pm_state) <= MTK_PM_INIT) { + if (atomic_read(&t7xx_dev->md_pm_state) <= MTK_PM_INIT || + READ_ONCE(t7xx_dev->mode) != T7XX_READY) { dev_err(&pdev->dev, "[PM] Exiting suspend, modem in invalid state\n"); return -EFAULT; } @@ -729,16 +805,28 @@ static int t7xx_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) t7xx_pcie_mac_interrupts_dis(t7xx_dev); + ret = sysfs_create_group(&t7xx_dev->pdev->dev.kobj, + &t7xx_mode_attribute_group); + if (ret) + goto err_md_exit; + ret = t7xx_interrupt_init(t7xx_dev); - if (ret) { - t7xx_md_exit(t7xx_dev); - return ret; - } + if (ret) + goto err_remove_group; + t7xx_pcie_mac_set_int(t7xx_dev, MHCCIF_INT); t7xx_pcie_mac_interrupts_en(t7xx_dev); return 0; + +err_remove_group: + sysfs_remove_group(&t7xx_dev->pdev->dev.kobj, + &t7xx_mode_attribute_group); + +err_md_exit: + t7xx_md_exit(t7xx_dev); + return ret; } static void t7xx_pci_remove(struct pci_dev *pdev) @@ -747,6 +835,9 @@ static void t7xx_pci_remove(struct pci_dev *pdev) int i; t7xx_dev = pci_get_drvdata(pdev); + + sysfs_remove_group(&t7xx_dev->pdev->dev.kobj, + &t7xx_mode_attribute_group); t7xx_md_exit(t7xx_dev); for (i = 0; i < EXT_INT_NUM; i++) { diff --git a/drivers/net/wwan/t7xx/t7xx_pci.h b/drivers/net/wwan/t7xx/t7xx_pci.h index f08f1ab744..49a11586d8 100644 --- a/drivers/net/wwan/t7xx/t7xx_pci.h +++ b/drivers/net/wwan/t7xx/t7xx_pci.h @@ -43,6 +43,16 @@ struct t7xx_addr_base { typedef irqreturn_t (*t7xx_intr_callback)(int irq, void *param); +enum t7xx_mode { + T7XX_UNKNOWN, + T7XX_READY, + T7XX_RESET, + T7XX_FASTBOOT_SWITCHING, + T7XX_FASTBOOT_DOWNLOAD, + T7XX_FASTBOOT_DUMP, + T7XX_MODE_LAST, /* must always be last */ +}; + /* struct t7xx_pci_dev - MTK device context structure * @intr_handler: array of handler function for request_threaded_irq * @intr_thread: array of thread_fn for request_threaded_irq @@ -59,6 +69,7 @@ typedef irqreturn_t (*t7xx_intr_callback)(int irq, void *param); * @md_pm_lock: protects 
PCIe sleep lock * @sleep_disable_count: PCIe L1.2 lock counter * @sleep_lock_acquire: indicates that sleep has been disabled + * @mode: indicates the device mode */ struct t7xx_pci_dev { t7xx_intr_callback intr_handler[EXT_INT_NUM]; @@ -82,6 +93,7 @@ struct t7xx_pci_dev { #ifdef CONFIG_WWAN_DEBUGFS struct dentry *debugfs_dir; #endif + u32 mode; }; enum t7xx_pm_id { @@ -120,5 +132,5 @@ int t7xx_pci_pm_entity_register(struct t7xx_pci_dev *t7xx_dev, struct md_pm_enti int t7xx_pci_pm_entity_unregister(struct t7xx_pci_dev *t7xx_dev, struct md_pm_entity *pm_entity); void t7xx_pci_pm_init_late(struct t7xx_pci_dev *t7xx_dev); void t7xx_pci_pm_exp_detected(struct t7xx_pci_dev *t7xx_dev); - +void t7xx_mode_update(struct t7xx_pci_dev *t7xx_dev, enum t7xx_mode mode); #endif /* __T7XX_PCI_H__ */ diff --git a/drivers/net/wwan/t7xx/t7xx_port.h b/drivers/net/wwan/t7xx/t7xx_port.h index 4ae8a00a85..f74d3bab81 100644 --- a/drivers/net/wwan/t7xx/t7xx_port.h +++ b/drivers/net/wwan/t7xx/t7xx_port.h @@ -75,6 +75,8 @@ enum port_ch { PORT_CH_DSS6_TX = 0x20df, PORT_CH_DSS7_RX = 0x20e0, PORT_CH_DSS7_TX = 0x20e1, + + PORT_CH_UNIMPORTANT = 0xffff, }; struct t7xx_port; @@ -135,11 +137,13 @@ struct t7xx_port { }; }; +int t7xx_get_port_mtu(struct t7xx_port *port); struct sk_buff *t7xx_port_alloc_skb(int payload); struct sk_buff *t7xx_ctrl_alloc_skb(int payload); int t7xx_port_enqueue_skb(struct t7xx_port *port, struct sk_buff *skb); int t7xx_port_send_skb(struct t7xx_port *port, struct sk_buff *skb, unsigned int pkt_header, unsigned int ex_msg); +int t7xx_port_send_raw_skb(struct t7xx_port *port, struct sk_buff *skb); int t7xx_port_send_ctl_skb(struct t7xx_port *port, struct sk_buff *skb, unsigned int msg, unsigned int ex_msg); diff --git a/drivers/net/wwan/t7xx/t7xx_port_proxy.c b/drivers/net/wwan/t7xx/t7xx_port_proxy.c index 274846d39f..7d6388bf1d 100644 --- a/drivers/net/wwan/t7xx/t7xx_port_proxy.c +++ b/drivers/net/wwan/t7xx/t7xx_port_proxy.c @@ -48,6 +48,9 @@ i < (proxy)->port_count; \ i++, (p) = &(proxy)->ports[i]) +#define T7XX_MAX_POSSIBLE_PORTS_NUM \ + (max(ARRAY_SIZE(t7xx_port_conf), ARRAY_SIZE(t7xx_early_port_conf))) + static const struct t7xx_port_conf t7xx_port_conf[] = { { .tx_ch = PORT_CH_UART2_TX, @@ -100,6 +103,21 @@ static const struct t7xx_port_conf t7xx_port_conf[] = { }, }; +static const struct t7xx_port_conf t7xx_early_port_conf[] = { + { + .tx_ch = PORT_CH_UNIMPORTANT, + .rx_ch = PORT_CH_UNIMPORTANT, + .txq_index = CLDMA_Q_IDX_DUMP, + .rxq_index = CLDMA_Q_IDX_DUMP, + .txq_exp_index = CLDMA_Q_IDX_DUMP, + .rxq_exp_index = CLDMA_Q_IDX_DUMP, + .path_id = CLDMA_ID_AP, + .ops = &wwan_sub_port_ops, + .name = "fastboot", + .port_type = WWAN_PORT_FASTBOOT, + }, +}; + static struct t7xx_port *t7xx_proxy_get_port_by_ch(struct port_proxy *port_prox, enum port_ch ch) { const struct t7xx_port_conf *port_conf; @@ -214,7 +232,17 @@ int t7xx_port_enqueue_skb(struct t7xx_port *port, struct sk_buff *skb) return 0; } -static int t7xx_port_send_raw_skb(struct t7xx_port *port, struct sk_buff *skb) +int t7xx_get_port_mtu(struct t7xx_port *port) +{ + enum cldma_id path_id = port->port_conf->path_id; + int tx_qno = t7xx_port_get_queue_no(port); + struct cldma_ctrl *md_ctrl; + + md_ctrl = port->t7xx_dev->md->md_ctrl[path_id]; + return md_ctrl->tx_ring[tx_qno].pkt_size; +} + +int t7xx_port_send_raw_skb(struct t7xx_port *port, struct sk_buff *skb) { enum cldma_id path_id = port->port_conf->path_id; struct cldma_ctrl *md_ctrl; @@ -329,6 +357,39 @@ static void t7xx_proxy_setup_ch_mapping(struct port_proxy *port_prox) } } +/** 
+ * t7xx_port_proxy_recv_skb_from_dedicated_queue() - Dispatch early port received skb. + * @queue: CLDMA queue. + * @skb: Socket buffer. + * + * Return: + ** 0 - Packet consumed. + ** -ERROR - Failed to process skb. + */ +int t7xx_port_proxy_recv_skb_from_dedicated_queue(struct cldma_queue *queue, struct sk_buff *skb) +{ + struct t7xx_pci_dev *t7xx_dev = queue->md_ctrl->t7xx_dev; + struct port_proxy *port_prox = t7xx_dev->md->port_prox; + const struct t7xx_port_conf *port_conf; + struct t7xx_port *port; + int ret; + + port = &port_prox->ports[0]; + if (WARN_ON_ONCE(port->port_conf->rxq_index != queue->index)) { + dev_kfree_skb_any(skb); + return -EINVAL; + } + + port_conf = port->port_conf; + ret = port_conf->ops->recv_skb(port, skb); + if (ret < 0 && ret != -ENOBUFS) { + dev_err(port->dev, "drop on RX ch %d, %d\n", port_conf->rx_ch, ret); + dev_kfree_skb_any(skb); + } + + return ret; +} + static struct t7xx_port *t7xx_port_proxy_find_port(struct t7xx_pci_dev *t7xx_dev, struct cldma_queue *queue, u16 channel) { @@ -359,7 +420,7 @@ static struct t7xx_port *t7xx_port_proxy_find_port(struct t7xx_pci_dev *t7xx_dev ** 0 - Packet consumed. ** -ERROR - Failed to process skb. */ -static int t7xx_port_proxy_recv_skb(struct cldma_queue *queue, struct sk_buff *skb) +int t7xx_port_proxy_recv_skb(struct cldma_queue *queue, struct sk_buff *skb) { struct ccci_header *ccci_h = (struct ccci_header *)skb->data; struct t7xx_pci_dev *t7xx_dev = queue->md_ctrl->t7xx_dev; @@ -444,33 +505,56 @@ static void t7xx_proxy_init_all_ports(struct t7xx_modem *md) spin_lock_init(&port->port_update_lock); port->chan_enable = false; - if (port_conf->ops->init) + if (port_conf->ops && port_conf->ops->init) port_conf->ops->init(port); } t7xx_proxy_setup_ch_mapping(port_prox); } +void t7xx_port_proxy_set_cfg(struct t7xx_modem *md, enum port_cfg_id cfg_id) +{ + struct port_proxy *port_prox = md->port_prox; + const struct t7xx_port_conf *port_conf; + u32 port_count; + int i; + + t7xx_port_proxy_uninit(port_prox); + + if (cfg_id == PORT_CFG_ID_EARLY) { + port_conf = t7xx_early_port_conf; + port_count = ARRAY_SIZE(t7xx_early_port_conf); + } else { + port_conf = t7xx_port_conf; + port_count = ARRAY_SIZE(t7xx_port_conf); + } + + for (i = 0; i < port_count; i++) + port_prox->ports[i].port_conf = &port_conf[i]; + + port_prox->cfg_id = cfg_id; + port_prox->port_count = port_count; + + t7xx_proxy_init_all_ports(md); +} + static int t7xx_proxy_alloc(struct t7xx_modem *md) { - unsigned int port_count = ARRAY_SIZE(t7xx_port_conf); struct device *dev = &md->t7xx_dev->pdev->dev; struct port_proxy *port_prox; - int i; - port_prox = devm_kzalloc(dev, sizeof(*port_prox) + sizeof(struct t7xx_port) * port_count, + port_prox = devm_kzalloc(dev, + struct_size(port_prox, + ports, + T7XX_MAX_POSSIBLE_PORTS_NUM), GFP_KERNEL); if (!port_prox) return -ENOMEM; md->port_prox = port_prox; port_prox->dev = dev; + t7xx_port_proxy_set_cfg(md, PORT_CFG_ID_EARLY); - for (i = 0; i < port_count; i++) - port_prox->ports[i].port_conf = &t7xx_port_conf[i]; - - port_prox->port_count = port_count; - t7xx_proxy_init_all_ports(md); return 0; } @@ -492,8 +576,6 @@ int t7xx_port_proxy_init(struct t7xx_modem *md) if (ret) return ret; - t7xx_cldma_set_recv_skb(md->md_ctrl[CLDMA_ID_AP], t7xx_port_proxy_recv_skb); - t7xx_cldma_set_recv_skb(md->md_ctrl[CLDMA_ID_MD], t7xx_port_proxy_recv_skb); return 0; } @@ -505,7 +587,7 @@ void t7xx_port_proxy_uninit(struct port_proxy *port_prox) for_each_proxy_port(i, port, port_prox) { const struct t7xx_port_conf *port_conf = 
port->port_conf; - if (port_conf->ops->uninit) + if (port_conf->ops && port_conf->ops->uninit) port_conf->ops->uninit(port); } } diff --git a/drivers/net/wwan/t7xx/t7xx_port_proxy.h b/drivers/net/wwan/t7xx/t7xx_port_proxy.h index 81d059fbc0..7f57068114 100644 --- a/drivers/net/wwan/t7xx/t7xx_port_proxy.h +++ b/drivers/net/wwan/t7xx/t7xx_port_proxy.h @@ -31,11 +31,18 @@ #define RX_QUEUE_MAXLEN 32 #define CTRL_QUEUE_MAXLEN 16 +enum port_cfg_id { + PORT_CFG_ID_INVALID, + PORT_CFG_ID_NORMAL, + PORT_CFG_ID_EARLY, +}; + struct port_proxy { int port_count; struct list_head rx_ch_ports[PORT_CH_ID_MASK + 1]; struct list_head queue_ports[CLDMA_NUM][MTK_QUEUES]; struct device *dev; + enum port_cfg_id cfg_id; struct t7xx_port ports[]; }; @@ -98,5 +105,8 @@ void t7xx_port_proxy_md_status_notify(struct port_proxy *port_prox, unsigned int int t7xx_port_enum_msg_handler(struct t7xx_modem *md, void *msg); int t7xx_port_proxy_chl_enable_disable(struct port_proxy *port_prox, unsigned int ch_id, bool en_flag); +void t7xx_port_proxy_set_cfg(struct t7xx_modem *md, enum port_cfg_id cfg_id); +int t7xx_port_proxy_recv_skb(struct cldma_queue *queue, struct sk_buff *skb); +int t7xx_port_proxy_recv_skb_from_dedicated_queue(struct cldma_queue *queue, struct sk_buff *skb); #endif /* __T7XX_PORT_PROXY_H__ */ diff --git a/drivers/net/wwan/t7xx/t7xx_port_wwan.c b/drivers/net/wwan/t7xx/t7xx_port_wwan.c index 17389c8f66..4b23ba693f 100644 --- a/drivers/net/wwan/t7xx/t7xx_port_wwan.c +++ b/drivers/net/wwan/t7xx/t7xx_port_wwan.c @@ -2,6 +2,7 @@ /* * Copyright (c) 2021, MediaTek Inc. * Copyright (c) 2021-2022, Intel Corporation. + * Copyright (c) 2024, Fibocom Wireless Inc. * * Authors: * Amir Hanania <amir.hanania@intel.com> @@ -15,6 +16,7 @@ * Chiranjeevi Rapolu <chiranjeevi.rapolu@intel.com> * Eliot Lee <eliot.lee@intel.com> * Sreehari Kancharla <sreehari.kancharla@intel.com> + * Jinjian Song <jinjian.song@fibocom.com> */ #include <linux/atomic.h> @@ -33,7 +35,7 @@ #include "t7xx_port_proxy.h" #include "t7xx_state_monitor.h" -static int t7xx_port_ctrl_start(struct wwan_port *port) +static int t7xx_port_wwan_start(struct wwan_port *port) { struct t7xx_port *port_mtk = wwan_port_get_drvdata(port); @@ -44,30 +46,60 @@ static int t7xx_port_ctrl_start(struct wwan_port *port) return 0; } -static void t7xx_port_ctrl_stop(struct wwan_port *port) +static void t7xx_port_wwan_stop(struct wwan_port *port) { struct t7xx_port *port_mtk = wwan_port_get_drvdata(port); atomic_dec(&port_mtk->usage_cnt); } -static int t7xx_port_ctrl_tx(struct wwan_port *port, struct sk_buff *skb) +static int t7xx_port_fastboot_tx(struct t7xx_port *port, struct sk_buff *skb) +{ + struct sk_buff *cur = skb, *tx_skb; + size_t actual, len, offset = 0; + int txq_mtu; + int ret; + + txq_mtu = t7xx_get_port_mtu(port); + if (txq_mtu < 0) + return -EINVAL; + + actual = cur->len; + while (actual) { + len = min_t(size_t, actual, txq_mtu); + tx_skb = __dev_alloc_skb(len, GFP_KERNEL); + if (!tx_skb) + return -ENOMEM; + + skb_put_data(tx_skb, cur->data + offset, len); + + ret = t7xx_port_send_raw_skb(port, tx_skb); + if (ret) { + dev_kfree_skb(tx_skb); + dev_err(port->dev, "Write error on fastboot port, %d\n", ret); + break; + } + offset += len; + actual -= len; + } + + dev_kfree_skb(skb); + return 0; +} + +static int t7xx_port_ctrl_tx(struct t7xx_port *port, struct sk_buff *skb) { - struct t7xx_port *port_private = wwan_port_get_drvdata(port); const struct t7xx_port_conf *port_conf; struct sk_buff *cur = skb, *cloned; struct t7xx_fsm_ctl *ctl; enum md_state md_state; int 
cnt = 0, ret; - if (!port_private->chan_enable) - return -EINVAL; - - port_conf = port_private->port_conf; - ctl = port_private->t7xx_dev->md->fsm_ctl; + port_conf = port->port_conf; + ctl = port->t7xx_dev->md->fsm_ctl; md_state = t7xx_fsm_get_md_state(ctl); if (md_state == MD_STATE_WAITING_FOR_HS1 || md_state == MD_STATE_WAITING_FOR_HS2) { - dev_warn(port_private->dev, "Cannot write to %s port when md_state=%d\n", + dev_warn(port->dev, "Cannot write to %s port when md_state=%d\n", port_conf->name, md_state); return -ENODEV; } @@ -75,10 +107,10 @@ static int t7xx_port_ctrl_tx(struct wwan_port *port, struct sk_buff *skb) while (cur) { cloned = skb_clone(cur, GFP_KERNEL); cloned->len = skb_headlen(cur); - ret = t7xx_port_send_skb(port_private, cloned, 0, 0); + ret = t7xx_port_send_skb(port, cloned, 0, 0); if (ret) { dev_kfree_skb(cloned); - dev_err(port_private->dev, "Write error on %s port, %d\n", + dev_err(port->dev, "Write error on %s port, %d\n", port_conf->name, ret); return cnt ? cnt + ret : ret; } @@ -93,14 +125,53 @@ static int t7xx_port_ctrl_tx(struct wwan_port *port, struct sk_buff *skb) return 0; } +static int t7xx_port_wwan_tx(struct wwan_port *port, struct sk_buff *skb) +{ + struct t7xx_port *port_private = wwan_port_get_drvdata(port); + const struct t7xx_port_conf *port_conf = port_private->port_conf; + int ret; + + if (!port_private->chan_enable) + return -EINVAL; + + if (port_conf->port_type != WWAN_PORT_FASTBOOT) + ret = t7xx_port_ctrl_tx(port_private, skb); + else + ret = t7xx_port_fastboot_tx(port_private, skb); + + return ret; +} + static const struct wwan_port_ops wwan_ops = { - .start = t7xx_port_ctrl_start, - .stop = t7xx_port_ctrl_stop, - .tx = t7xx_port_ctrl_tx, + .start = t7xx_port_wwan_start, + .stop = t7xx_port_wwan_stop, + .tx = t7xx_port_wwan_tx, }; +static void t7xx_port_wwan_create(struct t7xx_port *port) +{ + const struct t7xx_port_conf *port_conf = port->port_conf; + unsigned int header_len = sizeof(struct ccci_header), mtu; + struct wwan_port_caps caps; + + if (!port->wwan.wwan_port) { + mtu = t7xx_get_port_mtu(port); + caps.frag_len = mtu - header_len; + caps.headroom_len = header_len; + port->wwan.wwan_port = wwan_create_port(port->dev, port_conf->port_type, + &wwan_ops, &caps, port); + if (IS_ERR(port->wwan.wwan_port)) + dev_err(port->dev, "Unable to create WWAN port %s", port_conf->name); + } +} + static int t7xx_port_wwan_init(struct t7xx_port *port) { + const struct t7xx_port_conf *port_conf = port->port_conf; + + if (port_conf->port_type == WWAN_PORT_FASTBOOT) + t7xx_port_wwan_create(port); + port->rx_length_th = RX_QUEUE_MAXLEN; return 0; } @@ -152,20 +223,14 @@ static int t7xx_port_wwan_disable_chl(struct t7xx_port *port) static void t7xx_port_wwan_md_state_notify(struct t7xx_port *port, unsigned int state) { const struct t7xx_port_conf *port_conf = port->port_conf; - unsigned int header_len = sizeof(struct ccci_header); - struct wwan_port_caps caps; + + if (port_conf->port_type == WWAN_PORT_FASTBOOT) + return; if (state != MD_STATE_READY) return; - if (!port->wwan.wwan_port) { - caps.frag_len = CLDMA_MTU - header_len; - caps.headroom_len = header_len; - port->wwan.wwan_port = wwan_create_port(port->dev, port_conf->port_type, - &wwan_ops, &caps, port); - if (IS_ERR(port->wwan.wwan_port)) - dev_err(port->dev, "Unable to create WWWAN port %s", port_conf->name); - } + t7xx_port_wwan_create(port); } struct port_ops wwan_sub_port_ops = { diff --git a/drivers/net/wwan/t7xx/t7xx_reg.h b/drivers/net/wwan/t7xx/t7xx_reg.h index c41d7d094c..9c7dc72ac6 100644 
--- a/drivers/net/wwan/t7xx/t7xx_reg.h +++ b/drivers/net/wwan/t7xx/t7xx_reg.h @@ -101,11 +101,33 @@ enum t7xx_pm_resume_state { PM_RESUME_REG_STATE_L2_EXP, }; +enum host_event_e { + HOST_EVENT_INIT = 0, + FASTBOOT_DL_NOTIFY = 0x3, +}; + #define T7XX_PCIE_MISC_DEV_STATUS 0x0d1c #define MISC_STAGE_MASK GENMASK(2, 0) #define MISC_RESET_TYPE_PLDR BIT(26) #define MISC_RESET_TYPE_FLDR BIT(27) -#define LINUX_STAGE 4 +#define MISC_RESET_TYPE_PLDR BIT(26) +#define MISC_LK_EVENT_MASK GENMASK(11, 8) +#define HOST_EVENT_MASK GENMASK(31, 28) + +enum lk_event_id { + LK_EVENT_NORMAL = 0, + LK_EVENT_CREATE_PD_PORT = 1, + LK_EVENT_CREATE_POST_DL_PORT = 2, + LK_EVENT_RESET = 7, +}; + +enum t7xx_device_stage { + T7XX_DEV_STAGE_INIT = 0, + T7XX_DEV_STAGE_BROM_PRE = 1, + T7XX_DEV_STAGE_BROM_POST = 2, + T7XX_DEV_STAGE_LK = 3, + T7XX_DEV_STAGE_LINUX = 4, +}; #define T7XX_PCIE_RESOURCE_STATUS 0x0d28 #define T7XX_PCIE_RESOURCE_STS_MSK GENMASK(4, 0) diff --git a/drivers/net/wwan/t7xx/t7xx_state_monitor.c b/drivers/net/wwan/t7xx/t7xx_state_monitor.c index 0bc9743021..9889ca4621 100644 --- a/drivers/net/wwan/t7xx/t7xx_state_monitor.c +++ b/drivers/net/wwan/t7xx/t7xx_state_monitor.c @@ -47,6 +47,13 @@ #define FSM_MD_EX_PASS_TIMEOUT_MS 45000 #define FSM_CMD_TIMEOUT_MS 2000 +#define wait_for_expected_dev_stage(status) \ + read_poll_timeout(ioread32, status, \ + ((status & MISC_STAGE_MASK) == T7XX_DEV_STAGE_LINUX) || \ + ((status & MISC_STAGE_MASK) == T7XX_DEV_STAGE_LK), 100000, \ + 20000000, false, IREG_BASE(md->t7xx_dev) + \ + T7XX_PCIE_MISC_DEV_STATUS) + void t7xx_fsm_notifier_register(struct t7xx_modem *md, struct t7xx_fsm_notifier *notifier) { struct t7xx_fsm_ctl *ctl = md->fsm_ctl; @@ -206,6 +213,55 @@ static void fsm_routine_exception(struct t7xx_fsm_ctl *ctl, struct t7xx_fsm_comm fsm_finish_command(ctl, cmd, 0); } +static void t7xx_host_event_notify(struct t7xx_modem *md, unsigned int event_id) +{ + u32 value; + + value = ioread32(IREG_BASE(md->t7xx_dev) + T7XX_PCIE_MISC_DEV_STATUS); + value &= ~HOST_EVENT_MASK; + value |= FIELD_PREP(HOST_EVENT_MASK, event_id); + iowrite32(value, IREG_BASE(md->t7xx_dev) + T7XX_PCIE_MISC_DEV_STATUS); +} + +static void t7xx_lk_stage_event_handling(struct t7xx_fsm_ctl *ctl, unsigned int status) +{ + struct t7xx_modem *md = ctl->md; + struct cldma_ctrl *md_ctrl; + enum lk_event_id lk_event; + struct device *dev; + struct t7xx_port *port; + + dev = &md->t7xx_dev->pdev->dev; + lk_event = FIELD_GET(MISC_LK_EVENT_MASK, status); + switch (lk_event) { + case LK_EVENT_NORMAL: + case LK_EVENT_RESET: + break; + + case LK_EVENT_CREATE_PD_PORT: + case LK_EVENT_CREATE_POST_DL_PORT: + md_ctrl = md->md_ctrl[CLDMA_ID_AP]; + t7xx_cldma_hif_hw_init(md_ctrl); + t7xx_cldma_stop(md_ctrl); + t7xx_cldma_switch_cfg(md_ctrl, CLDMA_DEDICATED_Q_CFG); + + port = &ctl->md->port_prox->ports[0]; + port->port_conf->ops->enable_chl(port); + + t7xx_cldma_start(md_ctrl); + + if (lk_event == LK_EVENT_CREATE_POST_DL_PORT) + t7xx_mode_update(md->t7xx_dev, T7XX_FASTBOOT_DOWNLOAD); + else + t7xx_mode_update(md->t7xx_dev, T7XX_FASTBOOT_DUMP); + break; + + default: + dev_err(dev, "Invalid LK event %d\n", lk_event); + break; + } +} + static int fsm_stopped_handler(struct t7xx_fsm_ctl *ctl) { ctl->curr_state = FSM_STATE_STOPPED; @@ -226,8 +282,9 @@ static void fsm_routine_stopped(struct t7xx_fsm_ctl *ctl, struct t7xx_fsm_comman static void fsm_routine_stopping(struct t7xx_fsm_ctl *ctl, struct t7xx_fsm_command *cmd) { - struct t7xx_pci_dev *t7xx_dev; - struct cldma_ctrl *md_ctrl; + struct cldma_ctrl *md_ctrl = 
ctl->md->md_ctrl[CLDMA_ID_MD]; + struct t7xx_pci_dev *t7xx_dev = ctl->md->t7xx_dev; + enum t7xx_mode mode = READ_ONCE(t7xx_dev->mode); int err; if (ctl->curr_state == FSM_STATE_STOPPED || ctl->curr_state == FSM_STATE_STOPPING) { @@ -235,18 +292,20 @@ static void fsm_routine_stopping(struct t7xx_fsm_ctl *ctl, struct t7xx_fsm_comma return; } - md_ctrl = ctl->md->md_ctrl[CLDMA_ID_MD]; - t7xx_dev = ctl->md->t7xx_dev; - ctl->curr_state = FSM_STATE_STOPPING; t7xx_fsm_broadcast_state(ctl, MD_STATE_WAITING_TO_STOP); t7xx_cldma_stop(md_ctrl); - if (!ctl->md->rgu_irq_asserted) { - t7xx_mhccif_h2d_swint_trigger(t7xx_dev, H2D_CH_DRM_DISABLE_AP); - /* Wait for the DRM disable to take effect */ - msleep(FSM_DRM_DISABLE_DELAY_MS); + if (mode == T7XX_FASTBOOT_SWITCHING) + t7xx_host_event_notify(ctl->md, FASTBOOT_DL_NOTIFY); + + t7xx_mhccif_h2d_swint_trigger(t7xx_dev, H2D_CH_DRM_DISABLE_AP); + /* Wait for the DRM disable to take effect */ + msleep(FSM_DRM_DISABLE_DELAY_MS); + if (mode == T7XX_FASTBOOT_SWITCHING) { + t7xx_mhccif_h2d_swint_trigger(t7xx_dev, H2D_CH_DEVICE_RESET); + } else { err = t7xx_acpi_fldr_func(t7xx_dev); if (err) t7xx_mhccif_h2d_swint_trigger(t7xx_dev, H2D_CH_DEVICE_RESET); @@ -272,6 +331,7 @@ static void fsm_routine_ready(struct t7xx_fsm_ctl *ctl) ctl->curr_state = FSM_STATE_READY; t7xx_fsm_broadcast_ready_state(ctl); + t7xx_mode_update(md->t7xx_dev, T7XX_READY); t7xx_md_event_notify(md, FSM_READY); } @@ -317,7 +377,8 @@ static int fsm_routine_starting(struct t7xx_fsm_ctl *ctl) static void fsm_routine_start(struct t7xx_fsm_ctl *ctl, struct t7xx_fsm_command *cmd) { struct t7xx_modem *md = ctl->md; - u32 dev_status; + struct device *dev; + u32 status; int ret; if (!md) @@ -329,23 +390,53 @@ static void fsm_routine_start(struct t7xx_fsm_ctl *ctl, struct t7xx_fsm_command return; } + dev = &md->t7xx_dev->pdev->dev; ctl->curr_state = FSM_STATE_PRE_START; t7xx_md_event_notify(md, FSM_PRE_START); - ret = read_poll_timeout(ioread32, dev_status, - (dev_status & MISC_STAGE_MASK) == LINUX_STAGE, 20000, 2000000, - false, IREG_BASE(md->t7xx_dev) + T7XX_PCIE_MISC_DEV_STATUS); + ret = wait_for_expected_dev_stage(status); + if (ret) { - struct device *dev = &md->t7xx_dev->pdev->dev; + dev_err(dev, "read poll timeout %d\n", ret); + goto finish_command; + } - fsm_finish_command(ctl, cmd, -ETIMEDOUT); - dev_err(dev, "Invalid device status 0x%lx\n", dev_status & MISC_STAGE_MASK); - return; + if (status != ctl->status || cmd->flag != 0) { + u32 stage = FIELD_GET(MISC_STAGE_MASK, status); + + switch (stage) { + case T7XX_DEV_STAGE_INIT: + case T7XX_DEV_STAGE_BROM_PRE: + case T7XX_DEV_STAGE_BROM_POST: + dev_dbg(dev, "BROM_STAGE Entered\n"); + ret = t7xx_fsm_append_cmd(ctl, FSM_CMD_START, 0); + break; + + case T7XX_DEV_STAGE_LK: + dev_dbg(dev, "LK_STAGE Entered\n"); + t7xx_lk_stage_event_handling(ctl, status); + break; + + case T7XX_DEV_STAGE_LINUX: + dev_dbg(dev, "LINUX_STAGE Entered\n"); + t7xx_mhccif_mask_clr(md->t7xx_dev, D2H_INT_PORT_ENUM | + D2H_INT_ASYNC_MD_HK | D2H_INT_ASYNC_AP_HK); + if (cmd->flag == 0) + break; + t7xx_cldma_hif_hw_init(md->md_ctrl[CLDMA_ID_AP]); + t7xx_cldma_hif_hw_init(md->md_ctrl[CLDMA_ID_MD]); + t7xx_port_proxy_set_cfg(md, PORT_CFG_ID_NORMAL); + ret = fsm_routine_starting(ctl); + break; + + default: + break; + } + ctl->status = status; } - t7xx_cldma_hif_hw_init(md->md_ctrl[CLDMA_ID_AP]); - t7xx_cldma_hif_hw_init(md->md_ctrl[CLDMA_ID_MD]); - fsm_finish_command(ctl, cmd, fsm_routine_starting(ctl)); +finish_command: + fsm_finish_command(ctl, cmd, ret); } static int 
fsm_main_thread(void *data) @@ -517,6 +608,7 @@ void t7xx_fsm_reset(struct t7xx_modem *md) fsm_flush_event_cmd_qs(ctl); ctl->curr_state = FSM_STATE_STOPPED; ctl->exp_flg = false; + ctl->status = T7XX_DEV_STAGE_INIT; } int t7xx_fsm_init(struct t7xx_modem *md) diff --git a/drivers/net/wwan/t7xx/t7xx_state_monitor.h b/drivers/net/wwan/t7xx/t7xx_state_monitor.h index b0b3662ae6..7b0a9baf48 100644 --- a/drivers/net/wwan/t7xx/t7xx_state_monitor.h +++ b/drivers/net/wwan/t7xx/t7xx_state_monitor.h @@ -96,6 +96,7 @@ struct t7xx_fsm_ctl { bool exp_flg; spinlock_t notifier_lock; /* Protects notifier list */ struct list_head notifier_list; + u32 status; /* Device boot stage */ }; struct t7xx_fsm_event { diff --git a/drivers/net/wwan/wwan_core.c b/drivers/net/wwan/wwan_core.c index 72e01e550a..17431f1b1a 100644 --- a/drivers/net/wwan/wwan_core.c +++ b/drivers/net/wwan/wwan_core.c @@ -26,7 +26,9 @@ static DEFINE_MUTEX(wwan_register_lock); /* WWAN device create|remove lock */ static DEFINE_IDA(minors); /* minors for WWAN port chardevs */ static DEFINE_IDA(wwan_dev_ids); /* for unique WWAN device IDs */ -static struct class *wwan_class; +static const struct class wwan_class = { + .name = "wwan", +}; static int wwan_major; static struct dentry *wwan_debugfs_dir; @@ -130,7 +132,7 @@ static struct wwan_device *wwan_dev_get_by_parent(struct device *parent) { struct device *dev; - dev = class_find_device(wwan_class, NULL, parent, wwan_dev_parent_match); + dev = class_find_device(&wwan_class, NULL, parent, wwan_dev_parent_match); if (!dev) return ERR_PTR(-ENODEV); @@ -147,7 +149,7 @@ static struct wwan_device *wwan_dev_get_by_name(const char *name) { struct device *dev; - dev = class_find_device(wwan_class, NULL, name, wwan_dev_name_match); + dev = class_find_device(&wwan_class, NULL, name, wwan_dev_name_match); if (!dev) return ERR_PTR(-ENODEV); @@ -183,7 +185,7 @@ static struct wwan_device *wwan_dev_get_by_debugfs(struct dentry *dir) { struct device *dev; - dev = class_find_device(wwan_class, NULL, dir, wwan_dev_debugfs_match); + dev = class_find_device(&wwan_class, NULL, dir, wwan_dev_debugfs_match); if (!dev) return ERR_PTR(-ENODEV); @@ -239,7 +241,7 @@ static struct wwan_device *wwan_create_dev(struct device *parent) } wwandev->dev.parent = parent; - wwandev->dev.class = wwan_class; + wwandev->dev.class = &wwan_class; wwandev->dev.type = &wwan_dev_type; wwandev->id = id; dev_set_name(&wwandev->dev, "wwan%d", wwandev->id); @@ -265,7 +267,7 @@ done_unlock: static int is_wwan_child(struct device *dev, void *data) { - return dev->class == wwan_class; + return dev->class == &wwan_class; } static void wwan_remove_dev(struct wwan_device *wwandev) @@ -328,6 +330,10 @@ static const struct { .name = "XMMRPC", .devsuf = "xmmrpc", }, + [WWAN_PORT_FASTBOOT] = { + .name = "FASTBOOT", + .devsuf = "fastboot", + }, }; static ssize_t type_show(struct device *dev, struct device_attribute *attr, @@ -371,7 +377,7 @@ static struct wwan_port *wwan_port_get_by_minor(unsigned int minor) { struct device *dev; - dev = class_find_device(wwan_class, NULL, &minor, wwan_port_minor_match); + dev = class_find_device(&wwan_class, NULL, &minor, wwan_port_minor_match); if (!dev) return ERR_PTR(-ENODEV); @@ -401,7 +407,7 @@ static int __wwan_port_dev_assign_name(struct wwan_port *port, const char *fmt) return -ENOMEM; /* Collect ids of same name format ports */ - class_dev_iter_init(&iter, wwan_class, NULL, &wwan_port_dev_type); + class_dev_iter_init(&iter, &wwan_class, NULL, &wwan_port_dev_type); while ((dev = class_dev_iter_next(&iter))) { 
if (dev->parent != &wwandev->dev) continue; @@ -473,7 +479,7 @@ struct wwan_port *wwan_create_port(struct device *parent, mutex_init(&port->data_lock); port->dev.parent = &wwandev->dev; - port->dev.class = wwan_class; + port->dev.class = &wwan_class; port->dev.type = &wwan_port_dev_type; port->dev.devt = MKDEV(wwan_major, minor); dev_set_drvdata(&port->dev, drvdata); @@ -916,7 +922,7 @@ static int wwan_rtnl_validate(struct nlattr *tb[], struct nlattr *data[], return 0; } -static struct device_type wwan_type = { .name = "wwan" }; +static const struct device_type wwan_type = { .name = "wwan" }; static struct net_device *wwan_rtnl_alloc(struct nlattr *tb[], const char *ifname, @@ -1208,11 +1214,9 @@ static int __init wwan_init(void) if (err) return err; - wwan_class = class_create("wwan"); - if (IS_ERR(wwan_class)) { - err = PTR_ERR(wwan_class); + err = class_register(&wwan_class); + if (err) goto unregister; - } /* chrdev used for wwan ports */ wwan_major = __register_chrdev(0, 0, WWAN_MAX_MINORS, "wwan_port", @@ -1229,7 +1233,7 @@ static int __init wwan_init(void) return 0; destroy: - class_destroy(wwan_class); + class_unregister(&wwan_class); unregister: rtnl_link_unregister(&wwan_rtnl_link_ops); return err; @@ -1240,7 +1244,7 @@ static void __exit wwan_exit(void) debugfs_remove_recursive(wwan_debugfs_dir); __unregister_chrdev(wwan_major, 0, WWAN_MAX_MINORS, "wwan_port"); rtnl_link_unregister(&wwan_rtnl_link_ops); - class_destroy(wwan_class); + class_unregister(&wwan_class); } module_init(wwan_init); diff --git a/drivers/net/wwan/wwan_hwsim.c b/drivers/net/wwan/wwan_hwsim.c index ff3dd24ddb..b02befd1b6 100644 --- a/drivers/net/wwan/wwan_hwsim.c +++ b/drivers/net/wwan/wwan_hwsim.c @@ -25,7 +25,9 @@ static int wwan_hwsim_devsnum = 2; module_param_named(devices, wwan_hwsim_devsnum, int, 0444); MODULE_PARM_DESC(devices, "Number of simulated devices"); -static struct class *wwan_hwsim_class; +static const struct class wwan_hwsim_class = { + .name = "wwan_hwsim", +}; static struct dentry *wwan_hwsim_debugfs_topdir; static struct dentry *wwan_hwsim_debugfs_devcreate; @@ -277,7 +279,7 @@ static struct wwan_hwsim_dev *wwan_hwsim_dev_new(void) spin_unlock(&wwan_hwsim_devs_lock); dev->dev.release = wwan_hwsim_dev_release; - dev->dev.class = wwan_hwsim_class; + dev->dev.class = &wwan_hwsim_class; dev_set_name(&dev->dev, "hwsim%u", dev->id); spin_lock_init(&dev->ports_lock); @@ -511,11 +513,9 @@ static int __init wwan_hwsim_init(void) if (!wwan_wq) return -ENOMEM; - wwan_hwsim_class = class_create("wwan_hwsim"); - if (IS_ERR(wwan_hwsim_class)) { - err = PTR_ERR(wwan_hwsim_class); + err = class_register(&wwan_hwsim_class); + if (err) goto err_wq_destroy; - } wwan_hwsim_debugfs_topdir = debugfs_create_dir("wwan_hwsim", NULL); wwan_hwsim_debugfs_devcreate = @@ -534,7 +534,7 @@ err_clean_devs: wwan_hwsim_free_devs(); flush_workqueue(wwan_wq); /* Wait deletion works completion */ debugfs_remove(wwan_hwsim_debugfs_topdir); - class_destroy(wwan_hwsim_class); + class_unregister(&wwan_hwsim_class); err_wq_destroy: destroy_workqueue(wwan_wq); @@ -547,7 +547,7 @@ static void __exit wwan_hwsim_exit(void) wwan_hwsim_free_devs(); flush_workqueue(wwan_wq); /* Wait deletion works completion */ debugfs_remove(wwan_hwsim_debugfs_topdir); - class_destroy(wwan_hwsim_class); + class_unregister(&wwan_hwsim_class); destroy_workqueue(wwan_wq); } |
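Aside from the wwan core/hwsim conversion to statically registered classes, the user-visible addition in this slice is the t7xx driver's t7xx_mode sysfs attribute and the new FASTBOOT WWAN port type. A minimal userspace sketch of how that attribute might be exercised; the PCI address in the path is a hypothetical placeholder, and the real path depends on where the modem enumerates on the host:

```c
/* Userspace sketch (not part of the patch) for the t7xx_mode attribute
 * created by sysfs_create_group() in t7xx_pci_probe() above. */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

/* Hypothetical PCI address; adjust to the modem's actual BDF. */
#define T7XX_MODE_ATTR "/sys/bus/pci/devices/0000:01:00.0/t7xx_mode"

int main(void)
{
	char mode[32] = { 0 };
	int fd;

	fd = open(T7XX_MODE_ATTR, O_RDONLY);
	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (read(fd, mode, sizeof(mode) - 1) < 0) {
		perror("read");
		close(fd);
		return 1;
	}
	close(fd);
	printf("current mode: %s", mode);	/* e.g. "ready\n" */

	/* Request the fastboot switch: t7xx_mode_store() only records the
	 * mode; the device is notified and reset later in the driver's
	 * stop path (see fsm_routine_stopping() in the diff). */
	fd = open(T7XX_MODE_ATTR, O_WRONLY);
	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (write(fd, "fastboot_switching", strlen("fastboot_switching")) < 0)
		perror("write");
	close(fd);

	return 0;
}
```

Writing "reset" instead takes the t7xx_acpi_pldr_func() path, as shown in t7xx_mode_store() above.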