Diffstat (limited to 'drivers/ufs/host')
-rw-r--r--   drivers/ufs/host/ufs-mediatek.c   90
-rw-r--r--   drivers/ufs/host/ufs-mediatek.h    7
-rw-r--r--   drivers/ufs/host/ufs-qcom.c       23
3 files changed, 79 insertions(+), 41 deletions(-)
diff --git a/drivers/ufs/host/ufs-mediatek.c b/drivers/ufs/host/ufs-mediatek.c
index 776bca4f70..b8a8801322 100644
--- a/drivers/ufs/host/ufs-mediatek.c
+++ b/drivers/ufs/host/ufs-mediatek.c
@@ -17,7 +17,6 @@
#include <linux/of_platform.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
-#include <linux/pm_qos.h>
#include <linux/regulator/consumer.h>
#include <linux/reset.h>
#include <linux/soc/mediatek/mtk_sip_svc.h>
@@ -626,21 +625,9 @@ static void ufs_mtk_init_host_caps(struct ufs_hba *hba)
dev_info(hba->dev, "caps: 0x%x", host->caps);
}
-static void ufs_mtk_boost_pm_qos(struct ufs_hba *hba, bool boost)
-{
- struct ufs_mtk_host *host = ufshcd_get_variant(hba);
-
- if (!host || !host->pm_qos_init)
- return;
-
- cpu_latency_qos_update_request(&host->pm_qos_req,
- boost ? 0 : PM_QOS_DEFAULT_VALUE);
-}
-
static void ufs_mtk_scale_perf(struct ufs_hba *hba, bool scale_up)
{
ufs_mtk_boost_crypt(hba, scale_up);
- ufs_mtk_boost_pm_qos(hba, scale_up);
}
static void ufs_mtk_pwr_ctrl(struct ufs_hba *hba, bool on)
@@ -660,6 +647,45 @@ static void ufs_mtk_pwr_ctrl(struct ufs_hba *hba, bool on)
}
}
+static void ufs_mtk_mcq_disable_irq(struct ufs_hba *hba)
+{
+ struct ufs_mtk_host *host = ufshcd_get_variant(hba);
+ u32 irq, i;
+
+ if (!is_mcq_enabled(hba))
+ return;
+
+ if (host->mcq_nr_intr == 0)
+ return;
+
+ for (i = 0; i < host->mcq_nr_intr; i++) {
+ irq = host->mcq_intr_info[i].irq;
+ disable_irq(irq);
+ }
+ host->is_mcq_intr_enabled = false;
+}
+
+static void ufs_mtk_mcq_enable_irq(struct ufs_hba *hba)
+{
+ struct ufs_mtk_host *host = ufshcd_get_variant(hba);
+ u32 irq, i;
+
+ if (!is_mcq_enabled(hba))
+ return;
+
+ if (host->mcq_nr_intr == 0)
+ return;
+
+ if (host->is_mcq_intr_enabled == true)
+ return;
+
+ for (i = 0; i < host->mcq_nr_intr; i++) {
+ irq = host->mcq_intr_info[i].irq;
+ enable_irq(irq);
+ }
+ host->is_mcq_intr_enabled = true;
+}
+
/**
* ufs_mtk_setup_clocks - enables/disable clocks
* @hba: host controller instance
@@ -703,8 +729,10 @@ static int ufs_mtk_setup_clocks(struct ufs_hba *hba, bool on,
if (clk_pwr_off)
ufs_mtk_pwr_ctrl(hba, false);
+ ufs_mtk_mcq_disable_irq(hba);
} else if (on && status == POST_CHANGE) {
ufs_mtk_pwr_ctrl(hba, true);
+ ufs_mtk_mcq_enable_irq(hba);
}
return ret;
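Side note on the two helpers wired in above: enable_irq()/disable_irq() are reference counted by the IRQ core, and the enable helper checks is_mcq_intr_enabled so a repeated POST_CHANGE cannot unbalance that count. The same idea condensed into a single hypothetical helper, for illustration only (not the driver's code):

#include <linux/interrupt.h>
#include "ufs-mediatek.h"

static void example_mcq_set_irqs(struct ufs_mtk_host *host, bool enable)
{
	int i;

	/* Skip when already in the requested state so the IRQ depth count stays balanced */
	if (host->is_mcq_intr_enabled == enable)
		return;

	for (i = 0; i < host->mcq_nr_intr; i++) {
		if (enable)
			enable_irq(host->mcq_intr_info[i].irq);
		else
			disable_irq(host->mcq_intr_info[i].irq);
	}
	host->is_mcq_intr_enabled = enable;
}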
@@ -893,6 +921,7 @@ static int ufs_mtk_init(struct ufs_hba *hba)
const struct of_device_id *id;
struct device *dev = hba->dev;
struct ufs_mtk_host *host;
+ struct Scsi_Host *shost = hba->host;
int err = 0;
host = devm_kzalloc(dev, sizeof(*host), GFP_KERNEL);
@@ -937,6 +966,9 @@ static int ufs_mtk_init(struct ufs_hba *hba)
/* Enable clk scaling*/
hba->caps |= UFSHCD_CAP_CLK_SCALING;
+ /* Set runtime pm delay to replace default */
+ shost->rpm_autosuspend_delay = MTK_RPM_AUTOSUSPEND_DELAY_MS;
+
hba->quirks |= UFSHCI_QUIRK_SKIP_MANUAL_WB_FLUSH_CTRL;
hba->quirks |= UFSHCD_QUIRK_MCQ_BROKEN_INTR;
hba->quirks |= UFSHCD_QUIRK_MCQ_BROKEN_RTC;
@@ -959,10 +991,6 @@ static int ufs_mtk_init(struct ufs_hba *hba)
host->ip_ver = ufshcd_readl(hba, REG_UFS_MTK_IP_VER);
- /* Initialize pm-qos request */
- cpu_latency_qos_add_request(&host->pm_qos_req, PM_QOS_DEFAULT_VALUE);
- host->pm_qos_init = true;
-
goto out;
out_variant_clear:
@@ -1206,25 +1234,29 @@ static int ufs_mtk_link_set_hpm(struct ufs_hba *hba)
return err;
err = ufshcd_uic_hibern8_exit(hba);
- if (!err)
- ufshcd_set_link_active(hba);
- else
+ if (err)
return err;
- if (!hba->mcq_enabled) {
- err = ufshcd_make_hba_operational(hba);
- } else {
- ufs_mtk_config_mcq(hba, false);
- ufshcd_mcq_make_queues_operational(hba);
- ufshcd_mcq_config_mac(hba, hba->nutrs);
- /* Enable MCQ mode */
- ufshcd_writel(hba, ufshcd_readl(hba, REG_UFS_MEM_CFG) | 0x1,
- REG_UFS_MEM_CFG);
+ /* Check link state to make sure hibern8 exit succeeded */
+ ufs_mtk_wait_idle_state(hba, 5);
+ err = ufs_mtk_wait_link_state(hba, VS_LINK_UP, 100);
+ if (err) {
+ dev_warn(hba->dev, "exit h8 state fail, err=%d\n", err);
+ return err;
}
+ ufshcd_set_link_active(hba);
+ err = ufshcd_make_hba_operational(hba);
if (err)
return err;
+ if (is_mcq_enabled(hba)) {
+ ufs_mtk_config_mcq(hba, false);
+ ufshcd_mcq_make_queues_operational(hba);
+ ufshcd_mcq_config_mac(hba, hba->nutrs);
+ ufshcd_mcq_enable(hba);
+ }
+
return 0;
}
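With this rework, ufshcd_set_link_active() is called only after the controller actually reports VS_LINK_UP on leaving hibern8, and MCQ is brought back up only once the host has been made operational. For reference, a polled link-state wait in the spirit of ufs_mtk_wait_link_state() could be written roughly as below; the register offset and field mask are placeholders, not the driver's real probe-register layout:

#include <linux/iopoll.h>

#define EXAMPLE_LINK_STATE_REG	0x0	/* placeholder offset for this sketch */

static int example_wait_link_up(struct ufs_hba *hba, u32 timeout_ms)
{
	u32 val;

	/* Poll until the (assumed) link-state field reads VS_LINK_UP or the wait times out */
	return read_poll_timeout(ufshcd_readl, val,
				 (val & 0xf) == VS_LINK_UP,
				 USEC_PER_MSEC, timeout_ms * USEC_PER_MSEC,
				 false, hba, EXAMPLE_LINK_STATE_REG);
}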
diff --git a/drivers/ufs/host/ufs-mediatek.h b/drivers/ufs/host/ufs-mediatek.h
index f76e80d917..fb53882f42 100644
--- a/drivers/ufs/host/ufs-mediatek.h
+++ b/drivers/ufs/host/ufs-mediatek.h
@@ -7,7 +7,6 @@
#define _UFS_MEDIATEK_H
#include <linux/bitops.h>
-#include <linux/pm_qos.h>
#include <linux/soc/mediatek/mtk_sip_svc.h>
/*
@@ -167,7 +166,6 @@ struct ufs_mtk_mcq_intr_info {
struct ufs_mtk_host {
struct phy *mphy;
- struct pm_qos_request pm_qos_req;
struct regulator *reg_va09;
struct reset_control *hci_reset;
struct reset_control *unipro_reset;
@@ -178,7 +176,6 @@ struct ufs_mtk_host {
struct ufs_mtk_hw_ver hw_ver;
enum ufs_mtk_host_caps caps;
bool mphy_powered_on;
- bool pm_qos_init;
bool unipro_lpm;
bool ref_clk_enabled;
u16 ref_clk_ungating_wait_us;
@@ -186,10 +183,14 @@ struct ufs_mtk_host {
u32 ip_ver;
bool mcq_set_intr;
+ bool is_mcq_intr_enabled;
int mcq_nr_intr;
struct ufs_mtk_mcq_intr_info mcq_intr_info[UFSHCD_MAX_Q_NR];
};
+/* MTK delay of autosuspend: 500 ms */
+#define MTK_RPM_AUTOSUSPEND_DELAY_MS 500
+
/*
* Multi-VCC by Numbering
*/
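On the consumer side, the SCSI core applies the host's rpm_autosuspend_delay to each LUN when runtime PM is enabled for it, so MTK_RPM_AUTOSUSPEND_DELAY_MS ends up as the per-device autosuspend timeout. Conceptually it reduces to something like the sketch below (an illustration of the mechanism, not a quote of the SCSI core):

#include <linux/pm_runtime.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>

static void example_apply_rpm_delay(struct scsi_device *sdev)
{
	/* 500 ms for MediaTek hosts once shost->rpm_autosuspend_delay is set as above */
	pm_runtime_set_autosuspend_delay(&sdev->sdev_gendev,
					 sdev->host->rpm_autosuspend_delay);
	pm_runtime_use_autosuspend(&sdev->sdev_gendev);
}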
diff --git a/drivers/ufs/host/ufs-qcom.c b/drivers/ufs/host/ufs-qcom.c
index e5a4bf1c55..62c343444d 100644
--- a/drivers/ufs/host/ufs-qcom.c
+++ b/drivers/ufs/host/ufs-qcom.c
@@ -855,15 +855,20 @@ static void ufs_qcom_set_phy_gear(struct ufs_qcom_host *host)
struct ufs_host_params *host_params = &host->host_params;
u32 val, dev_major;
+ /*
+ * Default to powering up the PHY to the max gear possible, which is
+ * backwards compatible with lower gears but not optimal from
+ * a power usage point of view. After device negotiation, if the
+ * gear is lower a reinit will be performed to program the PHY
+ * to the ideal gear for this combo of controller and device.
+ */
host->phy_gear = host_params->hs_tx_gear;
if (host->hw_ver.major < 0x4) {
/*
- * For controllers whose major HW version is < 4, power up the
- * PHY using minimum supported gear (UFS_HS_G2). Switching to
- * max gear will be performed during reinit if supported.
- * For newer controllers, whose major HW version is >= 4, power
- * up the PHY using max supported gear.
+ * These controllers only have one PHY init sequence,
+ * let's power up the PHY using that (the minimum supported
+ * gear, UFS_HS_G2).
*/
host->phy_gear = UFS_HS_G2;
} else if (host->hw_ver.major >= 0x5) {
@@ -1726,8 +1731,8 @@ static int ufs_qcom_config_esi(struct ufs_hba *hba)
* 2. Poll queues do not need ESI.
*/
nr_irqs = hba->nr_hw_queues - hba->nr_queues[HCTX_TYPE_POLL];
- ret = platform_msi_domain_alloc_irqs(hba->dev, nr_irqs,
- ufs_qcom_write_msi_msg);
+ ret = platform_device_msi_init_and_alloc_irqs(hba->dev, nr_irqs,
+ ufs_qcom_write_msi_msg);
if (ret) {
dev_err(hba->dev, "Failed to request Platform MSI %d\n", ret);
return ret;
@@ -1756,7 +1761,7 @@ static int ufs_qcom_config_esi(struct ufs_hba *hba)
devm_free_irq(hba->dev, desc->irq, hba);
}
msi_unlock_descs(hba->dev);
- platform_msi_domain_free_irqs(hba->dev);
+ platform_device_msi_free_irqs_all(hba->dev);
} else {
if (host->hw_ver.major == 6 && host->hw_ver.minor == 0 &&
host->hw_ver.step == 0)
@@ -1832,7 +1837,7 @@ static void ufs_qcom_remove(struct platform_device *pdev)
pm_runtime_get_sync(&(pdev)->dev);
ufshcd_remove(hba);
- platform_msi_domain_free_irqs(hba->dev);
+ platform_device_msi_free_irqs_all(hba->dev);
}
static const struct of_device_id ufs_qcom_of_match[] __maybe_unused = {
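The ufs-qcom hunks above track the renamed platform MSI helpers: ESI vectors are now allocated with platform_device_msi_init_and_alloc_irqs() and released with platform_device_msi_free_irqs_all(), which drops every vector owned by the device. A minimal pairing of the two calls, with a hypothetical message-write callback, might look like:

#include <linux/device.h>
#include <linux/msi.h>

/* Hypothetical callback: program the controller's ESI address/data registers here. */
static void example_write_msi_msg(struct msi_desc *desc, struct msi_msg *msg)
{
}

static int example_alloc_esi(struct device *dev, unsigned int nr_irqs)
{
	/* Sets up the device MSI domain and allocates nr_irqs vectors */
	return platform_device_msi_init_and_alloc_irqs(dev, nr_irqs,
						       example_write_msi_msg);
}

static void example_free_esi(struct device *dev)
{
	/* Frees all MSI vectors previously allocated for this device */
	platform_device_msi_free_irqs_all(dev);
}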