commit 2c3c1048746a4622d8c89a29670120dc8fab93c4
tree 848558de17fb3008cdf4d861b01ac7781903ce39 /drivers/ufs/host
parent Initial commit.
author Daniel Baumann <daniel.baumann@progress-linux.org> 2024-04-07 18:49:45 +0000
committer Daniel Baumann <daniel.baumann@progress-linux.org> 2024-04-07 18:49:45 +0000
Adding upstream version 6.1.76.

Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat:
 drivers/ufs/host/Kconfig              |  127
 drivers/ufs/host/Makefile             |   15
 drivers/ufs/host/cdns-pltfrm.c        |  343
 drivers/ufs/host/tc-dwc-g210-pci.c    |  150
 drivers/ufs/host/tc-dwc-g210-pltfrm.c |  108
 drivers/ufs/host/tc-dwc-g210.c        |  319
 drivers/ufs/host/tc-dwc-g210.h        |   18
 drivers/ufs/host/ti-j721e-ufs.c       |   96
 drivers/ufs/host/ufs-exynos.c         | 1764
 drivers/ufs/host/ufs-exynos.h         |  270
 drivers/ufs/host/ufs-hisi.c           |  607
 drivers/ufs/host/ufs-hisi.h           |  104
 drivers/ufs/host/ufs-mediatek-trace.h |   59
 drivers/ufs/host/ufs-mediatek.c       | 1669
 drivers/ufs/host/ufs-mediatek.h       |  226
 drivers/ufs/host/ufs-qcom-ice.c       |  244
 drivers/ufs/host/ufs-qcom.c           | 1520
 drivers/ufs/host/ufs-qcom.h           |  270
 drivers/ufs/host/ufs-renesas.c        |  412
 drivers/ufs/host/ufshcd-dwc.c         |  150
 drivers/ufs/host/ufshcd-dwc.h         |   25
 drivers/ufs/host/ufshcd-pci.c         |  632
 drivers/ufs/host/ufshcd-pltfrm.c      |  401
 drivers/ufs/host/ufshcd-pltfrm.h      |   38
 drivers/ufs/host/ufshci-dwc.h         |   33
 25 files changed, 9600 insertions(+), 0 deletions(-)
diff --git a/drivers/ufs/host/Kconfig b/drivers/ufs/host/Kconfig
new file mode 100644
index 000000000..9b39fd760
--- /dev/null
+++ b/drivers/ufs/host/Kconfig
@@ -0,0 +1,127 @@
+# SPDX-License-Identifier: GPL-2.0+
+#
+# Kernel configuration file for the UFS host controller drivers.
+#
+# Copyright (C) 2011-2013 Samsung India Software Operations
+#
+# Authors:
+# Santosh Yaraganavi <santosh.sy@samsung.com>
+# Vinayak Holikatti <h.vinayak@samsung.com>
+
+config SCSI_UFSHCD_PCI
+ tristate "PCI bus based UFS Controller support"
+ depends on PCI
+ help
+ This selects the PCI UFS Host Controller Interface. Select this if
+ you have a UFS host controller with a PCI interface.
+
+ If you have a controller with this interface, say Y or M here.
+
+ If unsure, say N.
+
+config SCSI_UFS_DWC_TC_PCI
+ tristate "DesignWare pci support using a G210 Test Chip"
+ depends on SCSI_UFSHCD_PCI
+ help
+ Synopsys Test Chip is a PHY for prototyping purposes.
+
+ If unsure, say N.
+
+config SCSI_UFSHCD_PLATFORM
+ tristate "Platform bus based UFS Controller support"
+ depends on HAS_IOMEM
+ help
+ This selects the UFS host controller support. Select this if
+ you have a UFS controller on the platform bus.
+
+ If you have a controller with this interface, say Y or M here.
+
+ If unsure, say N.
+
+config SCSI_UFS_CDNS_PLATFORM
+ tristate "Cadence UFS Controller platform driver"
+ depends on SCSI_UFSHCD_PLATFORM
+ help
+ This selects the Cadence-specific additions to the UFSHCD platform driver.
+
+ If unsure, say N.
+
+config SCSI_UFS_DWC_TC_PLATFORM
+ tristate "DesignWare platform support using a G210 Test Chip"
+ depends on SCSI_UFSHCD_PLATFORM
+ help
+ Synopsys Test Chip is a PHY for prototyping purposes.
+
+ If unsure, say N.
+
+config SCSI_UFS_QCOM
+ tristate "QCOM specific hooks to UFS controller platform driver"
+ depends on SCSI_UFSHCD_PLATFORM && ARCH_QCOM
+ select QCOM_SCM if SCSI_UFS_CRYPTO
+ select RESET_CONTROLLER
+ help
+ This selects the QCOM specific additions to the UFSHCD platform driver.
+ The UFS host on QCOM needs some vendor specific configuration before
+ accessing the hardware, which includes PHY configuration and vendor
+ specific registers.
+
+ Select this if you have a UFS controller on a QCOM chipset.
+ If unsure, say N.
+
+config SCSI_UFS_MEDIATEK
+ tristate "Mediatek specific hooks to UFS controller platform driver"
+ depends on SCSI_UFSHCD_PLATFORM && ARCH_MEDIATEK
+ depends on RESET_CONTROLLER
+ select PHY_MTK_UFS
+ select RESET_TI_SYSCON
+ help
+ This selects the Mediatek specific additions to the UFSHCD platform driver.
+ The UFS host on Mediatek needs some vendor specific configuration before
+ accessing the hardware, which includes PHY configuration and vendor
+ specific registers.
+
+ Select this if you have a UFS controller on a Mediatek chipset.
+
+ If unsure, say N.
+
+config SCSI_UFS_HISI
+ tristate "Hisilicon specific hooks to UFS controller platform driver"
+ depends on (ARCH_HISI || COMPILE_TEST) && SCSI_UFSHCD_PLATFORM
+ help
+ This selects the Hisilicon specific additions to the UFSHCD platform driver.
+
+ Select this if you have a UFS controller on a Hisilicon chipset.
+ If unsure, say N.
+
+config SCSI_UFS_RENESAS
+ tristate "Renesas specific hooks to UFS controller platform driver"
+ depends on (ARCH_RENESAS || COMPILE_TEST) && SCSI_UFSHCD_PLATFORM
+ help
+ This selects the Renesas specific additions to the UFSHCD platform driver.
+ The UFS host on Renesas needs some vendor specific configuration before
+ accessing the hardware.
+
+ Select this if you have a UFS controller on a Renesas chipset.
+
+ If unsure, say N.
+
+config SCSI_UFS_TI_J721E
+ tristate "TI glue layer for Cadence UFS Controller"
+ depends on OF && HAS_IOMEM && (ARCH_K3 || COMPILE_TEST)
+ help
+ This selects the driver for the TI glue layer for the Cadence UFS Host
+ Controller IP.
+
+ Select this if you have a TI platform with a UFS controller.
+ If unsure, say N.
+
+config SCSI_UFS_EXYNOS
+ tristate "Exynos specific hooks to UFS controller platform driver"
+ depends on SCSI_UFSHCD_PLATFORM && (ARCH_EXYNOS || COMPILE_TEST)
+ help
+ This selects the Samsung Exynos SoC specific additions to the UFSHCD
+ platform driver. The UFS host on Samsung Exynos SoCs includes the HCI
+ and UNIPRO layers, and is paired with the UFS-PHY driver.
+
+ Select this if you have a UFS host controller on a Samsung Exynos SoC.
+ If unsure, say N.
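
Every option above follows the same shape: a thin vendor glue driver stacked on SCSI_UFSHCD_PLATFORM (or SCSI_UFSHCD_PCI) that feeds its hooks to the core through a struct ufs_hba_variant_ops. A minimal sketch of that pattern, modeled on the Cadence driver later in this patch (the my_ufs_* names are hypothetical):

	static int my_ufs_init(struct ufs_hba *hba)
	{
		/* vendor-specific setup: PHY, clocks, syscon, ... */
		return 0;
	}

	static const struct ufs_hba_variant_ops my_ufs_vops = {
		.name = "my-ufs",
		.init = my_ufs_init,
	};

	static int my_ufs_probe(struct platform_device *pdev)
	{
		/* the UFSHCD core does the heavy lifting; only the vops differ */
		return ufshcd_pltfrm_init(pdev, &my_ufs_vops);
	}
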
diff --git a/drivers/ufs/host/Makefile b/drivers/ufs/host/Makefile
new file mode 100644
index 000000000..7717ca93e
--- /dev/null
+++ b/drivers/ufs/host/Makefile
@@ -0,0 +1,15 @@
+# SPDX-License-Identifier: GPL-2.0
+
+obj-$(CONFIG_SCSI_UFS_DWC_TC_PCI) += tc-dwc-g210-pci.o ufshcd-dwc.o tc-dwc-g210.o
+obj-$(CONFIG_SCSI_UFS_DWC_TC_PLATFORM) += tc-dwc-g210-pltfrm.o ufshcd-dwc.o tc-dwc-g210.o
+obj-$(CONFIG_SCSI_UFS_CDNS_PLATFORM) += cdns-pltfrm.o
+obj-$(CONFIG_SCSI_UFS_QCOM) += ufs_qcom.o
+ufs_qcom-y += ufs-qcom.o
+ufs_qcom-$(CONFIG_SCSI_UFS_CRYPTO) += ufs-qcom-ice.o
+obj-$(CONFIG_SCSI_UFS_EXYNOS) += ufs-exynos.o
+obj-$(CONFIG_SCSI_UFSHCD_PCI) += ufshcd-pci.o
+obj-$(CONFIG_SCSI_UFSHCD_PLATFORM) += ufshcd-pltfrm.o
+obj-$(CONFIG_SCSI_UFS_HISI) += ufs-hisi.o
+obj-$(CONFIG_SCSI_UFS_MEDIATEK) += ufs-mediatek.o
+obj-$(CONFIG_SCSI_UFS_RENESAS) += ufs-renesas.o
+obj-$(CONFIG_SCSI_UFS_TI_J721E) += ti-j721e-ufs.o
diff --git a/drivers/ufs/host/cdns-pltfrm.c b/drivers/ufs/host/cdns-pltfrm.c
new file mode 100644
index 000000000..e05c0ae64
--- /dev/null
+++ b/drivers/ufs/host/cdns-pltfrm.c
@@ -0,0 +1,343 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Platform UFS Host driver for Cadence controller
+ *
+ * Copyright (C) 2018 Cadence Design Systems, Inc.
+ *
+ * Authors:
+ * Jan Kotas <jank@cadence.com>
+ *
+ */
+
+#include <linux/clk.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <linux/time.h>
+
+#include "ufshcd-pltfrm.h"
+
+#define CDNS_UFS_REG_HCLKDIV 0xFC
+#define CDNS_UFS_REG_PHY_XCFGD1 0x113C
+#define CDNS_UFS_MAX_L4_ATTRS 12
+
+struct cdns_ufs_host {
+ /**
+ * cdns_ufs_dme_attr_val - for storing L4 attributes
+ */
+ u32 cdns_ufs_dme_attr_val[CDNS_UFS_MAX_L4_ATTRS];
+};
+
+/**
+ * cdns_ufs_get_l4_attr - get L4 attributes on local side
+ * @hba: per adapter instance
+ *
+ */
+static void cdns_ufs_get_l4_attr(struct ufs_hba *hba)
+{
+ struct cdns_ufs_host *host = ufshcd_get_variant(hba);
+
+ ufshcd_dme_get(hba, UIC_ARG_MIB(T_PEERDEVICEID),
+ &host->cdns_ufs_dme_attr_val[0]);
+ ufshcd_dme_get(hba, UIC_ARG_MIB(T_PEERCPORTID),
+ &host->cdns_ufs_dme_attr_val[1]);
+ ufshcd_dme_get(hba, UIC_ARG_MIB(T_TRAFFICCLASS),
+ &host->cdns_ufs_dme_attr_val[2]);
+ ufshcd_dme_get(hba, UIC_ARG_MIB(T_PROTOCOLID),
+ &host->cdns_ufs_dme_attr_val[3]);
+ ufshcd_dme_get(hba, UIC_ARG_MIB(T_CPORTFLAGS),
+ &host->cdns_ufs_dme_attr_val[4]);
+ ufshcd_dme_get(hba, UIC_ARG_MIB(T_TXTOKENVALUE),
+ &host->cdns_ufs_dme_attr_val[5]);
+ ufshcd_dme_get(hba, UIC_ARG_MIB(T_RXTOKENVALUE),
+ &host->cdns_ufs_dme_attr_val[6]);
+ ufshcd_dme_get(hba, UIC_ARG_MIB(T_LOCALBUFFERSPACE),
+ &host->cdns_ufs_dme_attr_val[7]);
+ ufshcd_dme_get(hba, UIC_ARG_MIB(T_PEERBUFFERSPACE),
+ &host->cdns_ufs_dme_attr_val[8]);
+ ufshcd_dme_get(hba, UIC_ARG_MIB(T_CREDITSTOSEND),
+ &host->cdns_ufs_dme_attr_val[9]);
+ ufshcd_dme_get(hba, UIC_ARG_MIB(T_CPORTMODE),
+ &host->cdns_ufs_dme_attr_val[10]);
+ ufshcd_dme_get(hba, UIC_ARG_MIB(T_CONNECTIONSTATE),
+ &host->cdns_ufs_dme_attr_val[11]);
+}
+
+/**
+ * cdns_ufs_set_l4_attr - set L4 attributes on local side
+ * @hba: per adapter instance
+ *
+ */
+static void cdns_ufs_set_l4_attr(struct ufs_hba *hba)
+{
+ struct cdns_ufs_host *host = ufshcd_get_variant(hba);
+
+ ufshcd_dme_set(hba, UIC_ARG_MIB(T_CONNECTIONSTATE), 0);
+ ufshcd_dme_set(hba, UIC_ARG_MIB(T_PEERDEVICEID),
+ host->cdns_ufs_dme_attr_val[0]);
+ ufshcd_dme_set(hba, UIC_ARG_MIB(T_PEERCPORTID),
+ host->cdns_ufs_dme_attr_val[1]);
+ ufshcd_dme_set(hba, UIC_ARG_MIB(T_TRAFFICCLASS),
+ host->cdns_ufs_dme_attr_val[2]);
+ ufshcd_dme_set(hba, UIC_ARG_MIB(T_PROTOCOLID),
+ host->cdns_ufs_dme_attr_val[3]);
+ ufshcd_dme_set(hba, UIC_ARG_MIB(T_CPORTFLAGS),
+ host->cdns_ufs_dme_attr_val[4]);
+ ufshcd_dme_set(hba, UIC_ARG_MIB(T_TXTOKENVALUE),
+ host->cdns_ufs_dme_attr_val[5]);
+ ufshcd_dme_set(hba, UIC_ARG_MIB(T_RXTOKENVALUE),
+ host->cdns_ufs_dme_attr_val[6]);
+ ufshcd_dme_set(hba, UIC_ARG_MIB(T_LOCALBUFFERSPACE),
+ host->cdns_ufs_dme_attr_val[7]);
+ ufshcd_dme_set(hba, UIC_ARG_MIB(T_PEERBUFFERSPACE),
+ host->cdns_ufs_dme_attr_val[8]);
+ ufshcd_dme_set(hba, UIC_ARG_MIB(T_CREDITSTOSEND),
+ host->cdns_ufs_dme_attr_val[9]);
+ ufshcd_dme_set(hba, UIC_ARG_MIB(T_CPORTMODE),
+ host->cdns_ufs_dme_attr_val[10]);
+ ufshcd_dme_set(hba, UIC_ARG_MIB(T_CONNECTIONSTATE),
+ host->cdns_ufs_dme_attr_val[11]);
+}
+
+/**
+ * cdns_ufs_set_hclkdiv()
+ * Sets HCLKDIV register value based on the core_clk
+ * @hba: host controller instance
+ *
+ * Return zero for success and non-zero for failure
+ */
+static int cdns_ufs_set_hclkdiv(struct ufs_hba *hba)
+{
+ struct ufs_clk_info *clki;
+ struct list_head *head = &hba->clk_list_head;
+ unsigned long core_clk_rate = 0;
+ u32 core_clk_div = 0;
+
+ if (list_empty(head))
+ return 0;
+
+ list_for_each_entry(clki, head, list) {
+ if (IS_ERR_OR_NULL(clki->clk))
+ continue;
+ if (!strcmp(clki->name, "core_clk"))
+ core_clk_rate = clk_get_rate(clki->clk);
+ }
+
+ if (!core_clk_rate) {
+ dev_err(hba->dev, "%s: unable to find core_clk rate\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ core_clk_div = core_clk_rate / USEC_PER_SEC;
+
+ ufshcd_writel(hba, core_clk_div, CDNS_UFS_REG_HCLKDIV);
+ /*
+ * Make sure the register was updated;
+ * the UniPro layer will not work with an incorrect value.
+ */
+ mb();
+
+ return 0;
+}
+
+/**
+ * cdns_ufs_hce_enable_notify()
+ * Called before and after HCE enable bit is set.
+ * @hba: host controller instance
+ * @status: notify stage (pre, post change)
+ *
+ * Return zero for success and non-zero for failure
+ */
+static int cdns_ufs_hce_enable_notify(struct ufs_hba *hba,
+ enum ufs_notify_change_status status)
+{
+ if (status != PRE_CHANGE)
+ return 0;
+
+ return cdns_ufs_set_hclkdiv(hba);
+}
+
+/**
+ * cdns_ufs_hibern8_notify()
+ * Called around hibern8 enter/exit.
+ * @hba: host controller instance
+ * @cmd: UIC Command
+ * @status: notify stage (pre, post change)
+ *
+ */
+static void cdns_ufs_hibern8_notify(struct ufs_hba *hba, enum uic_cmd_dme cmd,
+ enum ufs_notify_change_status status)
+{
+ if (status == PRE_CHANGE && cmd == UIC_CMD_DME_HIBER_ENTER)
+ cdns_ufs_get_l4_attr(hba);
+ if (status == POST_CHANGE && cmd == UIC_CMD_DME_HIBER_EXIT)
+ cdns_ufs_set_l4_attr(hba);
+}
+
+/**
+ * cdns_ufs_link_startup_notify()
+ * Called before and after Link startup is carried out.
+ * @hba: host controller instance
+ * @status: notify stage (pre, post change)
+ *
+ * Return zero for success and non-zero for failure
+ */
+static int cdns_ufs_link_startup_notify(struct ufs_hba *hba,
+ enum ufs_notify_change_status status)
+{
+ if (status != PRE_CHANGE)
+ return 0;
+
+ /*
+ * Some UFS devices have issues if LCC is enabled.
+ * So we are setting PA_Local_TX_LCC_Enable to 0
+ * before link startup which will make sure that both host
+ * and device TX LCC are disabled once link startup is
+ * completed.
+ */
+ ufshcd_disable_host_tx_lcc(hba);
+
+ /*
+ * Disabling Autohibern8 feature in cadence UFS
+ * to mask unexpected interrupt trigger.
+ */
+ hba->ahit = 0;
+
+ return 0;
+}
+
+/**
+ * cdns_ufs_init - performs additional ufs initialization
+ * @hba: host controller instance
+ *
+ * Returns status of initialization
+ */
+static int cdns_ufs_init(struct ufs_hba *hba)
+{
+ int status = 0;
+ struct cdns_ufs_host *host;
+ struct device *dev = hba->dev;
+
+ host = devm_kzalloc(dev, sizeof(*host), GFP_KERNEL);
+
+ if (!host)
+ return -ENOMEM;
+ ufshcd_set_variant(hba, host);
+
+ status = ufshcd_vops_phy_initialization(hba);
+
+ return status;
+}
+
+/**
+ * cdns_ufs_m31_16nm_phy_initialization - performs m31 phy initialization
+ * @hba: host controller instance
+ *
+ * Always returns 0
+ */
+static int cdns_ufs_m31_16nm_phy_initialization(struct ufs_hba *hba)
+{
+ u32 data;
+
+ /* Increase RX_Advanced_Min_ActivateTime_Capability */
+ data = ufshcd_readl(hba, CDNS_UFS_REG_PHY_XCFGD1);
+ data |= BIT(24);
+ ufshcd_writel(hba, data, CDNS_UFS_REG_PHY_XCFGD1);
+
+ return 0;
+}
+
+static const struct ufs_hba_variant_ops cdns_ufs_pltfm_hba_vops = {
+ .name = "cdns-ufs-pltfm",
+ .init = cdns_ufs_init,
+ .hce_enable_notify = cdns_ufs_hce_enable_notify,
+ .link_startup_notify = cdns_ufs_link_startup_notify,
+ .hibern8_notify = cdns_ufs_hibern8_notify,
+};
+
+static const struct ufs_hba_variant_ops cdns_ufs_m31_16nm_pltfm_hba_vops = {
+ .name = "cdns-ufs-pltfm",
+ .init = cdns_ufs_init,
+ .hce_enable_notify = cdns_ufs_hce_enable_notify,
+ .link_startup_notify = cdns_ufs_link_startup_notify,
+ .phy_initialization = cdns_ufs_m31_16nm_phy_initialization,
+ .hibern8_notify = cdns_ufs_hibern8_notify,
+};
+
+static const struct of_device_id cdns_ufs_of_match[] = {
+ {
+ .compatible = "cdns,ufshc",
+ .data = &cdns_ufs_pltfm_hba_vops,
+ },
+ {
+ .compatible = "cdns,ufshc-m31-16nm",
+ .data = &cdns_ufs_m31_16nm_pltfm_hba_vops,
+ },
+ { },
+};
+
+MODULE_DEVICE_TABLE(of, cdns_ufs_of_match);
+
+/**
+ * cdns_ufs_pltfrm_probe - probe routine of the driver
+ * @pdev: pointer to platform device handle
+ *
+ * Return zero for success and non-zero for failure
+ */
+static int cdns_ufs_pltfrm_probe(struct platform_device *pdev)
+{
+ int err;
+ const struct of_device_id *of_id;
+ struct ufs_hba_variant_ops *vops;
+ struct device *dev = &pdev->dev;
+
+ of_id = of_match_node(cdns_ufs_of_match, dev->of_node);
+ vops = (struct ufs_hba_variant_ops *)of_id->data;
+
+ /* Perform generic probe */
+ err = ufshcd_pltfrm_init(pdev, vops);
+ if (err)
+ dev_err(dev, "ufshcd_pltfrm_init() failed %d\n", err);
+
+ return err;
+}
+
+/**
+ * cdns_ufs_pltfrm_remove - removes the ufs driver
+ * @pdev: pointer to platform device handle
+ *
+ * Always returns 0
+ */
+static int cdns_ufs_pltfrm_remove(struct platform_device *pdev)
+{
+ struct ufs_hba *hba = platform_get_drvdata(pdev);
+
+ ufshcd_remove(hba);
+ return 0;
+}
+
+static const struct dev_pm_ops cdns_ufs_dev_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(ufshcd_system_suspend, ufshcd_system_resume)
+ SET_RUNTIME_PM_OPS(ufshcd_runtime_suspend, ufshcd_runtime_resume, NULL)
+ .prepare = ufshcd_suspend_prepare,
+ .complete = ufshcd_resume_complete,
+};
+
+static struct platform_driver cdns_ufs_pltfrm_driver = {
+ .probe = cdns_ufs_pltfrm_probe,
+ .remove = cdns_ufs_pltfrm_remove,
+ .shutdown = ufshcd_pltfrm_shutdown,
+ .driver = {
+ .name = "cdns-ufshcd",
+ .pm = &cdns_ufs_dev_pm_ops,
+ .of_match_table = cdns_ufs_of_match,
+ },
+};
+
+module_platform_driver(cdns_ufs_pltfrm_driver);
+
+MODULE_AUTHOR("Jan Kotas <jank@cadence.com>");
+MODULE_DESCRIPTION("Cadence UFS host controller platform driver");
+MODULE_LICENSE("GPL v2");
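
The divider that cdns_ufs_set_hclkdiv() programs is simply the core clock rate expressed in cycles per microsecond, from which the UniPro layer derives its 1 us time base. A worked example of the computation above (the 200 MHz rate is an assumed value, not a requirement of this driver):

	unsigned long core_clk_rate = 200000000UL;		/* assume core_clk at 200 MHz */
	u32 core_clk_div = core_clk_rate / USEC_PER_SEC;	/* = 200 cycles per microsecond */

	ufshcd_writel(hba, core_clk_div, CDNS_UFS_REG_HCLKDIV);
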
diff --git a/drivers/ufs/host/tc-dwc-g210-pci.c b/drivers/ufs/host/tc-dwc-g210-pci.c
new file mode 100644
index 000000000..92b8ad4b5
--- /dev/null
+++ b/drivers/ufs/host/tc-dwc-g210-pci.c
@@ -0,0 +1,150 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Synopsys G210 Test Chip driver
+ *
+ * Copyright (C) 2015-2016 Synopsys, Inc. (www.synopsys.com)
+ *
+ * Authors: Joao Pinto <jpinto@synopsys.com>
+ */
+
+#include <ufs/ufshcd.h>
+#include "ufshcd-dwc.h"
+#include "tc-dwc-g210.h"
+
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/pm_runtime.h>
+
+/* Test Chip type expected values */
+#define TC_G210_20BIT 20
+#define TC_G210_40BIT 40
+#define TC_G210_INV 0
+
+static int tc_type = TC_G210_INV;
+module_param(tc_type, int, 0);
+MODULE_PARM_DESC(tc_type, "Test Chip Type (20 = 20-bit, 40 = 40-bit)");
+
+/*
+ * tc_dwc_g210_pci_hba_vops - UFS DWC specific variant operations
+ */
+static struct ufs_hba_variant_ops tc_dwc_g210_pci_hba_vops = {
+ .name = "tc-dwc-g210-pci",
+ .link_startup_notify = ufshcd_dwc_link_startup_notify,
+};
+
+/**
+ * tc_dwc_g210_pci_shutdown - main function to put the controller in reset state
+ * @pdev: pointer to PCI device handle
+ */
+static void tc_dwc_g210_pci_shutdown(struct pci_dev *pdev)
+{
+ ufshcd_shutdown((struct ufs_hba *)pci_get_drvdata(pdev));
+}
+
+/**
+ * tc_dwc_g210_pci_remove - de-allocate the PCI/SCSI host and host memory
+ * space data structures
+ * @pdev: pointer to PCI handle
+ */
+static void tc_dwc_g210_pci_remove(struct pci_dev *pdev)
+{
+ struct ufs_hba *hba = pci_get_drvdata(pdev);
+
+ pm_runtime_forbid(&pdev->dev);
+ pm_runtime_get_noresume(&pdev->dev);
+ ufshcd_remove(hba);
+}
+
+/**
+ * tc_dwc_g210_pci_probe - probe routine of the driver
+ * @pdev: pointer to PCI device handle
+ * @id: PCI device id
+ *
+ * Returns 0 on success, non-zero value on failure
+ */
+static int
+tc_dwc_g210_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ struct ufs_hba *hba;
+ void __iomem *mmio_base;
+ int err;
+
+ /* Check Test Chip type and set the specific setup routine */
+ if (tc_type == TC_G210_20BIT) {
+ tc_dwc_g210_pci_hba_vops.phy_initialization =
+ tc_dwc_g210_config_20_bit;
+ } else if (tc_type == TC_G210_40BIT) {
+ tc_dwc_g210_pci_hba_vops.phy_initialization =
+ tc_dwc_g210_config_40_bit;
+ } else {
+ dev_err(&pdev->dev, "test chip version not specified\n");
+ return -EPERM;
+ }
+
+ err = pcim_enable_device(pdev);
+ if (err) {
+ dev_err(&pdev->dev, "pcim_enable_device failed\n");
+ return err;
+ }
+
+ pci_set_master(pdev);
+
+ err = pcim_iomap_regions(pdev, 1 << 0, UFSHCD);
+ if (err < 0) {
+ dev_err(&pdev->dev, "request and iomap failed\n");
+ return err;
+ }
+
+ mmio_base = pcim_iomap_table(pdev)[0];
+
+ err = ufshcd_alloc_host(&pdev->dev, &hba);
+ if (err) {
+ dev_err(&pdev->dev, "Allocation failed\n");
+ return err;
+ }
+
+ hba->vops = &tc_dwc_g210_pci_hba_vops;
+
+ err = ufshcd_init(hba, mmio_base, pdev->irq);
+ if (err) {
+ dev_err(&pdev->dev, "Initialization failed\n");
+ return err;
+ }
+
+ pm_runtime_put_noidle(&pdev->dev);
+ pm_runtime_allow(&pdev->dev);
+
+ return 0;
+}
+
+static const struct dev_pm_ops tc_dwc_g210_pci_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(ufshcd_system_suspend, ufshcd_system_resume)
+ SET_RUNTIME_PM_OPS(ufshcd_runtime_suspend, ufshcd_runtime_resume, NULL)
+ .prepare = ufshcd_suspend_prepare,
+ .complete = ufshcd_resume_complete,
+};
+
+static const struct pci_device_id tc_dwc_g210_pci_tbl[] = {
+ { PCI_VENDOR_ID_SYNOPSYS, 0xB101, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
+ { PCI_VENDOR_ID_SYNOPSYS, 0xB102, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
+ { } /* terminate list */
+};
+
+MODULE_DEVICE_TABLE(pci, tc_dwc_g210_pci_tbl);
+
+static struct pci_driver tc_dwc_g210_pci_driver = {
+ .name = "tc-dwc-g210-pci",
+ .id_table = tc_dwc_g210_pci_tbl,
+ .probe = tc_dwc_g210_pci_probe,
+ .remove = tc_dwc_g210_pci_remove,
+ .shutdown = tc_dwc_g210_pci_shutdown,
+ .driver = {
+ .pm = &tc_dwc_g210_pci_pm_ops
+ },
+};
+
+module_pci_driver(tc_dwc_g210_pci_driver);
+
+MODULE_AUTHOR("Joao Pinto <Joao.Pinto@synopsys.com>");
+MODULE_DESCRIPTION("Synopsys Test Chip G210 PCI glue driver");
+MODULE_LICENSE("Dual BSD/GPL");
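
Because the RMMI width is a property of the attached test chip rather than of the PCI ID, the driver learns it from the tc_type module parameter and patches the matching setup routine into the (deliberately non-const) vops before handing them to the core; probing without tc_type=20 or tc_type=40 fails with -EPERM. A condensed view of that dispatch, as performed in the probe above:

	/* tc_type is set at load time, e.g. "modprobe tc-dwc-g210-pci tc_type=40" */
	if (tc_type == TC_G210_20BIT)
		tc_dwc_g210_pci_hba_vops.phy_initialization = tc_dwc_g210_config_20_bit;
	else if (tc_type == TC_G210_40BIT)
		tc_dwc_g210_pci_hba_vops.phy_initialization = tc_dwc_g210_config_40_bit;
	else
		return -EPERM;	/* test chip width not specified */
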
diff --git a/drivers/ufs/host/tc-dwc-g210-pltfrm.c b/drivers/ufs/host/tc-dwc-g210-pltfrm.c
new file mode 100644
index 000000000..f15a84d0c
--- /dev/null
+++ b/drivers/ufs/host/tc-dwc-g210-pltfrm.c
@@ -0,0 +1,108 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Synopsys G210 Test Chip driver
+ *
+ * Copyright (C) 2015-2016 Synopsys, Inc. (www.synopsys.com)
+ *
+ * Authors: Joao Pinto <jpinto@synopsys.com>
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <linux/delay.h>
+#include <linux/pm_runtime.h>
+
+#include "ufshcd-pltfrm.h"
+#include "ufshcd-dwc.h"
+#include "tc-dwc-g210.h"
+
+/*
+ * UFS DWC specific variant operations
+ */
+static struct ufs_hba_variant_ops tc_dwc_g210_20bit_pltfm_hba_vops = {
+ .name = "tc-dwc-g210-pltfm",
+ .link_startup_notify = ufshcd_dwc_link_startup_notify,
+ .phy_initialization = tc_dwc_g210_config_20_bit,
+};
+
+static struct ufs_hba_variant_ops tc_dwc_g210_40bit_pltfm_hba_vops = {
+ .name = "tc-dwc-g210-pltfm",
+ .link_startup_notify = ufshcd_dwc_link_startup_notify,
+ .phy_initialization = tc_dwc_g210_config_40_bit,
+};
+
+static const struct of_device_id tc_dwc_g210_pltfm_match[] = {
+ {
+ .compatible = "snps,g210-tc-6.00-20bit",
+ .data = &tc_dwc_g210_20bit_pltfm_hba_vops,
+ },
+ {
+ .compatible = "snps,g210-tc-6.00-40bit",
+ .data = &tc_dwc_g210_40bit_pltfm_hba_vops,
+ },
+ { },
+};
+MODULE_DEVICE_TABLE(of, tc_dwc_g210_pltfm_match);
+
+/**
+ * tc_dwc_g210_pltfm_probe()
+ * @pdev: pointer to platform device structure
+ *
+ */
+static int tc_dwc_g210_pltfm_probe(struct platform_device *pdev)
+{
+ int err;
+ const struct of_device_id *of_id;
+ struct ufs_hba_variant_ops *vops;
+ struct device *dev = &pdev->dev;
+
+ of_id = of_match_node(tc_dwc_g210_pltfm_match, dev->of_node);
+ vops = (struct ufs_hba_variant_ops *)of_id->data;
+
+ /* Perform generic probe */
+ err = ufshcd_pltfrm_init(pdev, vops);
+ if (err)
+ dev_err(dev, "ufshcd_pltfrm_init() failed %d\n", err);
+
+ return err;
+}
+
+/**
+ * tc_dwc_g210_pltfm_remove()
+ * @pdev: pointer to platform device structure
+ *
+ */
+static int tc_dwc_g210_pltfm_remove(struct platform_device *pdev)
+{
+ struct ufs_hba *hba = platform_get_drvdata(pdev);
+
+ pm_runtime_get_sync(&(pdev)->dev);
+ ufshcd_remove(hba);
+
+ return 0;
+}
+
+static const struct dev_pm_ops tc_dwc_g210_pltfm_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(ufshcd_system_suspend, ufshcd_system_resume)
+ SET_RUNTIME_PM_OPS(ufshcd_runtime_suspend, ufshcd_runtime_resume, NULL)
+};
+
+static struct platform_driver tc_dwc_g210_pltfm_driver = {
+ .probe = tc_dwc_g210_pltfm_probe,
+ .remove = tc_dwc_g210_pltfm_remove,
+ .shutdown = ufshcd_pltfrm_shutdown,
+ .driver = {
+ .name = "tc-dwc-g210-pltfm",
+ .pm = &tc_dwc_g210_pltfm_pm_ops,
+ .of_match_table = of_match_ptr(tc_dwc_g210_pltfm_match),
+ },
+};
+
+module_platform_driver(tc_dwc_g210_pltfm_driver);
+
+MODULE_ALIAS("platform:tc-dwc-g210-pltfm");
+MODULE_DESCRIPTION("Synopsys Test Chip G210 platform glue driver");
+MODULE_AUTHOR("Joao Pinto <Joao.Pinto@synopsys.com>");
+MODULE_LICENSE("Dual BSD/GPL");
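
The platform variant makes the same 20-bit/40-bit choice from the device tree instead of a module parameter: the matched of_device_id entry carries the vops in its .data field. The of_match_node() plus cast in the probe above can also be written with of_device_get_match_data(); a sketch under that assumption (my_probe is hypothetical, not part of this patch):

	#include <linux/of_device.h>

	static int my_probe(struct platform_device *pdev)
	{
		const struct ufs_hba_variant_ops *vops;

		/* returns the .data of the matching tc_dwc_g210_pltfm_match entry */
		vops = of_device_get_match_data(&pdev->dev);
		if (!vops)
			return -ENODEV;

		return ufshcd_pltfrm_init(pdev, vops);
	}
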
diff --git a/drivers/ufs/host/tc-dwc-g210.c b/drivers/ufs/host/tc-dwc-g210.c
new file mode 100644
index 000000000..deb93dbd8
--- /dev/null
+++ b/drivers/ufs/host/tc-dwc-g210.c
@@ -0,0 +1,319 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Synopsys G210 Test Chip driver
+ *
+ * Copyright (C) 2015-2016 Synopsys, Inc. (www.synopsys.com)
+ *
+ * Authors: Joao Pinto <jpinto@synopsys.com>
+ */
+
+#include <linux/module.h>
+
+#include <ufs/ufshcd.h>
+#include <ufs/unipro.h>
+
+#include "ufshcd-dwc.h"
+#include "ufshci-dwc.h"
+#include "tc-dwc-g210.h"
+
+/**
+ * tc_dwc_g210_setup_40bit_rmmi()
+ * This function configures Synopsys TC specific attributes (40-bit RMMI)
+ * @hba: Pointer to drivers structure
+ *
+ * Returns 0 on success or non-zero value on failure
+ */
+static int tc_dwc_g210_setup_40bit_rmmi(struct ufs_hba *hba)
+{
+ static const struct ufshcd_dme_attr_val setup_attrs[] = {
+ { UIC_ARG_MIB(TX_GLOBALHIBERNATE), 0x00, DME_LOCAL },
+ { UIC_ARG_MIB(REFCLKMODE), 0x01, DME_LOCAL },
+ { UIC_ARG_MIB(CDIRECTCTRL6), 0x80, DME_LOCAL },
+ { UIC_ARG_MIB(CBDIVFACTOR), 0x08, DME_LOCAL },
+ { UIC_ARG_MIB(CBDCOCTRL5), 0x64, DME_LOCAL },
+ { UIC_ARG_MIB(CBPRGTUNING), 0x09, DME_LOCAL },
+ { UIC_ARG_MIB(RTOBSERVESELECT), 0x00, DME_LOCAL },
+ { UIC_ARG_MIB_SEL(TX_REFCLKFREQ, SELIND_LN0_TX), 0x01,
+ DME_LOCAL },
+ { UIC_ARG_MIB_SEL(TX_CFGCLKFREQVAL, SELIND_LN0_TX), 0x19,
+ DME_LOCAL },
+ { UIC_ARG_MIB_SEL(CFGEXTRATTR, SELIND_LN0_TX), 0x14,
+ DME_LOCAL },
+ { UIC_ARG_MIB_SEL(DITHERCTRL2, SELIND_LN0_TX), 0xd6,
+ DME_LOCAL },
+ { UIC_ARG_MIB_SEL(RX_REFCLKFREQ, SELIND_LN0_RX), 0x01,
+ DME_LOCAL },
+ { UIC_ARG_MIB_SEL(RX_CFGCLKFREQVAL, SELIND_LN0_RX), 0x19,
+ DME_LOCAL },
+ { UIC_ARG_MIB_SEL(CFGWIDEINLN, SELIND_LN0_RX), 4,
+ DME_LOCAL },
+ { UIC_ARG_MIB_SEL(CFGRXCDR8, SELIND_LN0_RX), 0x80,
+ DME_LOCAL },
+ { UIC_ARG_MIB(DIRECTCTRL10), 0x04, DME_LOCAL },
+ { UIC_ARG_MIB(DIRECTCTRL19), 0x02, DME_LOCAL },
+ { UIC_ARG_MIB_SEL(CFGRXCDR8, SELIND_LN0_RX), 0x80,
+ DME_LOCAL },
+ { UIC_ARG_MIB_SEL(ENARXDIRECTCFG4, SELIND_LN0_RX), 0x03,
+ DME_LOCAL },
+ { UIC_ARG_MIB_SEL(CFGRXOVR8, SELIND_LN0_RX), 0x16,
+ DME_LOCAL },
+ { UIC_ARG_MIB_SEL(RXDIRECTCTRL2, SELIND_LN0_RX), 0x42,
+ DME_LOCAL },
+ { UIC_ARG_MIB_SEL(ENARXDIRECTCFG3, SELIND_LN0_RX), 0xa4,
+ DME_LOCAL },
+ { UIC_ARG_MIB_SEL(RXCALCTRL, SELIND_LN0_RX), 0x01,
+ DME_LOCAL },
+ { UIC_ARG_MIB_SEL(ENARXDIRECTCFG2, SELIND_LN0_RX), 0x01,
+ DME_LOCAL },
+ { UIC_ARG_MIB_SEL(CFGRXOVR4, SELIND_LN0_RX), 0x28,
+ DME_LOCAL },
+ { UIC_ARG_MIB_SEL(RXSQCTRL, SELIND_LN0_RX), 0x1E,
+ DME_LOCAL },
+ { UIC_ARG_MIB_SEL(CFGRXOVR6, SELIND_LN0_RX), 0x2f,
+ DME_LOCAL },
+ { UIC_ARG_MIB_SEL(CFGRXOVR6, SELIND_LN0_RX), 0x2f,
+ DME_LOCAL },
+ { UIC_ARG_MIB(CBPRGPLL2), 0x00, DME_LOCAL },
+ };
+
+ return ufshcd_dwc_dme_set_attrs(hba, setup_attrs,
+ ARRAY_SIZE(setup_attrs));
+}
+
+/**
+ * tc_dwc_g210_setup_20bit_rmmi_lane0()
+ * This function configures Synopsys TC 20-bit RMMI Lane 0
+ * @hba: Pointer to drivers structure
+ *
+ * Returns 0 on success or non-zero value on failure
+ */
+static int tc_dwc_g210_setup_20bit_rmmi_lane0(struct ufs_hba *hba)
+{
+ static const struct ufshcd_dme_attr_val setup_attrs[] = {
+ { UIC_ARG_MIB_SEL(TX_REFCLKFREQ, SELIND_LN0_TX), 0x01,
+ DME_LOCAL },
+ { UIC_ARG_MIB_SEL(TX_CFGCLKFREQVAL, SELIND_LN0_TX), 0x19,
+ DME_LOCAL },
+ { UIC_ARG_MIB_SEL(RX_CFGCLKFREQVAL, SELIND_LN0_RX), 0x19,
+ DME_LOCAL },
+ { UIC_ARG_MIB_SEL(CFGEXTRATTR, SELIND_LN0_TX), 0x12,
+ DME_LOCAL },
+ { UIC_ARG_MIB_SEL(DITHERCTRL2, SELIND_LN0_TX), 0xd6,
+ DME_LOCAL },
+ { UIC_ARG_MIB_SEL(RX_REFCLKFREQ, SELIND_LN0_RX), 0x01,
+ DME_LOCAL },
+ { UIC_ARG_MIB_SEL(CFGWIDEINLN, SELIND_LN0_RX), 2,
+ DME_LOCAL },
+ { UIC_ARG_MIB_SEL(CFGRXCDR8, SELIND_LN0_RX), 0x80,
+ DME_LOCAL },
+ { UIC_ARG_MIB(DIRECTCTRL10), 0x04, DME_LOCAL },
+ { UIC_ARG_MIB(DIRECTCTRL19), 0x02, DME_LOCAL },
+ { UIC_ARG_MIB_SEL(ENARXDIRECTCFG4, SELIND_LN0_RX), 0x03,
+ DME_LOCAL },
+ { UIC_ARG_MIB_SEL(CFGRXOVR8, SELIND_LN0_RX), 0x16,
+ DME_LOCAL },
+ { UIC_ARG_MIB_SEL(RXDIRECTCTRL2, SELIND_LN0_RX), 0x42,
+ DME_LOCAL },
+ { UIC_ARG_MIB_SEL(ENARXDIRECTCFG3, SELIND_LN0_RX), 0xa4,
+ DME_LOCAL },
+ { UIC_ARG_MIB_SEL(RXCALCTRL, SELIND_LN0_RX), 0x01,
+ DME_LOCAL },
+ { UIC_ARG_MIB_SEL(ENARXDIRECTCFG2, SELIND_LN0_RX), 0x01,
+ DME_LOCAL },
+ { UIC_ARG_MIB_SEL(CFGRXOVR4, SELIND_LN0_RX), 0x28,
+ DME_LOCAL },
+ { UIC_ARG_MIB_SEL(RXSQCTRL, SELIND_LN0_RX), 0x1E,
+ DME_LOCAL },
+ { UIC_ARG_MIB_SEL(CFGRXOVR6, SELIND_LN0_RX), 0x2f,
+ DME_LOCAL },
+ { UIC_ARG_MIB(CBPRGPLL2), 0x00, DME_LOCAL },
+ };
+
+ return ufshcd_dwc_dme_set_attrs(hba, setup_attrs,
+ ARRAY_SIZE(setup_attrs));
+}
+
+/**
+ * tc_dwc_g210_setup_20bit_rmmi_lane1()
+ * This function configures Synopsys TC 20-bit RMMI Lane 1
+ * @hba: Pointer to drivers structure
+ *
+ * Returns 0 on success or non-zero value on failure
+ */
+static int tc_dwc_g210_setup_20bit_rmmi_lane1(struct ufs_hba *hba)
+{
+ int connected_rx_lanes = 0;
+ int connected_tx_lanes = 0;
+ int ret = 0;
+
+ static const struct ufshcd_dme_attr_val setup_tx_attrs[] = {
+ { UIC_ARG_MIB_SEL(TX_REFCLKFREQ, SELIND_LN1_TX), 0x0d,
+ DME_LOCAL },
+ { UIC_ARG_MIB_SEL(TX_CFGCLKFREQVAL, SELIND_LN1_TX), 0x19,
+ DME_LOCAL },
+ { UIC_ARG_MIB_SEL(CFGEXTRATTR, SELIND_LN1_TX), 0x12,
+ DME_LOCAL },
+ { UIC_ARG_MIB_SEL(DITHERCTRL2, SELIND_LN0_TX), 0xd6,
+ DME_LOCAL },
+ };
+
+ static const struct ufshcd_dme_attr_val setup_rx_attrs[] = {
+ { UIC_ARG_MIB_SEL(RX_REFCLKFREQ, SELIND_LN1_RX), 0x01,
+ DME_LOCAL },
+ { UIC_ARG_MIB_SEL(RX_CFGCLKFREQVAL, SELIND_LN1_RX), 0x19,
+ DME_LOCAL },
+ { UIC_ARG_MIB_SEL(CFGWIDEINLN, SELIND_LN1_RX), 2,
+ DME_LOCAL },
+ { UIC_ARG_MIB_SEL(CFGRXCDR8, SELIND_LN1_RX), 0x80,
+ DME_LOCAL },
+ { UIC_ARG_MIB_SEL(ENARXDIRECTCFG4, SELIND_LN1_RX), 0x03,
+ DME_LOCAL },
+ { UIC_ARG_MIB_SEL(CFGRXOVR8, SELIND_LN1_RX), 0x16,
+ DME_LOCAL },
+ { UIC_ARG_MIB_SEL(RXDIRECTCTRL2, SELIND_LN1_RX), 0x42,
+ DME_LOCAL },
+ { UIC_ARG_MIB_SEL(ENARXDIRECTCFG3, SELIND_LN1_RX), 0xa4,
+ DME_LOCAL },
+ { UIC_ARG_MIB_SEL(RXCALCTRL, SELIND_LN1_RX), 0x01,
+ DME_LOCAL },
+ { UIC_ARG_MIB_SEL(ENARXDIRECTCFG2, SELIND_LN1_RX), 0x01,
+ DME_LOCAL },
+ { UIC_ARG_MIB_SEL(CFGRXOVR4, SELIND_LN1_RX), 0x28,
+ DME_LOCAL },
+ { UIC_ARG_MIB_SEL(RXSQCTRL, SELIND_LN1_RX), 0x1E,
+ DME_LOCAL },
+ { UIC_ARG_MIB_SEL(CFGRXOVR6, SELIND_LN1_RX), 0x2f,
+ DME_LOCAL },
+ };
+
+ /* Get the available lane count */
+ ufshcd_dme_get(hba, UIC_ARG_MIB(PA_AVAILRXDATALANES),
+ &connected_rx_lanes);
+ ufshcd_dme_get(hba, UIC_ARG_MIB(PA_AVAILTXDATALANES),
+ &connected_tx_lanes);
+
+ if (connected_tx_lanes == 2) {
+
+ ret = ufshcd_dwc_dme_set_attrs(hba, setup_tx_attrs,
+ ARRAY_SIZE(setup_tx_attrs));
+
+ if (ret)
+ goto out;
+ }
+
+ if (connected_rx_lanes == 2) {
+ ret = ufshcd_dwc_dme_set_attrs(hba, setup_rx_attrs,
+ ARRAY_SIZE(setup_rx_attrs));
+ }
+
+out:
+ return ret;
+}
+
+/**
+ * tc_dwc_g210_setup_20bit_rmmi()
+ * This function configures Synopsys TC specific attributes (20-bit RMMI)
+ * @hba: Pointer to drivers structure
+ *
+ * Returns 0 on success or non-zero value on failure
+ */
+static int tc_dwc_g210_setup_20bit_rmmi(struct ufs_hba *hba)
+{
+ int ret = 0;
+
+ static const struct ufshcd_dme_attr_val setup_attrs[] = {
+ { UIC_ARG_MIB(TX_GLOBALHIBERNATE), 0x00, DME_LOCAL },
+ { UIC_ARG_MIB(REFCLKMODE), 0x01, DME_LOCAL },
+ { UIC_ARG_MIB(CDIRECTCTRL6), 0xc0, DME_LOCAL },
+ { UIC_ARG_MIB(CBDIVFACTOR), 0x44, DME_LOCAL },
+ { UIC_ARG_MIB(CBDCOCTRL5), 0x64, DME_LOCAL },
+ { UIC_ARG_MIB(CBPRGTUNING), 0x09, DME_LOCAL },
+ { UIC_ARG_MIB(RTOBSERVESELECT), 0x00, DME_LOCAL },
+ };
+
+ ret = ufshcd_dwc_dme_set_attrs(hba, setup_attrs,
+ ARRAY_SIZE(setup_attrs));
+ if (ret)
+ goto out;
+
+ /* Lane 0 configuration */
+ ret = tc_dwc_g210_setup_20bit_rmmi_lane0(hba);
+ if (ret)
+ goto out;
+
+ /* Lane 1 configuration */
+ ret = tc_dwc_g210_setup_20bit_rmmi_lane1(hba);
+ if (ret)
+ goto out;
+
+out:
+ return ret;
+}
+
+/**
+ * tc_dwc_g210_config_40_bit()
+ * This function configures Local (host) Synopsys 40-bit TC specific attributes
+ *
+ * @hba: Pointer to drivers structure
+ *
+ * Returns 0 on success, non-zero value on failure
+ */
+int tc_dwc_g210_config_40_bit(struct ufs_hba *hba)
+{
+ int ret = 0;
+
+ dev_info(hba->dev, "Configuring Test Chip 40-bit RMMI\n");
+ ret = tc_dwc_g210_setup_40bit_rmmi(hba);
+ if (ret) {
+ dev_err(hba->dev, "Configuration failed\n");
+ goto out;
+ }
+
+ /* To write Shadow register bank to effective configuration block */
+ ret = ufshcd_dme_set(hba, UIC_ARG_MIB(VS_MPHYCFGUPDT), 0x01);
+ if (ret)
+ goto out;
+
+ /* To configure Debug OMC */
+ ret = ufshcd_dme_set(hba, UIC_ARG_MIB(VS_DEBUGOMC), 0x01);
+
+out:
+ return ret;
+}
+EXPORT_SYMBOL(tc_dwc_g210_config_40_bit);
+
+/**
+ * tc_dwc_g210_config_20_bit()
+ * This function configures Local (host) Synopsys 20-bit TC specific attributes
+ *
+ * @hba: Pointer to drivers structure
+ *
+ * Returns 0 on success, non-zero value on failure
+ */
+int tc_dwc_g210_config_20_bit(struct ufs_hba *hba)
+{
+ int ret = 0;
+
+ dev_info(hba->dev, "Configuring Test Chip 20-bit RMMI\n");
+ ret = tc_dwc_g210_setup_20bit_rmmi(hba);
+ if (ret) {
+ dev_err(hba->dev, "Configuration failed\n");
+ goto out;
+ }
+
+ /* To write Shadow register bank to effective configuration block */
+ ret = ufshcd_dme_set(hba, UIC_ARG_MIB(VS_MPHYCFGUPDT), 0x01);
+ if (ret)
+ goto out;
+
+ /* To configure Debug OMC */
+ ret = ufshcd_dme_set(hba, UIC_ARG_MIB(VS_DEBUGOMC), 0x01);
+
+out:
+ return ret;
+}
+EXPORT_SYMBOL(tc_dwc_g210_config_20_bit);
+
+MODULE_AUTHOR("Joao Pinto <Joao.Pinto@synopsys.com>");
+MODULE_DESCRIPTION("Synopsys G210 Test Chip driver");
+MODULE_LICENSE("Dual BSD/GPL");
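
The PHY bring-up here is data-driven: each routine is a table of {attribute, value, DME_LOCAL} triples pushed to the M-PHY in order, with any failure aborting the sequence. The table walker, ufshcd_dwc_dme_set_attrs(), lives in ufshcd-dwc.c further down in this patch; roughly, it amounts to:

	/* sketch of the walker the setup routines above rely on */
	int i, ret;

	for (i = 0; i < n; i++) {
		ret = ufshcd_dme_set_attr(hba, v[i].attr_sel, ATTR_SET_NOR,
					  v[i].mib_val, v[i].peer);
		if (ret)
			return ret;	/* stop at the first failed attribute */
	}
	return 0;
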
diff --git a/drivers/ufs/host/tc-dwc-g210.h b/drivers/ufs/host/tc-dwc-g210.h
new file mode 100644
index 000000000..f7154012f
--- /dev/null
+++ b/drivers/ufs/host/tc-dwc-g210.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Synopsys G210 Test Chip driver
+ *
+ * Copyright (C) 2015-2016 Synopsys, Inc. (www.synopsys.com)
+ *
+ * Authors: Joao Pinto <jpinto@synopsys.com>
+ */
+
+#ifndef _TC_DWC_G210_H
+#define _TC_DWC_G210_H
+
+struct ufs_hba;
+
+int tc_dwc_g210_config_40_bit(struct ufs_hba *hba);
+int tc_dwc_g210_config_20_bit(struct ufs_hba *hba);
+
+#endif /* End of Header */
diff --git a/drivers/ufs/host/ti-j721e-ufs.c b/drivers/ufs/host/ti-j721e-ufs.c
new file mode 100644
index 000000000..122d650d0
--- /dev/null
+++ b/drivers/ufs/host/ti-j721e-ufs.c
@@ -0,0 +1,96 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com/
+//
+
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+
+#define TI_UFS_SS_CTRL 0x4
+#define TI_UFS_SS_RST_N_PCS BIT(0)
+#define TI_UFS_SS_CLK_26MHZ BIT(4)
+
+static int ti_j721e_ufs_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ unsigned long clk_rate;
+ void __iomem *regbase;
+ struct clk *clk;
+ u32 reg = 0;
+ int ret;
+
+ regbase = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(regbase))
+ return PTR_ERR(regbase);
+
+ pm_runtime_enable(dev);
+ ret = pm_runtime_resume_and_get(dev);
+ if (ret < 0)
+ goto disable_pm;
+
+ /* Select MPHY refclk frequency */
+ clk = devm_clk_get(dev, NULL);
+ if (IS_ERR(clk)) {
+ ret = PTR_ERR(clk);
+ dev_err(dev, "Cannot claim MPHY clock.\n");
+ goto clk_err;
+ }
+ clk_rate = clk_get_rate(clk);
+ if (clk_rate == 26000000)
+ reg |= TI_UFS_SS_CLK_26MHZ;
+ devm_clk_put(dev, clk);
+
+ /* Take UFS slave device out of reset */
+ reg |= TI_UFS_SS_RST_N_PCS;
+ writel(reg, regbase + TI_UFS_SS_CTRL);
+
+ ret = of_platform_populate(pdev->dev.of_node, NULL, NULL,
+ dev);
+ if (ret) {
+ dev_err(dev, "failed to populate child nodes %d\n", ret);
+ goto clk_err;
+ }
+
+ return ret;
+
+clk_err:
+ pm_runtime_put_sync(dev);
+disable_pm:
+ pm_runtime_disable(dev);
+ return ret;
+}
+
+static int ti_j721e_ufs_remove(struct platform_device *pdev)
+{
+ of_platform_depopulate(&pdev->dev);
+ pm_runtime_put_sync(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
+
+ return 0;
+}
+
+static const struct of_device_id ti_j721e_ufs_of_match[] = {
+ {
+ .compatible = "ti,j721e-ufs",
+ },
+ { },
+};
+
+static struct platform_driver ti_j721e_ufs_driver = {
+ .probe = ti_j721e_ufs_probe,
+ .remove = ti_j721e_ufs_remove,
+ .driver = {
+ .name = "ti-j721e-ufs",
+ .of_match_table = ti_j721e_ufs_of_match,
+ },
+};
+module_platform_driver(ti_j721e_ufs_driver);
+
+MODULE_AUTHOR("Vignesh Raghavendra <vigneshr@ti.com>");
+MODULE_DESCRIPTION("TI UFS host controller glue driver");
+MODULE_LICENSE("GPL v2");
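
This glue driver boils down to a single register write: select the reference clock bit from the measured rate, release the PCS reset, and let of_platform_populate() probe the actual UFS controller described by the DT child node. Worked values for the TI_UFS_SS_CTRL word (the 19.2 MHz rate is an assumed example of "any other rate"):

	u32 reg = TI_UFS_SS_RST_N_PCS;		/* BIT(0): release the PCS reset */

	if (clk_rate == 26000000)
		reg |= TI_UFS_SS_CLK_26MHZ;	/* BIT(4): reg = 0x11 for a 26 MHz refclk */
	writel(reg, regbase + TI_UFS_SS_CTRL);	/* reg = 0x01 for e.g. 19.2 MHz */
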
diff --git a/drivers/ufs/host/ufs-exynos.c b/drivers/ufs/host/ufs-exynos.c
new file mode 100644
index 000000000..3cdac89a2
--- /dev/null
+++ b/drivers/ufs/host/ufs-exynos.c
@@ -0,0 +1,1764 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * UFS Host Controller driver for Exynos specific extensions
+ *
+ * Copyright (C) 2014-2015 Samsung Electronics Co., Ltd.
+ * Author: Seungwon Jeon <essuuj@gmail.com>
+ * Author: Alim Akhtar <alim.akhtar@samsung.com>
+ *
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/mfd/syscon.h>
+#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+#include <ufs/ufshcd.h>
+#include "ufshcd-pltfrm.h"
+#include <ufs/ufshci.h>
+#include <ufs/unipro.h>
+
+#include "ufs-exynos.h"
+
+/*
+ * Exynos's Vendor specific registers for UFSHCI
+ */
+#define HCI_TXPRDT_ENTRY_SIZE 0x00
+#define PRDT_PREFECT_EN BIT(31)
+#define PRDT_SET_SIZE(x) ((x) & 0x1F)
+#define HCI_RXPRDT_ENTRY_SIZE 0x04
+#define HCI_1US_TO_CNT_VAL 0x0C
+#define CNT_VAL_1US_MASK 0x3FF
+#define HCI_UTRL_NEXUS_TYPE 0x40
+#define HCI_UTMRL_NEXUS_TYPE 0x44
+#define HCI_SW_RST 0x50
+#define UFS_LINK_SW_RST BIT(0)
+#define UFS_UNIPRO_SW_RST BIT(1)
+#define UFS_SW_RST_MASK (UFS_UNIPRO_SW_RST | UFS_LINK_SW_RST)
+#define HCI_DATA_REORDER 0x60
+#define HCI_UNIPRO_APB_CLK_CTRL 0x68
+#define UNIPRO_APB_CLK(v, x) (((v) & ~0xF) | ((x) & 0xF))
+#define HCI_AXIDMA_RWDATA_BURST_LEN 0x6C
+#define HCI_GPIO_OUT 0x70
+#define HCI_ERR_EN_PA_LAYER 0x78
+#define HCI_ERR_EN_DL_LAYER 0x7C
+#define HCI_ERR_EN_N_LAYER 0x80
+#define HCI_ERR_EN_T_LAYER 0x84
+#define HCI_ERR_EN_DME_LAYER 0x88
+#define HCI_CLKSTOP_CTRL 0xB0
+#define REFCLKOUT_STOP BIT(4)
+#define MPHY_APBCLK_STOP BIT(3)
+#define REFCLK_STOP BIT(2)
+#define UNIPRO_MCLK_STOP BIT(1)
+#define UNIPRO_PCLK_STOP BIT(0)
+#define CLK_STOP_MASK (REFCLKOUT_STOP | REFCLK_STOP |\
+ UNIPRO_MCLK_STOP | MPHY_APBCLK_STOP|\
+ UNIPRO_PCLK_STOP)
+#define HCI_MISC 0xB4
+#define REFCLK_CTRL_EN BIT(7)
+#define UNIPRO_PCLK_CTRL_EN BIT(6)
+#define UNIPRO_MCLK_CTRL_EN BIT(5)
+#define HCI_CORECLK_CTRL_EN BIT(4)
+#define CLK_CTRL_EN_MASK (REFCLK_CTRL_EN |\
+ UNIPRO_PCLK_CTRL_EN |\
+ UNIPRO_MCLK_CTRL_EN)
+/* Device fatal error */
+#define DFES_ERR_EN BIT(31)
+#define DFES_DEF_L2_ERRS (UIC_DATA_LINK_LAYER_ERROR_RX_BUF_OF |\
+ UIC_DATA_LINK_LAYER_ERROR_PA_INIT)
+#define DFES_DEF_L3_ERRS (UIC_NETWORK_UNSUPPORTED_HEADER_TYPE |\
+ UIC_NETWORK_BAD_DEVICEID_ENC |\
+ UIC_NETWORK_LHDR_TRAP_PACKET_DROPPING)
+#define DFES_DEF_L4_ERRS (UIC_TRANSPORT_UNSUPPORTED_HEADER_TYPE |\
+ UIC_TRANSPORT_UNKNOWN_CPORTID |\
+ UIC_TRANSPORT_NO_CONNECTION_RX |\
+ UIC_TRANSPORT_BAD_TC)
+
+/* FSYS UFS Shareability */
+#define UFS_WR_SHARABLE BIT(2)
+#define UFS_RD_SHARABLE BIT(1)
+#define UFS_SHARABLE (UFS_WR_SHARABLE | UFS_RD_SHARABLE)
+#define UFS_SHAREABILITY_OFFSET 0x710
+
+/* Multi-host registers */
+#define MHCTRL 0xC4
+#define MHCTRL_EN_VH_MASK (0xE)
+#define MHCTRL_EN_VH(vh) ((vh) << 1)
+#define PH2VH_MBOX 0xD8
+
+#define MH_MSG_MASK (0xFF)
+
+#define MH_MSG(id, msg) (((id) << 8) | ((msg) & 0xFF))
+#define MH_MSG_PH_READY 0x1
+#define MH_MSG_VH_READY 0x2
+
+#define ALLOW_INQUIRY BIT(25)
+#define ALLOW_MODE_SELECT BIT(24)
+#define ALLOW_MODE_SENSE BIT(23)
+#define ALLOW_PRE_FETCH GENMASK(22, 21)
+#define ALLOW_READ_CMD_ALL GENMASK(20, 18) /* read_6/10/16 */
+#define ALLOW_READ_BUFFER BIT(17)
+#define ALLOW_READ_CAPACITY GENMASK(16, 15)
+#define ALLOW_REPORT_LUNS BIT(14)
+#define ALLOW_REQUEST_SENSE BIT(13)
+#define ALLOW_SYNCHRONIZE_CACHE GENMASK(8, 7)
+#define ALLOW_TEST_UNIT_READY BIT(6)
+#define ALLOW_UNMAP BIT(5)
+#define ALLOW_VERIFY BIT(4)
+#define ALLOW_WRITE_CMD_ALL GENMASK(3, 1) /* write_6/10/16 */
+
+#define ALLOW_TRANS_VH_DEFAULT (ALLOW_INQUIRY | ALLOW_MODE_SELECT | \
+ ALLOW_MODE_SENSE | ALLOW_PRE_FETCH | \
+ ALLOW_READ_CMD_ALL | ALLOW_READ_BUFFER | \
+ ALLOW_READ_CAPACITY | ALLOW_REPORT_LUNS | \
+ ALLOW_REQUEST_SENSE | ALLOW_SYNCHRONIZE_CACHE | \
+ ALLOW_TEST_UNIT_READY | ALLOW_UNMAP | \
+ ALLOW_VERIFY | ALLOW_WRITE_CMD_ALL)
+
+#define HCI_MH_ALLOWABLE_TRAN_OF_VH 0x30C
+#define HCI_MH_IID_IN_TASK_TAG 0x308
+
+#define PH_READY_TIMEOUT_MS (5 * MSEC_PER_SEC)
+
+enum {
+ UNIPRO_L1_5 = 0, /* PHY Adapter */
+ UNIPRO_L2, /* Data Link */
+ UNIPRO_L3, /* Network */
+ UNIPRO_L4, /* Transport */
+ UNIPRO_DME, /* DME */
+};
+
+/*
+ * UNIPRO registers
+ */
+#define UNIPRO_DME_POWERMODE_REQ_REMOTEL2TIMER0 0x78B8
+#define UNIPRO_DME_POWERMODE_REQ_REMOTEL2TIMER1 0x78BC
+#define UNIPRO_DME_POWERMODE_REQ_REMOTEL2TIMER2 0x78C0
+
+/*
+ * UFS Protector registers
+ */
+#define UFSPRSECURITY 0x010
+#define NSSMU BIT(14)
+#define UFSPSBEGIN0 0x200
+#define UFSPSEND0 0x204
+#define UFSPSLUN0 0x208
+#define UFSPSCTRL0 0x20C
+
+#define CNTR_DIV_VAL 40
+
+static void exynos_ufs_auto_ctrl_hcc(struct exynos_ufs *ufs, bool en);
+static void exynos_ufs_ctrl_clkstop(struct exynos_ufs *ufs, bool en);
+
+static inline void exynos_ufs_enable_auto_ctrl_hcc(struct exynos_ufs *ufs)
+{
+ exynos_ufs_auto_ctrl_hcc(ufs, true);
+}
+
+static inline void exynos_ufs_disable_auto_ctrl_hcc(struct exynos_ufs *ufs)
+{
+ exynos_ufs_auto_ctrl_hcc(ufs, false);
+}
+
+static inline void exynos_ufs_disable_auto_ctrl_hcc_save(
+ struct exynos_ufs *ufs, u32 *val)
+{
+ *val = hci_readl(ufs, HCI_MISC);
+ exynos_ufs_auto_ctrl_hcc(ufs, false);
+}
+
+static inline void exynos_ufs_auto_ctrl_hcc_restore(
+ struct exynos_ufs *ufs, u32 *val)
+{
+ hci_writel(ufs, *val, HCI_MISC);
+}
+
+static inline void exynos_ufs_gate_clks(struct exynos_ufs *ufs)
+{
+ exynos_ufs_ctrl_clkstop(ufs, true);
+}
+
+static inline void exynos_ufs_ungate_clks(struct exynos_ufs *ufs)
+{
+ exynos_ufs_ctrl_clkstop(ufs, false);
+}
+
+static int exynos7_ufs_drv_init(struct device *dev, struct exynos_ufs *ufs)
+{
+ return 0;
+}
+
+static int exynosauto_ufs_drv_init(struct device *dev, struct exynos_ufs *ufs)
+{
+ struct exynos_ufs_uic_attr *attr = ufs->drv_data->uic_attr;
+
+ /* IO Coherency setting */
+ if (ufs->sysreg) {
+ return regmap_update_bits(ufs->sysreg,
+ ufs->shareability_reg_offset,
+ UFS_SHARABLE, UFS_SHARABLE);
+ }
+
+ attr->tx_dif_p_nsec = 3200000;
+
+ return 0;
+}
+
+static int exynosauto_ufs_post_hce_enable(struct exynos_ufs *ufs)
+{
+ struct ufs_hba *hba = ufs->hba;
+
+ /* Enable Virtual Host #1 */
+ ufshcd_rmwl(hba, MHCTRL_EN_VH_MASK, MHCTRL_EN_VH(1), MHCTRL);
+ /* Default VH Transfer permissions */
+ hci_writel(ufs, ALLOW_TRANS_VH_DEFAULT, HCI_MH_ALLOWABLE_TRAN_OF_VH);
+ /* IID information is replaced in TASKTAG[7:5] instead of IID in UCD */
+ hci_writel(ufs, 0x1, HCI_MH_IID_IN_TASK_TAG);
+
+ return 0;
+}
+
+static int exynosauto_ufs_pre_link(struct exynos_ufs *ufs)
+{
+ struct ufs_hba *hba = ufs->hba;
+ int i;
+ u32 tx_line_reset_period, rx_line_reset_period;
+
+ rx_line_reset_period = (RX_LINE_RESET_TIME * ufs->mclk_rate) / NSEC_PER_MSEC;
+ tx_line_reset_period = (TX_LINE_RESET_TIME * ufs->mclk_rate) / NSEC_PER_MSEC;
+
+ ufshcd_dme_set(hba, UIC_ARG_MIB(0x200), 0x40);
+ for_each_ufs_rx_lane(ufs, i) {
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(VND_RX_CLK_PRD, i),
+ DIV_ROUND_UP(NSEC_PER_SEC, ufs->mclk_rate));
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(VND_RX_CLK_PRD_EN, i), 0x0);
+
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(VND_RX_LINERESET_VALUE2, i),
+ (rx_line_reset_period >> 16) & 0xFF);
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(VND_RX_LINERESET_VALUE1, i),
+ (rx_line_reset_period >> 8) & 0xFF);
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(VND_RX_LINERESET_VALUE0, i),
+ (rx_line_reset_period) & 0xFF);
+
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x2f, i), 0x79);
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x84, i), 0x1);
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x25, i), 0xf6);
+ }
+
+ for_each_ufs_tx_lane(ufs, i) {
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(VND_TX_CLK_PRD, i),
+ DIV_ROUND_UP(NSEC_PER_SEC, ufs->mclk_rate));
+ /* Keep VND_TX_LINERESET_PVALUE from affecting VND_TX_CLK_PRD */
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(VND_TX_CLK_PRD_EN, i),
+ 0x02);
+
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(VND_TX_LINERESET_PVALUE2, i),
+ (tx_line_reset_period >> 16) & 0xFF);
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(VND_TX_LINERESET_PVALUE1, i),
+ (tx_line_reset_period >> 8) & 0xFF);
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(VND_TX_LINERESET_PVALUE0, i),
+ (tx_line_reset_period) & 0xFF);
+
+ /* TX PWM Gear Capability / PWM_G1_ONLY */
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x04, i), 0x1);
+ }
+
+ ufshcd_dme_set(hba, UIC_ARG_MIB(0x200), 0x0);
+
+ ufshcd_dme_set(hba, UIC_ARG_MIB(PA_LOCAL_TX_LCC_ENABLE), 0x0);
+
+ ufshcd_dme_set(hba, UIC_ARG_MIB(0xa011), 0x8000);
+
+ return 0;
+}
+
+static int exynosauto_ufs_pre_pwr_change(struct exynos_ufs *ufs,
+ struct ufs_pa_layer_attr *pwr)
+{
+ struct ufs_hba *hba = ufs->hba;
+
+ /* PACP_PWR_req and delivered to the remote DME */
+ ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA0), 12000);
+ ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA1), 32000);
+ ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA2), 16000);
+
+ return 0;
+}
+
+static int exynosauto_ufs_post_pwr_change(struct exynos_ufs *ufs,
+ struct ufs_pa_layer_attr *pwr)
+{
+ struct ufs_hba *hba = ufs->hba;
+ u32 enabled_vh;
+
+ enabled_vh = ufshcd_readl(hba, MHCTRL) & MHCTRL_EN_VH_MASK;
+
+ /* Send physical host ready message to virtual hosts */
+ ufshcd_writel(hba, MH_MSG(enabled_vh, MH_MSG_PH_READY), PH2VH_MBOX);
+
+ return 0;
+}
+
+static int exynos7_ufs_pre_link(struct exynos_ufs *ufs)
+{
+ struct ufs_hba *hba = ufs->hba;
+ u32 val = ufs->drv_data->uic_attr->pa_dbg_option_suite;
+ int i;
+
+ exynos_ufs_enable_ov_tm(hba);
+ for_each_ufs_tx_lane(ufs, i)
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x297, i), 0x17);
+ for_each_ufs_rx_lane(ufs, i) {
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x362, i), 0xff);
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x363, i), 0x00);
+ }
+ exynos_ufs_disable_ov_tm(hba);
+
+ for_each_ufs_tx_lane(ufs, i)
+ ufshcd_dme_set(hba,
+ UIC_ARG_MIB_SEL(TX_HIBERN8_CONTROL, i), 0x0);
+ ufshcd_dme_set(hba, UIC_ARG_MIB(PA_DBG_TXPHY_CFGUPDT), 0x1);
+ udelay(1);
+ ufshcd_dme_set(hba, UIC_ARG_MIB(PA_DBG_OPTION_SUITE), val | (1 << 12));
+ ufshcd_dme_set(hba, UIC_ARG_MIB(PA_DBG_SKIP_RESET_PHY), 0x1);
+ ufshcd_dme_set(hba, UIC_ARG_MIB(PA_DBG_SKIP_LINE_RESET), 0x1);
+ ufshcd_dme_set(hba, UIC_ARG_MIB(PA_DBG_LINE_RESET_REQ), 0x1);
+ udelay(1600);
+ ufshcd_dme_set(hba, UIC_ARG_MIB(PA_DBG_OPTION_SUITE), val);
+
+ return 0;
+}
+
+static int exynos7_ufs_post_link(struct exynos_ufs *ufs)
+{
+ struct ufs_hba *hba = ufs->hba;
+ int i;
+
+ exynos_ufs_enable_ov_tm(hba);
+ for_each_ufs_tx_lane(ufs, i) {
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x28b, i), 0x83);
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x29a, i), 0x07);
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x277, i),
+ TX_LINERESET_N(exynos_ufs_calc_time_cntr(ufs, 200000)));
+ }
+ exynos_ufs_disable_ov_tm(hba);
+
+ exynos_ufs_enable_dbg_mode(hba);
+ ufshcd_dme_set(hba, UIC_ARG_MIB(PA_SAVECONFIGTIME), 0xbb8);
+ exynos_ufs_disable_dbg_mode(hba);
+
+ return 0;
+}
+
+static int exynos7_ufs_pre_pwr_change(struct exynos_ufs *ufs,
+ struct ufs_pa_layer_attr *pwr)
+{
+ unipro_writel(ufs, 0x22, UNIPRO_DBG_FORCE_DME_CTRL_STATE);
+
+ return 0;
+}
+
+static int exynos7_ufs_post_pwr_change(struct exynos_ufs *ufs,
+ struct ufs_pa_layer_attr *pwr)
+{
+ struct ufs_hba *hba = ufs->hba;
+ int lanes = max_t(u32, pwr->lane_rx, pwr->lane_tx);
+
+ ufshcd_dme_set(hba, UIC_ARG_MIB(PA_DBG_RXPHY_CFGUPDT), 0x1);
+
+ if (lanes == 1) {
+ exynos_ufs_enable_dbg_mode(hba);
+ ufshcd_dme_set(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES), 0x1);
+ exynos_ufs_disable_dbg_mode(hba);
+ }
+
+ return 0;
+}
+
+/*
+ * exynos_ufs_auto_ctrl_hcc - HCI core clock control by h/w
+ * Control should be disabled in the below cases
+ * - Before host controller S/W reset
+ * - Access to UFS protector's register
+ */
+static void exynos_ufs_auto_ctrl_hcc(struct exynos_ufs *ufs, bool en)
+{
+ u32 misc = hci_readl(ufs, HCI_MISC);
+
+ if (en)
+ hci_writel(ufs, misc | HCI_CORECLK_CTRL_EN, HCI_MISC);
+ else
+ hci_writel(ufs, misc & ~HCI_CORECLK_CTRL_EN, HCI_MISC);
+}
+
+static void exynos_ufs_ctrl_clkstop(struct exynos_ufs *ufs, bool en)
+{
+ u32 ctrl = hci_readl(ufs, HCI_CLKSTOP_CTRL);
+ u32 misc = hci_readl(ufs, HCI_MISC);
+
+ if (en) {
+ hci_writel(ufs, misc | CLK_CTRL_EN_MASK, HCI_MISC);
+ hci_writel(ufs, ctrl | CLK_STOP_MASK, HCI_CLKSTOP_CTRL);
+ } else {
+ hci_writel(ufs, ctrl & ~CLK_STOP_MASK, HCI_CLKSTOP_CTRL);
+ hci_writel(ufs, misc & ~CLK_CTRL_EN_MASK, HCI_MISC);
+ }
+}
+
+static int exynos_ufs_get_clk_info(struct exynos_ufs *ufs)
+{
+ struct ufs_hba *hba = ufs->hba;
+ struct list_head *head = &hba->clk_list_head;
+ struct ufs_clk_info *clki;
+ unsigned long pclk_rate;
+ u32 f_min, f_max;
+ u8 div = 0;
+ int ret = 0;
+
+ if (list_empty(head))
+ goto out;
+
+ list_for_each_entry(clki, head, list) {
+ if (!IS_ERR(clki->clk)) {
+ if (!strcmp(clki->name, "core_clk"))
+ ufs->clk_hci_core = clki->clk;
+ else if (!strcmp(clki->name, "sclk_unipro_main"))
+ ufs->clk_unipro_main = clki->clk;
+ }
+ }
+
+ if (!ufs->clk_hci_core || !ufs->clk_unipro_main) {
+ dev_err(hba->dev, "failed to get clk info\n");
+ ret = -EINVAL;
+ goto out;
+ }
+
+ ufs->mclk_rate = clk_get_rate(ufs->clk_unipro_main);
+ pclk_rate = clk_get_rate(ufs->clk_hci_core);
+ f_min = ufs->pclk_avail_min;
+ f_max = ufs->pclk_avail_max;
+
+ if (ufs->opts & EXYNOS_UFS_OPT_HAS_APB_CLK_CTRL) {
+ do {
+ pclk_rate /= (div + 1);
+
+ if (pclk_rate <= f_max)
+ break;
+ div++;
+ } while (pclk_rate >= f_min);
+ }
+
+ if (unlikely(pclk_rate < f_min || pclk_rate > f_max)) {
+ dev_err(hba->dev, "not available pclk range %lu\n", pclk_rate);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ ufs->pclk_rate = pclk_rate;
+ ufs->pclk_div = div;
+
+out:
+ return ret;
+}
+
+static void exynos_ufs_set_unipro_pclk_div(struct exynos_ufs *ufs)
+{
+ if (ufs->opts & EXYNOS_UFS_OPT_HAS_APB_CLK_CTRL) {
+ u32 val;
+
+ val = hci_readl(ufs, HCI_UNIPRO_APB_CLK_CTRL);
+ hci_writel(ufs, UNIPRO_APB_CLK(val, ufs->pclk_div),
+ HCI_UNIPRO_APB_CLK_CTRL);
+ }
+}
+
+static void exynos_ufs_set_pwm_clk_div(struct exynos_ufs *ufs)
+{
+ struct ufs_hba *hba = ufs->hba;
+ struct exynos_ufs_uic_attr *attr = ufs->drv_data->uic_attr;
+
+ ufshcd_dme_set(hba,
+ UIC_ARG_MIB(CMN_PWM_CLK_CTRL), attr->cmn_pwm_clk_ctrl);
+}
+
+static void exynos_ufs_calc_pwm_clk_div(struct exynos_ufs *ufs)
+{
+ struct ufs_hba *hba = ufs->hba;
+ struct exynos_ufs_uic_attr *attr = ufs->drv_data->uic_attr;
+ const unsigned int div = 30, mult = 20;
+ const unsigned long pwm_min = 3 * 1000 * 1000;
+ const unsigned long pwm_max = 9 * 1000 * 1000;
+ const int divs[] = {32, 16, 8, 4};
+ unsigned long clk = 0, _clk, clk_period;
+ int i = 0, clk_idx = -1;
+
+ clk_period = UNIPRO_PCLK_PERIOD(ufs);
+ for (i = 0; i < ARRAY_SIZE(divs); i++) {
+ _clk = NSEC_PER_SEC * mult / (clk_period * divs[i] * div);
+ if (_clk >= pwm_min && _clk <= pwm_max) {
+ if (_clk > clk) {
+ clk_idx = i;
+ clk = _clk;
+ }
+ }
+ }
+
+ if (clk_idx == -1) {
+ ufshcd_dme_get(hba, UIC_ARG_MIB(CMN_PWM_CLK_CTRL), &clk_idx);
+ dev_err(hba->dev,
+ "failed to decide pwm clock divider, will not change\n");
+ }
+
+ attr->cmn_pwm_clk_ctrl = clk_idx & PWM_CLK_CTRL_MASK;
+}
+
+long exynos_ufs_calc_time_cntr(struct exynos_ufs *ufs, long period)
+{
+ const int precise = 10;
+ long pclk_rate = ufs->pclk_rate;
+ long clk_period, fraction;
+
+ clk_period = UNIPRO_PCLK_PERIOD(ufs);
+ fraction = ((NSEC_PER_SEC % pclk_rate) * precise) / pclk_rate;
+
+ return (period * precise) / ((clk_period * precise) + fraction);
+}
+
+static void exynos_ufs_specify_phy_time_attr(struct exynos_ufs *ufs)
+{
+ struct exynos_ufs_uic_attr *attr = ufs->drv_data->uic_attr;
+ struct ufs_phy_time_cfg *t_cfg = &ufs->t_cfg;
+
+ t_cfg->tx_linereset_p =
+ exynos_ufs_calc_time_cntr(ufs, attr->tx_dif_p_nsec);
+ t_cfg->tx_linereset_n =
+ exynos_ufs_calc_time_cntr(ufs, attr->tx_dif_n_nsec);
+ t_cfg->tx_high_z_cnt =
+ exynos_ufs_calc_time_cntr(ufs, attr->tx_high_z_cnt_nsec);
+ t_cfg->tx_base_n_val =
+ exynos_ufs_calc_time_cntr(ufs, attr->tx_base_unit_nsec);
+ t_cfg->tx_gran_n_val =
+ exynos_ufs_calc_time_cntr(ufs, attr->tx_gran_unit_nsec);
+ t_cfg->tx_sleep_cnt =
+ exynos_ufs_calc_time_cntr(ufs, attr->tx_sleep_cnt);
+
+ t_cfg->rx_linereset =
+ exynos_ufs_calc_time_cntr(ufs, attr->rx_dif_p_nsec);
+ t_cfg->rx_hibern8_wait =
+ exynos_ufs_calc_time_cntr(ufs, attr->rx_hibern8_wait_nsec);
+ t_cfg->rx_base_n_val =
+ exynos_ufs_calc_time_cntr(ufs, attr->rx_base_unit_nsec);
+ t_cfg->rx_gran_n_val =
+ exynos_ufs_calc_time_cntr(ufs, attr->rx_gran_unit_nsec);
+ t_cfg->rx_sleep_cnt =
+ exynos_ufs_calc_time_cntr(ufs, attr->rx_sleep_cnt);
+ t_cfg->rx_stall_cnt =
+ exynos_ufs_calc_time_cntr(ufs, attr->rx_stall_cnt);
+}
+
+static void exynos_ufs_config_phy_time_attr(struct exynos_ufs *ufs)
+{
+ struct ufs_hba *hba = ufs->hba;
+ struct ufs_phy_time_cfg *t_cfg = &ufs->t_cfg;
+ int i;
+
+ exynos_ufs_set_pwm_clk_div(ufs);
+
+ exynos_ufs_enable_ov_tm(hba);
+
+ for_each_ufs_rx_lane(ufs, i) {
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(RX_FILLER_ENABLE, i),
+ ufs->drv_data->uic_attr->rx_filler_enable);
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(RX_LINERESET_VAL, i),
+ RX_LINERESET(t_cfg->rx_linereset));
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(RX_BASE_NVAL_07_00, i),
+ RX_BASE_NVAL_L(t_cfg->rx_base_n_val));
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(RX_BASE_NVAL_15_08, i),
+ RX_BASE_NVAL_H(t_cfg->rx_base_n_val));
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(RX_GRAN_NVAL_07_00, i),
+ RX_GRAN_NVAL_L(t_cfg->rx_gran_n_val));
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(RX_GRAN_NVAL_10_08, i),
+ RX_GRAN_NVAL_H(t_cfg->rx_gran_n_val));
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(RX_OV_SLEEP_CNT_TIMER, i),
+ RX_OV_SLEEP_CNT(t_cfg->rx_sleep_cnt));
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(RX_OV_STALL_CNT_TIMER, i),
+ RX_OV_STALL_CNT(t_cfg->rx_stall_cnt));
+ }
+
+ for_each_ufs_tx_lane(ufs, i) {
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(TX_LINERESET_P_VAL, i),
+ TX_LINERESET_P(t_cfg->tx_linereset_p));
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(TX_HIGH_Z_CNT_07_00, i),
+ TX_HIGH_Z_CNT_L(t_cfg->tx_high_z_cnt));
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(TX_HIGH_Z_CNT_11_08, i),
+ TX_HIGH_Z_CNT_H(t_cfg->tx_high_z_cnt));
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(TX_BASE_NVAL_07_00, i),
+ TX_BASE_NVAL_L(t_cfg->tx_base_n_val));
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(TX_BASE_NVAL_15_08, i),
+ TX_BASE_NVAL_H(t_cfg->tx_base_n_val));
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(TX_GRAN_NVAL_07_00, i),
+ TX_GRAN_NVAL_L(t_cfg->tx_gran_n_val));
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(TX_GRAN_NVAL_10_08, i),
+ TX_GRAN_NVAL_H(t_cfg->tx_gran_n_val));
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(TX_OV_SLEEP_CNT_TIMER, i),
+ TX_OV_H8_ENTER_EN |
+ TX_OV_SLEEP_CNT(t_cfg->tx_sleep_cnt));
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(TX_MIN_ACTIVATETIME, i),
+ ufs->drv_data->uic_attr->tx_min_activatetime);
+ }
+
+ exynos_ufs_disable_ov_tm(hba);
+}
+
+static void exynos_ufs_config_phy_cap_attr(struct exynos_ufs *ufs)
+{
+ struct ufs_hba *hba = ufs->hba;
+ struct exynos_ufs_uic_attr *attr = ufs->drv_data->uic_attr;
+ int i;
+
+ exynos_ufs_enable_ov_tm(hba);
+
+ for_each_ufs_rx_lane(ufs, i) {
+ ufshcd_dme_set(hba,
+ UIC_ARG_MIB_SEL(RX_HS_G1_SYNC_LENGTH_CAP, i),
+ attr->rx_hs_g1_sync_len_cap);
+ ufshcd_dme_set(hba,
+ UIC_ARG_MIB_SEL(RX_HS_G2_SYNC_LENGTH_CAP, i),
+ attr->rx_hs_g2_sync_len_cap);
+ ufshcd_dme_set(hba,
+ UIC_ARG_MIB_SEL(RX_HS_G3_SYNC_LENGTH_CAP, i),
+ attr->rx_hs_g3_sync_len_cap);
+ ufshcd_dme_set(hba,
+ UIC_ARG_MIB_SEL(RX_HS_G1_PREP_LENGTH_CAP, i),
+ attr->rx_hs_g1_prep_sync_len_cap);
+ ufshcd_dme_set(hba,
+ UIC_ARG_MIB_SEL(RX_HS_G2_PREP_LENGTH_CAP, i),
+ attr->rx_hs_g2_prep_sync_len_cap);
+ ufshcd_dme_set(hba,
+ UIC_ARG_MIB_SEL(RX_HS_G3_PREP_LENGTH_CAP, i),
+ attr->rx_hs_g3_prep_sync_len_cap);
+ }
+
+ if (attr->rx_adv_fine_gran_sup_en == 0) {
+ for_each_ufs_rx_lane(ufs, i) {
+ ufshcd_dme_set(hba,
+ UIC_ARG_MIB_SEL(RX_ADV_GRANULARITY_CAP, i), 0);
+
+ if (attr->rx_min_actv_time_cap)
+ ufshcd_dme_set(hba,
+ UIC_ARG_MIB_SEL(
+ RX_MIN_ACTIVATETIME_CAPABILITY, i),
+ attr->rx_min_actv_time_cap);
+
+ if (attr->rx_hibern8_time_cap)
+ ufshcd_dme_set(hba,
+ UIC_ARG_MIB_SEL(RX_HIBERN8TIME_CAP, i),
+ attr->rx_hibern8_time_cap);
+ }
+ } else if (attr->rx_adv_fine_gran_sup_en == 1) {
+ for_each_ufs_rx_lane(ufs, i) {
+ if (attr->rx_adv_fine_gran_step)
+ ufshcd_dme_set(hba,
+ UIC_ARG_MIB_SEL(RX_ADV_GRANULARITY_CAP,
+ i), RX_ADV_FINE_GRAN_STEP(
+ attr->rx_adv_fine_gran_step));
+
+ if (attr->rx_adv_min_actv_time_cap)
+ ufshcd_dme_set(hba,
+ UIC_ARG_MIB_SEL(
+ RX_ADV_MIN_ACTIVATETIME_CAP, i),
+ attr->rx_adv_min_actv_time_cap);
+
+ if (attr->rx_adv_hibern8_time_cap)
+ ufshcd_dme_set(hba,
+ UIC_ARG_MIB_SEL(RX_ADV_HIBERN8TIME_CAP,
+ i),
+ attr->rx_adv_hibern8_time_cap);
+ }
+ }
+
+ exynos_ufs_disable_ov_tm(hba);
+}
+
+static void exynos_ufs_establish_connt(struct exynos_ufs *ufs)
+{
+ struct ufs_hba *hba = ufs->hba;
+ enum {
+ DEV_ID = 0x00,
+ PEER_DEV_ID = 0x01,
+ PEER_CPORT_ID = 0x00,
+ TRAFFIC_CLASS = 0x00,
+ };
+
+ /* allow cport attributes to be set */
+ ufshcd_dme_set(hba, UIC_ARG_MIB(T_CONNECTIONSTATE), CPORT_IDLE);
+
+ /* local unipro attributes */
+ ufshcd_dme_set(hba, UIC_ARG_MIB(N_DEVICEID), DEV_ID);
+ ufshcd_dme_set(hba, UIC_ARG_MIB(N_DEVICEID_VALID), true);
+ ufshcd_dme_set(hba, UIC_ARG_MIB(T_PEERDEVICEID), PEER_DEV_ID);
+ ufshcd_dme_set(hba, UIC_ARG_MIB(T_PEERCPORTID), PEER_CPORT_ID);
+ ufshcd_dme_set(hba, UIC_ARG_MIB(T_CPORTFLAGS), CPORT_DEF_FLAGS);
+ ufshcd_dme_set(hba, UIC_ARG_MIB(T_TRAFFICCLASS), TRAFFIC_CLASS);
+ ufshcd_dme_set(hba, UIC_ARG_MIB(T_CONNECTIONSTATE), CPORT_CONNECTED);
+}
+
+static void exynos_ufs_config_smu(struct exynos_ufs *ufs)
+{
+ u32 reg, val;
+
+ exynos_ufs_disable_auto_ctrl_hcc_save(ufs, &val);
+
+ /* make encryption disabled by default */
+ reg = ufsp_readl(ufs, UFSPRSECURITY);
+ ufsp_writel(ufs, reg | NSSMU, UFSPRSECURITY);
+ ufsp_writel(ufs, 0x0, UFSPSBEGIN0);
+ ufsp_writel(ufs, 0xffffffff, UFSPSEND0);
+ ufsp_writel(ufs, 0xff, UFSPSLUN0);
+ ufsp_writel(ufs, 0xf1, UFSPSCTRL0);
+
+ exynos_ufs_auto_ctrl_hcc_restore(ufs, &val);
+}
+
+static void exynos_ufs_config_sync_pattern_mask(struct exynos_ufs *ufs,
+ struct ufs_pa_layer_attr *pwr)
+{
+ struct ufs_hba *hba = ufs->hba;
+ u8 g = max_t(u32, pwr->gear_rx, pwr->gear_tx);
+ u32 mask, sync_len;
+ enum {
+ SYNC_LEN_G1 = 80 * 1000, /* 80us */
+		SYNC_LEN_G2 = 40 * 1000, /* 40us */
+ SYNC_LEN_G3 = 20 * 1000, /* 20us */
+ };
+ int i;
+
+ if (g == 1)
+ sync_len = SYNC_LEN_G1;
+ else if (g == 2)
+ sync_len = SYNC_LEN_G2;
+ else if (g == 3)
+ sync_len = SYNC_LEN_G3;
+ else
+ return;
+
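+	/*
+	 * Convert the sync length from ns to mPHY clock cycles and use the
+	 * upper byte of that count as the RX sync-pattern mask length.
+	 */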
+ mask = exynos_ufs_calc_time_cntr(ufs, sync_len);
+ mask = (mask >> 8) & 0xff;
+
+ exynos_ufs_enable_ov_tm(hba);
+
+ for_each_ufs_rx_lane(ufs, i)
+ ufshcd_dme_set(hba,
+ UIC_ARG_MIB_SEL(RX_SYNC_MASK_LENGTH, i), mask);
+
+ exynos_ufs_disable_ov_tm(hba);
+}
+
+static int exynos_ufs_pre_pwr_mode(struct ufs_hba *hba,
+ struct ufs_pa_layer_attr *dev_max_params,
+ struct ufs_pa_layer_attr *dev_req_params)
+{
+ struct exynos_ufs *ufs = ufshcd_get_variant(hba);
+ struct phy *generic_phy = ufs->phy;
+ struct ufs_dev_params ufs_exynos_cap;
+ int ret;
+
+ if (!dev_req_params) {
+ pr_err("%s: incoming dev_req_params is NULL\n", __func__);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ ufshcd_init_pwr_dev_param(&ufs_exynos_cap);
+
+ ret = ufshcd_get_pwr_dev_param(&ufs_exynos_cap,
+ dev_max_params, dev_req_params);
+ if (ret) {
+ pr_err("%s: failed to determine capabilities\n", __func__);
+ goto out;
+ }
+
+ if (ufs->drv_data->pre_pwr_change)
+ ufs->drv_data->pre_pwr_change(ufs, dev_req_params);
+
+ if (ufshcd_is_hs_mode(dev_req_params)) {
+ exynos_ufs_config_sync_pattern_mask(ufs, dev_req_params);
+
+ switch (dev_req_params->hs_rate) {
+ case PA_HS_MODE_A:
+ case PA_HS_MODE_B:
+ phy_calibrate(generic_phy);
+ break;
+ }
+ }
+
+	/* set three timeout values for traffic class #0 */
+ ufshcd_dme_set(hba, UIC_ARG_MIB(DL_FC0PROTTIMEOUTVAL), 8064);
+ ufshcd_dme_set(hba, UIC_ARG_MIB(DL_TC0REPLAYTIMEOUTVAL), 28224);
+ ufshcd_dme_set(hba, UIC_ARG_MIB(DL_AFC0REQTIMEOUTVAL), 20160);
+
+ return 0;
+out:
+ return ret;
+}
+
+#define PWR_MODE_STR_LEN 64
+static int exynos_ufs_post_pwr_mode(struct ufs_hba *hba,
+ struct ufs_pa_layer_attr *pwr_req)
+{
+ struct exynos_ufs *ufs = ufshcd_get_variant(hba);
+ struct phy *generic_phy = ufs->phy;
+ int gear = max_t(u32, pwr_req->gear_rx, pwr_req->gear_tx);
+ int lanes = max_t(u32, pwr_req->lane_rx, pwr_req->lane_tx);
+ char pwr_str[PWR_MODE_STR_LEN] = "";
+
+ /* let default be PWM Gear 1, Lane 1 */
+ if (!gear)
+ gear = 1;
+
+ if (!lanes)
+ lanes = 1;
+
+ if (ufs->drv_data->post_pwr_change)
+ ufs->drv_data->post_pwr_change(ufs, pwr_req);
+
+	if (ufshcd_is_hs_mode(pwr_req)) {
+ switch (pwr_req->hs_rate) {
+ case PA_HS_MODE_A:
+ case PA_HS_MODE_B:
+ phy_calibrate(generic_phy);
+ break;
+ }
+
+ snprintf(pwr_str, PWR_MODE_STR_LEN, "%s series_%s G_%d L_%d",
+ "FAST", pwr_req->hs_rate == PA_HS_MODE_A ? "A" : "B",
+ gear, lanes);
+ } else {
+ snprintf(pwr_str, PWR_MODE_STR_LEN, "%s G_%d L_%d",
+ "SLOW", gear, lanes);
+ }
+
+	dev_info(hba->dev, "Power mode changed to: %s\n", pwr_str);
+
+ return 0;
+}
+
+static void exynos_ufs_specify_nexus_t_xfer_req(struct ufs_hba *hba,
+ int tag, bool is_scsi_cmd)
+{
+ struct exynos_ufs *ufs = ufshcd_get_variant(hba);
+ u32 type;
+
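+	/* each HCI_UTRL_NEXUS_TYPE bit marks the matching tag as a SCSI command */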
+ type = hci_readl(ufs, HCI_UTRL_NEXUS_TYPE);
+
+ if (is_scsi_cmd)
+ hci_writel(ufs, type | (1 << tag), HCI_UTRL_NEXUS_TYPE);
+ else
+ hci_writel(ufs, type & ~(1 << tag), HCI_UTRL_NEXUS_TYPE);
+}
+
+static void exynos_ufs_specify_nexus_t_tm_req(struct ufs_hba *hba,
+ int tag, u8 func)
+{
+ struct exynos_ufs *ufs = ufshcd_get_variant(hba);
+ u32 type;
+
+ type = hci_readl(ufs, HCI_UTMRL_NEXUS_TYPE);
+
+ switch (func) {
+ case UFS_ABORT_TASK:
+ case UFS_QUERY_TASK:
+ hci_writel(ufs, type | (1 << tag), HCI_UTMRL_NEXUS_TYPE);
+ break;
+ case UFS_ABORT_TASK_SET:
+ case UFS_CLEAR_TASK_SET:
+ case UFS_LOGICAL_RESET:
+ case UFS_QUERY_TASK_SET:
+ hci_writel(ufs, type & ~(1 << tag), HCI_UTMRL_NEXUS_TYPE);
+ break;
+ }
+}
+
+static int exynos_ufs_phy_init(struct exynos_ufs *ufs)
+{
+ struct ufs_hba *hba = ufs->hba;
+ struct phy *generic_phy = ufs->phy;
+ int ret = 0;
+
+ if (ufs->avail_ln_rx == 0 || ufs->avail_ln_tx == 0) {
+ ufshcd_dme_get(hba, UIC_ARG_MIB(PA_AVAILRXDATALANES),
+ &ufs->avail_ln_rx);
+ ufshcd_dme_get(hba, UIC_ARG_MIB(PA_AVAILTXDATALANES),
+ &ufs->avail_ln_tx);
+ WARN(ufs->avail_ln_rx != ufs->avail_ln_tx,
+ "available data lane is not equal(rx:%d, tx:%d)\n",
+ ufs->avail_ln_rx, ufs->avail_ln_tx);
+ }
+
+ phy_set_bus_width(generic_phy, ufs->avail_ln_rx);
+ ret = phy_init(generic_phy);
+ if (ret) {
+ dev_err(hba->dev, "%s: phy init failed, ret = %d\n",
+ __func__, ret);
+ return ret;
+ }
+
+ ret = phy_power_on(generic_phy);
+ if (ret)
+ goto out_exit_phy;
+
+ return 0;
+
+out_exit_phy:
+ phy_exit(generic_phy);
+
+ return ret;
+}
+
+static void exynos_ufs_config_unipro(struct exynos_ufs *ufs)
+{
+ struct ufs_hba *hba = ufs->hba;
+
+ ufshcd_dme_set(hba, UIC_ARG_MIB(PA_DBG_CLK_PERIOD),
+ DIV_ROUND_UP(NSEC_PER_SEC, ufs->mclk_rate));
+ ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXTRAILINGCLOCKS),
+ ufs->drv_data->uic_attr->tx_trailingclks);
+ ufshcd_dme_set(hba, UIC_ARG_MIB(PA_DBG_OPTION_SUITE),
+ ufs->drv_data->uic_attr->pa_dbg_option_suite);
+}
+
+static void exynos_ufs_config_intr(struct exynos_ufs *ufs, u32 errs, u8 index)
+{
+ switch (index) {
+ case UNIPRO_L1_5:
+ hci_writel(ufs, DFES_ERR_EN | errs, HCI_ERR_EN_PA_LAYER);
+ break;
+ case UNIPRO_L2:
+ hci_writel(ufs, DFES_ERR_EN | errs, HCI_ERR_EN_DL_LAYER);
+ break;
+ case UNIPRO_L3:
+ hci_writel(ufs, DFES_ERR_EN | errs, HCI_ERR_EN_N_LAYER);
+ break;
+ case UNIPRO_L4:
+ hci_writel(ufs, DFES_ERR_EN | errs, HCI_ERR_EN_T_LAYER);
+ break;
+ case UNIPRO_DME:
+ hci_writel(ufs, DFES_ERR_EN | errs, HCI_ERR_EN_DME_LAYER);
+ break;
+ }
+}
+
+static int exynos_ufs_setup_clocks(struct ufs_hba *hba, bool on,
+ enum ufs_notify_change_status status)
+{
+ struct exynos_ufs *ufs = ufshcd_get_variant(hba);
+
+ if (!ufs)
+ return 0;
+
+ if (on && status == PRE_CHANGE) {
+ if (ufs->opts & EXYNOS_UFS_OPT_BROKEN_AUTO_CLK_CTRL)
+ exynos_ufs_disable_auto_ctrl_hcc(ufs);
+ exynos_ufs_ungate_clks(ufs);
+ } else if (!on && status == POST_CHANGE) {
+ exynos_ufs_gate_clks(ufs);
+ if (ufs->opts & EXYNOS_UFS_OPT_BROKEN_AUTO_CLK_CTRL)
+ exynos_ufs_enable_auto_ctrl_hcc(ufs);
+ }
+
+ return 0;
+}
+
+static int exynos_ufs_pre_link(struct ufs_hba *hba)
+{
+ struct exynos_ufs *ufs = ufshcd_get_variant(hba);
+
+ /* hci */
+ exynos_ufs_config_intr(ufs, DFES_DEF_L2_ERRS, UNIPRO_L2);
+ exynos_ufs_config_intr(ufs, DFES_DEF_L3_ERRS, UNIPRO_L3);
+ exynos_ufs_config_intr(ufs, DFES_DEF_L4_ERRS, UNIPRO_L4);
+ exynos_ufs_set_unipro_pclk_div(ufs);
+
+ /* unipro */
+ exynos_ufs_config_unipro(ufs);
+
+ /* m-phy */
+ exynos_ufs_phy_init(ufs);
+ if (!(ufs->opts & EXYNOS_UFS_OPT_SKIP_CONFIG_PHY_ATTR)) {
+ exynos_ufs_config_phy_time_attr(ufs);
+ exynos_ufs_config_phy_cap_attr(ufs);
+ }
+
+ exynos_ufs_setup_clocks(hba, true, PRE_CHANGE);
+
+ if (ufs->drv_data->pre_link)
+ ufs->drv_data->pre_link(ufs);
+
+ return 0;
+}
+
+static void exynos_ufs_fit_aggr_timeout(struct exynos_ufs *ufs)
+{
+ u32 val;
+
+ val = exynos_ufs_calc_time_cntr(ufs, IATOVAL_NSEC / CNTR_DIV_VAL);
+ hci_writel(ufs, val & CNT_VAL_1US_MASK, HCI_1US_TO_CNT_VAL);
+}
+
+static int exynos_ufs_post_link(struct ufs_hba *hba)
+{
+ struct exynos_ufs *ufs = ufshcd_get_variant(hba);
+ struct phy *generic_phy = ufs->phy;
+ struct exynos_ufs_uic_attr *attr = ufs->drv_data->uic_attr;
+
+ exynos_ufs_establish_connt(ufs);
+ exynos_ufs_fit_aggr_timeout(ufs);
+
+ hci_writel(ufs, 0xa, HCI_DATA_REORDER);
+ hci_writel(ufs, PRDT_SET_SIZE(12), HCI_TXPRDT_ENTRY_SIZE);
+ hci_writel(ufs, PRDT_SET_SIZE(12), HCI_RXPRDT_ENTRY_SIZE);
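+	/*
+	 * Default all UTRL/UTMRL slots to the SCSI nexus type; the
+	 * setup_xfer_req/setup_task_mgmt hooks retune this per request.
+	 */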
+ hci_writel(ufs, (1 << hba->nutrs) - 1, HCI_UTRL_NEXUS_TYPE);
+ hci_writel(ufs, (1 << hba->nutmrs) - 1, HCI_UTMRL_NEXUS_TYPE);
+ hci_writel(ufs, 0xf, HCI_AXIDMA_RWDATA_BURST_LEN);
+
+ if (ufs->opts & EXYNOS_UFS_OPT_SKIP_CONNECTION_ESTAB)
+ ufshcd_dme_set(hba,
+ UIC_ARG_MIB(T_DBG_SKIP_INIT_HIBERN8_EXIT), true);
+
+ if (attr->pa_granularity) {
+ exynos_ufs_enable_dbg_mode(hba);
+ ufshcd_dme_set(hba, UIC_ARG_MIB(PA_GRANULARITY),
+ attr->pa_granularity);
+ exynos_ufs_disable_dbg_mode(hba);
+
+ if (attr->pa_tactivate)
+ ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TACTIVATE),
+ attr->pa_tactivate);
+ if (attr->pa_hibern8time &&
+ !(ufs->opts & EXYNOS_UFS_OPT_USE_SW_HIBERN8_TIMER))
+ ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HIBERN8TIME),
+ attr->pa_hibern8time);
+ }
+
+ if (ufs->opts & EXYNOS_UFS_OPT_USE_SW_HIBERN8_TIMER) {
+ if (!attr->pa_granularity)
+ ufshcd_dme_get(hba, UIC_ARG_MIB(PA_GRANULARITY),
+ &attr->pa_granularity);
+ if (!attr->pa_hibern8time)
+ ufshcd_dme_get(hba, UIC_ARG_MIB(PA_HIBERN8TIME),
+ &attr->pa_hibern8time);
+		/*
+		 * Do not wait out PA_HIBERN8TIME in hardware on hibern8
+		 * exit; the wait is handled in software instead (see
+		 * exynos_ufs_pre_hibern8()).
+		 */
+ ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HIBERN8TIME), 0);
+
+ if (attr->pa_granularity < 1 || attr->pa_granularity > 6) {
+ /* Valid range for granularity: 1 ~ 6 */
+ dev_warn(hba->dev,
+ "%s: pa_granularity %d is invalid, assuming backwards compatibility\n",
+ __func__,
+ attr->pa_granularity);
+ attr->pa_granularity = 6;
+ }
+ }
+
+ phy_calibrate(generic_phy);
+
+ if (ufs->drv_data->post_link)
+ ufs->drv_data->post_link(ufs);
+
+ return 0;
+}
+
+static int exynos_ufs_parse_dt(struct device *dev, struct exynos_ufs *ufs)
+{
+ struct device_node *np = dev->of_node;
+ struct exynos_ufs_uic_attr *attr;
+ int ret = 0;
+
+ ufs->drv_data = device_get_match_data(dev);
+
+ if (ufs->drv_data && ufs->drv_data->uic_attr) {
+ attr = ufs->drv_data->uic_attr;
+ } else {
+ dev_err(dev, "failed to get uic attributes\n");
+ ret = -EINVAL;
+ goto out;
+ }
+
+ ufs->sysreg = syscon_regmap_lookup_by_phandle(np, "samsung,sysreg");
+	if (IS_ERR(ufs->sysreg)) {
+		ufs->sysreg = NULL;
+	} else {
+ if (of_property_read_u32_index(np, "samsung,sysreg", 1,
+ &ufs->shareability_reg_offset)) {
+ dev_warn(dev, "can't get an offset from sysreg. Set to default value\n");
+ ufs->shareability_reg_offset = UFS_SHAREABILITY_OFFSET;
+ }
+ }
+
+ ufs->pclk_avail_min = PCLK_AVAIL_MIN;
+ ufs->pclk_avail_max = PCLK_AVAIL_MAX;
+
+ attr->rx_adv_fine_gran_sup_en = RX_ADV_FINE_GRAN_SUP_EN;
+ attr->rx_adv_fine_gran_step = RX_ADV_FINE_GRAN_STEP_VAL;
+ attr->rx_adv_min_actv_time_cap = RX_ADV_MIN_ACTV_TIME_CAP;
+ attr->pa_granularity = PA_GRANULARITY_VAL;
+ attr->pa_tactivate = PA_TACTIVATE_VAL;
+ attr->pa_hibern8time = PA_HIBERN8TIME_VAL;
+
+out:
+ return ret;
+}
+
+static inline void exynos_ufs_priv_init(struct ufs_hba *hba,
+ struct exynos_ufs *ufs)
+{
+ ufs->hba = hba;
+ ufs->opts = ufs->drv_data->opts;
+ ufs->rx_sel_idx = PA_MAXDATALANES;
+ if (ufs->opts & EXYNOS_UFS_OPT_BROKEN_RX_SEL_IDX)
+ ufs->rx_sel_idx = 0;
+ hba->priv = (void *)ufs;
+ hba->quirks = ufs->drv_data->quirks;
+}
+
+static int exynos_ufs_init(struct ufs_hba *hba)
+{
+ struct device *dev = hba->dev;
+ struct platform_device *pdev = to_platform_device(dev);
+ struct exynos_ufs *ufs;
+ int ret;
+
+ ufs = devm_kzalloc(dev, sizeof(*ufs), GFP_KERNEL);
+ if (!ufs)
+ return -ENOMEM;
+
+ /* exynos-specific hci */
+ ufs->reg_hci = devm_platform_ioremap_resource_byname(pdev, "vs_hci");
+ if (IS_ERR(ufs->reg_hci)) {
+ dev_err(dev, "cannot ioremap for hci vendor register\n");
+ return PTR_ERR(ufs->reg_hci);
+ }
+
+ /* unipro */
+ ufs->reg_unipro = devm_platform_ioremap_resource_byname(pdev, "unipro");
+ if (IS_ERR(ufs->reg_unipro)) {
+ dev_err(dev, "cannot ioremap for unipro register\n");
+ return PTR_ERR(ufs->reg_unipro);
+ }
+
+ /* ufs protector */
+ ufs->reg_ufsp = devm_platform_ioremap_resource_byname(pdev, "ufsp");
+ if (IS_ERR(ufs->reg_ufsp)) {
+ dev_err(dev, "cannot ioremap for ufs protector register\n");
+ return PTR_ERR(ufs->reg_ufsp);
+ }
+
+ ret = exynos_ufs_parse_dt(dev, ufs);
+ if (ret) {
+ dev_err(dev, "failed to get dt info.\n");
+ goto out;
+ }
+
+ ufs->phy = devm_phy_get(dev, "ufs-phy");
+ if (IS_ERR(ufs->phy)) {
+ ret = PTR_ERR(ufs->phy);
+ dev_err(dev, "failed to get ufs-phy\n");
+ goto out;
+ }
+
+ exynos_ufs_priv_init(hba, ufs);
+
+ if (ufs->drv_data->drv_init) {
+ ret = ufs->drv_data->drv_init(dev, ufs);
+ if (ret) {
+ dev_err(dev, "failed to init drv-data\n");
+ goto out;
+ }
+ }
+
+ ret = exynos_ufs_get_clk_info(ufs);
+ if (ret)
+ goto out;
+ exynos_ufs_specify_phy_time_attr(ufs);
+ exynos_ufs_config_smu(ufs);
+ return 0;
+
+out:
+ hba->priv = NULL;
+ return ret;
+}
+
+static int exynos_ufs_host_reset(struct ufs_hba *hba)
+{
+ struct exynos_ufs *ufs = ufshcd_get_variant(hba);
+ unsigned long timeout = jiffies + msecs_to_jiffies(1);
+ u32 val;
+ int ret = 0;
+
+ exynos_ufs_disable_auto_ctrl_hcc_save(ufs, &val);
+
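+	/* assert the SW reset and poll its self-clearing bit for up to ~1ms */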
+ hci_writel(ufs, UFS_SW_RST_MASK, HCI_SW_RST);
+
+ do {
+ if (!(hci_readl(ufs, HCI_SW_RST) & UFS_SW_RST_MASK))
+ goto out;
+ } while (time_before(jiffies, timeout));
+
+	dev_err(hba->dev, "timed out waiting for host sw-reset\n");
+ ret = -ETIMEDOUT;
+
+out:
+ exynos_ufs_auto_ctrl_hcc_restore(ufs, &val);
+ return ret;
+}
+
+static void exynos_ufs_dev_hw_reset(struct ufs_hba *hba)
+{
+ struct exynos_ufs *ufs = ufshcd_get_variant(hba);
+
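+	/* pulse the device reset line (GPIO_OUT bit 0) low for at least 5us */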
+ hci_writel(ufs, 0 << 0, HCI_GPIO_OUT);
+ udelay(5);
+ hci_writel(ufs, 1 << 0, HCI_GPIO_OUT);
+}
+
+static void exynos_ufs_pre_hibern8(struct ufs_hba *hba, u8 enter)
+{
+ struct exynos_ufs *ufs = ufshcd_get_variant(hba);
+ struct exynos_ufs_uic_attr *attr = ufs->drv_data->uic_attr;
+
+ if (!enter) {
+ if (ufs->opts & EXYNOS_UFS_OPT_BROKEN_AUTO_CLK_CTRL)
+ exynos_ufs_disable_auto_ctrl_hcc(ufs);
+ exynos_ufs_ungate_clks(ufs);
+
+ if (ufs->opts & EXYNOS_UFS_OPT_USE_SW_HIBERN8_TIMER) {
+ static const unsigned int granularity_tbl[] = {
+ 1, 4, 8, 16, 32, 100
+ };
+ int h8_time = attr->pa_hibern8time *
+ granularity_tbl[attr->pa_granularity - 1];
+ unsigned long us;
+ s64 delta;
+
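+			/*
+			 * With the SW hibern8 timer the HW wait is bypassed,
+			 * so sleep in <=1ms slices until PA_HIBERN8TIME has
+			 * elapsed since hibern8 entry.
+			 */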
+ do {
+ delta = h8_time - ktime_us_delta(ktime_get(),
+ ufs->entry_hibern8_t);
+ if (delta <= 0)
+ break;
+
+ us = min_t(s64, delta, USEC_PER_MSEC);
+ if (us >= 10)
+ usleep_range(us, us + 10);
+ } while (1);
+ }
+ }
+}
+
+static void exynos_ufs_post_hibern8(struct ufs_hba *hba, u8 enter)
+{
+ struct exynos_ufs *ufs = ufshcd_get_variant(hba);
+
+ if (!enter) {
+ u32 cur_mode = 0;
+ u32 pwrmode;
+
+ if (ufshcd_is_hs_mode(&ufs->dev_req_params))
+ pwrmode = FAST_MODE;
+ else
+ pwrmode = SLOW_MODE;
+
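+		/*
+		 * PA_PWRMODE packs the RX mode in bits 7:4 and the TX mode
+		 * in bits 3:0; if it no longer matches the request, fall
+		 * back to reprogramming the max power mode.
+		 */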
+ ufshcd_dme_get(hba, UIC_ARG_MIB(PA_PWRMODE), &cur_mode);
+ if (cur_mode != (pwrmode << 4 | pwrmode)) {
+			dev_warn(hba->dev, "%s: power mode mismatch\n", __func__);
+ hba->pwr_info.pwr_rx = (cur_mode >> 4) & 0xf;
+ hba->pwr_info.pwr_tx = cur_mode & 0xf;
+ ufshcd_config_pwr_mode(hba, &hba->max_pwr_info.info);
+ }
+
+ if (!(ufs->opts & EXYNOS_UFS_OPT_SKIP_CONNECTION_ESTAB))
+ exynos_ufs_establish_connt(ufs);
+ } else {
+ ufs->entry_hibern8_t = ktime_get();
+ exynos_ufs_gate_clks(ufs);
+ if (ufs->opts & EXYNOS_UFS_OPT_BROKEN_AUTO_CLK_CTRL)
+ exynos_ufs_enable_auto_ctrl_hcc(ufs);
+ }
+}
+
+static int exynos_ufs_hce_enable_notify(struct ufs_hba *hba,
+ enum ufs_notify_change_status status)
+{
+ struct exynos_ufs *ufs = ufshcd_get_variant(hba);
+ int ret = 0;
+
+ switch (status) {
+ case PRE_CHANGE:
+ if (ufs->drv_data->pre_hce_enable) {
+ ret = ufs->drv_data->pre_hce_enable(ufs);
+ if (ret)
+ return ret;
+ }
+
+ ret = exynos_ufs_host_reset(hba);
+ if (ret)
+ return ret;
+ exynos_ufs_dev_hw_reset(hba);
+ break;
+ case POST_CHANGE:
+ exynos_ufs_calc_pwm_clk_div(ufs);
+ if (!(ufs->opts & EXYNOS_UFS_OPT_BROKEN_AUTO_CLK_CTRL))
+ exynos_ufs_enable_auto_ctrl_hcc(ufs);
+
+ if (ufs->drv_data->post_hce_enable)
+ ret = ufs->drv_data->post_hce_enable(ufs);
+
+ break;
+ }
+
+ return ret;
+}
+
+static int exynos_ufs_link_startup_notify(struct ufs_hba *hba,
+ enum ufs_notify_change_status status)
+{
+ int ret = 0;
+
+ switch (status) {
+ case PRE_CHANGE:
+ ret = exynos_ufs_pre_link(hba);
+ break;
+ case POST_CHANGE:
+ ret = exynos_ufs_post_link(hba);
+ break;
+ }
+
+ return ret;
+}
+
+static int exynos_ufs_pwr_change_notify(struct ufs_hba *hba,
+ enum ufs_notify_change_status status,
+ struct ufs_pa_layer_attr *dev_max_params,
+ struct ufs_pa_layer_attr *dev_req_params)
+{
+ int ret = 0;
+
+ switch (status) {
+ case PRE_CHANGE:
+ ret = exynos_ufs_pre_pwr_mode(hba, dev_max_params,
+ dev_req_params);
+ break;
+ case POST_CHANGE:
+ ret = exynos_ufs_post_pwr_mode(hba, dev_req_params);
+ break;
+ }
+
+ return ret;
+}
+
+static void exynos_ufs_hibern8_notify(struct ufs_hba *hba,
+ enum uic_cmd_dme enter,
+ enum ufs_notify_change_status notify)
+{
+ switch ((u8)notify) {
+ case PRE_CHANGE:
+ exynos_ufs_pre_hibern8(hba, enter);
+ break;
+ case POST_CHANGE:
+ exynos_ufs_post_hibern8(hba, enter);
+ break;
+ }
+}
+
+static int exynos_ufs_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op,
+ enum ufs_notify_change_status status)
+{
+ struct exynos_ufs *ufs = ufshcd_get_variant(hba);
+
+ if (status == PRE_CHANGE)
+ return 0;
+
+ if (!ufshcd_is_link_active(hba))
+ phy_power_off(ufs->phy);
+
+ return 0;
+}
+
+static int exynos_ufs_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
+{
+ struct exynos_ufs *ufs = ufshcd_get_variant(hba);
+
+ if (!ufshcd_is_link_active(hba))
+ phy_power_on(ufs->phy);
+
+ exynos_ufs_config_smu(ufs);
+
+ return 0;
+}
+
+static int exynosauto_ufs_vh_link_startup_notify(struct ufs_hba *hba,
+ enum ufs_notify_change_status status)
+{
+ if (status == POST_CHANGE) {
+ ufshcd_set_link_active(hba);
+ ufshcd_set_ufs_dev_active(hba);
+ }
+
+ return 0;
+}
+
+static int exynosauto_ufs_vh_wait_ph_ready(struct ufs_hba *hba)
+{
+ u32 mbox;
+ ktime_t start, stop;
+
+ start = ktime_get();
+ stop = ktime_add(start, ms_to_ktime(PH_READY_TIMEOUT_MS));
+
+ do {
+ mbox = ufshcd_readl(hba, PH2VH_MBOX);
+		/*
+		 * TODO: Mailbox message protocols between the PH and VHs are
+		 * not implemented yet. This will be supported later.
+		 */
+ if ((mbox & MH_MSG_MASK) == MH_MSG_PH_READY)
+ return 0;
+
+ usleep_range(40, 50);
+ } while (ktime_before(ktime_get(), stop));
+
+ return -ETIME;
+}
+
+static int exynosauto_ufs_vh_init(struct ufs_hba *hba)
+{
+ struct device *dev = hba->dev;
+ struct platform_device *pdev = to_platform_device(dev);
+ struct exynos_ufs *ufs;
+ int ret;
+
+ ufs = devm_kzalloc(dev, sizeof(*ufs), GFP_KERNEL);
+ if (!ufs)
+ return -ENOMEM;
+
+ /* exynos-specific hci */
+ ufs->reg_hci = devm_platform_ioremap_resource_byname(pdev, "vs_hci");
+ if (IS_ERR(ufs->reg_hci)) {
+ dev_err(dev, "cannot ioremap for hci vendor register\n");
+ return PTR_ERR(ufs->reg_hci);
+ }
+
+ ret = exynosauto_ufs_vh_wait_ph_ready(hba);
+ if (ret)
+ return ret;
+
+ ufs->drv_data = device_get_match_data(dev);
+ if (!ufs->drv_data)
+ return -ENODEV;
+
+ exynos_ufs_priv_init(hba, ufs);
+
+ return 0;
+}
+
+static int fsd_ufs_pre_link(struct exynos_ufs *ufs)
+{
+ int i;
+ struct ufs_hba *hba = ufs->hba;
+
+ ufshcd_dme_set(hba, UIC_ARG_MIB(PA_DBG_CLK_PERIOD),
+ DIV_ROUND_UP(NSEC_PER_SEC, ufs->mclk_rate));
+ ufshcd_dme_set(hba, UIC_ARG_MIB(0x201), 0x12);
+ ufshcd_dme_set(hba, UIC_ARG_MIB(0x200), 0x40);
+
+ for_each_ufs_tx_lane(ufs, i) {
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0xAA, i),
+ DIV_ROUND_UP(NSEC_PER_SEC, ufs->mclk_rate));
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x8F, i), 0x3F);
+ }
+
+ for_each_ufs_rx_lane(ufs, i) {
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x12, i),
+ DIV_ROUND_UP(NSEC_PER_SEC, ufs->mclk_rate));
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x5C, i), 0x38);
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x0F, i), 0x0);
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x65, i), 0x1);
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x69, i), 0x1);
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x21, i), 0x0);
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x22, i), 0x0);
+ }
+
+ ufshcd_dme_set(hba, UIC_ARG_MIB(0x200), 0x0);
+ ufshcd_dme_set(hba, UIC_ARG_MIB(PA_DBG_AUTOMODE_THLD), 0x4E20);
+ ufshcd_dme_set(hba, UIC_ARG_MIB(PA_DBG_OPTION_SUITE), 0x2e820183);
+ ufshcd_dme_set(hba, UIC_ARG_MIB(PA_LOCAL_TX_LCC_ENABLE), 0x0);
+
+ exynos_ufs_establish_connt(ufs);
+
+ return 0;
+}
+
+static int fsd_ufs_post_link(struct exynos_ufs *ufs)
+{
+ int i;
+ struct ufs_hba *hba = ufs->hba;
+ u32 hw_cap_min_tactivate;
+ u32 peer_rx_min_actv_time_cap;
+ u32 max_rx_hibern8_time_cap;
+
+ ufshcd_dme_get(hba, UIC_ARG_MIB_SEL(0x8F, 4),
+ &hw_cap_min_tactivate); /* HW Capability of MIN_TACTIVATE */
+ ufshcd_dme_get(hba, UIC_ARG_MIB(PA_TACTIVATE),
+ &peer_rx_min_actv_time_cap); /* PA_TActivate */
+ ufshcd_dme_get(hba, UIC_ARG_MIB(PA_HIBERN8TIME),
+ &max_rx_hibern8_time_cap); /* PA_Hibern8Time */
+
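+	/* program PA_TActivate/PA_Hibern8Time one step above the peer minimum */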
+ if (peer_rx_min_actv_time_cap >= hw_cap_min_tactivate)
+ ufshcd_dme_peer_set(hba, UIC_ARG_MIB(PA_TACTIVATE),
+ peer_rx_min_actv_time_cap + 1);
+ ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HIBERN8TIME), max_rx_hibern8_time_cap + 1);
+
+ ufshcd_dme_set(hba, UIC_ARG_MIB(PA_DBG_MODE), 0x01);
+ ufshcd_dme_set(hba, UIC_ARG_MIB(PA_SAVECONFIGTIME), 0xFA);
+ ufshcd_dme_set(hba, UIC_ARG_MIB(PA_DBG_MODE), 0x00);
+
+ ufshcd_dme_set(hba, UIC_ARG_MIB(0x200), 0x40);
+
+ for_each_ufs_rx_lane(ufs, i) {
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x35, i), 0x05);
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x73, i), 0x01);
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x41, i), 0x02);
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x42, i), 0xAC);
+ }
+
+ ufshcd_dme_set(hba, UIC_ARG_MIB(0x200), 0x0);
+
+ return 0;
+}
+
+static int fsd_ufs_pre_pwr_change(struct exynos_ufs *ufs,
+ struct ufs_pa_layer_attr *pwr)
+{
+ struct ufs_hba *hba = ufs->hba;
+
+ ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXTERMINATION), 0x1);
+ ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXTERMINATION), 0x1);
+ ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA0), 12000);
+ ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA1), 32000);
+ ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA2), 16000);
+
+ unipro_writel(ufs, 12000, UNIPRO_DME_POWERMODE_REQ_REMOTEL2TIMER0);
+ unipro_writel(ufs, 32000, UNIPRO_DME_POWERMODE_REQ_REMOTEL2TIMER1);
+ unipro_writel(ufs, 16000, UNIPRO_DME_POWERMODE_REQ_REMOTEL2TIMER2);
+
+ return 0;
+}
+
+static const struct ufs_hba_variant_ops ufs_hba_exynos_ops = {
+ .name = "exynos_ufs",
+ .init = exynos_ufs_init,
+ .hce_enable_notify = exynos_ufs_hce_enable_notify,
+ .link_startup_notify = exynos_ufs_link_startup_notify,
+ .pwr_change_notify = exynos_ufs_pwr_change_notify,
+ .setup_clocks = exynos_ufs_setup_clocks,
+ .setup_xfer_req = exynos_ufs_specify_nexus_t_xfer_req,
+ .setup_task_mgmt = exynos_ufs_specify_nexus_t_tm_req,
+ .hibern8_notify = exynos_ufs_hibern8_notify,
+ .suspend = exynos_ufs_suspend,
+ .resume = exynos_ufs_resume,
+};
+
+static struct ufs_hba_variant_ops ufs_hba_exynosauto_vh_ops = {
+ .name = "exynosauto_ufs_vh",
+ .init = exynosauto_ufs_vh_init,
+ .link_startup_notify = exynosauto_ufs_vh_link_startup_notify,
+};
+
+static int exynos_ufs_probe(struct platform_device *pdev)
+{
+ int err;
+ struct device *dev = &pdev->dev;
+ const struct ufs_hba_variant_ops *vops = &ufs_hba_exynos_ops;
+ const struct exynos_ufs_drv_data *drv_data =
+ device_get_match_data(dev);
+
+ if (drv_data && drv_data->vops)
+ vops = drv_data->vops;
+
+ err = ufshcd_pltfrm_init(pdev, vops);
+ if (err)
+ dev_err(dev, "ufshcd_pltfrm_init() failed %d\n", err);
+
+ return err;
+}
+
+static int exynos_ufs_remove(struct platform_device *pdev)
+{
+ struct ufs_hba *hba = platform_get_drvdata(pdev);
+ struct exynos_ufs *ufs = ufshcd_get_variant(hba);
+
+	pm_runtime_get_sync(&pdev->dev);
+ ufshcd_remove(hba);
+
+ phy_power_off(ufs->phy);
+ phy_exit(ufs->phy);
+
+ return 0;
+}
+
+static struct exynos_ufs_uic_attr exynos7_uic_attr = {
+ .tx_trailingclks = 0x10,
+ .tx_dif_p_nsec = 3000000, /* unit: ns */
+ .tx_dif_n_nsec = 1000000, /* unit: ns */
+ .tx_high_z_cnt_nsec = 20000, /* unit: ns */
+ .tx_base_unit_nsec = 100000, /* unit: ns */
+ .tx_gran_unit_nsec = 4000, /* unit: ns */
+ .tx_sleep_cnt = 1000, /* unit: ns */
+ .tx_min_activatetime = 0xa,
+ .rx_filler_enable = 0x2,
+ .rx_dif_p_nsec = 1000000, /* unit: ns */
+ .rx_hibern8_wait_nsec = 4000000, /* unit: ns */
+ .rx_base_unit_nsec = 100000, /* unit: ns */
+ .rx_gran_unit_nsec = 4000, /* unit: ns */
+ .rx_sleep_cnt = 1280, /* unit: ns */
+ .rx_stall_cnt = 320, /* unit: ns */
+ .rx_hs_g1_sync_len_cap = SYNC_LEN_COARSE(0xf),
+ .rx_hs_g2_sync_len_cap = SYNC_LEN_COARSE(0xf),
+ .rx_hs_g3_sync_len_cap = SYNC_LEN_COARSE(0xf),
+ .rx_hs_g1_prep_sync_len_cap = PREP_LEN(0xf),
+ .rx_hs_g2_prep_sync_len_cap = PREP_LEN(0xf),
+ .rx_hs_g3_prep_sync_len_cap = PREP_LEN(0xf),
+ .pa_dbg_option_suite = 0x30103,
+};
+
+static const struct exynos_ufs_drv_data exynosauto_ufs_drvs = {
+ .uic_attr = &exynos7_uic_attr,
+ .quirks = UFSHCD_QUIRK_PRDT_BYTE_GRAN |
+ UFSHCI_QUIRK_SKIP_RESET_INTR_AGGR |
+ UFSHCD_QUIRK_BROKEN_OCS_FATAL_ERROR |
+ UFSHCD_QUIRK_SKIP_DEF_UNIPRO_TIMEOUT_SETTING,
+ .opts = EXYNOS_UFS_OPT_BROKEN_AUTO_CLK_CTRL |
+ EXYNOS_UFS_OPT_SKIP_CONFIG_PHY_ATTR |
+ EXYNOS_UFS_OPT_BROKEN_RX_SEL_IDX,
+ .drv_init = exynosauto_ufs_drv_init,
+ .post_hce_enable = exynosauto_ufs_post_hce_enable,
+ .pre_link = exynosauto_ufs_pre_link,
+ .pre_pwr_change = exynosauto_ufs_pre_pwr_change,
+ .post_pwr_change = exynosauto_ufs_post_pwr_change,
+};
+
+static const struct exynos_ufs_drv_data exynosauto_ufs_vh_drvs = {
+ .vops = &ufs_hba_exynosauto_vh_ops,
+ .quirks = UFSHCD_QUIRK_PRDT_BYTE_GRAN |
+ UFSHCI_QUIRK_SKIP_RESET_INTR_AGGR |
+ UFSHCD_QUIRK_BROKEN_OCS_FATAL_ERROR |
+ UFSHCI_QUIRK_BROKEN_HCE |
+ UFSHCD_QUIRK_BROKEN_UIC_CMD |
+ UFSHCD_QUIRK_SKIP_PH_CONFIGURATION |
+ UFSHCD_QUIRK_SKIP_DEF_UNIPRO_TIMEOUT_SETTING,
+ .opts = EXYNOS_UFS_OPT_BROKEN_RX_SEL_IDX,
+};
+
+static const struct exynos_ufs_drv_data exynos_ufs_drvs = {
+ .uic_attr = &exynos7_uic_attr,
+ .quirks = UFSHCD_QUIRK_PRDT_BYTE_GRAN |
+ UFSHCI_QUIRK_BROKEN_REQ_LIST_CLR |
+ UFSHCI_QUIRK_BROKEN_HCE |
+ UFSHCI_QUIRK_SKIP_RESET_INTR_AGGR |
+ UFSHCD_QUIRK_BROKEN_OCS_FATAL_ERROR |
+ UFSHCI_QUIRK_SKIP_MANUAL_WB_FLUSH_CTRL |
+ UFSHCD_QUIRK_SKIP_DEF_UNIPRO_TIMEOUT_SETTING |
+ UFSHCD_QUIRK_4KB_DMA_ALIGNMENT,
+ .opts = EXYNOS_UFS_OPT_HAS_APB_CLK_CTRL |
+ EXYNOS_UFS_OPT_BROKEN_AUTO_CLK_CTRL |
+ EXYNOS_UFS_OPT_BROKEN_RX_SEL_IDX |
+ EXYNOS_UFS_OPT_SKIP_CONNECTION_ESTAB |
+ EXYNOS_UFS_OPT_USE_SW_HIBERN8_TIMER,
+ .drv_init = exynos7_ufs_drv_init,
+ .pre_link = exynos7_ufs_pre_link,
+ .post_link = exynos7_ufs_post_link,
+ .pre_pwr_change = exynos7_ufs_pre_pwr_change,
+ .post_pwr_change = exynos7_ufs_post_pwr_change,
+};
+
+static struct exynos_ufs_uic_attr fsd_uic_attr = {
+ .tx_trailingclks = 0x10,
+ .tx_dif_p_nsec = 3000000, /* unit: ns */
+ .tx_dif_n_nsec = 1000000, /* unit: ns */
+ .tx_high_z_cnt_nsec = 20000, /* unit: ns */
+ .tx_base_unit_nsec = 100000, /* unit: ns */
+ .tx_gran_unit_nsec = 4000, /* unit: ns */
+ .tx_sleep_cnt = 1000, /* unit: ns */
+ .tx_min_activatetime = 0xa,
+ .rx_filler_enable = 0x2,
+ .rx_dif_p_nsec = 1000000, /* unit: ns */
+ .rx_hibern8_wait_nsec = 4000000, /* unit: ns */
+ .rx_base_unit_nsec = 100000, /* unit: ns */
+ .rx_gran_unit_nsec = 4000, /* unit: ns */
+ .rx_sleep_cnt = 1280, /* unit: ns */
+ .rx_stall_cnt = 320, /* unit: ns */
+ .rx_hs_g1_sync_len_cap = SYNC_LEN_COARSE(0xf),
+ .rx_hs_g2_sync_len_cap = SYNC_LEN_COARSE(0xf),
+ .rx_hs_g3_sync_len_cap = SYNC_LEN_COARSE(0xf),
+ .rx_hs_g1_prep_sync_len_cap = PREP_LEN(0xf),
+ .rx_hs_g2_prep_sync_len_cap = PREP_LEN(0xf),
+ .rx_hs_g3_prep_sync_len_cap = PREP_LEN(0xf),
+ .pa_dbg_option_suite = 0x2E820183,
+};
+
+static const struct exynos_ufs_drv_data fsd_ufs_drvs = {
+ .uic_attr = &fsd_uic_attr,
+ .quirks = UFSHCD_QUIRK_PRDT_BYTE_GRAN |
+ UFSHCI_QUIRK_BROKEN_REQ_LIST_CLR |
+ UFSHCD_QUIRK_BROKEN_OCS_FATAL_ERROR |
+ UFSHCD_QUIRK_SKIP_DEF_UNIPRO_TIMEOUT_SETTING |
+ UFSHCI_QUIRK_SKIP_RESET_INTR_AGGR,
+ .opts = EXYNOS_UFS_OPT_HAS_APB_CLK_CTRL |
+ EXYNOS_UFS_OPT_BROKEN_AUTO_CLK_CTRL |
+ EXYNOS_UFS_OPT_SKIP_CONFIG_PHY_ATTR |
+ EXYNOS_UFS_OPT_BROKEN_RX_SEL_IDX,
+ .pre_link = fsd_ufs_pre_link,
+ .post_link = fsd_ufs_post_link,
+ .pre_pwr_change = fsd_ufs_pre_pwr_change,
+};
+
+static const struct of_device_id exynos_ufs_of_match[] = {
+ { .compatible = "samsung,exynos7-ufs",
+ .data = &exynos_ufs_drvs },
+ { .compatible = "samsung,exynosautov9-ufs",
+ .data = &exynosauto_ufs_drvs },
+ { .compatible = "samsung,exynosautov9-ufs-vh",
+ .data = &exynosauto_ufs_vh_drvs },
+ { .compatible = "tesla,fsd-ufs",
+ .data = &fsd_ufs_drvs },
+ {},
+};
+
+static const struct dev_pm_ops exynos_ufs_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(ufshcd_system_suspend, ufshcd_system_resume)
+ SET_RUNTIME_PM_OPS(ufshcd_runtime_suspend, ufshcd_runtime_resume, NULL)
+ .prepare = ufshcd_suspend_prepare,
+ .complete = ufshcd_resume_complete,
+};
+
+static struct platform_driver exynos_ufs_pltform = {
+ .probe = exynos_ufs_probe,
+ .remove = exynos_ufs_remove,
+ .shutdown = ufshcd_pltfrm_shutdown,
+ .driver = {
+ .name = "exynos-ufshc",
+ .pm = &exynos_ufs_pm_ops,
+ .of_match_table = of_match_ptr(exynos_ufs_of_match),
+ },
+};
+module_platform_driver(exynos_ufs_pltform);
+
+MODULE_AUTHOR("Alim Akhtar <alim.akhtar@samsung.com>");
+MODULE_AUTHOR("Seungwon Jeon <essuuj@gmail.com>");
+MODULE_DESCRIPTION("Exynos UFS HCI Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/ufs/host/ufs-exynos.h b/drivers/ufs/host/ufs-exynos.h
new file mode 100644
index 000000000..a4bd6646d
--- /dev/null
+++ b/drivers/ufs/host/ufs-exynos.h
@@ -0,0 +1,270 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * UFS Host Controller driver for Exynos specific extensions
+ *
+ * Copyright (C) 2014-2015 Samsung Electronics Co., Ltd.
+ *
+ */
+
+#ifndef _UFS_EXYNOS_H_
+#define _UFS_EXYNOS_H_
+
+/*
+ * UNIPRO registers
+ */
+#define UNIPRO_DBG_FORCE_DME_CTRL_STATE 0x150
+
+/*
+ * MIBs for PA debug registers
+ */
+#define PA_DBG_CLK_PERIOD 0x9514
+#define PA_DBG_TXPHY_CFGUPDT 0x9518
+#define PA_DBG_RXPHY_CFGUPDT 0x9519
+#define PA_DBG_MODE 0x9529
+#define PA_DBG_SKIP_RESET_PHY 0x9539
+#define PA_DBG_AUTOMODE_THLD 0x9536
+#define PA_DBG_OV_TM 0x9540
+#define PA_DBG_SKIP_LINE_RESET 0x9541
+#define PA_DBG_LINE_RESET_REQ 0x9543
+#define PA_DBG_OPTION_SUITE 0x9564
+#define PA_DBG_OPTION_SUITE_DYN 0x9565
+
+/*
+ * MIBs for Transport Layer debug registers
+ */
+#define T_DBG_SKIP_INIT_HIBERN8_EXIT 0xc001
+
+/*
+ * Exynos MPHY attributes
+ */
+#define TX_LINERESET_N_VAL 0x0277
+#define TX_LINERESET_N(v) (((v) >> 10) & 0xFF)
+#define TX_LINERESET_P_VAL 0x027D
+#define TX_LINERESET_P(v) (((v) >> 12) & 0xFF)
+#define TX_OV_SLEEP_CNT_TIMER 0x028E
+#define TX_OV_H8_ENTER_EN (1 << 7)
+#define TX_OV_SLEEP_CNT(v) (((v) >> 5) & 0x7F)
+#define TX_HIGH_Z_CNT_11_08 0x028C
+#define TX_HIGH_Z_CNT_H(v) (((v) >> 8) & 0xF)
+#define TX_HIGH_Z_CNT_07_00 0x028D
+#define TX_HIGH_Z_CNT_L(v) ((v) & 0xFF)
+#define TX_BASE_NVAL_07_00 0x0293
+#define TX_BASE_NVAL_L(v) ((v) & 0xFF)
+#define TX_BASE_NVAL_15_08 0x0294
+#define TX_BASE_NVAL_H(v) (((v) >> 8) & 0xFF)
+#define TX_GRAN_NVAL_07_00 0x0295
+#define TX_GRAN_NVAL_L(v) ((v) & 0xFF)
+#define TX_GRAN_NVAL_10_08 0x0296
+#define TX_GRAN_NVAL_H(v) (((v) >> 8) & 0x3)
+
+#define VND_TX_CLK_PRD 0xAA
+#define VND_TX_CLK_PRD_EN 0xA9
+#define VND_TX_LINERESET_PVALUE0 0xAD
+#define VND_TX_LINERESET_PVALUE1 0xAC
+#define VND_TX_LINERESET_PVALUE2 0xAB
+
+#define TX_LINE_RESET_TIME 3200
+
+#define VND_RX_CLK_PRD 0x12
+#define VND_RX_CLK_PRD_EN 0x11
+#define VND_RX_LINERESET_VALUE0 0x1D
+#define VND_RX_LINERESET_VALUE1 0x1C
+#define VND_RX_LINERESET_VALUE2 0x1B
+
+#define RX_LINE_RESET_TIME 1000
+
+#define RX_FILLER_ENABLE 0x0316
+#define RX_FILLER_EN (1 << 1)
+#define RX_LINERESET_VAL 0x0317
+#define RX_LINERESET(v) (((v) >> 12) & 0xFF)
+#define RX_LCC_IGNORE 0x0318
+#define RX_SYNC_MASK_LENGTH 0x0321
+#define RX_HIBERN8_WAIT_VAL_BIT_20_16 0x0331
+#define RX_HIBERN8_WAIT_VAL_BIT_15_08 0x0332
+#define RX_HIBERN8_WAIT_VAL_BIT_07_00 0x0333
+#define RX_OV_SLEEP_CNT_TIMER 0x0340
+#define RX_OV_SLEEP_CNT(v) (((v) >> 6) & 0x1F)
+#define RX_OV_STALL_CNT_TIMER 0x0341
+#define RX_OV_STALL_CNT(v) (((v) >> 4) & 0xFF)
+#define RX_BASE_NVAL_07_00 0x0355
+#define RX_BASE_NVAL_L(v) ((v) & 0xFF)
+#define RX_BASE_NVAL_15_08 0x0354
+#define RX_BASE_NVAL_H(v) (((v) >> 8) & 0xFF)
+#define RX_GRAN_NVAL_07_00 0x0353
+#define RX_GRAN_NVAL_L(v) ((v) & 0xFF)
+#define RX_GRAN_NVAL_10_08 0x0352
+#define RX_GRAN_NVAL_H(v) (((v) >> 8) & 0x3)
+
+#define CMN_PWM_CLK_CTRL 0x0402
+#define PWM_CLK_CTRL_MASK 0x3
+
+#define IATOVAL_NSEC 20000 /* unit: ns */
+#define UNIPRO_PCLK_PERIOD(ufs) (NSEC_PER_SEC / (ufs)->pclk_rate)
+
+struct exynos_ufs;
+
+/* vendor specific pre-defined parameters */
+#define SLOW 1
+#define FAST 2
+
+#define RX_ADV_FINE_GRAN_SUP_EN 0x1
+#define RX_ADV_FINE_GRAN_STEP_VAL 0x3
+#define RX_ADV_MIN_ACTV_TIME_CAP 0x9
+
+#define PA_GRANULARITY_VAL 0x6
+#define PA_TACTIVATE_VAL 0x3
+#define PA_HIBERN8TIME_VAL 0x20
+
+#define PCLK_AVAIL_MIN 70000000
+#define PCLK_AVAIL_MAX 167000000
+
+struct exynos_ufs_uic_attr {
+ /* TX Attributes */
+ unsigned int tx_trailingclks;
+ unsigned int tx_dif_p_nsec;
+ unsigned int tx_dif_n_nsec;
+ unsigned int tx_high_z_cnt_nsec;
+ unsigned int tx_base_unit_nsec;
+ unsigned int tx_gran_unit_nsec;
+ unsigned int tx_sleep_cnt;
+ unsigned int tx_min_activatetime;
+ /* RX Attributes */
+ unsigned int rx_filler_enable;
+ unsigned int rx_dif_p_nsec;
+ unsigned int rx_hibern8_wait_nsec;
+ unsigned int rx_base_unit_nsec;
+ unsigned int rx_gran_unit_nsec;
+ unsigned int rx_sleep_cnt;
+ unsigned int rx_stall_cnt;
+ unsigned int rx_hs_g1_sync_len_cap;
+ unsigned int rx_hs_g2_sync_len_cap;
+ unsigned int rx_hs_g3_sync_len_cap;
+ unsigned int rx_hs_g1_prep_sync_len_cap;
+ unsigned int rx_hs_g2_prep_sync_len_cap;
+ unsigned int rx_hs_g3_prep_sync_len_cap;
+ /* Common Attributes */
+ unsigned int cmn_pwm_clk_ctrl;
+ /* Internal Attributes */
+ unsigned int pa_dbg_option_suite;
+ /* Changeable Attributes */
+ unsigned int rx_adv_fine_gran_sup_en;
+ unsigned int rx_adv_fine_gran_step;
+ unsigned int rx_min_actv_time_cap;
+ unsigned int rx_hibern8_time_cap;
+ unsigned int rx_adv_min_actv_time_cap;
+ unsigned int rx_adv_hibern8_time_cap;
+ unsigned int pa_granularity;
+ unsigned int pa_tactivate;
+ unsigned int pa_hibern8time;
+};
+
+struct exynos_ufs_drv_data {
+ const struct ufs_hba_variant_ops *vops;
+ struct exynos_ufs_uic_attr *uic_attr;
+ unsigned int quirks;
+ unsigned int opts;
+ /* SoC's specific operations */
+ int (*drv_init)(struct device *dev, struct exynos_ufs *ufs);
+ int (*pre_link)(struct exynos_ufs *ufs);
+ int (*post_link)(struct exynos_ufs *ufs);
+ int (*pre_pwr_change)(struct exynos_ufs *ufs,
+ struct ufs_pa_layer_attr *pwr);
+ int (*post_pwr_change)(struct exynos_ufs *ufs,
+ struct ufs_pa_layer_attr *pwr);
+ int (*pre_hce_enable)(struct exynos_ufs *ufs);
+ int (*post_hce_enable)(struct exynos_ufs *ufs);
+};
+
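+/*
+ * M-PHY timing values in clock-cycle counts, derived from the ns-based
+ * fields of struct exynos_ufs_uic_attr by exynos_ufs_specify_phy_time_attr().
+ */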
+struct ufs_phy_time_cfg {
+ u32 tx_linereset_p;
+ u32 tx_linereset_n;
+ u32 tx_high_z_cnt;
+ u32 tx_base_n_val;
+ u32 tx_gran_n_val;
+ u32 tx_sleep_cnt;
+ u32 rx_linereset;
+ u32 rx_hibern8_wait;
+ u32 rx_base_n_val;
+ u32 rx_gran_n_val;
+ u32 rx_sleep_cnt;
+ u32 rx_stall_cnt;
+};
+
+struct exynos_ufs {
+ struct ufs_hba *hba;
+ struct phy *phy;
+ void __iomem *reg_hci;
+ void __iomem *reg_unipro;
+ void __iomem *reg_ufsp;
+ struct clk *clk_hci_core;
+ struct clk *clk_unipro_main;
+ struct clk *clk_apb;
+ u32 pclk_rate;
+ u32 pclk_div;
+ u32 pclk_avail_min;
+ u32 pclk_avail_max;
+ unsigned long mclk_rate;
+ int avail_ln_rx;
+ int avail_ln_tx;
+ int rx_sel_idx;
+ struct ufs_pa_layer_attr dev_req_params;
+ struct ufs_phy_time_cfg t_cfg;
+ ktime_t entry_hibern8_t;
+ const struct exynos_ufs_drv_data *drv_data;
+ struct regmap *sysreg;
+ u32 shareability_reg_offset;
+
+ u32 opts;
+#define EXYNOS_UFS_OPT_HAS_APB_CLK_CTRL BIT(0)
+#define EXYNOS_UFS_OPT_SKIP_CONNECTION_ESTAB BIT(1)
+#define EXYNOS_UFS_OPT_BROKEN_AUTO_CLK_CTRL BIT(2)
+#define EXYNOS_UFS_OPT_BROKEN_RX_SEL_IDX BIT(3)
+#define EXYNOS_UFS_OPT_USE_SW_HIBERN8_TIMER BIT(4)
+#define EXYNOS_UFS_OPT_SKIP_CONFIG_PHY_ATTR BIT(5)
+};
+
+#define for_each_ufs_rx_lane(ufs, i) \
+ for (i = (ufs)->rx_sel_idx; \
+ i < (ufs)->rx_sel_idx + (ufs)->avail_ln_rx; i++)
+#define for_each_ufs_tx_lane(ufs, i) \
+ for (i = 0; i < (ufs)->avail_ln_tx; i++)
+
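+/*
+ * Generate <name>_readl()/<name>_writel() MMIO accessors against the
+ * matching reg_<name> base (hci, unipro and ufsp register blocks).
+ */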
+#define EXYNOS_UFS_MMIO_FUNC(name) \
+static inline void name##_writel(struct exynos_ufs *ufs, u32 val, u32 reg)\
+{ \
+ writel(val, ufs->reg_##name + reg); \
+} \
+ \
+static inline u32 name##_readl(struct exynos_ufs *ufs, u32 reg) \
+{ \
+ return readl(ufs->reg_##name + reg); \
+}
+
+EXYNOS_UFS_MMIO_FUNC(hci);
+EXYNOS_UFS_MMIO_FUNC(unipro);
+EXYNOS_UFS_MMIO_FUNC(ufsp);
+#undef EXYNOS_UFS_MMIO_FUNC
+
+long exynos_ufs_calc_time_cntr(struct exynos_ufs *ufs, long period);
+
+static inline void exynos_ufs_enable_ov_tm(struct ufs_hba *hba)
+{
+ ufshcd_dme_set(hba, UIC_ARG_MIB(PA_DBG_OV_TM), true);
+}
+
+static inline void exynos_ufs_disable_ov_tm(struct ufs_hba *hba)
+{
+ ufshcd_dme_set(hba, UIC_ARG_MIB(PA_DBG_OV_TM), false);
+}
+
+static inline void exynos_ufs_enable_dbg_mode(struct ufs_hba *hba)
+{
+ ufshcd_dme_set(hba, UIC_ARG_MIB(PA_DBG_MODE), true);
+}
+
+static inline void exynos_ufs_disable_dbg_mode(struct ufs_hba *hba)
+{
+ ufshcd_dme_set(hba, UIC_ARG_MIB(PA_DBG_MODE), false);
+}
+
+#endif /* _UFS_EXYNOS_H_ */
diff --git a/drivers/ufs/host/ufs-hisi.c b/drivers/ufs/host/ufs-hisi.c
new file mode 100644
index 000000000..2eed13bc8
--- /dev/null
+++ b/drivers/ufs/host/ufs-hisi.c
@@ -0,0 +1,607 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * HiSilicon Hixxxx UFS Driver
+ *
+ * Copyright (c) 2016-2017 Linaro Ltd.
+ * Copyright (c) 2016-2017 HiSilicon Technologies Co., Ltd.
+ */
+
+#include <linux/time.h>
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/dma-mapping.h>
+#include <linux/platform_device.h>
+#include <linux/reset.h>
+
+#include <ufs/ufshcd.h>
+#include "ufshcd-pltfrm.h"
+#include <ufs/unipro.h>
+#include "ufs-hisi.h"
+#include <ufs/ufshci.h>
+#include <ufs/ufs_quirks.h>
+
+static int ufs_hisi_check_hibern8(struct ufs_hba *hba)
+{
+ int err = 0;
+ u32 tx_fsm_val_0 = 0;
+ u32 tx_fsm_val_1 = 0;
+ unsigned long timeout = jiffies + msecs_to_jiffies(HBRN8_POLL_TOUT_MS);
+
+ do {
+ err = ufshcd_dme_get(hba, UIC_ARG_MIB_SEL(MPHY_TX_FSM_STATE, 0),
+ &tx_fsm_val_0);
+ err |= ufshcd_dme_get(hba,
+ UIC_ARG_MIB_SEL(MPHY_TX_FSM_STATE, 1), &tx_fsm_val_1);
+ if (err || (tx_fsm_val_0 == TX_FSM_HIBERN8 &&
+ tx_fsm_val_1 == TX_FSM_HIBERN8))
+ break;
+
+ /* sleep for max. 200us */
+ usleep_range(100, 200);
+ } while (time_before(jiffies, timeout));
+
+	/*
+	 * We might have been scheduled out for a long time while polling,
+	 * so check the state once more.
+	 */
+ if (time_after(jiffies, timeout)) {
+ err = ufshcd_dme_get(hba, UIC_ARG_MIB_SEL(MPHY_TX_FSM_STATE, 0),
+ &tx_fsm_val_0);
+ err |= ufshcd_dme_get(hba,
+ UIC_ARG_MIB_SEL(MPHY_TX_FSM_STATE, 1), &tx_fsm_val_1);
+ }
+
+ if (err) {
+ dev_err(hba->dev, "%s: unable to get TX_FSM_STATE, err %d\n",
+ __func__, err);
+ } else if (tx_fsm_val_0 != TX_FSM_HIBERN8 ||
+ tx_fsm_val_1 != TX_FSM_HIBERN8) {
+ err = -1;
+ dev_err(hba->dev, "%s: invalid TX_FSM_STATE, lane0 = %d, lane1 = %d\n",
+ __func__, tx_fsm_val_0, tx_fsm_val_1);
+ }
+
+ return err;
+}
+
+static void ufs_hisi_clk_init(struct ufs_hba *hba)
+{
+ struct ufs_hisi_host *host = ufshcd_get_variant(hba);
+
+ ufs_sys_ctrl_clr_bits(host, BIT_SYSCTRL_REF_CLOCK_EN, PHY_CLK_CTRL);
+ if (ufs_sys_ctrl_readl(host, PHY_CLK_CTRL) & BIT_SYSCTRL_REF_CLOCK_EN)
+ mdelay(1);
+ /* use abb clk */
+	ufs_sys_ctrl_clr_bits(host, BIT_UFS_REFCLK_SRC_SEL, UFS_SYSCTRL);
+ ufs_sys_ctrl_clr_bits(host, BIT_UFS_REFCLK_ISO_EN, PHY_ISO_EN);
+ /* open mphy ref clk */
+ ufs_sys_ctrl_set_bits(host, BIT_SYSCTRL_REF_CLOCK_EN, PHY_CLK_CTRL);
+}
+
+static void ufs_hisi_soc_init(struct ufs_hba *hba)
+{
+ struct ufs_hisi_host *host = ufshcd_get_variant(hba);
+ u32 reg;
+
+ if (!IS_ERR(host->rst))
+ reset_control_assert(host->rst);
+
+ /* HC_PSW powerup */
+ ufs_sys_ctrl_set_bits(host, BIT_UFS_PSW_MTCMOS_EN, PSW_POWER_CTRL);
+ udelay(10);
+ /* notify PWR ready */
+ ufs_sys_ctrl_set_bits(host, BIT_SYSCTRL_PWR_READY, HC_LP_CTRL);
+ ufs_sys_ctrl_writel(host, MASK_UFS_DEVICE_RESET | 0,
+ UFS_DEVICE_RESET_CTRL);
+
+ reg = ufs_sys_ctrl_readl(host, PHY_CLK_CTRL);
+ reg = (reg & ~MASK_SYSCTRL_CFG_CLOCK_FREQ) | UFS_FREQ_CFG_CLK;
+ /* set cfg clk freq */
+ ufs_sys_ctrl_writel(host, reg, PHY_CLK_CTRL);
+ /* set ref clk freq */
+ ufs_sys_ctrl_clr_bits(host, MASK_SYSCTRL_REF_CLOCK_SEL, PHY_CLK_CTRL);
+ /* bypass ufs clk gate */
+ ufs_sys_ctrl_set_bits(host, MASK_UFS_CLK_GATE_BYPASS,
+ CLOCK_GATE_BYPASS);
+ ufs_sys_ctrl_set_bits(host, MASK_UFS_SYSCRTL_BYPASS, UFS_SYSCTRL);
+
+ /* open psw clk */
+ ufs_sys_ctrl_set_bits(host, BIT_SYSCTRL_PSW_CLK_EN, PSW_CLK_CTRL);
+ /* disable ufshc iso */
+ ufs_sys_ctrl_clr_bits(host, BIT_UFS_PSW_ISO_CTRL, PSW_POWER_CTRL);
+ /* disable phy iso */
+ ufs_sys_ctrl_clr_bits(host, BIT_UFS_PHY_ISO_CTRL, PHY_ISO_EN);
+ /* notice iso disable */
+ ufs_sys_ctrl_clr_bits(host, BIT_SYSCTRL_LP_ISOL_EN, HC_LP_CTRL);
+
+ /* disable lp_reset_n */
+ ufs_sys_ctrl_set_bits(host, BIT_SYSCTRL_LP_RESET_N, RESET_CTRL_EN);
+ mdelay(1);
+
+ ufs_sys_ctrl_writel(host, MASK_UFS_DEVICE_RESET | BIT_UFS_DEVICE_RESET,
+ UFS_DEVICE_RESET_CTRL);
+
+ msleep(20);
+
+	/*
+	 * Enable the linereset recovery fix and the rx_reset/tx_reset beat;
+	 * also enable the ref_clk_en override (bit 5) with an override value
+	 * of 1 (bit 4), written together with the matching mask bits.
+	 */
+ ufs_sys_ctrl_writel(host, 0x03300330, UFS_DEVICE_RESET_CTRL);
+
+ if (!IS_ERR(host->rst))
+ reset_control_deassert(host->rst);
+}
+
+static int ufs_hisi_link_startup_pre_change(struct ufs_hba *hba)
+{
+ struct ufs_hisi_host *host = ufshcd_get_variant(hba);
+ int err;
+ uint32_t value;
+ uint32_t reg;
+
+ /* Unipro VS_mphy_disable */
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0xD0C1, 0x0), 0x1);
+ /* PA_HSSeries */
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x156A, 0x0), 0x2);
+ /* MPHY CBRATESEL */
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x8114, 0x0), 0x1);
+ /* MPHY CBOVRCTRL2 */
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x8121, 0x0), 0x2D);
+ /* MPHY CBOVRCTRL3 */
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x8122, 0x0), 0x1);
+
+ if (host->caps & UFS_HISI_CAP_PHY10nm) {
+ /* MPHY CBOVRCTRL4 */
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x8127, 0x0), 0x98);
+ /* MPHY CBOVRCTRL5 */
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x8128, 0x0), 0x1);
+ }
+
+ /* Unipro VS_MphyCfgUpdt */
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0xD085, 0x0), 0x1);
+ /* MPHY RXOVRCTRL4 rx0 */
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x800D, 0x4), 0x58);
+ /* MPHY RXOVRCTRL4 rx1 */
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x800D, 0x5), 0x58);
+ /* MPHY RXOVRCTRL5 rx0 */
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x800E, 0x4), 0xB);
+ /* MPHY RXOVRCTRL5 rx1 */
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x800E, 0x5), 0xB);
+ /* MPHY RXSQCONTROL rx0 */
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x8009, 0x4), 0x1);
+ /* MPHY RXSQCONTROL rx1 */
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x8009, 0x5), 0x1);
+ /* Unipro VS_MphyCfgUpdt */
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0xD085, 0x0), 0x1);
+
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x8113, 0x0), 0x1);
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0xD085, 0x0), 0x1);
+
+ if (host->caps & UFS_HISI_CAP_PHY10nm) {
+		/* RX_Hibern8Time_Capability */
+		ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x0092, 0x4), 0xA);
+		/* RX_Hibern8Time_Capability */
+		ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x0092, 0x5), 0xA);
+		/* RX_Min_ActivateTime */
+		ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x008f, 0x4), 0xA);
+		/* RX_Min_ActivateTime */
+		ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x008f, 0x5), 0xA);
+ } else {
+ /* Tactive RX */
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x008F, 0x4), 0x7);
+ /* Tactive RX */
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x008F, 0x5), 0x7);
+ }
+
+ /* Gear3 Synclength */
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x0095, 0x4), 0x4F);
+ /* Gear3 Synclength */
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x0095, 0x5), 0x4F);
+ /* Gear2 Synclength */
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x0094, 0x4), 0x4F);
+ /* Gear2 Synclength */
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x0094, 0x5), 0x4F);
+ /* Gear1 Synclength */
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x008B, 0x4), 0x4F);
+ /* Gear1 Synclength */
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x008B, 0x5), 0x4F);
+ /* Thibernate Tx */
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x000F, 0x0), 0x5);
+ /* Thibernate Tx */
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x000F, 0x1), 0x5);
+
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0xD085, 0x0), 0x1);
+ /* Unipro VS_mphy_disable */
+ ufshcd_dme_get(hba, UIC_ARG_MIB_SEL(0xD0C1, 0x0), &value);
+ if (value != 0x1)
+ dev_info(hba->dev,
+			"Warning: Unipro VS_mphy_disable is 0x%x\n", value);
+
+ /* Unipro VS_mphy_disable */
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0xD0C1, 0x0), 0x0);
+ err = ufs_hisi_check_hibern8(hba);
+ if (err)
+ dev_err(hba->dev, "ufs_hisi_check_hibern8 error\n");
+
+ if (!(host->caps & UFS_HISI_CAP_PHY10nm))
+ ufshcd_writel(hba, UFS_HCLKDIV_NORMAL_VALUE, UFS_REG_HCLKDIV);
+
+ /* disable auto H8 */
+ reg = ufshcd_readl(hba, REG_AUTO_HIBERNATE_IDLE_TIMER);
+ reg = reg & (~UFS_AHIT_AH8ITV_MASK);
+ ufshcd_writel(hba, reg, REG_AUTO_HIBERNATE_IDLE_TIMER);
+
+ /* Unipro PA_Local_TX_LCC_Enable */
+ ufshcd_disable_host_tx_lcc(hba);
+ /* close Unipro VS_Mk2ExtnSupport */
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0xD0AB, 0x0), 0x0);
+ ufshcd_dme_get(hba, UIC_ARG_MIB_SEL(0xD0AB, 0x0), &value);
+ if (value != 0) {
+		/* closing VS_Mk2ExtnSupport did not take effect */
+ dev_info(hba->dev, "WARN: close VS_Mk2ExtnSupport failed\n");
+ }
+
+ return err;
+}
+
+static int ufs_hisi_link_startup_post_change(struct ufs_hba *hba)
+{
+ struct ufs_hisi_host *host = ufshcd_get_variant(hba);
+
+ /* Unipro DL_AFC0CreditThreshold */
+ ufshcd_dme_set(hba, UIC_ARG_MIB(0x2044), 0x0);
+ /* Unipro DL_TC0OutAckThreshold */
+ ufshcd_dme_set(hba, UIC_ARG_MIB(0x2045), 0x0);
+ /* Unipro DL_TC0TXFCThreshold */
+ ufshcd_dme_set(hba, UIC_ARG_MIB(0x2040), 0x9);
+
+ /* not bypass ufs clk gate */
+ ufs_sys_ctrl_clr_bits(host, MASK_UFS_CLK_GATE_BYPASS,
+ CLOCK_GATE_BYPASS);
+ ufs_sys_ctrl_clr_bits(host, MASK_UFS_SYSCRTL_BYPASS,
+ UFS_SYSCTRL);
+
+ /* select received symbol cnt */
+ ufshcd_dme_set(hba, UIC_ARG_MIB(0xd09a), 0x80000000);
+ /* reset counter0 and enable */
+ ufshcd_dme_set(hba, UIC_ARG_MIB(0xd09c), 0x00000005);
+
+ return 0;
+}
+
+static int ufs_hisi_link_startup_notify(struct ufs_hba *hba,
+ enum ufs_notify_change_status status)
+{
+ int err = 0;
+
+ switch (status) {
+ case PRE_CHANGE:
+ err = ufs_hisi_link_startup_pre_change(hba);
+ break;
+ case POST_CHANGE:
+ err = ufs_hisi_link_startup_post_change(hba);
+ break;
+ default:
+ break;
+ }
+
+ return err;
+}
+
+static void ufs_hisi_set_dev_cap(struct ufs_dev_params *hisi_param)
+{
+ ufshcd_init_pwr_dev_param(hisi_param);
+}
+
+static void ufs_hisi_pwr_change_pre_change(struct ufs_hba *hba)
+{
+ struct ufs_hisi_host *host = ufshcd_get_variant(hba);
+
+ if (host->caps & UFS_HISI_CAP_PHY10nm) {
+ /*
+ * Boston platform need to set SaveConfigTime to 0x13,
+ * and change sync length to maximum value
+ */
+ /* VS_DebugSaveConfigTime */
+ ufshcd_dme_set(hba, UIC_ARG_MIB((u32)0xD0A0), 0x13);
+ /* g1 sync length */
+ ufshcd_dme_set(hba, UIC_ARG_MIB((u32)0x1552), 0x4f);
+ /* g2 sync length */
+ ufshcd_dme_set(hba, UIC_ARG_MIB((u32)0x1554), 0x4f);
+ /* g3 sync length */
+ ufshcd_dme_set(hba, UIC_ARG_MIB((u32)0x1556), 0x4f);
+ /* PA_Hibern8Time */
+ ufshcd_dme_set(hba, UIC_ARG_MIB((u32)0x15a7), 0xA);
+ /* PA_Tactivate */
+ ufshcd_dme_set(hba, UIC_ARG_MIB((u32)0x15a8), 0xA);
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0xd085, 0x0), 0x01);
+ }
+
+ if (hba->dev_quirks & UFS_DEVICE_QUIRK_HOST_VS_DEBUGSAVECONFIGTIME) {
+		pr_info("ufs device requires VS_DebugSaveConfigTime = 0x10\n");
+ /* VS_DebugSaveConfigTime */
+ ufshcd_dme_set(hba, UIC_ARG_MIB(0xD0A0), 0x10);
+ /* sync length */
+ ufshcd_dme_set(hba, UIC_ARG_MIB(0x1556), 0x48);
+ }
+
+ /* update */
+ ufshcd_dme_set(hba, UIC_ARG_MIB(0x15A8), 0x1);
+ /* PA_TxSkip */
+ ufshcd_dme_set(hba, UIC_ARG_MIB(0x155c), 0x0);
+	/* PA_PWRModeUserData0 = 8191, default is 0 */
+	ufshcd_dme_set(hba, UIC_ARG_MIB(0x15b0), 8191);
+	/* PA_PWRModeUserData1 = 65535, default is 0 */
+	ufshcd_dme_set(hba, UIC_ARG_MIB(0x15b1), 65535);
+	/* PA_PWRModeUserData2 = 32767, default is 0 */
+	ufshcd_dme_set(hba, UIC_ARG_MIB(0x15b2), 32767);
+	/* DME_FC0ProtectionTimeOutVal = 8191, default is 0 */
+	ufshcd_dme_set(hba, UIC_ARG_MIB(0xd041), 8191);
+	/* DME_TC0ReplayTimeOutVal = 65535, default is 0 */
+	ufshcd_dme_set(hba, UIC_ARG_MIB(0xd042), 65535);
+	/* DME_AFC0ReqTimeOutVal = 32767, default is 0 */
+	ufshcd_dme_set(hba, UIC_ARG_MIB(0xd043), 32767);
+	/* PA_PWRModeUserData3 = 8191, default is 0 */
+	ufshcd_dme_set(hba, UIC_ARG_MIB(0x15b3), 8191);
+	/* PA_PWRModeUserData4 = 65535, default is 0 */
+	ufshcd_dme_set(hba, UIC_ARG_MIB(0x15b4), 65535);
+	/* PA_PWRModeUserData5 = 32767, default is 0 */
+	ufshcd_dme_set(hba, UIC_ARG_MIB(0x15b5), 32767);
+	/* DME_FC1ProtectionTimeOutVal = 8191, default is 0 */
+	ufshcd_dme_set(hba, UIC_ARG_MIB(0xd044), 8191);
+	/* DME_TC1ReplayTimeOutVal = 65535, default is 0 */
+	ufshcd_dme_set(hba, UIC_ARG_MIB(0xd045), 65535);
+	/* DME_AFC1ReqTimeOutVal = 32767, default is 0 */
+	ufshcd_dme_set(hba, UIC_ARG_MIB(0xd046), 32767);
+}
+
+static int ufs_hisi_pwr_change_notify(struct ufs_hba *hba,
+ enum ufs_notify_change_status status,
+ struct ufs_pa_layer_attr *dev_max_params,
+ struct ufs_pa_layer_attr *dev_req_params)
+{
+ struct ufs_dev_params ufs_hisi_cap;
+ int ret = 0;
+
+ if (!dev_req_params) {
+ dev_err(hba->dev,
+ "%s: incoming dev_req_params is NULL\n", __func__);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ switch (status) {
+ case PRE_CHANGE:
+ ufs_hisi_set_dev_cap(&ufs_hisi_cap);
+ ret = ufshcd_get_pwr_dev_param(&ufs_hisi_cap,
+ dev_max_params, dev_req_params);
+ if (ret) {
+ dev_err(hba->dev,
+ "%s: failed to determine capabilities\n", __func__);
+ goto out;
+ }
+
+ ufs_hisi_pwr_change_pre_change(hba);
+ break;
+ case POST_CHANGE:
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+out:
+ return ret;
+}
+
+static int ufs_hisi_suspend_prepare(struct device *dev)
+{
+	/* RPM and SPM are different. Refer to ufs_hisi_suspend(). */
+ return __ufshcd_suspend_prepare(dev, false);
+}
+
+static int ufs_hisi_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op,
+ enum ufs_notify_change_status status)
+{
+ struct ufs_hisi_host *host = ufshcd_get_variant(hba);
+
+ if (status == PRE_CHANGE)
+ return 0;
+
+ if (pm_op == UFS_RUNTIME_PM)
+ return 0;
+
+ if (host->in_suspend) {
+ WARN_ON(1);
+ return 0;
+ }
+
+ ufs_sys_ctrl_clr_bits(host, BIT_SYSCTRL_REF_CLOCK_EN, PHY_CLK_CTRL);
+ udelay(10);
+ /* set ref_dig_clk override of PHY PCS to 0 */
+ ufs_sys_ctrl_writel(host, 0x00100000, UFS_DEVICE_RESET_CTRL);
+
+ host->in_suspend = true;
+
+ return 0;
+}
+
+static int ufs_hisi_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
+{
+ struct ufs_hisi_host *host = ufshcd_get_variant(hba);
+
+ if (!host->in_suspend)
+ return 0;
+
+ /* set ref_dig_clk override of PHY PCS to 1 */
+ ufs_sys_ctrl_writel(host, 0x00100010, UFS_DEVICE_RESET_CTRL);
+ udelay(10);
+ ufs_sys_ctrl_set_bits(host, BIT_SYSCTRL_REF_CLOCK_EN, PHY_CLK_CTRL);
+
+ host->in_suspend = false;
+ return 0;
+}
+
+static int ufs_hisi_get_resource(struct ufs_hisi_host *host)
+{
+ struct device *dev = host->hba->dev;
+ struct platform_device *pdev = to_platform_device(dev);
+
+ /* get resource of ufs sys ctrl */
+ host->ufs_sys_ctrl = devm_platform_ioremap_resource(pdev, 1);
+ return PTR_ERR_OR_ZERO(host->ufs_sys_ctrl);
+}
+
+static void ufs_hisi_set_pm_lvl(struct ufs_hba *hba)
+{
+ hba->rpm_lvl = UFS_PM_LVL_1;
+ hba->spm_lvl = UFS_PM_LVL_3;
+}
+
+/**
+ * ufs_hisi_init_common
+ * @hba: host controller instance
+ */
+static int ufs_hisi_init_common(struct ufs_hba *hba)
+{
+ int err = 0;
+ struct device *dev = hba->dev;
+ struct ufs_hisi_host *host;
+
+ host = devm_kzalloc(dev, sizeof(*host), GFP_KERNEL);
+ if (!host)
+ return -ENOMEM;
+
+ host->hba = hba;
+ ufshcd_set_variant(hba, host);
+
+ host->rst = devm_reset_control_get(dev, "rst");
+ if (IS_ERR(host->rst)) {
+ dev_err(dev, "%s: failed to get reset control\n", __func__);
+ err = PTR_ERR(host->rst);
+ goto error;
+ }
+
+ ufs_hisi_set_pm_lvl(hba);
+
+ err = ufs_hisi_get_resource(host);
+ if (err)
+ goto error;
+
+ return 0;
+
+error:
+ ufshcd_set_variant(hba, NULL);
+ return err;
+}
+
+static int ufs_hi3660_init(struct ufs_hba *hba)
+{
+ int ret = 0;
+ struct device *dev = hba->dev;
+
+ ret = ufs_hisi_init_common(hba);
+ if (ret) {
+ dev_err(dev, "%s: ufs common init fail\n", __func__);
+ return ret;
+ }
+
+ ufs_hisi_clk_init(hba);
+
+ ufs_hisi_soc_init(hba);
+
+ return 0;
+}
+
+static int ufs_hi3670_init(struct ufs_hba *hba)
+{
+ int ret = 0;
+ struct device *dev = hba->dev;
+ struct ufs_hisi_host *host;
+
+ ret = ufs_hisi_init_common(hba);
+ if (ret) {
+ dev_err(dev, "%s: ufs common init fail\n", __func__);
+ return ret;
+ }
+
+ ufs_hisi_clk_init(hba);
+
+ ufs_hisi_soc_init(hba);
+
+ /* Add cap for 10nm PHY variant on HI3670 SoC */
+ host = ufshcd_get_variant(hba);
+ host->caps |= UFS_HISI_CAP_PHY10nm;
+
+ return 0;
+}
+
+static const struct ufs_hba_variant_ops ufs_hba_hi3660_vops = {
+ .name = "hi3660",
+ .init = ufs_hi3660_init,
+ .link_startup_notify = ufs_hisi_link_startup_notify,
+ .pwr_change_notify = ufs_hisi_pwr_change_notify,
+ .suspend = ufs_hisi_suspend,
+ .resume = ufs_hisi_resume,
+};
+
+static const struct ufs_hba_variant_ops ufs_hba_hi3670_vops = {
+ .name = "hi3670",
+ .init = ufs_hi3670_init,
+ .link_startup_notify = ufs_hisi_link_startup_notify,
+ .pwr_change_notify = ufs_hisi_pwr_change_notify,
+ .suspend = ufs_hisi_suspend,
+ .resume = ufs_hisi_resume,
+};
+
+static const struct of_device_id ufs_hisi_of_match[] = {
+ { .compatible = "hisilicon,hi3660-ufs", .data = &ufs_hba_hi3660_vops },
+ { .compatible = "hisilicon,hi3670-ufs", .data = &ufs_hba_hi3670_vops },
+ {},
+};
+
+MODULE_DEVICE_TABLE(of, ufs_hisi_of_match);
+
+static int ufs_hisi_probe(struct platform_device *pdev)
+{
+ const struct of_device_id *of_id;
+
+ of_id = of_match_node(ufs_hisi_of_match, pdev->dev.of_node);
+
+ return ufshcd_pltfrm_init(pdev, of_id->data);
+}
+
+static int ufs_hisi_remove(struct platform_device *pdev)
+{
+ struct ufs_hba *hba = platform_get_drvdata(pdev);
+
+ ufshcd_remove(hba);
+ return 0;
+}
+
+static const struct dev_pm_ops ufs_hisi_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(ufshcd_system_suspend, ufshcd_system_resume)
+ SET_RUNTIME_PM_OPS(ufshcd_runtime_suspend, ufshcd_runtime_resume, NULL)
+ .prepare = ufs_hisi_suspend_prepare,
+ .complete = ufshcd_resume_complete,
+};
+
+static struct platform_driver ufs_hisi_pltform = {
+ .probe = ufs_hisi_probe,
+ .remove = ufs_hisi_remove,
+ .shutdown = ufshcd_pltfrm_shutdown,
+ .driver = {
+ .name = "ufshcd-hisi",
+ .pm = &ufs_hisi_pm_ops,
+ .of_match_table = of_match_ptr(ufs_hisi_of_match),
+ },
+};
+module_platform_driver(ufs_hisi_pltform);
+
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:ufshcd-hisi");
+MODULE_DESCRIPTION("HiSilicon Hixxxx UFS Driver");
diff --git a/drivers/ufs/host/ufs-hisi.h b/drivers/ufs/host/ufs-hisi.h
new file mode 100644
index 000000000..5a90c0f4e
--- /dev/null
+++ b/drivers/ufs/host/ufs-hisi.h
@@ -0,0 +1,104 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2017, HiSilicon. All rights reserved.
+ */
+
+#ifndef UFS_HISI_H_
+#define UFS_HISI_H_
+
+#define HBRN8_POLL_TOUT_MS 1000
+
+/*
+ * ufs sysctrl specific define
+ */
+#define PSW_POWER_CTRL (0x04)
+#define PHY_ISO_EN (0x08)
+#define HC_LP_CTRL (0x0C)
+#define PHY_CLK_CTRL (0x10)
+#define PSW_CLK_CTRL (0x14)
+#define CLOCK_GATE_BYPASS (0x18)
+#define RESET_CTRL_EN (0x1C)
+#define UFS_SYSCTRL (0x5C)
+#define UFS_DEVICE_RESET_CTRL (0x60)
+
+#define BIT_UFS_PSW_ISO_CTRL (1 << 16)
+#define BIT_UFS_PSW_MTCMOS_EN (1 << 0)
+#define BIT_UFS_REFCLK_ISO_EN (1 << 16)
+#define BIT_UFS_PHY_ISO_CTRL (1 << 0)
+#define BIT_SYSCTRL_LP_ISOL_EN (1 << 16)
+#define BIT_SYSCTRL_PWR_READY (1 << 8)
+#define BIT_SYSCTRL_REF_CLOCK_EN (1 << 24)
+#define MASK_SYSCTRL_REF_CLOCK_SEL (0x3 << 8)
+#define MASK_SYSCTRL_CFG_CLOCK_FREQ (0xFF)
+#define UFS_FREQ_CFG_CLK (0x39)
+#define BIT_SYSCTRL_PSW_CLK_EN (1 << 4)
+#define MASK_UFS_CLK_GATE_BYPASS (0x3F)
+#define BIT_SYSCTRL_LP_RESET_N (1 << 0)
+#define BIT_UFS_REFCLK_SRC_SEl (1 << 0)
+#define MASK_UFS_SYSCRTL_BYPASS (0x3F << 16)
+#define MASK_UFS_DEVICE_RESET (0x1 << 16)
+#define BIT_UFS_DEVICE_RESET (0x1)
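+
+/*
+ * Illustrative sketch (not part of the original patch): the reset mask in
+ * the upper halfword is assumed to act as a write-enable for the reset bit
+ * in the lower halfword, so the two are written together, e.g.:
+ *
+ *   ufs_sys_ctrl_writel(host, MASK_UFS_DEVICE_RESET | BIT_UFS_DEVICE_RESET,
+ *                       UFS_DEVICE_RESET_CTRL);
+ */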
+
+/*
+ * M-TX Configuration Attributes for Hixxxx
+ */
+#define MPHY_TX_FSM_STATE 0x41
+#define TX_FSM_HIBERN8 0x1
+
+/*
+ * Hixxxx UFS HC specific Registers
+ */
+enum {
+ UFS_REG_OCPTHRTL = 0xc0,
+ UFS_REG_OOCPR = 0xc4,
+
+ UFS_REG_CDACFG = 0xd0,
+ UFS_REG_CDATX1 = 0xd4,
+ UFS_REG_CDATX2 = 0xd8,
+ UFS_REG_CDARX1 = 0xdc,
+ UFS_REG_CDARX2 = 0xe0,
+ UFS_REG_CDASTA = 0xe4,
+
+ UFS_REG_LBMCFG = 0xf0,
+ UFS_REG_LBMSTA = 0xf4,
+ UFS_REG_UFSMODE = 0xf8,
+
+ UFS_REG_HCLKDIV = 0xfc,
+};
+
+/* AHIT - Auto-Hibernate Idle Timer */
+#define UFS_AHIT_AH8ITV_MASK 0x3FF
+
+/* REG UFS_REG_HCLKDIV definition */
+#define UFS_HCLKDIV_NORMAL_VALUE 0xE4
+
+/* vendor specific pre-defined parameters */
+#define SLOW 1
+#define FAST 2
+
+#define UFS_HISI_CAP_RESERVED BIT(0)
+#define UFS_HISI_CAP_PHY10nm BIT(1)
+
+struct ufs_hisi_host {
+ struct ufs_hba *hba;
+ void __iomem *ufs_sys_ctrl;
+
+ struct reset_control *rst;
+
+ uint64_t caps;
+
+ bool in_suspend;
+};
+
+#define ufs_sys_ctrl_writel(host, val, reg) \
+ writel((val), (host)->ufs_sys_ctrl + (reg))
+#define ufs_sys_ctrl_readl(host, reg) readl((host)->ufs_sys_ctrl + (reg))
+#define ufs_sys_ctrl_set_bits(host, mask, reg) \
+ ufs_sys_ctrl_writel( \
+ (host), ((mask) | (ufs_sys_ctrl_readl((host), (reg)))), (reg))
+#define ufs_sys_ctrl_clr_bits(host, mask, reg) \
+ ufs_sys_ctrl_writel((host), \
+ ((~(mask)) & (ufs_sys_ctrl_readl((host), (reg)))), \
+ (reg))
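+
+/*
+ * Usage sketch (illustrative): the helpers above implement a
+ * read-modify-write of a UFS sysctrl register, e.g. to power on the PSW:
+ *
+ *   ufs_sys_ctrl_set_bits(host, BIT_UFS_PSW_MTCMOS_EN, PSW_POWER_CTRL);
+ */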
+
+#endif /* UFS_HISI_H_ */
diff --git a/drivers/ufs/host/ufs-mediatek-trace.h b/drivers/ufs/host/ufs-mediatek-trace.h
new file mode 100644
index 000000000..b5f2ec314
--- /dev/null
+++ b/drivers/ufs/host/ufs-mediatek-trace.h
@@ -0,0 +1,59 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2020 MediaTek Inc.
+ */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM ufs_mtk
+
+#if !defined(_TRACE_EVENT_UFS_MEDIATEK_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_EVENT_UFS_MEDIATEK_H
+
+#include <linux/tracepoint.h>
+
+TRACE_EVENT(ufs_mtk_event,
+ TP_PROTO(unsigned int type, unsigned int data),
+ TP_ARGS(type, data),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, type)
+ __field(unsigned int, data)
+ ),
+
+ TP_fast_assign(
+ __entry->type = type;
+ __entry->data = data;
+ ),
+
+ TP_printk("ufs: event=%u data=%u",
+ __entry->type, __entry->data)
+);
+
+TRACE_EVENT(ufs_mtk_clk_scale,
+ TP_PROTO(const char *name, bool scale_up, unsigned long clk_rate),
+ TP_ARGS(name, scale_up, clk_rate),
+
+ TP_STRUCT__entry(
+ __field(const char*, name)
+ __field(bool, scale_up)
+ __field(unsigned long, clk_rate)
+ ),
+
+ TP_fast_assign(
+ __entry->name = name;
+ __entry->scale_up = scale_up;
+ __entry->clk_rate = clk_rate;
+ ),
+
+ TP_printk("ufs: clk (%s) scaled %s @ %lu",
+ __entry->name,
+ __entry->scale_up ? "up" : "down",
+ __entry->clk_rate)
+);
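+
+/*
+ * These tracepoints are emitted from ufs-mediatek.c, e.g.:
+ *
+ *   trace_ufs_mtk_event(evt, val);
+ *   trace_ufs_mtk_clk_scale(clki->name, scale_up, clk_get_rate(clki->clk));
+ */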
+
+#endif
+
+#undef TRACE_INCLUDE_PATH
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_PATH ../../drivers/ufs/host
+#define TRACE_INCLUDE_FILE ufs-mediatek-trace
+#include <trace/define_trace.h>
diff --git a/drivers/ufs/host/ufs-mediatek.c b/drivers/ufs/host/ufs-mediatek.c
new file mode 100644
index 000000000..7309f3f87
--- /dev/null
+++ b/drivers/ufs/host/ufs-mediatek.c
@@ -0,0 +1,1669 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2019 MediaTek Inc.
+ * Authors:
+ * Stanley Chu <stanley.chu@mediatek.com>
+ * Peter Wang <peter.wang@mediatek.com>
+ */
+
+#include <linux/arm-smccc.h>
+#include <linux/bitfield.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
+#include <linux/pm_qos.h>
+#include <linux/regulator/consumer.h>
+#include <linux/reset.h>
+#include <linux/soc/mediatek/mtk_sip_svc.h>
+
+#include <ufs/ufshcd.h>
+#include "ufshcd-pltfrm.h"
+#include <ufs/ufs_quirks.h>
+#include <ufs/unipro.h>
+#include "ufs-mediatek.h"
+
+#define CREATE_TRACE_POINTS
+#include "ufs-mediatek-trace.h"
+
+static const struct ufs_dev_quirk ufs_mtk_dev_fixups[] = {
+ { .wmanufacturerid = UFS_ANY_VENDOR,
+ .model = UFS_ANY_MODEL,
+ .quirk = UFS_DEVICE_QUIRK_DELAY_AFTER_LPM |
+ UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM },
+ { .wmanufacturerid = UFS_VENDOR_SKHYNIX,
+ .model = "H9HQ21AFAMZDAR",
+ .quirk = UFS_DEVICE_QUIRK_SUPPORT_EXTENDED_FEATURES },
+ {}
+};
+
+static const struct of_device_id ufs_mtk_of_match[] = {
+ { .compatible = "mediatek,mt8183-ufshci" },
+ {},
+};
+
+/*
+ * Details of UIC Errors
+ */
+static const char *const ufs_uic_err_str[] = {
+ "PHY Adapter Layer",
+ "Data Link Layer",
+ "Network Link Layer",
+ "Transport Link Layer",
+ "DME"
+};
+
+static const char *const ufs_uic_pa_err_str[] = {
+ "PHY error on Lane 0",
+ "PHY error on Lane 1",
+ "PHY error on Lane 2",
+ "PHY error on Lane 3",
+ "Generic PHY Adapter Error. This should be the LINERESET indication"
+};
+
+static const char *const ufs_uic_dl_err_str[] = {
+ "NAC_RECEIVED",
+ "TCx_REPLAY_TIMER_EXPIRED",
+ "AFCx_REQUEST_TIMER_EXPIRED",
+ "FCx_PROTECTION_TIMER_EXPIRED",
+ "CRC_ERROR",
+ "RX_BUFFER_OVERFLOW",
+ "MAX_FRAME_LENGTH_EXCEEDED",
+ "WRONG_SEQUENCE_NUMBER",
+ "AFC_FRAME_SYNTAX_ERROR",
+ "NAC_FRAME_SYNTAX_ERROR",
+ "EOF_SYNTAX_ERROR",
+ "FRAME_SYNTAX_ERROR",
+ "BAD_CTRL_SYMBOL_TYPE",
+ "PA_INIT_ERROR",
+ "PA_ERROR_IND_RECEIVED",
+ "PA_INIT"
+};
+
+static bool ufs_mtk_is_boost_crypt_enabled(struct ufs_hba *hba)
+{
+ struct ufs_mtk_host *host = ufshcd_get_variant(hba);
+
+ return !!(host->caps & UFS_MTK_CAP_BOOST_CRYPT_ENGINE);
+}
+
+static bool ufs_mtk_is_va09_supported(struct ufs_hba *hba)
+{
+ struct ufs_mtk_host *host = ufshcd_get_variant(hba);
+
+ return !!(host->caps & UFS_MTK_CAP_VA09_PWR_CTRL);
+}
+
+static bool ufs_mtk_is_broken_vcc(struct ufs_hba *hba)
+{
+ struct ufs_mtk_host *host = ufshcd_get_variant(hba);
+
+ return !!(host->caps & UFS_MTK_CAP_BROKEN_VCC);
+}
+
+static bool ufs_mtk_is_pmc_via_fastauto(struct ufs_hba *hba)
+{
+ struct ufs_mtk_host *host = ufshcd_get_variant(hba);
+
+ return (host->caps & UFS_MTK_CAP_PMC_VIA_FASTAUTO);
+}
+
+static void ufs_mtk_cfg_unipro_cg(struct ufs_hba *hba, bool enable)
+{
+ u32 tmp;
+
+ if (enable) {
+ ufshcd_dme_get(hba,
+ UIC_ARG_MIB(VS_SAVEPOWERCONTROL), &tmp);
+ tmp = tmp |
+ (1 << RX_SYMBOL_CLK_GATE_EN) |
+ (1 << SYS_CLK_GATE_EN) |
+ (1 << TX_CLK_GATE_EN);
+ ufshcd_dme_set(hba,
+ UIC_ARG_MIB(VS_SAVEPOWERCONTROL), tmp);
+
+ ufshcd_dme_get(hba,
+ UIC_ARG_MIB(VS_DEBUGCLOCKENABLE), &tmp);
+ tmp = tmp & ~(1 << TX_SYMBOL_CLK_REQ_FORCE);
+ ufshcd_dme_set(hba,
+ UIC_ARG_MIB(VS_DEBUGCLOCKENABLE), tmp);
+ } else {
+ ufshcd_dme_get(hba,
+ UIC_ARG_MIB(VS_SAVEPOWERCONTROL), &tmp);
+ tmp = tmp & ~((1 << RX_SYMBOL_CLK_GATE_EN) |
+ (1 << SYS_CLK_GATE_EN) |
+ (1 << TX_CLK_GATE_EN));
+ ufshcd_dme_set(hba,
+ UIC_ARG_MIB(VS_SAVEPOWERCONTROL), tmp);
+
+ ufshcd_dme_get(hba,
+ UIC_ARG_MIB(VS_DEBUGCLOCKENABLE), &tmp);
+ tmp = tmp | (1 << TX_SYMBOL_CLK_REQ_FORCE);
+ ufshcd_dme_set(hba,
+ UIC_ARG_MIB(VS_DEBUGCLOCKENABLE), tmp);
+ }
+}
+
+static void ufs_mtk_crypto_enable(struct ufs_hba *hba)
+{
+ struct arm_smccc_res res;
+
+ ufs_mtk_crypto_ctrl(res, 1);
+ if (res.a0) {
+ dev_info(hba->dev, "%s: crypto enable failed, err: %lu\n",
+ __func__, res.a0);
+ hba->caps &= ~UFSHCD_CAP_CRYPTO;
+ }
+}
+
+static void ufs_mtk_host_reset(struct ufs_hba *hba)
+{
+ struct ufs_mtk_host *host = ufshcd_get_variant(hba);
+
+ reset_control_assert(host->hci_reset);
+ reset_control_assert(host->crypto_reset);
+ reset_control_assert(host->unipro_reset);
+
+ usleep_range(100, 110);
+
+ reset_control_deassert(host->unipro_reset);
+ reset_control_deassert(host->crypto_reset);
+ reset_control_deassert(host->hci_reset);
+}
+
+static void ufs_mtk_init_reset_control(struct ufs_hba *hba,
+ struct reset_control **rc,
+ char *str)
+{
+ *rc = devm_reset_control_get(hba->dev, str);
+ if (IS_ERR(*rc)) {
+ dev_info(hba->dev, "Failed to get reset control %s: %ld\n",
+ str, PTR_ERR(*rc));
+ *rc = NULL;
+ }
+}
+
+static void ufs_mtk_init_reset(struct ufs_hba *hba)
+{
+ struct ufs_mtk_host *host = ufshcd_get_variant(hba);
+
+ ufs_mtk_init_reset_control(hba, &host->hci_reset,
+ "hci_rst");
+ ufs_mtk_init_reset_control(hba, &host->unipro_reset,
+ "unipro_rst");
+ ufs_mtk_init_reset_control(hba, &host->crypto_reset,
+ "crypto_rst");
+}
+
+static int ufs_mtk_hce_enable_notify(struct ufs_hba *hba,
+ enum ufs_notify_change_status status)
+{
+ struct ufs_mtk_host *host = ufshcd_get_variant(hba);
+
+ if (status == PRE_CHANGE) {
+ if (host->unipro_lpm) {
+ hba->vps->hba_enable_delay_us = 0;
+ } else {
+ hba->vps->hba_enable_delay_us = 600;
+ ufs_mtk_host_reset(hba);
+ }
+
+ if (hba->caps & UFSHCD_CAP_CRYPTO)
+ ufs_mtk_crypto_enable(hba);
+
+ if (host->caps & UFS_MTK_CAP_DISABLE_AH8) {
+ ufshcd_writel(hba, 0,
+ REG_AUTO_HIBERNATE_IDLE_TIMER);
+ hba->capabilities &= ~MASK_AUTO_HIBERN8_SUPPORT;
+ hba->ahit = 0;
+ }
+
+ /*
+		 * Turn on CLK_CG early to bypass the abnormal ERR_CHK
+		 * signal and prevent a host hang.
+ */
+ ufshcd_writel(hba,
+ ufshcd_readl(hba, REG_UFS_XOUFS_CTRL) | 0x80,
+ REG_UFS_XOUFS_CTRL);
+ }
+
+ return 0;
+}
+
+static int ufs_mtk_bind_mphy(struct ufs_hba *hba)
+{
+ struct ufs_mtk_host *host = ufshcd_get_variant(hba);
+ struct device *dev = hba->dev;
+ struct device_node *np = dev->of_node;
+ int err = 0;
+
+ host->mphy = devm_of_phy_get_by_index(dev, np, 0);
+
+ if (host->mphy == ERR_PTR(-EPROBE_DEFER)) {
+ /*
+		 * The UFS driver might be probed before the phy driver is.
+		 * In that case, return -EPROBE_DEFER so probing is retried.
+ */
+ err = -EPROBE_DEFER;
+ dev_info(dev,
+ "%s: required phy hasn't probed yet. err = %d\n",
+ __func__, err);
+ } else if (IS_ERR(host->mphy)) {
+ err = PTR_ERR(host->mphy);
+ if (err != -ENODEV) {
+ dev_info(dev, "%s: PHY get failed %d\n", __func__,
+ err);
+ }
+ }
+
+ if (err)
+ host->mphy = NULL;
+ /*
+ * Allow unbound mphy because not every platform needs specific
+ * mphy control.
+ */
+ if (err == -ENODEV)
+ err = 0;
+
+ return err;
+}
+
+static int ufs_mtk_setup_ref_clk(struct ufs_hba *hba, bool on)
+{
+ struct ufs_mtk_host *host = ufshcd_get_variant(hba);
+ struct arm_smccc_res res;
+ ktime_t timeout, time_checked;
+ u32 value;
+
+ if (host->ref_clk_enabled == on)
+ return 0;
+
+ ufs_mtk_ref_clk_notify(on, PRE_CHANGE, res);
+
+ if (on) {
+ ufshcd_writel(hba, REFCLK_REQUEST, REG_UFS_REFCLK_CTRL);
+ } else {
+ ufshcd_delay_us(host->ref_clk_gating_wait_us, 10);
+ ufshcd_writel(hba, REFCLK_RELEASE, REG_UFS_REFCLK_CTRL);
+ }
+
+ /* Wait for ack */
+ timeout = ktime_add_us(ktime_get(), REFCLK_REQ_TIMEOUT_US);
+ do {
+ time_checked = ktime_get();
+ value = ufshcd_readl(hba, REG_UFS_REFCLK_CTRL);
+
+		/* Wait until the ack bit equals the req bit */
+ if (((value & REFCLK_ACK) >> 1) == (value & REFCLK_REQUEST))
+ goto out;
+
+ usleep_range(100, 200);
+ } while (ktime_before(time_checked, timeout));
+
+ dev_err(hba->dev, "missing ack of refclk req, reg: 0x%x\n", value);
+
+ ufs_mtk_ref_clk_notify(host->ref_clk_enabled, POST_CHANGE, res);
+
+ return -ETIMEDOUT;
+
+out:
+ host->ref_clk_enabled = on;
+ if (on)
+ ufshcd_delay_us(host->ref_clk_ungating_wait_us, 10);
+
+ ufs_mtk_ref_clk_notify(on, POST_CHANGE, res);
+
+ return 0;
+}
+
+static void ufs_mtk_setup_ref_clk_wait_us(struct ufs_hba *hba,
+ u16 gating_us)
+{
+ struct ufs_mtk_host *host = ufshcd_get_variant(hba);
+
+ if (hba->dev_info.clk_gating_wait_us) {
+ host->ref_clk_gating_wait_us =
+ hba->dev_info.clk_gating_wait_us;
+ } else {
+ host->ref_clk_gating_wait_us = gating_us;
+ }
+
+ host->ref_clk_ungating_wait_us = REFCLK_DEFAULT_WAIT_US;
+}
+
+static void ufs_mtk_dbg_sel(struct ufs_hba *hba)
+{
+ struct ufs_mtk_host *host = ufshcd_get_variant(hba);
+
+ if (((host->ip_ver >> 16) & 0xFF) >= 0x36) {
+ ufshcd_writel(hba, 0x820820, REG_UFS_DEBUG_SEL);
+ ufshcd_writel(hba, 0x0, REG_UFS_DEBUG_SEL_B0);
+ ufshcd_writel(hba, 0x55555555, REG_UFS_DEBUG_SEL_B1);
+ ufshcd_writel(hba, 0xaaaaaaaa, REG_UFS_DEBUG_SEL_B2);
+ ufshcd_writel(hba, 0xffffffff, REG_UFS_DEBUG_SEL_B3);
+ } else {
+ ufshcd_writel(hba, 0x20, REG_UFS_DEBUG_SEL);
+ }
+}
+
+static void ufs_mtk_wait_idle_state(struct ufs_hba *hba,
+ unsigned long retry_ms)
+{
+ u64 timeout, time_checked;
+ u32 val, sm;
+ bool wait_idle;
+
+ /* cannot use plain ktime_get() in suspend */
+ timeout = ktime_get_mono_fast_ns() + retry_ms * 1000000UL;
+
+	/* allow a short settling time before the first check */
+ udelay(10);
+ wait_idle = false;
+
+ do {
+ time_checked = ktime_get_mono_fast_ns();
+ ufs_mtk_dbg_sel(hba);
+ val = ufshcd_readl(hba, REG_UFS_PROBE);
+
+ sm = val & 0x1f;
+
+ /*
+		 * If the state machine is anywhere between H8 enter and
+		 * H8 exit, wait until it returns to the idle state.
+ */
+ if ((sm >= VS_HIB_ENTER) && (sm <= VS_HIB_EXIT)) {
+ wait_idle = true;
+ udelay(50);
+ continue;
+ } else if (!wait_idle)
+ break;
+
+ if (wait_idle && (sm == VS_HCE_BASE))
+ break;
+ } while (time_checked < timeout);
+
+ if (wait_idle && sm != VS_HCE_BASE)
+ dev_info(hba->dev, "wait idle tmo: 0x%x\n", val);
+}
+
+static int ufs_mtk_wait_link_state(struct ufs_hba *hba, u32 state,
+ unsigned long max_wait_ms)
+{
+ ktime_t timeout, time_checked;
+ u32 val;
+
+ timeout = ktime_add_ms(ktime_get(), max_wait_ms);
+ do {
+ time_checked = ktime_get();
+ ufs_mtk_dbg_sel(hba);
+ val = ufshcd_readl(hba, REG_UFS_PROBE);
+ val = val >> 28;
+
+ if (val == state)
+ return 0;
+
+ /* Sleep for max. 200us */
+ usleep_range(100, 200);
+ } while (ktime_before(time_checked, timeout));
+
+ if (val == state)
+ return 0;
+
+ return -ETIMEDOUT;
+}
+
+static int ufs_mtk_mphy_power_on(struct ufs_hba *hba, bool on)
+{
+ struct ufs_mtk_host *host = ufshcd_get_variant(hba);
+ struct phy *mphy = host->mphy;
+ struct arm_smccc_res res;
+ int ret = 0;
+
+ if (!mphy || !(on ^ host->mphy_powered_on))
+ return 0;
+
+ if (on) {
+ if (ufs_mtk_is_va09_supported(hba)) {
+ ret = regulator_enable(host->reg_va09);
+ if (ret < 0)
+ goto out;
+			/* wait 200 us to stabilize VA09 */
+ usleep_range(200, 210);
+ ufs_mtk_va09_pwr_ctrl(res, 1);
+ }
+ phy_power_on(mphy);
+ } else {
+ phy_power_off(mphy);
+ if (ufs_mtk_is_va09_supported(hba)) {
+ ufs_mtk_va09_pwr_ctrl(res, 0);
+ ret = regulator_disable(host->reg_va09);
+ if (ret < 0)
+ goto out;
+ }
+ }
+out:
+ if (ret) {
+ dev_info(hba->dev,
+ "failed to %s va09: %d\n",
+ on ? "enable" : "disable",
+ ret);
+ } else {
+ host->mphy_powered_on = on;
+ }
+
+ return ret;
+}
+
+static int ufs_mtk_get_host_clk(struct device *dev, const char *name,
+ struct clk **clk_out)
+{
+ struct clk *clk;
+ int err = 0;
+
+ clk = devm_clk_get(dev, name);
+ if (IS_ERR(clk))
+ err = PTR_ERR(clk);
+ else
+ *clk_out = clk;
+
+ return err;
+}
+
+static void ufs_mtk_boost_crypt(struct ufs_hba *hba, bool boost)
+{
+ struct ufs_mtk_host *host = ufshcd_get_variant(hba);
+ struct ufs_mtk_crypt_cfg *cfg;
+ struct regulator *reg;
+ int volt, ret;
+
+ if (!ufs_mtk_is_boost_crypt_enabled(hba))
+ return;
+
+ cfg = host->crypt;
+ volt = cfg->vcore_volt;
+ reg = cfg->reg_vcore;
+
+ ret = clk_prepare_enable(cfg->clk_crypt_mux);
+ if (ret) {
+ dev_info(hba->dev, "clk_prepare_enable(): %d\n",
+ ret);
+ return;
+ }
+
+ if (boost) {
+ ret = regulator_set_voltage(reg, volt, INT_MAX);
+ if (ret) {
+ dev_info(hba->dev,
+ "failed to set vcore to %d\n", volt);
+ goto out;
+ }
+
+ ret = clk_set_parent(cfg->clk_crypt_mux,
+ cfg->clk_crypt_perf);
+ if (ret) {
+ dev_info(hba->dev,
+ "failed to set clk_crypt_perf\n");
+ regulator_set_voltage(reg, 0, INT_MAX);
+ goto out;
+ }
+ } else {
+ ret = clk_set_parent(cfg->clk_crypt_mux,
+ cfg->clk_crypt_lp);
+ if (ret) {
+ dev_info(hba->dev,
+ "failed to set clk_crypt_lp\n");
+ goto out;
+ }
+
+ ret = regulator_set_voltage(reg, 0, INT_MAX);
+ if (ret) {
+ dev_info(hba->dev,
+ "failed to set vcore to MIN\n");
+ }
+ }
+out:
+ clk_disable_unprepare(cfg->clk_crypt_mux);
+}
+
+static int ufs_mtk_init_host_clk(struct ufs_hba *hba, const char *name,
+ struct clk **clk)
+{
+ int ret;
+
+ ret = ufs_mtk_get_host_clk(hba->dev, name, clk);
+ if (ret) {
+ dev_info(hba->dev, "%s: failed to get %s: %d", __func__,
+ name, ret);
+ }
+
+ return ret;
+}
+
+static void ufs_mtk_init_boost_crypt(struct ufs_hba *hba)
+{
+ struct ufs_mtk_host *host = ufshcd_get_variant(hba);
+ struct ufs_mtk_crypt_cfg *cfg;
+ struct device *dev = hba->dev;
+ struct regulator *reg;
+ u32 volt;
+
+ host->crypt = devm_kzalloc(dev, sizeof(*(host->crypt)),
+ GFP_KERNEL);
+ if (!host->crypt)
+ goto disable_caps;
+
+ reg = devm_regulator_get_optional(dev, "dvfsrc-vcore");
+ if (IS_ERR(reg)) {
+ dev_info(dev, "failed to get dvfsrc-vcore: %ld",
+ PTR_ERR(reg));
+ goto disable_caps;
+ }
+
+ if (of_property_read_u32(dev->of_node, "boost-crypt-vcore-min",
+ &volt)) {
+ dev_info(dev, "failed to get boost-crypt-vcore-min");
+ goto disable_caps;
+ }
+
+ cfg = host->crypt;
+ if (ufs_mtk_init_host_clk(hba, "crypt_mux",
+ &cfg->clk_crypt_mux))
+ goto disable_caps;
+
+ if (ufs_mtk_init_host_clk(hba, "crypt_lp",
+ &cfg->clk_crypt_lp))
+ goto disable_caps;
+
+ if (ufs_mtk_init_host_clk(hba, "crypt_perf",
+ &cfg->clk_crypt_perf))
+ goto disable_caps;
+
+ cfg->reg_vcore = reg;
+ cfg->vcore_volt = volt;
+ host->caps |= UFS_MTK_CAP_BOOST_CRYPT_ENGINE;
+
+disable_caps:
+ return;
+}
+
+static void ufs_mtk_init_va09_pwr_ctrl(struct ufs_hba *hba)
+{
+ struct ufs_mtk_host *host = ufshcd_get_variant(hba);
+
+ host->reg_va09 = regulator_get(hba->dev, "va09");
+ if (IS_ERR(host->reg_va09))
+ dev_info(hba->dev, "failed to get va09");
+ else
+ host->caps |= UFS_MTK_CAP_VA09_PWR_CTRL;
+}
+
+static void ufs_mtk_init_host_caps(struct ufs_hba *hba)
+{
+ struct ufs_mtk_host *host = ufshcd_get_variant(hba);
+ struct device_node *np = hba->dev->of_node;
+
+ if (of_property_read_bool(np, "mediatek,ufs-boost-crypt"))
+ ufs_mtk_init_boost_crypt(hba);
+
+ if (of_property_read_bool(np, "mediatek,ufs-support-va09"))
+ ufs_mtk_init_va09_pwr_ctrl(hba);
+
+ if (of_property_read_bool(np, "mediatek,ufs-disable-ah8"))
+ host->caps |= UFS_MTK_CAP_DISABLE_AH8;
+
+ if (of_property_read_bool(np, "mediatek,ufs-broken-vcc"))
+ host->caps |= UFS_MTK_CAP_BROKEN_VCC;
+
+ if (of_property_read_bool(np, "mediatek,ufs-pmc-via-fastauto"))
+ host->caps |= UFS_MTK_CAP_PMC_VIA_FASTAUTO;
+
+ dev_info(hba->dev, "caps: 0x%x", host->caps);
+}
+
+static void ufs_mtk_boost_pm_qos(struct ufs_hba *hba, bool boost)
+{
+ struct ufs_mtk_host *host = ufshcd_get_variant(hba);
+
+ if (!host || !host->pm_qos_init)
+ return;
+
+ cpu_latency_qos_update_request(&host->pm_qos_req,
+ boost ? 0 : PM_QOS_DEFAULT_VALUE);
+}
+
+static void ufs_mtk_scale_perf(struct ufs_hba *hba, bool scale_up)
+{
+ ufs_mtk_boost_crypt(hba, scale_up);
+ ufs_mtk_boost_pm_qos(hba, scale_up);
+}
+
+static void ufs_mtk_pwr_ctrl(struct ufs_hba *hba, bool on)
+{
+ struct ufs_mtk_host *host = ufshcd_get_variant(hba);
+
+ if (on) {
+ phy_power_on(host->mphy);
+ ufs_mtk_setup_ref_clk(hba, on);
+ if (!ufshcd_is_clkscaling_supported(hba))
+ ufs_mtk_scale_perf(hba, on);
+ } else {
+ if (!ufshcd_is_clkscaling_supported(hba))
+ ufs_mtk_scale_perf(hba, on);
+ ufs_mtk_setup_ref_clk(hba, on);
+ phy_power_off(host->mphy);
+ }
+}
+
+/**
+ * ufs_mtk_setup_clocks - enable/disable clocks
+ * @hba: host controller instance
+ * @on: If true, enable clocks else disable them.
+ * @status: PRE_CHANGE or POST_CHANGE notify
+ *
+ * Returns 0 on success, non-zero on failure.
+ */
+static int ufs_mtk_setup_clocks(struct ufs_hba *hba, bool on,
+ enum ufs_notify_change_status status)
+{
+ struct ufs_mtk_host *host = ufshcd_get_variant(hba);
+ bool clk_pwr_off = false;
+ int ret = 0;
+
+ /*
+	 * If ufs_mtk_init() has not completed yet, simply ignore this
+	 * call; ufs_mtk_setup_clocks() is called again from
+	 * ufs_mtk_init() once initialization is done.
+ */
+ if (!host)
+ return 0;
+
+ if (!on && status == PRE_CHANGE) {
+ if (ufshcd_is_link_off(hba)) {
+ clk_pwr_off = true;
+ } else if (ufshcd_is_link_hibern8(hba) ||
+ (!ufshcd_can_hibern8_during_gating(hba) &&
+ ufshcd_is_auto_hibern8_enabled(hba))) {
+ /*
+			 * Gate the ref-clk and power off the mphy if the
+			 * link is in OFF or Hibern8 state, entered either by
+ * ufshcd_link_state_transition().
+ */
+ ret = ufs_mtk_wait_link_state(hba,
+ VS_LINK_HIBERN8,
+ 15);
+ if (!ret)
+ clk_pwr_off = true;
+ }
+
+ if (clk_pwr_off)
+ ufs_mtk_pwr_ctrl(hba, false);
+ } else if (on && status == POST_CHANGE) {
+ ufs_mtk_pwr_ctrl(hba, true);
+ }
+
+ return ret;
+}
+
+static void ufs_mtk_get_controller_version(struct ufs_hba *hba)
+{
+ struct ufs_mtk_host *host = ufshcd_get_variant(hba);
+ int ret, ver = 0;
+
+ if (host->hw_ver.major)
+ return;
+
+ /* Set default (minimum) version anyway */
+ host->hw_ver.major = 2;
+
+ ret = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_LOCALVERINFO), &ver);
+ if (!ret) {
+ if (ver >= UFS_UNIPRO_VER_1_8) {
+ host->hw_ver.major = 3;
+ /*
+ * Fix HCI version for some platforms with
+ * incorrect version
+ */
+ if (hba->ufs_version < ufshci_version(3, 0))
+ hba->ufs_version = ufshci_version(3, 0);
+ }
+ }
+}
+
+static u32 ufs_mtk_get_ufs_hci_version(struct ufs_hba *hba)
+{
+ return hba->ufs_version;
+}
+
+/**
+ * ufs_mtk_init_clocks - Init mtk driver private clocks
+ *
+ * @hba: per adapter instance
+ */
+static void ufs_mtk_init_clocks(struct ufs_hba *hba)
+{
+ struct ufs_mtk_host *host = ufshcd_get_variant(hba);
+ struct list_head *head = &hba->clk_list_head;
+ struct ufs_mtk_clk *mclk = &host->mclk;
+ struct ufs_clk_info *clki, *clki_tmp;
+
+ /*
+ * Find private clocks and store them in struct ufs_mtk_clk.
+ * Remove "ufs_sel_min_src" and "ufs_sel_min_src" from list to avoid
+ * being switched on/off in clock gating.
+ */
+ list_for_each_entry_safe(clki, clki_tmp, head, list) {
+ if (!strcmp(clki->name, "ufs_sel")) {
+ host->mclk.ufs_sel_clki = clki;
+ } else if (!strcmp(clki->name, "ufs_sel_max_src")) {
+ host->mclk.ufs_sel_max_clki = clki;
+ clk_disable_unprepare(clki->clk);
+ list_del(&clki->list);
+ } else if (!strcmp(clki->name, "ufs_sel_min_src")) {
+ host->mclk.ufs_sel_min_clki = clki;
+ clk_disable_unprepare(clki->clk);
+ list_del(&clki->list);
+ }
+ }
+
+ if (!mclk->ufs_sel_clki || !mclk->ufs_sel_max_clki ||
+ !mclk->ufs_sel_min_clki) {
+ hba->caps &= ~UFSHCD_CAP_CLK_SCALING;
+ dev_info(hba->dev,
+ "%s: Clk-scaling not ready. Feature disabled.",
+ __func__);
+ }
+}
+
+#define MAX_VCC_NAME 30
+static int ufs_mtk_vreg_fix_vcc(struct ufs_hba *hba)
+{
+ struct ufs_vreg_info *info = &hba->vreg_info;
+ struct device_node *np = hba->dev->of_node;
+ struct device *dev = hba->dev;
+ char vcc_name[MAX_VCC_NAME];
+ struct arm_smccc_res res;
+ int err, ver;
+
+ if (hba->vreg_info.vcc)
+ return 0;
+
+ if (of_property_read_bool(np, "mediatek,ufs-vcc-by-num")) {
+ ufs_mtk_get_vcc_num(res);
+ if (res.a1 > UFS_VCC_NONE && res.a1 < UFS_VCC_MAX)
+ snprintf(vcc_name, MAX_VCC_NAME, "vcc-opt%lu", res.a1);
+ else
+ return -ENODEV;
+ } else if (of_property_read_bool(np, "mediatek,ufs-vcc-by-ver")) {
+ ver = (hba->dev_info.wspecversion & 0xF00) >> 8;
+ snprintf(vcc_name, MAX_VCC_NAME, "vcc-ufs%u", ver);
+ } else {
+ return 0;
+ }
+
+ err = ufshcd_populate_vreg(dev, vcc_name, &info->vcc);
+ if (err)
+ return err;
+
+ err = ufshcd_get_vreg(dev, info->vcc);
+ if (err)
+ return err;
+
+ err = regulator_enable(info->vcc->reg);
+ if (!err) {
+ info->vcc->enabled = true;
+ dev_info(dev, "%s: %s enabled\n", __func__, vcc_name);
+ }
+
+ return err;
+}
+
+static void ufs_mtk_vreg_fix_vccqx(struct ufs_hba *hba)
+{
+ struct ufs_vreg_info *info = &hba->vreg_info;
+ struct ufs_vreg **vreg_on, **vreg_off;
+
+ if (hba->dev_info.wspecversion >= 0x0300) {
+ vreg_on = &info->vccq;
+ vreg_off = &info->vccq2;
+ } else {
+ vreg_on = &info->vccq2;
+ vreg_off = &info->vccq;
+ }
+
+ if (*vreg_on)
+ (*vreg_on)->always_on = true;
+
+ if (*vreg_off) {
+ regulator_disable((*vreg_off)->reg);
+ devm_kfree(hba->dev, (*vreg_off)->name);
+ devm_kfree(hba->dev, *vreg_off);
+ *vreg_off = NULL;
+ }
+}
+
+/**
+ * ufs_mtk_init - find other essential mmio bases
+ * @hba: host controller instance
+ *
+ * Binds the PHY to the controller and powers up the PHY, enabling
+ * its clocks and regulators.
+ *
+ * Returns -EPROBE_DEFER if binding fails, returns negative error
+ * on phy power up failure and returns zero on success.
+ */
+static int ufs_mtk_init(struct ufs_hba *hba)
+{
+ const struct of_device_id *id;
+ struct device *dev = hba->dev;
+ struct ufs_mtk_host *host;
+ int err = 0;
+
+ host = devm_kzalloc(dev, sizeof(*host), GFP_KERNEL);
+ if (!host) {
+ err = -ENOMEM;
+ dev_info(dev, "%s: no memory for mtk ufs host\n", __func__);
+ goto out;
+ }
+
+ host->hba = hba;
+ ufshcd_set_variant(hba, host);
+
+ id = of_match_device(ufs_mtk_of_match, dev);
+ if (!id) {
+ err = -EINVAL;
+ goto out;
+ }
+
+ /* Initialize host capability */
+ ufs_mtk_init_host_caps(hba);
+
+ err = ufs_mtk_bind_mphy(hba);
+ if (err)
+ goto out_variant_clear;
+
+ ufs_mtk_init_reset(hba);
+
+ /* Enable runtime autosuspend */
+ hba->caps |= UFSHCD_CAP_RPM_AUTOSUSPEND;
+
+ /* Enable clock-gating */
+ hba->caps |= UFSHCD_CAP_CLK_GATING;
+
+ /* Enable inline encryption */
+ hba->caps |= UFSHCD_CAP_CRYPTO;
+
+ /* Enable WriteBooster */
+ hba->caps |= UFSHCD_CAP_WB_EN;
+
+	/* Enable clk scaling */
+ hba->caps |= UFSHCD_CAP_CLK_SCALING;
+
+ hba->quirks |= UFSHCI_QUIRK_SKIP_MANUAL_WB_FLUSH_CTRL;
+ hba->vps->wb_flush_threshold = UFS_WB_BUF_REMAIN_PERCENT(80);
+
+ if (host->caps & UFS_MTK_CAP_DISABLE_AH8)
+ hba->caps |= UFSHCD_CAP_HIBERN8_WITH_CLK_GATING;
+
+ ufs_mtk_init_clocks(hba);
+
+ /*
+ * ufshcd_vops_init() is invoked after
+ * ufshcd_setup_clock(true) in ufshcd_hba_init() thus
+ * phy clock setup is skipped.
+ *
+ * Enable phy clocks specifically here.
+ */
+ ufs_mtk_mphy_power_on(hba, true);
+ ufs_mtk_setup_clocks(hba, true, POST_CHANGE);
+
+ host->ip_ver = ufshcd_readl(hba, REG_UFS_MTK_IP_VER);
+
+ /* Initialize pm-qos request */
+ cpu_latency_qos_add_request(&host->pm_qos_req, PM_QOS_DEFAULT_VALUE);
+ host->pm_qos_init = true;
+
+ goto out;
+
+out_variant_clear:
+ ufshcd_set_variant(hba, NULL);
+out:
+ return err;
+}
+
+static bool ufs_mtk_pmc_via_fastauto(struct ufs_hba *hba,
+ struct ufs_pa_layer_attr *dev_req_params)
+{
+ if (!ufs_mtk_is_pmc_via_fastauto(hba))
+ return false;
+
+ if (dev_req_params->hs_rate == hba->pwr_info.hs_rate)
+ return false;
+
+ if (dev_req_params->pwr_tx != FAST_MODE &&
+ dev_req_params->gear_tx < UFS_HS_G4)
+ return false;
+
+ if (dev_req_params->pwr_rx != FAST_MODE &&
+ dev_req_params->gear_rx < UFS_HS_G4)
+ return false;
+
+ return true;
+}
+
+static int ufs_mtk_pre_pwr_change(struct ufs_hba *hba,
+ struct ufs_pa_layer_attr *dev_max_params,
+ struct ufs_pa_layer_attr *dev_req_params)
+{
+ struct ufs_mtk_host *host = ufshcd_get_variant(hba);
+ struct ufs_dev_params host_cap;
+ int ret;
+
+ ufshcd_init_pwr_dev_param(&host_cap);
+ host_cap.hs_rx_gear = UFS_HS_G5;
+ host_cap.hs_tx_gear = UFS_HS_G5;
+
+ ret = ufshcd_get_pwr_dev_param(&host_cap,
+ dev_max_params,
+ dev_req_params);
+ if (ret) {
+ pr_info("%s: failed to determine capabilities\n",
+ __func__);
+ }
+
+ if (ufs_mtk_pmc_via_fastauto(hba, dev_req_params)) {
+ ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXTERMINATION), true);
+ ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXGEAR), UFS_HS_G1);
+
+ ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXTERMINATION), true);
+ ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXGEAR), UFS_HS_G1);
+
+ ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVETXDATALANES),
+ dev_req_params->lane_tx);
+ ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVERXDATALANES),
+ dev_req_params->lane_rx);
+ ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HSSERIES),
+ dev_req_params->hs_rate);
+
+ ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXHSADAPTTYPE),
+ PA_NO_ADAPT);
+
+ ret = ufshcd_uic_change_pwr_mode(hba,
+ FASTAUTO_MODE << 4 | FASTAUTO_MODE);
+
+ if (ret) {
+ dev_err(hba->dev, "%s: HSG1B FASTAUTO failed ret=%d\n",
+ __func__, ret);
+ }
+ }
+
+ if (host->hw_ver.major >= 3) {
+ ret = ufshcd_dme_configure_adapt(hba,
+ dev_req_params->gear_tx,
+ PA_INITIAL_ADAPT);
+ }
+
+ return ret;
+}
+
+static int ufs_mtk_pwr_change_notify(struct ufs_hba *hba,
+ enum ufs_notify_change_status stage,
+ struct ufs_pa_layer_attr *dev_max_params,
+ struct ufs_pa_layer_attr *dev_req_params)
+{
+ int ret = 0;
+
+ switch (stage) {
+ case PRE_CHANGE:
+ ret = ufs_mtk_pre_pwr_change(hba, dev_max_params,
+ dev_req_params);
+ break;
+ case POST_CHANGE:
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
+static int ufs_mtk_unipro_set_lpm(struct ufs_hba *hba, bool lpm)
+{
+ int ret;
+ struct ufs_mtk_host *host = ufshcd_get_variant(hba);
+
+ ret = ufshcd_dme_set(hba,
+ UIC_ARG_MIB_SEL(VS_UNIPROPOWERDOWNCONTROL, 0),
+ lpm ? 1 : 0);
+ if (!ret || !lpm) {
+ /*
+		 * If the UIC command failed, keep the host in non-LPM mode
+		 * so that the default hba_enable_delay_us value is used when
+		 * re-enabling the host.
+ */
+ host->unipro_lpm = lpm;
+ }
+
+ return ret;
+}
+
+static int ufs_mtk_pre_link(struct ufs_hba *hba)
+{
+ int ret;
+ u32 tmp;
+
+ ufs_mtk_get_controller_version(hba);
+
+ ret = ufs_mtk_unipro_set_lpm(hba, false);
+ if (ret)
+ return ret;
+
+ /*
+	 * Set PA_Local_TX_LCC_Enable to 0 before link startup to make
+	 * sure that both host and device TX LCC are disabled once link
+	 * startup completes.
+ */
+ ret = ufshcd_disable_host_tx_lcc(hba);
+ if (ret)
+ return ret;
+
+ /* disable deep stall */
+ ret = ufshcd_dme_get(hba, UIC_ARG_MIB(VS_SAVEPOWERCONTROL), &tmp);
+ if (ret)
+ return ret;
+
+ tmp &= ~(1 << 6);
+
+ ret = ufshcd_dme_set(hba, UIC_ARG_MIB(VS_SAVEPOWERCONTROL), tmp);
+
+ return ret;
+}
+
+static void ufs_mtk_setup_clk_gating(struct ufs_hba *hba)
+{
+ u32 ah_ms;
+
+ if (ufshcd_is_clkgating_allowed(hba)) {
+ if (ufshcd_is_auto_hibern8_supported(hba) && hba->ahit)
+ ah_ms = FIELD_GET(UFSHCI_AHIBERN8_TIMER_MASK,
+ hba->ahit);
+ else
+ ah_ms = 10;
+ ufshcd_clkgate_delay_set(hba->dev, ah_ms + 5);
+ }
+}
+
+static int ufs_mtk_post_link(struct ufs_hba *hba)
+{
+ /* enable unipro clock gating feature */
+ ufs_mtk_cfg_unipro_cg(hba, true);
+
+	/* will be configured during hba probe */
+ if (ufshcd_is_auto_hibern8_supported(hba))
+ hba->ahit = FIELD_PREP(UFSHCI_AHIBERN8_TIMER_MASK, 10) |
+ FIELD_PREP(UFSHCI_AHIBERN8_SCALE_MASK, 3);
+
+ ufs_mtk_setup_clk_gating(hba);
+
+ return 0;
+}
+
+static int ufs_mtk_link_startup_notify(struct ufs_hba *hba,
+ enum ufs_notify_change_status stage)
+{
+ int ret = 0;
+
+ switch (stage) {
+ case PRE_CHANGE:
+ ret = ufs_mtk_pre_link(hba);
+ break;
+ case POST_CHANGE:
+ ret = ufs_mtk_post_link(hba);
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
+static int ufs_mtk_device_reset(struct ufs_hba *hba)
+{
+ struct arm_smccc_res res;
+
+ /* disable hba before device reset */
+ ufshcd_hba_stop(hba);
+
+ ufs_mtk_device_reset_ctrl(0, res);
+
+ /*
+	 * The reset signal is active low. UFS devices shall detect a
+	 * positive or negative RST_n pulse that is at least 1 us wide.
+	 *
+	 * To be on the safe side, keep the reset low for at least 10 us.
+ */
+ usleep_range(10, 15);
+
+ ufs_mtk_device_reset_ctrl(1, res);
+
+ /* Some devices may need time to respond to rst_n */
+ usleep_range(10000, 15000);
+
+ dev_info(hba->dev, "device reset done\n");
+
+ return 0;
+}
+
+static int ufs_mtk_link_set_hpm(struct ufs_hba *hba)
+{
+ int err;
+
+ err = ufshcd_hba_enable(hba);
+ if (err)
+ return err;
+
+ err = ufs_mtk_unipro_set_lpm(hba, false);
+ if (err)
+ return err;
+
+ err = ufshcd_uic_hibern8_exit(hba);
+ if (!err)
+ ufshcd_set_link_active(hba);
+ else
+ return err;
+
+ err = ufshcd_make_hba_operational(hba);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+static int ufs_mtk_link_set_lpm(struct ufs_hba *hba)
+{
+ int err;
+
+ /* Disable reset confirm feature by UniPro */
+ ufshcd_writel(hba,
+ (ufshcd_readl(hba, REG_UFS_XOUFS_CTRL) & ~0x100),
+ REG_UFS_XOUFS_CTRL);
+
+ err = ufs_mtk_unipro_set_lpm(hba, true);
+ if (err) {
+ /* Resume UniPro state for following error recovery */
+ ufs_mtk_unipro_set_lpm(hba, false);
+ return err;
+ }
+
+ return 0;
+}
+
+static void ufs_mtk_vccqx_set_lpm(struct ufs_hba *hba, bool lpm)
+{
+ struct ufs_vreg *vccqx = NULL;
+
+ if (hba->vreg_info.vccq)
+ vccqx = hba->vreg_info.vccq;
+ else
+ vccqx = hba->vreg_info.vccq2;
+
+ regulator_set_mode(vccqx->reg,
+ lpm ? REGULATOR_MODE_IDLE : REGULATOR_MODE_NORMAL);
+}
+
+static void ufs_mtk_vsx_set_lpm(struct ufs_hba *hba, bool lpm)
+{
+ struct arm_smccc_res res;
+
+ ufs_mtk_device_pwr_ctrl(!lpm,
+ (unsigned long)hba->dev_info.wspecversion,
+ res);
+}
+
+static void ufs_mtk_dev_vreg_set_lpm(struct ufs_hba *hba, bool lpm)
+{
+ if (!hba->vreg_info.vccq && !hba->vreg_info.vccq2)
+ return;
+
+ /* Skip if VCC is assumed always-on */
+ if (!hba->vreg_info.vcc)
+ return;
+
+ /* Bypass LPM when device is still active */
+ if (lpm && ufshcd_is_ufs_dev_active(hba))
+ return;
+
+ /* Bypass LPM if VCC is enabled */
+ if (lpm && hba->vreg_info.vcc->enabled)
+ return;
+
+ if (lpm) {
+ ufs_mtk_vccqx_set_lpm(hba, lpm);
+ ufs_mtk_vsx_set_lpm(hba, lpm);
+ } else {
+ ufs_mtk_vsx_set_lpm(hba, lpm);
+ ufs_mtk_vccqx_set_lpm(hba, lpm);
+ }
+}
+
+static void ufs_mtk_auto_hibern8_disable(struct ufs_hba *hba)
+{
+ int ret;
+
+ /* disable auto-hibern8 */
+ ufshcd_writel(hba, 0, REG_AUTO_HIBERNATE_IDLE_TIMER);
+
+	/* wait for the host to return to idle after auto-hibern8 is off */
+ ufs_mtk_wait_idle_state(hba, 5);
+
+ ret = ufs_mtk_wait_link_state(hba, VS_LINK_UP, 100);
+ if (ret)
+ dev_warn(hba->dev, "exit h8 state fail, ret=%d\n", ret);
+}
+
+static int ufs_mtk_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op,
+ enum ufs_notify_change_status status)
+{
+ int err;
+ struct arm_smccc_res res;
+
+ if (status == PRE_CHANGE) {
+ if (!ufshcd_is_auto_hibern8_supported(hba))
+ return 0;
+ ufs_mtk_auto_hibern8_disable(hba);
+ return 0;
+ }
+
+ if (ufshcd_is_link_hibern8(hba)) {
+ err = ufs_mtk_link_set_lpm(hba);
+ if (err)
+ goto fail;
+ }
+
+ if (!ufshcd_is_link_active(hba)) {
+ /*
+ * Make sure no error will be returned to prevent
+ * ufshcd_suspend() re-enabling regulators while vreg is still
+ * in low-power mode.
+ */
+ err = ufs_mtk_mphy_power_on(hba, false);
+ if (err)
+ goto fail;
+ }
+
+ if (ufshcd_is_link_off(hba))
+ ufs_mtk_device_reset_ctrl(0, res);
+
+ ufs_mtk_host_pwr_ctrl(HOST_PWR_HCI, false, res);
+
+ return 0;
+fail:
+ /*
+	 * Forcibly set the link to the off state to trigger
+	 * ufshcd_host_reset_and_restore() in ufshcd_suspend()
+	 * for a complete host reset.
+ */
+ ufshcd_set_link_off(hba);
+ return -EAGAIN;
+}
+
+static int ufs_mtk_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
+{
+ int err;
+ struct arm_smccc_res res;
+
+ if (hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL)
+ ufs_mtk_dev_vreg_set_lpm(hba, false);
+
+ ufs_mtk_host_pwr_ctrl(HOST_PWR_HCI, true, res);
+
+ err = ufs_mtk_mphy_power_on(hba, true);
+ if (err)
+ goto fail;
+
+ if (ufshcd_is_link_hibern8(hba)) {
+ err = ufs_mtk_link_set_hpm(hba);
+ if (err)
+ goto fail;
+ }
+
+ return 0;
+fail:
+ return ufshcd_link_recovery(hba);
+}
+
+static void ufs_mtk_dbg_register_dump(struct ufs_hba *hba)
+{
+ /* Dump ufshci register 0x140 ~ 0x14C */
+ ufshcd_dump_regs(hba, REG_UFS_XOUFS_CTRL, 0x10,
+ "XOUFS Ctrl (0x140): ");
+
+ ufshcd_dump_regs(hba, REG_UFS_EXTREG, 0x4, "Ext Reg ");
+
+ /* Dump ufshci register 0x2200 ~ 0x22AC */
+ ufshcd_dump_regs(hba, REG_UFS_MPHYCTRL,
+ REG_UFS_REJECT_MON - REG_UFS_MPHYCTRL + 4,
+ "MPHY Ctrl (0x2200): ");
+
+	/* Direct debugging information to REG_UFS_PROBE */
+ ufs_mtk_dbg_sel(hba);
+ ufshcd_dump_regs(hba, REG_UFS_PROBE, 0x4, "Debug Probe ");
+}
+
+static int ufs_mtk_apply_dev_quirks(struct ufs_hba *hba)
+{
+ struct ufs_dev_info *dev_info = &hba->dev_info;
+ u16 mid = dev_info->wmanufacturerid;
+
+ if (mid == UFS_VENDOR_SAMSUNG) {
+ ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TACTIVATE), 6);
+ ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HIBERN8TIME), 10);
+ }
+
+ /*
+	 * Decide the waiting time before gating and after ungating the
+	 * reference clock according to each vendor's requirements.
+ */
+ if (mid == UFS_VENDOR_SAMSUNG)
+ ufs_mtk_setup_ref_clk_wait_us(hba, 1);
+ else if (mid == UFS_VENDOR_SKHYNIX)
+ ufs_mtk_setup_ref_clk_wait_us(hba, 30);
+ else if (mid == UFS_VENDOR_TOSHIBA)
+ ufs_mtk_setup_ref_clk_wait_us(hba, 100);
+ else
+ ufs_mtk_setup_ref_clk_wait_us(hba,
+ REFCLK_DEFAULT_WAIT_US);
+ return 0;
+}
+
+static void ufs_mtk_fixup_dev_quirks(struct ufs_hba *hba)
+{
+ ufshcd_fixup_dev_quirks(hba, ufs_mtk_dev_fixups);
+
+ if (ufs_mtk_is_broken_vcc(hba) && hba->vreg_info.vcc &&
+ (hba->dev_quirks & UFS_DEVICE_QUIRK_DELAY_AFTER_LPM)) {
+ hba->vreg_info.vcc->always_on = true;
+ /*
+ * VCC will be kept always-on thus we don't
+ * need any delay during regulator operations
+ */
+ hba->dev_quirks &= ~(UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM |
+ UFS_DEVICE_QUIRK_DELAY_AFTER_LPM);
+ }
+
+ ufs_mtk_vreg_fix_vcc(hba);
+ ufs_mtk_vreg_fix_vccqx(hba);
+}
+
+static void ufs_mtk_event_notify(struct ufs_hba *hba,
+ enum ufs_event_type evt, void *data)
+{
+ unsigned int val = *(u32 *)data;
+ unsigned long reg;
+ u8 bit;
+
+ trace_ufs_mtk_event(evt, val);
+
+ /* Print details of UIC Errors */
+ if (evt <= UFS_EVT_DME_ERR) {
+ dev_info(hba->dev,
+ "Host UIC Error Code (%s): %08x\n",
+ ufs_uic_err_str[evt], val);
+ reg = val;
+ }
+
+ if (evt == UFS_EVT_PA_ERR) {
+ for_each_set_bit(bit, &reg, ARRAY_SIZE(ufs_uic_pa_err_str))
+ dev_info(hba->dev, "%s\n", ufs_uic_pa_err_str[bit]);
+ }
+
+ if (evt == UFS_EVT_DL_ERR) {
+ for_each_set_bit(bit, &reg, ARRAY_SIZE(ufs_uic_dl_err_str))
+ dev_info(hba->dev, "%s\n", ufs_uic_dl_err_str[bit]);
+ }
+}
+
+static void ufs_mtk_config_scaling_param(struct ufs_hba *hba,
+ struct devfreq_dev_profile *profile,
+ struct devfreq_simple_ondemand_data *data)
+{
+ /* Customize min gear in clk scaling */
+ hba->clk_scaling.min_gear = UFS_HS_G4;
+
+ hba->vps->devfreq_profile.polling_ms = 200;
+ hba->vps->ondemand_data.upthreshold = 50;
+ hba->vps->ondemand_data.downdifferential = 20;
+}
+
+/**
+ * ufs_mtk_clk_scale - Internal clk scaling operation
+ *
+ * The MTK platform supports clk scaling by switching the parent of the
+ * ufs_sel mux. ufs_sel feeds ufs_ck, which drives the UFS hardware
+ * directly. The max and min clock rates of ufs_sel defined in the DTS
+ * should match the rates of "ufs_sel_max_src" and "ufs_sel_min_src"
+ * respectively. This prevents changing the rate of a PLL clock that is
+ * shared between modules.
+ *
+ * @hba: per adapter instance
+ * @scale_up: True for scaling up and false for scaling down
+ */
+static void ufs_mtk_clk_scale(struct ufs_hba *hba, bool scale_up)
+{
+ struct ufs_mtk_host *host = ufshcd_get_variant(hba);
+ struct ufs_mtk_clk *mclk = &host->mclk;
+ struct ufs_clk_info *clki = mclk->ufs_sel_clki;
+ int ret = 0;
+
+ ret = clk_prepare_enable(clki->clk);
+ if (ret) {
+ dev_info(hba->dev,
+ "clk_prepare_enable() fail, ret: %d\n", ret);
+ return;
+ }
+
+ if (scale_up) {
+ ret = clk_set_parent(clki->clk, mclk->ufs_sel_max_clki->clk);
+ clki->curr_freq = clki->max_freq;
+ } else {
+ ret = clk_set_parent(clki->clk, mclk->ufs_sel_min_clki->clk);
+ clki->curr_freq = clki->min_freq;
+ }
+
+ if (ret) {
+ dev_info(hba->dev,
+ "Failed to set ufs_sel_clki, ret: %d\n", ret);
+ }
+
+ clk_disable_unprepare(clki->clk);
+
+ trace_ufs_mtk_clk_scale(clki->name, scale_up, clk_get_rate(clki->clk));
+}
+
+static int ufs_mtk_clk_scale_notify(struct ufs_hba *hba, bool scale_up,
+ enum ufs_notify_change_status status)
+{
+ if (!ufshcd_is_clkscaling_supported(hba))
+ return 0;
+
+ if (status == PRE_CHANGE) {
+ /* Switch parent before clk_set_rate() */
+ ufs_mtk_clk_scale(hba, scale_up);
+ } else {
+ /* Request interrupt latency QoS accordingly */
+ ufs_mtk_scale_perf(hba, scale_up);
+ }
+
+ return 0;
+}
+
+/*
+ * struct ufs_hba_mtk_vops - UFS MTK specific variant operations
+ *
+ * The variant operations configure the necessary controller and PHY
+ * handshake during initialization.
+ */
+static const struct ufs_hba_variant_ops ufs_hba_mtk_vops = {
+ .name = "mediatek.ufshci",
+ .init = ufs_mtk_init,
+ .get_ufs_hci_version = ufs_mtk_get_ufs_hci_version,
+ .setup_clocks = ufs_mtk_setup_clocks,
+ .hce_enable_notify = ufs_mtk_hce_enable_notify,
+ .link_startup_notify = ufs_mtk_link_startup_notify,
+ .pwr_change_notify = ufs_mtk_pwr_change_notify,
+ .apply_dev_quirks = ufs_mtk_apply_dev_quirks,
+ .fixup_dev_quirks = ufs_mtk_fixup_dev_quirks,
+ .suspend = ufs_mtk_suspend,
+ .resume = ufs_mtk_resume,
+ .dbg_register_dump = ufs_mtk_dbg_register_dump,
+ .device_reset = ufs_mtk_device_reset,
+ .event_notify = ufs_mtk_event_notify,
+ .config_scaling_param = ufs_mtk_config_scaling_param,
+ .clk_scale_notify = ufs_mtk_clk_scale_notify,
+};
+
+/**
+ * ufs_mtk_probe - probe routine of the driver
+ * @pdev: pointer to Platform device handle
+ *
+ * Return zero for success and non-zero for failure
+ */
+static int ufs_mtk_probe(struct platform_device *pdev)
+{
+ int err;
+ struct device *dev = &pdev->dev;
+ struct device_node *reset_node;
+ struct platform_device *reset_pdev;
+ struct device_link *link;
+
+ reset_node = of_find_compatible_node(NULL, NULL,
+ "ti,syscon-reset");
+ if (!reset_node) {
+ dev_notice(dev, "find ti,syscon-reset fail\n");
+ goto skip_reset;
+ }
+ reset_pdev = of_find_device_by_node(reset_node);
+ if (!reset_pdev) {
+ dev_notice(dev, "find reset_pdev fail\n");
+ goto skip_reset;
+ }
+ link = device_link_add(dev, &reset_pdev->dev,
+ DL_FLAG_AUTOPROBE_CONSUMER);
+ put_device(&reset_pdev->dev);
+ if (!link) {
+ dev_notice(dev, "add reset device_link fail\n");
+ goto skip_reset;
+ }
+ /* supplier is not probed */
+ if (link->status == DL_STATE_DORMANT) {
+ err = -EPROBE_DEFER;
+ goto out;
+ }
+
+skip_reset:
+ /* perform generic probe */
+ err = ufshcd_pltfrm_init(pdev, &ufs_hba_mtk_vops);
+
+out:
+ if (err)
+ dev_info(dev, "probe failed %d\n", err);
+
+ of_node_put(reset_node);
+ return err;
+}
+
+/**
+ * ufs_mtk_remove - set driver_data of the device to NULL
+ * @pdev: pointer to platform device handle
+ *
+ * Always return 0
+ */
+static int ufs_mtk_remove(struct platform_device *pdev)
+{
+ struct ufs_hba *hba = platform_get_drvdata(pdev);
+
+ pm_runtime_get_sync(&(pdev)->dev);
+ ufshcd_remove(hba);
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int ufs_mtk_system_suspend(struct device *dev)
+{
+ struct ufs_hba *hba = dev_get_drvdata(dev);
+ int ret;
+
+ ret = ufshcd_system_suspend(dev);
+ if (ret)
+ return ret;
+
+ ufs_mtk_dev_vreg_set_lpm(hba, true);
+
+ return 0;
+}
+
+static int ufs_mtk_system_resume(struct device *dev)
+{
+ struct ufs_hba *hba = dev_get_drvdata(dev);
+
+ ufs_mtk_dev_vreg_set_lpm(hba, false);
+
+ return ufshcd_system_resume(dev);
+}
+#endif
+
+static int ufs_mtk_runtime_suspend(struct device *dev)
+{
+ struct ufs_hba *hba = dev_get_drvdata(dev);
+ int ret = 0;
+
+ ret = ufshcd_runtime_suspend(dev);
+ if (ret)
+ return ret;
+
+ ufs_mtk_dev_vreg_set_lpm(hba, true);
+
+ return 0;
+}
+
+static int ufs_mtk_runtime_resume(struct device *dev)
+{
+ struct ufs_hba *hba = dev_get_drvdata(dev);
+
+ ufs_mtk_dev_vreg_set_lpm(hba, false);
+
+ return ufshcd_runtime_resume(dev);
+}
+
+static const struct dev_pm_ops ufs_mtk_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(ufs_mtk_system_suspend,
+ ufs_mtk_system_resume)
+ SET_RUNTIME_PM_OPS(ufs_mtk_runtime_suspend,
+ ufs_mtk_runtime_resume, NULL)
+ .prepare = ufshcd_suspend_prepare,
+ .complete = ufshcd_resume_complete,
+};
+
+static struct platform_driver ufs_mtk_pltform = {
+ .probe = ufs_mtk_probe,
+ .remove = ufs_mtk_remove,
+ .shutdown = ufshcd_pltfrm_shutdown,
+ .driver = {
+ .name = "ufshcd-mtk",
+ .pm = &ufs_mtk_pm_ops,
+ .of_match_table = ufs_mtk_of_match,
+ },
+};
+
+MODULE_AUTHOR("Stanley Chu <stanley.chu@mediatek.com>");
+MODULE_AUTHOR("Peter Wang <peter.wang@mediatek.com>");
+MODULE_DESCRIPTION("MediaTek UFS Host Driver");
+MODULE_LICENSE("GPL v2");
+
+module_platform_driver(ufs_mtk_pltform);
diff --git a/drivers/ufs/host/ufs-mediatek.h b/drivers/ufs/host/ufs-mediatek.h
new file mode 100644
index 000000000..2fc6d7b87
--- /dev/null
+++ b/drivers/ufs/host/ufs-mediatek.h
@@ -0,0 +1,226 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2019 MediaTek Inc.
+ */
+
+#ifndef _UFS_MEDIATEK_H
+#define _UFS_MEDIATEK_H
+
+#include <linux/bitops.h>
+#include <linux/pm_qos.h>
+#include <linux/soc/mediatek/mtk_sip_svc.h>
+
+/*
+ * Vendor specific UFSHCI Registers
+ */
+#define REG_UFS_XOUFS_CTRL 0x140
+#define REG_UFS_REFCLK_CTRL 0x144
+#define REG_UFS_EXTREG 0x2100
+#define REG_UFS_MPHYCTRL 0x2200
+#define REG_UFS_MTK_IP_VER 0x2240
+#define REG_UFS_REJECT_MON 0x22AC
+#define REG_UFS_DEBUG_SEL 0x22C0
+#define REG_UFS_PROBE 0x22C8
+#define REG_UFS_DEBUG_SEL_B0 0x22D0
+#define REG_UFS_DEBUG_SEL_B1 0x22D4
+#define REG_UFS_DEBUG_SEL_B2 0x22D8
+#define REG_UFS_DEBUG_SEL_B3 0x22DC
+
+/*
+ * Ref-clk control
+ *
+ * Values for register REG_UFS_REFCLK_CTRL
+ */
+#define REFCLK_RELEASE 0x0
+#define REFCLK_REQUEST BIT(0)
+#define REFCLK_ACK BIT(1)
+
+#define REFCLK_REQ_TIMEOUT_US 3000
+#define REFCLK_DEFAULT_WAIT_US 32
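+
+/*
+ * Ref-clk handshake (see ufs_mtk_setup_ref_clk()): the host writes
+ * REFCLK_REQUEST or REFCLK_RELEASE to REG_UFS_REFCLK_CTRL, then polls the
+ * register until the REFCLK_ACK bit mirrors the request bit, giving up
+ * after REFCLK_REQ_TIMEOUT_US.
+ */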
+
+/*
+ * Other attributes
+ */
+#define VS_DEBUGCLOCKENABLE 0xD0A1
+#define VS_SAVEPOWERCONTROL 0xD0A6
+#define VS_UNIPROPOWERDOWNCONTROL 0xD0A8
+
+/*
+ * Vendor specific link state
+ */
+enum {
+ VS_LINK_DISABLED = 0,
+ VS_LINK_DOWN = 1,
+ VS_LINK_UP = 2,
+ VS_LINK_HIBERN8 = 3,
+ VS_LINK_LOST = 4,
+ VS_LINK_CFG = 5,
+};
+
+/*
+ * Vendor specific host controller state
+ */
+enum {
+ VS_HCE_RESET = 0,
+ VS_HCE_BASE = 1,
+ VS_HCE_OOCPR_WAIT = 2,
+ VS_HCE_DME_RESET = 3,
+ VS_HCE_MIDDLE = 4,
+ VS_HCE_DME_ENABLE = 5,
+ VS_HCE_DEFAULTS = 6,
+ VS_HIB_IDLEEN = 7,
+ VS_HIB_ENTER = 8,
+ VS_HIB_ENTER_CONF = 9,
+ VS_HIB_MIDDLE = 10,
+ VS_HIB_WAITTIMER = 11,
+ VS_HIB_EXIT_CONF = 12,
+ VS_HIB_EXIT = 13,
+};
+
+/*
+ * SiP commands
+ */
+#define MTK_SIP_UFS_CONTROL MTK_SIP_SMC_CMD(0x276)
+#define UFS_MTK_SIP_VA09_PWR_CTRL BIT(0)
+#define UFS_MTK_SIP_DEVICE_RESET BIT(1)
+#define UFS_MTK_SIP_CRYPTO_CTRL BIT(2)
+#define UFS_MTK_SIP_REF_CLK_NOTIFICATION BIT(3)
+#define UFS_MTK_SIP_HOST_PWR_CTRL BIT(5)
+#define UFS_MTK_SIP_GET_VCC_NUM BIT(6)
+#define UFS_MTK_SIP_DEVICE_PWR_CTRL BIT(7)
+
+/*
+ * VS_DEBUGCLOCKENABLE
+ */
+enum {
+ TX_SYMBOL_CLK_REQ_FORCE = 5,
+};
+
+/*
+ * VS_SAVEPOWERCONTROL
+ */
+enum {
+ RX_SYMBOL_CLK_GATE_EN = 0,
+ SYS_CLK_GATE_EN = 2,
+ TX_CLK_GATE_EN = 3,
+};
+
+/*
+ * Host capability
+ */
+enum ufs_mtk_host_caps {
+ UFS_MTK_CAP_BOOST_CRYPT_ENGINE = 1 << 0,
+ UFS_MTK_CAP_VA09_PWR_CTRL = 1 << 1,
+ UFS_MTK_CAP_DISABLE_AH8 = 1 << 2,
+ UFS_MTK_CAP_BROKEN_VCC = 1 << 3,
+ UFS_MTK_CAP_PMC_VIA_FASTAUTO = 1 << 6,
+};
+
+struct ufs_mtk_crypt_cfg {
+ struct regulator *reg_vcore;
+ struct clk *clk_crypt_perf;
+ struct clk *clk_crypt_mux;
+ struct clk *clk_crypt_lp;
+ int vcore_volt;
+};
+
+struct ufs_mtk_clk {
+ struct ufs_clk_info *ufs_sel_clki; /* Mux */
+ struct ufs_clk_info *ufs_sel_max_clki; /* Max src */
+ struct ufs_clk_info *ufs_sel_min_clki; /* Min src */
+};
+
+struct ufs_mtk_hw_ver {
+ u8 step;
+ u8 minor;
+ u8 major;
+};
+
+struct ufs_mtk_host {
+ struct phy *mphy;
+ struct pm_qos_request pm_qos_req;
+ struct regulator *reg_va09;
+ struct reset_control *hci_reset;
+ struct reset_control *unipro_reset;
+ struct reset_control *crypto_reset;
+ struct ufs_hba *hba;
+ struct ufs_mtk_crypt_cfg *crypt;
+ struct ufs_mtk_clk mclk;
+ struct ufs_mtk_hw_ver hw_ver;
+ enum ufs_mtk_host_caps caps;
+ bool mphy_powered_on;
+ bool pm_qos_init;
+ bool unipro_lpm;
+ bool ref_clk_enabled;
+ u16 ref_clk_ungating_wait_us;
+ u16 ref_clk_gating_wait_us;
+ u32 ip_ver;
+};
+
+/*
+ * Multi-VCC by Numbering
+ */
+enum ufs_mtk_vcc_num {
+ UFS_VCC_NONE = 0,
+ UFS_VCC_1,
+ UFS_VCC_2,
+ UFS_VCC_MAX
+};
+
+/*
+ * Host Power Control options
+ */
+enum {
+ HOST_PWR_HCI = 0,
+ HOST_PWR_MPHY
+};
+
+/*
+ * SMC call wrapper function
+ */
+struct ufs_mtk_smc_arg {
+ unsigned long cmd;
+ struct arm_smccc_res *res;
+ unsigned long v1;
+ unsigned long v2;
+ unsigned long v3;
+ unsigned long v4;
+ unsigned long v5;
+ unsigned long v6;
+ unsigned long v7;
+};
+
+static void _ufs_mtk_smc(struct ufs_mtk_smc_arg s)
+{
+ arm_smccc_smc(MTK_SIP_UFS_CONTROL,
+ s.cmd, s.v1, s.v2, s.v3, s.v4, s.v5, s.v6, s.res);
+}
+
+#define ufs_mtk_smc(...) \
+ _ufs_mtk_smc((struct ufs_mtk_smc_arg) {__VA_ARGS__})
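+
+/*
+ * The compound literal above zero-initializes any of v1..v7 that a caller
+ * does not pass, so each wrapper below supplies only the arguments it
+ * needs, e.g.:
+ *
+ *   struct arm_smccc_res res;
+ *
+ *   ufs_mtk_smc(UFS_MTK_SIP_GET_VCC_NUM, &res);
+ */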
+
+/*
+ * SMC call interface
+ */
+#define ufs_mtk_va09_pwr_ctrl(res, on) \
+ ufs_mtk_smc(UFS_MTK_SIP_VA09_PWR_CTRL, &(res), on)
+
+#define ufs_mtk_crypto_ctrl(res, enable) \
+ ufs_mtk_smc(UFS_MTK_SIP_CRYPTO_CTRL, &(res), enable)
+
+#define ufs_mtk_ref_clk_notify(on, stage, res) \
+ ufs_mtk_smc(UFS_MTK_SIP_REF_CLK_NOTIFICATION, &(res), on, stage)
+
+#define ufs_mtk_device_reset_ctrl(high, res) \
+ ufs_mtk_smc(UFS_MTK_SIP_DEVICE_RESET, &(res), high)
+
+#define ufs_mtk_host_pwr_ctrl(opt, on, res) \
+ ufs_mtk_smc(UFS_MTK_SIP_HOST_PWR_CTRL, &(res), opt, on)
+
+#define ufs_mtk_get_vcc_num(res) \
+ ufs_mtk_smc(UFS_MTK_SIP_GET_VCC_NUM, &(res))
+
+#define ufs_mtk_device_pwr_ctrl(on, ufs_ver, res) \
+ ufs_mtk_smc(UFS_MTK_SIP_DEVICE_PWR_CTRL, &(res), on, ufs_ver)
+
+#endif /* !_UFS_MEDIATEK_H */
diff --git a/drivers/ufs/host/ufs-qcom-ice.c b/drivers/ufs/host/ufs-qcom-ice.c
new file mode 100644
index 000000000..62387ccd5
--- /dev/null
+++ b/drivers/ufs/host/ufs-qcom-ice.c
@@ -0,0 +1,244 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Qualcomm ICE (Inline Crypto Engine) support.
+ *
+ * Copyright (c) 2014-2019, The Linux Foundation. All rights reserved.
+ * Copyright 2019 Google LLC
+ */
+
+#include <linux/delay.h>
+#include <linux/platform_device.h>
+#include <linux/qcom_scm.h>
+
+#include "ufs-qcom.h"
+
+#define AES_256_XTS_KEY_SIZE 64
+
+/* QCOM ICE registers */
+
+#define QCOM_ICE_REG_CONTROL 0x0000
+#define QCOM_ICE_REG_RESET 0x0004
+#define QCOM_ICE_REG_VERSION 0x0008
+#define QCOM_ICE_REG_FUSE_SETTING 0x0010
+#define QCOM_ICE_REG_PARAMETERS_1 0x0014
+#define QCOM_ICE_REG_PARAMETERS_2 0x0018
+#define QCOM_ICE_REG_PARAMETERS_3 0x001C
+#define QCOM_ICE_REG_PARAMETERS_4 0x0020
+#define QCOM_ICE_REG_PARAMETERS_5 0x0024
+
+/* QCOM ICE v3.X only */
+#define QCOM_ICE_GENERAL_ERR_STTS 0x0040
+#define QCOM_ICE_INVALID_CCFG_ERR_STTS 0x0030
+#define QCOM_ICE_GENERAL_ERR_MASK 0x0044
+
+/* QCOM ICE v2.X only */
+#define QCOM_ICE_REG_NON_SEC_IRQ_STTS 0x0040
+#define QCOM_ICE_REG_NON_SEC_IRQ_MASK 0x0044
+
+#define QCOM_ICE_REG_NON_SEC_IRQ_CLR 0x0048
+#define QCOM_ICE_REG_STREAM1_ERROR_SYNDROME1 0x0050
+#define QCOM_ICE_REG_STREAM1_ERROR_SYNDROME2 0x0054
+#define QCOM_ICE_REG_STREAM2_ERROR_SYNDROME1 0x0058
+#define QCOM_ICE_REG_STREAM2_ERROR_SYNDROME2 0x005C
+#define QCOM_ICE_REG_STREAM1_BIST_ERROR_VEC 0x0060
+#define QCOM_ICE_REG_STREAM2_BIST_ERROR_VEC 0x0064
+#define QCOM_ICE_REG_STREAM1_BIST_FINISH_VEC 0x0068
+#define QCOM_ICE_REG_STREAM2_BIST_FINISH_VEC 0x006C
+#define QCOM_ICE_REG_BIST_STATUS 0x0070
+#define QCOM_ICE_REG_BYPASS_STATUS 0x0074
+#define QCOM_ICE_REG_ADVANCED_CONTROL 0x1000
+#define QCOM_ICE_REG_ENDIAN_SWAP 0x1004
+#define QCOM_ICE_REG_TEST_BUS_CONTROL 0x1010
+#define QCOM_ICE_REG_TEST_BUS_REG 0x1014
+
+/* BIST ("built-in self-test"?) status flags */
+#define QCOM_ICE_BIST_STATUS_MASK 0xF0000000
+
+#define QCOM_ICE_FUSE_SETTING_MASK 0x1
+#define QCOM_ICE_FORCE_HW_KEY0_SETTING_MASK 0x2
+#define QCOM_ICE_FORCE_HW_KEY1_SETTING_MASK 0x4
+
+#define qcom_ice_writel(host, val, reg) \
+ writel((val), (host)->ice_mmio + (reg))
+#define qcom_ice_readl(host, reg) \
+ readl((host)->ice_mmio + (reg))
+
+static bool qcom_ice_supported(struct ufs_qcom_host *host)
+{
+ struct device *dev = host->hba->dev;
+ u32 regval = qcom_ice_readl(host, QCOM_ICE_REG_VERSION);
+ int major = regval >> 24;
+ int minor = (regval >> 16) & 0xFF;
+ int step = regval & 0xFFFF;
+
+ /* For now this driver only supports ICE version 3. */
+ if (major != 3) {
+ dev_warn(dev, "Unsupported ICE version: v%d.%d.%d\n",
+ major, minor, step);
+ return false;
+ }
+
+ dev_info(dev, "Found QC Inline Crypto Engine (ICE) v%d.%d.%d\n",
+ major, minor, step);
+
+ /* If fuses are blown, ICE might not work in the standard way. */
+ regval = qcom_ice_readl(host, QCOM_ICE_REG_FUSE_SETTING);
+ if (regval & (QCOM_ICE_FUSE_SETTING_MASK |
+ QCOM_ICE_FORCE_HW_KEY0_SETTING_MASK |
+ QCOM_ICE_FORCE_HW_KEY1_SETTING_MASK)) {
+ dev_warn(dev, "Fuses are blown; ICE is unusable!\n");
+ return false;
+ }
+ return true;
+}
+
+int ufs_qcom_ice_init(struct ufs_qcom_host *host)
+{
+ struct ufs_hba *hba = host->hba;
+ struct device *dev = hba->dev;
+ struct platform_device *pdev = to_platform_device(dev);
+ struct resource *res;
+ int err;
+
+ if (!(ufshcd_readl(hba, REG_CONTROLLER_CAPABILITIES) &
+ MASK_CRYPTO_SUPPORT))
+ return 0;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ice");
+ if (!res) {
+ dev_warn(dev, "ICE registers not found\n");
+ goto disable;
+ }
+
+ if (!qcom_scm_ice_available()) {
+ dev_warn(dev, "ICE SCM interface not found\n");
+ goto disable;
+ }
+
+ host->ice_mmio = devm_ioremap_resource(dev, res);
+ if (IS_ERR(host->ice_mmio)) {
+ err = PTR_ERR(host->ice_mmio);
+ return err;
+ }
+
+ if (!qcom_ice_supported(host))
+ goto disable;
+
+ return 0;
+
+disable:
+ dev_warn(dev, "Disabling inline encryption support\n");
+ hba->caps &= ~UFSHCD_CAP_CRYPTO;
+ return 0;
+}
+
+static void qcom_ice_low_power_mode_enable(struct ufs_qcom_host *host)
+{
+ u32 regval;
+
+ regval = qcom_ice_readl(host, QCOM_ICE_REG_ADVANCED_CONTROL);
+ /*
+ * Enable low power mode sequence
+ * [0]-0, [1]-0, [2]-0, [3]-E, [4]-0, [5]-0, [6]-0, [7]-0
+ */
+ regval |= 0x7000;
+ qcom_ice_writel(host, regval, QCOM_ICE_REG_ADVANCED_CONTROL);
+}
+
+static void qcom_ice_optimization_enable(struct ufs_qcom_host *host)
+{
+ u32 regval;
+
+ /* ICE Optimizations Enable Sequence */
+ regval = qcom_ice_readl(host, QCOM_ICE_REG_ADVANCED_CONTROL);
+ regval |= 0xD807100;
+ /* ICE HPG requires delay before writing */
+ udelay(5);
+ qcom_ice_writel(host, regval, QCOM_ICE_REG_ADVANCED_CONTROL);
+ udelay(5);
+}
+
+int ufs_qcom_ice_enable(struct ufs_qcom_host *host)
+{
+ if (!(host->hba->caps & UFSHCD_CAP_CRYPTO))
+ return 0;
+ qcom_ice_low_power_mode_enable(host);
+ qcom_ice_optimization_enable(host);
+ return ufs_qcom_ice_resume(host);
+}
+
+/* Poll until all BIST bits are reset */
+static int qcom_ice_wait_bist_status(struct ufs_qcom_host *host)
+{
+ int count;
+ u32 reg;
+
+ for (count = 0; count < 100; count++) {
+ reg = qcom_ice_readl(host, QCOM_ICE_REG_BIST_STATUS);
+ if (!(reg & QCOM_ICE_BIST_STATUS_MASK))
+ break;
+ udelay(50);
+ }
+ if (reg)
+ return -ETIMEDOUT;
+ return 0;
+}
+
+int ufs_qcom_ice_resume(struct ufs_qcom_host *host)
+{
+ int err;
+
+ if (!(host->hba->caps & UFSHCD_CAP_CRYPTO))
+ return 0;
+
+ err = qcom_ice_wait_bist_status(host);
+ if (err) {
+ dev_err(host->hba->dev, "BIST status error (%d)\n", err);
+ return err;
+ }
+ return 0;
+}
+
+/*
+ * Program a key into a QC ICE keyslot, or evict a keyslot. QC ICE requires
+ * vendor-specific SCM calls for this; it doesn't support the standard way.
+ */
+int ufs_qcom_ice_program_key(struct ufs_hba *hba,
+ const union ufs_crypto_cfg_entry *cfg, int slot)
+{
+ union ufs_crypto_cap_entry cap;
+ union {
+ u8 bytes[AES_256_XTS_KEY_SIZE];
+ u32 words[AES_256_XTS_KEY_SIZE / sizeof(u32)];
+ } key;
+ int i;
+ int err;
+
+ if (!(cfg->config_enable & UFS_CRYPTO_CONFIGURATION_ENABLE))
+ return qcom_scm_ice_invalidate_key(slot);
+
+ /* Only AES-256-XTS has been tested so far. */
+ cap = hba->crypto_cap_array[cfg->crypto_cap_idx];
+ if (cap.algorithm_id != UFS_CRYPTO_ALG_AES_XTS ||
+ cap.key_size != UFS_CRYPTO_KEY_SIZE_256) {
+ dev_err_ratelimited(hba->dev,
+ "Unhandled crypto capability; algorithm_id=%d, key_size=%d\n",
+ cap.algorithm_id, cap.key_size);
+ return -EINVAL;
+ }
+
+ memcpy(key.bytes, cfg->crypto_key, AES_256_XTS_KEY_SIZE);
+
+ /*
+ * The SCM call byte-swaps the 32-bit words of the key. So we have to
+	 * do the same, in order for the final key to be correct.
+ */
+ for (i = 0; i < ARRAY_SIZE(key.words); i++)
+ __cpu_to_be32s(&key.words[i]);
+
+ err = qcom_scm_ice_set_key(slot, key.bytes, AES_256_XTS_KEY_SIZE,
+ QCOM_SCM_ICE_CIPHER_AES_256_XTS,
+ cfg->data_unit_size);
+ memzero_explicit(&key, sizeof(key));
+ return err;
+}
diff --git a/drivers/ufs/host/ufs-qcom.c b/drivers/ufs/host/ufs-qcom.c
new file mode 100644
index 000000000..8ad1415e1
--- /dev/null
+++ b/drivers/ufs/host/ufs-qcom.c
@@ -0,0 +1,1520 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2013-2016, Linux Foundation. All rights reserved.
+ */
+
+#include <linux/acpi.h>
+#include <linux/time.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/phy/phy.h>
+#include <linux/gpio/consumer.h>
+#include <linux/reset-controller.h>
+#include <linux/devfreq.h>
+
+#include <ufs/ufshcd.h>
+#include "ufshcd-pltfrm.h"
+#include <ufs/unipro.h>
+#include "ufs-qcom.h"
+#include <ufs/ufshci.h>
+#include <ufs/ufs_quirks.h>
+
+#define UFS_QCOM_DEFAULT_DBG_PRINT_EN \
+ (UFS_QCOM_DBG_PRINT_REGS_EN | UFS_QCOM_DBG_PRINT_TEST_BUS_EN)
+
+enum {
+ TSTBUS_UAWM,
+ TSTBUS_UARM,
+ TSTBUS_TXUC,
+ TSTBUS_RXUC,
+ TSTBUS_DFC,
+ TSTBUS_TRLUT,
+ TSTBUS_TMRLUT,
+ TSTBUS_OCSC,
+ TSTBUS_UTP_HCI,
+ TSTBUS_COMBINED,
+ TSTBUS_WRAPPER,
+ TSTBUS_UNIPRO,
+ TSTBUS_MAX,
+};
+
+static struct ufs_qcom_host *ufs_qcom_hosts[MAX_UFS_QCOM_HOSTS];
+
+static void ufs_qcom_get_default_testbus_cfg(struct ufs_qcom_host *host);
+static int ufs_qcom_set_dme_vs_core_clk_ctrl_clear_div(struct ufs_hba *hba,
+ u32 clk_cycles);
+
+static struct ufs_qcom_host *rcdev_to_ufs_host(struct reset_controller_dev *rcd)
+{
+ return container_of(rcd, struct ufs_qcom_host, rcdev);
+}
+
+static void ufs_qcom_dump_regs_wrapper(struct ufs_hba *hba, int offset, int len,
+ const char *prefix, void *priv)
+{
+ ufshcd_dump_regs(hba, offset, len * 4, prefix);
+}
+
+static int ufs_qcom_host_clk_get(struct device *dev,
+ const char *name, struct clk **clk_out, bool optional)
+{
+ struct clk *clk;
+ int err = 0;
+
+ clk = devm_clk_get(dev, name);
+ if (!IS_ERR(clk)) {
+ *clk_out = clk;
+ return 0;
+ }
+
+ err = PTR_ERR(clk);
+
+ if (optional && err == -ENOENT) {
+ *clk_out = NULL;
+ return 0;
+ }
+
+ if (err != -EPROBE_DEFER)
+ dev_err(dev, "failed to get %s err %d\n", name, err);
+
+ return err;
+}
+
+static int ufs_qcom_host_clk_enable(struct device *dev,
+ const char *name, struct clk *clk)
+{
+ int err = 0;
+
+ err = clk_prepare_enable(clk);
+ if (err)
+ dev_err(dev, "%s: %s enable failed %d\n", __func__, name, err);
+
+ return err;
+}
+
+static void ufs_qcom_disable_lane_clks(struct ufs_qcom_host *host)
+{
+ if (!host->is_lane_clks_enabled)
+ return;
+
+ clk_disable_unprepare(host->tx_l1_sync_clk);
+ clk_disable_unprepare(host->tx_l0_sync_clk);
+ clk_disable_unprepare(host->rx_l1_sync_clk);
+ clk_disable_unprepare(host->rx_l0_sync_clk);
+
+ host->is_lane_clks_enabled = false;
+}
+
+static int ufs_qcom_enable_lane_clks(struct ufs_qcom_host *host)
+{
+ int err = 0;
+ struct device *dev = host->hba->dev;
+
+ if (host->is_lane_clks_enabled)
+ return 0;
+
+ err = ufs_qcom_host_clk_enable(dev, "rx_lane0_sync_clk",
+ host->rx_l0_sync_clk);
+ if (err)
+ goto out;
+
+ err = ufs_qcom_host_clk_enable(dev, "tx_lane0_sync_clk",
+ host->tx_l0_sync_clk);
+ if (err)
+ goto disable_rx_l0;
+
+ err = ufs_qcom_host_clk_enable(dev, "rx_lane1_sync_clk",
+ host->rx_l1_sync_clk);
+ if (err)
+ goto disable_tx_l0;
+
+ err = ufs_qcom_host_clk_enable(dev, "tx_lane1_sync_clk",
+ host->tx_l1_sync_clk);
+ if (err)
+ goto disable_rx_l1;
+
+ host->is_lane_clks_enabled = true;
+ goto out;
+
+disable_rx_l1:
+ clk_disable_unprepare(host->rx_l1_sync_clk);
+disable_tx_l0:
+ clk_disable_unprepare(host->tx_l0_sync_clk);
+disable_rx_l0:
+ clk_disable_unprepare(host->rx_l0_sync_clk);
+out:
+ return err;
+}
+
+static int ufs_qcom_init_lane_clks(struct ufs_qcom_host *host)
+{
+ int err = 0;
+ struct device *dev = host->hba->dev;
+
+ if (has_acpi_companion(dev))
+ return 0;
+
+ err = ufs_qcom_host_clk_get(dev, "rx_lane0_sync_clk",
+ &host->rx_l0_sync_clk, false);
+ if (err)
+ goto out;
+
+ err = ufs_qcom_host_clk_get(dev, "tx_lane0_sync_clk",
+ &host->tx_l0_sync_clk, false);
+ if (err)
+ goto out;
+
+ /* In case of single lane per direction, don't read lane1 clocks */
+ if (host->hba->lanes_per_direction > 1) {
+ err = ufs_qcom_host_clk_get(dev, "rx_lane1_sync_clk",
+ &host->rx_l1_sync_clk, false);
+ if (err)
+ goto out;
+
+ err = ufs_qcom_host_clk_get(dev, "tx_lane1_sync_clk",
+ &host->tx_l1_sync_clk, true);
+ }
+out:
+ return err;
+}
+
+static int ufs_qcom_check_hibern8(struct ufs_hba *hba)
+{
+ int err;
+ u32 tx_fsm_val = 0;
+ unsigned long timeout = jiffies + msecs_to_jiffies(HBRN8_POLL_TOUT_MS);
+
+ do {
+ err = ufshcd_dme_get(hba,
+ UIC_ARG_MIB_SEL(MPHY_TX_FSM_STATE,
+ UIC_ARG_MPHY_TX_GEN_SEL_INDEX(0)),
+ &tx_fsm_val);
+ if (err || tx_fsm_val == TX_FSM_HIBERN8)
+ break;
+
+ /* sleep for max. 200us */
+ usleep_range(100, 200);
+ } while (time_before(jiffies, timeout));
+
+	/*
+	 * We might have been scheduled out for a long time during polling,
+	 * so check the state one final time.
+	 */
+ if (time_after(jiffies, timeout))
+ err = ufshcd_dme_get(hba,
+ UIC_ARG_MIB_SEL(MPHY_TX_FSM_STATE,
+ UIC_ARG_MPHY_TX_GEN_SEL_INDEX(0)),
+ &tx_fsm_val);
+
+ if (err) {
+ dev_err(hba->dev, "%s: unable to get TX_FSM_STATE, err %d\n",
+ __func__, err);
+ } else if (tx_fsm_val != TX_FSM_HIBERN8) {
+ err = tx_fsm_val;
+ dev_err(hba->dev, "%s: invalid TX_FSM_STATE = %d\n",
+ __func__, err);
+ }
+
+ return err;
+}
+
+static void ufs_qcom_select_unipro_mode(struct ufs_qcom_host *host)
+{
+ ufshcd_rmwl(host->hba, QUNIPRO_SEL,
+ ufs_qcom_cap_qunipro(host) ? QUNIPRO_SEL : 0,
+ REG_UFS_CFG1);
+ /* make sure above configuration is applied before we return */
+ mb();
+}
+
+/*
+ * ufs_qcom_host_reset - reset host controller and PHY
+ */
+static int ufs_qcom_host_reset(struct ufs_hba *hba)
+{
+ int ret = 0;
+ struct ufs_qcom_host *host = ufshcd_get_variant(hba);
+ bool reenable_intr = false;
+
+ if (!host->core_reset) {
+ dev_warn(hba->dev, "%s: reset control not set\n", __func__);
+ goto out;
+ }
+
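+	/*
+	 * Keep the controller IRQ disabled across the reset so that no
+	 * interrupt is serviced while the controller is being reset.
+	 */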
+ reenable_intr = hba->is_irq_enabled;
+ disable_irq(hba->irq);
+ hba->is_irq_enabled = false;
+
+ ret = reset_control_assert(host->core_reset);
+ if (ret) {
+ dev_err(hba->dev, "%s: core_reset assert failed, err = %d\n",
+ __func__, ret);
+ goto out;
+ }
+
+	/*
+	 * The hardware requires a delay of at least 3-4 sleep clock
+	 * (32.768 kHz) cycles between assert and deassert, which comes to
+	 * ~125 us (4/32768). Add a 200 us delay to be on the safe side.
+	 */
+ usleep_range(200, 210);
+
+ ret = reset_control_deassert(host->core_reset);
+ if (ret)
+ dev_err(hba->dev, "%s: core_reset deassert failed, err = %d\n",
+ __func__, ret);
+
+ usleep_range(1000, 1100);
+
+ if (reenable_intr) {
+ enable_irq(hba->irq);
+ hba->is_irq_enabled = true;
+ }
+
+out:
+ return ret;
+}
+
+static int ufs_qcom_power_up_sequence(struct ufs_hba *hba)
+{
+ struct ufs_qcom_host *host = ufshcd_get_variant(hba);
+ struct phy *phy = host->generic_phy;
+ int ret = 0;
+ bool is_rate_B = UFS_QCOM_LIMIT_HS_RATE == PA_HS_MODE_B;
+
+ /* Reset UFS Host Controller and PHY */
+ ret = ufs_qcom_host_reset(hba);
+ if (ret)
+ dev_warn(hba->dev, "%s: host reset returned %d\n",
+ __func__, ret);
+
+ if (is_rate_B)
+ phy_set_mode(phy, PHY_MODE_UFS_HS_B);
+
+ /* phy initialization - calibrate the phy */
+ ret = phy_init(phy);
+ if (ret) {
+ dev_err(hba->dev, "%s: phy init failed, ret = %d\n",
+ __func__, ret);
+ goto out;
+ }
+
+ /* power on phy - start serdes and phy's power and clocks */
+ ret = phy_power_on(phy);
+ if (ret) {
+ dev_err(hba->dev, "%s: phy power on failed, ret = %d\n",
+ __func__, ret);
+ goto out_disable_phy;
+ }
+
+ ufs_qcom_select_unipro_mode(host);
+
+ return 0;
+
+out_disable_phy:
+ phy_exit(phy);
+out:
+ return ret;
+}
+
+/*
+ * The UTP controller has a number of internal clock gating cells (CGCs),
+ * controlled by hardware sub-modules within the UTP controller. These CGCs
+ * gate the clock to UTP sub-modules that are not involved in the current
+ * operation. The CGCs are disabled by default, so this function enables them
+ * (after every UFS link startup) to reduce power leakage.
+ */
+static void ufs_qcom_enable_hw_clk_gating(struct ufs_hba *hba)
+{
+ ufshcd_writel(hba,
+ ufshcd_readl(hba, REG_UFS_CFG2) | REG_UFS_CFG2_CGC_EN_ALL,
+ REG_UFS_CFG2);
+
+ /* Ensure that HW clock gating is enabled before next operations */
+ mb();
+}
+
+static int ufs_qcom_hce_enable_notify(struct ufs_hba *hba,
+ enum ufs_notify_change_status status)
+{
+ struct ufs_qcom_host *host = ufshcd_get_variant(hba);
+ int err = 0;
+
+ switch (status) {
+ case PRE_CHANGE:
+ ufs_qcom_power_up_sequence(hba);
+ /*
+ * The PHY PLL output is the source of tx/rx lane symbol
+ * clocks, hence, enable the lane clocks only after PHY
+ * is initialized.
+ */
+ err = ufs_qcom_enable_lane_clks(host);
+ break;
+ case POST_CHANGE:
+ /* check if UFS PHY moved from DISABLED to HIBERN8 */
+ err = ufs_qcom_check_hibern8(hba);
+ ufs_qcom_enable_hw_clk_gating(hba);
+ ufs_qcom_ice_enable(host);
+ break;
+ default:
+ dev_err(hba->dev, "%s: invalid status %d\n", __func__, status);
+ err = -EINVAL;
+ break;
+ }
+ return err;
+}
+
+/*
+ * Returns zero for success and non-zero in case of a failure
+ */
+static int ufs_qcom_cfg_timers(struct ufs_hba *hba, u32 gear,
+ u32 hs, u32 rate, bool update_link_startup_timer)
+{
+ int ret = 0;
+ struct ufs_qcom_host *host = ufshcd_get_variant(hba);
+ struct ufs_clk_info *clki;
+ u32 core_clk_period_in_ns;
+ u32 tx_clk_cycles_per_us = 0;
+ unsigned long core_clk_rate = 0;
+ u32 core_clk_cycles_per_us = 0;
+
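+	/*
+	 * Each table maps a gear to the TX symbol-clock cycles per
+	 * microsecond value programmed below; the entries are
+	 * vendor-provided.
+	 */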
+ static u32 pwm_fr_table[][2] = {
+ {UFS_PWM_G1, 0x1},
+ {UFS_PWM_G2, 0x1},
+ {UFS_PWM_G3, 0x1},
+ {UFS_PWM_G4, 0x1},
+ };
+
+ static u32 hs_fr_table_rA[][2] = {
+ {UFS_HS_G1, 0x1F},
+ {UFS_HS_G2, 0x3e},
+ {UFS_HS_G3, 0x7D},
+ };
+
+ static u32 hs_fr_table_rB[][2] = {
+ {UFS_HS_G1, 0x24},
+ {UFS_HS_G2, 0x49},
+ {UFS_HS_G3, 0x92},
+ };
+
+	/*
+	 * The QUniPro controller does not use the following registers:
+	 * SYS1CLK_1US_REG, TX_SYMBOL_CLK_1US_REG, CLK_NS_REG and
+	 * UFS_REG_PA_LINK_STARTUP_TIMER.
+	 * However, the UTP controller uses the SYS1CLK_1US_REG register for
+	 * its interrupt aggregation logic.
+	 */
+ if (ufs_qcom_cap_qunipro(host) && !ufshcd_is_intr_aggr_allowed(hba))
+ goto out;
+
+ if (gear == 0) {
+ dev_err(hba->dev, "%s: invalid gear = %d\n", __func__, gear);
+ goto out_error;
+ }
+
+ list_for_each_entry(clki, &hba->clk_list_head, list) {
+ if (!strcmp(clki->name, "core_clk"))
+ core_clk_rate = clk_get_rate(clki->clk);
+ }
+
+ /* If frequency is smaller than 1MHz, set to 1MHz */
+ if (core_clk_rate < DEFAULT_CLK_RATE_HZ)
+ core_clk_rate = DEFAULT_CLK_RATE_HZ;
+
+ core_clk_cycles_per_us = core_clk_rate / USEC_PER_SEC;
+ if (ufshcd_readl(hba, REG_UFS_SYS1CLK_1US) != core_clk_cycles_per_us) {
+ ufshcd_writel(hba, core_clk_cycles_per_us, REG_UFS_SYS1CLK_1US);
+ /*
+ * make sure above write gets applied before we return from
+ * this function.
+ */
+ mb();
+ }
+
+ if (ufs_qcom_cap_qunipro(host))
+ goto out;
+
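+	/*
+	 * Compute the core clock period in ns and position it in the
+	 * CLK_NS_REG field (e.g. a 150 MHz core clock gives a 6 ns period).
+	 */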
+ core_clk_period_in_ns = NSEC_PER_SEC / core_clk_rate;
+ core_clk_period_in_ns <<= OFFSET_CLK_NS_REG;
+ core_clk_period_in_ns &= MASK_CLK_NS_REG;
+
+ switch (hs) {
+ case FASTAUTO_MODE:
+ case FAST_MODE:
+ if (rate == PA_HS_MODE_A) {
+ if (gear > ARRAY_SIZE(hs_fr_table_rA)) {
+ dev_err(hba->dev,
+ "%s: index %d exceeds table size %zu\n",
+ __func__, gear,
+ ARRAY_SIZE(hs_fr_table_rA));
+ goto out_error;
+ }
+ tx_clk_cycles_per_us = hs_fr_table_rA[gear-1][1];
+ } else if (rate == PA_HS_MODE_B) {
+ if (gear > ARRAY_SIZE(hs_fr_table_rB)) {
+ dev_err(hba->dev,
+ "%s: index %d exceeds table size %zu\n",
+ __func__, gear,
+ ARRAY_SIZE(hs_fr_table_rB));
+ goto out_error;
+ }
+ tx_clk_cycles_per_us = hs_fr_table_rB[gear-1][1];
+ } else {
+ dev_err(hba->dev, "%s: invalid rate = %d\n",
+ __func__, rate);
+ goto out_error;
+ }
+ break;
+ case SLOWAUTO_MODE:
+ case SLOW_MODE:
+ if (gear > ARRAY_SIZE(pwm_fr_table)) {
+ dev_err(hba->dev,
+ "%s: index %d exceeds table size %zu\n",
+ __func__, gear,
+ ARRAY_SIZE(pwm_fr_table));
+ goto out_error;
+ }
+ tx_clk_cycles_per_us = pwm_fr_table[gear-1][1];
+ break;
+ case UNCHANGED:
+ default:
+ dev_err(hba->dev, "%s: invalid mode = %d\n", __func__, hs);
+ goto out_error;
+ }
+
+ if (ufshcd_readl(hba, REG_UFS_TX_SYMBOL_CLK_NS_US) !=
+ (core_clk_period_in_ns | tx_clk_cycles_per_us)) {
+		/* The two fields of this register shall be written at once */
+ ufshcd_writel(hba, core_clk_period_in_ns | tx_clk_cycles_per_us,
+ REG_UFS_TX_SYMBOL_CLK_NS_US);
+ /*
+ * make sure above write gets applied before we return from
+ * this function.
+ */
+ mb();
+ }
+
+ if (update_link_startup_timer) {
+ ufshcd_writel(hba, ((core_clk_rate / MSEC_PER_SEC) * 100),
+ REG_UFS_PA_LINK_STARTUP_TIMER);
+ /*
+ * make sure that this configuration is applied before
+ * we return
+ */
+ mb();
+ }
+ goto out;
+
+out_error:
+ ret = -EINVAL;
+out:
+ return ret;
+}
+
+static int ufs_qcom_link_startup_notify(struct ufs_hba *hba,
+ enum ufs_notify_change_status status)
+{
+ int err = 0;
+ struct ufs_qcom_host *host = ufshcd_get_variant(hba);
+
+ switch (status) {
+ case PRE_CHANGE:
+ if (ufs_qcom_cfg_timers(hba, UFS_PWM_G1, SLOWAUTO_MODE,
+ 0, true)) {
+ dev_err(hba->dev, "%s: ufs_qcom_cfg_timers() failed\n",
+ __func__);
+ err = -EINVAL;
+ goto out;
+ }
+
+ if (ufs_qcom_cap_qunipro(host))
+ /*
+ * set unipro core clock cycles to 150 & clear clock
+ * divider
+ */
+ err = ufs_qcom_set_dme_vs_core_clk_ctrl_clear_div(hba,
+ 150);
+
+		/*
+		 * Some UFS devices (and maybe hosts) have issues if LCC is
+		 * enabled. So we are setting PA_Local_TX_LCC_Enable to 0
+		 * before link startup, which will make sure that both host
+		 * and device TX LCC are disabled once link startup is
+		 * completed.
+		 */
+ if (ufshcd_get_local_unipro_ver(hba) != UFS_UNIPRO_VER_1_41)
+ err = ufshcd_disable_host_tx_lcc(hba);
+
+ break;
+ default:
+ break;
+ }
+
+out:
+ return err;
+}
+
+static void ufs_qcom_device_reset_ctrl(struct ufs_hba *hba, bool asserted)
+{
+ struct ufs_qcom_host *host = ufshcd_get_variant(hba);
+
+ /* reset gpio is optional */
+ if (!host->device_reset)
+ return;
+
+ gpiod_set_value_cansleep(host->device_reset, asserted);
+}
+
+static int ufs_qcom_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op,
+ enum ufs_notify_change_status status)
+{
+ struct ufs_qcom_host *host = ufshcd_get_variant(hba);
+ struct phy *phy = host->generic_phy;
+
+ if (status == PRE_CHANGE)
+ return 0;
+
+ if (ufs_qcom_is_link_off(hba)) {
+ /*
+ * Disable the tx/rx lane symbol clocks before PHY is
+ * powered down as the PLL source should be disabled
+ * after downstream clocks are disabled.
+ */
+ ufs_qcom_disable_lane_clks(host);
+ phy_power_off(phy);
+
+ /* reset the connected UFS device during power down */
+ ufs_qcom_device_reset_ctrl(hba, true);
+
+ } else if (!ufs_qcom_is_link_active(hba)) {
+ ufs_qcom_disable_lane_clks(host);
+ }
+
+ return 0;
+}
+
+static int ufs_qcom_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
+{
+ struct ufs_qcom_host *host = ufshcd_get_variant(hba);
+ struct phy *phy = host->generic_phy;
+ int err;
+
+ if (ufs_qcom_is_link_off(hba)) {
+ err = phy_power_on(phy);
+ if (err) {
+ dev_err(hba->dev, "%s: failed PHY power on: %d\n",
+ __func__, err);
+ return err;
+ }
+
+ err = ufs_qcom_enable_lane_clks(host);
+ if (err)
+ return err;
+
+ } else if (!ufs_qcom_is_link_active(hba)) {
+ err = ufs_qcom_enable_lane_clks(host);
+ if (err)
+ return err;
+ }
+
+ return ufs_qcom_ice_resume(host);
+}
+
+static void ufs_qcom_dev_ref_clk_ctrl(struct ufs_qcom_host *host, bool enable)
+{
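+	/* Act only when the requested state differs from the cached state. */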
+ if (host->dev_ref_clk_ctrl_mmio &&
+ (enable ^ host->is_dev_ref_clk_enabled)) {
+ u32 temp = readl_relaxed(host->dev_ref_clk_ctrl_mmio);
+
+ if (enable)
+ temp |= host->dev_ref_clk_en_mask;
+ else
+ temp &= ~host->dev_ref_clk_en_mask;
+
+		/*
+		 * If we are here to disable this clock, it might be
+		 * immediately after entering hibern8, in which case the
+		 * device ref_clk must be kept active for a specific time
+		 * after hibern8 entry.
+		 */
+ if (!enable) {
+ unsigned long gating_wait;
+
+ gating_wait = host->hba->dev_info.clk_gating_wait_us;
+ if (!gating_wait) {
+ udelay(1);
+ } else {
+				/*
+				 * bRefClkGatingWaitTime defines the minimum
+				 * time for which the reference clock is
+				 * required by the device during the transition
+				 * from HS-MODE to LS-MODE or HIBERN8 state.
+				 * Add some extra delay to be on the safe side.
+				 */
+ gating_wait += 10;
+ usleep_range(gating_wait, gating_wait + 10);
+ }
+ }
+
+ writel_relaxed(temp, host->dev_ref_clk_ctrl_mmio);
+
+		/*
+		 * Make sure the write to ref_clk reaches the destination and
+		 * is not left sitting in a write buffer.
+		 */
+ readl(host->dev_ref_clk_ctrl_mmio);
+
+ /*
+ * If we call hibern8 exit after this, we need to make sure that
+ * device ref_clk is stable for at least 1us before the hibern8
+ * exit command.
+ */
+ if (enable)
+ udelay(1);
+
+ host->is_dev_ref_clk_enabled = enable;
+ }
+}
+
+static int ufs_qcom_pwr_change_notify(struct ufs_hba *hba,
+ enum ufs_notify_change_status status,
+ struct ufs_pa_layer_attr *dev_max_params,
+ struct ufs_pa_layer_attr *dev_req_params)
+{
+ struct ufs_qcom_host *host = ufshcd_get_variant(hba);
+ struct ufs_dev_params ufs_qcom_cap;
+ int ret = 0;
+
+ if (!dev_req_params) {
+ pr_err("%s: incoming dev_req_params is NULL\n", __func__);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ switch (status) {
+ case PRE_CHANGE:
+ ufshcd_init_pwr_dev_param(&ufs_qcom_cap);
+ ufs_qcom_cap.hs_rate = UFS_QCOM_LIMIT_HS_RATE;
+
+ if (host->hw_ver.major == 0x1) {
+ /*
+ * HS-G3 operations may not reliably work on legacy QCOM
+ * UFS host controller hardware even though capability
+ * exchange during link startup phase may end up
+ * negotiating maximum supported gear as G3.
+ * Hence downgrade the maximum supported gear to HS-G2.
+ */
+ if (ufs_qcom_cap.hs_tx_gear > UFS_HS_G2)
+ ufs_qcom_cap.hs_tx_gear = UFS_HS_G2;
+ if (ufs_qcom_cap.hs_rx_gear > UFS_HS_G2)
+ ufs_qcom_cap.hs_rx_gear = UFS_HS_G2;
+ }
+
+ ret = ufshcd_get_pwr_dev_param(&ufs_qcom_cap,
+ dev_max_params,
+ dev_req_params);
+ if (ret) {
+ pr_err("%s: failed to determine capabilities\n",
+ __func__);
+ goto out;
+ }
+
+ /* enable the device ref clock before changing to HS mode */
+ if (!ufshcd_is_hs_mode(&hba->pwr_info) &&
+ ufshcd_is_hs_mode(dev_req_params))
+ ufs_qcom_dev_ref_clk_ctrl(host, true);
+
+ if (host->hw_ver.major >= 0x4) {
+ ufshcd_dme_configure_adapt(hba,
+ dev_req_params->gear_tx,
+ PA_INITIAL_ADAPT);
+ }
+ break;
+ case POST_CHANGE:
+ if (ufs_qcom_cfg_timers(hba, dev_req_params->gear_rx,
+ dev_req_params->pwr_rx,
+ dev_req_params->hs_rate, false)) {
+ dev_err(hba->dev, "%s: ufs_qcom_cfg_timers() failed\n",
+ __func__);
+			/*
+			 * We return the error code at the end of this routine
+			 * but continue with the remaining configuration
+			 * (caching the power mode parameters and managing the
+			 * device ref clock) as usual.
+			 */
+ ret = -EINVAL;
+ }
+
+ /* cache the power mode parameters to use internally */
+ memcpy(&host->dev_req_params,
+ dev_req_params, sizeof(*dev_req_params));
+
+ /* disable the device ref clock if entered PWM mode */
+ if (ufshcd_is_hs_mode(&hba->pwr_info) &&
+ !ufshcd_is_hs_mode(dev_req_params))
+ ufs_qcom_dev_ref_clk_ctrl(host, false);
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+out:
+ return ret;
+}
+
+static int ufs_qcom_quirk_host_pa_saveconfigtime(struct ufs_hba *hba)
+{
+ int err;
+ u32 pa_vs_config_reg1;
+
+ err = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_VS_CONFIG_REG1),
+ &pa_vs_config_reg1);
+ if (err)
+ goto out;
+
+ /* Allow extension of MSB bits of PA_SaveConfigTime attribute */
+ err = ufshcd_dme_set(hba, UIC_ARG_MIB(PA_VS_CONFIG_REG1),
+ (pa_vs_config_reg1 | (1 << 12)));
+
+out:
+ return err;
+}
+
+static int ufs_qcom_apply_dev_quirks(struct ufs_hba *hba)
+{
+ int err = 0;
+
+ if (hba->dev_quirks & UFS_DEVICE_QUIRK_HOST_PA_SAVECONFIGTIME)
+ err = ufs_qcom_quirk_host_pa_saveconfigtime(hba);
+
+ if (hba->dev_info.wmanufacturerid == UFS_VENDOR_WDC)
+ hba->dev_quirks |= UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE;
+
+ return err;
+}
+
+static u32 ufs_qcom_get_ufs_hci_version(struct ufs_hba *hba)
+{
+ struct ufs_qcom_host *host = ufshcd_get_variant(hba);
+
+ if (host->hw_ver.major == 0x1)
+ return ufshci_version(1, 1);
+ else
+ return ufshci_version(2, 0);
+}
+
+/**
+ * ufs_qcom_advertise_quirks - advertise the known QCOM UFS controller quirks
+ * @hba: host controller instance
+ *
+ * The QCOM UFS host controller may exhibit some non-standard behaviours
+ * (quirks) relative to what the UFSHCI specification requires. Advertise all
+ * such quirks to the standard UFS host controller driver so that it takes
+ * them into account.
+ */
+static void ufs_qcom_advertise_quirks(struct ufs_hba *hba)
+{
+ struct ufs_qcom_host *host = ufshcd_get_variant(hba);
+
+ if (host->hw_ver.major == 0x01) {
+ hba->quirks |= UFSHCD_QUIRK_DELAY_BEFORE_DME_CMDS
+ | UFSHCD_QUIRK_BROKEN_PA_RXHSUNTERMCAP
+ | UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE;
+
+ if (host->hw_ver.minor == 0x0001 && host->hw_ver.step == 0x0001)
+ hba->quirks |= UFSHCD_QUIRK_BROKEN_INTR_AGGR;
+
+ hba->quirks |= UFSHCD_QUIRK_BROKEN_LCC;
+ }
+
+ if (host->hw_ver.major == 0x2) {
+ hba->quirks |= UFSHCD_QUIRK_BROKEN_UFS_HCI_VERSION;
+
+ if (!ufs_qcom_cap_qunipro(host))
+			/* Legacy UniPro mode still needs the following quirks */
+ hba->quirks |= (UFSHCD_QUIRK_DELAY_BEFORE_DME_CMDS
+ | UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE
+ | UFSHCD_QUIRK_BROKEN_PA_RXHSUNTERMCAP);
+ }
+}
+
+static void ufs_qcom_set_caps(struct ufs_hba *hba)
+{
+ struct ufs_qcom_host *host = ufshcd_get_variant(hba);
+
+ hba->caps |= UFSHCD_CAP_CLK_GATING | UFSHCD_CAP_HIBERN8_WITH_CLK_GATING;
+ hba->caps |= UFSHCD_CAP_CLK_SCALING | UFSHCD_CAP_WB_WITH_CLK_SCALING;
+ hba->caps |= UFSHCD_CAP_AUTO_BKOPS_SUSPEND;
+ hba->caps |= UFSHCD_CAP_WB_EN;
+ hba->caps |= UFSHCD_CAP_CRYPTO;
+ hba->caps |= UFSHCD_CAP_AGGR_POWER_COLLAPSE;
+ hba->caps |= UFSHCD_CAP_RPM_AUTOSUSPEND;
+
+ if (host->hw_ver.major >= 0x2) {
+ host->caps = UFS_QCOM_CAP_QUNIPRO |
+ UFS_QCOM_CAP_RETAIN_SEC_CFG_AFTER_PWR_COLLAPSE;
+ }
+}
+
+/**
+ * ufs_qcom_setup_clocks - enable/disable clocks
+ * @hba: host controller instance
+ * @on: If true, enable clocks else disable them.
+ * @status: PRE_CHANGE or POST_CHANGE notify
+ *
+ * Returns 0 on success, non-zero on failure.
+ */
+static int ufs_qcom_setup_clocks(struct ufs_hba *hba, bool on,
+ enum ufs_notify_change_status status)
+{
+ struct ufs_qcom_host *host = ufshcd_get_variant(hba);
+
+	/*
+	 * In case ufs_qcom_init() is not yet done, simply ignore this call;
+	 * ufs_qcom_setup_clocks() will be invoked again from ufs_qcom_init()
+	 * once initialization is done.
+	 */
+ if (!host)
+ return 0;
+
+ switch (status) {
+ case PRE_CHANGE:
+ if (!on) {
+ if (!ufs_qcom_is_link_active(hba)) {
+ /* disable device ref_clk */
+ ufs_qcom_dev_ref_clk_ctrl(host, false);
+ }
+ }
+ break;
+ case POST_CHANGE:
+ if (on) {
+ /* enable the device ref clock for HS mode*/
+ if (ufshcd_is_hs_mode(&hba->pwr_info))
+ ufs_qcom_dev_ref_clk_ctrl(host, true);
+ }
+ break;
+ }
+
+ return 0;
+}
+
+static int
+ufs_qcom_reset_assert(struct reset_controller_dev *rcdev, unsigned long id)
+{
+ struct ufs_qcom_host *host = rcdev_to_ufs_host(rcdev);
+
+ /* Currently this code only knows about a single reset. */
+ WARN_ON(id);
+ ufs_qcom_assert_reset(host->hba);
+ /* provide 1ms delay to let the reset pulse propagate. */
+ usleep_range(1000, 1100);
+ return 0;
+}
+
+static int
+ufs_qcom_reset_deassert(struct reset_controller_dev *rcdev, unsigned long id)
+{
+ struct ufs_qcom_host *host = rcdev_to_ufs_host(rcdev);
+
+ /* Currently this code only knows about a single reset. */
+ WARN_ON(id);
+ ufs_qcom_deassert_reset(host->hba);
+
+	/*
+	 * After reset deassertion, the PHY needs all reference clocks,
+	 * voltages and currents to settle before the serdes can be started.
+	 */
+ usleep_range(1000, 1100);
+ return 0;
+}
+
+static const struct reset_control_ops ufs_qcom_reset_ops = {
+ .assert = ufs_qcom_reset_assert,
+ .deassert = ufs_qcom_reset_deassert,
+};
+
+/**
+ * ufs_qcom_init - bind phy with controller
+ * @hba: host controller instance
+ *
+ * Binds the PHY with the controller and powers up the PHY, enabling its
+ * clocks and regulators.
+ *
+ * Returns -EPROBE_DEFER if binding fails, a negative error code on PHY
+ * power-up failure, and zero on success.
+ */
+static int ufs_qcom_init(struct ufs_hba *hba)
+{
+ int err;
+ struct device *dev = hba->dev;
+ struct platform_device *pdev = to_platform_device(dev);
+ struct ufs_qcom_host *host;
+ struct resource *res;
+ struct ufs_clk_info *clki;
+
+ host = devm_kzalloc(dev, sizeof(*host), GFP_KERNEL);
+ if (!host) {
+ err = -ENOMEM;
+ dev_err(dev, "%s: no memory for qcom ufs host\n", __func__);
+ goto out;
+ }
+
+ /* Make a two way bind between the qcom host and the hba */
+ host->hba = hba;
+ ufshcd_set_variant(hba, host);
+
+ /* Setup the optional reset control of HCI */
+ host->core_reset = devm_reset_control_get_optional(hba->dev, "rst");
+ if (IS_ERR(host->core_reset)) {
+ err = dev_err_probe(dev, PTR_ERR(host->core_reset),
+ "Failed to get reset control\n");
+ goto out_variant_clear;
+ }
+
+ /* Fire up the reset controller. Failure here is non-fatal. */
+ host->rcdev.of_node = dev->of_node;
+ host->rcdev.ops = &ufs_qcom_reset_ops;
+ host->rcdev.owner = dev->driver->owner;
+ host->rcdev.nr_resets = 1;
+ err = devm_reset_controller_register(dev, &host->rcdev);
+ if (err) {
+ dev_warn(dev, "Failed to register reset controller\n");
+ err = 0;
+ }
+
+ if (!has_acpi_companion(dev)) {
+ host->generic_phy = devm_phy_get(dev, "ufsphy");
+ if (IS_ERR(host->generic_phy)) {
+ err = dev_err_probe(dev, PTR_ERR(host->generic_phy), "Failed to get PHY\n");
+ goto out_variant_clear;
+ }
+ }
+
+ host->device_reset = devm_gpiod_get_optional(dev, "reset",
+ GPIOD_OUT_HIGH);
+ if (IS_ERR(host->device_reset)) {
+ err = PTR_ERR(host->device_reset);
+ if (err != -EPROBE_DEFER)
+ dev_err(dev, "failed to acquire reset gpio: %d\n", err);
+ goto out_variant_clear;
+ }
+
+ ufs_qcom_get_controller_revision(hba, &host->hw_ver.major,
+ &host->hw_ver.minor, &host->hw_ver.step);
+
+	/*
+	 * For newer controllers, the device reference clock control bit has
+	 * moved into the UFS controller's own register address space.
+	 */
+ if (host->hw_ver.major >= 0x02) {
+ host->dev_ref_clk_ctrl_mmio = hba->mmio_base + REG_UFS_CFG1;
+ host->dev_ref_clk_en_mask = BIT(26);
+ } else {
+ /* "dev_ref_clk_ctrl_mem" is optional resource */
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ "dev_ref_clk_ctrl_mem");
+ if (res) {
+ host->dev_ref_clk_ctrl_mmio =
+ devm_ioremap_resource(dev, res);
+ if (IS_ERR(host->dev_ref_clk_ctrl_mmio))
+ host->dev_ref_clk_ctrl_mmio = NULL;
+ host->dev_ref_clk_en_mask = BIT(5);
+ }
+ }
+
+ list_for_each_entry(clki, &hba->clk_list_head, list) {
+ if (!strcmp(clki->name, "core_clk_unipro"))
+ clki->keep_link_active = true;
+ }
+
+ err = ufs_qcom_init_lane_clks(host);
+ if (err)
+ goto out_variant_clear;
+
+ ufs_qcom_set_caps(hba);
+ ufs_qcom_advertise_quirks(hba);
+
+ err = ufs_qcom_ice_init(host);
+ if (err)
+ goto out_variant_clear;
+
+ ufs_qcom_setup_clocks(hba, true, POST_CHANGE);
+
+ if (hba->dev->id < MAX_UFS_QCOM_HOSTS)
+ ufs_qcom_hosts[hba->dev->id] = host;
+
+ host->dbg_print_en |= UFS_QCOM_DEFAULT_DBG_PRINT_EN;
+ ufs_qcom_get_default_testbus_cfg(host);
+ err = ufs_qcom_testbus_config(host);
+ if (err) {
+ dev_warn(dev, "%s: failed to configure the testbus %d\n",
+ __func__, err);
+ err = 0;
+ }
+
+ goto out;
+
+out_variant_clear:
+ ufshcd_set_variant(hba, NULL);
+out:
+ return err;
+}
+
+static void ufs_qcom_exit(struct ufs_hba *hba)
+{
+ struct ufs_qcom_host *host = ufshcd_get_variant(hba);
+
+ ufs_qcom_disable_lane_clks(host);
+ phy_power_off(host->generic_phy);
+ phy_exit(host->generic_phy);
+}
+
+static int ufs_qcom_set_dme_vs_core_clk_ctrl_clear_div(struct ufs_hba *hba,
+ u32 clk_cycles)
+{
+ int err;
+ u32 core_clk_ctrl_reg;
+
+ if (clk_cycles > DME_VS_CORE_CLK_CTRL_MAX_CORE_CLK_1US_CYCLES_MASK)
+ return -EINVAL;
+
+ err = ufshcd_dme_get(hba,
+ UIC_ARG_MIB(DME_VS_CORE_CLK_CTRL),
+ &core_clk_ctrl_reg);
+ if (err)
+ goto out;
+
+ core_clk_ctrl_reg &= ~DME_VS_CORE_CLK_CTRL_MAX_CORE_CLK_1US_CYCLES_MASK;
+ core_clk_ctrl_reg |= clk_cycles;
+
+ /* Clear CORE_CLK_DIV_EN */
+ core_clk_ctrl_reg &= ~DME_VS_CORE_CLK_CTRL_CORE_CLK_DIV_EN_BIT;
+
+ err = ufshcd_dme_set(hba,
+ UIC_ARG_MIB(DME_VS_CORE_CLK_CTRL),
+ core_clk_ctrl_reg);
+out:
+ return err;
+}
+
+static int ufs_qcom_clk_scale_up_pre_change(struct ufs_hba *hba)
+{
+ /* nothing to do as of now */
+ return 0;
+}
+
+static int ufs_qcom_clk_scale_up_post_change(struct ufs_hba *hba)
+{
+ struct ufs_qcom_host *host = ufshcd_get_variant(hba);
+
+ if (!ufs_qcom_cap_qunipro(host))
+ return 0;
+
+ /* set unipro core clock cycles to 150 and clear clock divider */
+ return ufs_qcom_set_dme_vs_core_clk_ctrl_clear_div(hba, 150);
+}
+
+static int ufs_qcom_clk_scale_down_pre_change(struct ufs_hba *hba)
+{
+ struct ufs_qcom_host *host = ufshcd_get_variant(hba);
+ int err;
+ u32 core_clk_ctrl_reg;
+
+ if (!ufs_qcom_cap_qunipro(host))
+ return 0;
+
+ err = ufshcd_dme_get(hba,
+ UIC_ARG_MIB(DME_VS_CORE_CLK_CTRL),
+ &core_clk_ctrl_reg);
+
+ /* make sure CORE_CLK_DIV_EN is cleared */
+ if (!err &&
+ (core_clk_ctrl_reg & DME_VS_CORE_CLK_CTRL_CORE_CLK_DIV_EN_BIT)) {
+ core_clk_ctrl_reg &= ~DME_VS_CORE_CLK_CTRL_CORE_CLK_DIV_EN_BIT;
+ err = ufshcd_dme_set(hba,
+ UIC_ARG_MIB(DME_VS_CORE_CLK_CTRL),
+ core_clk_ctrl_reg);
+ }
+
+ return err;
+}
+
+static int ufs_qcom_clk_scale_down_post_change(struct ufs_hba *hba)
+{
+ struct ufs_qcom_host *host = ufshcd_get_variant(hba);
+
+ if (!ufs_qcom_cap_qunipro(host))
+ return 0;
+
+ /* set unipro core clock cycles to 75 and clear clock divider */
+ return ufs_qcom_set_dme_vs_core_clk_ctrl_clear_div(hba, 75);
+}
+
+static int ufs_qcom_clk_scale_notify(struct ufs_hba *hba,
+ bool scale_up, enum ufs_notify_change_status status)
+{
+ struct ufs_qcom_host *host = ufshcd_get_variant(hba);
+ struct ufs_pa_layer_attr *dev_req_params = &host->dev_req_params;
+ int err = 0;
+
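+	/*
+	 * The link is put into hibern8 for the duration of the scaling
+	 * operation so the UniPro core clock attributes can be changed
+	 * safely.
+	 */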
+ if (status == PRE_CHANGE) {
+ err = ufshcd_uic_hibern8_enter(hba);
+ if (err)
+ return err;
+ if (scale_up)
+ err = ufs_qcom_clk_scale_up_pre_change(hba);
+ else
+ err = ufs_qcom_clk_scale_down_pre_change(hba);
+ if (err)
+ ufshcd_uic_hibern8_exit(hba);
+
+ } else {
+ if (scale_up)
+ err = ufs_qcom_clk_scale_up_post_change(hba);
+ else
+ err = ufs_qcom_clk_scale_down_post_change(hba);
+
+ if (err || !dev_req_params) {
+ ufshcd_uic_hibern8_exit(hba);
+ goto out;
+ }
+
+ ufs_qcom_cfg_timers(hba,
+ dev_req_params->gear_rx,
+ dev_req_params->pwr_rx,
+ dev_req_params->hs_rate,
+ false);
+ ufshcd_uic_hibern8_exit(hba);
+ }
+
+out:
+ return err;
+}
+
+static void ufs_qcom_print_hw_debug_reg_all(struct ufs_hba *hba,
+ void *priv, void (*print_fn)(struct ufs_hba *hba,
+ int offset, int num_regs, const char *str, void *priv))
+{
+ u32 reg;
+ struct ufs_qcom_host *host;
+
+ if (unlikely(!hba)) {
+ pr_err("%s: hba is NULL\n", __func__);
+ return;
+ }
+ if (unlikely(!print_fn)) {
+ dev_err(hba->dev, "%s: print_fn is NULL\n", __func__);
+ return;
+ }
+
+ host = ufshcd_get_variant(hba);
+ if (!(host->dbg_print_en & UFS_QCOM_DBG_PRINT_REGS_EN))
+ return;
+
+ reg = ufs_qcom_get_debug_reg_offset(host, UFS_UFS_DBG_RD_REG_OCSC);
+ print_fn(hba, reg, 44, "UFS_UFS_DBG_RD_REG_OCSC ", priv);
+
+ reg = ufshcd_readl(hba, REG_UFS_CFG1);
+ reg |= UTP_DBG_RAMS_EN;
+ ufshcd_writel(hba, reg, REG_UFS_CFG1);
+
+ reg = ufs_qcom_get_debug_reg_offset(host, UFS_UFS_DBG_RD_EDTL_RAM);
+ print_fn(hba, reg, 32, "UFS_UFS_DBG_RD_EDTL_RAM ", priv);
+
+ reg = ufs_qcom_get_debug_reg_offset(host, UFS_UFS_DBG_RD_DESC_RAM);
+ print_fn(hba, reg, 128, "UFS_UFS_DBG_RD_DESC_RAM ", priv);
+
+ reg = ufs_qcom_get_debug_reg_offset(host, UFS_UFS_DBG_RD_PRDT_RAM);
+ print_fn(hba, reg, 64, "UFS_UFS_DBG_RD_PRDT_RAM ", priv);
+
+ /* clear bit 17 - UTP_DBG_RAMS_EN */
+ ufshcd_rmwl(hba, UTP_DBG_RAMS_EN, 0, REG_UFS_CFG1);
+
+ reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_UAWM);
+ print_fn(hba, reg, 4, "UFS_DBG_RD_REG_UAWM ", priv);
+
+ reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_UARM);
+ print_fn(hba, reg, 4, "UFS_DBG_RD_REG_UARM ", priv);
+
+ reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_TXUC);
+ print_fn(hba, reg, 48, "UFS_DBG_RD_REG_TXUC ", priv);
+
+ reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_RXUC);
+ print_fn(hba, reg, 27, "UFS_DBG_RD_REG_RXUC ", priv);
+
+ reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_DFC);
+ print_fn(hba, reg, 19, "UFS_DBG_RD_REG_DFC ", priv);
+
+ reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_TRLUT);
+ print_fn(hba, reg, 34, "UFS_DBG_RD_REG_TRLUT ", priv);
+
+ reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_TMRLUT);
+ print_fn(hba, reg, 9, "UFS_DBG_RD_REG_TMRLUT ", priv);
+}
+
+static void ufs_qcom_enable_test_bus(struct ufs_qcom_host *host)
+{
+ if (host->dbg_print_en & UFS_QCOM_DBG_PRINT_TEST_BUS_EN) {
+ ufshcd_rmwl(host->hba, UFS_REG_TEST_BUS_EN,
+ UFS_REG_TEST_BUS_EN, REG_UFS_CFG1);
+ ufshcd_rmwl(host->hba, TEST_BUS_EN, TEST_BUS_EN, REG_UFS_CFG1);
+ } else {
+ ufshcd_rmwl(host->hba, UFS_REG_TEST_BUS_EN, 0, REG_UFS_CFG1);
+ ufshcd_rmwl(host->hba, TEST_BUS_EN, 0, REG_UFS_CFG1);
+ }
+}
+
+static void ufs_qcom_get_default_testbus_cfg(struct ufs_qcom_host *host)
+{
+ /* provide a legal default configuration */
+ host->testbus.select_major = TSTBUS_UNIPRO;
+ host->testbus.select_minor = 37;
+}
+
+static bool ufs_qcom_testbus_cfg_is_ok(struct ufs_qcom_host *host)
+{
+ if (host->testbus.select_major >= TSTBUS_MAX) {
+ dev_err(host->hba->dev,
+ "%s: UFS_CFG1[TEST_BUS_SEL} may not equal 0x%05X\n",
+ __func__, host->testbus.select_major);
+ return false;
+ }
+
+ return true;
+}
+
+int ufs_qcom_testbus_config(struct ufs_qcom_host *host)
+{
+ int reg;
+ int offset;
+ u32 mask = TEST_BUS_SUB_SEL_MASK;
+
+ if (!host)
+ return -EINVAL;
+
+ if (!ufs_qcom_testbus_cfg_is_ok(host))
+ return -EPERM;
+
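+	/*
+	 * Map the major selector to its control register and to the bit
+	 * offset of the corresponding sub-select field within that register.
+	 */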
+ switch (host->testbus.select_major) {
+ case TSTBUS_UAWM:
+ reg = UFS_TEST_BUS_CTRL_0;
+ offset = 24;
+ break;
+ case TSTBUS_UARM:
+ reg = UFS_TEST_BUS_CTRL_0;
+ offset = 16;
+ break;
+ case TSTBUS_TXUC:
+ reg = UFS_TEST_BUS_CTRL_0;
+ offset = 8;
+ break;
+ case TSTBUS_RXUC:
+ reg = UFS_TEST_BUS_CTRL_0;
+ offset = 0;
+ break;
+ case TSTBUS_DFC:
+ reg = UFS_TEST_BUS_CTRL_1;
+ offset = 24;
+ break;
+ case TSTBUS_TRLUT:
+ reg = UFS_TEST_BUS_CTRL_1;
+ offset = 16;
+ break;
+ case TSTBUS_TMRLUT:
+ reg = UFS_TEST_BUS_CTRL_1;
+ offset = 8;
+ break;
+ case TSTBUS_OCSC:
+ reg = UFS_TEST_BUS_CTRL_1;
+ offset = 0;
+ break;
+ case TSTBUS_WRAPPER:
+ reg = UFS_TEST_BUS_CTRL_2;
+ offset = 16;
+ break;
+ case TSTBUS_COMBINED:
+ reg = UFS_TEST_BUS_CTRL_2;
+ offset = 8;
+ break;
+ case TSTBUS_UTP_HCI:
+ reg = UFS_TEST_BUS_CTRL_2;
+ offset = 0;
+ break;
+ case TSTBUS_UNIPRO:
+ reg = UFS_UNIPRO_CFG;
+ offset = 20;
+ mask = 0xFFF;
+ break;
+ /*
+ * No need for a default case, since
+ * ufs_qcom_testbus_cfg_is_ok() checks that the configuration
+ * is legal
+ */
+ }
+ mask <<= offset;
+ ufshcd_rmwl(host->hba, TEST_BUS_SEL,
+ (u32)host->testbus.select_major << 19,
+ REG_UFS_CFG1);
+ ufshcd_rmwl(host->hba, mask,
+ (u32)host->testbus.select_minor << offset,
+ reg);
+ ufs_qcom_enable_test_bus(host);
+ /*
+ * Make sure the test bus configuration is
+ * committed before returning.
+ */
+ mb();
+
+ return 0;
+}
+
+static void ufs_qcom_dump_dbg_regs(struct ufs_hba *hba)
+{
+ ufshcd_dump_regs(hba, REG_UFS_SYS1CLK_1US, 16 * 4,
+ "HCI Vendor Specific Registers ");
+
+ ufs_qcom_print_hw_debug_reg_all(hba, NULL, ufs_qcom_dump_regs_wrapper);
+}
+
+/**
+ * ufs_qcom_device_reset() - toggle the (optional) device reset line
+ * @hba: per-adapter instance
+ *
+ * Toggles the (optional) reset line to reset the attached device.
+ */
+static int ufs_qcom_device_reset(struct ufs_hba *hba)
+{
+ struct ufs_qcom_host *host = ufshcd_get_variant(hba);
+
+ /* reset gpio is optional */
+ if (!host->device_reset)
+ return -EOPNOTSUPP;
+
+	/*
+	 * The UFS device shall detect reset pulses of 1 us; sleep for 10 us
+	 * to be on the safe side.
+	 */
+ ufs_qcom_device_reset_ctrl(hba, true);
+ usleep_range(10, 15);
+
+ ufs_qcom_device_reset_ctrl(hba, false);
+ usleep_range(10, 15);
+
+ return 0;
+}
+
+#if IS_ENABLED(CONFIG_DEVFREQ_GOV_SIMPLE_ONDEMAND)
+static void ufs_qcom_config_scaling_param(struct ufs_hba *hba,
+ struct devfreq_dev_profile *p,
+ struct devfreq_simple_ondemand_data *d)
+{
+ p->polling_ms = 60;
+ d->upthreshold = 70;
+ d->downdifferential = 5;
+}
+#else
+static void ufs_qcom_config_scaling_param(struct ufs_hba *hba,
+ struct devfreq_dev_profile *p,
+ struct devfreq_simple_ondemand_data *data)
+{
+}
+#endif
+
+/*
+ * struct ufs_hba_qcom_vops - UFS QCOM specific variant operations
+ *
+ * The variant operations configure the necessary controller and PHY
+ * handshake during initialization.
+ */
+static const struct ufs_hba_variant_ops ufs_hba_qcom_vops = {
+ .name = "qcom",
+ .init = ufs_qcom_init,
+ .exit = ufs_qcom_exit,
+ .get_ufs_hci_version = ufs_qcom_get_ufs_hci_version,
+ .clk_scale_notify = ufs_qcom_clk_scale_notify,
+ .setup_clocks = ufs_qcom_setup_clocks,
+ .hce_enable_notify = ufs_qcom_hce_enable_notify,
+ .link_startup_notify = ufs_qcom_link_startup_notify,
+ .pwr_change_notify = ufs_qcom_pwr_change_notify,
+ .apply_dev_quirks = ufs_qcom_apply_dev_quirks,
+ .suspend = ufs_qcom_suspend,
+ .resume = ufs_qcom_resume,
+ .dbg_register_dump = ufs_qcom_dump_dbg_regs,
+ .device_reset = ufs_qcom_device_reset,
+ .config_scaling_param = ufs_qcom_config_scaling_param,
+ .program_key = ufs_qcom_ice_program_key,
+};
+
+/**
+ * ufs_qcom_probe - probe routine of the driver
+ * @pdev: pointer to Platform device handle
+ *
+ * Return zero for success and non-zero for failure
+ */
+static int ufs_qcom_probe(struct platform_device *pdev)
+{
+ int err;
+ struct device *dev = &pdev->dev;
+
+ /* Perform generic probe */
+ err = ufshcd_pltfrm_init(pdev, &ufs_hba_qcom_vops);
+ if (err)
+ dev_err(dev, "ufshcd_pltfrm_init() failed %d\n", err);
+
+ return err;
+}
+
+/**
+ * ufs_qcom_remove - tear down the UFS host controller instance
+ * @pdev: pointer to platform device handle
+ *
+ * Always returns 0
+ */
+static int ufs_qcom_remove(struct platform_device *pdev)
+{
+ struct ufs_hba *hba = platform_get_drvdata(pdev);
+
+ pm_runtime_get_sync(&(pdev)->dev);
+ ufshcd_remove(hba);
+ return 0;
+}
+
+static const struct of_device_id ufs_qcom_of_match[] = {
+ { .compatible = "qcom,ufshc"},
+ {},
+};
+MODULE_DEVICE_TABLE(of, ufs_qcom_of_match);
+
+#ifdef CONFIG_ACPI
+static const struct acpi_device_id ufs_qcom_acpi_match[] = {
+ { "QCOM24A5" },
+ { },
+};
+MODULE_DEVICE_TABLE(acpi, ufs_qcom_acpi_match);
+#endif
+
+static const struct dev_pm_ops ufs_qcom_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(ufshcd_system_suspend, ufshcd_system_resume)
+ SET_RUNTIME_PM_OPS(ufshcd_runtime_suspend, ufshcd_runtime_resume, NULL)
+ .prepare = ufshcd_suspend_prepare,
+ .complete = ufshcd_resume_complete,
+};
+
+static struct platform_driver ufs_qcom_platform = {
+ .probe = ufs_qcom_probe,
+ .remove = ufs_qcom_remove,
+ .shutdown = ufshcd_pltfrm_shutdown,
+ .driver = {
+ .name = "ufshcd-qcom",
+ .pm = &ufs_qcom_pm_ops,
+ .of_match_table = of_match_ptr(ufs_qcom_of_match),
+ .acpi_match_table = ACPI_PTR(ufs_qcom_acpi_match),
+ },
+};
+module_platform_driver(ufs_qcom_platform);
+
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/ufs/host/ufs-qcom.h b/drivers/ufs/host/ufs-qcom.h
new file mode 100644
index 000000000..44466a395
--- /dev/null
+++ b/drivers/ufs/host/ufs-qcom.h
@@ -0,0 +1,270 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (c) 2013-2015, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef UFS_QCOM_H_
+#define UFS_QCOM_H_
+
+#include <linux/reset-controller.h>
+#include <linux/reset.h>
+#include <ufs/ufshcd.h>
+
+#define MAX_UFS_QCOM_HOSTS 1
+#define MAX_U32 (~(u32)0)
+#define MPHY_TX_FSM_STATE 0x41
+#define TX_FSM_HIBERN8 0x1
+#define HBRN8_POLL_TOUT_MS 100
+#define DEFAULT_CLK_RATE_HZ 1000000
+#define BUS_VECTOR_NAME_LEN 32
+
+#define UFS_HW_VER_MAJOR_SHFT (28)
+#define UFS_HW_VER_MAJOR_MASK (0x000F << UFS_HW_VER_MAJOR_SHFT)
+#define UFS_HW_VER_MINOR_SHFT (16)
+#define UFS_HW_VER_MINOR_MASK (0x0FFF << UFS_HW_VER_MINOR_SHFT)
+#define UFS_HW_VER_STEP_SHFT (0)
+#define UFS_HW_VER_STEP_MASK (0xFFFF << UFS_HW_VER_STEP_SHFT)
+
+/* vendor specific pre-defined parameters */
+#define SLOW 1
+#define FAST 2
+
+#define UFS_QCOM_LIMIT_HS_RATE PA_HS_MODE_B
+
+/* QCOM UFS host controller vendor specific registers */
+enum {
+ REG_UFS_SYS1CLK_1US = 0xC0,
+ REG_UFS_TX_SYMBOL_CLK_NS_US = 0xC4,
+ REG_UFS_LOCAL_PORT_ID_REG = 0xC8,
+ REG_UFS_PA_ERR_CODE = 0xCC,
+ REG_UFS_RETRY_TIMER_REG = 0xD0,
+ REG_UFS_PA_LINK_STARTUP_TIMER = 0xD8,
+ REG_UFS_CFG1 = 0xDC,
+ REG_UFS_CFG2 = 0xE0,
+ REG_UFS_HW_VERSION = 0xE4,
+
+ UFS_TEST_BUS = 0xE8,
+ UFS_TEST_BUS_CTRL_0 = 0xEC,
+ UFS_TEST_BUS_CTRL_1 = 0xF0,
+ UFS_TEST_BUS_CTRL_2 = 0xF4,
+ UFS_UNIPRO_CFG = 0xF8,
+
+ /*
+ * QCOM UFS host controller vendor specific registers
+ * added in HW Version 3.0.0
+ */
+ UFS_AH8_CFG = 0xFC,
+};
+
+/* QCOM UFS host controller vendor specific debug registers */
+enum {
+ UFS_DBG_RD_REG_UAWM = 0x100,
+ UFS_DBG_RD_REG_UARM = 0x200,
+ UFS_DBG_RD_REG_TXUC = 0x300,
+ UFS_DBG_RD_REG_RXUC = 0x400,
+ UFS_DBG_RD_REG_DFC = 0x500,
+ UFS_DBG_RD_REG_TRLUT = 0x600,
+ UFS_DBG_RD_REG_TMRLUT = 0x700,
+ UFS_UFS_DBG_RD_REG_OCSC = 0x800,
+
+ UFS_UFS_DBG_RD_DESC_RAM = 0x1500,
+ UFS_UFS_DBG_RD_PRDT_RAM = 0x1700,
+ UFS_UFS_DBG_RD_RESP_RAM = 0x1800,
+ UFS_UFS_DBG_RD_EDTL_RAM = 0x1900,
+};
+
+#define UFS_CNTLR_2_x_x_VEN_REGS_OFFSET(x) (0x000 + x)
+#define UFS_CNTLR_3_x_x_VEN_REGS_OFFSET(x) (0x400 + x)
+
+/* bit definitions for REG_UFS_CFG1 register */
+#define QUNIPRO_SEL 0x1
+#define UTP_DBG_RAMS_EN 0x20000
+#define TEST_BUS_EN BIT(18)
+#define TEST_BUS_SEL GENMASK(22, 19)
+#define UFS_REG_TEST_BUS_EN BIT(30)
+
+/* bit definitions for REG_UFS_CFG2 register */
+#define UAWM_HW_CGC_EN (1 << 0)
+#define UARM_HW_CGC_EN (1 << 1)
+#define TXUC_HW_CGC_EN (1 << 2)
+#define RXUC_HW_CGC_EN (1 << 3)
+#define DFC_HW_CGC_EN (1 << 4)
+#define TRLUT_HW_CGC_EN (1 << 5)
+#define TMRLUT_HW_CGC_EN (1 << 6)
+#define OCSC_HW_CGC_EN (1 << 7)
+
+/* bit definition for UFS_UFS_TEST_BUS_CTRL_n */
+#define TEST_BUS_SUB_SEL_MASK 0x1F /* All XXX_SEL fields are 5 bits wide */
+
+#define REG_UFS_CFG2_CGC_EN_ALL (UAWM_HW_CGC_EN | UARM_HW_CGC_EN |\
+ TXUC_HW_CGC_EN | RXUC_HW_CGC_EN |\
+ DFC_HW_CGC_EN | TRLUT_HW_CGC_EN |\
+ TMRLUT_HW_CGC_EN | OCSC_HW_CGC_EN)
+
+/* bit offset */
+enum {
+ OFFSET_UFS_PHY_SOFT_RESET = 1,
+ OFFSET_CLK_NS_REG = 10,
+};
+
+/* bit masks */
+enum {
+ MASK_UFS_PHY_SOFT_RESET = 0x2,
+ MASK_TX_SYMBOL_CLK_1US_REG = 0x3FF,
+ MASK_CLK_NS_REG = 0xFFFC00,
+};
+
+/* QCOM UFS debug print bit mask */
+#define UFS_QCOM_DBG_PRINT_REGS_EN BIT(0)
+#define UFS_QCOM_DBG_PRINT_ICE_REGS_EN BIT(1)
+#define UFS_QCOM_DBG_PRINT_TEST_BUS_EN BIT(2)
+
+#define UFS_QCOM_DBG_PRINT_ALL \
+ (UFS_QCOM_DBG_PRINT_REGS_EN | UFS_QCOM_DBG_PRINT_ICE_REGS_EN | \
+ UFS_QCOM_DBG_PRINT_TEST_BUS_EN)
+
+/* QUniPro Vendor specific attributes */
+#define PA_VS_CONFIG_REG1 0x9000
+#define DME_VS_CORE_CLK_CTRL 0xD002
+/* bit and mask definitions for DME_VS_CORE_CLK_CTRL attribute */
+#define DME_VS_CORE_CLK_CTRL_CORE_CLK_DIV_EN_BIT BIT(8)
+#define DME_VS_CORE_CLK_CTRL_MAX_CORE_CLK_1US_CYCLES_MASK 0xFF
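+/*
+ * The MAX_CORE_CLK_1US_CYCLES field holds the number of core clock cycles
+ * per microsecond, e.g. 150 for a 150 MHz UniPro core clock.
+ */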
+
+static inline void
+ufs_qcom_get_controller_revision(struct ufs_hba *hba,
+ u8 *major, u16 *minor, u16 *step)
+{
+ u32 ver = ufshcd_readl(hba, REG_UFS_HW_VERSION);
+
+ *major = (ver & UFS_HW_VER_MAJOR_MASK) >> UFS_HW_VER_MAJOR_SHFT;
+ *minor = (ver & UFS_HW_VER_MINOR_MASK) >> UFS_HW_VER_MINOR_SHFT;
+ *step = (ver & UFS_HW_VER_STEP_MASK) >> UFS_HW_VER_STEP_SHFT;
+}
+
+static inline void ufs_qcom_assert_reset(struct ufs_hba *hba)
+{
+ ufshcd_rmwl(hba, MASK_UFS_PHY_SOFT_RESET,
+ 1 << OFFSET_UFS_PHY_SOFT_RESET, REG_UFS_CFG1);
+
+ /*
+ * Make sure assertion of ufs phy reset is written to
+ * register before returning
+ */
+ mb();
+}
+
+static inline void ufs_qcom_deassert_reset(struct ufs_hba *hba)
+{
+ ufshcd_rmwl(hba, MASK_UFS_PHY_SOFT_RESET,
+ 0 << OFFSET_UFS_PHY_SOFT_RESET, REG_UFS_CFG1);
+
+ /*
+ * Make sure de-assertion of ufs phy reset is written to
+ * register before returning
+ */
+ mb();
+}
+
+/* Host controller hardware version: major.minor.step */
+struct ufs_hw_version {
+ u16 step;
+ u16 minor;
+ u8 major;
+};
+
+struct ufs_qcom_testbus {
+ u8 select_major;
+ u8 select_minor;
+};
+
+struct gpio_desc;
+
+struct ufs_qcom_host {
+	/*
+	 * Set this capability if the host controller supports QUniPro mode
+	 * and the driver wants the host controller to operate in that mode.
+	 * Note: by default this capability is kept enabled if the host
+	 * controller supports QUniPro mode.
+	 */
+ #define UFS_QCOM_CAP_QUNIPRO 0x1
+
+ /*
+ * Set this capability if host controller can retain the secure
+ * configuration even after UFS controller core power collapse.
+ */
+ #define UFS_QCOM_CAP_RETAIN_SEC_CFG_AFTER_PWR_COLLAPSE 0x2
+ u32 caps;
+
+ struct phy *generic_phy;
+ struct ufs_hba *hba;
+ struct ufs_pa_layer_attr dev_req_params;
+ struct clk *rx_l0_sync_clk;
+ struct clk *tx_l0_sync_clk;
+ struct clk *rx_l1_sync_clk;
+ struct clk *tx_l1_sync_clk;
+ bool is_lane_clks_enabled;
+
+ void __iomem *dev_ref_clk_ctrl_mmio;
+ bool is_dev_ref_clk_enabled;
+ struct ufs_hw_version hw_ver;
+#ifdef CONFIG_SCSI_UFS_CRYPTO
+ void __iomem *ice_mmio;
+#endif
+
+ u32 dev_ref_clk_en_mask;
+
+ /* Bitmask for enabling debug prints */
+ u32 dbg_print_en;
+ struct ufs_qcom_testbus testbus;
+
+ /* Reset control of HCI */
+ struct reset_control *core_reset;
+ struct reset_controller_dev rcdev;
+
+ struct gpio_desc *device_reset;
+};
+
+static inline u32
+ufs_qcom_get_debug_reg_offset(struct ufs_qcom_host *host, u32 reg)
+{
+ if (host->hw_ver.major <= 0x02)
+ return UFS_CNTLR_2_x_x_VEN_REGS_OFFSET(reg);
+
+ return UFS_CNTLR_3_x_x_VEN_REGS_OFFSET(reg);
+}
+
+#define ufs_qcom_is_link_off(hba) ufshcd_is_link_off(hba)
+#define ufs_qcom_is_link_active(hba) ufshcd_is_link_active(hba)
+#define ufs_qcom_is_link_hibern8(hba) ufshcd_is_link_hibern8(hba)
+
+int ufs_qcom_testbus_config(struct ufs_qcom_host *host);
+
+static inline bool ufs_qcom_cap_qunipro(struct ufs_qcom_host *host)
+{
+ return host->caps & UFS_QCOM_CAP_QUNIPRO;
+}
+
+/* ufs-qcom-ice.c */
+
+#ifdef CONFIG_SCSI_UFS_CRYPTO
+int ufs_qcom_ice_init(struct ufs_qcom_host *host);
+int ufs_qcom_ice_enable(struct ufs_qcom_host *host);
+int ufs_qcom_ice_resume(struct ufs_qcom_host *host);
+int ufs_qcom_ice_program_key(struct ufs_hba *hba,
+ const union ufs_crypto_cfg_entry *cfg, int slot);
+#else
+static inline int ufs_qcom_ice_init(struct ufs_qcom_host *host)
+{
+ return 0;
+}
+static inline int ufs_qcom_ice_enable(struct ufs_qcom_host *host)
+{
+ return 0;
+}
+static inline int ufs_qcom_ice_resume(struct ufs_qcom_host *host)
+{
+ return 0;
+}
+#define ufs_qcom_ice_program_key NULL
+#endif /* !CONFIG_SCSI_UFS_CRYPTO */
+
+#endif /* UFS_QCOM_H_ */
diff --git a/drivers/ufs/host/ufs-renesas.c b/drivers/ufs/host/ufs-renesas.c
new file mode 100644
index 000000000..ab0652d87
--- /dev/null
+++ b/drivers/ufs/host/ufs-renesas.c
@@ -0,0 +1,412 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+/*
+ * Renesas UFS host controller driver
+ *
+ * Copyright (C) 2022 Renesas Electronics Corporation
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/iopoll.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/pm_runtime.h>
+#include <ufs/ufshcd.h>
+
+#include "ufshcd-pltfrm.h"
+
+struct ufs_renesas_priv {
+ bool initialized; /* The hardware needs initialization once */
+};
+
+enum {
+ SET_PHY_INDEX_LO = 0,
+ SET_PHY_INDEX_HI,
+ TIMER_INDEX,
+ MAX_INDEX
+};
+
+enum ufs_renesas_init_param_mode {
+ MODE_RESTORE,
+ MODE_SET,
+ MODE_SAVE,
+ MODE_POLL,
+ MODE_WAIT,
+ MODE_WRITE,
+};
+
+#define PARAM_RESTORE(_reg, _index) \
+ { .mode = MODE_RESTORE, .reg = _reg, .index = _index }
+#define PARAM_SET(_index, _set) \
+ { .mode = MODE_SET, .index = _index, .u.set = _set }
+#define PARAM_SAVE(_reg, _mask, _index) \
+ { .mode = MODE_SAVE, .reg = _reg, .mask = (u32)(_mask), \
+ .index = _index }
+#define PARAM_POLL(_reg, _expected, _mask) \
+ { .mode = MODE_POLL, .reg = _reg, .u.expected = _expected, \
+ .mask = (u32)(_mask) }
+#define PARAM_WAIT(_delay_us) \
+ { .mode = MODE_WAIT, .u.delay_us = _delay_us }
+
+#define PARAM_WRITE(_reg, _val) \
+ { .mode = MODE_WRITE, .reg = _reg, .u.val = _val }
+
+#define PARAM_WRITE_D0_D4(_d0, _d4) \
+ PARAM_WRITE(0xd0, _d0), PARAM_WRITE(0xd4, _d4)
+
+#define PARAM_WRITE_800_80C_POLL(_addr, _data_800) \
+ PARAM_WRITE_D0_D4(0x0000080c, 0x00000100), \
+ PARAM_WRITE_D0_D4(0x00000800, ((_data_800) << 16) | BIT(8) | (_addr)), \
+ PARAM_WRITE(0xd0, 0x0000080c), \
+ PARAM_POLL(0xd4, BIT(8), BIT(8))
+
+#define PARAM_RESTORE_800_80C_POLL(_index) \
+ PARAM_WRITE_D0_D4(0x0000080c, 0x00000100), \
+ PARAM_WRITE(0xd0, 0x00000800), \
+ PARAM_RESTORE(0xd4, _index), \
+ PARAM_WRITE(0xd0, 0x0000080c), \
+ PARAM_POLL(0xd4, BIT(8), BIT(8))
+
+#define PARAM_WRITE_804_80C_POLL(_addr, _data_804) \
+ PARAM_WRITE_D0_D4(0x0000080c, 0x00000100), \
+ PARAM_WRITE_D0_D4(0x00000804, ((_data_804) << 16) | BIT(8) | (_addr)), \
+ PARAM_WRITE(0xd0, 0x0000080c), \
+ PARAM_POLL(0xd4, BIT(8), BIT(8))
+
+#define PARAM_WRITE_828_82C_POLL(_data_828) \
+ PARAM_WRITE_D0_D4(0x0000082c, 0x0f000000), \
+ PARAM_WRITE_D0_D4(0x00000828, _data_828), \
+ PARAM_WRITE(0xd0, 0x0000082c), \
+ PARAM_POLL(0xd4, _data_828, _data_828)
+
+#define PARAM_WRITE_PHY(_addr16, _data16) \
+ PARAM_WRITE(0xf0, 1), \
+ PARAM_WRITE_800_80C_POLL(0x16, (_addr16) & 0xff), \
+ PARAM_WRITE_800_80C_POLL(0x17, ((_addr16) >> 8) & 0xff), \
+ PARAM_WRITE_800_80C_POLL(0x18, (_data16) & 0xff), \
+ PARAM_WRITE_800_80C_POLL(0x19, ((_data16) >> 8) & 0xff), \
+ PARAM_WRITE_800_80C_POLL(0x1c, 0x01), \
+ PARAM_WRITE_828_82C_POLL(0x0f000000), \
+ PARAM_WRITE(0xf0, 0)
+
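+/*
+ * PARAM_WRITE_PHY programs one 16-bit PHY register through the indirect
+ * access window: the address is written byte-wise via sub-registers
+ * 0x16/0x17, the data via 0x18/0x19, and writing 0x1c commits the access.
+ */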
+#define PARAM_SET_PHY(_addr16, _data16) \
+ PARAM_WRITE(0xf0, 1), \
+ PARAM_WRITE_800_80C_POLL(0x16, (_addr16) & 0xff), \
+ PARAM_WRITE_800_80C_POLL(0x17, ((_addr16) >> 8) & 0xff), \
+ PARAM_WRITE_800_80C_POLL(0x1c, 0x01), \
+ PARAM_WRITE_828_82C_POLL(0x0f000000), \
+ PARAM_WRITE_804_80C_POLL(0x1a, 0), \
+ PARAM_WRITE(0xd0, 0x00000808), \
+ PARAM_SAVE(0xd4, 0xff, SET_PHY_INDEX_LO), \
+ PARAM_WRITE_804_80C_POLL(0x1b, 0), \
+ PARAM_WRITE(0xd0, 0x00000808), \
+ PARAM_SAVE(0xd4, 0xff, SET_PHY_INDEX_HI), \
+ PARAM_WRITE_828_82C_POLL(0x0f000000), \
+ PARAM_WRITE(0xf0, 0), \
+ PARAM_WRITE(0xf0, 1), \
+ PARAM_WRITE_800_80C_POLL(0x16, (_addr16) & 0xff), \
+ PARAM_WRITE_800_80C_POLL(0x17, ((_addr16) >> 8) & 0xff), \
+ PARAM_SET(SET_PHY_INDEX_LO, ((_data16 & 0xff) << 16) | BIT(8) | 0x18), \
+ PARAM_RESTORE_800_80C_POLL(SET_PHY_INDEX_LO), \
+ PARAM_SET(SET_PHY_INDEX_HI, (((_data16 >> 8) & 0xff) << 16) | BIT(8) | 0x19), \
+ PARAM_RESTORE_800_80C_POLL(SET_PHY_INDEX_HI), \
+ PARAM_WRITE_800_80C_POLL(0x1c, 0x01), \
+ PARAM_WRITE_828_82C_POLL(0x0f000000), \
+ PARAM_WRITE(0xf0, 0)
+
+#define PARAM_INDIRECT_WRITE(_gpio, _addr, _data_800) \
+ PARAM_WRITE(0xf0, _gpio), \
+ PARAM_WRITE_800_80C_POLL(_addr, _data_800), \
+ PARAM_WRITE_828_82C_POLL(0x0f000000), \
+ PARAM_WRITE(0xf0, 0)
+
+#define PARAM_INDIRECT_POLL(_gpio, _addr, _expected, _mask) \
+ PARAM_WRITE(0xf0, _gpio), \
+ PARAM_WRITE_800_80C_POLL(_addr, 0), \
+ PARAM_WRITE(0xd0, 0x00000808), \
+ PARAM_POLL(0xd4, _expected, _mask), \
+ PARAM_WRITE(0xf0, 0)
+
+struct ufs_renesas_init_param {
+ enum ufs_renesas_init_param_mode mode;
+ u32 reg;
+ union {
+ u32 expected;
+ u32 delay_us;
+ u32 set;
+ u32 val;
+ } u;
+ u32 mask;
+ u32 index;
+};
+
+/* This setting is for SERIES B */
+static const struct ufs_renesas_init_param ufs_param[] = {
+ PARAM_WRITE(0xc0, 0x49425308),
+ PARAM_WRITE_D0_D4(0x00000104, 0x00000002),
+ PARAM_WAIT(1),
+ PARAM_WRITE_D0_D4(0x00000828, 0x00000200),
+ PARAM_WAIT(1),
+ PARAM_WRITE_D0_D4(0x00000828, 0x00000000),
+ PARAM_WRITE_D0_D4(0x00000104, 0x00000001),
+ PARAM_WRITE_D0_D4(0x00000940, 0x00000001),
+ PARAM_WAIT(1),
+ PARAM_WRITE_D0_D4(0x00000940, 0x00000000),
+
+ PARAM_WRITE(0xc0, 0x49425308),
+ PARAM_WRITE(0xc0, 0x41584901),
+
+ PARAM_WRITE_D0_D4(0x0000080c, 0x00000100),
+ PARAM_WRITE_D0_D4(0x00000804, 0x00000000),
+ PARAM_WRITE(0xd0, 0x0000080c),
+ PARAM_POLL(0xd4, BIT(8), BIT(8)),
+
+ PARAM_WRITE(REG_CONTROLLER_ENABLE, 0x00000001),
+
+ PARAM_WRITE(0xd0, 0x00000804),
+ PARAM_POLL(0xd4, BIT(8) | BIT(6) | BIT(0), BIT(8) | BIT(6) | BIT(0)),
+
+ PARAM_WRITE(0xd0, 0x00000d00),
+ PARAM_SAVE(0xd4, 0x0000ffff, TIMER_INDEX),
+ PARAM_WRITE(0xd4, 0x00000000),
+ PARAM_WRITE_D0_D4(0x0000082c, 0x0f000000),
+ PARAM_WRITE_D0_D4(0x00000828, 0x08000000),
+ PARAM_WRITE(0xd0, 0x0000082c),
+ PARAM_POLL(0xd4, BIT(27), BIT(27)),
+ PARAM_WRITE(0xd0, 0x00000d2c),
+ PARAM_POLL(0xd4, BIT(0), BIT(0)),
+
+ /* phy setup */
+ PARAM_INDIRECT_WRITE(1, 0x01, 0x001f),
+ PARAM_INDIRECT_WRITE(7, 0x5d, 0x0014),
+ PARAM_INDIRECT_WRITE(7, 0x5e, 0x0014),
+ PARAM_INDIRECT_WRITE(7, 0x0d, 0x0003),
+ PARAM_INDIRECT_WRITE(7, 0x0e, 0x0007),
+ PARAM_INDIRECT_WRITE(7, 0x5f, 0x0003),
+ PARAM_INDIRECT_WRITE(7, 0x60, 0x0003),
+ PARAM_INDIRECT_WRITE(7, 0x5b, 0x00a6),
+ PARAM_INDIRECT_WRITE(7, 0x5c, 0x0003),
+
+ PARAM_INDIRECT_POLL(7, 0x3c, 0, BIT(7)),
+ PARAM_INDIRECT_POLL(7, 0x4c, 0, BIT(4)),
+
+ PARAM_INDIRECT_WRITE(1, 0x32, 0x0080),
+ PARAM_INDIRECT_WRITE(1, 0x1f, 0x0001),
+ PARAM_INDIRECT_WRITE(0, 0x2c, 0x0001),
+ PARAM_INDIRECT_WRITE(0, 0x32, 0x0087),
+
+ PARAM_INDIRECT_WRITE(1, 0x4d, 0x0061),
+ PARAM_INDIRECT_WRITE(4, 0x9b, 0x0009),
+ PARAM_INDIRECT_WRITE(4, 0xa6, 0x0005),
+ PARAM_INDIRECT_WRITE(4, 0xa5, 0x0058),
+ PARAM_INDIRECT_WRITE(1, 0x39, 0x0027),
+ PARAM_INDIRECT_WRITE(1, 0x47, 0x004c),
+
+ PARAM_INDIRECT_WRITE(7, 0x0d, 0x0002),
+ PARAM_INDIRECT_WRITE(7, 0x0e, 0x0007),
+
+ PARAM_WRITE_PHY(0x0028, 0x0061),
+ PARAM_WRITE_PHY(0x4014, 0x0061),
+ PARAM_SET_PHY(0x401c, BIT(2)),
+ PARAM_WRITE_PHY(0x4000, 0x0000),
+ PARAM_WRITE_PHY(0x4001, 0x0000),
+
+ PARAM_WRITE_PHY(0x10ae, 0x0001),
+ PARAM_WRITE_PHY(0x10ad, 0x0000),
+ PARAM_WRITE_PHY(0x10af, 0x0001),
+ PARAM_WRITE_PHY(0x10b6, 0x0001),
+ PARAM_WRITE_PHY(0x10ae, 0x0000),
+
+ PARAM_WRITE_PHY(0x10ae, 0x0001),
+ PARAM_WRITE_PHY(0x10ad, 0x0000),
+ PARAM_WRITE_PHY(0x10af, 0x0002),
+ PARAM_WRITE_PHY(0x10b6, 0x0001),
+ PARAM_WRITE_PHY(0x10ae, 0x0000),
+
+ PARAM_WRITE_PHY(0x10ae, 0x0001),
+ PARAM_WRITE_PHY(0x10ad, 0x0080),
+ PARAM_WRITE_PHY(0x10af, 0x0000),
+ PARAM_WRITE_PHY(0x10b6, 0x0001),
+ PARAM_WRITE_PHY(0x10ae, 0x0000),
+
+ PARAM_WRITE_PHY(0x10ae, 0x0001),
+ PARAM_WRITE_PHY(0x10ad, 0x0080),
+ PARAM_WRITE_PHY(0x10af, 0x001a),
+ PARAM_WRITE_PHY(0x10b6, 0x0001),
+ PARAM_WRITE_PHY(0x10ae, 0x0000),
+
+ PARAM_INDIRECT_WRITE(7, 0x70, 0x0016),
+ PARAM_INDIRECT_WRITE(7, 0x71, 0x0016),
+ PARAM_INDIRECT_WRITE(7, 0x72, 0x0014),
+ PARAM_INDIRECT_WRITE(7, 0x73, 0x0014),
+ PARAM_INDIRECT_WRITE(7, 0x74, 0x0000),
+ PARAM_INDIRECT_WRITE(7, 0x75, 0x0000),
+ PARAM_INDIRECT_WRITE(7, 0x76, 0x0010),
+ PARAM_INDIRECT_WRITE(7, 0x77, 0x0010),
+ PARAM_INDIRECT_WRITE(7, 0x78, 0x00ff),
+ PARAM_INDIRECT_WRITE(7, 0x79, 0x0000),
+
+ PARAM_INDIRECT_WRITE(7, 0x19, 0x0007),
+
+ PARAM_INDIRECT_WRITE(7, 0x1a, 0x0007),
+
+ PARAM_INDIRECT_WRITE(7, 0x24, 0x000c),
+
+ PARAM_INDIRECT_WRITE(7, 0x25, 0x000c),
+
+ PARAM_INDIRECT_WRITE(7, 0x62, 0x0000),
+ PARAM_INDIRECT_WRITE(7, 0x63, 0x0000),
+ PARAM_INDIRECT_WRITE(7, 0x5d, 0x0014),
+ PARAM_INDIRECT_WRITE(7, 0x5e, 0x0017),
+ PARAM_INDIRECT_WRITE(7, 0x5d, 0x0004),
+ PARAM_INDIRECT_WRITE(7, 0x5e, 0x0017),
+ PARAM_INDIRECT_POLL(7, 0x55, 0, BIT(6)),
+ PARAM_INDIRECT_POLL(7, 0x41, 0, BIT(7)),
+ /* end of phy setup */
+
+ PARAM_WRITE(0xf0, 0),
+ PARAM_WRITE(0xd0, 0x00000d00),
+ PARAM_RESTORE(0xd4, TIMER_INDEX),
+};
+
+static void ufs_renesas_dbg_register_dump(struct ufs_hba *hba)
+{
+ ufshcd_dump_regs(hba, 0xc0, 0x40, "regs: 0xc0 + ");
+}
+
+static void ufs_renesas_reg_control(struct ufs_hba *hba,
+ const struct ufs_renesas_init_param *p)
+{
+ static u32 save[MAX_INDEX];
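+	/*
+	 * The save slots persist across calls (hence static) so that values
+	 * captured by MODE_SAVE can later be consumed by MODE_SET and
+	 * MODE_RESTORE within the same init sequence.
+	 */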
+ int ret;
+ u32 val;
+
+ WARN_ON(p->index >= MAX_INDEX);
+
+ switch (p->mode) {
+ case MODE_RESTORE:
+ ufshcd_writel(hba, save[p->index], p->reg);
+ break;
+ case MODE_SET:
+ save[p->index] |= p->u.set;
+ break;
+ case MODE_SAVE:
+ save[p->index] = ufshcd_readl(hba, p->reg) & p->mask;
+ break;
+ case MODE_POLL:
+ ret = readl_poll_timeout_atomic(hba->mmio_base + p->reg,
+ val,
+ (val & p->mask) == p->u.expected,
+ 10, 1000);
+ if (ret)
+ dev_err(hba->dev, "%s: poll failed %d (%08x, %08x, %08x)\n",
+ __func__, ret, val, p->mask, p->u.expected);
+ break;
+ case MODE_WAIT:
+ if (p->u.delay_us > 1000)
+ mdelay(DIV_ROUND_UP(p->u.delay_us, 1000));
+ else
+ udelay(p->u.delay_us);
+ break;
+ case MODE_WRITE:
+ ufshcd_writel(hba, p->u.val, p->reg);
+ break;
+ default:
+ break;
+ }
+}
+
+static void ufs_renesas_pre_init(struct ufs_hba *hba)
+{
+ const struct ufs_renesas_init_param *p = ufs_param;
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(ufs_param); i++)
+ ufs_renesas_reg_control(hba, &p[i]);
+}
+
+static int ufs_renesas_hce_enable_notify(struct ufs_hba *hba,
+ enum ufs_notify_change_status status)
+{
+ struct ufs_renesas_priv *priv = ufshcd_get_variant(hba);
+
+ if (priv->initialized)
+ return 0;
+
+ if (status == PRE_CHANGE)
+ ufs_renesas_pre_init(hba);
+
+ priv->initialized = true;
+
+ return 0;
+}
+
+static int ufs_renesas_setup_clocks(struct ufs_hba *hba, bool on,
+ enum ufs_notify_change_status status)
+{
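+	/*
+	 * Hold a runtime PM reference while the controller clocks are on,
+	 * presumably to keep the device's power domain active.
+	 */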
+ if (on && status == PRE_CHANGE)
+ pm_runtime_get_sync(hba->dev);
+ else if (!on && status == POST_CHANGE)
+ pm_runtime_put(hba->dev);
+
+ return 0;
+}
+
+static int ufs_renesas_init(struct ufs_hba *hba)
+{
+ struct ufs_renesas_priv *priv;
+
+ priv = devm_kzalloc(hba->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+ ufshcd_set_variant(hba, priv);
+
+ hba->quirks |= UFSHCD_QUIRK_BROKEN_64BIT_ADDRESS | UFSHCD_QUIRK_HIBERN_FASTAUTO;
+
+ return 0;
+}
+
+static const struct ufs_hba_variant_ops ufs_renesas_vops = {
+ .name = "renesas",
+ .init = ufs_renesas_init,
+ .setup_clocks = ufs_renesas_setup_clocks,
+ .hce_enable_notify = ufs_renesas_hce_enable_notify,
+ .dbg_register_dump = ufs_renesas_dbg_register_dump,
+};
+
+static const struct of_device_id __maybe_unused ufs_renesas_of_match[] = {
+ { .compatible = "renesas,r8a779f0-ufs" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, ufs_renesas_of_match);
+
+static int ufs_renesas_probe(struct platform_device *pdev)
+{
+ return ufshcd_pltfrm_init(pdev, &ufs_renesas_vops);
+}
+
+static int ufs_renesas_remove(struct platform_device *pdev)
+{
+ struct ufs_hba *hba = platform_get_drvdata(pdev);
+
+ ufshcd_remove(hba);
+
+ return 0;
+}
+
+static struct platform_driver ufs_renesas_platform = {
+ .probe = ufs_renesas_probe,
+ .remove = ufs_renesas_remove,
+ .driver = {
+ .name = "ufshcd-renesas",
+ .of_match_table = of_match_ptr(ufs_renesas_of_match),
+ },
+};
+module_platform_driver(ufs_renesas_platform);
+
+MODULE_AUTHOR("Yoshihiro Shimoda <yoshihiro.shimoda.uh@renesas.com>");
+MODULE_DESCRIPTION("Renesas UFS host controller driver");
+MODULE_LICENSE("Dual MIT/GPL");
diff --git a/drivers/ufs/host/ufshcd-dwc.c b/drivers/ufs/host/ufshcd-dwc.c
new file mode 100644
index 000000000..e28a67e1e
--- /dev/null
+++ b/drivers/ufs/host/ufshcd-dwc.c
@@ -0,0 +1,150 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * UFS Host driver for Synopsys Designware Core
+ *
+ * Copyright (C) 2015-2016 Synopsys, Inc. (www.synopsys.com)
+ *
+ * Authors: Joao Pinto <jpinto@synopsys.com>
+ */
+
+#include <linux/module.h>
+
+#include <ufs/ufshcd.h>
+#include <ufs/unipro.h>
+
+#include "ufshcd-dwc.h"
+#include "ufshci-dwc.h"
+
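+/**
+ * ufshcd_dwc_dme_set_attrs - write a table of DME attributes
+ * @hba: private structure pointer
+ * @v: table of attribute/value pairs to program
+ * @n: number of entries in @v
+ *
+ * Returns 0 on success, or the first error from ufshcd_dme_set_attr().
+ */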
+int ufshcd_dwc_dme_set_attrs(struct ufs_hba *hba,
+ const struct ufshcd_dme_attr_val *v, int n)
+{
+ int ret = 0;
+ int attr_node = 0;
+
+ for (attr_node = 0; attr_node < n; attr_node++) {
+ ret = ufshcd_dme_set_attr(hba, v[attr_node].attr_sel,
+ ATTR_SET_NOR, v[attr_node].mib_val, v[attr_node].peer);
+
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(ufshcd_dwc_dme_set_attrs);
+
+/**
+ * ufshcd_dwc_program_clk_div() - program the clock divider value
+ * @hba: private structure pointer
+ * @divider_val: clock divider value to be programmed
+ *
+ * This value is needed to provide a 1 microsecond tick to the UniPro layer.
+ */
+static void ufshcd_dwc_program_clk_div(struct ufs_hba *hba, u32 divider_val)
+{
+ ufshcd_writel(hba, divider_val, DWC_UFS_REG_HCLKDIV);
+}
+
+/**
+ * ufshcd_dwc_link_is_up() - check if the link is up
+ * @hba: private structure pointer
+ *
+ * Returns 0 on success, non-zero value on failure
+ */
+static int ufshcd_dwc_link_is_up(struct ufs_hba *hba)
+{
+ int dme_result = 0;
+
+ ufshcd_dme_get(hba, UIC_ARG_MIB(VS_POWERSTATE), &dme_result);
+
+ if (dme_result == UFSHCD_LINK_IS_UP) {
+ ufshcd_set_link_active(hba);
+ return 0;
+ }
+
+ return 1;
+}
+
+/**
+ * ufshcd_dwc_connection_setup() - configure the host and device connection
+ * @hba: pointer to the driver's private data
+ *
+ * This function configures both the local side (host) and the peer side
+ * (device) UniPro attributes to establish the connection to the
+ * application/CPort.
+ *
+ * This function is not required if the hardware is properly configured to
+ * have this connection set up on reset, but invoking it does no harm and
+ * is safe with any UFS device.
+ *
+ * Returns 0 on success, non-zero value on failure
+ */
+static int ufshcd_dwc_connection_setup(struct ufs_hba *hba)
+{
+ static const struct ufshcd_dme_attr_val setup_attrs[] = {
+ { UIC_ARG_MIB(T_CONNECTIONSTATE), 0, DME_LOCAL },
+ { UIC_ARG_MIB(N_DEVICEID), 0, DME_LOCAL },
+ { UIC_ARG_MIB(N_DEVICEID_VALID), 0, DME_LOCAL },
+ { UIC_ARG_MIB(T_PEERDEVICEID), 1, DME_LOCAL },
+ { UIC_ARG_MIB(T_PEERCPORTID), 0, DME_LOCAL },
+ { UIC_ARG_MIB(T_TRAFFICCLASS), 0, DME_LOCAL },
+ { UIC_ARG_MIB(T_CPORTFLAGS), 0x6, DME_LOCAL },
+ { UIC_ARG_MIB(T_CPORTMODE), 1, DME_LOCAL },
+ { UIC_ARG_MIB(T_CONNECTIONSTATE), 1, DME_LOCAL },
+ { UIC_ARG_MIB(T_CONNECTIONSTATE), 0, DME_PEER },
+ { UIC_ARG_MIB(N_DEVICEID), 1, DME_PEER },
+ { UIC_ARG_MIB(N_DEVICEID_VALID), 1, DME_PEER },
+ { UIC_ARG_MIB(T_PEERDEVICEID), 1, DME_PEER },
+ { UIC_ARG_MIB(T_PEERCPORTID), 0, DME_PEER },
+ { UIC_ARG_MIB(T_TRAFFICCLASS), 0, DME_PEER },
+ { UIC_ARG_MIB(T_CPORTFLAGS), 0x6, DME_PEER },
+ { UIC_ARG_MIB(T_CPORTMODE), 1, DME_PEER },
+ { UIC_ARG_MIB(T_CONNECTIONSTATE), 1, DME_PEER }
+ };
+
+ return ufshcd_dwc_dme_set_attrs(hba, setup_attrs, ARRAY_SIZE(setup_attrs));
+}
+
+/**
+ * ufshcd_dwc_link_startup_notify() - DWC-specific link startup sequence
+ * @hba: private structure pointer
+ * @status: callback notify status
+ *
+ * Returns 0 on success, non-zero value on failure
+ */
+int ufshcd_dwc_link_startup_notify(struct ufs_hba *hba,
+ enum ufs_notify_change_status status)
+{
+ int err = 0;
+
+ if (status == PRE_CHANGE) {
+ ufshcd_dwc_program_clk_div(hba, DWC_UFS_REG_HCLKDIV_DIV_125);
+
+ err = ufshcd_vops_phy_initialization(hba);
+ if (err) {
+ dev_err(hba->dev, "Phy setup failed (%d)\n", err);
+ goto out;
+ }
+ } else { /* POST_CHANGE */
+ err = ufshcd_dwc_link_is_up(hba);
+ if (err) {
+ dev_err(hba->dev, "Link is not up\n");
+ goto out;
+ }
+
+ err = ufshcd_dwc_connection_setup(hba);
+ if (err)
+ dev_err(hba->dev, "Connection setup failed (%d)\n",
+ err);
+ }
+
+out:
+ return err;
+}
+EXPORT_SYMBOL(ufshcd_dwc_link_startup_notify);
+
+MODULE_AUTHOR("Joao Pinto <Joao.Pinto@synopsys.com>");
+MODULE_DESCRIPTION("UFS Host driver for Synopsys Designware Core");
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/ufs/host/ufshcd-dwc.h b/drivers/ufs/host/ufshcd-dwc.h
new file mode 100644
index 000000000..ad91ea566
--- /dev/null
+++ b/drivers/ufs/host/ufshcd-dwc.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * UFS Host driver for Synopsys Designware Core
+ *
+ * Copyright (C) 2015-2016 Synopsys, Inc. (www.synopsys.com)
+ *
+ * Authors: Joao Pinto <jpinto@synopsys.com>
+ */
+
+#ifndef _UFSHCD_DWC_H
+#define _UFSHCD_DWC_H
+
+#include <ufs/ufshcd.h>
+
+struct ufshcd_dme_attr_val {
+ u32 attr_sel;
+ u32 mib_val;
+ u8 peer;
+};
+
+int ufshcd_dwc_link_startup_notify(struct ufs_hba *hba,
+ enum ufs_notify_change_status status);
+int ufshcd_dwc_dme_set_attrs(struct ufs_hba *hba,
+ const struct ufshcd_dme_attr_val *v, int n);
+#endif /* End of Header */
diff --git a/drivers/ufs/host/ufshcd-pci.c b/drivers/ufs/host/ufshcd-pci.c
new file mode 100644
index 000000000..9c911787f
--- /dev/null
+++ b/drivers/ufs/host/ufshcd-pci.c
@@ -0,0 +1,632 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Universal Flash Storage Host controller PCI glue driver
+ *
+ * Copyright (C) 2011-2013 Samsung India Software Operations
+ *
+ * Authors:
+ * Santosh Yaraganavi <santosh.sy@samsung.com>
+ * Vinayak Holikatti <h.vinayak@samsung.com>
+ */
+
+#include <ufs/ufshcd.h>
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/pm_runtime.h>
+#include <linux/pm_qos.h>
+#include <linux/debugfs.h>
+#include <linux/uuid.h>
+#include <linux/acpi.h>
+#include <linux/gpio/consumer.h>
+
+struct ufs_host {
+ void (*late_init)(struct ufs_hba *hba);
+};
+
+enum intel_ufs_dsm_func_id {
+ INTEL_DSM_FNS = 0,
+ INTEL_DSM_RESET = 1,
+};
+
+struct intel_host {
+ struct ufs_host ufs_host;
+ u32 dsm_fns;
+ u32 active_ltr;
+ u32 idle_ltr;
+ struct dentry *debugfs_root;
+ struct gpio_desc *reset_gpio;
+};
+
+static const guid_t intel_dsm_guid =
+ GUID_INIT(0x1A4832A0, 0x7D03, 0x43CA,
+ 0xB0, 0x20, 0xF6, 0xDC, 0xD1, 0x2A, 0x19, 0x50);
+
+static bool __intel_dsm_supported(struct intel_host *host,
+ enum intel_ufs_dsm_func_id fn)
+{
+ return fn < 32 && fn >= 0 && (host->dsm_fns & (1u << fn));
+}
+
+#define INTEL_DSM_SUPPORTED(host, name) \
+ __intel_dsm_supported(host, INTEL_DSM_##name)
+
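+/*
+ * Evaluate one function of the Intel UFS ACPI _DSM. The firmware returns a
+ * buffer object, of which up to the first four bytes are copied into the
+ * u32 result. Function 0 (INTEL_DSM_FNS) yields the bitmask of supported
+ * functions that is cached in intel_host->dsm_fns.
+ */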
+static int __intel_dsm(struct intel_host *intel_host, struct device *dev,
+ unsigned int fn, u32 *result)
+{
+ union acpi_object *obj;
+ int err = 0;
+ size_t len;
+
+ obj = acpi_evaluate_dsm(ACPI_HANDLE(dev), &intel_dsm_guid, 0, fn, NULL);
+ if (!obj)
+ return -EOPNOTSUPP;
+
+ if (obj->type != ACPI_TYPE_BUFFER || obj->buffer.length < 1) {
+ err = -EINVAL;
+ goto out;
+ }
+
+ len = min_t(size_t, obj->buffer.length, 4);
+
+ *result = 0;
+ memcpy(result, obj->buffer.pointer, len);
+out:
+ ACPI_FREE(obj);
+
+ return err;
+}
+
+static int intel_dsm(struct intel_host *intel_host, struct device *dev,
+ unsigned int fn, u32 *result)
+{
+ if (!__intel_dsm_supported(intel_host, fn))
+ return -EOPNOTSUPP;
+
+ return __intel_dsm(intel_host, dev, fn, result);
+}
+
+static void intel_dsm_init(struct intel_host *intel_host, struct device *dev)
+{
+ int err;
+
+ err = __intel_dsm(intel_host, dev, INTEL_DSM_FNS, &intel_host->dsm_fns);
+ dev_dbg(dev, "DSM fns %#x, error %d\n", intel_host->dsm_fns, err);
+}
+
+static int ufs_intel_hce_enable_notify(struct ufs_hba *hba,
+ enum ufs_notify_change_status status)
+{
+ /* Cannot enable ICE until after HC enable */
+ if (status == POST_CHANGE && hba->caps & UFSHCD_CAP_CRYPTO) {
+ u32 hce = ufshcd_readl(hba, REG_CONTROLLER_ENABLE);
+
+ hce |= CRYPTO_GENERAL_ENABLE;
+ ufshcd_writel(hba, hce, REG_CONTROLLER_ENABLE);
+ }
+
+ return 0;
+}
+
+static int ufs_intel_disable_lcc(struct ufs_hba *hba)
+{
+ u32 attr = UIC_ARG_MIB(PA_LOCAL_TX_LCC_ENABLE);
+ u32 lcc_enable = 0;
+
+ ufshcd_dme_get(hba, attr, &lcc_enable);
+ if (lcc_enable)
+ ufshcd_disable_host_tx_lcc(hba);
+
+ return 0;
+}
+
+static int ufs_intel_link_startup_notify(struct ufs_hba *hba,
+ enum ufs_notify_change_status status)
+{
+ int err = 0;
+
+ switch (status) {
+ case PRE_CHANGE:
+ err = ufs_intel_disable_lcc(hba);
+ break;
+ case POST_CHANGE:
+ break;
+ default:
+ break;
+ }
+
+ return err;
+}
+
+static int ufs_intel_set_lanes(struct ufs_hba *hba, u32 lanes)
+{
+ struct ufs_pa_layer_attr pwr_info = hba->pwr_info;
+ int ret;
+
+ pwr_info.lane_rx = lanes;
+ pwr_info.lane_tx = lanes;
+ ret = ufshcd_config_pwr_mode(hba, &pwr_info);
+ if (ret)
+ dev_err(hba->dev, "%s: Setting %u lanes, err = %d\n",
+ __func__, lanes, ret);
+ return ret;
+}
+
+static int ufs_intel_lkf_pwr_change_notify(struct ufs_hba *hba,
+ enum ufs_notify_change_status status,
+ struct ufs_pa_layer_attr *dev_max_params,
+ struct ufs_pa_layer_attr *dev_req_params)
+{
+ int err = 0;
+
+ switch (status) {
+ case PRE_CHANGE:
+ if (ufshcd_is_hs_mode(dev_max_params) &&
+ (hba->pwr_info.lane_rx != 2 || hba->pwr_info.lane_tx != 2))
+ ufs_intel_set_lanes(hba, 2);
+ memcpy(dev_req_params, dev_max_params, sizeof(*dev_req_params));
+ break;
+ case POST_CHANGE:
+ if (ufshcd_is_hs_mode(dev_req_params)) {
+ u32 peer_granularity;
+
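+			/*
+			 * Presumably lets the new power mode settle before
+			 * the peer is accessed again; the PA_GRANULARITY
+			 * read-back value is not otherwise used here.
+			 */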
+ usleep_range(1000, 1250);
+ err = ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_GRANULARITY),
+ &peer_granularity);
+ }
+ break;
+ default:
+ break;
+ }
+
+ return err;
+}
+
+static int ufs_intel_lkf_apply_dev_quirks(struct ufs_hba *hba)
+{
+ u32 granularity, peer_granularity;
+ u32 pa_tactivate, peer_pa_tactivate;
+ int ret;
+
+ ret = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_GRANULARITY), &granularity);
+ if (ret)
+ goto out;
+
+ ret = ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_GRANULARITY), &peer_granularity);
+ if (ret)
+ goto out;
+
+ ret = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_TACTIVATE), &pa_tactivate);
+ if (ret)
+ goto out;
+
+ ret = ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_TACTIVATE), &peer_pa_tactivate);
+ if (ret)
+ goto out;
+
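+	/*
+	 * When both sides report the same PA_GRANULARITY, pad the device's
+	 * PA_TACTIVATE by two units relative to the host's value, presumably
+	 * to give the device extra time to activate its lanes.
+	 */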
+ if (granularity == peer_granularity) {
+ u32 new_peer_pa_tactivate = pa_tactivate + 2;
+
+ ret = ufshcd_dme_peer_set(hba, UIC_ARG_MIB(PA_TACTIVATE), new_peer_pa_tactivate);
+ }
+out:
+ return ret;
+}
+
+#define INTEL_ACTIVELTR 0x804
+#define INTEL_IDLELTR 0x808
+
+#define INTEL_LTR_REQ BIT(15)
+#define INTEL_LTR_SCALE_MASK GENMASK(11, 10)
+#define INTEL_LTR_SCALE_1US (2 << 10)
+#define INTEL_LTR_SCALE_32US (3 << 10)
+#define INTEL_LTR_VALUE_MASK GENMASK(9, 0)
+
+static void intel_cache_ltr(struct ufs_hba *hba)
+{
+ struct intel_host *host = ufshcd_get_variant(hba);
+
+ host->active_ltr = readl(hba->mmio_base + INTEL_ACTIVELTR);
+ host->idle_ltr = readl(hba->mmio_base + INTEL_IDLELTR);
+}
+
+static void intel_ltr_set(struct device *dev, s32 val)
+{
+ struct ufs_hba *hba = dev_get_drvdata(dev);
+ struct intel_host *host = ufshcd_get_variant(hba);
+ u32 ltr;
+
+ pm_runtime_get_sync(dev);
+
+	/*
+	 * Program the latency tolerance (LTR) according to what the PM QoS
+	 * layer has requested, or disable it if we were passed a negative
+	 * value or PM_QOS_LATENCY_ANY.
+	 */
+ ltr = readl(hba->mmio_base + INTEL_ACTIVELTR);
+
+ if (val == PM_QOS_LATENCY_ANY || val < 0) {
+ ltr &= ~INTEL_LTR_REQ;
+ } else {
+ ltr |= INTEL_LTR_REQ;
+ ltr &= ~INTEL_LTR_SCALE_MASK;
+ ltr &= ~INTEL_LTR_VALUE_MASK;
+
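+		/*
+		 * The 10-bit LTR value field holds at most 1023 units.
+		 * Values that do not fit at the 1 us scale are shifted down
+		 * by 5 (divided by 32) and expressed at the 32 us scale
+		 * instead, e.g. 20000 us becomes 625 units of 32 us.
+		 */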
+ if (val > INTEL_LTR_VALUE_MASK) {
+ val >>= 5;
+ if (val > INTEL_LTR_VALUE_MASK)
+ val = INTEL_LTR_VALUE_MASK;
+ ltr |= INTEL_LTR_SCALE_32US | val;
+ } else {
+ ltr |= INTEL_LTR_SCALE_1US | val;
+ }
+ }
+
+ if (ltr == host->active_ltr)
+ goto out;
+
+ writel(ltr, hba->mmio_base + INTEL_ACTIVELTR);
+ writel(ltr, hba->mmio_base + INTEL_IDLELTR);
+
+ /* Cache the values into intel_host structure */
+ intel_cache_ltr(hba);
+out:
+ pm_runtime_put(dev);
+}
+
+static void intel_ltr_expose(struct device *dev)
+{
+ dev->power.set_latency_tolerance = intel_ltr_set;
+ dev_pm_qos_expose_latency_tolerance(dev);
+}
+
+static void intel_ltr_hide(struct device *dev)
+{
+ dev_pm_qos_hide_latency_tolerance(dev);
+ dev->power.set_latency_tolerance = NULL;
+}
+
+static void intel_add_debugfs(struct ufs_hba *hba)
+{
+ struct dentry *dir = debugfs_create_dir(dev_name(hba->dev), NULL);
+ struct intel_host *host = ufshcd_get_variant(hba);
+
+ intel_cache_ltr(hba);
+
+ host->debugfs_root = dir;
+ debugfs_create_x32("active_ltr", 0444, dir, &host->active_ltr);
+ debugfs_create_x32("idle_ltr", 0444, dir, &host->idle_ltr);
+}
+
+static void intel_remove_debugfs(struct ufs_hba *hba)
+{
+ struct intel_host *host = ufshcd_get_variant(hba);
+
+ debugfs_remove_recursive(host->debugfs_root);
+}
+
+static int ufs_intel_device_reset(struct ufs_hba *hba)
+{
+ struct intel_host *host = ufshcd_get_variant(hba);
+
+ if (INTEL_DSM_SUPPORTED(host, RESET)) {
+ u32 result = 0;
+ int err;
+
+ err = intel_dsm(host, hba->dev, INTEL_DSM_RESET, &result);
+ if (!err && !result)
+ err = -EIO;
+ if (err)
+ dev_err(hba->dev, "%s: DSM error %d result %u\n",
+ __func__, err, result);
+ return err;
+ }
+
+ if (!host->reset_gpio)
+ return -EOPNOTSUPP;
+
+ gpiod_set_value_cansleep(host->reset_gpio, 1);
+ usleep_range(10, 15);
+
+ gpiod_set_value_cansleep(host->reset_gpio, 0);
+ usleep_range(10, 15);
+
+ return 0;
+}
+
+static struct gpio_desc *ufs_intel_get_reset_gpio(struct device *dev)
+{
+ /* GPIO in _DSD has active low setting */
+ return devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW);
+}
+
+static int ufs_intel_common_init(struct ufs_hba *hba)
+{
+ struct intel_host *host;
+
+ hba->caps |= UFSHCD_CAP_RPM_AUTOSUSPEND;
+
+ host = devm_kzalloc(hba->dev, sizeof(*host), GFP_KERNEL);
+ if (!host)
+ return -ENOMEM;
+ ufshcd_set_variant(hba, host);
+ intel_dsm_init(host, hba->dev);
+ if (INTEL_DSM_SUPPORTED(host, RESET)) {
+ if (hba->vops->device_reset)
+ hba->caps |= UFSHCD_CAP_DEEPSLEEP;
+ } else {
+ if (hba->vops->device_reset)
+ host->reset_gpio = ufs_intel_get_reset_gpio(hba->dev);
+ if (IS_ERR(host->reset_gpio)) {
+ dev_err(hba->dev, "%s: failed to get reset GPIO, error %ld\n",
+ __func__, PTR_ERR(host->reset_gpio));
+ host->reset_gpio = NULL;
+ }
+ if (host->reset_gpio) {
+ gpiod_set_value_cansleep(host->reset_gpio, 0);
+ hba->caps |= UFSHCD_CAP_DEEPSLEEP;
+ }
+ }
+ intel_ltr_expose(hba->dev);
+ intel_add_debugfs(hba);
+ return 0;
+}
+
+static void ufs_intel_common_exit(struct ufs_hba *hba)
+{
+ intel_remove_debugfs(hba);
+ intel_ltr_hide(hba->dev);
+}
+
+static int ufs_intel_resume(struct ufs_hba *hba, enum ufs_pm_op op)
+{
+ if (ufshcd_is_link_hibern8(hba)) {
+ int ret = ufshcd_uic_hibern8_exit(hba);
+
+ if (!ret) {
+ ufshcd_set_link_active(hba);
+ } else {
+ dev_err(hba->dev, "%s: hibern8 exit failed %d\n",
+ __func__, ret);
+ /*
+ * Force reset and restore. Any other actions can lead
+ * to an unrecoverable state.
+ */
+ ufshcd_set_link_off(hba);
+ }
+ }
+
+ return 0;
+}
+
+static int ufs_intel_ehl_init(struct ufs_hba *hba)
+{
+ hba->quirks |= UFSHCD_QUIRK_BROKEN_AUTO_HIBERN8;
+ return ufs_intel_common_init(hba);
+}
+
+static void ufs_intel_lkf_late_init(struct ufs_hba *hba)
+{
+ /* LKF always needs a full reset, so set PM accordingly */
+ if (hba->caps & UFSHCD_CAP_DEEPSLEEP) {
+ hba->spm_lvl = UFS_PM_LVL_6;
+ hba->rpm_lvl = UFS_PM_LVL_6;
+ } else {
+ hba->spm_lvl = UFS_PM_LVL_5;
+ hba->rpm_lvl = UFS_PM_LVL_5;
+ }
+}
+
+static int ufs_intel_lkf_init(struct ufs_hba *hba)
+{
+ struct ufs_host *ufs_host;
+ int err;
+
+ hba->nop_out_timeout = 200;
+ hba->quirks |= UFSHCD_QUIRK_BROKEN_AUTO_HIBERN8;
+ hba->caps |= UFSHCD_CAP_CRYPTO;
+ err = ufs_intel_common_init(hba);
+ ufs_host = ufshcd_get_variant(hba);
+ ufs_host->late_init = ufs_intel_lkf_late_init;
+ return err;
+}
+
+static int ufs_intel_adl_init(struct ufs_hba *hba)
+{
+ hba->nop_out_timeout = 200;
+ hba->quirks |= UFSHCD_QUIRK_BROKEN_AUTO_HIBERN8;
+ hba->caps |= UFSHCD_CAP_WB_EN;
+ return ufs_intel_common_init(hba);
+}
+
+static int ufs_intel_mtl_init(struct ufs_hba *hba)
+{
+ hba->caps |= UFSHCD_CAP_CRYPTO | UFSHCD_CAP_WB_EN;
+ return ufs_intel_common_init(hba);
+}
+
+static struct ufs_hba_variant_ops ufs_intel_cnl_hba_vops = {
+ .name = "intel-pci",
+ .init = ufs_intel_common_init,
+ .exit = ufs_intel_common_exit,
+ .link_startup_notify = ufs_intel_link_startup_notify,
+ .resume = ufs_intel_resume,
+};
+
+static struct ufs_hba_variant_ops ufs_intel_ehl_hba_vops = {
+ .name = "intel-pci",
+ .init = ufs_intel_ehl_init,
+ .exit = ufs_intel_common_exit,
+ .link_startup_notify = ufs_intel_link_startup_notify,
+ .resume = ufs_intel_resume,
+};
+
+static struct ufs_hba_variant_ops ufs_intel_lkf_hba_vops = {
+ .name = "intel-pci",
+ .init = ufs_intel_lkf_init,
+ .exit = ufs_intel_common_exit,
+ .hce_enable_notify = ufs_intel_hce_enable_notify,
+ .link_startup_notify = ufs_intel_link_startup_notify,
+ .pwr_change_notify = ufs_intel_lkf_pwr_change_notify,
+ .apply_dev_quirks = ufs_intel_lkf_apply_dev_quirks,
+ .resume = ufs_intel_resume,
+ .device_reset = ufs_intel_device_reset,
+};
+
+static struct ufs_hba_variant_ops ufs_intel_adl_hba_vops = {
+ .name = "intel-pci",
+ .init = ufs_intel_adl_init,
+ .exit = ufs_intel_common_exit,
+ .link_startup_notify = ufs_intel_link_startup_notify,
+ .resume = ufs_intel_resume,
+ .device_reset = ufs_intel_device_reset,
+};
+
+static struct ufs_hba_variant_ops ufs_intel_mtl_hba_vops = {
+ .name = "intel-pci",
+ .init = ufs_intel_mtl_init,
+ .exit = ufs_intel_common_exit,
+ .hce_enable_notify = ufs_intel_hce_enable_notify,
+ .link_startup_notify = ufs_intel_link_startup_notify,
+ .resume = ufs_intel_resume,
+ .device_reset = ufs_intel_device_reset,
+};
+
+#ifdef CONFIG_PM_SLEEP
+static int ufshcd_pci_restore(struct device *dev)
+{
+ struct ufs_hba *hba = dev_get_drvdata(dev);
+
+ /* Force a full reset and restore */
+ ufshcd_set_link_off(hba);
+
+ return ufshcd_system_resume(dev);
+}
+#endif
+
+/**
+ * ufshcd_pci_shutdown - main function to put the controller in reset state
+ * @pdev: pointer to PCI device handle
+ */
+static void ufshcd_pci_shutdown(struct pci_dev *pdev)
+{
+ ufshcd_shutdown((struct ufs_hba *)pci_get_drvdata(pdev));
+}
+
+/**
+ * ufshcd_pci_remove - de-allocate the PCI/SCSI host and host memory space
+ * @pdev: pointer to PCI handle
+ */
+static void ufshcd_pci_remove(struct pci_dev *pdev)
+{
+ struct ufs_hba *hba = pci_get_drvdata(pdev);
+
+ pm_runtime_forbid(&pdev->dev);
+ pm_runtime_get_noresume(&pdev->dev);
+ ufshcd_remove(hba);
+ ufshcd_dealloc_host(hba);
+}
+
+/**
+ * ufshcd_pci_probe - probe routine of the driver
+ * @pdev: pointer to PCI device handle
+ * @id: PCI device id
+ *
+ * Returns 0 on success, non-zero value on failure
+ */
+static int
+ufshcd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ struct ufs_host *ufs_host;
+ struct ufs_hba *hba;
+ void __iomem *mmio_base;
+ int err;
+
+ err = pcim_enable_device(pdev);
+ if (err) {
+ dev_err(&pdev->dev, "pcim_enable_device failed\n");
+ return err;
+ }
+
+ pci_set_master(pdev);
+
+ err = pcim_iomap_regions(pdev, 1 << 0, UFSHCD);
+ if (err < 0) {
+ dev_err(&pdev->dev, "request and iomap failed\n");
+ return err;
+ }
+
+ mmio_base = pcim_iomap_table(pdev)[0];
+
+ err = ufshcd_alloc_host(&pdev->dev, &hba);
+ if (err) {
+ dev_err(&pdev->dev, "Allocation failed\n");
+ return err;
+ }
+
+ hba->vops = (struct ufs_hba_variant_ops *)id->driver_data;
+
+ err = ufshcd_init(hba, mmio_base, pdev->irq);
+ if (err) {
+ dev_err(&pdev->dev, "Initialization failed\n");
+ ufshcd_dealloc_host(hba);
+ return err;
+ }
+
+ ufs_host = ufshcd_get_variant(hba);
+ if (ufs_host && ufs_host->late_init)
+ ufs_host->late_init(hba);
+
+ pm_runtime_put_noidle(&pdev->dev);
+ pm_runtime_allow(&pdev->dev);
+
+ return 0;
+}
+
+static const struct dev_pm_ops ufshcd_pci_pm_ops = {
+ SET_RUNTIME_PM_OPS(ufshcd_runtime_suspend, ufshcd_runtime_resume, NULL)
+#ifdef CONFIG_PM_SLEEP
+ .suspend = ufshcd_system_suspend,
+ .resume = ufshcd_system_resume,
+ .freeze = ufshcd_system_suspend,
+ .thaw = ufshcd_system_resume,
+ .poweroff = ufshcd_system_suspend,
+ .restore = ufshcd_pci_restore,
+ .prepare = ufshcd_suspend_prepare,
+ .complete = ufshcd_resume_complete,
+#endif
+};
+
+static const struct pci_device_id ufshcd_pci_tbl[] = {
+ { PCI_VENDOR_ID_SAMSUNG, 0xC00C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
+ { PCI_VDEVICE(INTEL, 0x9DFA), (kernel_ulong_t)&ufs_intel_cnl_hba_vops },
+ { PCI_VDEVICE(INTEL, 0x4B41), (kernel_ulong_t)&ufs_intel_ehl_hba_vops },
+ { PCI_VDEVICE(INTEL, 0x4B43), (kernel_ulong_t)&ufs_intel_ehl_hba_vops },
+ { PCI_VDEVICE(INTEL, 0x98FA), (kernel_ulong_t)&ufs_intel_lkf_hba_vops },
+ { PCI_VDEVICE(INTEL, 0x51FF), (kernel_ulong_t)&ufs_intel_adl_hba_vops },
+ { PCI_VDEVICE(INTEL, 0x54FF), (kernel_ulong_t)&ufs_intel_adl_hba_vops },
+ { PCI_VDEVICE(INTEL, 0x7E47), (kernel_ulong_t)&ufs_intel_mtl_hba_vops },
+ { PCI_VDEVICE(INTEL, 0xA847), (kernel_ulong_t)&ufs_intel_mtl_hba_vops },
+ { } /* terminate list */
+};
+
+MODULE_DEVICE_TABLE(pci, ufshcd_pci_tbl);
+
+static struct pci_driver ufshcd_pci_driver = {
+ .name = UFSHCD,
+ .id_table = ufshcd_pci_tbl,
+ .probe = ufshcd_pci_probe,
+ .remove = ufshcd_pci_remove,
+ .shutdown = ufshcd_pci_shutdown,
+ .driver = {
+ .pm = &ufshcd_pci_pm_ops
+ },
+};
+
+module_pci_driver(ufshcd_pci_driver);
+
+MODULE_AUTHOR("Santosh Yaragnavi <santosh.sy@samsung.com>");
+MODULE_AUTHOR("Vinayak Holikatti <h.vinayak@samsung.com>");
+MODULE_DESCRIPTION("UFS host controller PCI glue driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/ufs/host/ufshcd-pltfrm.c b/drivers/ufs/host/ufshcd-pltfrm.c
new file mode 100644
index 000000000..5739ff007
--- /dev/null
+++ b/drivers/ufs/host/ufshcd-pltfrm.c
@@ -0,0 +1,401 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Universal Flash Storage Host controller Platform bus based glue driver
+ * Copyright (C) 2011-2013 Samsung India Software Operations
+ *
+ * Authors:
+ * Santosh Yaraganavi <santosh.sy@samsung.com>
+ * Vinayak Holikatti <h.vinayak@samsung.com>
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/of.h>
+
+#include <ufs/ufshcd.h>
+#include "ufshcd-pltfrm.h"
+#include <ufs/unipro.h>
+
+#define UFSHCD_DEFAULT_LANES_PER_DIRECTION 2
+
+static int ufshcd_parse_clock_info(struct ufs_hba *hba)
+{
+ int ret = 0;
+ int cnt;
+ int i;
+ struct device *dev = hba->dev;
+ struct device_node *np = dev->of_node;
+ const char *name;
+ u32 *clkfreq = NULL;
+ struct ufs_clk_info *clki;
+ int len = 0;
+ size_t sz = 0;
+
+ if (!np)
+ goto out;
+
+ cnt = of_property_count_strings(np, "clock-names");
+ if (!cnt || (cnt == -EINVAL)) {
+ dev_info(dev, "%s: Unable to find clocks, assuming enabled\n",
+ __func__);
+ } else if (cnt < 0) {
+ dev_err(dev, "%s: count clock strings failed, err %d\n",
+ __func__, cnt);
+ ret = cnt;
+ }
+
+ if (cnt <= 0)
+ goto out;
+
+ if (!of_get_property(np, "freq-table-hz", &len)) {
+ dev_info(dev, "freq-table-hz property not specified\n");
+ goto out;
+ }
+
+ if (len <= 0)
+ goto out;
+
+ sz = len / sizeof(*clkfreq);
+ if (sz != 2 * cnt) {
+ dev_err(dev, "%s len mismatch\n", "freq-table-hz");
+ ret = -EINVAL;
+ goto out;
+ }
+
+ clkfreq = devm_kcalloc(dev, sz, sizeof(*clkfreq),
+ GFP_KERNEL);
+ if (!clkfreq) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ ret = of_property_read_u32_array(np, "freq-table-hz",
+ clkfreq, sz);
+ if (ret && (ret != -EINVAL)) {
+ dev_err(dev, "%s: error reading array %d\n",
+ "freq-table-hz", ret);
+ return ret;
+ }
+
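+	/*
+	 * "freq-table-hz" carries one <min max> pair per "clock-names" entry
+	 * (checked above), so walk the array two cells at a time and pair
+	 * each frequency range with its clock name.
+	 */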
+ for (i = 0; i < sz; i += 2) {
+ ret = of_property_read_string_index(np, "clock-names", i/2,
+ &name);
+ if (ret)
+ goto out;
+
+ clki = devm_kzalloc(dev, sizeof(*clki), GFP_KERNEL);
+ if (!clki) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ clki->min_freq = clkfreq[i];
+ clki->max_freq = clkfreq[i+1];
+ clki->name = devm_kstrdup(dev, name, GFP_KERNEL);
+ if (!clki->name) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ if (!strcmp(name, "ref_clk"))
+ clki->keep_link_active = true;
+ dev_dbg(dev, "%s: min %u max %u name %s\n", "freq-table-hz",
+ clki->min_freq, clki->max_freq, clki->name);
+ list_add_tail(&clki->list, &hba->clk_list_head);
+ }
+out:
+ return ret;
+}
+
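+/* Check whether a phandle property entry resolves, dropping the reference. */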
+static bool phandle_exists(const struct device_node *np,
+ const char *phandle_name, int index)
+{
+ struct device_node *parse_np = of_parse_phandle(np, phandle_name, index);
+
+ if (parse_np)
+ of_node_put(parse_np);
+
+ return parse_np != NULL;
+}
+
+#define MAX_PROP_SIZE 32
+int ufshcd_populate_vreg(struct device *dev, const char *name,
+ struct ufs_vreg **out_vreg)
+{
+ char prop_name[MAX_PROP_SIZE];
+ struct ufs_vreg *vreg = NULL;
+ struct device_node *np = dev->of_node;
+
+ if (!np) {
+ dev_err(dev, "%s: non DT initialization\n", __func__);
+ goto out;
+ }
+
+ snprintf(prop_name, MAX_PROP_SIZE, "%s-supply", name);
+ if (!phandle_exists(np, prop_name, 0)) {
+ dev_info(dev, "%s: Unable to find %s regulator, assuming enabled\n",
+ __func__, prop_name);
+ goto out;
+ }
+
+ vreg = devm_kzalloc(dev, sizeof(*vreg), GFP_KERNEL);
+ if (!vreg)
+ return -ENOMEM;
+
+ vreg->name = devm_kstrdup(dev, name, GFP_KERNEL);
+ if (!vreg->name)
+ return -ENOMEM;
+
+ snprintf(prop_name, MAX_PROP_SIZE, "%s-max-microamp", name);
+ if (of_property_read_u32(np, prop_name, &vreg->max_uA)) {
+ dev_info(dev, "%s: unable to find %s\n", __func__, prop_name);
+ vreg->max_uA = 0;
+ }
+out:
+ *out_vreg = vreg;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(ufshcd_populate_vreg);
+
+/**
+ * ufshcd_parse_regulator_info - get regulator info from device tree
+ * @hba: per adapter instance
+ *
+ * Get regulator info from the device tree for the vcc, vccq and vccq2 power
+ * supplies. If a supply is not defined, it is assumed to be always-on and
+ * zero is returned. If a property is defined but parsing it fails, the
+ * corresponding error is returned.
+ */
+static int ufshcd_parse_regulator_info(struct ufs_hba *hba)
+{
+ int err;
+ struct device *dev = hba->dev;
+ struct ufs_vreg_info *info = &hba->vreg_info;
+
+ err = ufshcd_populate_vreg(dev, "vdd-hba", &info->vdd_hba);
+ if (err)
+ goto out;
+
+ err = ufshcd_populate_vreg(dev, "vcc", &info->vcc);
+ if (err)
+ goto out;
+
+ err = ufshcd_populate_vreg(dev, "vccq", &info->vccq);
+ if (err)
+ goto out;
+
+ err = ufshcd_populate_vreg(dev, "vccq2", &info->vccq2);
+out:
+ return err;
+}
+
+void ufshcd_pltfrm_shutdown(struct platform_device *pdev)
+{
+ ufshcd_shutdown((struct ufs_hba *)platform_get_drvdata(pdev));
+}
+EXPORT_SYMBOL_GPL(ufshcd_pltfrm_shutdown);
+
+static void ufshcd_init_lanes_per_dir(struct ufs_hba *hba)
+{
+ struct device *dev = hba->dev;
+ int ret;
+
+ ret = of_property_read_u32(dev->of_node, "lanes-per-direction",
+ &hba->lanes_per_direction);
+ if (ret) {
+ dev_dbg(hba->dev,
+ "%s: failed to read lanes-per-direction, ret=%d\n",
+ __func__, ret);
+ hba->lanes_per_direction = UFSHCD_DEFAULT_LANES_PER_DIRECTION;
+ }
+}
+
+/**
+ * ufshcd_get_pwr_dev_param - get the final agreed attributes for a power
+ *                            mode change
+ * @pltfrm_param: pointer to platform parameters
+ * @dev_max: pointer to device attributes
+ * @agreed_pwr: returned agreed attributes
+ *
+ * Returns 0 on success, non-zero value on failure
+ */
+int ufshcd_get_pwr_dev_param(const struct ufs_dev_params *pltfrm_param,
+ const struct ufs_pa_layer_attr *dev_max,
+ struct ufs_pa_layer_attr *agreed_pwr)
+{
+ int min_pltfrm_gear;
+ int min_dev_gear;
+ bool is_dev_sup_hs = false;
+ bool is_pltfrm_max_hs = false;
+
+ if (dev_max->pwr_rx == FAST_MODE)
+ is_dev_sup_hs = true;
+
+ if (pltfrm_param->desired_working_mode == UFS_HS_MODE) {
+ is_pltfrm_max_hs = true;
+ min_pltfrm_gear = min_t(u32, pltfrm_param->hs_rx_gear,
+ pltfrm_param->hs_tx_gear);
+ } else {
+ min_pltfrm_gear = min_t(u32, pltfrm_param->pwm_rx_gear,
+ pltfrm_param->pwm_tx_gear);
+ }
+
+	/*
+	 * The device doesn't support HS, but
+	 * pltfrm_param->desired_working_mode is HS, so the device and
+	 * pltfrm_param don't agree.
+	 */
+ if (!is_dev_sup_hs && is_pltfrm_max_hs) {
+ pr_info("%s: device doesn't support HS\n",
+ __func__);
+ return -ENOTSUPP;
+ } else if (is_dev_sup_hs && is_pltfrm_max_hs) {
+		/*
+		 * Since the device supports HS, it supports FAST_MODE.
+		 * Since pltfrm_param->desired_working_mode is also HS, the
+		 * final decision (FAST/FASTAUTO) is made according to
+		 * pltfrm_param, as it is the restricting factor.
+		 */
+ agreed_pwr->pwr_rx = pltfrm_param->rx_pwr_hs;
+ agreed_pwr->pwr_tx = agreed_pwr->pwr_rx;
+ } else {
+		/*
+		 * Here pltfrm_param->desired_working_mode is PWM. It doesn't
+		 * matter whether the device supports HS or PWM; in both cases
+		 * pltfrm_param->desired_working_mode determines the mode.
+		 */
+ agreed_pwr->pwr_rx = pltfrm_param->rx_pwr_pwm;
+ agreed_pwr->pwr_tx = agreed_pwr->pwr_rx;
+ }
+
+	/*
+	 * We would like TX to work with the minimum number of lanes between
+	 * the device capability and the vendor preference. The same decision
+	 * is made for RX.
+	 */
+ agreed_pwr->lane_tx = min_t(u32, dev_max->lane_tx,
+ pltfrm_param->tx_lanes);
+ agreed_pwr->lane_rx = min_t(u32, dev_max->lane_rx,
+ pltfrm_param->rx_lanes);
+
+ /* device maximum gear is the minimum between device rx and tx gears */
+ min_dev_gear = min_t(u32, dev_max->gear_rx, dev_max->gear_tx);
+
+	/*
+	 * If the device capabilities and the vendor pre-defined preferences
+	 * are both HS or both PWM, the chosen working gear is the minimum of
+	 * the two gears.
+	 * If one is PWM and one is HS, the PWM side gets to decide the gear,
+	 * as it is also the side that previously decided which power mode the
+	 * device will be configured to.
+	 */
+ if ((is_dev_sup_hs && is_pltfrm_max_hs) ||
+ (!is_dev_sup_hs && !is_pltfrm_max_hs)) {
+ agreed_pwr->gear_rx =
+ min_t(u32, min_dev_gear, min_pltfrm_gear);
+ } else if (!is_dev_sup_hs) {
+ agreed_pwr->gear_rx = min_dev_gear;
+ } else {
+ agreed_pwr->gear_rx = min_pltfrm_gear;
+ }
+ agreed_pwr->gear_tx = agreed_pwr->gear_rx;
+
+ agreed_pwr->hs_rate = pltfrm_param->hs_rate;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(ufshcd_get_pwr_dev_param);
+
+void ufshcd_init_pwr_dev_param(struct ufs_dev_params *dev_param)
+{
+ *dev_param = (struct ufs_dev_params){
+ .tx_lanes = 2,
+ .rx_lanes = 2,
+ .hs_rx_gear = UFS_HS_G3,
+ .hs_tx_gear = UFS_HS_G3,
+ .pwm_rx_gear = UFS_PWM_G4,
+ .pwm_tx_gear = UFS_PWM_G4,
+ .rx_pwr_pwm = SLOW_MODE,
+ .tx_pwr_pwm = SLOW_MODE,
+ .rx_pwr_hs = FAST_MODE,
+ .tx_pwr_hs = FAST_MODE,
+ .hs_rate = PA_HS_MODE_B,
+ .desired_working_mode = UFS_HS_MODE,
+ };
+}
+EXPORT_SYMBOL_GPL(ufshcd_init_pwr_dev_param);
+
+/**
+ * ufshcd_pltfrm_init - probe routine of the driver
+ * @pdev: pointer to Platform device handle
+ * @vops: pointer to variant ops
+ *
+ * Returns 0 on success, non-zero value on failure
+ */
+int ufshcd_pltfrm_init(struct platform_device *pdev,
+ const struct ufs_hba_variant_ops *vops)
+{
+ struct ufs_hba *hba;
+ void __iomem *mmio_base;
+ int irq, err;
+ struct device *dev = &pdev->dev;
+
+ mmio_base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(mmio_base)) {
+ err = PTR_ERR(mmio_base);
+ goto out;
+ }
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ err = irq;
+ goto out;
+ }
+
+ err = ufshcd_alloc_host(dev, &hba);
+ if (err) {
+ dev_err(dev, "Allocation failed\n");
+ goto out;
+ }
+
+ hba->vops = vops;
+
+ err = ufshcd_parse_clock_info(hba);
+ if (err) {
+ dev_err(dev, "%s: clock parse failed %d\n",
+ __func__, err);
+ goto dealloc_host;
+ }
+ err = ufshcd_parse_regulator_info(hba);
+ if (err) {
+ dev_err(dev, "%s: regulator init failed %d\n",
+ __func__, err);
+ goto dealloc_host;
+ }
+
+ ufshcd_init_lanes_per_dir(hba);
+
+ err = ufshcd_init(hba, mmio_base, irq);
+ if (err) {
+ dev_err(dev, "Initialization failed\n");
+ goto dealloc_host;
+ }
+
+ pm_runtime_set_active(dev);
+ pm_runtime_enable(dev);
+
+ return 0;
+
+dealloc_host:
+ ufshcd_dealloc_host(hba);
+out:
+ return err;
+}
+EXPORT_SYMBOL_GPL(ufshcd_pltfrm_init);
+
+MODULE_AUTHOR("Santosh Yaragnavi <santosh.sy@samsung.com>");
+MODULE_AUTHOR("Vinayak Holikatti <h.vinayak@samsung.com>");
+MODULE_DESCRIPTION("UFS host controller Platform bus based glue driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/ufs/host/ufshcd-pltfrm.h b/drivers/ufs/host/ufshcd-pltfrm.h
new file mode 100644
index 000000000..2e4ba2bfb
--- /dev/null
+++ b/drivers/ufs/host/ufshcd-pltfrm.h
@@ -0,0 +1,38 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef UFSHCD_PLTFRM_H_
+#define UFSHCD_PLTFRM_H_
+
+#include <ufs/ufshcd.h>
+
+#define UFS_PWM_MODE 1
+#define UFS_HS_MODE 2
+
+struct ufs_dev_params {
+ u32 pwm_rx_gear; /* pwm rx gear to work in */
+ u32 pwm_tx_gear; /* pwm tx gear to work in */
+ u32 hs_rx_gear; /* hs rx gear to work in */
+ u32 hs_tx_gear; /* hs tx gear to work in */
+ u32 rx_lanes; /* number of rx lanes */
+ u32 tx_lanes; /* number of tx lanes */
+ u32 rx_pwr_pwm; /* rx pwm working pwr */
+ u32 tx_pwr_pwm; /* tx pwm working pwr */
+ u32 rx_pwr_hs; /* rx hs working pwr */
+ u32 tx_pwr_hs; /* tx hs working pwr */
+ u32 hs_rate; /* rate A/B to work in HS */
+ u32 desired_working_mode;
+};
+
+int ufshcd_get_pwr_dev_param(const struct ufs_dev_params *dev_param,
+ const struct ufs_pa_layer_attr *dev_max,
+ struct ufs_pa_layer_attr *agreed_pwr);
+void ufshcd_init_pwr_dev_param(struct ufs_dev_params *dev_param);
+int ufshcd_pltfrm_init(struct platform_device *pdev,
+ const struct ufs_hba_variant_ops *vops);
+void ufshcd_pltfrm_shutdown(struct platform_device *pdev);
+int ufshcd_populate_vreg(struct device *dev, const char *name,
+ struct ufs_vreg **out_vreg);
+
+#endif /* UFSHCD_PLTFRM_H_ */
diff --git a/drivers/ufs/host/ufshci-dwc.h b/drivers/ufs/host/ufshci-dwc.h
new file mode 100644
index 000000000..6c290e272
--- /dev/null
+++ b/drivers/ufs/host/ufshci-dwc.h
@@ -0,0 +1,33 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * UFS Host driver for Synopsys Designware Core
+ *
+ * Copyright (C) 2015-2016 Synopsys, Inc. (www.synopsys.com)
+ *
+ * Authors: Joao Pinto <jpinto@synopsys.com>
+ */
+
+#ifndef _UFSHCI_DWC_H
+#define _UFSHCI_DWC_H
+
+/* DWC HC UFSHCI specific Registers */
+enum dwc_specific_registers {
+ DWC_UFS_REG_HCLKDIV = 0xFC,
+};
+
+/* Clock Divider Values: Hex equivalent of frequency in MHz */
+enum clk_div_values {
+ DWC_UFS_REG_HCLKDIV_DIV_62_5 = 0x3e,
+ DWC_UFS_REG_HCLKDIV_DIV_125 = 0x7d,
+ DWC_UFS_REG_HCLKDIV_DIV_200 = 0xc8,
+};
+
+/* Selector Index */
+enum selector_index {
+ SELIND_LN0_TX = 0x00,
+ SELIND_LN1_TX = 0x01,
+ SELIND_LN0_RX = 0x04,
+ SELIND_LN1_RX = 0x05,
+};
+
+#endif /* End of Header */