author    Daniel Baumann <daniel.baumann@progress-linux.org> 2024-04-07 18:49:45 +0000
committer Daniel Baumann <daniel.baumann@progress-linux.org> 2024-04-07 18:49:45 +0000
commit    2c3c1048746a4622d8c89a29670120dc8fab93c4 (patch)
tree      848558de17fb3008cdf4d861b01ac7781903ce39 /drivers/gpu/drm/msm/dsi/phy
parent    Initial commit. (diff)
Adding upstream version 6.1.76.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'drivers/gpu/drm/msm/dsi/phy')
-rw-r--r-- drivers/gpu/drm/msm/dsi/phy/dsi_phy.c           |  855
-rw-r--r-- drivers/gpu/drm/msm/dsi/phy/dsi_phy.h           |  134
-rw-r--r-- drivers/gpu/drm/msm/dsi/phy/dsi_phy_10nm.c      | 1061
-rw-r--r-- drivers/gpu/drm/msm/dsi/phy/dsi_phy_14nm.c      | 1086
-rw-r--r-- drivers/gpu/drm/msm/dsi/phy/dsi_phy_20nm.c      |  148
-rw-r--r-- drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm.c      |  822
-rw-r--r-- drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm_8960.c |  660
-rw-r--r-- drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c       | 1104
8 files changed, 5870 insertions(+), 0 deletions(-)
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c
new file mode 100644
index 000000000..62bc3756f
--- /dev/null
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c
@@ -0,0 +1,855 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/platform_device.h>
+#include <dt-bindings/phy/phy.h>
+
+#include "dsi_phy.h"
+
+#define S_DIV_ROUND_UP(n, d) \
+ (((n) >= 0) ? (((n) + (d) - 1) / (d)) : (((n) - (d) + 1) / (d)))
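+/*
+ * Signed division rounded away from zero, e.g.
+ * S_DIV_ROUND_UP(7, 2) == 4 and S_DIV_ROUND_UP(-7, 2) == -4.
+ */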
+
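+/*
+ * Return tmin plus "percent" percent of the (tmax - tmin) range, with the
+ * scaled offset rounded away from zero and the result clamped at
+ * min_result; when "even" is set, an odd result is rounded down by one.
+ * For example, linear_inter(100, 20, 10, 0, false) yields
+ * S_DIV_ROUND_UP(80 * 10, 100) + 20 == 28.
+ */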
+static inline s32 linear_inter(s32 tmax, s32 tmin, s32 percent,
+ s32 min_result, bool even)
+{
+ s32 v;
+
+ v = (tmax - tmin) * percent;
+ v = S_DIV_ROUND_UP(v, 100) + tmin;
+ if (even && (v & 0x1))
+ return max_t(s32, min_result, v - 1);
+ else
+ return max_t(s32, min_result, v);
+}
+
+static void dsi_dphy_timing_calc_clk_zero(struct msm_dsi_dphy_timing *timing,
+ s32 ui, s32 coeff, s32 pcnt)
+{
+ s32 tmax, tmin, clk_z;
+ s32 temp;
+
+ /* reset */
+ temp = 300 * coeff - ((timing->clk_prepare >> 1) + 1) * 2 * ui;
+ tmin = S_DIV_ROUND_UP(temp, ui) - 2;
+ if (tmin > 255) {
+ tmax = 511;
+ clk_z = linear_inter(2 * tmin, tmin, pcnt, 0, true);
+ } else {
+ tmax = 255;
+ clk_z = linear_inter(tmax, tmin, pcnt, 0, true);
+ }
+
+ /* adjust */
+ temp = (timing->hs_rqst + timing->clk_prepare + clk_z) & 0x7;
+ timing->clk_zero = clk_z + 8 - temp;
+}
+
+int msm_dsi_dphy_timing_calc(struct msm_dsi_dphy_timing *timing,
+ struct msm_dsi_phy_clk_request *clk_req)
+{
+ const unsigned long bit_rate = clk_req->bitclk_rate;
+ const unsigned long esc_rate = clk_req->escclk_rate;
+ s32 ui, lpx;
+ s32 tmax, tmin;
+ s32 pcnt0 = 10;
+ s32 pcnt1 = (bit_rate > 1200000000) ? 15 : 10;
+ s32 pcnt2 = 10;
+ s32 pcnt3 = (bit_rate > 180000000) ? 10 : 40;
+ s32 coeff = 1000; /* Precision, should avoid overflow */
+ s32 temp;
+
+ if (!bit_rate || !esc_rate)
+ return -EINVAL;
+
+ ui = mult_frac(NSEC_PER_MSEC, coeff, bit_rate / 1000);
+ lpx = mult_frac(NSEC_PER_MSEC, coeff, esc_rate / 1000);
+
+ tmax = S_DIV_ROUND_UP(95 * coeff, ui) - 2;
+ tmin = S_DIV_ROUND_UP(38 * coeff, ui) - 2;
+ timing->clk_prepare = linear_inter(tmax, tmin, pcnt0, 0, true);
+
+ temp = lpx / ui;
+ if (temp & 0x1)
+ timing->hs_rqst = temp;
+ else
+ timing->hs_rqst = max_t(s32, 0, temp - 2);
+
+ /* Calculate clk_zero after clk_prepare and hs_rqst */
+ dsi_dphy_timing_calc_clk_zero(timing, ui, coeff, pcnt2);
+
+ temp = 105 * coeff + 12 * ui - 20 * coeff;
+ tmax = S_DIV_ROUND_UP(temp, ui) - 2;
+ tmin = S_DIV_ROUND_UP(60 * coeff, ui) - 2;
+ timing->clk_trail = linear_inter(tmax, tmin, pcnt3, 0, true);
+
+ temp = 85 * coeff + 6 * ui;
+ tmax = S_DIV_ROUND_UP(temp, ui) - 2;
+ temp = 40 * coeff + 4 * ui;
+ tmin = S_DIV_ROUND_UP(temp, ui) - 2;
+ timing->hs_prepare = linear_inter(tmax, tmin, pcnt1, 0, true);
+
+ tmax = 255;
+ temp = ((timing->hs_prepare >> 1) + 1) * 2 * ui + 2 * ui;
+ temp = 145 * coeff + 10 * ui - temp;
+ tmin = S_DIV_ROUND_UP(temp, ui) - 2;
+ timing->hs_zero = linear_inter(tmax, tmin, pcnt2, 24, true);
+
+ temp = 105 * coeff + 12 * ui - 20 * coeff;
+ tmax = S_DIV_ROUND_UP(temp, ui) - 2;
+ temp = 60 * coeff + 4 * ui;
+ tmin = DIV_ROUND_UP(temp, ui) - 2;
+ timing->hs_trail = linear_inter(tmax, tmin, pcnt3, 0, true);
+
+ tmax = 255;
+ tmin = S_DIV_ROUND_UP(100 * coeff, ui) - 2;
+ timing->hs_exit = linear_inter(tmax, tmin, pcnt2, 0, true);
+
+ tmax = 63;
+ temp = ((timing->hs_exit >> 1) + 1) * 2 * ui;
+ temp = 60 * coeff + 52 * ui - 24 * ui - temp;
+ tmin = S_DIV_ROUND_UP(temp, 8 * ui) - 1;
+ timing->shared_timings.clk_post = linear_inter(tmax, tmin, pcnt2, 0,
+ false);
+ tmax = 63;
+ temp = ((timing->clk_prepare >> 1) + 1) * 2 * ui;
+ temp += ((timing->clk_zero >> 1) + 1) * 2 * ui;
+ temp += 8 * ui + lpx;
+ tmin = S_DIV_ROUND_UP(temp, 8 * ui) - 1;
+ if (tmin > tmax) {
+ temp = linear_inter(2 * tmax, tmin, pcnt2, 0, false);
+ timing->shared_timings.clk_pre = temp >> 1;
+ timing->shared_timings.clk_pre_inc_by_2 = true;
+ } else {
+ timing->shared_timings.clk_pre =
+ linear_inter(tmax, tmin, pcnt2, 0, false);
+ timing->shared_timings.clk_pre_inc_by_2 = false;
+ }
+
+ timing->ta_go = 3;
+ timing->ta_sure = 0;
+ timing->ta_get = 4;
+
+ DBG("PHY timings: %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d",
+ timing->shared_timings.clk_pre, timing->shared_timings.clk_post,
+ timing->shared_timings.clk_pre_inc_by_2, timing->clk_zero,
+ timing->clk_trail, timing->clk_prepare, timing->hs_exit,
+ timing->hs_zero, timing->hs_prepare, timing->hs_trail,
+ timing->hs_rqst);
+
+ return 0;
+}
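+
+/*
+ * A worked example of the fixed-point scaling above: with coeff = 1000 and
+ * an 800 MHz bit clock, ui = mult_frac(NSEC_PER_MSEC, 1000, 800000) == 1250,
+ * i.e. one unit interval of 1.25 ns carried with three extra decimal digits.
+ */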
+
+int msm_dsi_dphy_timing_calc_v2(struct msm_dsi_dphy_timing *timing,
+ struct msm_dsi_phy_clk_request *clk_req)
+{
+ const unsigned long bit_rate = clk_req->bitclk_rate;
+ const unsigned long esc_rate = clk_req->escclk_rate;
+ s32 ui, ui_x8;
+ s32 tmax, tmin;
+ s32 pcnt0 = 50;
+ s32 pcnt1 = 50;
+ s32 pcnt2 = 10;
+ s32 pcnt3 = 30;
+ s32 pcnt4 = 10;
+ s32 pcnt5 = 2;
+ s32 coeff = 1000; /* Precision, should avoid overflow */
+ s32 hb_en, hb_en_ckln, pd_ckln, pd;
+ s32 val, val_ckln;
+ s32 temp;
+
+ if (!bit_rate || !esc_rate)
+ return -EINVAL;
+
+ timing->hs_halfbyte_en = 0;
+ hb_en = 0;
+ timing->hs_halfbyte_en_ckln = 0;
+ hb_en_ckln = 0;
+ timing->hs_prep_dly_ckln = (bit_rate > 100000000) ? 0 : 3;
+ pd_ckln = timing->hs_prep_dly_ckln;
+ timing->hs_prep_dly = (bit_rate > 120000000) ? 0 : 1;
+ pd = timing->hs_prep_dly;
+
+ val = (hb_en << 2) + (pd << 1);
+ val_ckln = (hb_en_ckln << 2) + (pd_ckln << 1);
+
+ ui = mult_frac(NSEC_PER_MSEC, coeff, bit_rate / 1000);
+ ui_x8 = ui << 3;
+
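+ /*
+ * The HS timing counters below are programmed in byte-clock cycles,
+ * i.e. units of 8 UI, hence ui_x8 as the divisor in the calculations.
+ */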
+ temp = S_DIV_ROUND_UP(38 * coeff - val_ckln * ui, ui_x8);
+ tmin = max_t(s32, temp, 0);
+ temp = (95 * coeff - val_ckln * ui) / ui_x8;
+ tmax = max_t(s32, temp, 0);
+ timing->clk_prepare = linear_inter(tmax, tmin, pcnt0, 0, false);
+
+ temp = 300 * coeff - ((timing->clk_prepare << 3) + val_ckln) * ui;
+ tmin = S_DIV_ROUND_UP(temp - 11 * ui, ui_x8) - 3;
+ tmax = (tmin > 255) ? 511 : 255;
+ timing->clk_zero = linear_inter(tmax, tmin, pcnt5, 0, false);
+
+ tmin = DIV_ROUND_UP(60 * coeff + 3 * ui, ui_x8);
+ temp = 105 * coeff + 12 * ui - 20 * coeff;
+ tmax = (temp + 3 * ui) / ui_x8;
+ timing->clk_trail = linear_inter(tmax, tmin, pcnt3, 0, false);
+
+ temp = S_DIV_ROUND_UP(40 * coeff + 4 * ui - val * ui, ui_x8);
+ tmin = max_t(s32, temp, 0);
+ temp = (85 * coeff + 6 * ui - val * ui) / ui_x8;
+ tmax = max_t(s32, temp, 0);
+ timing->hs_prepare = linear_inter(tmax, tmin, pcnt1, 0, false);
+
+ temp = 145 * coeff + 10 * ui - ((timing->hs_prepare << 3) + val) * ui;
+ tmin = S_DIV_ROUND_UP(temp - 11 * ui, ui_x8) - 3;
+ tmax = 255;
+ timing->hs_zero = linear_inter(tmax, tmin, pcnt4, 0, false);
+
+ tmin = DIV_ROUND_UP(60 * coeff + 4 * ui + 3 * ui, ui_x8);
+ temp = 105 * coeff + 12 * ui - 20 * coeff;
+ tmax = (temp + 3 * ui) / ui_x8;
+ timing->hs_trail = linear_inter(tmax, tmin, pcnt3, 0, false);
+
+ temp = 50 * coeff + ((hb_en << 2) - 8) * ui;
+ timing->hs_rqst = S_DIV_ROUND_UP(temp, ui_x8);
+
+ tmin = DIV_ROUND_UP(100 * coeff, ui_x8) - 1;
+ tmax = 255;
+ timing->hs_exit = linear_inter(tmax, tmin, pcnt2, 0, false);
+
+ temp = 50 * coeff + ((hb_en_ckln << 2) - 8) * ui;
+ timing->hs_rqst_ckln = S_DIV_ROUND_UP(temp, ui_x8);
+
+ temp = 60 * coeff + 52 * ui - 43 * ui;
+ tmin = DIV_ROUND_UP(temp, ui_x8) - 1;
+ tmax = 63;
+ timing->shared_timings.clk_post =
+ linear_inter(tmax, tmin, pcnt2, 0, false);
+
+ temp = 8 * ui + ((timing->clk_prepare << 3) + val_ckln) * ui;
+ temp += (((timing->clk_zero + 3) << 3) + 11 - (pd_ckln << 1)) * ui;
+ temp += hb_en_ckln ? (((timing->hs_rqst_ckln << 3) + 4) * ui) :
+ (((timing->hs_rqst_ckln << 3) + 8) * ui);
+ tmin = S_DIV_ROUND_UP(temp, ui_x8) - 1;
+ tmax = 63;
+ if (tmin > tmax) {
+ temp = linear_inter(tmax << 1, tmin, pcnt2, 0, false);
+ timing->shared_timings.clk_pre = temp >> 1;
+ timing->shared_timings.clk_pre_inc_by_2 = 1;
+ } else {
+ timing->shared_timings.clk_pre =
+ linear_inter(tmax, tmin, pcnt2, 0, false);
+ timing->shared_timings.clk_pre_inc_by_2 = 0;
+ }
+
+ timing->ta_go = 3;
+ timing->ta_sure = 0;
+ timing->ta_get = 4;
+
+ DBG("%d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d",
+ timing->shared_timings.clk_pre, timing->shared_timings.clk_post,
+ timing->shared_timings.clk_pre_inc_by_2, timing->clk_zero,
+ timing->clk_trail, timing->clk_prepare, timing->hs_exit,
+ timing->hs_zero, timing->hs_prepare, timing->hs_trail,
+ timing->hs_rqst, timing->hs_rqst_ckln, timing->hs_halfbyte_en,
+ timing->hs_halfbyte_en_ckln, timing->hs_prep_dly,
+ timing->hs_prep_dly_ckln);
+
+ return 0;
+}
+
+int msm_dsi_dphy_timing_calc_v3(struct msm_dsi_dphy_timing *timing,
+ struct msm_dsi_phy_clk_request *clk_req)
+{
+ const unsigned long bit_rate = clk_req->bitclk_rate;
+ const unsigned long esc_rate = clk_req->escclk_rate;
+ s32 ui, ui_x8;
+ s32 tmax, tmin;
+ s32 pcnt0 = 50;
+ s32 pcnt1 = 50;
+ s32 pcnt2 = 10;
+ s32 pcnt3 = 30;
+ s32 pcnt4 = 10;
+ s32 pcnt5 = 2;
+ s32 coeff = 1000; /* Precision, should avoid overflow */
+ s32 hb_en, hb_en_ckln;
+ s32 temp;
+
+ if (!bit_rate || !esc_rate)
+ return -EINVAL;
+
+ timing->hs_halfbyte_en = 0;
+ hb_en = 0;
+ timing->hs_halfbyte_en_ckln = 0;
+ hb_en_ckln = 0;
+
+ ui = mult_frac(NSEC_PER_MSEC, coeff, bit_rate / 1000);
+ ui_x8 = ui << 3;
+
+ temp = S_DIV_ROUND_UP(38 * coeff, ui_x8);
+ tmin = max_t(s32, temp, 0);
+ temp = (95 * coeff) / ui_x8;
+ tmax = max_t(s32, temp, 0);
+ timing->clk_prepare = linear_inter(tmax, tmin, pcnt0, 0, false);
+
+ temp = 300 * coeff - (timing->clk_prepare << 3) * ui;
+ tmin = S_DIV_ROUND_UP(temp, ui_x8) - 1;
+ tmax = (tmin > 255) ? 511 : 255;
+ timing->clk_zero = linear_inter(tmax, tmin, pcnt5, 0, false);
+
+ tmin = DIV_ROUND_UP(60 * coeff + 3 * ui, ui_x8);
+ temp = 105 * coeff + 12 * ui - 20 * coeff;
+ tmax = (temp + 3 * ui) / ui_x8;
+ timing->clk_trail = linear_inter(tmax, tmin, pcnt3, 0, false);
+
+ temp = S_DIV_ROUND_UP(40 * coeff + 4 * ui, ui_x8);
+ tmin = max_t(s32, temp, 0);
+ temp = (85 * coeff + 6 * ui) / ui_x8;
+ tmax = max_t(s32, temp, 0);
+ timing->hs_prepare = linear_inter(tmax, tmin, pcnt1, 0, false);
+
+ temp = 145 * coeff + 10 * ui - (timing->hs_prepare << 3) * ui;
+ tmin = S_DIV_ROUND_UP(temp, ui_x8) - 1;
+ tmax = 255;
+ timing->hs_zero = linear_inter(tmax, tmin, pcnt4, 0, false);
+
+ tmin = DIV_ROUND_UP(60 * coeff + 4 * ui, ui_x8) - 1;
+ temp = 105 * coeff + 12 * ui - 20 * coeff;
+ tmax = (temp / ui_x8) - 1;
+ timing->hs_trail = linear_inter(tmax, tmin, pcnt3, 0, false);
+
+ temp = 50 * coeff + ((hb_en << 2) - 8) * ui;
+ timing->hs_rqst = S_DIV_ROUND_UP(temp, ui_x8);
+
+ tmin = DIV_ROUND_UP(100 * coeff, ui_x8) - 1;
+ tmax = 255;
+ timing->hs_exit = linear_inter(tmax, tmin, pcnt2, 0, false);
+
+ temp = 50 * coeff + ((hb_en_ckln << 2) - 8) * ui;
+ timing->hs_rqst_ckln = S_DIV_ROUND_UP(temp, ui_x8);
+
+ temp = 60 * coeff + 52 * ui - 43 * ui;
+ tmin = DIV_ROUND_UP(temp, ui_x8) - 1;
+ tmax = 63;
+ timing->shared_timings.clk_post =
+ linear_inter(tmax, tmin, pcnt2, 0, false);
+
+ temp = 8 * ui + (timing->clk_prepare << 3) * ui;
+ temp += (((timing->clk_zero + 3) << 3) + 11) * ui;
+ temp += hb_en_ckln ? (((timing->hs_rqst_ckln << 3) + 4) * ui) :
+ (((timing->hs_rqst_ckln << 3) + 8) * ui);
+ tmin = S_DIV_ROUND_UP(temp, ui_x8) - 1;
+ tmax = 63;
+ if (tmin > tmax) {
+ temp = linear_inter(tmax << 1, tmin, pcnt2, 0, false);
+ timing->shared_timings.clk_pre = temp >> 1;
+ timing->shared_timings.clk_pre_inc_by_2 = 1;
+ } else {
+ timing->shared_timings.clk_pre =
+ linear_inter(tmax, tmin, pcnt2, 0, false);
+ timing->shared_timings.clk_pre_inc_by_2 = 0;
+ }
+
+ timing->ta_go = 3;
+ timing->ta_sure = 0;
+ timing->ta_get = 4;
+
+ DBG("%d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d",
+ timing->shared_timings.clk_pre, timing->shared_timings.clk_post,
+ timing->shared_timings.clk_pre_inc_by_2, timing->clk_zero,
+ timing->clk_trail, timing->clk_prepare, timing->hs_exit,
+ timing->hs_zero, timing->hs_prepare, timing->hs_trail,
+ timing->hs_rqst, timing->hs_rqst_ckln, timing->hs_halfbyte_en,
+ timing->hs_halfbyte_en_ckln, timing->hs_prep_dly,
+ timing->hs_prep_dly_ckln);
+
+ return 0;
+}
+
+int msm_dsi_dphy_timing_calc_v4(struct msm_dsi_dphy_timing *timing,
+ struct msm_dsi_phy_clk_request *clk_req)
+{
+ const unsigned long bit_rate = clk_req->bitclk_rate;
+ const unsigned long esc_rate = clk_req->escclk_rate;
+ s32 ui, ui_x8;
+ s32 tmax, tmin;
+ s32 pcnt_clk_prep = 50;
+ s32 pcnt_clk_zero = 2;
+ s32 pcnt_clk_trail = 30;
+ s32 pcnt_hs_prep = 50;
+ s32 pcnt_hs_zero = 10;
+ s32 pcnt_hs_trail = 30;
+ s32 pcnt_hs_exit = 10;
+ s32 coeff = 1000; /* Precision, should avoid overflow */
+ s32 hb_en;
+ s32 temp;
+
+ if (!bit_rate || !esc_rate)
+ return -EINVAL;
+
+ hb_en = 0;
+
+ ui = mult_frac(NSEC_PER_MSEC, coeff, bit_rate / 1000);
+ ui_x8 = ui << 3;
+
+ /* TODO: verify these calculations against the latest downstream driver.
+ * Everything except clk_post/clk_pre uses the v3 calculations, since the
+ * downstream driver has the same calculations for v3 and v4.
+ */
+
+ temp = S_DIV_ROUND_UP(38 * coeff, ui_x8);
+ tmin = max_t(s32, temp, 0);
+ temp = (95 * coeff) / ui_x8;
+ tmax = max_t(s32, temp, 0);
+ timing->clk_prepare = linear_inter(tmax, tmin, pcnt_clk_prep, 0, false);
+
+ temp = 300 * coeff - (timing->clk_prepare << 3) * ui;
+ tmin = S_DIV_ROUND_UP(temp, ui_x8) - 1;
+ tmax = (tmin > 255) ? 511 : 255;
+ timing->clk_zero = linear_inter(tmax, tmin, pcnt_clk_zero, 0, false);
+
+ tmin = DIV_ROUND_UP(60 * coeff + 3 * ui, ui_x8);
+ temp = 105 * coeff + 12 * ui - 20 * coeff;
+ tmax = (temp + 3 * ui) / ui_x8;
+ timing->clk_trail = linear_inter(tmax, tmin, pcnt_clk_trail, 0, false);
+
+ temp = S_DIV_ROUND_UP(40 * coeff + 4 * ui, ui_x8);
+ tmin = max_t(s32, temp, 0);
+ temp = (85 * coeff + 6 * ui) / ui_x8;
+ tmax = max_t(s32, temp, 0);
+ timing->hs_prepare = linear_inter(tmax, tmin, pcnt_hs_prep, 0, false);
+
+ temp = 145 * coeff + 10 * ui - (timing->hs_prepare << 3) * ui;
+ tmin = S_DIV_ROUND_UP(temp, ui_x8) - 1;
+ tmax = 255;
+ timing->hs_zero = linear_inter(tmax, tmin, pcnt_hs_zero, 0, false);
+
+ tmin = DIV_ROUND_UP(60 * coeff + 4 * ui, ui_x8) - 1;
+ temp = 105 * coeff + 12 * ui - 20 * coeff;
+ tmax = (temp / ui_x8) - 1;
+ timing->hs_trail = linear_inter(tmax, tmin, pcnt_hs_trail, 0, false);
+
+ temp = 50 * coeff + ((hb_en << 2) - 8) * ui;
+ timing->hs_rqst = S_DIV_ROUND_UP(temp, ui_x8);
+
+ tmin = DIV_ROUND_UP(100 * coeff, ui_x8) - 1;
+ tmax = 255;
+ timing->hs_exit = linear_inter(tmax, tmin, pcnt_hs_exit, 0, false);
+
+ /* recommended min
+ * = roundup((mipi_min_ns + t_hs_trail_ns)/(16*bit_clk_ns), 0) - 1
+ */
+ temp = 60 * coeff + 52 * ui + (timing->hs_trail + 1) * ui_x8;
+ tmin = DIV_ROUND_UP(temp, 16 * ui) - 1;
+ tmax = 255;
+ timing->shared_timings.clk_post = linear_inter(tmax, tmin, 5, 0, false);
+
+ /* recommended min
+ * val1 = (tlpx_ns + clk_prepare_ns + clk_zero_ns + hs_rqst_ns)
+ * val2 = (16 * bit_clk_ns)
+ * final = roundup(val1/val2, 0) - 1
+ */
+ temp = 52 * coeff + (timing->clk_prepare + timing->clk_zero + 1) * ui_x8 + 54 * coeff;
+ tmin = DIV_ROUND_UP(temp, 16 * ui) - 1;
+ tmax = 255;
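+ /* clk_pre: tmin plus 1.25% of the (tmax - tmin) range */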
+ timing->shared_timings.clk_pre = DIV_ROUND_UP((tmax - tmin) * 125, 10000) + tmin;
+
+ DBG("%d, %d, %d, %d, %d, %d, %d, %d, %d, %d",
+ timing->shared_timings.clk_pre, timing->shared_timings.clk_post,
+ timing->clk_zero, timing->clk_trail, timing->clk_prepare, timing->hs_exit,
+ timing->hs_zero, timing->hs_prepare, timing->hs_trail, timing->hs_rqst);
+
+ return 0;
+}
+
+int msm_dsi_cphy_timing_calc_v4(struct msm_dsi_dphy_timing *timing,
+ struct msm_dsi_phy_clk_request *clk_req)
+{
+ const unsigned long bit_rate = clk_req->bitclk_rate;
+ const unsigned long esc_rate = clk_req->escclk_rate;
+ s32 ui, ui_x7;
+ s32 tmax, tmin;
+ s32 coeff = 1000; /* Precision, should avoid overflow */
+ s32 temp;
+
+ if (!bit_rate || !esc_rate)
+ return -EINVAL;
+
+ ui = mult_frac(NSEC_PER_MSEC, coeff, bit_rate / 1000);
+ ui_x7 = ui * 7;
+
+ temp = S_DIV_ROUND_UP(38 * coeff, ui_x7);
+ tmin = max_t(s32, temp, 0);
+ temp = (95 * coeff) / ui_x7;
+ tmax = max_t(s32, temp, 0);
+ timing->clk_prepare = linear_inter(tmax, tmin, 50, 0, false);
+
+ tmin = DIV_ROUND_UP(50 * coeff, ui_x7);
+ tmax = 255;
+ timing->hs_rqst = linear_inter(tmax, tmin, 1, 0, false);
+
+ tmin = DIV_ROUND_UP(100 * coeff, ui_x7) - 1;
+ tmax = 255;
+ timing->hs_exit = linear_inter(tmax, tmin, 10, 0, false);
+
+ tmin = 1;
+ tmax = 32;
+ timing->shared_timings.clk_post = linear_inter(tmax, tmin, 80, 0, false);
+
+ tmin = min_t(s32, 64, S_DIV_ROUND_UP(262 * coeff, ui_x7) - 1);
+ tmax = 64;
+ timing->shared_timings.clk_pre = linear_inter(tmax, tmin, 20, 0, false);
+
+ DBG("%d, %d, %d, %d, %d",
+ timing->shared_timings.clk_pre, timing->shared_timings.clk_post,
+ timing->clk_prepare, timing->hs_exit, timing->hs_rqst);
+
+ return 0;
+}
+
+static int dsi_phy_enable_resource(struct msm_dsi_phy *phy)
+{
+ struct device *dev = &phy->pdev->dev;
+ int ret;
+
+ ret = pm_runtime_resume_and_get(dev);
+ if (ret)
+ return ret;
+
+ ret = clk_prepare_enable(phy->ahb_clk);
+ if (ret) {
+ DRM_DEV_ERROR(dev, "%s: can't enable ahb clk, %d\n", __func__, ret);
+ pm_runtime_put_sync(dev);
+ }
+
+ return ret;
+}
+
+static void dsi_phy_disable_resource(struct msm_dsi_phy *phy)
+{
+ clk_disable_unprepare(phy->ahb_clk);
+ pm_runtime_put(&phy->pdev->dev);
+}
+
+static const struct of_device_id dsi_phy_dt_match[] = {
+#ifdef CONFIG_DRM_MSM_DSI_28NM_PHY
+ { .compatible = "qcom,dsi-phy-28nm-hpm",
+ .data = &dsi_phy_28nm_hpm_cfgs },
+ { .compatible = "qcom,dsi-phy-28nm-hpm-fam-b",
+ .data = &dsi_phy_28nm_hpm_famb_cfgs },
+ { .compatible = "qcom,dsi-phy-28nm-lp",
+ .data = &dsi_phy_28nm_lp_cfgs },
+#endif
+#ifdef CONFIG_DRM_MSM_DSI_20NM_PHY
+ { .compatible = "qcom,dsi-phy-20nm",
+ .data = &dsi_phy_20nm_cfgs },
+#endif
+#ifdef CONFIG_DRM_MSM_DSI_28NM_8960_PHY
+ { .compatible = "qcom,dsi-phy-28nm-8960",
+ .data = &dsi_phy_28nm_8960_cfgs },
+#endif
+#ifdef CONFIG_DRM_MSM_DSI_14NM_PHY
+ { .compatible = "qcom,dsi-phy-14nm",
+ .data = &dsi_phy_14nm_cfgs },
+ { .compatible = "qcom,dsi-phy-14nm-660",
+ .data = &dsi_phy_14nm_660_cfgs },
+ { .compatible = "qcom,dsi-phy-14nm-8953",
+ .data = &dsi_phy_14nm_8953_cfgs },
+#endif
+#ifdef CONFIG_DRM_MSM_DSI_10NM_PHY
+ { .compatible = "qcom,dsi-phy-10nm",
+ .data = &dsi_phy_10nm_cfgs },
+ { .compatible = "qcom,dsi-phy-10nm-8998",
+ .data = &dsi_phy_10nm_8998_cfgs },
+#endif
+#ifdef CONFIG_DRM_MSM_DSI_7NM_PHY
+ { .compatible = "qcom,dsi-phy-7nm",
+ .data = &dsi_phy_7nm_cfgs },
+ { .compatible = "qcom,dsi-phy-7nm-8150",
+ .data = &dsi_phy_7nm_8150_cfgs },
+ { .compatible = "qcom,sc7280-dsi-phy-7nm",
+ .data = &dsi_phy_7nm_7280_cfgs },
+#endif
+ {}
+};
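+
+/*
+ * A (hypothetical) device tree node matched by the table above; the
+ * reg-names match the regions ioremapped in dsi_phy_driver_probe():
+ *
+ *   dsi-phy@ae94400 {
+ *           compatible = "qcom,dsi-phy-10nm";
+ *           reg-names = "dsi_phy", "dsi_phy_lane", "dsi_pll";
+ *   };
+ */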
+
+/*
+ * Currently, we only support one SoC for each PHY type. When we have multiple
+ * SoCs for the same PHY, we can try to make the index searching a bit more
+ * clever.
+ */
+static int dsi_phy_get_id(struct msm_dsi_phy *phy)
+{
+ struct platform_device *pdev = phy->pdev;
+ const struct msm_dsi_phy_cfg *cfg = phy->cfg;
+ struct resource *res;
+ int i;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dsi_phy");
+ if (!res)
+ return -EINVAL;
+
+ for (i = 0; i < cfg->num_dsi_phy; i++) {
+ if (cfg->io_start[i] == res->start)
+ return i;
+ }
+
+ return -EINVAL;
+}
+
+static int dsi_phy_driver_probe(struct platform_device *pdev)
+{
+ struct msm_dsi_phy *phy;
+ struct device *dev = &pdev->dev;
+ u32 phy_type;
+ int ret;
+
+ phy = devm_kzalloc(dev, sizeof(*phy), GFP_KERNEL);
+ if (!phy)
+ return -ENOMEM;
+
+ phy->provided_clocks = devm_kzalloc(dev,
+ struct_size(phy->provided_clocks, hws, NUM_PROVIDED_CLKS),
+ GFP_KERNEL);
+ if (!phy->provided_clocks)
+ return -ENOMEM;
+
+ phy->provided_clocks->num = NUM_PROVIDED_CLKS;
+
+ phy->cfg = of_device_get_match_data(&pdev->dev);
+ if (!phy->cfg)
+ return -ENODEV;
+
+ phy->pdev = pdev;
+
+ phy->id = dsi_phy_get_id(phy);
+ if (phy->id < 0)
+ return dev_err_probe(dev, phy->id,
+ "Couldn't identify PHY index\n");
+
+ phy->regulator_ldo_mode = of_property_read_bool(dev->of_node,
+ "qcom,dsi-phy-regulator-ldo-mode");
+ if (!of_property_read_u32(dev->of_node, "phy-type", &phy_type))
+ phy->cphy_mode = (phy_type == PHY_TYPE_CPHY);
+
+ phy->base = msm_ioremap_size(pdev, "dsi_phy", &phy->base_size);
+ if (IS_ERR(phy->base))
+ return dev_err_probe(dev, PTR_ERR(phy->base),
+ "Failed to map phy base\n");
+
+ phy->pll_base = msm_ioremap_size(pdev, "dsi_pll", &phy->pll_size);
+ if (IS_ERR(phy->pll_base))
+ return dev_err_probe(dev, PTR_ERR(phy->pll_base),
+ "Failed to map pll base\n");
+
+ if (phy->cfg->has_phy_lane) {
+ phy->lane_base = msm_ioremap_size(pdev, "dsi_phy_lane", &phy->lane_size);
+ if (IS_ERR(phy->lane_base))
+ return dev_err_probe(dev, PTR_ERR(phy->lane_base),
+ "Failed to map phy lane base\n");
+ }
+
+ if (phy->cfg->has_phy_regulator) {
+ phy->reg_base = msm_ioremap_size(pdev, "dsi_phy_regulator", &phy->reg_size);
+ if (IS_ERR(phy->reg_base))
+ return dev_err_probe(dev, PTR_ERR(phy->reg_base),
+ "Failed to map phy regulator base\n");
+ }
+
+ if (phy->cfg->ops.parse_dt_properties) {
+ ret = phy->cfg->ops.parse_dt_properties(phy);
+ if (ret)
+ return ret;
+ }
+
+ ret = devm_regulator_bulk_get_const(dev, phy->cfg->num_regulators,
+ phy->cfg->regulator_data,
+ &phy->supplies);
+ if (ret)
+ return ret;
+
+ phy->ahb_clk = msm_clk_get(pdev, "iface");
+ if (IS_ERR(phy->ahb_clk))
+ return dev_err_probe(dev, PTR_ERR(phy->ahb_clk),
+ "Unable to get ahb clk\n");
+
+ /* PLL init will call into clk_register which requires
+ * register access, so we need to enable power and ahb clock.
+ */
+ ret = dsi_phy_enable_resource(phy);
+ if (ret)
+ return ret;
+
+ if (phy->cfg->ops.pll_init) {
+ ret = phy->cfg->ops.pll_init(phy);
+ if (ret)
+ return dev_err_probe(dev, ret,
+ "PLL init failed; need separate clk driver\n");
+ }
+
+ ret = devm_of_clk_add_hw_provider(dev, of_clk_hw_onecell_get,
+ phy->provided_clocks);
+ if (ret)
+ return dev_err_probe(dev, ret,
+ "Failed to register clk provider\n");
+
+ dsi_phy_disable_resource(phy);
+
+ platform_set_drvdata(pdev, phy);
+
+ return 0;
+}
+
+static struct platform_driver dsi_phy_platform_driver = {
+ .probe = dsi_phy_driver_probe,
+ .driver = {
+ .name = "msm_dsi_phy",
+ .of_match_table = dsi_phy_dt_match,
+ },
+};
+
+void __init msm_dsi_phy_driver_register(void)
+{
+ platform_driver_register(&dsi_phy_platform_driver);
+}
+
+void __exit msm_dsi_phy_driver_unregister(void)
+{
+ platform_driver_unregister(&dsi_phy_platform_driver);
+}
+
+int msm_dsi_phy_enable(struct msm_dsi_phy *phy,
+ struct msm_dsi_phy_clk_request *clk_req,
+ struct msm_dsi_phy_shared_timings *shared_timings)
+{
+ struct device *dev;
+ int ret;
+
+ if (!phy || !phy->cfg->ops.enable)
+ return -EINVAL;
+
+ dev = &phy->pdev->dev;
+
+ ret = dsi_phy_enable_resource(phy);
+ if (ret) {
+ DRM_DEV_ERROR(dev, "%s: resource enable failed, %d\n",
+ __func__, ret);
+ goto res_en_fail;
+ }
+
+ ret = regulator_bulk_enable(phy->cfg->num_regulators, phy->supplies);
+ if (ret) {
+ DRM_DEV_ERROR(dev, "%s: regulator enable failed, %d\n",
+ __func__, ret);
+ goto reg_en_fail;
+ }
+
+ ret = phy->cfg->ops.enable(phy, clk_req);
+ if (ret) {
+ DRM_DEV_ERROR(dev, "%s: phy enable failed, %d\n", __func__, ret);
+ goto phy_en_fail;
+ }
+
+ memcpy(shared_timings, &phy->timing.shared_timings,
+ sizeof(*shared_timings));
+
+ /*
+ * Resetting the DSI PHY silently changes its PLL registers to their reset
+ * values, which will confuse the clock driver and result in a wrong output
+ * rate for the link clocks. Restore the PLL state if its PLL is being used
+ * as a clock source.
+ */
+ if (phy->usecase != MSM_DSI_PHY_SLAVE) {
+ ret = msm_dsi_phy_pll_restore_state(phy);
+ if (ret) {
+ DRM_DEV_ERROR(dev, "%s: failed to restore phy state, %d\n",
+ __func__, ret);
+ goto pll_restor_fail;
+ }
+ }
+
+ return 0;
+
+pll_restor_fail:
+ if (phy->cfg->ops.disable)
+ phy->cfg->ops.disable(phy);
+phy_en_fail:
+ regulator_bulk_disable(phy->cfg->num_regulators, phy->supplies);
+reg_en_fail:
+ dsi_phy_disable_resource(phy);
+res_en_fail:
+ return ret;
+}
+
+void msm_dsi_phy_disable(struct msm_dsi_phy *phy)
+{
+ if (!phy || !phy->cfg->ops.disable)
+ return;
+
+ phy->cfg->ops.disable(phy);
+
+ regulator_bulk_disable(phy->cfg->num_regulators, phy->supplies);
+ dsi_phy_disable_resource(phy);
+}
+
+void msm_dsi_phy_set_usecase(struct msm_dsi_phy *phy,
+ enum msm_dsi_phy_usecase uc)
+{
+ if (phy)
+ phy->usecase = uc;
+}
+
+/* Returns true if we have to clear DSI_LANE_CTRL.HS_REQ_SEL_PHY */
+bool msm_dsi_phy_set_continuous_clock(struct msm_dsi_phy *phy, bool enable)
+{
+ if (!phy || !phy->cfg->ops.set_continuous_clock)
+ return false;
+
+ return phy->cfg->ops.set_continuous_clock(phy, enable);
+}
+
+void msm_dsi_phy_pll_save_state(struct msm_dsi_phy *phy)
+{
+ if (phy->cfg->ops.save_pll_state) {
+ phy->cfg->ops.save_pll_state(phy);
+ phy->state_saved = true;
+ }
+}
+
+int msm_dsi_phy_pll_restore_state(struct msm_dsi_phy *phy)
+{
+ int ret;
+
+ if (phy->cfg->ops.restore_pll_state && phy->state_saved) {
+ ret = phy->cfg->ops.restore_pll_state(phy);
+ if (ret)
+ return ret;
+
+ phy->state_saved = false;
+ }
+
+ return 0;
+}
+
+void msm_dsi_phy_snapshot(struct msm_disp_state *disp_state, struct msm_dsi_phy *phy)
+{
+ msm_disp_snapshot_add_block(disp_state,
+ phy->base_size, phy->base,
+ "dsi%d_phy", phy->id);
+
+ /* Do not try accessing PLL registers if it is switched off */
+ if (phy->pll_on)
+ msm_disp_snapshot_add_block(disp_state,
+ phy->pll_size, phy->pll_base,
+ "dsi%d_pll", phy->id);
+
+ if (phy->lane_base)
+ msm_disp_snapshot_add_block(disp_state,
+ phy->lane_size, phy->lane_base,
+ "dsi%d_lane", phy->id);
+
+ if (phy->reg_base)
+ msm_disp_snapshot_add_block(disp_state,
+ phy->reg_size, phy->reg_base,
+ "dsi%d_reg", phy->id);
+}
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h
new file mode 100644
index 000000000..60a99c652
--- /dev/null
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h
@@ -0,0 +1,134 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef __DSI_PHY_H__
+#define __DSI_PHY_H__
+
+#include <linux/clk-provider.h>
+#include <linux/delay.h>
+#include <linux/regulator/consumer.h>
+
+#include "dsi.h"
+
+#define dsi_phy_read(offset) msm_readl((offset))
+#define dsi_phy_write(offset, data) msm_writel((data), (offset))
+#define dsi_phy_write_udelay(offset, data, delay_us) do { msm_writel((data), (offset)); udelay(delay_us); } while (0)
+#define dsi_phy_write_ndelay(offset, data, delay_ns) do { msm_writel((data), (offset)); ndelay(delay_ns); } while (0)
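+
+/*
+ * Usage sketch (REG_EXAMPLE and base are placeholders, not real names):
+ * dsi_phy_write_udelay(base + REG_EXAMPLE, 0x1, 100) issues the write and
+ * then busy-waits 100 us before the caller's next register access.
+ */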
+
+struct msm_dsi_phy_ops {
+ int (*pll_init)(struct msm_dsi_phy *phy);
+ int (*enable)(struct msm_dsi_phy *phy,
+ struct msm_dsi_phy_clk_request *clk_req);
+ void (*disable)(struct msm_dsi_phy *phy);
+ void (*save_pll_state)(struct msm_dsi_phy *phy);
+ int (*restore_pll_state)(struct msm_dsi_phy *phy);
+ bool (*set_continuous_clock)(struct msm_dsi_phy *phy, bool enable);
+ int (*parse_dt_properties)(struct msm_dsi_phy *phy);
+};
+
+struct msm_dsi_phy_cfg {
+ const struct regulator_bulk_data *regulator_data;
+ int num_regulators;
+ struct msm_dsi_phy_ops ops;
+
+ unsigned long min_pll_rate;
+ unsigned long max_pll_rate;
+
+ const resource_size_t io_start[DSI_MAX];
+ const int num_dsi_phy;
+ const int quirks;
+ bool has_phy_regulator;
+ bool has_phy_lane;
+};
+
+extern const struct msm_dsi_phy_cfg dsi_phy_28nm_hpm_cfgs;
+extern const struct msm_dsi_phy_cfg dsi_phy_28nm_hpm_famb_cfgs;
+extern const struct msm_dsi_phy_cfg dsi_phy_28nm_lp_cfgs;
+extern const struct msm_dsi_phy_cfg dsi_phy_20nm_cfgs;
+extern const struct msm_dsi_phy_cfg dsi_phy_28nm_8960_cfgs;
+extern const struct msm_dsi_phy_cfg dsi_phy_14nm_cfgs;
+extern const struct msm_dsi_phy_cfg dsi_phy_14nm_660_cfgs;
+extern const struct msm_dsi_phy_cfg dsi_phy_14nm_8953_cfgs;
+extern const struct msm_dsi_phy_cfg dsi_phy_10nm_cfgs;
+extern const struct msm_dsi_phy_cfg dsi_phy_10nm_8998_cfgs;
+extern const struct msm_dsi_phy_cfg dsi_phy_7nm_cfgs;
+extern const struct msm_dsi_phy_cfg dsi_phy_7nm_8150_cfgs;
+extern const struct msm_dsi_phy_cfg dsi_phy_7nm_7280_cfgs;
+
+struct msm_dsi_dphy_timing {
+ u32 clk_zero;
+ u32 clk_trail;
+ u32 clk_prepare;
+ u32 hs_exit;
+ u32 hs_zero;
+ u32 hs_prepare;
+ u32 hs_trail;
+ u32 hs_rqst;
+ u32 ta_go;
+ u32 ta_sure;
+ u32 ta_get;
+
+ struct msm_dsi_phy_shared_timings shared_timings;
+
+ /* For PHY v2 only */
+ u32 hs_rqst_ckln;
+ u32 hs_prep_dly;
+ u32 hs_prep_dly_ckln;
+ u8 hs_halfbyte_en;
+ u8 hs_halfbyte_en_ckln;
+};
+
+#define DSI_BYTE_PLL_CLK 0
+#define DSI_PIXEL_PLL_CLK 1
+#define NUM_PROVIDED_CLKS 2
+
+#define DSI_LANE_MAX 5
+
+struct msm_dsi_phy {
+ struct platform_device *pdev;
+ void __iomem *base;
+ void __iomem *pll_base;
+ void __iomem *reg_base;
+ void __iomem *lane_base;
+ phys_addr_t base_size;
+ phys_addr_t pll_size;
+ phys_addr_t reg_size;
+ phys_addr_t lane_size;
+ int id;
+
+ struct clk *ahb_clk;
+ struct regulator_bulk_data *supplies;
+
+ struct msm_dsi_dphy_timing timing;
+ const struct msm_dsi_phy_cfg *cfg;
+ void *tuning_cfg;
+
+ enum msm_dsi_phy_usecase usecase;
+ bool regulator_ldo_mode;
+ bool cphy_mode;
+
+ struct clk_hw *vco_hw;
+ bool pll_on;
+
+ struct clk_hw_onecell_data *provided_clocks;
+
+ bool state_saved;
+};
+
+/*
+ * PHY internal functions
+ */
+int msm_dsi_dphy_timing_calc(struct msm_dsi_dphy_timing *timing,
+ struct msm_dsi_phy_clk_request *clk_req);
+int msm_dsi_dphy_timing_calc_v2(struct msm_dsi_dphy_timing *timing,
+ struct msm_dsi_phy_clk_request *clk_req);
+int msm_dsi_dphy_timing_calc_v3(struct msm_dsi_dphy_timing *timing,
+ struct msm_dsi_phy_clk_request *clk_req);
+int msm_dsi_dphy_timing_calc_v4(struct msm_dsi_dphy_timing *timing,
+ struct msm_dsi_phy_clk_request *clk_req);
+int msm_dsi_cphy_timing_calc_v4(struct msm_dsi_dphy_timing *timing,
+ struct msm_dsi_phy_clk_request *clk_req);
+
+#endif /* __DSI_PHY_H__ */
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_10nm.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_10nm.c
new file mode 100644
index 000000000..27b592c77
--- /dev/null
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_10nm.c
@@ -0,0 +1,1061 @@
+/*
+ * SPDX-License-Identifier: GPL-2.0
+ * Copyright (c) 2018, The Linux Foundation
+ */
+
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/iopoll.h>
+
+#include "dsi_phy.h"
+#include "dsi.xml.h"
+#include "dsi_phy_10nm.xml.h"
+
+/*
+ * DSI PLL 10nm - clock diagram (eg: DSI0):
+ *
+ * dsi0_pll_out_div_clk dsi0_pll_bit_clk
+ * | |
+ * | |
+ * +---------+ | +----------+ | +----+
+ * dsi0vco_clk ---| out_div |--o--| divl_3_0 |--o--| /8 |-- dsi0_phy_pll_out_byteclk
+ * +---------+ | +----------+ | +----+
+ * | |
+ * | | dsi0_pll_by_2_bit_clk
+ * | | |
+ * | | +----+ | |\ dsi0_pclk_mux
+ * | |--| /2 |--o--| \ |
+ * | | +----+ | \ | +---------+
+ * | --------------| |--o--| div_7_4 |-- dsi0_phy_pll_out_dsiclk
+ * |------------------------------| / +---------+
+ * | +-----+ | /
+ * -----------| /4? |--o----------|/
+ * +-----+ | |
+ * | |dsiclk_sel
+ * |
+ * dsi0_pll_post_out_div_clk
+ */
+
+#define VCO_REF_CLK_RATE 19200000
+#define FRAC_BITS 18
+
+/* v3.0.0 10nm implementation that requires the old timings settings */
+#define DSI_PHY_10NM_QUIRK_OLD_TIMINGS BIT(0)
+
+struct dsi_pll_config {
+ bool enable_ssc;
+ bool ssc_center;
+ u32 ssc_freq;
+ u32 ssc_offset;
+ u32 ssc_adj_per;
+
+ /* out */
+ u32 pll_prop_gain_rate;
+ u32 decimal_div_start;
+ u32 frac_div_start;
+ u32 pll_clock_inverters;
+ u32 ssc_stepsize;
+ u32 ssc_div_per;
+};
+
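+/*
+ * Divider/mux state cached by dsi_10nm_pll_save_state() and re-applied by
+ * dsi_10nm_pll_restore_state() after the PHY has been reset.
+ */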
+struct pll_10nm_cached_state {
+ unsigned long vco_rate;
+ u8 bit_clk_div;
+ u8 pix_clk_div;
+ u8 pll_out_div;
+ u8 pll_mux;
+};
+
+struct dsi_pll_10nm {
+ struct clk_hw clk_hw;
+
+ struct msm_dsi_phy *phy;
+
+ u64 vco_current_rate;
+
+ /* protects REG_DSI_10nm_PHY_CMN_CLK_CFG0 register */
+ spinlock_t postdiv_lock;
+
+ struct pll_10nm_cached_state cached_state;
+
+ struct dsi_pll_10nm *slave;
+};
+
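+/* map an embedded clk_hw back to its wrapping dsi_pll_10nm (container_of) */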
+#define to_pll_10nm(x) container_of(x, struct dsi_pll_10nm, clk_hw)
+
+/**
+ * struct dsi_phy_10nm_tuning_cfg - Holds 10nm PHY tuning config parameters.
+ * @rescode_offset_top: Offset for pull-up legs rescode.
+ * @rescode_offset_bot: Offset for pull-down legs rescode.
+ * @vreg_ctrl: vreg ctrl to drive LDO level
+ */
+struct dsi_phy_10nm_tuning_cfg {
+ u8 rescode_offset_top[DSI_LANE_MAX];
+ u8 rescode_offset_bot[DSI_LANE_MAX];
+ u8 vreg_ctrl;
+};
+
+/*
+ * Global list of private DSI PLL struct pointers. We need this for bonded DSI
+ * mode, where the master PLL's clk_ops needs to access the slave's private
+ * data.
+ */
+static struct dsi_pll_10nm *pll_10nm_list[DSI_MAX];
+
+static void dsi_pll_setup_config(struct dsi_pll_config *config)
+{
+ config->ssc_freq = 31500;
+ config->ssc_offset = 5000;
+ config->ssc_adj_per = 2;
+
+ config->enable_ssc = false;
+ config->ssc_center = false;
+}
+
+static void dsi_pll_calc_dec_frac(struct dsi_pll_10nm *pll, struct dsi_pll_config *config)
+{
+ u64 fref = VCO_REF_CLK_RATE;
+ u64 pll_freq;
+ u64 divider;
+ u64 dec, dec_multiple;
+ u32 frac;
+ u64 multiplier;
+
+ pll_freq = pll->vco_current_rate;
+
+ divider = fref * 2;
+
+ multiplier = 1 << FRAC_BITS;
+ dec_multiple = div_u64(pll_freq * multiplier, divider);
+ dec = div_u64_rem(dec_multiple, multiplier, &frac);
+
+ if (pll_freq <= 1900000000UL)
+ config->pll_prop_gain_rate = 8;
+ else if (pll_freq <= 3000000000UL)
+ config->pll_prop_gain_rate = 10;
+ else
+ config->pll_prop_gain_rate = 12;
+ if (pll_freq < 1100000000UL)
+ config->pll_clock_inverters = 8;
+ else
+ config->pll_clock_inverters = 0;
+
+ config->decimal_div_start = dec;
+ config->frac_div_start = frac;
+}
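+
+/*
+ * Example: for a target VCO rate of 1.5 GHz against the 19.2 MHz reference,
+ * pll_freq / (2 * fref) == 39.0625, so decimal_div_start == 39 and
+ * frac_div_start == 0.0625 * 2^18 == 16384.
+ */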
+
+#define SSC_CENTER BIT(0)
+#define SSC_EN BIT(1)
+
+static void dsi_pll_calc_ssc(struct dsi_pll_10nm *pll, struct dsi_pll_config *config)
+{
+ u32 ssc_per;
+ u32 ssc_mod;
+ u64 ssc_step_size;
+ u64 frac;
+
+ if (!config->enable_ssc) {
+ DBG("SSC not enabled\n");
+ return;
+ }
+
+ ssc_per = DIV_ROUND_CLOSEST(VCO_REF_CLK_RATE, config->ssc_freq) / 2 - 1;
+ ssc_mod = (ssc_per + 1) % (config->ssc_adj_per + 1);
+ ssc_per -= ssc_mod;
+
+ frac = config->frac_div_start;
+ ssc_step_size = config->decimal_div_start;
+ ssc_step_size *= (1 << FRAC_BITS);
+ ssc_step_size += frac;
+ ssc_step_size *= config->ssc_offset;
+ ssc_step_size *= (config->ssc_adj_per + 1);
+ ssc_step_size = div_u64(ssc_step_size, (ssc_per + 1));
+ ssc_step_size = DIV_ROUND_CLOSEST_ULL(ssc_step_size, 1000000);
+
+ config->ssc_div_per = ssc_per;
+ config->ssc_stepsize = ssc_step_size;
+
+ pr_debug("SCC: Dec:%d, frac:%llu, frac_bits:%d\n",
+ config->decimal_div_start, frac, FRAC_BITS);
+ pr_debug("SSC: div_per:0x%X, stepsize:0x%X, adjper:0x%X\n",
+ ssc_per, (u32)ssc_step_size, config->ssc_adj_per);
+}
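+
+/*
+ * With the defaults from dsi_pll_setup_config() (ssc_freq = 31500,
+ * ssc_adj_per = 2): ssc_per = DIV_ROUND_CLOSEST(19200000, 31500) / 2 - 1
+ * == 304, then trimmed by 305 % 3 == 2 to give ssc_div_per == 302.
+ */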
+
+static void dsi_pll_ssc_commit(struct dsi_pll_10nm *pll, struct dsi_pll_config *config)
+{
+ void __iomem *base = pll->phy->pll_base;
+
+ if (config->enable_ssc) {
+ pr_debug("SSC is enabled\n");
+
+ dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_SSC_STEPSIZE_LOW_1,
+ config->ssc_stepsize & 0xff);
+ dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_SSC_STEPSIZE_HIGH_1,
+ config->ssc_stepsize >> 8);
+ dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_SSC_DIV_PER_LOW_1,
+ config->ssc_div_per & 0xff);
+ dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_SSC_DIV_PER_HIGH_1,
+ config->ssc_div_per >> 8);
+ dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_SSC_DIV_ADJPER_LOW_1,
+ config->ssc_adj_per & 0xff);
+ dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_SSC_DIV_ADJPER_HIGH_1,
+ config->ssc_adj_per >> 8);
+ dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_SSC_CONTROL,
+ SSC_EN | (config->ssc_center ? SSC_CENTER : 0));
+ }
+}
+
+static void dsi_pll_config_hzindep_reg(struct dsi_pll_10nm *pll)
+{
+ void __iomem *base = pll->phy->pll_base;
+
+ dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_ANALOG_CONTROLS_ONE, 0x80);
+ dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_ANALOG_CONTROLS_TWO, 0x03);
+ dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_ANALOG_CONTROLS_THREE, 0x00);
+ dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_DSM_DIVIDER, 0x00);
+ dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_FEEDBACK_DIVIDER, 0x4e);
+ dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_CALIBRATION_SETTINGS, 0x40);
+ dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_BAND_SEL_CAL_SETTINGS_THREE,
+ 0xba);
+ dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_FREQ_DETECT_SETTINGS_ONE,
+ 0x0c);
+ dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_OUTDIV, 0x00);
+ dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_CORE_OVERRIDE, 0x00);
+ dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_PLL_DIGITAL_TIMERS_TWO,
+ 0x08);
+ dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_PLL_PROP_GAIN_RATE_1, 0x08);
+ dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_PLL_BAND_SET_RATE_1, 0xc0);
+ dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_PLL_INT_GAIN_IFILT_BAND_1,
+ 0xfa);
+ dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_PLL_FL_INT_GAIN_PFILT_BAND_1,
+ 0x4c);
+ dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_PLL_LOCK_OVERRIDE, 0x80);
+ dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_PFILT, 0x29);
+ dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_IFILT, 0x3f);
+}
+
+static void dsi_pll_commit(struct dsi_pll_10nm *pll, struct dsi_pll_config *config)
+{
+ void __iomem *base = pll->phy->pll_base;
+
+ dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_CORE_INPUT_OVERRIDE, 0x12);
+ dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_DECIMAL_DIV_START_1,
+ config->decimal_div_start);
+ dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_FRAC_DIV_START_LOW_1,
+ config->frac_div_start & 0xff);
+ dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_FRAC_DIV_START_MID_1,
+ (config->frac_div_start & 0xff00) >> 8);
+ dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_FRAC_DIV_START_HIGH_1,
+ (config->frac_div_start & 0x30000) >> 16);
+ dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_PLL_LOCKDET_RATE_1, 64);
+ dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_PLL_LOCK_DELAY, 0x06);
+ dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_CMODE, 0x10);
+ dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_CLOCK_INVERTERS,
+ config->pll_clock_inverters);
+}
+
+static int dsi_pll_10nm_vco_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct dsi_pll_10nm *pll_10nm = to_pll_10nm(hw);
+ struct dsi_pll_config config;
+
+ DBG("DSI PLL%d rate=%lu, parent's=%lu", pll_10nm->phy->id, rate,
+ parent_rate);
+
+ pll_10nm->vco_current_rate = rate;
+
+ dsi_pll_setup_config(&config);
+
+ dsi_pll_calc_dec_frac(pll_10nm, &config);
+
+ dsi_pll_calc_ssc(pll_10nm, &config);
+
+ dsi_pll_commit(pll_10nm, &config);
+
+ dsi_pll_config_hzindep_reg(pll_10nm);
+
+ dsi_pll_ssc_commit(pll_10nm, &config);
+
+ /* flush, ensure all register writes are done */
+ wmb();
+
+ return 0;
+}
+
+static int dsi_pll_10nm_lock_status(struct dsi_pll_10nm *pll)
+{
+ struct device *dev = &pll->phy->pdev->dev;
+ int rc;
+ u32 status = 0;
+ u32 const delay_us = 100;
+ u32 const timeout_us = 5000;
+
+ rc = readl_poll_timeout_atomic(pll->phy->pll_base +
+ REG_DSI_10nm_PHY_PLL_COMMON_STATUS_ONE,
+ status,
+ ((status & BIT(0)) > 0),
+ delay_us,
+ timeout_us);
+ if (rc)
+ DRM_DEV_ERROR(dev, "DSI PLL(%d) lock failed, status=0x%08x\n",
+ pll->phy->id, status);
+
+ return rc;
+}
+
+static void dsi_pll_disable_pll_bias(struct dsi_pll_10nm *pll)
+{
+ u32 data = dsi_phy_read(pll->phy->base + REG_DSI_10nm_PHY_CMN_CTRL_0);
+
+ dsi_phy_write(pll->phy->pll_base + REG_DSI_10nm_PHY_PLL_SYSTEM_MUXES, 0);
+ dsi_phy_write(pll->phy->base + REG_DSI_10nm_PHY_CMN_CTRL_0,
+ data & ~BIT(5));
+ ndelay(250);
+}
+
+static void dsi_pll_enable_pll_bias(struct dsi_pll_10nm *pll)
+{
+ u32 data = dsi_phy_read(pll->phy->base + REG_DSI_10nm_PHY_CMN_CTRL_0);
+
+ dsi_phy_write(pll->phy->base + REG_DSI_10nm_PHY_CMN_CTRL_0,
+ data | BIT(5));
+ dsi_phy_write(pll->phy->pll_base + REG_DSI_10nm_PHY_PLL_SYSTEM_MUXES, 0xc0);
+ ndelay(250);
+}
+
+static void dsi_pll_disable_global_clk(struct dsi_pll_10nm *pll)
+{
+ u32 data;
+
+ data = dsi_phy_read(pll->phy->base + REG_DSI_10nm_PHY_CMN_CLK_CFG1);
+ dsi_phy_write(pll->phy->base + REG_DSI_10nm_PHY_CMN_CLK_CFG1,
+ data & ~BIT(5));
+}
+
+static void dsi_pll_enable_global_clk(struct dsi_pll_10nm *pll)
+{
+ u32 data;
+
+ data = dsi_phy_read(pll->phy->base + REG_DSI_10nm_PHY_CMN_CLK_CFG1);
+ dsi_phy_write(pll->phy->base + REG_DSI_10nm_PHY_CMN_CLK_CFG1,
+ data | BIT(5));
+}
+
+static int dsi_pll_10nm_vco_prepare(struct clk_hw *hw)
+{
+ struct dsi_pll_10nm *pll_10nm = to_pll_10nm(hw);
+ struct device *dev = &pll_10nm->phy->pdev->dev;
+ int rc;
+
+ dsi_pll_enable_pll_bias(pll_10nm);
+ if (pll_10nm->slave)
+ dsi_pll_enable_pll_bias(pll_10nm->slave);
+
+ rc = dsi_pll_10nm_vco_set_rate(hw, pll_10nm->vco_current_rate, 0);
+ if (rc) {
+ DRM_DEV_ERROR(dev, "vco_set_rate failed, rc=%d\n", rc);
+ return rc;
+ }
+
+ /* Start PLL */
+ dsi_phy_write(pll_10nm->phy->base + REG_DSI_10nm_PHY_CMN_PLL_CNTRL,
+ 0x01);
+
+ /*
+ * ensure all PLL configurations are written prior to checking
+ * for PLL lock.
+ */
+ wmb();
+
+ /* Check for PLL lock */
+ rc = dsi_pll_10nm_lock_status(pll_10nm);
+ if (rc) {
+ DRM_DEV_ERROR(dev, "PLL(%d) lock failed\n", pll_10nm->phy->id);
+ goto error;
+ }
+
+ pll_10nm->phy->pll_on = true;
+
+ dsi_pll_enable_global_clk(pll_10nm);
+ if (pll_10nm->slave)
+ dsi_pll_enable_global_clk(pll_10nm->slave);
+
+ dsi_phy_write(pll_10nm->phy->base + REG_DSI_10nm_PHY_CMN_RBUF_CTRL,
+ 0x01);
+ if (pll_10nm->slave)
+ dsi_phy_write(pll_10nm->slave->phy->base +
+ REG_DSI_10nm_PHY_CMN_RBUF_CTRL, 0x01);
+
+error:
+ return rc;
+}
+
+static void dsi_pll_disable_sub(struct dsi_pll_10nm *pll)
+{
+ dsi_phy_write(pll->phy->base + REG_DSI_10nm_PHY_CMN_RBUF_CTRL, 0);
+ dsi_pll_disable_pll_bias(pll);
+}
+
+static void dsi_pll_10nm_vco_unprepare(struct clk_hw *hw)
+{
+ struct dsi_pll_10nm *pll_10nm = to_pll_10nm(hw);
+
+ /*
+ * To avoid any stray glitches while abruptly powering down the PLL,
+ * make sure to gate the clock using the clock enable bit before
+ * powering it down.
+ */
+ dsi_pll_disable_global_clk(pll_10nm);
+ dsi_phy_write(pll_10nm->phy->base + REG_DSI_10nm_PHY_CMN_PLL_CNTRL, 0);
+ dsi_pll_disable_sub(pll_10nm);
+ if (pll_10nm->slave) {
+ dsi_pll_disable_global_clk(pll_10nm->slave);
+ dsi_pll_disable_sub(pll_10nm->slave);
+ }
+ /* flush, ensure all register writes are done */
+ wmb();
+ pll_10nm->phy->pll_on = false;
+}
+
+static unsigned long dsi_pll_10nm_vco_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct dsi_pll_10nm *pll_10nm = to_pll_10nm(hw);
+ void __iomem *base = pll_10nm->phy->pll_base;
+ u64 ref_clk = VCO_REF_CLK_RATE;
+ u64 vco_rate = 0x0;
+ u64 multiplier;
+ u32 frac;
+ u32 dec;
+ u64 pll_freq, tmp64;
+
+ dec = dsi_phy_read(base + REG_DSI_10nm_PHY_PLL_DECIMAL_DIV_START_1);
+ dec &= 0xff;
+
+ frac = dsi_phy_read(base + REG_DSI_10nm_PHY_PLL_FRAC_DIV_START_LOW_1);
+ frac |= ((dsi_phy_read(base + REG_DSI_10nm_PHY_PLL_FRAC_DIV_START_MID_1) &
+ 0xff) << 8);
+ frac |= ((dsi_phy_read(base + REG_DSI_10nm_PHY_PLL_FRAC_DIV_START_HIGH_1) &
+ 0x3) << 16);
+
+ /*
+ * TODO:
+ * 1. Assumes prescaler is disabled
+ */
+ multiplier = 1 << FRAC_BITS;
+ pll_freq = dec * (ref_clk * 2);
+ tmp64 = (ref_clk * 2 * frac);
+ pll_freq += div_u64(tmp64, multiplier);
+
+ vco_rate = pll_freq;
+ pll_10nm->vco_current_rate = vco_rate;
+
+ DBG("DSI PLL%d returning vco rate = %lu, dec = %x, frac = %x",
+ pll_10nm->phy->id, (unsigned long)vco_rate, dec, frac);
+
+ return (unsigned long)vco_rate;
+}
+
+static long dsi_pll_10nm_clk_round_rate(struct clk_hw *hw,
+ unsigned long rate, unsigned long *parent_rate)
+{
+ struct dsi_pll_10nm *pll_10nm = to_pll_10nm(hw);
+
+ if (rate < pll_10nm->phy->cfg->min_pll_rate)
+ return pll_10nm->phy->cfg->min_pll_rate;
+ else if (rate > pll_10nm->phy->cfg->max_pll_rate)
+ return pll_10nm->phy->cfg->max_pll_rate;
+ else
+ return rate;
+}
+
+static const struct clk_ops clk_ops_dsi_pll_10nm_vco = {
+ .round_rate = dsi_pll_10nm_clk_round_rate,
+ .set_rate = dsi_pll_10nm_vco_set_rate,
+ .recalc_rate = dsi_pll_10nm_vco_recalc_rate,
+ .prepare = dsi_pll_10nm_vco_prepare,
+ .unprepare = dsi_pll_10nm_vco_unprepare,
+};
+
+/*
+ * PLL Callbacks
+ */
+
+static void dsi_10nm_pll_save_state(struct msm_dsi_phy *phy)
+{
+ struct dsi_pll_10nm *pll_10nm = to_pll_10nm(phy->vco_hw);
+ struct pll_10nm_cached_state *cached = &pll_10nm->cached_state;
+ void __iomem *phy_base = pll_10nm->phy->base;
+ u32 cmn_clk_cfg0, cmn_clk_cfg1;
+
+ cached->pll_out_div = dsi_phy_read(pll_10nm->phy->pll_base +
+ REG_DSI_10nm_PHY_PLL_PLL_OUTDIV_RATE);
+ cached->pll_out_div &= 0x3;
+
+ cmn_clk_cfg0 = dsi_phy_read(phy_base + REG_DSI_10nm_PHY_CMN_CLK_CFG0);
+ cached->bit_clk_div = cmn_clk_cfg0 & 0xf;
+ cached->pix_clk_div = (cmn_clk_cfg0 & 0xf0) >> 4;
+
+ cmn_clk_cfg1 = dsi_phy_read(phy_base + REG_DSI_10nm_PHY_CMN_CLK_CFG1);
+ cached->pll_mux = cmn_clk_cfg1 & 0x3;
+
+ DBG("DSI PLL%d outdiv %x bit_clk_div %x pix_clk_div %x pll_mux %x",
+ pll_10nm->phy->id, cached->pll_out_div, cached->bit_clk_div,
+ cached->pix_clk_div, cached->pll_mux);
+}
+
+static int dsi_10nm_pll_restore_state(struct msm_dsi_phy *phy)
+{
+ struct dsi_pll_10nm *pll_10nm = to_pll_10nm(phy->vco_hw);
+ struct pll_10nm_cached_state *cached = &pll_10nm->cached_state;
+ void __iomem *phy_base = pll_10nm->phy->base;
+ u32 val;
+ int ret;
+
+ val = dsi_phy_read(pll_10nm->phy->pll_base + REG_DSI_10nm_PHY_PLL_PLL_OUTDIV_RATE);
+ val &= ~0x3;
+ val |= cached->pll_out_div;
+ dsi_phy_write(pll_10nm->phy->pll_base + REG_DSI_10nm_PHY_PLL_PLL_OUTDIV_RATE, val);
+
+ dsi_phy_write(phy_base + REG_DSI_10nm_PHY_CMN_CLK_CFG0,
+ cached->bit_clk_div | (cached->pix_clk_div << 4));
+
+ val = dsi_phy_read(phy_base + REG_DSI_10nm_PHY_CMN_CLK_CFG1);
+ val &= ~0x3;
+ val |= cached->pll_mux;
+ dsi_phy_write(phy_base + REG_DSI_10nm_PHY_CMN_CLK_CFG1, val);
+
+ ret = dsi_pll_10nm_vco_set_rate(phy->vco_hw,
+ pll_10nm->vco_current_rate,
+ VCO_REF_CLK_RATE);
+ if (ret) {
+ DRM_DEV_ERROR(&pll_10nm->phy->pdev->dev,
+ "restore vco rate failed. ret=%d\n", ret);
+ return ret;
+ }
+
+ DBG("DSI PLL%d", pll_10nm->phy->id);
+
+ return 0;
+}
+
+static int dsi_10nm_set_usecase(struct msm_dsi_phy *phy)
+{
+ struct dsi_pll_10nm *pll_10nm = to_pll_10nm(phy->vco_hw);
+ void __iomem *base = phy->base;
+ u32 data = 0x0; /* internal PLL */
+
+ DBG("DSI PLL%d", pll_10nm->phy->id);
+
+ switch (phy->usecase) {
+ case MSM_DSI_PHY_STANDALONE:
+ break;
+ case MSM_DSI_PHY_MASTER:
+ pll_10nm->slave = pll_10nm_list[(pll_10nm->phy->id + 1) % DSI_MAX];
+ break;
+ case MSM_DSI_PHY_SLAVE:
+ data = 0x1; /* external PLL */
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* set PLL src */
+ dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_CLK_CFG1, (data << 2));
+
+ return 0;
+}
+
+/*
+ * The post dividers and mux clocks are created using the standard divider and
+ * mux API. Unlike the 14nm PHY, the slave PLL doesn't need its dividers/mux
+ * state to follow the master PLL's divider/mux state. Therefore, we don't
+ * require special clock ops that also configure the slave PLL registers.
+ */
+static int pll_10nm_register(struct dsi_pll_10nm *pll_10nm, struct clk_hw **provided_clocks)
+{
+ char clk_name[32];
+ struct clk_init_data vco_init = {
+ .parent_data = &(const struct clk_parent_data) {
+ .fw_name = "ref",
+ },
+ .num_parents = 1,
+ .name = clk_name,
+ .flags = CLK_IGNORE_UNUSED,
+ .ops = &clk_ops_dsi_pll_10nm_vco,
+ };
+ struct device *dev = &pll_10nm->phy->pdev->dev;
+ struct clk_hw *hw, *pll_out_div, *pll_bit, *pll_by_2_bit;
+ struct clk_hw *pll_post_out_div, *pclk_mux;
+ int ret;
+
+ DBG("DSI%d", pll_10nm->phy->id);
+
+ snprintf(clk_name, sizeof(clk_name), "dsi%dvco_clk", pll_10nm->phy->id);
+ pll_10nm->clk_hw.init = &vco_init;
+
+ ret = devm_clk_hw_register(dev, &pll_10nm->clk_hw);
+ if (ret)
+ return ret;
+
+ snprintf(clk_name, sizeof(clk_name), "dsi%d_pll_out_div_clk", pll_10nm->phy->id);
+
+ pll_out_div = devm_clk_hw_register_divider_parent_hw(dev, clk_name,
+ &pll_10nm->clk_hw, CLK_SET_RATE_PARENT,
+ pll_10nm->phy->pll_base +
+ REG_DSI_10nm_PHY_PLL_PLL_OUTDIV_RATE,
+ 0, 2, CLK_DIVIDER_POWER_OF_TWO, NULL);
+ if (IS_ERR(pll_out_div)) {
+ ret = PTR_ERR(pll_out_div);
+ goto fail;
+ }
+
+ snprintf(clk_name, sizeof(clk_name), "dsi%d_pll_bit_clk", pll_10nm->phy->id);
+
+ /* BIT CLK: DIV_CTRL_3_0 */
+ pll_bit = devm_clk_hw_register_divider_parent_hw(dev, clk_name,
+ pll_out_div, CLK_SET_RATE_PARENT,
+ pll_10nm->phy->base + REG_DSI_10nm_PHY_CMN_CLK_CFG0,
+ 0, 4, CLK_DIVIDER_ONE_BASED, &pll_10nm->postdiv_lock);
+ if (IS_ERR(pll_bit)) {
+ ret = PTR_ERR(pll_bit);
+ goto fail;
+ }
+
+ snprintf(clk_name, sizeof(clk_name), "dsi%d_phy_pll_out_byteclk", pll_10nm->phy->id);
+
+ /* DSI Byte clock = VCO_CLK / OUT_DIV / BIT_DIV / 8 */
+ hw = devm_clk_hw_register_fixed_factor_parent_hw(dev, clk_name,
+ pll_bit, CLK_SET_RATE_PARENT, 1, 8);
+ if (IS_ERR(hw)) {
+ ret = PTR_ERR(hw);
+ goto fail;
+ }
+
+ provided_clocks[DSI_BYTE_PLL_CLK] = hw;
+
+ snprintf(clk_name, sizeof(clk_name), "dsi%d_pll_by_2_bit_clk", pll_10nm->phy->id);
+
+ pll_by_2_bit = devm_clk_hw_register_fixed_factor_parent_hw(dev,
+ clk_name, pll_bit, 0, 1, 2);
+ if (IS_ERR(pll_by_2_bit)) {
+ ret = PTR_ERR(pll_by_2_bit);
+ goto fail;
+ }
+
+ snprintf(clk_name, sizeof(clk_name), "dsi%d_pll_post_out_div_clk", pll_10nm->phy->id);
+
+ pll_post_out_div = devm_clk_hw_register_fixed_factor_parent_hw(dev,
+ clk_name, pll_out_div, 0, 1, 4);
+ if (IS_ERR(pll_post_out_div)) {
+ ret = PTR_ERR(pll_post_out_div);
+ goto fail;
+ }
+
+ snprintf(clk_name, sizeof(clk_name), "dsi%d_pclk_mux", pll_10nm->phy->id);
+
+ pclk_mux = devm_clk_hw_register_mux_parent_hws(dev, clk_name,
+ ((const struct clk_hw *[]){
+ pll_bit,
+ pll_by_2_bit,
+ pll_out_div,
+ pll_post_out_div,
+ }), 4, 0, pll_10nm->phy->base +
+ REG_DSI_10nm_PHY_CMN_CLK_CFG1, 0, 2, 0, NULL);
+ if (IS_ERR(pclk_mux)) {
+ ret = PTR_ERR(pclk_mux);
+ goto fail;
+ }
+
+ snprintf(clk_name, sizeof(clk_name), "dsi%d_phy_pll_out_dsiclk", pll_10nm->phy->id);
+
+ /* PIX CLK DIV : DIV_CTRL_7_4 */
+ hw = devm_clk_hw_register_divider_parent_hw(dev, clk_name, pclk_mux,
+ 0, pll_10nm->phy->base + REG_DSI_10nm_PHY_CMN_CLK_CFG0,
+ 4, 4, CLK_DIVIDER_ONE_BASED, &pll_10nm->postdiv_lock);
+ if (IS_ERR(hw)) {
+ ret = PTR_ERR(hw);
+ goto fail;
+ }
+
+ provided_clocks[DSI_PIXEL_PLL_CLK] = hw;
+
+ return 0;
+
+fail:
+
+ return ret;
+}
+
+static int dsi_pll_10nm_init(struct msm_dsi_phy *phy)
+{
+ struct platform_device *pdev = phy->pdev;
+ struct dsi_pll_10nm *pll_10nm;
+ int ret;
+
+ pll_10nm = devm_kzalloc(&pdev->dev, sizeof(*pll_10nm), GFP_KERNEL);
+ if (!pll_10nm)
+ return -ENOMEM;
+
+ DBG("DSI PLL%d", phy->id);
+
+ pll_10nm_list[phy->id] = pll_10nm;
+
+ spin_lock_init(&pll_10nm->postdiv_lock);
+
+ pll_10nm->phy = phy;
+
+ ret = pll_10nm_register(pll_10nm, phy->provided_clocks->hws);
+ if (ret) {
+ DRM_DEV_ERROR(&pdev->dev, "failed to register PLL: %d\n", ret);
+ return ret;
+ }
+
+ phy->vco_hw = &pll_10nm->clk_hw;
+
+ /* TODO: Remove this when we have proper display handover support */
+ msm_dsi_phy_pll_save_state(phy);
+
+ return 0;
+}
+
+static int dsi_phy_hw_v3_0_is_pll_on(struct msm_dsi_phy *phy)
+{
+ void __iomem *base = phy->base;
+ u32 data = 0;
+
+ data = dsi_phy_read(base + REG_DSI_10nm_PHY_CMN_PLL_CNTRL);
+ mb(); /* make sure read happened */
+
+ return (data & BIT(0));
+}
+
+static void dsi_phy_hw_v3_0_config_lpcdrx(struct msm_dsi_phy *phy, bool enable)
+{
+ void __iomem *lane_base = phy->lane_base;
+ int phy_lane_0 = 0; /* TODO: Support all lane swap configs */
+
+ /*
+ * LPRX and CDRX need to be enabled only for the physical data lane
+ * corresponding to the logical data lane 0.
+ */
+ if (enable)
+ dsi_phy_write(lane_base +
+ REG_DSI_10nm_PHY_LN_LPRX_CTRL(phy_lane_0), 0x3);
+ else
+ dsi_phy_write(lane_base +
+ REG_DSI_10nm_PHY_LN_LPRX_CTRL(phy_lane_0), 0);
+}
+
+static void dsi_phy_hw_v3_0_lane_settings(struct msm_dsi_phy *phy)
+{
+ int i;
+ u8 tx_dctrl[] = { 0x00, 0x00, 0x00, 0x04, 0x01 };
+ void __iomem *lane_base = phy->lane_base;
+ struct dsi_phy_10nm_tuning_cfg *tuning_cfg = phy->tuning_cfg;
+
+ if (phy->cfg->quirks & DSI_PHY_10NM_QUIRK_OLD_TIMINGS)
+ tx_dctrl[3] = 0x02;
+
+ /* Strength ctrl settings */
+ for (i = 0; i < 5; i++) {
+ dsi_phy_write(lane_base + REG_DSI_10nm_PHY_LN_LPTX_STR_CTRL(i),
+ 0x55);
+ /*
+ * Disable LPRX and CDRX for all lanes; later on they will be
+ * enabled only for the physical data lane corresponding to the
+ * logical data lane 0.
+ */
+ dsi_phy_write(lane_base + REG_DSI_10nm_PHY_LN_LPRX_CTRL(i), 0);
+ dsi_phy_write(lane_base + REG_DSI_10nm_PHY_LN_PIN_SWAP(i), 0x0);
+ dsi_phy_write(lane_base + REG_DSI_10nm_PHY_LN_HSTX_STR_CTRL(i),
+ 0x88);
+ }
+
+ dsi_phy_hw_v3_0_config_lpcdrx(phy, true);
+
+ /* other settings */
+ for (i = 0; i < 5; i++) {
+ dsi_phy_write(lane_base + REG_DSI_10nm_PHY_LN_CFG0(i), 0x0);
+ dsi_phy_write(lane_base + REG_DSI_10nm_PHY_LN_CFG1(i), 0x0);
+ dsi_phy_write(lane_base + REG_DSI_10nm_PHY_LN_CFG2(i), 0x0);
+ dsi_phy_write(lane_base + REG_DSI_10nm_PHY_LN_CFG3(i),
+ i == 4 ? 0x80 : 0x0);
+
+ /* platform specific dsi phy drive strength adjustment */
+ dsi_phy_write(lane_base + REG_DSI_10nm_PHY_LN_OFFSET_TOP_CTRL(i),
+ tuning_cfg->rescode_offset_top[i]);
+ dsi_phy_write(lane_base + REG_DSI_10nm_PHY_LN_OFFSET_BOT_CTRL(i),
+ tuning_cfg->rescode_offset_bot[i]);
+
+ dsi_phy_write(lane_base + REG_DSI_10nm_PHY_LN_TX_DCTRL(i),
+ tx_dctrl[i]);
+ }
+
+ if (!(phy->cfg->quirks & DSI_PHY_10NM_QUIRK_OLD_TIMINGS)) {
+ /* Toggle BIT 0 to release freeze I/O */
+ dsi_phy_write(lane_base + REG_DSI_10nm_PHY_LN_TX_DCTRL(3), 0x05);
+ dsi_phy_write(lane_base + REG_DSI_10nm_PHY_LN_TX_DCTRL(3), 0x04);
+ }
+}
+
+static int dsi_10nm_phy_enable(struct msm_dsi_phy *phy,
+ struct msm_dsi_phy_clk_request *clk_req)
+{
+ int ret;
+ u32 status;
+ u32 const delay_us = 5;
+ u32 const timeout_us = 1000;
+ struct msm_dsi_dphy_timing *timing = &phy->timing;
+ void __iomem *base = phy->base;
+ struct dsi_phy_10nm_tuning_cfg *tuning_cfg = phy->tuning_cfg;
+ u32 data;
+
+ DBG("");
+
+ if (msm_dsi_dphy_timing_calc_v3(timing, clk_req)) {
+ DRM_DEV_ERROR(&phy->pdev->dev,
+ "%s: D-PHY timing calculation failed\n", __func__);
+ return -EINVAL;
+ }
+
+ if (dsi_phy_hw_v3_0_is_pll_on(phy))
+ pr_warn("PLL turned on before configuring PHY\n");
+
+ /* wait for REFGEN READY */
+ ret = readl_poll_timeout_atomic(base + REG_DSI_10nm_PHY_CMN_PHY_STATUS,
+ status, (status & BIT(0)),
+ delay_us, timeout_us);
+ if (ret) {
+ pr_err("Ref gen not ready. Aborting\n");
+ return -EINVAL;
+ }
+
+ /* de-assert digital and pll power down */
+ data = BIT(6) | BIT(5);
+ dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_CTRL_0, data);
+
+ /* Assert PLL core reset */
+ dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_PLL_CNTRL, 0x00);
+
+ /* turn off resync FIFO */
+ dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_RBUF_CTRL, 0x00);
+
+ /* Select MS1 byte-clk */
+ dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_GLBL_CTRL, 0x10);
+
+ /* Enable LDO with platform specific drive level/amplitude adjustment */
+ dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_VREG_CTRL,
+ tuning_cfg->vreg_ctrl);
+
+ /* Configure PHY lane swap (TODO: we need to calculate this) */
+ dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_LANE_CFG0, 0x21);
+ dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_LANE_CFG1, 0x84);
+
+ /* DSI PHY timings */
+ dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_TIMING_CTRL_0,
+ timing->hs_halfbyte_en);
+ dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_TIMING_CTRL_1,
+ timing->clk_zero);
+ dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_TIMING_CTRL_2,
+ timing->clk_prepare);
+ dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_TIMING_CTRL_3,
+ timing->clk_trail);
+ dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_TIMING_CTRL_4,
+ timing->hs_exit);
+ dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_TIMING_CTRL_5,
+ timing->hs_zero);
+ dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_TIMING_CTRL_6,
+ timing->hs_prepare);
+ dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_TIMING_CTRL_7,
+ timing->hs_trail);
+ dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_TIMING_CTRL_8,
+ timing->hs_rqst);
+ dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_TIMING_CTRL_9,
+ timing->ta_go | (timing->ta_sure << 3));
+ dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_TIMING_CTRL_10,
+ timing->ta_get);
+ dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_TIMING_CTRL_11,
+ 0x00);
+
+ /* Remove power down from all blocks */
+ dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_CTRL_0, 0x7f);
+
+ /* power up lanes */
+ data = dsi_phy_read(base + REG_DSI_10nm_PHY_CMN_CTRL_0);
+
+ /* TODO: only power up lanes that are used */
+ data |= 0x1F;
+ dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_CTRL_0, data);
+ dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_LANE_CTRL0, 0x1F);
+
+ /* Select full-rate mode */
+ dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_CTRL_2, 0x40);
+
+ ret = dsi_10nm_set_usecase(phy);
+ if (ret) {
+ DRM_DEV_ERROR(&phy->pdev->dev, "%s: set pll usecase failed, %d\n",
+ __func__, ret);
+ return ret;
+ }
+
+ /* DSI lane settings */
+ dsi_phy_hw_v3_0_lane_settings(phy);
+
+ DBG("DSI%d PHY enabled", phy->id);
+
+ return 0;
+}
+
+static void dsi_10nm_phy_disable(struct msm_dsi_phy *phy)
+{
+ void __iomem *base = phy->base;
+ u32 data;
+
+ DBG("");
+
+ if (dsi_phy_hw_v3_0_is_pll_on(phy))
+ pr_warn("Turning OFF PHY while PLL is on\n");
+
+ dsi_phy_hw_v3_0_config_lpcdrx(phy, false);
+ data = dsi_phy_read(base + REG_DSI_10nm_PHY_CMN_CTRL_0);
+
+ /* disable all lanes */
+ data &= ~0x1F;
+ dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_CTRL_0, data);
+ dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_LANE_CTRL0, 0);
+
+ /* Turn off all PHY blocks */
+ dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_CTRL_0, 0x00);
+ /* make sure phy is turned off */
+ wmb();
+
+ DBG("DSI%d PHY disabled", phy->id);
+}
+
+static int dsi_10nm_phy_parse_dt(struct msm_dsi_phy *phy)
+{
+ struct device *dev = &phy->pdev->dev;
+ struct dsi_phy_10nm_tuning_cfg *tuning_cfg;
+ s8 offset_top[DSI_LANE_MAX] = { 0 }; /* No offset */
+ s8 offset_bot[DSI_LANE_MAX] = { 0 }; /* No offset */
+ u32 ldo_level = 400; /* 400mV */
+ u8 level;
+ int ret, i;
+
+ tuning_cfg = devm_kzalloc(dev, sizeof(*tuning_cfg), GFP_KERNEL);
+ if (!tuning_cfg)
+ return -ENOMEM;
+
+ /* Drive strength adjustment parameters */
+ ret = of_property_read_u8_array(dev->of_node, "qcom,phy-rescode-offset-top",
+ offset_top, DSI_LANE_MAX);
+ if (ret && ret != -EINVAL) {
+ DRM_DEV_ERROR(dev, "failed to parse qcom,phy-rescode-offset-top, %d\n", ret);
+ return ret;
+ }
+
+ for (i = 0; i < DSI_LANE_MAX; i++) {
+ if (offset_top[i] < -32 || offset_top[i] > 31) {
+ DRM_DEV_ERROR(dev,
+ "qcom,phy-rescode-offset-top value %d is not in range [-32..31]\n",
+ offset_top[i]);
+ return -EINVAL;
+ }
+ tuning_cfg->rescode_offset_top[i] = 0x3f & offset_top[i];
+ }
+
+ ret = of_property_read_u8_array(dev->of_node, "qcom,phy-rescode-offset-bot",
+ offset_bot, DSI_LANE_MAX);
+ if (ret && ret != -EINVAL) {
+ DRM_DEV_ERROR(dev, "failed to parse qcom,phy-rescode-offset-bot, %d\n", ret);
+ return ret;
+ }
+
+ for (i = 0; i < DSI_LANE_MAX; i++) {
+ if (offset_bot[i] < -32 || offset_bot[i] > 31) {
+ DRM_DEV_ERROR(dev,
+ "qcom,phy-rescode-offset-bot value %d is not in range [-32..31]\n",
+ offset_bot[i]);
+ return -EINVAL;
+ }
+ tuning_cfg->rescode_offset_bot[i] = 0x3f & offset_bot[i];
+ }
+
+ /* Drive level/amplitude adjustment parameters */
+ ret = of_property_read_u32(dev->of_node, "qcom,phy-drive-ldo-level", &ldo_level);
+ if (ret && ret != -EINVAL) {
+ DRM_DEV_ERROR(dev, "failed to parse qcom,phy-drive-ldo-level, %d\n", ret);
+ return ret;
+ }
+
+ switch (ldo_level) {
+ case 375:
+ level = 0;
+ break;
+ case 400:
+ level = 1;
+ break;
+ case 425:
+ level = 2;
+ break;
+ case 450:
+ level = 3;
+ break;
+ case 475:
+ level = 4;
+ break;
+ case 500:
+ level = 5;
+ break;
+ default:
+ DRM_DEV_ERROR(dev, "qcom,phy-drive-ldo-level %d is not supported\n", ldo_level);
+ return -EINVAL;
+ }
+ tuning_cfg->vreg_ctrl = 0x58 | (0x7 & level);
+
+ phy->tuning_cfg = tuning_cfg;
+
+ return 0;
+}
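+
+/*
+ * For illustration (assumed example values, not from the HW docs): a rescode
+ * offset of -2 is stored as 0x3e (6-bit two's complement), and
+ * qcom,phy-drive-ldo-level = 450 yields vreg_ctrl = 0x58 | 3 = 0x5b. The
+ * supported LDO levels step by 25 mV from 375 mV to 500 mV.
+ */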
+
+static const struct regulator_bulk_data dsi_phy_10nm_regulators[] = {
+ { .supply = "vdds", .init_load_uA = 36000 },
+};
+
+const struct msm_dsi_phy_cfg dsi_phy_10nm_cfgs = {
+ .has_phy_lane = true,
+ .regulator_data = dsi_phy_10nm_regulators,
+ .num_regulators = ARRAY_SIZE(dsi_phy_10nm_regulators),
+ .ops = {
+ .enable = dsi_10nm_phy_enable,
+ .disable = dsi_10nm_phy_disable,
+ .pll_init = dsi_pll_10nm_init,
+ .save_pll_state = dsi_10nm_pll_save_state,
+ .restore_pll_state = dsi_10nm_pll_restore_state,
+ .parse_dt_properties = dsi_10nm_phy_parse_dt,
+ },
+ .min_pll_rate = 1000000000UL,
+ .max_pll_rate = 3500000000UL,
+ .io_start = { 0xae94400, 0xae96400 },
+ .num_dsi_phy = 2,
+};
+
+const struct msm_dsi_phy_cfg dsi_phy_10nm_8998_cfgs = {
+ .has_phy_lane = true,
+ .regulator_data = dsi_phy_10nm_regulators,
+ .num_regulators = ARRAY_SIZE(dsi_phy_10nm_regulators),
+ .ops = {
+ .enable = dsi_10nm_phy_enable,
+ .disable = dsi_10nm_phy_disable,
+ .pll_init = dsi_pll_10nm_init,
+ .save_pll_state = dsi_10nm_pll_save_state,
+ .restore_pll_state = dsi_10nm_pll_restore_state,
+ .parse_dt_properties = dsi_10nm_phy_parse_dt,
+ },
+ .min_pll_rate = 1000000000UL,
+ .max_pll_rate = 3500000000UL,
+ .io_start = { 0xc994400, 0xc996400 },
+ .num_dsi_phy = 2,
+ .quirks = DSI_PHY_10NM_QUIRK_OLD_TIMINGS,
+};
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_14nm.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_14nm.c
new file mode 100644
index 000000000..f0780c40b
--- /dev/null
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_14nm.c
@@ -0,0 +1,1086 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/delay.h>
+
+#include "dsi_phy.h"
+#include "dsi.xml.h"
+#include "dsi_phy_14nm.xml.h"
+
+#define PHY_14NM_CKLN_IDX 4
+
+/*
+ * DSI PLL 14nm - clock diagram (eg: DSI0):
+ *
+ * dsi0n1_postdiv_clk
+ * |
+ * |
+ * +----+ | +----+
+ * dsi0vco_clk ---| n1 |--o--| /8 |-- dsi0pllbyte
+ * +----+ | +----+
+ * | dsi0n1_postdivby2_clk
+ * | +----+ |
+ * o---| /2 |--o--|\
+ * | +----+ | \ +----+
+ * | | |--| n2 |-- dsi0pll
+ * o--------------| / +----+
+ * |/
+ */
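+
+/*
+ * Worked example with assumed values (illustrative only, not a HW
+ * requirement): with dsi0vco_clk at 1.5 GHz and N1 = 2, dsi0pllbyte runs
+ * at 1.5 GHz / 2 / 8 = 93.75 MHz, and with N2 = 2 dsi0pll runs at
+ * 1.5 GHz / 2 / 2 / 2 = 187.5 MHz.
+ */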
+
+#define POLL_MAX_READS 15
+#define POLL_TIMEOUT_US 1000
+
+#define VCO_REF_CLK_RATE 19200000
+#define VCO_MIN_RATE 1300000000UL
+#define VCO_MAX_RATE 2600000000UL
+
+struct dsi_pll_config {
+ u64 vco_current_rate;
+
+ u32 ssc_en; /* SSC enable/disable */
+
+ /* fixed params */
+ u32 plllock_cnt;
+ u32 ssc_center;
+ u32 ssc_adj_period;
+ u32 ssc_spread;
+ u32 ssc_freq;
+
+ /* calculated */
+ u32 dec_start;
+ u32 div_frac_start;
+ u32 ssc_period;
+ u32 ssc_step_size;
+ u32 plllock_cmp;
+ u32 pll_vco_div_ref;
+ u32 pll_vco_count;
+ u32 pll_kvco_div_ref;
+ u32 pll_kvco_count;
+};
+
+struct pll_14nm_cached_state {
+ unsigned long vco_rate;
+ u8 n2postdiv;
+ u8 n1postdiv;
+};
+
+struct dsi_pll_14nm {
+ struct clk_hw clk_hw;
+
+ struct msm_dsi_phy *phy;
+
+ /* protects REG_DSI_14nm_PHY_CMN_CLK_CFG0 register */
+ spinlock_t postdiv_lock;
+
+ struct pll_14nm_cached_state cached_state;
+
+ struct dsi_pll_14nm *slave;
+};
+
+#define to_pll_14nm(x) container_of(x, struct dsi_pll_14nm, clk_hw)
+
+/*
+ * Private struct for N1/N2 post-divider clocks. These clocks are similar to
+ * the generic clk_divider class of clocks. The only difference is that they
+ * also set the slave DSI PLL's post-dividers when in bonded DSI mode.
+ */
+struct dsi_pll_14nm_postdiv {
+ struct clk_hw hw;
+
+ /* divider params */
+ u8 shift;
+ u8 width;
+ u8 flags; /* same flags as used by clk_divider struct */
+
+ struct dsi_pll_14nm *pll;
+};
+
+#define to_pll_14nm_postdiv(_hw) container_of(_hw, struct dsi_pll_14nm_postdiv, hw)
+
+/*
+ * Global list of private DSI PLL struct pointers. We need this for bonded DSI
+ * mode, where the master PLL's clk_ops needs to access the slave's private data.
+ */
+static struct dsi_pll_14nm *pll_14nm_list[DSI_MAX];
+
+static bool pll_14nm_poll_for_ready(struct dsi_pll_14nm *pll_14nm,
+ u32 nb_tries, u32 timeout_us)
+{
+ bool pll_locked = false, pll_ready = false;
+ void __iomem *base = pll_14nm->phy->pll_base;
+ u32 tries, val;
+
+ tries = nb_tries;
+ while (tries--) {
+ val = dsi_phy_read(base + REG_DSI_14nm_PHY_PLL_RESET_SM_READY_STATUS);
+ pll_locked = !!(val & BIT(5));
+
+ if (pll_locked)
+ break;
+
+ udelay(timeout_us);
+ }
+
+ if (!pll_locked)
+ goto out;
+
+ tries = nb_tries;
+ while (tries--) {
+ val = dsi_phy_read(base + REG_DSI_14nm_PHY_PLL_RESET_SM_READY_STATUS);
+ pll_ready = !!(val & BIT(0));
+
+ if (pll_ready)
+ break;
+
+ udelay(timeout_us);
+ }
+
+out:
+ DBG("DSI PLL is %slocked, %sready", pll_locked ? "" : "*not* ", pll_ready ? "" : "*not* ");
+
+ return pll_locked && pll_ready;
+}
+
+static void dsi_pll_14nm_config_init(struct dsi_pll_config *pconf)
+{
+ /* fixed input */
+ pconf->plllock_cnt = 1;
+
+ /*
+ * SSC is enabled by default. We might need DT props for configuring
+	 * some SSC params, such as PPM and center/down spread.
+ */
+ pconf->ssc_en = 1;
+ pconf->ssc_center = 0; /* down spread by default */
+ pconf->ssc_spread = 5; /* PPM / 1000 */
+ pconf->ssc_freq = 31500; /* default recommended */
+ pconf->ssc_adj_period = 37;
+}
+
+#define CEIL(x, y) (((x) + ((y) - 1)) / (y))
+
+static void pll_14nm_ssc_calc(struct dsi_pll_14nm *pll, struct dsi_pll_config *pconf)
+{
+ u32 period, ssc_period;
+ u32 ref, rem;
+ u64 step_size;
+
+ DBG("vco=%lld ref=%d", pconf->vco_current_rate, VCO_REF_CLK_RATE);
+
+ ssc_period = pconf->ssc_freq / 500;
+ period = (u32)VCO_REF_CLK_RATE / 1000;
+ ssc_period = CEIL(period, ssc_period);
+ ssc_period -= 1;
+ pconf->ssc_period = ssc_period;
+
+ DBG("ssc freq=%d spread=%d period=%d", pconf->ssc_freq,
+ pconf->ssc_spread, pconf->ssc_period);
+
+ step_size = (u32)pconf->vco_current_rate;
+ ref = VCO_REF_CLK_RATE;
+ ref /= 1000;
+ step_size = div_u64(step_size, ref);
+ step_size <<= 20;
+ step_size = div_u64(step_size, 1000);
+ step_size *= pconf->ssc_spread;
+ step_size = div_u64(step_size, 1000);
+ step_size *= (pconf->ssc_adj_period + 1);
+
+ rem = 0;
+ step_size = div_u64_rem(step_size, ssc_period + 1, &rem);
+ if (rem)
+ step_size++;
+
+ DBG("step_size=%lld", step_size);
+
+ step_size &= 0x0ffff; /* take lower 16 bits */
+
+ pconf->ssc_step_size = step_size;
+}
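+
+/*
+ * For reference, the computation above reduces to (integer arithmetic,
+ * truncation at each step):
+ *
+ *   ssc_period = DIV_ROUND_UP(ref_khz, ssc_freq / 500) - 1
+ *   ssc_step_size = ((vco / ref_khz) << 20) / 1000 * ssc_spread / 1000
+ *                   * (ssc_adj_period + 1), divided by (ssc_period + 1)
+ *                   and rounded up, keeping the low 16 bits
+ */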
+
+static void pll_14nm_dec_frac_calc(struct dsi_pll_14nm *pll, struct dsi_pll_config *pconf)
+{
+ u64 multiplier = BIT(20);
+ u64 dec_start_multiple, dec_start, pll_comp_val;
+ u32 duration, div_frac_start;
+ u64 vco_clk_rate = pconf->vco_current_rate;
+ u64 fref = VCO_REF_CLK_RATE;
+
+ DBG("vco_clk_rate=%lld ref_clk_rate=%lld", vco_clk_rate, fref);
+
+ dec_start_multiple = div_u64(vco_clk_rate * multiplier, fref);
+ dec_start = div_u64_rem(dec_start_multiple, multiplier, &div_frac_start);
+
+ pconf->dec_start = (u32)dec_start;
+ pconf->div_frac_start = div_frac_start;
+
+ if (pconf->plllock_cnt == 0)
+ duration = 1024;
+ else if (pconf->plllock_cnt == 1)
+ duration = 256;
+ else if (pconf->plllock_cnt == 2)
+ duration = 128;
+ else
+ duration = 32;
+
+ pll_comp_val = duration * dec_start_multiple;
+ pll_comp_val = div_u64(pll_comp_val, multiplier);
+ do_div(pll_comp_val, 10);
+
+ pconf->plllock_cmp = (u32)pll_comp_val;
+}
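+
+/*
+ * Worked example with an assumed VCO rate (illustrative only): for
+ * vco_clk_rate = 1386 MHz and fref = 19.2 MHz, vco/fref = 72.1875, so
+ * dec_start = 72 and div_frac_start = 0.1875 * 2^20 = 196608. With
+ * plllock_cnt = 1 (duration = 256), plllock_cmp = 256 * 72.1875 / 10 = 1848.
+ */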
+
+static u32 pll_14nm_kvco_slop(u32 vrate)
+{
+ u32 slop = 0;
+
+ if (vrate > VCO_MIN_RATE && vrate <= 1800000000UL)
+ slop = 600;
+ else if (vrate > 1800000000UL && vrate < 2300000000UL)
+ slop = 400;
+ else if (vrate > 2300000000UL && vrate < VCO_MAX_RATE)
+ slop = 280;
+
+ return slop;
+}
+
+static void pll_14nm_calc_vco_count(struct dsi_pll_14nm *pll, struct dsi_pll_config *pconf)
+{
+ u64 vco_clk_rate = pconf->vco_current_rate;
+ u64 fref = VCO_REF_CLK_RATE;
+ u32 vco_measure_time = 5;
+ u32 kvco_measure_time = 5;
+ u64 data;
+ u32 cnt;
+
+ data = fref * vco_measure_time;
+ do_div(data, 1000000);
+ data &= 0x03ff; /* 10 bits */
+ data -= 2;
+ pconf->pll_vco_div_ref = data;
+
+ data = div_u64(vco_clk_rate, 1000000); /* unit is MHz */
+ data *= vco_measure_time;
+ do_div(data, 10);
+ pconf->pll_vco_count = data;
+
+ data = fref * kvco_measure_time;
+ do_div(data, 1000000);
+ data &= 0x03ff; /* 10 bits */
+ data -= 1;
+ pconf->pll_kvco_div_ref = data;
+
+ cnt = pll_14nm_kvco_slop(vco_clk_rate);
+ cnt *= 2;
+ cnt /= 100;
+ cnt *= kvco_measure_time;
+ pconf->pll_kvco_count = cnt;
+}
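+
+/*
+ * Worked example with assumed values (illustrative only): with
+ * fref = 19.2 MHz and a 5 us measure time, pll_vco_div_ref =
+ * 19.2 * 5 - 2 = 94 and pll_kvco_div_ref = 95. For a 1386 MHz VCO,
+ * pll_vco_count = 1386 * 5 / 10 = 693 and, with a KVCO slop of 600,
+ * pll_kvco_count = 600 * 2 / 100 * 5 = 60.
+ */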
+
+static void pll_db_commit_ssc(struct dsi_pll_14nm *pll, struct dsi_pll_config *pconf)
+{
+ void __iomem *base = pll->phy->pll_base;
+ u8 data;
+
+ data = pconf->ssc_adj_period;
+ data &= 0x0ff;
+ dsi_phy_write(base + REG_DSI_14nm_PHY_PLL_SSC_ADJ_PER1, data);
+ data = (pconf->ssc_adj_period >> 8);
+ data &= 0x03;
+ dsi_phy_write(base + REG_DSI_14nm_PHY_PLL_SSC_ADJ_PER2, data);
+
+ data = pconf->ssc_period;
+ data &= 0x0ff;
+ dsi_phy_write(base + REG_DSI_14nm_PHY_PLL_SSC_PER1, data);
+ data = (pconf->ssc_period >> 8);
+ data &= 0x0ff;
+ dsi_phy_write(base + REG_DSI_14nm_PHY_PLL_SSC_PER2, data);
+
+ data = pconf->ssc_step_size;
+ data &= 0x0ff;
+ dsi_phy_write(base + REG_DSI_14nm_PHY_PLL_SSC_STEP_SIZE1, data);
+ data = (pconf->ssc_step_size >> 8);
+ data &= 0x0ff;
+ dsi_phy_write(base + REG_DSI_14nm_PHY_PLL_SSC_STEP_SIZE2, data);
+
+ data = (pconf->ssc_center & 0x01);
+ data <<= 1;
+ data |= 0x01; /* enable */
+ dsi_phy_write(base + REG_DSI_14nm_PHY_PLL_SSC_EN_CENTER, data);
+
+ wmb(); /* make sure register committed */
+}
+
+static void pll_db_commit_common(struct dsi_pll_14nm *pll,
+ struct dsi_pll_config *pconf)
+{
+ void __iomem *base = pll->phy->pll_base;
+ u8 data;
+
+ /* configure the non-frequency-dependent PLL registers */
+ data = 0;
+ dsi_phy_write(base + REG_DSI_14nm_PHY_PLL_SYSCLK_EN_RESET, data);
+
+ dsi_phy_write(base + REG_DSI_14nm_PHY_PLL_TXCLK_EN, 1);
+
+ dsi_phy_write(base + REG_DSI_14nm_PHY_PLL_RESETSM_CNTRL, 48);
+ dsi_phy_write(base + REG_DSI_14nm_PHY_PLL_RESETSM_CNTRL2, 4 << 3); /* bandgap_timer */
+ dsi_phy_write(base + REG_DSI_14nm_PHY_PLL_RESETSM_CNTRL5, 5); /* pll_wakeup_timer */
+
+ data = pconf->pll_vco_div_ref & 0xff;
+ dsi_phy_write(base + REG_DSI_14nm_PHY_PLL_VCO_DIV_REF1, data);
+ data = (pconf->pll_vco_div_ref >> 8) & 0x3;
+ dsi_phy_write(base + REG_DSI_14nm_PHY_PLL_VCO_DIV_REF2, data);
+
+ data = pconf->pll_kvco_div_ref & 0xff;
+ dsi_phy_write(base + REG_DSI_14nm_PHY_PLL_KVCO_DIV_REF1, data);
+ data = (pconf->pll_kvco_div_ref >> 8) & 0x3;
+ dsi_phy_write(base + REG_DSI_14nm_PHY_PLL_KVCO_DIV_REF2, data);
+
+ dsi_phy_write(base + REG_DSI_14nm_PHY_PLL_PLL_MISC1, 16);
+
+ dsi_phy_write(base + REG_DSI_14nm_PHY_PLL_IE_TRIM, 4);
+
+ dsi_phy_write(base + REG_DSI_14nm_PHY_PLL_IP_TRIM, 4);
+
+ dsi_phy_write(base + REG_DSI_14nm_PHY_PLL_CP_SET_CUR, 1 << 3 | 1);
+
+ dsi_phy_write(base + REG_DSI_14nm_PHY_PLL_PLL_ICPCSET, 0 << 3 | 0);
+
+ dsi_phy_write(base + REG_DSI_14nm_PHY_PLL_PLL_ICPMSET, 0 << 3 | 0);
+
+ dsi_phy_write(base + REG_DSI_14nm_PHY_PLL_PLL_ICP_SET, 4 << 3 | 4);
+
+ dsi_phy_write(base + REG_DSI_14nm_PHY_PLL_PLL_LPF1, 1 << 4 | 11);
+
+ dsi_phy_write(base + REG_DSI_14nm_PHY_PLL_IPTAT_TRIM, 7);
+
+ dsi_phy_write(base + REG_DSI_14nm_PHY_PLL_PLL_CRCTRL, 1 << 4 | 2);
+}
+
+static void pll_14nm_software_reset(struct dsi_pll_14nm *pll_14nm)
+{
+ void __iomem *cmn_base = pll_14nm->phy->base;
+
+ /* de-assert PLL start and apply PLL SW reset */
+
+ /* stop pll */
+ dsi_phy_write(cmn_base + REG_DSI_14nm_PHY_CMN_PLL_CNTRL, 0);
+
+ /* pll sw reset */
+ dsi_phy_write_udelay(cmn_base + REG_DSI_14nm_PHY_CMN_CTRL_1, 0x20, 10);
+ wmb(); /* make sure register committed */
+
+ dsi_phy_write(cmn_base + REG_DSI_14nm_PHY_CMN_CTRL_1, 0);
+ wmb(); /* make sure register committed */
+}
+
+static void pll_db_commit_14nm(struct dsi_pll_14nm *pll,
+ struct dsi_pll_config *pconf)
+{
+ void __iomem *base = pll->phy->pll_base;
+ void __iomem *cmn_base = pll->phy->base;
+ u8 data;
+
+ DBG("DSI%d PLL", pll->phy->id);
+
+ dsi_phy_write(cmn_base + REG_DSI_14nm_PHY_CMN_LDO_CNTRL, 0x3c);
+
+ pll_db_commit_common(pll, pconf);
+
+ pll_14nm_software_reset(pll);
+
+ /* Use the /2 path in Mux */
+ dsi_phy_write(cmn_base + REG_DSI_14nm_PHY_CMN_CLK_CFG1, 1);
+
+ data = 0xff; /* data, clk, pll normal operation */
+ dsi_phy_write(cmn_base + REG_DSI_14nm_PHY_CMN_CTRL_0, data);
+
+ /* configure the frequency dependent pll registers */
+ data = pconf->dec_start;
+ dsi_phy_write(base + REG_DSI_14nm_PHY_PLL_DEC_START, data);
+
+ data = pconf->div_frac_start & 0xff;
+ dsi_phy_write(base + REG_DSI_14nm_PHY_PLL_DIV_FRAC_START1, data);
+ data = (pconf->div_frac_start >> 8) & 0xff;
+ dsi_phy_write(base + REG_DSI_14nm_PHY_PLL_DIV_FRAC_START2, data);
+ data = (pconf->div_frac_start >> 16) & 0xf;
+ dsi_phy_write(base + REG_DSI_14nm_PHY_PLL_DIV_FRAC_START3, data);
+
+ data = pconf->plllock_cmp & 0xff;
+ dsi_phy_write(base + REG_DSI_14nm_PHY_PLL_PLLLOCK_CMP1, data);
+
+ data = (pconf->plllock_cmp >> 8) & 0xff;
+ dsi_phy_write(base + REG_DSI_14nm_PHY_PLL_PLLLOCK_CMP2, data);
+
+ data = (pconf->plllock_cmp >> 16) & 0x3;
+ dsi_phy_write(base + REG_DSI_14nm_PHY_PLL_PLLLOCK_CMP3, data);
+
+ data = pconf->plllock_cnt << 1 | 0 << 3; /* plllock_rng */
+ dsi_phy_write(base + REG_DSI_14nm_PHY_PLL_PLLLOCK_CMP_EN, data);
+
+ data = pconf->pll_vco_count & 0xff;
+ dsi_phy_write(base + REG_DSI_14nm_PHY_PLL_VCO_COUNT1, data);
+ data = (pconf->pll_vco_count >> 8) & 0xff;
+ dsi_phy_write(base + REG_DSI_14nm_PHY_PLL_VCO_COUNT2, data);
+
+ data = pconf->pll_kvco_count & 0xff;
+ dsi_phy_write(base + REG_DSI_14nm_PHY_PLL_KVCO_COUNT1, data);
+ data = (pconf->pll_kvco_count >> 8) & 0x3;
+ dsi_phy_write(base + REG_DSI_14nm_PHY_PLL_KVCO_COUNT2, data);
+
+ /*
+ * High nibble configures the post divider internal to the VCO. It's
+ * fixed to divide by 1 for now.
+ *
+ * 0: divided by 1
+ * 1: divided by 2
+ * 2: divided by 4
+ * 3: divided by 8
+ */
+ dsi_phy_write(base + REG_DSI_14nm_PHY_PLL_PLL_LPF2_POSTDIV, 0 << 4 | 3);
+
+ if (pconf->ssc_en)
+ pll_db_commit_ssc(pll, pconf);
+
+ wmb(); /* make sure register committed */
+}
+
+/*
+ * VCO clock Callbacks
+ */
+static int dsi_pll_14nm_vco_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct dsi_pll_14nm *pll_14nm = to_pll_14nm(hw);
+ struct dsi_pll_config conf;
+
+ DBG("DSI PLL%d rate=%lu, parent's=%lu", pll_14nm->phy->id, rate,
+ parent_rate);
+
+ dsi_pll_14nm_config_init(&conf);
+ conf.vco_current_rate = rate;
+
+ pll_14nm_dec_frac_calc(pll_14nm, &conf);
+
+ if (conf.ssc_en)
+ pll_14nm_ssc_calc(pll_14nm, &conf);
+
+ pll_14nm_calc_vco_count(pll_14nm, &conf);
+
+ /*
+ * Commit the slave DSI PLL registers if we're the master. Note that we
+ * don't lock the slave PLL. We just ensure that the PLL/PHY registers
+ * of the master and slave are identical.
+ */
+ if (pll_14nm->phy->usecase == MSM_DSI_PHY_MASTER) {
+ struct dsi_pll_14nm *pll_14nm_slave = pll_14nm->slave;
+
+ pll_db_commit_14nm(pll_14nm_slave, &conf);
+ }
+
+ pll_db_commit_14nm(pll_14nm, &conf);
+
+ return 0;
+}
+
+static unsigned long dsi_pll_14nm_vco_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct dsi_pll_14nm *pll_14nm = to_pll_14nm(hw);
+ void __iomem *base = pll_14nm->phy->pll_base;
+ u64 vco_rate, multiplier = BIT(20);
+ u32 div_frac_start;
+ u32 dec_start;
+ u64 ref_clk = parent_rate;
+
+ dec_start = dsi_phy_read(base + REG_DSI_14nm_PHY_PLL_DEC_START);
+ dec_start &= 0x0ff;
+
+ DBG("dec_start = %x", dec_start);
+
+ div_frac_start = (dsi_phy_read(base + REG_DSI_14nm_PHY_PLL_DIV_FRAC_START3)
+ & 0xf) << 16;
+ div_frac_start |= (dsi_phy_read(base + REG_DSI_14nm_PHY_PLL_DIV_FRAC_START2)
+ & 0xff) << 8;
+ div_frac_start |= dsi_phy_read(base + REG_DSI_14nm_PHY_PLL_DIV_FRAC_START1)
+ & 0xff;
+
+ DBG("div_frac_start = %x", div_frac_start);
+
+ vco_rate = ref_clk * dec_start;
+
+ vco_rate += ((ref_clk * div_frac_start) / multiplier);
+
+ /*
+	 * Recalculating the rate from dec_start and frac_start doesn't end up
+	 * at exactly the rate we originally set. Convert the freq to kHz,
+	 * round it up and convert it back to Hz.
+ */
+ vco_rate = DIV_ROUND_UP_ULL(vco_rate, 1000) * 1000;
+
+ DBG("returning vco rate = %lu", (unsigned long)vco_rate);
+
+ return (unsigned long)vco_rate;
+}
+
+static int dsi_pll_14nm_vco_prepare(struct clk_hw *hw)
+{
+ struct dsi_pll_14nm *pll_14nm = to_pll_14nm(hw);
+ void __iomem *base = pll_14nm->phy->pll_base;
+ void __iomem *cmn_base = pll_14nm->phy->base;
+ bool locked;
+
+ DBG("");
+
+ if (unlikely(pll_14nm->phy->pll_on))
+ return 0;
+
+ if (dsi_pll_14nm_vco_recalc_rate(hw, VCO_REF_CLK_RATE) == 0)
+ dsi_pll_14nm_vco_set_rate(hw, pll_14nm->phy->cfg->min_pll_rate, VCO_REF_CLK_RATE);
+
+ dsi_phy_write(base + REG_DSI_14nm_PHY_PLL_VREF_CFG1, 0x10);
+ dsi_phy_write(cmn_base + REG_DSI_14nm_PHY_CMN_PLL_CNTRL, 1);
+
+ locked = pll_14nm_poll_for_ready(pll_14nm, POLL_MAX_READS,
+ POLL_TIMEOUT_US);
+
+ if (unlikely(!locked)) {
+ DRM_DEV_ERROR(&pll_14nm->phy->pdev->dev, "DSI PLL lock failed\n");
+ return -EINVAL;
+ }
+
+ DBG("DSI PLL lock success");
+ pll_14nm->phy->pll_on = true;
+
+ return 0;
+}
+
+static void dsi_pll_14nm_vco_unprepare(struct clk_hw *hw)
+{
+ struct dsi_pll_14nm *pll_14nm = to_pll_14nm(hw);
+ void __iomem *cmn_base = pll_14nm->phy->base;
+
+ DBG("");
+
+ if (unlikely(!pll_14nm->phy->pll_on))
+ return;
+
+ dsi_phy_write(cmn_base + REG_DSI_14nm_PHY_CMN_PLL_CNTRL, 0);
+
+ pll_14nm->phy->pll_on = false;
+}
+
+static long dsi_pll_14nm_clk_round_rate(struct clk_hw *hw,
+ unsigned long rate, unsigned long *parent_rate)
+{
+ struct dsi_pll_14nm *pll_14nm = to_pll_14nm(hw);
+
+ if (rate < pll_14nm->phy->cfg->min_pll_rate)
+ return pll_14nm->phy->cfg->min_pll_rate;
+ else if (rate > pll_14nm->phy->cfg->max_pll_rate)
+ return pll_14nm->phy->cfg->max_pll_rate;
+ else
+ return rate;
+}
+
+static const struct clk_ops clk_ops_dsi_pll_14nm_vco = {
+ .round_rate = dsi_pll_14nm_clk_round_rate,
+ .set_rate = dsi_pll_14nm_vco_set_rate,
+ .recalc_rate = dsi_pll_14nm_vco_recalc_rate,
+ .prepare = dsi_pll_14nm_vco_prepare,
+ .unprepare = dsi_pll_14nm_vco_unprepare,
+};
+
+/*
+ * N1 and N2 post-divider clock callbacks
+ */
+#define div_mask(width) ((1 << (width)) - 1)
+static unsigned long dsi_pll_14nm_postdiv_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct dsi_pll_14nm_postdiv *postdiv = to_pll_14nm_postdiv(hw);
+ struct dsi_pll_14nm *pll_14nm = postdiv->pll;
+ void __iomem *base = pll_14nm->phy->base;
+ u8 shift = postdiv->shift;
+ u8 width = postdiv->width;
+ u32 val;
+
+ DBG("DSI%d PLL parent rate=%lu", pll_14nm->phy->id, parent_rate);
+
+ val = dsi_phy_read(base + REG_DSI_14nm_PHY_CMN_CLK_CFG0) >> shift;
+ val &= div_mask(width);
+
+ return divider_recalc_rate(hw, parent_rate, val, NULL,
+ postdiv->flags, width);
+}
+
+static long dsi_pll_14nm_postdiv_round_rate(struct clk_hw *hw,
+ unsigned long rate,
+ unsigned long *prate)
+{
+ struct dsi_pll_14nm_postdiv *postdiv = to_pll_14nm_postdiv(hw);
+ struct dsi_pll_14nm *pll_14nm = postdiv->pll;
+
+ DBG("DSI%d PLL parent rate=%lu", pll_14nm->phy->id, rate);
+
+ return divider_round_rate(hw, rate, prate, NULL,
+ postdiv->width,
+ postdiv->flags);
+}
+
+static int dsi_pll_14nm_postdiv_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct dsi_pll_14nm_postdiv *postdiv = to_pll_14nm_postdiv(hw);
+ struct dsi_pll_14nm *pll_14nm = postdiv->pll;
+ void __iomem *base = pll_14nm->phy->base;
+ spinlock_t *lock = &pll_14nm->postdiv_lock;
+ u8 shift = postdiv->shift;
+ u8 width = postdiv->width;
+ unsigned int value;
+ unsigned long flags = 0;
+ u32 val;
+
+ DBG("DSI%d PLL parent rate=%lu parent rate %lu", pll_14nm->phy->id, rate,
+ parent_rate);
+
+ value = divider_get_val(rate, parent_rate, NULL, postdiv->width,
+ postdiv->flags);
+
+ spin_lock_irqsave(lock, flags);
+
+ val = dsi_phy_read(base + REG_DSI_14nm_PHY_CMN_CLK_CFG0);
+ val &= ~(div_mask(width) << shift);
+
+ val |= value << shift;
+ dsi_phy_write(base + REG_DSI_14nm_PHY_CMN_CLK_CFG0, val);
+
+ /*
+ * If we're the master in bonded DSI mode, then the slave PLL's
+ * post-dividers follow the master's post-dividers.
+ */
+ if (pll_14nm->phy->usecase == MSM_DSI_PHY_MASTER) {
+ struct dsi_pll_14nm *pll_14nm_slave = pll_14nm->slave;
+ void __iomem *slave_base = pll_14nm_slave->phy->base;
+
+ dsi_phy_write(slave_base + REG_DSI_14nm_PHY_CMN_CLK_CFG0, val);
+ }
+
+ spin_unlock_irqrestore(lock, flags);
+
+ return 0;
+}
+
+static const struct clk_ops clk_ops_dsi_pll_14nm_postdiv = {
+ .recalc_rate = dsi_pll_14nm_postdiv_recalc_rate,
+ .round_rate = dsi_pll_14nm_postdiv_round_rate,
+ .set_rate = dsi_pll_14nm_postdiv_set_rate,
+};
+
+/*
+ * PLL Callbacks
+ */
+
+static void dsi_14nm_pll_save_state(struct msm_dsi_phy *phy)
+{
+ struct dsi_pll_14nm *pll_14nm = to_pll_14nm(phy->vco_hw);
+ struct pll_14nm_cached_state *cached_state = &pll_14nm->cached_state;
+ void __iomem *cmn_base = pll_14nm->phy->base;
+ u32 data;
+
+ data = dsi_phy_read(cmn_base + REG_DSI_14nm_PHY_CMN_CLK_CFG0);
+
+ cached_state->n1postdiv = data & 0xf;
+ cached_state->n2postdiv = (data >> 4) & 0xf;
+
+ DBG("DSI%d PLL save state %x %x", pll_14nm->phy->id,
+ cached_state->n1postdiv, cached_state->n2postdiv);
+
+ cached_state->vco_rate = clk_hw_get_rate(phy->vco_hw);
+}
+
+static int dsi_14nm_pll_restore_state(struct msm_dsi_phy *phy)
+{
+ struct dsi_pll_14nm *pll_14nm = to_pll_14nm(phy->vco_hw);
+ struct pll_14nm_cached_state *cached_state = &pll_14nm->cached_state;
+ void __iomem *cmn_base = pll_14nm->phy->base;
+ u32 data;
+ int ret;
+
+ ret = dsi_pll_14nm_vco_set_rate(phy->vco_hw,
+ cached_state->vco_rate, 0);
+ if (ret) {
+ DRM_DEV_ERROR(&pll_14nm->phy->pdev->dev,
+ "restore vco rate failed. ret=%d\n", ret);
+ return ret;
+ }
+
+ data = cached_state->n1postdiv | (cached_state->n2postdiv << 4);
+
+ DBG("DSI%d PLL restore state %x %x", pll_14nm->phy->id,
+ cached_state->n1postdiv, cached_state->n2postdiv);
+
+ dsi_phy_write(cmn_base + REG_DSI_14nm_PHY_CMN_CLK_CFG0, data);
+
+ /* also restore post-dividers for slave DSI PLL */
+ if (phy->usecase == MSM_DSI_PHY_MASTER) {
+ struct dsi_pll_14nm *pll_14nm_slave = pll_14nm->slave;
+ void __iomem *slave_base = pll_14nm_slave->phy->base;
+
+ dsi_phy_write(slave_base + REG_DSI_14nm_PHY_CMN_CLK_CFG0, data);
+ }
+
+ return 0;
+}
+
+static int dsi_14nm_set_usecase(struct msm_dsi_phy *phy)
+{
+ struct dsi_pll_14nm *pll_14nm = to_pll_14nm(phy->vco_hw);
+ void __iomem *base = phy->pll_base;
+ u32 clkbuflr_en, bandgap = 0;
+
+ switch (phy->usecase) {
+ case MSM_DSI_PHY_STANDALONE:
+ clkbuflr_en = 0x1;
+ break;
+ case MSM_DSI_PHY_MASTER:
+ clkbuflr_en = 0x3;
+ pll_14nm->slave = pll_14nm_list[(pll_14nm->phy->id + 1) % DSI_MAX];
+ break;
+ case MSM_DSI_PHY_SLAVE:
+ clkbuflr_en = 0x0;
+ bandgap = 0x3;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ dsi_phy_write(base + REG_DSI_14nm_PHY_PLL_CLKBUFLR_EN, clkbuflr_en);
+ if (bandgap)
+ dsi_phy_write(base + REG_DSI_14nm_PHY_PLL_PLL_BANDGAP, bandgap);
+
+ return 0;
+}
+
+static struct clk_hw *pll_14nm_postdiv_register(struct dsi_pll_14nm *pll_14nm,
+ const char *name,
+ const struct clk_hw *parent_hw,
+ unsigned long flags,
+ u8 shift)
+{
+ struct dsi_pll_14nm_postdiv *pll_postdiv;
+ struct device *dev = &pll_14nm->phy->pdev->dev;
+ struct clk_init_data postdiv_init = {
+ .parent_hws = (const struct clk_hw *[]) { parent_hw },
+ .num_parents = 1,
+ .name = name,
+ .flags = flags,
+ .ops = &clk_ops_dsi_pll_14nm_postdiv,
+ };
+ int ret;
+
+ pll_postdiv = devm_kzalloc(dev, sizeof(*pll_postdiv), GFP_KERNEL);
+ if (!pll_postdiv)
+ return ERR_PTR(-ENOMEM);
+
+ pll_postdiv->pll = pll_14nm;
+ pll_postdiv->shift = shift;
+ /* both N1 and N2 postdividers are 4 bits wide */
+ pll_postdiv->width = 4;
+ /* range of each divider is from 1 to 15 */
+ pll_postdiv->flags = CLK_DIVIDER_ONE_BASED;
+ pll_postdiv->hw.init = &postdiv_init;
+
+ ret = devm_clk_hw_register(dev, &pll_postdiv->hw);
+ if (ret)
+ return ERR_PTR(ret);
+
+ return &pll_postdiv->hw;
+}
+
+static int pll_14nm_register(struct dsi_pll_14nm *pll_14nm, struct clk_hw **provided_clocks)
+{
+ char clk_name[32];
+ struct clk_init_data vco_init = {
+ .parent_data = &(const struct clk_parent_data) {
+ .fw_name = "ref",
+ },
+ .num_parents = 1,
+ .name = clk_name,
+ .flags = CLK_IGNORE_UNUSED,
+ .ops = &clk_ops_dsi_pll_14nm_vco,
+ };
+ struct device *dev = &pll_14nm->phy->pdev->dev;
+ struct clk_hw *hw, *n1_postdiv, *n1_postdivby2;
+ int ret;
+
+ DBG("DSI%d", pll_14nm->phy->id);
+
+ snprintf(clk_name, sizeof(clk_name), "dsi%dvco_clk", pll_14nm->phy->id);
+ pll_14nm->clk_hw.init = &vco_init;
+
+ ret = devm_clk_hw_register(dev, &pll_14nm->clk_hw);
+ if (ret)
+ return ret;
+
+ snprintf(clk_name, sizeof(clk_name), "dsi%dn1_postdiv_clk", pll_14nm->phy->id);
+
+ /* N1 postdiv, bits 0-3 in REG_DSI_14nm_PHY_CMN_CLK_CFG0 */
+ n1_postdiv = pll_14nm_postdiv_register(pll_14nm, clk_name,
+ &pll_14nm->clk_hw, CLK_SET_RATE_PARENT, 0);
+ if (IS_ERR(n1_postdiv))
+ return PTR_ERR(n1_postdiv);
+
+ snprintf(clk_name, sizeof(clk_name), "dsi%dpllbyte", pll_14nm->phy->id);
+
+ /* DSI Byte clock = VCO_CLK / N1 / 8 */
+ hw = devm_clk_hw_register_fixed_factor_parent_hw(dev, clk_name,
+ n1_postdiv, CLK_SET_RATE_PARENT, 1, 8);
+ if (IS_ERR(hw))
+ return PTR_ERR(hw);
+
+ provided_clocks[DSI_BYTE_PLL_CLK] = hw;
+
+ snprintf(clk_name, sizeof(clk_name), "dsi%dn1_postdivby2_clk", pll_14nm->phy->id);
+
+ /*
+	 * Skip the mux for now: force DSICLK_SEL to 1 and add a /2 divider
+	 * along the way. Don't let it set the parent.
+ */
+ n1_postdivby2 = devm_clk_hw_register_fixed_factor_parent_hw(dev,
+ clk_name, n1_postdiv, 0, 1, 2);
+ if (IS_ERR(n1_postdivby2))
+ return PTR_ERR(n1_postdivby2);
+
+ snprintf(clk_name, sizeof(clk_name), "dsi%dpll", pll_14nm->phy->id);
+
+ /* DSI pixel clock = VCO_CLK / N1 / 2 / N2
+ * This is the output of N2 post-divider, bits 4-7 in
+ * REG_DSI_14nm_PHY_CMN_CLK_CFG0. Don't let it set parent.
+ */
+ hw = pll_14nm_postdiv_register(pll_14nm, clk_name, n1_postdivby2,
+ 0, 4);
+ if (IS_ERR(hw))
+ return PTR_ERR(hw);
+
+ provided_clocks[DSI_PIXEL_PLL_CLK] = hw;
+
+ return 0;
+}
+
+static int dsi_pll_14nm_init(struct msm_dsi_phy *phy)
+{
+ struct platform_device *pdev = phy->pdev;
+ struct dsi_pll_14nm *pll_14nm;
+ int ret;
+
+ if (!pdev)
+ return -ENODEV;
+
+ pll_14nm = devm_kzalloc(&pdev->dev, sizeof(*pll_14nm), GFP_KERNEL);
+ if (!pll_14nm)
+ return -ENOMEM;
+
+ DBG("PLL%d", phy->id);
+
+ pll_14nm_list[phy->id] = pll_14nm;
+
+ spin_lock_init(&pll_14nm->postdiv_lock);
+
+ pll_14nm->phy = phy;
+
+ ret = pll_14nm_register(pll_14nm, phy->provided_clocks->hws);
+ if (ret) {
+ DRM_DEV_ERROR(&pdev->dev, "failed to register PLL: %d\n", ret);
+ return ret;
+ }
+
+ phy->vco_hw = &pll_14nm->clk_hw;
+
+ return 0;
+}
+
+static void dsi_14nm_dphy_set_timing(struct msm_dsi_phy *phy,
+ struct msm_dsi_dphy_timing *timing,
+ int lane_idx)
+{
+ void __iomem *base = phy->lane_base;
+ bool clk_ln = (lane_idx == PHY_14NM_CKLN_IDX);
+ u32 zero = clk_ln ? timing->clk_zero : timing->hs_zero;
+ u32 prepare = clk_ln ? timing->clk_prepare : timing->hs_prepare;
+ u32 trail = clk_ln ? timing->clk_trail : timing->hs_trail;
+ u32 rqst = clk_ln ? timing->hs_rqst_ckln : timing->hs_rqst;
+ u32 prep_dly = clk_ln ? timing->hs_prep_dly_ckln : timing->hs_prep_dly;
+ u32 halfbyte_en = clk_ln ? timing->hs_halfbyte_en_ckln :
+ timing->hs_halfbyte_en;
+
+ dsi_phy_write(base + REG_DSI_14nm_PHY_LN_TIMING_CTRL_4(lane_idx),
+ DSI_14nm_PHY_LN_TIMING_CTRL_4_HS_EXIT(timing->hs_exit));
+ dsi_phy_write(base + REG_DSI_14nm_PHY_LN_TIMING_CTRL_5(lane_idx),
+ DSI_14nm_PHY_LN_TIMING_CTRL_5_HS_ZERO(zero));
+ dsi_phy_write(base + REG_DSI_14nm_PHY_LN_TIMING_CTRL_6(lane_idx),
+ DSI_14nm_PHY_LN_TIMING_CTRL_6_HS_PREPARE(prepare));
+ dsi_phy_write(base + REG_DSI_14nm_PHY_LN_TIMING_CTRL_7(lane_idx),
+ DSI_14nm_PHY_LN_TIMING_CTRL_7_HS_TRAIL(trail));
+ dsi_phy_write(base + REG_DSI_14nm_PHY_LN_TIMING_CTRL_8(lane_idx),
+ DSI_14nm_PHY_LN_TIMING_CTRL_8_HS_RQST(rqst));
+ dsi_phy_write(base + REG_DSI_14nm_PHY_LN_CFG0(lane_idx),
+ DSI_14nm_PHY_LN_CFG0_PREPARE_DLY(prep_dly));
+ dsi_phy_write(base + REG_DSI_14nm_PHY_LN_CFG1(lane_idx),
+ halfbyte_en ? DSI_14nm_PHY_LN_CFG1_HALFBYTECLK_EN : 0);
+ dsi_phy_write(base + REG_DSI_14nm_PHY_LN_TIMING_CTRL_9(lane_idx),
+ DSI_14nm_PHY_LN_TIMING_CTRL_9_TA_GO(timing->ta_go) |
+ DSI_14nm_PHY_LN_TIMING_CTRL_9_TA_SURE(timing->ta_sure));
+ dsi_phy_write(base + REG_DSI_14nm_PHY_LN_TIMING_CTRL_10(lane_idx),
+ DSI_14nm_PHY_LN_TIMING_CTRL_10_TA_GET(timing->ta_get));
+ dsi_phy_write(base + REG_DSI_14nm_PHY_LN_TIMING_CTRL_11(lane_idx),
+ DSI_14nm_PHY_LN_TIMING_CTRL_11_TRIG3_CMD(0xa0));
+}
+
+static int dsi_14nm_phy_enable(struct msm_dsi_phy *phy,
+ struct msm_dsi_phy_clk_request *clk_req)
+{
+ struct msm_dsi_dphy_timing *timing = &phy->timing;
+ u32 data;
+ int i;
+ int ret;
+ void __iomem *base = phy->base;
+ void __iomem *lane_base = phy->lane_base;
+ u32 glbl_test_ctrl;
+
+ if (msm_dsi_dphy_timing_calc_v2(timing, clk_req)) {
+ DRM_DEV_ERROR(&phy->pdev->dev,
+ "%s: D-PHY timing calculation failed\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ data = 0x1c;
+ if (phy->usecase != MSM_DSI_PHY_STANDALONE)
+ data |= DSI_14nm_PHY_CMN_LDO_CNTRL_VREG_CTRL(32);
+ dsi_phy_write(base + REG_DSI_14nm_PHY_CMN_LDO_CNTRL, data);
+
+ dsi_phy_write(base + REG_DSI_14nm_PHY_CMN_GLBL_TEST_CTRL, 0x1);
+
+ /* 4 data lanes + 1 clk lane configuration */
+ for (i = 0; i < 5; i++) {
+ dsi_phy_write(lane_base + REG_DSI_14nm_PHY_LN_VREG_CNTRL(i),
+ 0x1d);
+
+ dsi_phy_write(lane_base +
+ REG_DSI_14nm_PHY_LN_STRENGTH_CTRL_0(i), 0xff);
+ dsi_phy_write(lane_base +
+ REG_DSI_14nm_PHY_LN_STRENGTH_CTRL_1(i),
+ (i == PHY_14NM_CKLN_IDX) ? 0x00 : 0x06);
+
+ dsi_phy_write(lane_base + REG_DSI_14nm_PHY_LN_CFG3(i),
+ (i == PHY_14NM_CKLN_IDX) ? 0x8f : 0x0f);
+ dsi_phy_write(lane_base + REG_DSI_14nm_PHY_LN_CFG2(i), 0x10);
+ dsi_phy_write(lane_base + REG_DSI_14nm_PHY_LN_TEST_DATAPATH(i),
+ 0);
+ dsi_phy_write(lane_base + REG_DSI_14nm_PHY_LN_TEST_STR(i),
+ 0x88);
+
+ dsi_14nm_dphy_set_timing(phy, timing, i);
+ }
+
+ /* Make sure the PLL is not started */
+ dsi_phy_write(base + REG_DSI_14nm_PHY_CMN_PLL_CNTRL, 0x00);
+
+ wmb(); /* make sure everything is written before reset and enable */
+
+ /* reset digital block */
+ dsi_phy_write(base + REG_DSI_14nm_PHY_CMN_CTRL_1, 0x80);
+ wmb(); /* ensure reset is asserted */
+ udelay(100);
+ dsi_phy_write(base + REG_DSI_14nm_PHY_CMN_CTRL_1, 0x00);
+
+ glbl_test_ctrl = dsi_phy_read(base + REG_DSI_14nm_PHY_CMN_GLBL_TEST_CTRL);
+ if (phy->id == DSI_1 && phy->usecase == MSM_DSI_PHY_SLAVE)
+ glbl_test_ctrl |= DSI_14nm_PHY_CMN_GLBL_TEST_CTRL_BITCLK_HS_SEL;
+ else
+ glbl_test_ctrl &= ~DSI_14nm_PHY_CMN_GLBL_TEST_CTRL_BITCLK_HS_SEL;
+ dsi_phy_write(base + REG_DSI_14nm_PHY_CMN_GLBL_TEST_CTRL, glbl_test_ctrl);
+ ret = dsi_14nm_set_usecase(phy);
+ if (ret) {
+ DRM_DEV_ERROR(&phy->pdev->dev, "%s: set pll usecase failed, %d\n",
+ __func__, ret);
+ return ret;
+ }
+
+ /* Remove power down from PLL and all lanes */
+ dsi_phy_write(base + REG_DSI_14nm_PHY_CMN_CTRL_0, 0xff);
+
+ return 0;
+}
+
+static void dsi_14nm_phy_disable(struct msm_dsi_phy *phy)
+{
+ dsi_phy_write(phy->base + REG_DSI_14nm_PHY_CMN_GLBL_TEST_CTRL, 0);
+ dsi_phy_write(phy->base + REG_DSI_14nm_PHY_CMN_CTRL_0, 0);
+
+ /* ensure that the phy is completely disabled */
+ wmb();
+}
+
+static const struct regulator_bulk_data dsi_phy_14nm_17mA_regulators[] = {
+ { .supply = "vcca", .init_load_uA = 17000 },
+};
+
+static const struct regulator_bulk_data dsi_phy_14nm_73p4mA_regulators[] = {
+ { .supply = "vcca", .init_load_uA = 73400 },
+};
+
+const struct msm_dsi_phy_cfg dsi_phy_14nm_cfgs = {
+ .has_phy_lane = true,
+ .regulator_data = dsi_phy_14nm_17mA_regulators,
+ .num_regulators = ARRAY_SIZE(dsi_phy_14nm_17mA_regulators),
+ .ops = {
+ .enable = dsi_14nm_phy_enable,
+ .disable = dsi_14nm_phy_disable,
+ .pll_init = dsi_pll_14nm_init,
+ .save_pll_state = dsi_14nm_pll_save_state,
+ .restore_pll_state = dsi_14nm_pll_restore_state,
+ },
+ .min_pll_rate = VCO_MIN_RATE,
+ .max_pll_rate = VCO_MAX_RATE,
+ .io_start = { 0x994400, 0x996400 },
+ .num_dsi_phy = 2,
+};
+
+const struct msm_dsi_phy_cfg dsi_phy_14nm_660_cfgs = {
+ .has_phy_lane = true,
+ .regulator_data = dsi_phy_14nm_73p4mA_regulators,
+ .num_regulators = ARRAY_SIZE(dsi_phy_14nm_73p4mA_regulators),
+ .ops = {
+ .enable = dsi_14nm_phy_enable,
+ .disable = dsi_14nm_phy_disable,
+ .pll_init = dsi_pll_14nm_init,
+ .save_pll_state = dsi_14nm_pll_save_state,
+ .restore_pll_state = dsi_14nm_pll_restore_state,
+ },
+ .min_pll_rate = VCO_MIN_RATE,
+ .max_pll_rate = VCO_MAX_RATE,
+ .io_start = { 0xc994400, 0xc996400 },
+ .num_dsi_phy = 2,
+};
+
+const struct msm_dsi_phy_cfg dsi_phy_14nm_8953_cfgs = {
+ .has_phy_lane = true,
+ .regulator_data = dsi_phy_14nm_17mA_regulators,
+ .num_regulators = ARRAY_SIZE(dsi_phy_14nm_17mA_regulators),
+ .ops = {
+ .enable = dsi_14nm_phy_enable,
+ .disable = dsi_14nm_phy_disable,
+ .pll_init = dsi_pll_14nm_init,
+ .save_pll_state = dsi_14nm_pll_save_state,
+ .restore_pll_state = dsi_14nm_pll_restore_state,
+ },
+ .min_pll_rate = VCO_MIN_RATE,
+ .max_pll_rate = VCO_MAX_RATE,
+ .io_start = { 0x1a94400, 0x1a96400 },
+ .num_dsi_phy = 2,
+};
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_20nm.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_20nm.c
new file mode 100644
index 000000000..c9752b991
--- /dev/null
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_20nm.c
@@ -0,0 +1,148 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ */
+
+#include "dsi_phy.h"
+#include "dsi.xml.h"
+#include "dsi_phy_20nm.xml.h"
+
+static void dsi_20nm_dphy_set_timing(struct msm_dsi_phy *phy,
+ struct msm_dsi_dphy_timing *timing)
+{
+ void __iomem *base = phy->base;
+
+ dsi_phy_write(base + REG_DSI_20nm_PHY_TIMING_CTRL_0,
+ DSI_20nm_PHY_TIMING_CTRL_0_CLK_ZERO(timing->clk_zero));
+ dsi_phy_write(base + REG_DSI_20nm_PHY_TIMING_CTRL_1,
+ DSI_20nm_PHY_TIMING_CTRL_1_CLK_TRAIL(timing->clk_trail));
+ dsi_phy_write(base + REG_DSI_20nm_PHY_TIMING_CTRL_2,
+ DSI_20nm_PHY_TIMING_CTRL_2_CLK_PREPARE(timing->clk_prepare));
+ if (timing->clk_zero & BIT(8))
+ dsi_phy_write(base + REG_DSI_20nm_PHY_TIMING_CTRL_3,
+ DSI_20nm_PHY_TIMING_CTRL_3_CLK_ZERO_8);
+ dsi_phy_write(base + REG_DSI_20nm_PHY_TIMING_CTRL_4,
+ DSI_20nm_PHY_TIMING_CTRL_4_HS_EXIT(timing->hs_exit));
+ dsi_phy_write(base + REG_DSI_20nm_PHY_TIMING_CTRL_5,
+ DSI_20nm_PHY_TIMING_CTRL_5_HS_ZERO(timing->hs_zero));
+ dsi_phy_write(base + REG_DSI_20nm_PHY_TIMING_CTRL_6,
+ DSI_20nm_PHY_TIMING_CTRL_6_HS_PREPARE(timing->hs_prepare));
+ dsi_phy_write(base + REG_DSI_20nm_PHY_TIMING_CTRL_7,
+ DSI_20nm_PHY_TIMING_CTRL_7_HS_TRAIL(timing->hs_trail));
+ dsi_phy_write(base + REG_DSI_20nm_PHY_TIMING_CTRL_8,
+ DSI_20nm_PHY_TIMING_CTRL_8_HS_RQST(timing->hs_rqst));
+ dsi_phy_write(base + REG_DSI_20nm_PHY_TIMING_CTRL_9,
+ DSI_20nm_PHY_TIMING_CTRL_9_TA_GO(timing->ta_go) |
+ DSI_20nm_PHY_TIMING_CTRL_9_TA_SURE(timing->ta_sure));
+ dsi_phy_write(base + REG_DSI_20nm_PHY_TIMING_CTRL_10,
+ DSI_20nm_PHY_TIMING_CTRL_10_TA_GET(timing->ta_get));
+ dsi_phy_write(base + REG_DSI_20nm_PHY_TIMING_CTRL_11,
+ DSI_20nm_PHY_TIMING_CTRL_11_TRIG3_CMD(0));
+}
+
+static void dsi_20nm_phy_regulator_ctrl(struct msm_dsi_phy *phy, bool enable)
+{
+ void __iomem *base = phy->reg_base;
+
+ if (!enable) {
+ dsi_phy_write(base + REG_DSI_20nm_PHY_REGULATOR_CAL_PWR_CFG, 0);
+ return;
+ }
+
+ if (phy->regulator_ldo_mode) {
+ dsi_phy_write(phy->base + REG_DSI_20nm_PHY_LDO_CNTRL, 0x1d);
+ return;
+ }
+
+ /* non LDO mode */
+ dsi_phy_write(base + REG_DSI_20nm_PHY_REGULATOR_CTRL_1, 0x03);
+ dsi_phy_write(base + REG_DSI_20nm_PHY_REGULATOR_CTRL_2, 0x03);
+ dsi_phy_write(base + REG_DSI_20nm_PHY_REGULATOR_CTRL_3, 0x00);
+ dsi_phy_write(base + REG_DSI_20nm_PHY_REGULATOR_CTRL_4, 0x20);
+ dsi_phy_write(base + REG_DSI_20nm_PHY_REGULATOR_CAL_PWR_CFG, 0x01);
+ dsi_phy_write(phy->base + REG_DSI_20nm_PHY_LDO_CNTRL, 0x00);
+ dsi_phy_write(base + REG_DSI_20nm_PHY_REGULATOR_CTRL_0, 0x03);
+}
+
+static int dsi_20nm_phy_enable(struct msm_dsi_phy *phy,
+ struct msm_dsi_phy_clk_request *clk_req)
+{
+ struct msm_dsi_dphy_timing *timing = &phy->timing;
+ int i;
+ void __iomem *base = phy->base;
+ u32 cfg_4[4] = {0x20, 0x40, 0x20, 0x00};
+ u32 val;
+
+ DBG("");
+
+ if (msm_dsi_dphy_timing_calc(timing, clk_req)) {
+ DRM_DEV_ERROR(&phy->pdev->dev,
+ "%s: D-PHY timing calculation failed\n", __func__);
+ return -EINVAL;
+ }
+
+ dsi_20nm_phy_regulator_ctrl(phy, true);
+
+ dsi_phy_write(base + REG_DSI_20nm_PHY_STRENGTH_0, 0xff);
+
+ val = dsi_phy_read(base + REG_DSI_20nm_PHY_GLBL_TEST_CTRL);
+ if (phy->id == DSI_1 && phy->usecase == MSM_DSI_PHY_STANDALONE)
+ val |= DSI_20nm_PHY_GLBL_TEST_CTRL_BITCLK_HS_SEL;
+ else
+ val &= ~DSI_20nm_PHY_GLBL_TEST_CTRL_BITCLK_HS_SEL;
+ dsi_phy_write(base + REG_DSI_20nm_PHY_GLBL_TEST_CTRL, val);
+
+ for (i = 0; i < 4; i++) {
+ dsi_phy_write(base + REG_DSI_20nm_PHY_LN_CFG_3(i),
+ (i >> 1) * 0x40);
+ dsi_phy_write(base + REG_DSI_20nm_PHY_LN_TEST_STR_0(i), 0x01);
+ dsi_phy_write(base + REG_DSI_20nm_PHY_LN_TEST_STR_1(i), 0x46);
+ dsi_phy_write(base + REG_DSI_20nm_PHY_LN_CFG_0(i), 0x02);
+ dsi_phy_write(base + REG_DSI_20nm_PHY_LN_CFG_1(i), 0xa0);
+ dsi_phy_write(base + REG_DSI_20nm_PHY_LN_CFG_4(i), cfg_4[i]);
+ }
+
+ dsi_phy_write(base + REG_DSI_20nm_PHY_LNCK_CFG_3, 0x80);
+ dsi_phy_write(base + REG_DSI_20nm_PHY_LNCK_TEST_STR0, 0x01);
+ dsi_phy_write(base + REG_DSI_20nm_PHY_LNCK_TEST_STR1, 0x46);
+ dsi_phy_write(base + REG_DSI_20nm_PHY_LNCK_CFG_0, 0x00);
+ dsi_phy_write(base + REG_DSI_20nm_PHY_LNCK_CFG_1, 0xa0);
+ dsi_phy_write(base + REG_DSI_20nm_PHY_LNCK_CFG_2, 0x00);
+ dsi_phy_write(base + REG_DSI_20nm_PHY_LNCK_CFG_4, 0x00);
+
+ dsi_20nm_dphy_set_timing(phy, timing);
+
+ dsi_phy_write(base + REG_DSI_20nm_PHY_CTRL_1, 0x00);
+
+ dsi_phy_write(base + REG_DSI_20nm_PHY_STRENGTH_1, 0x06);
+
+ /* make sure everything is written before enable */
+ wmb();
+ dsi_phy_write(base + REG_DSI_20nm_PHY_CTRL_0, 0x7f);
+
+ return 0;
+}
+
+static void dsi_20nm_phy_disable(struct msm_dsi_phy *phy)
+{
+ dsi_phy_write(phy->base + REG_DSI_20nm_PHY_CTRL_0, 0);
+ dsi_20nm_phy_regulator_ctrl(phy, false);
+}
+
+static const struct regulator_bulk_data dsi_phy_20nm_regulators[] = {
+ { .supply = "vddio", .init_load_uA = 100000 }, /* 1.8 V */
+ { .supply = "vcca", .init_load_uA = 10000 }, /* 1.0 V */
+};
+
+const struct msm_dsi_phy_cfg dsi_phy_20nm_cfgs = {
+ .has_phy_regulator = true,
+ .regulator_data = dsi_phy_20nm_regulators,
+ .num_regulators = ARRAY_SIZE(dsi_phy_20nm_regulators),
+ .ops = {
+ .enable = dsi_20nm_phy_enable,
+ .disable = dsi_20nm_phy_disable,
+ },
+ .io_start = { 0xfd998500, 0xfd9a0500 },
+ .num_dsi_phy = 2,
+};
+
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm.c
new file mode 100644
index 000000000..4c1bf55c5
--- /dev/null
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm.c
@@ -0,0 +1,822 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+
+#include "dsi_phy.h"
+#include "dsi.xml.h"
+#include "dsi_phy_28nm.xml.h"
+
+/*
+ * DSI PLL 28nm - clock diagram (eg: DSI0):
+ *
+ * dsi0analog_postdiv_clk
+ * | dsi0indirect_path_div2_clk
+ * | |
+ * +------+ | +----+ | |\ dsi0byte_mux
+ * dsi0vco_clk --o--| DIV1 |--o--| /2 |--o--| \ |
+ * | +------+ +----+ | m| | +----+
+ * | | u|--o--| /4 |-- dsi0pllbyte
+ * | | x| +----+
+ * o--------------------------| /
+ * | |/
+ * | +------+
+ * o----------| DIV3 |------------------------- dsi0pll
+ * +------+
+ */
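+
+/*
+ * Worked example with assumed values (illustrative only): with
+ * dsi0vco_clk at 750 MHz, DIV1 = 1 and the mux taking the div2 path,
+ * dsi0pllbyte = 750 MHz / 1 / 2 / 4 = 93.75 MHz; with DIV3 = 3,
+ * dsi0pll = 750 MHz / 3 = 250 MHz.
+ */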
+
+#define POLL_MAX_READS 10
+#define POLL_TIMEOUT_US 50
+
+#define VCO_REF_CLK_RATE 19200000
+#define VCO_MIN_RATE 350000000
+#define VCO_MAX_RATE 750000000
+
+/* v2.0.0 28nm LP implementation */
+#define DSI_PHY_28NM_QUIRK_PHY_LP BIT(0)
+
+#define LPFR_LUT_SIZE 10
+struct lpfr_cfg {
+ unsigned long vco_rate;
+ u32 resistance;
+};
+
+/* Loop filter resistance, keyed by the highest VCO rate each entry covers: */
+static const struct lpfr_cfg lpfr_lut[LPFR_LUT_SIZE] = {
+ { 479500000, 8 },
+ { 480000000, 11 },
+ { 575500000, 8 },
+ { 576000000, 12 },
+ { 610500000, 8 },
+ { 659500000, 9 },
+ { 671500000, 10 },
+ { 672000000, 14 },
+ { 708500000, 10 },
+ { 750000000, 11 },
+};
+
+struct pll_28nm_cached_state {
+ unsigned long vco_rate;
+ u8 postdiv3;
+ u8 postdiv1;
+ u8 byte_mux;
+};
+
+struct dsi_pll_28nm {
+ struct clk_hw clk_hw;
+
+ struct msm_dsi_phy *phy;
+
+ struct pll_28nm_cached_state cached_state;
+};
+
+#define to_pll_28nm(x) container_of(x, struct dsi_pll_28nm, clk_hw)
+
+static bool pll_28nm_poll_for_ready(struct dsi_pll_28nm *pll_28nm,
+ u32 nb_tries, u32 timeout_us)
+{
+ bool pll_locked = false;
+ u32 val;
+
+ while (nb_tries--) {
+ val = dsi_phy_read(pll_28nm->phy->pll_base + REG_DSI_28nm_PHY_PLL_STATUS);
+ pll_locked = !!(val & DSI_28nm_PHY_PLL_STATUS_PLL_RDY);
+
+ if (pll_locked)
+ break;
+
+ udelay(timeout_us);
+ }
+ DBG("DSI PLL is %slocked", pll_locked ? "" : "*not* ");
+
+ return pll_locked;
+}
+
+static void pll_28nm_software_reset(struct dsi_pll_28nm *pll_28nm)
+{
+ void __iomem *base = pll_28nm->phy->pll_base;
+
+ /*
+	 * Add HW recommended delays after toggling the software
+	 * reset bit on and back off.
+ */
+ dsi_phy_write_udelay(base + REG_DSI_28nm_PHY_PLL_TEST_CFG,
+ DSI_28nm_PHY_PLL_TEST_CFG_PLL_SW_RESET, 1);
+ dsi_phy_write_udelay(base + REG_DSI_28nm_PHY_PLL_TEST_CFG, 0x00, 1);
+}
+
+/*
+ * Clock Callbacks
+ */
+static int dsi_pll_28nm_clk_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct dsi_pll_28nm *pll_28nm = to_pll_28nm(hw);
+ struct device *dev = &pll_28nm->phy->pdev->dev;
+ void __iomem *base = pll_28nm->phy->pll_base;
+ unsigned long div_fbx1000, gen_vco_clk;
+ u32 refclk_cfg, frac_n_mode, frac_n_value;
+ u32 sdm_cfg0, sdm_cfg1, sdm_cfg2, sdm_cfg3;
+ u32 cal_cfg10, cal_cfg11;
+ u32 rem;
+ int i;
+
+ VERB("rate=%lu, parent's=%lu", rate, parent_rate);
+
+ /* Force postdiv2 to be div-4 */
+ dsi_phy_write(base + REG_DSI_28nm_PHY_PLL_POSTDIV2_CFG, 3);
+
+ /* Configure the Loop filter resistance */
+ for (i = 0; i < LPFR_LUT_SIZE; i++)
+ if (rate <= lpfr_lut[i].vco_rate)
+ break;
+ if (i == LPFR_LUT_SIZE) {
+ DRM_DEV_ERROR(dev, "unable to get loop filter resistance. vco=%lu\n",
+ rate);
+ return -EINVAL;
+ }
+ dsi_phy_write(base + REG_DSI_28nm_PHY_PLL_LPFR_CFG, lpfr_lut[i].resistance);
+
+ /* Loop filter capacitance values: c1 and c2 */
+ dsi_phy_write(base + REG_DSI_28nm_PHY_PLL_LPFC1_CFG, 0x70);
+ dsi_phy_write(base + REG_DSI_28nm_PHY_PLL_LPFC2_CFG, 0x15);
+
+ rem = rate % VCO_REF_CLK_RATE;
+ if (rem) {
+ refclk_cfg = DSI_28nm_PHY_PLL_REFCLK_CFG_DBLR;
+ frac_n_mode = 1;
+ div_fbx1000 = rate / (VCO_REF_CLK_RATE / 500);
+ gen_vco_clk = div_fbx1000 * (VCO_REF_CLK_RATE / 500);
+ } else {
+ refclk_cfg = 0x0;
+ frac_n_mode = 0;
+ div_fbx1000 = rate / (VCO_REF_CLK_RATE / 1000);
+ gen_vco_clk = div_fbx1000 * (VCO_REF_CLK_RATE / 1000);
+ }
+
+ DBG("refclk_cfg = %d", refclk_cfg);
+
+ rem = div_fbx1000 % 1000;
+ frac_n_value = (rem << 16) / 1000;
+
+ DBG("div_fb = %lu", div_fbx1000);
+ DBG("frac_n_value = %d", frac_n_value);
+
+ DBG("Generated VCO Clock: %lu", gen_vco_clk);
+ rem = 0;
+ sdm_cfg1 = dsi_phy_read(base + REG_DSI_28nm_PHY_PLL_SDM_CFG1);
+ sdm_cfg1 &= ~DSI_28nm_PHY_PLL_SDM_CFG1_DC_OFFSET__MASK;
+ if (frac_n_mode) {
+ sdm_cfg0 = 0x0;
+ sdm_cfg0 |= DSI_28nm_PHY_PLL_SDM_CFG0_BYP_DIV(0);
+ sdm_cfg1 |= DSI_28nm_PHY_PLL_SDM_CFG1_DC_OFFSET(
+ (u32)(((div_fbx1000 / 1000) & 0x3f) - 1));
+ sdm_cfg3 = frac_n_value >> 8;
+ sdm_cfg2 = frac_n_value & 0xff;
+ } else {
+ sdm_cfg0 = DSI_28nm_PHY_PLL_SDM_CFG0_BYP;
+ sdm_cfg0 |= DSI_28nm_PHY_PLL_SDM_CFG0_BYP_DIV(
+ (u32)(((div_fbx1000 / 1000) & 0x3f) - 1));
+ sdm_cfg1 |= DSI_28nm_PHY_PLL_SDM_CFG1_DC_OFFSET(0);
+ sdm_cfg2 = 0;
+ sdm_cfg3 = 0;
+ }
+
+ DBG("sdm_cfg0=%d", sdm_cfg0);
+ DBG("sdm_cfg1=%d", sdm_cfg1);
+ DBG("sdm_cfg2=%d", sdm_cfg2);
+ DBG("sdm_cfg3=%d", sdm_cfg3);
+
+ cal_cfg11 = (u32)(gen_vco_clk / (256 * 1000000));
+ cal_cfg10 = (u32)((gen_vco_clk % (256 * 1000000)) / 1000000);
+ DBG("cal_cfg10=%d, cal_cfg11=%d", cal_cfg10, cal_cfg11);
+
+ dsi_phy_write(base + REG_DSI_28nm_PHY_PLL_CHGPUMP_CFG, 0x02);
+ dsi_phy_write(base + REG_DSI_28nm_PHY_PLL_CAL_CFG3, 0x2b);
+ dsi_phy_write(base + REG_DSI_28nm_PHY_PLL_CAL_CFG4, 0x06);
+ dsi_phy_write(base + REG_DSI_28nm_PHY_PLL_LKDET_CFG2, 0x0d);
+
+ dsi_phy_write(base + REG_DSI_28nm_PHY_PLL_SDM_CFG1, sdm_cfg1);
+ dsi_phy_write(base + REG_DSI_28nm_PHY_PLL_SDM_CFG2,
+ DSI_28nm_PHY_PLL_SDM_CFG2_FREQ_SEED_7_0(sdm_cfg2));
+ dsi_phy_write(base + REG_DSI_28nm_PHY_PLL_SDM_CFG3,
+ DSI_28nm_PHY_PLL_SDM_CFG3_FREQ_SEED_15_8(sdm_cfg3));
+ dsi_phy_write(base + REG_DSI_28nm_PHY_PLL_SDM_CFG4, 0x00);
+
+ /* Add hardware recommended delay for correct PLL configuration */
+ if (pll_28nm->phy->cfg->quirks & DSI_PHY_28NM_QUIRK_PHY_LP)
+ udelay(1000);
+ else
+ udelay(1);
+
+ dsi_phy_write(base + REG_DSI_28nm_PHY_PLL_REFCLK_CFG, refclk_cfg);
+ dsi_phy_write(base + REG_DSI_28nm_PHY_PLL_PWRGEN_CFG, 0x00);
+ dsi_phy_write(base + REG_DSI_28nm_PHY_PLL_VCOLPF_CFG, 0x31);
+ dsi_phy_write(base + REG_DSI_28nm_PHY_PLL_SDM_CFG0, sdm_cfg0);
+ dsi_phy_write(base + REG_DSI_28nm_PHY_PLL_CAL_CFG0, 0x12);
+ dsi_phy_write(base + REG_DSI_28nm_PHY_PLL_CAL_CFG6, 0x30);
+ dsi_phy_write(base + REG_DSI_28nm_PHY_PLL_CAL_CFG7, 0x00);
+ dsi_phy_write(base + REG_DSI_28nm_PHY_PLL_CAL_CFG8, 0x60);
+ dsi_phy_write(base + REG_DSI_28nm_PHY_PLL_CAL_CFG9, 0x00);
+ dsi_phy_write(base + REG_DSI_28nm_PHY_PLL_CAL_CFG10, cal_cfg10 & 0xff);
+ dsi_phy_write(base + REG_DSI_28nm_PHY_PLL_CAL_CFG11, cal_cfg11 & 0xff);
+ dsi_phy_write(base + REG_DSI_28nm_PHY_PLL_EFUSE_CFG, 0x20);
+
+ return 0;
+}
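+
+/*
+ * Worked example with an assumed rate (illustrative only): for
+ * rate = 576 MHz, 576000000 % 19200000 == 0, so the PLL runs in integer
+ * mode with the doubler off: div_fbx1000 = 576000000 / 19200 = 30000,
+ * the bypass divider is programmed with 30 - 1 = 29, and frac_n_value = 0.
+ */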
+
+static int dsi_pll_28nm_clk_is_enabled(struct clk_hw *hw)
+{
+ struct dsi_pll_28nm *pll_28nm = to_pll_28nm(hw);
+
+ return pll_28nm_poll_for_ready(pll_28nm, POLL_MAX_READS,
+ POLL_TIMEOUT_US);
+}
+
+static unsigned long dsi_pll_28nm_clk_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct dsi_pll_28nm *pll_28nm = to_pll_28nm(hw);
+ void __iomem *base = pll_28nm->phy->pll_base;
+ u32 sdm0, doubler, sdm_byp_div;
+ u32 sdm_dc_off, sdm_freq_seed, sdm2, sdm3;
+ u32 ref_clk = VCO_REF_CLK_RATE;
+ unsigned long vco_rate;
+
+ VERB("parent_rate=%lu", parent_rate);
+
+ /* Check to see if the ref clk doubler is enabled */
+ doubler = dsi_phy_read(base + REG_DSI_28nm_PHY_PLL_REFCLK_CFG) &
+ DSI_28nm_PHY_PLL_REFCLK_CFG_DBLR;
+ ref_clk += (doubler * VCO_REF_CLK_RATE);
+
+ /* see if it is integer mode or sdm mode */
+ sdm0 = dsi_phy_read(base + REG_DSI_28nm_PHY_PLL_SDM_CFG0);
+ if (sdm0 & DSI_28nm_PHY_PLL_SDM_CFG0_BYP) {
+ /* integer mode */
+ sdm_byp_div = FIELD(
+ dsi_phy_read(base + REG_DSI_28nm_PHY_PLL_SDM_CFG0),
+ DSI_28nm_PHY_PLL_SDM_CFG0_BYP_DIV) + 1;
+ vco_rate = ref_clk * sdm_byp_div;
+ } else {
+ /* sdm mode */
+ sdm_dc_off = FIELD(
+ dsi_phy_read(base + REG_DSI_28nm_PHY_PLL_SDM_CFG1),
+ DSI_28nm_PHY_PLL_SDM_CFG1_DC_OFFSET);
+ DBG("sdm_dc_off = %d", sdm_dc_off);
+ sdm2 = FIELD(dsi_phy_read(base + REG_DSI_28nm_PHY_PLL_SDM_CFG2),
+ DSI_28nm_PHY_PLL_SDM_CFG2_FREQ_SEED_7_0);
+ sdm3 = FIELD(dsi_phy_read(base + REG_DSI_28nm_PHY_PLL_SDM_CFG3),
+ DSI_28nm_PHY_PLL_SDM_CFG3_FREQ_SEED_15_8);
+ sdm_freq_seed = (sdm3 << 8) | sdm2;
+ DBG("sdm_freq_seed = %d", sdm_freq_seed);
+
+ vco_rate = (ref_clk * (sdm_dc_off + 1)) +
+ mult_frac(ref_clk, sdm_freq_seed, BIT(16));
+ DBG("vco rate = %lu", vco_rate);
+ }
+
+ DBG("returning vco rate = %lu", vco_rate);
+
+ return vco_rate;
+}
+
+static int _dsi_pll_28nm_vco_prepare_hpm(struct dsi_pll_28nm *pll_28nm)
+{
+ struct device *dev = &pll_28nm->phy->pdev->dev;
+ void __iomem *base = pll_28nm->phy->pll_base;
+ u32 max_reads = 5, timeout_us = 100;
+ bool locked;
+ u32 val;
+ int i;
+
+ DBG("id=%d", pll_28nm->phy->id);
+
+ pll_28nm_software_reset(pll_28nm);
+
+ /*
+ * PLL power up sequence.
+ * Add necessary delays recommended by hardware.
+ */
+ val = DSI_28nm_PHY_PLL_GLB_CFG_PLL_PWRDN_B;
+ dsi_phy_write_udelay(base + REG_DSI_28nm_PHY_PLL_GLB_CFG, val, 1);
+
+ val |= DSI_28nm_PHY_PLL_GLB_CFG_PLL_PWRGEN_PWRDN_B;
+ dsi_phy_write_udelay(base + REG_DSI_28nm_PHY_PLL_GLB_CFG, val, 200);
+
+ val |= DSI_28nm_PHY_PLL_GLB_CFG_PLL_LDO_PWRDN_B;
+ dsi_phy_write_udelay(base + REG_DSI_28nm_PHY_PLL_GLB_CFG, val, 500);
+
+ val |= DSI_28nm_PHY_PLL_GLB_CFG_PLL_ENABLE;
+ dsi_phy_write_udelay(base + REG_DSI_28nm_PHY_PLL_GLB_CFG, val, 600);
+
+ for (i = 0; i < 2; i++) {
+ /* DSI Uniphy lock detect setting */
+ dsi_phy_write_udelay(base + REG_DSI_28nm_PHY_PLL_LKDET_CFG2,
+ 0x0c, 100);
+ dsi_phy_write(base + REG_DSI_28nm_PHY_PLL_LKDET_CFG2, 0x0d);
+
+ /* poll for PLL ready status */
+ locked = pll_28nm_poll_for_ready(pll_28nm, max_reads,
+ timeout_us);
+ if (locked)
+ break;
+
+ pll_28nm_software_reset(pll_28nm);
+
+ /*
+ * PLL power up sequence.
+ * Add necessary delays recommended by hardware.
+ */
+ val = DSI_28nm_PHY_PLL_GLB_CFG_PLL_PWRDN_B;
+ dsi_phy_write_udelay(base + REG_DSI_28nm_PHY_PLL_GLB_CFG, val, 1);
+
+ val |= DSI_28nm_PHY_PLL_GLB_CFG_PLL_PWRGEN_PWRDN_B;
+ dsi_phy_write_udelay(base + REG_DSI_28nm_PHY_PLL_GLB_CFG, val, 200);
+
+ val |= DSI_28nm_PHY_PLL_GLB_CFG_PLL_LDO_PWRDN_B;
+ dsi_phy_write_udelay(base + REG_DSI_28nm_PHY_PLL_GLB_CFG, val, 250);
+
+ val &= ~DSI_28nm_PHY_PLL_GLB_CFG_PLL_LDO_PWRDN_B;
+ dsi_phy_write_udelay(base + REG_DSI_28nm_PHY_PLL_GLB_CFG, val, 200);
+
+ val |= DSI_28nm_PHY_PLL_GLB_CFG_PLL_LDO_PWRDN_B;
+ dsi_phy_write_udelay(base + REG_DSI_28nm_PHY_PLL_GLB_CFG, val, 500);
+
+ val |= DSI_28nm_PHY_PLL_GLB_CFG_PLL_ENABLE;
+ dsi_phy_write_udelay(base + REG_DSI_28nm_PHY_PLL_GLB_CFG, val, 600);
+ }
+
+ if (unlikely(!locked))
+ DRM_DEV_ERROR(dev, "DSI PLL lock failed\n");
+ else
+ DBG("DSI PLL Lock success");
+
+ return locked ? 0 : -EINVAL;
+}
+
+static int dsi_pll_28nm_vco_prepare_hpm(struct clk_hw *hw)
+{
+ struct dsi_pll_28nm *pll_28nm = to_pll_28nm(hw);
+ int i, ret;
+
+ if (unlikely(pll_28nm->phy->pll_on))
+ return 0;
+
+ for (i = 0; i < 3; i++) {
+ ret = _dsi_pll_28nm_vco_prepare_hpm(pll_28nm);
+ if (!ret) {
+ pll_28nm->phy->pll_on = true;
+ return 0;
+ }
+ }
+
+ return ret;
+}
+
+static int dsi_pll_28nm_vco_prepare_lp(struct clk_hw *hw)
+{
+ struct dsi_pll_28nm *pll_28nm = to_pll_28nm(hw);
+ struct device *dev = &pll_28nm->phy->pdev->dev;
+ void __iomem *base = pll_28nm->phy->pll_base;
+ bool locked;
+ u32 max_reads = 10, timeout_us = 50;
+ u32 val;
+
+ DBG("id=%d", pll_28nm->phy->id);
+
+ if (unlikely(pll_28nm->phy->pll_on))
+ return 0;
+
+ pll_28nm_software_reset(pll_28nm);
+
+ /*
+ * PLL power up sequence.
+ * Add necessary delays recommended by hardware.
+ */
+ dsi_phy_write_ndelay(base + REG_DSI_28nm_PHY_PLL_CAL_CFG1, 0x34, 500);
+
+ val = DSI_28nm_PHY_PLL_GLB_CFG_PLL_PWRDN_B;
+ dsi_phy_write_ndelay(base + REG_DSI_28nm_PHY_PLL_GLB_CFG, val, 500);
+
+ val |= DSI_28nm_PHY_PLL_GLB_CFG_PLL_PWRGEN_PWRDN_B;
+ dsi_phy_write_ndelay(base + REG_DSI_28nm_PHY_PLL_GLB_CFG, val, 500);
+
+ val |= DSI_28nm_PHY_PLL_GLB_CFG_PLL_LDO_PWRDN_B |
+ DSI_28nm_PHY_PLL_GLB_CFG_PLL_ENABLE;
+ dsi_phy_write_ndelay(base + REG_DSI_28nm_PHY_PLL_GLB_CFG, val, 500);
+
+ /* DSI PLL toggle lock detect setting */
+ dsi_phy_write_ndelay(base + REG_DSI_28nm_PHY_PLL_LKDET_CFG2, 0x04, 500);
+ dsi_phy_write_udelay(base + REG_DSI_28nm_PHY_PLL_LKDET_CFG2, 0x05, 512);
+
+ locked = pll_28nm_poll_for_ready(pll_28nm, max_reads, timeout_us);
+
+ if (unlikely(!locked)) {
+ DRM_DEV_ERROR(dev, "DSI PLL lock failed\n");
+ return -EINVAL;
+ }
+
+ DBG("DSI PLL lock success");
+ pll_28nm->phy->pll_on = true;
+
+ return 0;
+}
+
+static void dsi_pll_28nm_vco_unprepare(struct clk_hw *hw)
+{
+ struct dsi_pll_28nm *pll_28nm = to_pll_28nm(hw);
+
+ DBG("id=%d", pll_28nm->phy->id);
+
+ if (unlikely(!pll_28nm->phy->pll_on))
+ return;
+
+ dsi_phy_write(pll_28nm->phy->pll_base + REG_DSI_28nm_PHY_PLL_GLB_CFG, 0x00);
+
+ pll_28nm->phy->pll_on = false;
+}
+
+static long dsi_pll_28nm_clk_round_rate(struct clk_hw *hw,
+ unsigned long rate, unsigned long *parent_rate)
+{
+ struct dsi_pll_28nm *pll_28nm = to_pll_28nm(hw);
+
+ if (rate < pll_28nm->phy->cfg->min_pll_rate)
+ return pll_28nm->phy->cfg->min_pll_rate;
+ else if (rate > pll_28nm->phy->cfg->max_pll_rate)
+ return pll_28nm->phy->cfg->max_pll_rate;
+ else
+ return rate;
+}
+
+static const struct clk_ops clk_ops_dsi_pll_28nm_vco_hpm = {
+ .round_rate = dsi_pll_28nm_clk_round_rate,
+ .set_rate = dsi_pll_28nm_clk_set_rate,
+ .recalc_rate = dsi_pll_28nm_clk_recalc_rate,
+ .prepare = dsi_pll_28nm_vco_prepare_hpm,
+ .unprepare = dsi_pll_28nm_vco_unprepare,
+ .is_enabled = dsi_pll_28nm_clk_is_enabled,
+};
+
+static const struct clk_ops clk_ops_dsi_pll_28nm_vco_lp = {
+ .round_rate = dsi_pll_28nm_clk_round_rate,
+ .set_rate = dsi_pll_28nm_clk_set_rate,
+ .recalc_rate = dsi_pll_28nm_clk_recalc_rate,
+ .prepare = dsi_pll_28nm_vco_prepare_lp,
+ .unprepare = dsi_pll_28nm_vco_unprepare,
+ .is_enabled = dsi_pll_28nm_clk_is_enabled,
+};
+
+/*
+ * PLL Callbacks
+ */
+
+static void dsi_28nm_pll_save_state(struct msm_dsi_phy *phy)
+{
+ struct dsi_pll_28nm *pll_28nm = to_pll_28nm(phy->vco_hw);
+ struct pll_28nm_cached_state *cached_state = &pll_28nm->cached_state;
+ void __iomem *base = pll_28nm->phy->pll_base;
+
+ cached_state->postdiv3 =
+ dsi_phy_read(base + REG_DSI_28nm_PHY_PLL_POSTDIV3_CFG);
+ cached_state->postdiv1 =
+ dsi_phy_read(base + REG_DSI_28nm_PHY_PLL_POSTDIV1_CFG);
+ cached_state->byte_mux = dsi_phy_read(base + REG_DSI_28nm_PHY_PLL_VREG_CFG);
+ if (dsi_pll_28nm_clk_is_enabled(phy->vco_hw))
+ cached_state->vco_rate = clk_hw_get_rate(phy->vco_hw);
+ else
+ cached_state->vco_rate = 0;
+}
+
+static int dsi_28nm_pll_restore_state(struct msm_dsi_phy *phy)
+{
+ struct dsi_pll_28nm *pll_28nm = to_pll_28nm(phy->vco_hw);
+ struct pll_28nm_cached_state *cached_state = &pll_28nm->cached_state;
+ void __iomem *base = pll_28nm->phy->pll_base;
+ int ret;
+
+ ret = dsi_pll_28nm_clk_set_rate(phy->vco_hw,
+ cached_state->vco_rate, 0);
+ if (ret) {
+ DRM_DEV_ERROR(&pll_28nm->phy->pdev->dev,
+ "restore vco rate failed. ret=%d\n", ret);
+ return ret;
+ }
+
+ dsi_phy_write(base + REG_DSI_28nm_PHY_PLL_POSTDIV3_CFG,
+ cached_state->postdiv3);
+ dsi_phy_write(base + REG_DSI_28nm_PHY_PLL_POSTDIV1_CFG,
+ cached_state->postdiv1);
+ dsi_phy_write(base + REG_DSI_28nm_PHY_PLL_VREG_CFG,
+ cached_state->byte_mux);
+
+ return 0;
+}
+
+static int pll_28nm_register(struct dsi_pll_28nm *pll_28nm, struct clk_hw **provided_clocks)
+{
+ char clk_name[32];
+ struct clk_init_data vco_init = {
+ .parent_data = &(const struct clk_parent_data) {
+ .fw_name = "ref", .name = "xo",
+ },
+ .num_parents = 1,
+ .name = clk_name,
+ .flags = CLK_IGNORE_UNUSED,
+ };
+ struct device *dev = &pll_28nm->phy->pdev->dev;
+ struct clk_hw *hw, *analog_postdiv, *indirect_path_div2, *byte_mux;
+ int ret;
+
+ DBG("%d", pll_28nm->phy->id);
+
+ if (pll_28nm->phy->cfg->quirks & DSI_PHY_28NM_QUIRK_PHY_LP)
+ vco_init.ops = &clk_ops_dsi_pll_28nm_vco_lp;
+ else
+ vco_init.ops = &clk_ops_dsi_pll_28nm_vco_hpm;
+
+ snprintf(clk_name, sizeof(clk_name), "dsi%dvco_clk", pll_28nm->phy->id);
+ pll_28nm->clk_hw.init = &vco_init;
+ ret = devm_clk_hw_register(dev, &pll_28nm->clk_hw);
+ if (ret)
+ return ret;
+
+ snprintf(clk_name, sizeof(clk_name), "dsi%danalog_postdiv_clk", pll_28nm->phy->id);
+ analog_postdiv = devm_clk_hw_register_divider_parent_hw(dev, clk_name,
+ &pll_28nm->clk_hw, CLK_SET_RATE_PARENT,
+ pll_28nm->phy->pll_base +
+ REG_DSI_28nm_PHY_PLL_POSTDIV1_CFG,
+ 0, 4, 0, NULL);
+ if (IS_ERR(analog_postdiv))
+ return PTR_ERR(analog_postdiv);
+
+ snprintf(clk_name, sizeof(clk_name), "dsi%dindirect_path_div2_clk", pll_28nm->phy->id);
+ indirect_path_div2 = devm_clk_hw_register_fixed_factor_parent_hw(dev,
+ clk_name, analog_postdiv, CLK_SET_RATE_PARENT, 1, 2);
+ if (IS_ERR(indirect_path_div2))
+ return PTR_ERR(indirect_path_div2);
+
+ snprintf(clk_name, sizeof(clk_name), "dsi%dpll", pll_28nm->phy->id);
+ hw = devm_clk_hw_register_divider_parent_hw(dev, clk_name,
+ &pll_28nm->clk_hw, 0, pll_28nm->phy->pll_base +
+ REG_DSI_28nm_PHY_PLL_POSTDIV3_CFG,
+ 0, 8, 0, NULL);
+ if (IS_ERR(hw))
+ return PTR_ERR(hw);
+ provided_clocks[DSI_PIXEL_PLL_CLK] = hw;
+
+ snprintf(clk_name, sizeof(clk_name), "dsi%dbyte_mux", pll_28nm->phy->id);
+ byte_mux = devm_clk_hw_register_mux_parent_hws(dev, clk_name,
+ ((const struct clk_hw *[]){
+ &pll_28nm->clk_hw,
+ indirect_path_div2,
+ }), 2, CLK_SET_RATE_PARENT, pll_28nm->phy->pll_base +
+ REG_DSI_28nm_PHY_PLL_VREG_CFG, 1, 1, 0, NULL);
+ if (IS_ERR(byte_mux))
+ return PTR_ERR(byte_mux);
+
+ snprintf(clk_name, sizeof(clk_name), "dsi%dpllbyte", pll_28nm->phy->id);
+ hw = devm_clk_hw_register_fixed_factor_parent_hw(dev, clk_name,
+ byte_mux, CLK_SET_RATE_PARENT, 1, 4);
+ if (IS_ERR(hw))
+ return PTR_ERR(hw);
+ provided_clocks[DSI_BYTE_PLL_CLK] = hw;
+
+ return 0;
+}
+
+static int dsi_pll_28nm_init(struct msm_dsi_phy *phy)
+{
+ struct platform_device *pdev = phy->pdev;
+ struct dsi_pll_28nm *pll_28nm;
+ int ret;
+
+ if (!pdev)
+ return -ENODEV;
+
+ pll_28nm = devm_kzalloc(&pdev->dev, sizeof(*pll_28nm), GFP_KERNEL);
+ if (!pll_28nm)
+ return -ENOMEM;
+
+ pll_28nm->phy = phy;
+
+ ret = pll_28nm_register(pll_28nm, phy->provided_clocks->hws);
+ if (ret) {
+ DRM_DEV_ERROR(&pdev->dev, "failed to register PLL: %d\n", ret);
+ return ret;
+ }
+
+ phy->vco_hw = &pll_28nm->clk_hw;
+
+ return 0;
+}
+
+static void dsi_28nm_dphy_set_timing(struct msm_dsi_phy *phy,
+ struct msm_dsi_dphy_timing *timing)
+{
+ void __iomem *base = phy->base;
+
+ dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_0,
+ DSI_28nm_PHY_TIMING_CTRL_0_CLK_ZERO(timing->clk_zero));
+ dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_1,
+ DSI_28nm_PHY_TIMING_CTRL_1_CLK_TRAIL(timing->clk_trail));
+ dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_2,
+ DSI_28nm_PHY_TIMING_CTRL_2_CLK_PREPARE(timing->clk_prepare));
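+ /* clk_zero is a 9-bit value; its top bit (bit 8) is set via TIMING_CTRL_3 */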
+ if (timing->clk_zero & BIT(8))
+ dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_3,
+ DSI_28nm_PHY_TIMING_CTRL_3_CLK_ZERO_8);
+ dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_4,
+ DSI_28nm_PHY_TIMING_CTRL_4_HS_EXIT(timing->hs_exit));
+ dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_5,
+ DSI_28nm_PHY_TIMING_CTRL_5_HS_ZERO(timing->hs_zero));
+ dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_6,
+ DSI_28nm_PHY_TIMING_CTRL_6_HS_PREPARE(timing->hs_prepare));
+ dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_7,
+ DSI_28nm_PHY_TIMING_CTRL_7_HS_TRAIL(timing->hs_trail));
+ dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_8,
+ DSI_28nm_PHY_TIMING_CTRL_8_HS_RQST(timing->hs_rqst));
+ dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_9,
+ DSI_28nm_PHY_TIMING_CTRL_9_TA_GO(timing->ta_go) |
+ DSI_28nm_PHY_TIMING_CTRL_9_TA_SURE(timing->ta_sure));
+ dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_10,
+ DSI_28nm_PHY_TIMING_CTRL_10_TA_GET(timing->ta_get));
+ dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_11,
+ DSI_28nm_PHY_TIMING_CTRL_11_TRIG3_CMD(0));
+}
+
+static void dsi_28nm_phy_regulator_enable_dcdc(struct msm_dsi_phy *phy)
+{
+ void __iomem *base = phy->reg_base;
+
+ dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_0, 0x0);
+ dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CAL_PWR_CFG, 1);
+ dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_5, 0);
+ dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_3, 0);
+ dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_2, 0x3);
+ dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_1, 0x9);
+ dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_0, 0x7);
+ dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_4, 0x20);
+ dsi_phy_write(phy->base + REG_DSI_28nm_PHY_LDO_CNTRL, 0x00);
+}
+
+static void dsi_28nm_phy_regulator_enable_ldo(struct msm_dsi_phy *phy)
+{
+ void __iomem *base = phy->reg_base;
+
+ dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_0, 0x0);
+ dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CAL_PWR_CFG, 0);
+ dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_5, 0x7);
+ dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_3, 0);
+ dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_2, 0x1);
+ dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_1, 0x1);
+ dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_4, 0x20);
+
+ if (phy->cfg->quirks & DSI_PHY_28NM_QUIRK_PHY_LP)
+ dsi_phy_write(phy->base + REG_DSI_28nm_PHY_LDO_CNTRL, 0x05);
+ else
+ dsi_phy_write(phy->base + REG_DSI_28nm_PHY_LDO_CNTRL, 0x0d);
+}
+
+static void dsi_28nm_phy_regulator_ctrl(struct msm_dsi_phy *phy, bool enable)
+{
+ if (!enable) {
+ dsi_phy_write(phy->reg_base +
+ REG_DSI_28nm_PHY_REGULATOR_CAL_PWR_CFG, 0);
+ return;
+ }
+
+ if (phy->regulator_ldo_mode)
+ dsi_28nm_phy_regulator_enable_ldo(phy);
+ else
+ dsi_28nm_phy_regulator_enable_dcdc(phy);
+}
+
+static int dsi_28nm_phy_enable(struct msm_dsi_phy *phy,
+ struct msm_dsi_phy_clk_request *clk_req)
+{
+ struct msm_dsi_dphy_timing *timing = &phy->timing;
+ int i;
+ void __iomem *base = phy->base;
+ u32 val;
+
+ DBG("");
+
+ if (msm_dsi_dphy_timing_calc(timing, clk_req)) {
+ DRM_DEV_ERROR(&phy->pdev->dev,
+ "%s: D-PHY timing calculation failed\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ dsi_phy_write(base + REG_DSI_28nm_PHY_STRENGTH_0, 0xff);
+
+ dsi_28nm_phy_regulator_ctrl(phy, true);
+
+ dsi_28nm_dphy_set_timing(phy, timing);
+
+ dsi_phy_write(base + REG_DSI_28nm_PHY_CTRL_1, 0x00);
+ dsi_phy_write(base + REG_DSI_28nm_PHY_CTRL_0, 0x5f);
+
+ dsi_phy_write(base + REG_DSI_28nm_PHY_STRENGTH_1, 0x6);
+
+ for (i = 0; i < 4; i++) {
+ dsi_phy_write(base + REG_DSI_28nm_PHY_LN_CFG_0(i), 0);
+ dsi_phy_write(base + REG_DSI_28nm_PHY_LN_CFG_1(i), 0);
+ dsi_phy_write(base + REG_DSI_28nm_PHY_LN_CFG_2(i), 0);
+ dsi_phy_write(base + REG_DSI_28nm_PHY_LN_CFG_3(i), 0);
+ dsi_phy_write(base + REG_DSI_28nm_PHY_LN_CFG_4(i), 0);
+ dsi_phy_write(base + REG_DSI_28nm_PHY_LN_TEST_DATAPATH(i), 0);
+ dsi_phy_write(base + REG_DSI_28nm_PHY_LN_DEBUG_SEL(i), 0);
+ dsi_phy_write(base + REG_DSI_28nm_PHY_LN_TEST_STR_0(i), 0x1);
+ dsi_phy_write(base + REG_DSI_28nm_PHY_LN_TEST_STR_1(i), 0x97);
+ }
+
+ dsi_phy_write(base + REG_DSI_28nm_PHY_LNCK_CFG_4, 0);
+ dsi_phy_write(base + REG_DSI_28nm_PHY_LNCK_CFG_1, 0xc0);
+ dsi_phy_write(base + REG_DSI_28nm_PHY_LNCK_TEST_STR0, 0x1);
+ dsi_phy_write(base + REG_DSI_28nm_PHY_LNCK_TEST_STR1, 0xbb);
+
+ dsi_phy_write(base + REG_DSI_28nm_PHY_CTRL_0, 0x5f);
+
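+ /*
+ * Editorial note: in bonded dual-DSI mode the slave PHY (DSI_1)
+ * takes its high-speed bit clock from the master, so BITCLK_HS_SEL
+ * is cleared for it and set in every other configuration.
+ */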
+ val = dsi_phy_read(base + REG_DSI_28nm_PHY_GLBL_TEST_CTRL);
+ if (phy->id == DSI_1 && phy->usecase == MSM_DSI_PHY_SLAVE)
+ val &= ~DSI_28nm_PHY_GLBL_TEST_CTRL_BITCLK_HS_SEL;
+ else
+ val |= DSI_28nm_PHY_GLBL_TEST_CTRL_BITCLK_HS_SEL;
+ dsi_phy_write(base + REG_DSI_28nm_PHY_GLBL_TEST_CTRL, val);
+
+ return 0;
+}
+
+static void dsi_28nm_phy_disable(struct msm_dsi_phy *phy)
+{
+ dsi_phy_write(phy->base + REG_DSI_28nm_PHY_CTRL_0, 0);
+ dsi_28nm_phy_regulator_ctrl(phy, false);
+
+ /*
+ * Wait for the register writes to complete in order to
+ * ensure that the PHY is completely disabled
+ */
+ wmb();
+}
+
+static const struct regulator_bulk_data dsi_phy_28nm_regulators[] = {
+ { .supply = "vddio", .init_load_uA = 100000 },
+};
+
+const struct msm_dsi_phy_cfg dsi_phy_28nm_hpm_cfgs = {
+ .has_phy_regulator = true,
+ .regulator_data = dsi_phy_28nm_regulators,
+ .num_regulators = ARRAY_SIZE(dsi_phy_28nm_regulators),
+ .ops = {
+ .enable = dsi_28nm_phy_enable,
+ .disable = dsi_28nm_phy_disable,
+ .pll_init = dsi_pll_28nm_init,
+ .save_pll_state = dsi_28nm_pll_save_state,
+ .restore_pll_state = dsi_28nm_pll_restore_state,
+ },
+ .min_pll_rate = VCO_MIN_RATE,
+ .max_pll_rate = VCO_MAX_RATE,
+ .io_start = { 0xfd922b00, 0xfd923100 },
+ .num_dsi_phy = 2,
+};
+
+const struct msm_dsi_phy_cfg dsi_phy_28nm_hpm_famb_cfgs = {
+ .has_phy_regulator = true,
+ .regulator_data = dsi_phy_28nm_regulators,
+ .num_regulators = ARRAY_SIZE(dsi_phy_28nm_regulators),
+ .ops = {
+ .enable = dsi_28nm_phy_enable,
+ .disable = dsi_28nm_phy_disable,
+ .pll_init = dsi_pll_28nm_init,
+ .save_pll_state = dsi_28nm_pll_save_state,
+ .restore_pll_state = dsi_28nm_pll_restore_state,
+ },
+ .min_pll_rate = VCO_MIN_RATE,
+ .max_pll_rate = VCO_MAX_RATE,
+ .io_start = { 0x1a94400, 0x1a96400 },
+ .num_dsi_phy = 2,
+};
+
+const struct msm_dsi_phy_cfg dsi_phy_28nm_lp_cfgs = {
+ .has_phy_regulator = true,
+ .regulator_data = dsi_phy_28nm_regulators,
+ .num_regulators = ARRAY_SIZE(dsi_phy_28nm_regulators),
+ .ops = {
+ .enable = dsi_28nm_phy_enable,
+ .disable = dsi_28nm_phy_disable,
+ .pll_init = dsi_pll_28nm_init,
+ .save_pll_state = dsi_28nm_pll_save_state,
+ .restore_pll_state = dsi_28nm_pll_restore_state,
+ },
+ .min_pll_rate = VCO_MIN_RATE,
+ .max_pll_rate = VCO_MAX_RATE,
+ .io_start = { 0x1a98500 },
+ .num_dsi_phy = 1,
+ .quirks = DSI_PHY_28NM_QUIRK_PHY_LP,
+};
+
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm_8960.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm_8960.c
new file mode 100644
index 000000000..26c08047e
--- /dev/null
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm_8960.c
@@ -0,0 +1,660 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2012-2015, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/delay.h>
+
+#include "dsi_phy.h"
+#include "dsi.xml.h"
+#include "dsi_phy_28nm_8960.xml.h"
+
+/*
+ * DSI PLL 28nm (8960/A family) - clock diagram (eg: DSI1):
+ *
+ *
+ * +------+
+ * dsi1vco_clk ----o-----| DIV1 |---dsi1pllbit (not exposed as clock)
+ * F * byte_clk | +------+
+ * | bit clock divider (F / 8)
+ * |
+ * | +------+
+ * o-----| DIV2 |---dsi0pllbyte---o---> To byte RCG
+ * | +------+ | (sets parent rate)
+ * | byte clock divider (F) |
+ * | |
+ * | o---> To esc RCG
+ * | (doesn't set parent rate)
+ * |
+ * | +------+
+ * o-----| DIV3 |----dsi0pll------o---> To dsi RCG
+ * +------+ | (sets parent rate)
+ * dsi clock divider (F * magic) |
+ * |
+ * o---> To pixel rcg
+ * (doesn't set parent rate)
+ */
+
+#define POLL_MAX_READS 8000
+#define POLL_TIMEOUT_US 1
+
+#define VCO_REF_CLK_RATE 27000000
+#define VCO_MIN_RATE 600000000
+#define VCO_MAX_RATE 1200000000
+
+#define VCO_PREF_DIV_RATIO 27
+
+struct pll_28nm_cached_state {
+ unsigned long vco_rate;
+ u8 postdiv3;
+ u8 postdiv2;
+ u8 postdiv1;
+};
+
+struct clk_bytediv {
+ struct clk_hw hw;
+ void __iomem *reg;
+};
+
+struct dsi_pll_28nm {
+ struct clk_hw clk_hw;
+
+ struct msm_dsi_phy *phy;
+
+ struct pll_28nm_cached_state cached_state;
+};
+
+#define to_pll_28nm(x) container_of(x, struct dsi_pll_28nm, clk_hw)
+
+static bool pll_28nm_poll_for_ready(struct dsi_pll_28nm *pll_28nm,
+ int nb_tries, int timeout_us)
+{
+ bool pll_locked = false;
+ u32 val;
+
+ while (nb_tries--) {
+ val = dsi_phy_read(pll_28nm->phy->pll_base + REG_DSI_28nm_8960_PHY_PLL_RDY);
+ pll_locked = !!(val & DSI_28nm_8960_PHY_PLL_RDY_PLL_RDY);
+
+ if (pll_locked)
+ break;
+
+ udelay(timeout_us);
+ }
+ DBG("DSI PLL is %slocked", pll_locked ? "" : "*not* ");
+
+ return pll_locked;
+}
+
+/*
+ * Clock Callbacks
+ */
+static int dsi_pll_28nm_clk_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct dsi_pll_28nm *pll_28nm = to_pll_28nm(hw);
+ void __iomem *base = pll_28nm->phy->pll_base;
+ u32 val, temp, fb_divider;
+
+ DBG("rate=%lu, parent's=%lu", rate, parent_rate);
+
+ temp = rate / 10;
+ val = VCO_REF_CLK_RATE / 10;
+ fb_divider = (temp * VCO_PREF_DIV_RATIO) / val;
+ fb_divider = fb_divider / 2 - 1;
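+ /*
+ * Worked example (editorial): for a 600 MHz VCO with the 27 MHz
+ * reference, temp = 60000000 and val = 2700000, so the full
+ * feedback divider is (60000000 * 27) / 2700000 = 600 and the
+ * register value becomes 600 / 2 - 1 = 299; recalc_rate() inverts
+ * this as (27 MHz / 27) * 300 * 2 = 600 MHz.
+ */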
+ dsi_phy_write(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_1,
+ fb_divider & 0xff);
+
+ val = dsi_phy_read(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_2);
+
+ val |= (fb_divider >> 8) & 0x07;
+
+ dsi_phy_write(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_2,
+ val);
+
+ val = dsi_phy_read(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_3);
+
+ val |= (VCO_PREF_DIV_RATIO - 1) & 0x3f;
+
+ dsi_phy_write(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_3,
+ val);
+
+ dsi_phy_write(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_6,
+ 0xf);
+
+ val = dsi_phy_read(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_8);
+ val |= 0x7 << 4;
+ dsi_phy_write(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_8,
+ val);
+
+ return 0;
+}
+
+static int dsi_pll_28nm_clk_is_enabled(struct clk_hw *hw)
+{
+ struct dsi_pll_28nm *pll_28nm = to_pll_28nm(hw);
+
+ return pll_28nm_poll_for_ready(pll_28nm, POLL_MAX_READS,
+ POLL_TIMEOUT_US);
+}
+
+static unsigned long dsi_pll_28nm_clk_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct dsi_pll_28nm *pll_28nm = to_pll_28nm(hw);
+ void __iomem *base = pll_28nm->phy->pll_base;
+ unsigned long vco_rate;
+ u32 status, fb_divider, temp, ref_divider;
+
+ VERB("parent_rate=%lu", parent_rate);
+
+ status = dsi_phy_read(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_0);
+
+ if (status & DSI_28nm_8960_PHY_PLL_CTRL_0_ENABLE) {
+ fb_divider = dsi_phy_read(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_1);
+ fb_divider &= 0xff;
+ temp = dsi_phy_read(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_2) & 0x07;
+ fb_divider = (temp << 8) | fb_divider;
+ fb_divider += 1;
+
+ ref_divider = dsi_phy_read(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_3);
+ ref_divider &= 0x3f;
+ ref_divider += 1;
+
+ /* multiply by 2 */
+ vco_rate = (parent_rate / ref_divider) * fb_divider * 2;
+ } else {
+ vco_rate = 0;
+ }
+
+ DBG("returning vco rate = %lu", vco_rate);
+
+ return vco_rate;
+}
+
+static int dsi_pll_28nm_vco_prepare(struct clk_hw *hw)
+{
+ struct dsi_pll_28nm *pll_28nm = to_pll_28nm(hw);
+ struct device *dev = &pll_28nm->phy->pdev->dev;
+ void __iomem *base = pll_28nm->phy->pll_base;
+ bool locked;
+ unsigned int bit_div, byte_div;
+ int max_reads = 1000, timeout_us = 100;
+ u32 val;
+
+ DBG("id=%d", pll_28nm->phy->id);
+
+ if (unlikely(pll_28nm->phy->pll_on))
+ return 0;
+
+ /*
+ * before enabling the PLL, configure the bit clock divider since we
+ * don't expose it as a clock to the outside world
+ * 1: read back the byte clock divider that should already be set
+ * 2: divide by 8 to get bit clock divider
+ * 3: write it to POSTDIV1
+ */
+ val = dsi_phy_read(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_9);
+ byte_div = val + 1;
+ bit_div = byte_div / 8;
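+ /*
+ * Illustrative example: if the byte divider was programmed for a
+ * factor of 32, CTRL_9 reads back 31, so byte_div = 32 and
+ * bit_div = 4; POSTDIV1 below is then written with 4 - 1 = 3.
+ */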
+
+ val = dsi_phy_read(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_8);
+ val &= ~0xf;
+ val |= (bit_div - 1);
+ dsi_phy_write(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_8, val);
+
+ /* enable the PLL */
+ dsi_phy_write(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_0,
+ DSI_28nm_8960_PHY_PLL_CTRL_0_ENABLE);
+
+ locked = pll_28nm_poll_for_ready(pll_28nm, max_reads, timeout_us);
+
+ if (unlikely(!locked)) {
+ DRM_DEV_ERROR(dev, "DSI PLL lock failed\n");
+ return -EINVAL;
+ }
+
+ DBG("DSI PLL lock success");
+ pll_28nm->phy->pll_on = true;
+
+ return 0;
+}
+
+static void dsi_pll_28nm_vco_unprepare(struct clk_hw *hw)
+{
+ struct dsi_pll_28nm *pll_28nm = to_pll_28nm(hw);
+
+ DBG("id=%d", pll_28nm->phy->id);
+
+ if (unlikely(!pll_28nm->phy->pll_on))
+ return;
+
+ dsi_phy_write(pll_28nm->phy->pll_base + REG_DSI_28nm_8960_PHY_PLL_CTRL_0, 0x00);
+
+ pll_28nm->phy->pll_on = false;
+}
+
+static long dsi_pll_28nm_clk_round_rate(struct clk_hw *hw,
+ unsigned long rate, unsigned long *parent_rate)
+{
+ struct dsi_pll_28nm *pll_28nm = to_pll_28nm(hw);
+
+ if (rate < pll_28nm->phy->cfg->min_pll_rate)
+ return pll_28nm->phy->cfg->min_pll_rate;
+ else if (rate > pll_28nm->phy->cfg->max_pll_rate)
+ return pll_28nm->phy->cfg->max_pll_rate;
+ else
+ return rate;
+}
+
+static const struct clk_ops clk_ops_dsi_pll_28nm_vco = {
+ .round_rate = dsi_pll_28nm_clk_round_rate,
+ .set_rate = dsi_pll_28nm_clk_set_rate,
+ .recalc_rate = dsi_pll_28nm_clk_recalc_rate,
+ .prepare = dsi_pll_28nm_vco_prepare,
+ .unprepare = dsi_pll_28nm_vco_unprepare,
+ .is_enabled = dsi_pll_28nm_clk_is_enabled,
+};
+
+/*
+ * Custom byte clock divider clk_ops
+ *
+ * This clock is the entry point to configuring the PLL. The user (dsi host)
+ * will set this clock's rate to the desired byte clock rate. The VCO lock
+ * frequency is a multiple of the byte clock rate. The multiplication factor
+ * (shown as F in the diagram above) is a function of the byte clock rate.
+ *
+ * This custom divider clock ensures that its parent (VCO) is set to the
+ * desired rate, and that the byte clock postdivider (POSTDIV2) is configured
+ * accordingly
+ */
+#define to_clk_bytediv(_hw) container_of(_hw, struct clk_bytediv, hw)
+
+static unsigned long clk_bytediv_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct clk_bytediv *bytediv = to_clk_bytediv(hw);
+ unsigned int div;
+
+ div = dsi_phy_read(bytediv->reg) & 0xff;
+
+ return parent_rate / (div + 1);
+}
+
+/* find the multiplication factor (w.r.t. the byte clock) at which the VCO should be set */
+static unsigned int get_vco_mul_factor(unsigned long byte_clk_rate)
+{
+ unsigned long bit_mhz;
+
+ /* convert to bit clock in MHz */
+ bit_mhz = (byte_clk_rate * 8) / 1000000;
+
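+ /*
+ * Example (editorial): a 13.5 MHz byte clock is a 108 MHz bit
+ * clock, giving a factor of 64 and a VCO rate of 13.5 MHz * 64 =
+ * 864 MHz, within the 600-1200 MHz VCO range.
+ */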
+ if (bit_mhz < 125)
+ return 64;
+ else if (bit_mhz < 250)
+ return 32;
+ else if (bit_mhz < 600)
+ return 16;
+ else
+ return 8;
+}
+
+static long clk_bytediv_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *prate)
+{
+ unsigned long best_parent;
+ unsigned int factor;
+
+ factor = get_vco_mul_factor(rate);
+
+ best_parent = rate * factor;
+ *prate = clk_hw_round_rate(clk_hw_get_parent(hw), best_parent);
+
+ return *prate / factor;
+}
+
+static int clk_bytediv_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct clk_bytediv *bytediv = to_clk_bytediv(hw);
+ u32 val;
+ unsigned int factor;
+
+ factor = get_vco_mul_factor(rate);
+
+ val = dsi_phy_read(bytediv->reg);
+ /* clear the old divider bits before programming the new factor */
+ val &= ~0xff;
+ val |= (factor - 1) & 0xff;
+ dsi_phy_write(bytediv->reg, val);
+
+ return 0;
+}
+
+/* Our special byte clock divider ops */
+static const struct clk_ops clk_bytediv_ops = {
+ .round_rate = clk_bytediv_round_rate,
+ .set_rate = clk_bytediv_set_rate,
+ .recalc_rate = clk_bytediv_recalc_rate,
+};
+
+/*
+ * PLL Callbacks
+ */
+static void dsi_28nm_pll_save_state(struct msm_dsi_phy *phy)
+{
+ struct dsi_pll_28nm *pll_28nm = to_pll_28nm(phy->vco_hw);
+ struct pll_28nm_cached_state *cached_state = &pll_28nm->cached_state;
+ void __iomem *base = pll_28nm->phy->pll_base;
+
+ cached_state->postdiv3 =
+ dsi_phy_read(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_10);
+ cached_state->postdiv2 =
+ dsi_phy_read(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_9);
+ cached_state->postdiv1 =
+ dsi_phy_read(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_8);
+
+ cached_state->vco_rate = clk_hw_get_rate(phy->vco_hw);
+}
+
+static int dsi_28nm_pll_restore_state(struct msm_dsi_phy *phy)
+{
+ struct dsi_pll_28nm *pll_28nm = to_pll_28nm(phy->vco_hw);
+ struct pll_28nm_cached_state *cached_state = &pll_28nm->cached_state;
+ void __iomem *base = pll_28nm->phy->pll_base;
+ int ret;
+
+ ret = dsi_pll_28nm_clk_set_rate(phy->vco_hw,
+ cached_state->vco_rate, 0);
+ if (ret) {
+ DRM_DEV_ERROR(&pll_28nm->phy->pdev->dev,
+ "restore vco rate failed. ret=%d\n", ret);
+ return ret;
+ }
+
+ dsi_phy_write(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_10,
+ cached_state->postdiv3);
+ dsi_phy_write(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_9,
+ cached_state->postdiv2);
+ dsi_phy_write(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_8,
+ cached_state->postdiv1);
+
+ return 0;
+}
+
+static int pll_28nm_register(struct dsi_pll_28nm *pll_28nm, struct clk_hw **provided_clocks)
+{
+ char clk_name[32];
+ struct clk_init_data vco_init = {
+ .parent_data = &(const struct clk_parent_data) {
+ .fw_name = "ref",
+ },
+ .num_parents = 1,
+ .flags = CLK_IGNORE_UNUSED,
+ .ops = &clk_ops_dsi_pll_28nm_vco,
+ };
+ struct device *dev = &pll_28nm->phy->pdev->dev;
+ struct clk_hw *hw;
+ struct clk_bytediv *bytediv;
+ struct clk_init_data bytediv_init = { };
+ int ret;
+
+ DBG("%d", pll_28nm->phy->id);
+
+ bytediv = devm_kzalloc(dev, sizeof(*bytediv), GFP_KERNEL);
+ if (!bytediv)
+ return -ENOMEM;
+
+ snprintf(clk_name, sizeof(clk_name), "dsi%dvco_clk", pll_28nm->phy->id);
+ vco_init.name = clk_name;
+
+ pll_28nm->clk_hw.init = &vco_init;
+
+ ret = devm_clk_hw_register(dev, &pll_28nm->clk_hw);
+ if (ret)
+ return ret;
+
+ /* prepare and register bytediv */
+ bytediv->hw.init = &bytediv_init;
+ bytediv->reg = pll_28nm->phy->pll_base + REG_DSI_28nm_8960_PHY_PLL_CTRL_9;
+
+ snprintf(clk_name, sizeof(clk_name), "dsi%dpllbyte", pll_28nm->phy->id + 1);
+
+ bytediv_init.name = clk_name;
+ bytediv_init.ops = &clk_bytediv_ops;
+ bytediv_init.flags = CLK_SET_RATE_PARENT;
+ bytediv_init.parent_hws = (const struct clk_hw*[]){
+ &pll_28nm->clk_hw,
+ };
+ bytediv_init.num_parents = 1;
+
+ /* DIV2 */
+ ret = devm_clk_hw_register(dev, &bytediv->hw);
+ if (ret)
+ return ret;
+ provided_clocks[DSI_BYTE_PLL_CLK] = &bytediv->hw;
+
+ snprintf(clk_name, sizeof(clk_name), "dsi%dpll", pll_28nm->phy->id + 1);
+ /* DIV3 */
+ hw = devm_clk_hw_register_divider_parent_hw(dev, clk_name,
+ &pll_28nm->clk_hw, 0, pll_28nm->phy->pll_base +
+ REG_DSI_28nm_8960_PHY_PLL_CTRL_10,
+ 0, 8, 0, NULL);
+ if (IS_ERR(hw))
+ return PTR_ERR(hw);
+ provided_clocks[DSI_PIXEL_PLL_CLK] = hw;
+
+ return 0;
+}
+
+static int dsi_pll_28nm_8960_init(struct msm_dsi_phy *phy)
+{
+ struct platform_device *pdev = phy->pdev;
+ struct dsi_pll_28nm *pll_28nm;
+ int ret;
+
+ if (!pdev)
+ return -ENODEV;
+
+ pll_28nm = devm_kzalloc(&pdev->dev, sizeof(*pll_28nm), GFP_KERNEL);
+ if (!pll_28nm)
+ return -ENOMEM;
+
+ pll_28nm->phy = phy;
+
+ ret = pll_28nm_register(pll_28nm, phy->provided_clocks->hws);
+ if (ret) {
+ DRM_DEV_ERROR(&pdev->dev, "failed to register PLL: %d\n", ret);
+ return ret;
+ }
+
+ phy->vco_hw = &pll_28nm->clk_hw;
+
+ return 0;
+}
+
+static void dsi_28nm_dphy_set_timing(struct msm_dsi_phy *phy,
+ struct msm_dsi_dphy_timing *timing)
+{
+ void __iomem *base = phy->base;
+
+ dsi_phy_write(base + REG_DSI_28nm_8960_PHY_TIMING_CTRL_0,
+ DSI_28nm_8960_PHY_TIMING_CTRL_0_CLK_ZERO(timing->clk_zero));
+ dsi_phy_write(base + REG_DSI_28nm_8960_PHY_TIMING_CTRL_1,
+ DSI_28nm_8960_PHY_TIMING_CTRL_1_CLK_TRAIL(timing->clk_trail));
+ dsi_phy_write(base + REG_DSI_28nm_8960_PHY_TIMING_CTRL_2,
+ DSI_28nm_8960_PHY_TIMING_CTRL_2_CLK_PREPARE(timing->clk_prepare));
+ dsi_phy_write(base + REG_DSI_28nm_8960_PHY_TIMING_CTRL_3, 0x0);
+ dsi_phy_write(base + REG_DSI_28nm_8960_PHY_TIMING_CTRL_4,
+ DSI_28nm_8960_PHY_TIMING_CTRL_4_HS_EXIT(timing->hs_exit));
+ dsi_phy_write(base + REG_DSI_28nm_8960_PHY_TIMING_CTRL_5,
+ DSI_28nm_8960_PHY_TIMING_CTRL_5_HS_ZERO(timing->hs_zero));
+ dsi_phy_write(base + REG_DSI_28nm_8960_PHY_TIMING_CTRL_6,
+ DSI_28nm_8960_PHY_TIMING_CTRL_6_HS_PREPARE(timing->hs_prepare));
+ dsi_phy_write(base + REG_DSI_28nm_8960_PHY_TIMING_CTRL_7,
+ DSI_28nm_8960_PHY_TIMING_CTRL_7_HS_TRAIL(timing->hs_trail));
+ dsi_phy_write(base + REG_DSI_28nm_8960_PHY_TIMING_CTRL_8,
+ DSI_28nm_8960_PHY_TIMING_CTRL_8_HS_RQST(timing->hs_rqst));
+ dsi_phy_write(base + REG_DSI_28nm_8960_PHY_TIMING_CTRL_9,
+ DSI_28nm_8960_PHY_TIMING_CTRL_9_TA_GO(timing->ta_go) |
+ DSI_28nm_8960_PHY_TIMING_CTRL_9_TA_SURE(timing->ta_sure));
+ dsi_phy_write(base + REG_DSI_28nm_8960_PHY_TIMING_CTRL_10,
+ DSI_28nm_8960_PHY_TIMING_CTRL_10_TA_GET(timing->ta_get));
+ dsi_phy_write(base + REG_DSI_28nm_8960_PHY_TIMING_CTRL_11,
+ DSI_28nm_8960_PHY_TIMING_CTRL_11_TRIG3_CMD(0));
+}
+
+static void dsi_28nm_phy_regulator_init(struct msm_dsi_phy *phy)
+{
+ void __iomem *base = phy->reg_base;
+
+ dsi_phy_write(base + REG_DSI_28nm_8960_PHY_MISC_REGULATOR_CTRL_0, 0x3);
+ dsi_phy_write(base + REG_DSI_28nm_8960_PHY_MISC_REGULATOR_CTRL_1, 1);
+ dsi_phy_write(base + REG_DSI_28nm_8960_PHY_MISC_REGULATOR_CTRL_2, 1);
+ dsi_phy_write(base + REG_DSI_28nm_8960_PHY_MISC_REGULATOR_CTRL_3, 0);
+ dsi_phy_write(base + REG_DSI_28nm_8960_PHY_MISC_REGULATOR_CTRL_4,
+ 0x100);
+}
+
+static void dsi_28nm_phy_regulator_ctrl(struct msm_dsi_phy *phy)
+{
+ void __iomem *base = phy->reg_base;
+
+ dsi_phy_write(base + REG_DSI_28nm_8960_PHY_MISC_REGULATOR_CTRL_0, 0x3);
+ dsi_phy_write(base + REG_DSI_28nm_8960_PHY_MISC_REGULATOR_CTRL_1, 0xa);
+ dsi_phy_write(base + REG_DSI_28nm_8960_PHY_MISC_REGULATOR_CTRL_2, 0x4);
+ dsi_phy_write(base + REG_DSI_28nm_8960_PHY_MISC_REGULATOR_CTRL_3, 0x0);
+ dsi_phy_write(base + REG_DSI_28nm_8960_PHY_MISC_REGULATOR_CTRL_4, 0x20);
+}
+
+static void dsi_28nm_phy_calibration(struct msm_dsi_phy *phy)
+{
+ void __iomem *base = phy->reg_base;
+ u32 status;
+ int i = 5000;
+
+ dsi_phy_write(base + REG_DSI_28nm_8960_PHY_MISC_REGULATOR_CAL_PWR_CFG,
+ 0x3);
+
+ dsi_phy_write(base + REG_DSI_28nm_8960_PHY_MISC_CAL_SW_CFG_2, 0x0);
+ dsi_phy_write(base + REG_DSI_28nm_8960_PHY_MISC_CAL_HW_CFG_1, 0x5a);
+ dsi_phy_write(base + REG_DSI_28nm_8960_PHY_MISC_CAL_HW_CFG_3, 0x10);
+ dsi_phy_write(base + REG_DSI_28nm_8960_PHY_MISC_CAL_HW_CFG_4, 0x1);
+ dsi_phy_write(base + REG_DSI_28nm_8960_PHY_MISC_CAL_HW_CFG_0, 0x1);
+
+ dsi_phy_write(base + REG_DSI_28nm_8960_PHY_MISC_CAL_HW_TRIGGER, 0x1);
+ usleep_range(5000, 6000);
+ dsi_phy_write(base + REG_DSI_28nm_8960_PHY_MISC_CAL_HW_TRIGGER, 0x0);
+
+ do {
+ status = dsi_phy_read(base +
+ REG_DSI_28nm_8960_PHY_MISC_CAL_STATUS);
+
+ if (!(status & DSI_28nm_8960_PHY_MISC_CAL_STATUS_CAL_BUSY))
+ break;
+
+ udelay(1);
+ } while (--i > 0);
+}
+
+static void dsi_28nm_phy_lane_config(struct msm_dsi_phy *phy)
+{
+ void __iomem *base = phy->base;
+ int i;
+
+ for (i = 0; i < 4; i++) {
+ dsi_phy_write(base + REG_DSI_28nm_8960_PHY_LN_CFG_0(i), 0x80);
+ dsi_phy_write(base + REG_DSI_28nm_8960_PHY_LN_CFG_1(i), 0x45);
+ dsi_phy_write(base + REG_DSI_28nm_8960_PHY_LN_CFG_2(i), 0x00);
+ dsi_phy_write(base + REG_DSI_28nm_8960_PHY_LN_TEST_DATAPATH(i),
+ 0x00);
+ dsi_phy_write(base + REG_DSI_28nm_8960_PHY_LN_TEST_STR_0(i),
+ 0x01);
+ dsi_phy_write(base + REG_DSI_28nm_8960_PHY_LN_TEST_STR_1(i),
+ 0x66);
+ }
+
+ dsi_phy_write(base + REG_DSI_28nm_8960_PHY_LNCK_CFG_0, 0x40);
+ dsi_phy_write(base + REG_DSI_28nm_8960_PHY_LNCK_CFG_1, 0x67);
+ dsi_phy_write(base + REG_DSI_28nm_8960_PHY_LNCK_CFG_2, 0x0);
+ dsi_phy_write(base + REG_DSI_28nm_8960_PHY_LNCK_TEST_DATAPATH, 0x0);
+ dsi_phy_write(base + REG_DSI_28nm_8960_PHY_LNCK_TEST_STR0, 0x1);
+ dsi_phy_write(base + REG_DSI_28nm_8960_PHY_LNCK_TEST_STR1, 0x88);
+}
+
+static int dsi_28nm_phy_enable(struct msm_dsi_phy *phy,
+ struct msm_dsi_phy_clk_request *clk_req)
+{
+ struct msm_dsi_dphy_timing *timing = &phy->timing;
+ void __iomem *base = phy->base;
+
+ DBG("");
+
+ if (msm_dsi_dphy_timing_calc(timing, clk_req)) {
+ DRM_DEV_ERROR(&phy->pdev->dev,
+ "%s: D-PHY timing calculation failed\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ dsi_28nm_phy_regulator_init(phy);
+
+ dsi_phy_write(base + REG_DSI_28nm_8960_PHY_LDO_CTRL, 0x04);
+
+ /* strength control */
+ dsi_phy_write(base + REG_DSI_28nm_8960_PHY_STRENGTH_0, 0xff);
+ dsi_phy_write(base + REG_DSI_28nm_8960_PHY_STRENGTH_1, 0x00);
+ dsi_phy_write(base + REG_DSI_28nm_8960_PHY_STRENGTH_2, 0x06);
+
+ /* phy ctrl */
+ dsi_phy_write(base + REG_DSI_28nm_8960_PHY_CTRL_0, 0x5f);
+ dsi_phy_write(base + REG_DSI_28nm_8960_PHY_CTRL_1, 0x00);
+ dsi_phy_write(base + REG_DSI_28nm_8960_PHY_CTRL_2, 0x00);
+ dsi_phy_write(base + REG_DSI_28nm_8960_PHY_CTRL_3, 0x10);
+
+ dsi_28nm_phy_regulator_ctrl(phy);
+
+ dsi_28nm_phy_calibration(phy);
+
+ dsi_28nm_phy_lane_config(phy);
+
+ dsi_phy_write(base + REG_DSI_28nm_8960_PHY_BIST_CTRL_4, 0x0f);
+ dsi_phy_write(base + REG_DSI_28nm_8960_PHY_BIST_CTRL_1, 0x03);
+ dsi_phy_write(base + REG_DSI_28nm_8960_PHY_BIST_CTRL_0, 0x03);
+ dsi_phy_write(base + REG_DSI_28nm_8960_PHY_BIST_CTRL_4, 0x0);
+
+ dsi_28nm_dphy_set_timing(phy, timing);
+
+ return 0;
+}
+
+static void dsi_28nm_phy_disable(struct msm_dsi_phy *phy)
+{
+ dsi_phy_write(phy->base + REG_DSI_28nm_8960_PHY_CTRL_0, 0x0);
+
+ /*
+ * Wait for the register writes to complete in order to
+ * ensure that the PHY is completely disabled
+ */
+ wmb();
+}
+
+static const struct regulator_bulk_data dsi_phy_28nm_8960_regulators[] = {
+ { .supply = "vddio", .init_load_uA = 100000 }, /* 1.8 V */
+};
+
+const struct msm_dsi_phy_cfg dsi_phy_28nm_8960_cfgs = {
+ .has_phy_regulator = true,
+ .regulator_data = dsi_phy_28nm_8960_regulators,
+ .num_regulators = ARRAY_SIZE(dsi_phy_28nm_8960_regulators),
+ .ops = {
+ .enable = dsi_28nm_phy_enable,
+ .disable = dsi_28nm_phy_disable,
+ .pll_init = dsi_pll_28nm_8960_init,
+ .save_pll_state = dsi_28nm_pll_save_state,
+ .restore_pll_state = dsi_28nm_pll_restore_state,
+ },
+ .min_pll_rate = VCO_MIN_RATE,
+ .max_pll_rate = VCO_MAX_RATE,
+ .io_start = { 0x4700300, 0x5800300 },
+ .num_dsi_phy = 2,
+};
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c
new file mode 100644
index 000000000..9e7fa7d88
--- /dev/null
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c
@@ -0,0 +1,1104 @@
+/*
+ * SPDX-License-Identifier: GPL-2.0
+ * Copyright (c) 2018, The Linux Foundation
+ */
+
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/iopoll.h>
+
+#include "dsi_phy.h"
+#include "dsi.xml.h"
+#include "dsi_phy_7nm.xml.h"
+
+/*
+ * DSI PLL 7nm - clock diagram (eg: DSI0): TODO: update CPHY diagram
+ *
+ * dsi0_pll_out_div_clk dsi0_pll_bit_clk
+ * | |
+ * | |
+ * +---------+ | +----------+ | +----+
+ * dsi0vco_clk ---| out_div |--o--| divl_3_0 |--o--| /8 |-- dsi0_phy_pll_out_byteclk
+ * +---------+ | +----------+ | +----+
+ * | |
+ * | | dsi0_pll_by_2_bit_clk
+ * | | |
+ * | | +----+ | |\ dsi0_pclk_mux
+ * | |--| /2 |--o--| \ |
+ * | | +----+ | \ | +---------+
+ * | --------------| |--o--| div_7_4 |-- dsi0_phy_pll_out_dsiclk
+ * |------------------------------| / +---------+
+ * | +-----+ | /
+ * -----------| /4? |--o----------|/
+ * +-----+ | |
+ * | |dsiclk_sel
+ * |
+ * dsi0_pll_post_out_div_clk
+ */
+
+#define VCO_REF_CLK_RATE 19200000
+#define FRAC_BITS 18
+
+/* Hardware is V4.1 */
+#define DSI_PHY_7NM_QUIRK_V4_1 BIT(0)
+
+struct dsi_pll_config {
+ bool enable_ssc;
+ bool ssc_center;
+ u32 ssc_freq;
+ u32 ssc_offset;
+ u32 ssc_adj_per;
+
+ /* out */
+ u32 decimal_div_start;
+ u32 frac_div_start;
+ u32 pll_clock_inverters;
+ u32 ssc_stepsize;
+ u32 ssc_div_per;
+};
+
+struct pll_7nm_cached_state {
+ unsigned long vco_rate;
+ u8 bit_clk_div;
+ u8 pix_clk_div;
+ u8 pll_out_div;
+ u8 pll_mux;
+};
+
+struct dsi_pll_7nm {
+ struct clk_hw clk_hw;
+
+ struct msm_dsi_phy *phy;
+
+ u64 vco_current_rate;
+
+ /* protects REG_DSI_7nm_PHY_CMN_CLK_CFG0 register */
+ spinlock_t postdiv_lock;
+
+ struct pll_7nm_cached_state cached_state;
+
+ struct dsi_pll_7nm *slave;
+};
+
+#define to_pll_7nm(x) container_of(x, struct dsi_pll_7nm, clk_hw)
+
+/*
+ * Global list of private DSI PLL struct pointers. We need this for bonded DSI
+ * mode, where the master PLL's clk_ops needs to access the slave's private data
+ */
+static struct dsi_pll_7nm *pll_7nm_list[DSI_MAX];
+
+static void dsi_pll_setup_config(struct dsi_pll_config *config)
+{
+ config->ssc_freq = 31500;
+ config->ssc_offset = 4800;
+ config->ssc_adj_per = 2;
+
+ /* TODO: ssc enable */
+ config->enable_ssc = false;
+ config->ssc_center = 0;
+}
+
+static void dsi_pll_calc_dec_frac(struct dsi_pll_7nm *pll, struct dsi_pll_config *config)
+{
+ u64 fref = VCO_REF_CLK_RATE;
+ u64 pll_freq;
+ u64 divider;
+ u64 dec, dec_multiple;
+ u32 frac;
+ u64 multiplier;
+
+ pll_freq = pll->vco_current_rate;
+
+ divider = fref * 2;
+
+ multiplier = 1 << FRAC_BITS;
+ dec_multiple = div_u64(pll_freq * multiplier, divider);
+ dec = div_u64_rem(dec_multiple, multiplier, &frac);
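+ /*
+ * Worked example (editorial): for a 1.5 GHz VCO and the 19.2 MHz
+ * reference, pll_freq / divider = 1500000000 / 38400000 = 39.0625,
+ * so dec = 39 and frac = 0.0625 * 2^18 = 16384.
+ */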
+
+ if (!(pll->phy->cfg->quirks & DSI_PHY_7NM_QUIRK_V4_1))
+ config->pll_clock_inverters = 0x28;
+ else if (pll_freq <= 1000000000ULL)
+ config->pll_clock_inverters = 0xa0;
+ else if (pll_freq <= 2500000000ULL)
+ config->pll_clock_inverters = 0x20;
+ else if (pll_freq <= 3020000000ULL)
+ config->pll_clock_inverters = 0x00;
+ else
+ config->pll_clock_inverters = 0x40;
+
+ config->decimal_div_start = dec;
+ config->frac_div_start = frac;
+}
+
+#define SSC_CENTER BIT(0)
+#define SSC_EN BIT(1)
+
+static void dsi_pll_calc_ssc(struct dsi_pll_7nm *pll, struct dsi_pll_config *config)
+{
+ u32 ssc_per;
+ u32 ssc_mod;
+ u64 ssc_step_size;
+ u64 frac;
+
+ if (!config->enable_ssc) {
+ DBG("SSC not enabled\n");
+ return;
+ }
+
+ ssc_per = DIV_ROUND_CLOSEST(VCO_REF_CLK_RATE, config->ssc_freq) / 2 - 1;
+ ssc_mod = (ssc_per + 1) % (config->ssc_adj_per + 1);
+ ssc_per -= ssc_mod;
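+ /*
+ * With the defaults above (editorial note): 19200000 / 31500 rounds
+ * to 610, so ssc_per = 610 / 2 - 1 = 304; aligning ssc_per + 1 to a
+ * multiple of ssc_adj_per + 1 = 3 gives ssc_per = 302.
+ */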
+
+ frac = config->frac_div_start;
+ ssc_step_size = config->decimal_div_start;
+ ssc_step_size *= (1 << FRAC_BITS);
+ ssc_step_size += frac;
+ ssc_step_size *= config->ssc_offset;
+ ssc_step_size *= (config->ssc_adj_per + 1);
+ ssc_step_size = div_u64(ssc_step_size, (ssc_per + 1));
+ ssc_step_size = DIV_ROUND_CLOSEST_ULL(ssc_step_size, 1000000);
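+ /*
+ * i.e. (editorial): step = N * ssc_offset * (ssc_adj_per + 1) /
+ * ((ssc_per + 1) * 10^6), where N is the 2^18 fixed-point feedback
+ * multiplier. Continuing the example: N = 39 * 2^18 + 16384 =
+ * 10240000, and 10240000 * 4800 * 3 / (303 * 10^6) rounds to 487.
+ */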
+
+ config->ssc_div_per = ssc_per;
+ config->ssc_stepsize = ssc_step_size;
+
+ pr_debug("SCC: Dec:%d, frac:%llu, frac_bits:%d\n",
+ config->decimal_div_start, frac, FRAC_BITS);
+ pr_debug("SSC: div_per:0x%X, stepsize:0x%X, adjper:0x%X\n",
+ ssc_per, (u32)ssc_step_size, config->ssc_adj_per);
+}
+
+static void dsi_pll_ssc_commit(struct dsi_pll_7nm *pll, struct dsi_pll_config *config)
+{
+ void __iomem *base = pll->phy->pll_base;
+
+ if (config->enable_ssc) {
+ pr_debug("SSC is enabled\n");
+
+ dsi_phy_write(base + REG_DSI_7nm_PHY_PLL_SSC_STEPSIZE_LOW_1,
+ config->ssc_stepsize & 0xff);
+ dsi_phy_write(base + REG_DSI_7nm_PHY_PLL_SSC_STEPSIZE_HIGH_1,
+ config->ssc_stepsize >> 8);
+ dsi_phy_write(base + REG_DSI_7nm_PHY_PLL_SSC_DIV_PER_LOW_1,
+ config->ssc_div_per & 0xff);
+ dsi_phy_write(base + REG_DSI_7nm_PHY_PLL_SSC_DIV_PER_HIGH_1,
+ config->ssc_div_per >> 8);
+ dsi_phy_write(base + REG_DSI_7nm_PHY_PLL_SSC_ADJPER_LOW_1,
+ config->ssc_adj_per & 0xff);
+ dsi_phy_write(base + REG_DSI_7nm_PHY_PLL_SSC_ADJPER_HIGH_1,
+ config->ssc_adj_per >> 8);
+ dsi_phy_write(base + REG_DSI_7nm_PHY_PLL_SSC_CONTROL,
+ SSC_EN | (config->ssc_center ? SSC_CENTER : 0));
+ }
+}
+
+static void dsi_pll_config_hzindep_reg(struct dsi_pll_7nm *pll)
+{
+ void __iomem *base = pll->phy->pll_base;
+ u8 analog_controls_five_1 = 0x01, vco_config_1 = 0x00;
+
+ if (pll->phy->cfg->quirks & DSI_PHY_7NM_QUIRK_V4_1) {
+ if (pll->vco_current_rate >= 3100000000ULL)
+ analog_controls_five_1 = 0x03;
+
+ if (pll->vco_current_rate < 1520000000ULL)
+ vco_config_1 = 0x08;
+ else if (pll->vco_current_rate < 2990000000ULL)
+ vco_config_1 = 0x01;
+ }
+
+ dsi_phy_write(base + REG_DSI_7nm_PHY_PLL_ANALOG_CONTROLS_FIVE_1,
+ analog_controls_five_1);
+ dsi_phy_write(base + REG_DSI_7nm_PHY_PLL_VCO_CONFIG_1, vco_config_1);
+ dsi_phy_write(base + REG_DSI_7nm_PHY_PLL_ANALOG_CONTROLS_FIVE, 0x01);
+ dsi_phy_write(base + REG_DSI_7nm_PHY_PLL_ANALOG_CONTROLS_TWO, 0x03);
+ dsi_phy_write(base + REG_DSI_7nm_PHY_PLL_ANALOG_CONTROLS_THREE, 0x00);
+ dsi_phy_write(base + REG_DSI_7nm_PHY_PLL_DSM_DIVIDER, 0x00);
+ dsi_phy_write(base + REG_DSI_7nm_PHY_PLL_FEEDBACK_DIVIDER, 0x4e);
+ dsi_phy_write(base + REG_DSI_7nm_PHY_PLL_CALIBRATION_SETTINGS, 0x40);
+ dsi_phy_write(base + REG_DSI_7nm_PHY_PLL_BAND_SEL_CAL_SETTINGS_THREE, 0xba);
+ dsi_phy_write(base + REG_DSI_7nm_PHY_PLL_FREQ_DETECT_SETTINGS_ONE, 0x0c);
+ dsi_phy_write(base + REG_DSI_7nm_PHY_PLL_OUTDIV, 0x00);
+ dsi_phy_write(base + REG_DSI_7nm_PHY_PLL_CORE_OVERRIDE, 0x00);
+ dsi_phy_write(base + REG_DSI_7nm_PHY_PLL_PLL_DIGITAL_TIMERS_TWO, 0x08);
+ dsi_phy_write(base + REG_DSI_7nm_PHY_PLL_PLL_PROP_GAIN_RATE_1, 0x0a);
+ dsi_phy_write(base + REG_DSI_7nm_PHY_PLL_PLL_BAND_SEL_RATE_1, 0xc0);
+ dsi_phy_write(base + REG_DSI_7nm_PHY_PLL_PLL_INT_GAIN_IFILT_BAND_1, 0x84);
+ dsi_phy_write(base + REG_DSI_7nm_PHY_PLL_PLL_INT_GAIN_IFILT_BAND_1, 0x82);
+ dsi_phy_write(base + REG_DSI_7nm_PHY_PLL_PLL_FL_INT_GAIN_PFILT_BAND_1, 0x4c);
+ dsi_phy_write(base + REG_DSI_7nm_PHY_PLL_PLL_LOCK_OVERRIDE, 0x80);
+ dsi_phy_write(base + REG_DSI_7nm_PHY_PLL_PFILT, 0x29);
+ dsi_phy_write(base + REG_DSI_7nm_PHY_PLL_PFILT, 0x2f);
+ dsi_phy_write(base + REG_DSI_7nm_PHY_PLL_IFILT, 0x2a);
+ dsi_phy_write(base + REG_DSI_7nm_PHY_PLL_IFILT,
+ pll->phy->cfg->quirks & DSI_PHY_7NM_QUIRK_V4_1 ? 0x3f : 0x22);
+
+ if (pll->phy->cfg->quirks & DSI_PHY_7NM_QUIRK_V4_1) {
+ dsi_phy_write(base + REG_DSI_7nm_PHY_PLL_PERF_OPTIMIZE, 0x22);
+ if (pll->slave)
+ dsi_phy_write(pll->slave->phy->pll_base + REG_DSI_7nm_PHY_PLL_PERF_OPTIMIZE, 0x22);
+ }
+}
+
+static void dsi_pll_commit(struct dsi_pll_7nm *pll, struct dsi_pll_config *config)
+{
+ void __iomem *base = pll->phy->pll_base;
+
+ dsi_phy_write(base + REG_DSI_7nm_PHY_PLL_CORE_INPUT_OVERRIDE, 0x12);
+ dsi_phy_write(base + REG_DSI_7nm_PHY_PLL_DECIMAL_DIV_START_1,
+ config->decimal_div_start);
+ dsi_phy_write(base + REG_DSI_7nm_PHY_PLL_FRAC_DIV_START_LOW_1,
+ config->frac_div_start & 0xff);
+ dsi_phy_write(base + REG_DSI_7nm_PHY_PLL_FRAC_DIV_START_MID_1,
+ (config->frac_div_start & 0xff00) >> 8);
+ dsi_phy_write(base + REG_DSI_7nm_PHY_PLL_FRAC_DIV_START_HIGH_1,
+ (config->frac_div_start & 0x30000) >> 16);
+ dsi_phy_write(base + REG_DSI_7nm_PHY_PLL_PLL_LOCKDET_RATE_1, 0x40);
+ dsi_phy_write(base + REG_DSI_7nm_PHY_PLL_PLL_LOCK_DELAY, 0x06);
+ dsi_phy_write(base + REG_DSI_7nm_PHY_PLL_CMODE_1,
+ pll->phy->cphy_mode ? 0x00 : 0x10);
+ dsi_phy_write(base + REG_DSI_7nm_PHY_PLL_CLOCK_INVERTERS,
+ config->pll_clock_inverters);
+}
+
+static int dsi_pll_7nm_vco_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct dsi_pll_7nm *pll_7nm = to_pll_7nm(hw);
+ struct dsi_pll_config config;
+
+ DBG("DSI PLL%d rate=%lu, parent's=%lu", pll_7nm->phy->id, rate,
+ parent_rate);
+
+ pll_7nm->vco_current_rate = rate;
+
+ dsi_pll_setup_config(&config);
+
+ dsi_pll_calc_dec_frac(pll_7nm, &config);
+
+ dsi_pll_calc_ssc(pll_7nm, &config);
+
+ dsi_pll_commit(pll_7nm, &config);
+
+ dsi_pll_config_hzindep_reg(pll_7nm);
+
+ dsi_pll_ssc_commit(pll_7nm, &config);
+
+ /* flush, ensure all register writes are done */
+ wmb();
+
+ return 0;
+}
+
+static int dsi_pll_7nm_lock_status(struct dsi_pll_7nm *pll)
+{
+ int rc;
+ u32 status = 0;
+ u32 const delay_us = 100;
+ u32 const timeout_us = 5000;
+
+ rc = readl_poll_timeout_atomic(pll->phy->pll_base +
+ REG_DSI_7nm_PHY_PLL_COMMON_STATUS_ONE,
+ status,
+ ((status & BIT(0)) > 0),
+ delay_us,
+ timeout_us);
+ if (rc)
+ pr_err("DSI PLL(%d) lock failed, status=0x%08x\n",
+ pll->phy->id, status);
+
+ return rc;
+}
+
+static void dsi_pll_disable_pll_bias(struct dsi_pll_7nm *pll)
+{
+ u32 data = dsi_phy_read(pll->phy->base + REG_DSI_7nm_PHY_CMN_CTRL_0);
+
+ dsi_phy_write(pll->phy->pll_base + REG_DSI_7nm_PHY_PLL_SYSTEM_MUXES, 0);
+ dsi_phy_write(pll->phy->base + REG_DSI_7nm_PHY_CMN_CTRL_0, data & ~BIT(5));
+ ndelay(250);
+}
+
+static void dsi_pll_enable_pll_bias(struct dsi_pll_7nm *pll)
+{
+ u32 data = dsi_phy_read(pll->phy->base + REG_DSI_7nm_PHY_CMN_CTRL_0);
+
+ dsi_phy_write(pll->phy->base + REG_DSI_7nm_PHY_CMN_CTRL_0, data | BIT(5));
+ dsi_phy_write(pll->phy->pll_base + REG_DSI_7nm_PHY_PLL_SYSTEM_MUXES, 0xc0);
+ ndelay(250);
+}
+
+static void dsi_pll_disable_global_clk(struct dsi_pll_7nm *pll)
+{
+ u32 data;
+
+ data = dsi_phy_read(pll->phy->base + REG_DSI_7nm_PHY_CMN_CLK_CFG1);
+ dsi_phy_write(pll->phy->base + REG_DSI_7nm_PHY_CMN_CLK_CFG1, data & ~BIT(5));
+}
+
+static void dsi_pll_enable_global_clk(struct dsi_pll_7nm *pll)
+{
+ u32 data;
+
+ dsi_phy_write(pll->phy->base + REG_DSI_7nm_PHY_CMN_CTRL_3, 0x04);
+
+ data = dsi_phy_read(pll->phy->base + REG_DSI_7nm_PHY_CMN_CLK_CFG1);
+ dsi_phy_write(pll->phy->base + REG_DSI_7nm_PHY_CMN_CLK_CFG1,
+ data | BIT(5) | BIT(4));
+}
+
+static void dsi_pll_phy_dig_reset(struct dsi_pll_7nm *pll)
+{
+ /*
+ * Reset the PHY digital domain. This would be needed when
+ * coming out of a CX or analog rail power collapse while
+ * ensuring that the pads maintain LP00 or LP11 state
+ */
+ dsi_phy_write(pll->phy->base + REG_DSI_7nm_PHY_CMN_GLBL_DIGTOP_SPARE4, BIT(0));
+ wmb(); /* Ensure that the reset is asserted */
+ dsi_phy_write(pll->phy->base + REG_DSI_7nm_PHY_CMN_GLBL_DIGTOP_SPARE4, 0x0);
+ wmb(); /* Ensure that the reset is deasserted */
+}
+
+static int dsi_pll_7nm_vco_prepare(struct clk_hw *hw)
+{
+ struct dsi_pll_7nm *pll_7nm = to_pll_7nm(hw);
+ int rc;
+
+ dsi_pll_enable_pll_bias(pll_7nm);
+ if (pll_7nm->slave)
+ dsi_pll_enable_pll_bias(pll_7nm->slave);
+
+ /* Start PLL */
+ dsi_phy_write(pll_7nm->phy->base + REG_DSI_7nm_PHY_CMN_PLL_CNTRL, 0x01);
+
+ /*
+ * ensure all PLL configurations are written prior to checking
+ * for PLL lock.
+ */
+ wmb();
+
+ /* Check for PLL lock */
+ rc = dsi_pll_7nm_lock_status(pll_7nm);
+ if (rc) {
+ pr_err("PLL(%d) lock failed\n", pll_7nm->phy->id);
+ goto error;
+ }
+
+ pll_7nm->phy->pll_on = true;
+
+ /*
+ * assert power-on reset for the PHY digital block in case the PLL
+ * is enabled after a CX or analog domain power collapse. This needs
+ * to be done before enabling the global clk.
+ */
+ dsi_pll_phy_dig_reset(pll_7nm);
+ if (pll_7nm->slave)
+ dsi_pll_phy_dig_reset(pll_7nm->slave);
+
+ dsi_pll_enable_global_clk(pll_7nm);
+ if (pll_7nm->slave)
+ dsi_pll_enable_global_clk(pll_7nm->slave);
+
+error:
+ return rc;
+}
+
+static void dsi_pll_disable_sub(struct dsi_pll_7nm *pll)
+{
+ dsi_phy_write(pll->phy->base + REG_DSI_7nm_PHY_CMN_RBUF_CTRL, 0);
+ dsi_pll_disable_pll_bias(pll);
+}
+
+static void dsi_pll_7nm_vco_unprepare(struct clk_hw *hw)
+{
+ struct dsi_pll_7nm *pll_7nm = to_pll_7nm(hw);
+
+ /*
+ * To avoid any stray glitches while abruptly powering down the PLL
+ * make sure to gate the clock using the clock enable bit before
+ * powering down the PLL
+ */
+ dsi_pll_disable_global_clk(pll_7nm);
+ dsi_phy_write(pll_7nm->phy->base + REG_DSI_7nm_PHY_CMN_PLL_CNTRL, 0);
+ dsi_pll_disable_sub(pll_7nm);
+ if (pll_7nm->slave) {
+ dsi_pll_disable_global_clk(pll_7nm->slave);
+ dsi_pll_disable_sub(pll_7nm->slave);
+ }
+ /* flush, ensure all register writes are done */
+ wmb();
+ pll_7nm->phy->pll_on = false;
+}
+
+static unsigned long dsi_pll_7nm_vco_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct dsi_pll_7nm *pll_7nm = to_pll_7nm(hw);
+ void __iomem *base = pll_7nm->phy->pll_base;
+ u64 ref_clk = VCO_REF_CLK_RATE;
+ u64 vco_rate = 0x0;
+ u64 multiplier;
+ u32 frac;
+ u32 dec;
+ u64 pll_freq, tmp64;
+
+ dec = dsi_phy_read(base + REG_DSI_7nm_PHY_PLL_DECIMAL_DIV_START_1);
+ dec &= 0xff;
+
+ frac = dsi_phy_read(base + REG_DSI_7nm_PHY_PLL_FRAC_DIV_START_LOW_1);
+ frac |= ((dsi_phy_read(base + REG_DSI_7nm_PHY_PLL_FRAC_DIV_START_MID_1) &
+ 0xff) << 8);
+ frac |= ((dsi_phy_read(base + REG_DSI_7nm_PHY_PLL_FRAC_DIV_START_HIGH_1) &
+ 0x3) << 16);
+
+ /*
+ * TODO:
+ * 1. Assumes prescaler is disabled
+ */
+ multiplier = 1 << FRAC_BITS;
+ pll_freq = dec * (ref_clk * 2);
+ tmp64 = (ref_clk * 2 * frac);
+ pll_freq += div_u64(tmp64, multiplier);
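+ /*
+ * e.g. (editorial): dec = 39, frac = 16384 gives
+ * 39 * 38.4 MHz + (38.4 MHz * 16384) / 2^18 = 1497.6 + 2.4 MHz,
+ * i.e. 1500 MHz, the inverse of dsi_pll_calc_dec_frac().
+ */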
+
+ vco_rate = pll_freq;
+ pll_7nm->vco_current_rate = vco_rate;
+
+ DBG("DSI PLL%d returning vco rate = %lu, dec = %x, frac = %x",
+ pll_7nm->phy->id, (unsigned long)vco_rate, dec, frac);
+
+ return (unsigned long)vco_rate;
+}
+
+static long dsi_pll_7nm_clk_round_rate(struct clk_hw *hw,
+ unsigned long rate, unsigned long *parent_rate)
+{
+ struct dsi_pll_7nm *pll_7nm = to_pll_7nm(hw);
+
+ if (rate < pll_7nm->phy->cfg->min_pll_rate)
+ return pll_7nm->phy->cfg->min_pll_rate;
+ else if (rate > pll_7nm->phy->cfg->max_pll_rate)
+ return pll_7nm->phy->cfg->max_pll_rate;
+ else
+ return rate;
+}
+
+static const struct clk_ops clk_ops_dsi_pll_7nm_vco = {
+ .round_rate = dsi_pll_7nm_clk_round_rate,
+ .set_rate = dsi_pll_7nm_vco_set_rate,
+ .recalc_rate = dsi_pll_7nm_vco_recalc_rate,
+ .prepare = dsi_pll_7nm_vco_prepare,
+ .unprepare = dsi_pll_7nm_vco_unprepare,
+};
+
+/*
+ * PLL Callbacks
+ */
+
+static void dsi_7nm_pll_save_state(struct msm_dsi_phy *phy)
+{
+ struct dsi_pll_7nm *pll_7nm = to_pll_7nm(phy->vco_hw);
+ struct pll_7nm_cached_state *cached = &pll_7nm->cached_state;
+ void __iomem *phy_base = pll_7nm->phy->base;
+ u32 cmn_clk_cfg0, cmn_clk_cfg1;
+
+ cached->pll_out_div = dsi_phy_read(pll_7nm->phy->pll_base +
+ REG_DSI_7nm_PHY_PLL_PLL_OUTDIV_RATE);
+ cached->pll_out_div &= 0x3;
+
+ cmn_clk_cfg0 = dsi_phy_read(phy_base + REG_DSI_7nm_PHY_CMN_CLK_CFG0);
+ cached->bit_clk_div = cmn_clk_cfg0 & 0xf;
+ cached->pix_clk_div = (cmn_clk_cfg0 & 0xf0) >> 4;
+
+ cmn_clk_cfg1 = dsi_phy_read(phy_base + REG_DSI_7nm_PHY_CMN_CLK_CFG1);
+ cached->pll_mux = cmn_clk_cfg1 & 0x3;
+
+ DBG("DSI PLL%d outdiv %x bit_clk_div %x pix_clk_div %x pll_mux %x",
+ pll_7nm->phy->id, cached->pll_out_div, cached->bit_clk_div,
+ cached->pix_clk_div, cached->pll_mux);
+}
+
+static int dsi_7nm_pll_restore_state(struct msm_dsi_phy *phy)
+{
+ struct dsi_pll_7nm *pll_7nm = to_pll_7nm(phy->vco_hw);
+ struct pll_7nm_cached_state *cached = &pll_7nm->cached_state;
+ void __iomem *phy_base = pll_7nm->phy->base;
+ u32 val;
+ int ret;
+
+ val = dsi_phy_read(pll_7nm->phy->pll_base + REG_DSI_7nm_PHY_PLL_PLL_OUTDIV_RATE);
+ val &= ~0x3;
+ val |= cached->pll_out_div;
+ dsi_phy_write(pll_7nm->phy->pll_base + REG_DSI_7nm_PHY_PLL_PLL_OUTDIV_RATE, val);
+
+ dsi_phy_write(phy_base + REG_DSI_7nm_PHY_CMN_CLK_CFG0,
+ cached->bit_clk_div | (cached->pix_clk_div << 4));
+
+ val = dsi_phy_read(phy_base + REG_DSI_7nm_PHY_CMN_CLK_CFG1);
+ val &= ~0x3;
+ val |= cached->pll_mux;
+ dsi_phy_write(phy_base + REG_DSI_7nm_PHY_CMN_CLK_CFG1, val);
+
+ ret = dsi_pll_7nm_vco_set_rate(phy->vco_hw,
+ pll_7nm->vco_current_rate,
+ VCO_REF_CLK_RATE);
+ if (ret) {
+ DRM_DEV_ERROR(&pll_7nm->phy->pdev->dev,
+ "restore vco rate failed. ret=%d\n", ret);
+ return ret;
+ }
+
+ DBG("DSI PLL%d", pll_7nm->phy->id);
+
+ return 0;
+}
+
+static int dsi_7nm_set_usecase(struct msm_dsi_phy *phy)
+{
+ struct dsi_pll_7nm *pll_7nm = to_pll_7nm(phy->vco_hw);
+ void __iomem *base = phy->base;
+ u32 data = 0x0; /* internal PLL */
+
+ DBG("DSI PLL%d", pll_7nm->phy->id);
+
+ switch (phy->usecase) {
+ case MSM_DSI_PHY_STANDALONE:
+ break;
+ case MSM_DSI_PHY_MASTER:
+ pll_7nm->slave = pll_7nm_list[(pll_7nm->phy->id + 1) % DSI_MAX];
+ break;
+ case MSM_DSI_PHY_SLAVE:
+ data = 0x1; /* external PLL */
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* set PLL src */
+ dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_CLK_CFG1, (data << 2));
+
+ return 0;
+}
+
+/*
+ * The post dividers and mux clocks are created using the standard divider and
+ * mux API. Unlike the 14nm PHY, the slave PLL doesn't need its dividers/mux
+ * state to follow the master PLL's divider/mux state. Therefore, we don't
+ * require special clock ops that also configure the slave PLL registers
+ */
+static int pll_7nm_register(struct dsi_pll_7nm *pll_7nm, struct clk_hw **provided_clocks)
+{
+ char clk_name[32];
+ struct clk_init_data vco_init = {
+ .parent_data = &(const struct clk_parent_data) {
+ .fw_name = "ref",
+ },
+ .num_parents = 1,
+ .name = clk_name,
+ .flags = CLK_IGNORE_UNUSED,
+ .ops = &clk_ops_dsi_pll_7nm_vco,
+ };
+ struct device *dev = &pll_7nm->phy->pdev->dev;
+ struct clk_hw *hw, *pll_out_div, *pll_bit, *pll_by_2_bit;
+ struct clk_hw *pll_post_out_div, *phy_pll_out_dsi_parent;
+ int ret;
+
+ DBG("DSI%d", pll_7nm->phy->id);
+
+ snprintf(clk_name, sizeof(clk_name), "dsi%dvco_clk", pll_7nm->phy->id);
+ pll_7nm->clk_hw.init = &vco_init;
+
+ ret = devm_clk_hw_register(dev, &pll_7nm->clk_hw);
+ if (ret)
+ return ret;
+
+ snprintf(clk_name, sizeof(clk_name), "dsi%d_pll_out_div_clk", pll_7nm->phy->id);
+
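+ /*
+ * Editorial note: the OUTDIV field is 2 bits wide and, with
+ * CLK_DIVIDER_POWER_OF_TWO, stores log2 of the divisor, so the
+ * out divider covers /1, /2, /4 and /8.
+ */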
+ pll_out_div = devm_clk_hw_register_divider_parent_hw(dev, clk_name,
+ &pll_7nm->clk_hw, CLK_SET_RATE_PARENT,
+ pll_7nm->phy->pll_base +
+ REG_DSI_7nm_PHY_PLL_PLL_OUTDIV_RATE,
+ 0, 2, CLK_DIVIDER_POWER_OF_TWO, NULL);
+ if (IS_ERR(pll_out_div)) {
+ ret = PTR_ERR(pll_out_div);
+ goto fail;
+ }
+
+ snprintf(clk_name, sizeof(clk_name), "dsi%d_pll_bit_clk", pll_7nm->phy->id);
+
+ /* BIT CLK: DIV_CTRL_3_0 */
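+ /*
+ * Editorial note: with CLK_DIVIDER_ONE_BASED the 4-bit field holds
+ * the divisor itself (1-15) rather than divisor - 1.
+ */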
+ pll_bit = devm_clk_hw_register_divider_parent_hw(dev, clk_name,
+ pll_out_div, CLK_SET_RATE_PARENT,
+ pll_7nm->phy->base + REG_DSI_7nm_PHY_CMN_CLK_CFG0,
+ 0, 4, CLK_DIVIDER_ONE_BASED, &pll_7nm->postdiv_lock);
+ if (IS_ERR(pll_bit)) {
+ ret = PTR_ERR(pll_bit);
+ goto fail;
+ }
+
+ snprintf(clk_name, sizeof(clk_name), "dsi%d_phy_pll_out_byteclk", pll_7nm->phy->id);
+
+ /* DSI Byte clock = VCO_CLK / OUT_DIV / BIT_DIV / 8 */
+ hw = devm_clk_hw_register_fixed_factor_parent_hw(dev, clk_name,
+ pll_bit, CLK_SET_RATE_PARENT, 1,
+ pll_7nm->phy->cphy_mode ? 7 : 8);
+ if (IS_ERR(hw)) {
+ ret = PTR_ERR(hw);
+ goto fail;
+ }
+
+ provided_clocks[DSI_BYTE_PLL_CLK] = hw;
+
+ snprintf(clk_name, sizeof(clk_name), "dsi%d_pll_by_2_bit_clk", pll_7nm->phy->id);
+
+ pll_by_2_bit = devm_clk_hw_register_fixed_factor_parent_hw(dev,
+ clk_name, pll_bit, 0, 1, 2);
+ if (IS_ERR(pll_by_2_bit)) {
+ ret = PTR_ERR(pll_by_2_bit);
+ goto fail;
+ }
+
+ snprintf(clk_name, sizeof(clk_name), "dsi%d_pll_post_out_div_clk", pll_7nm->phy->id);
+
+ if (pll_7nm->phy->cphy_mode)
+ pll_post_out_div = devm_clk_hw_register_fixed_factor_parent_hw(
+ dev, clk_name, pll_out_div, 0, 2, 7);
+ else
+ pll_post_out_div = devm_clk_hw_register_fixed_factor_parent_hw(
+ dev, clk_name, pll_out_div, 0, 1, 4);
+ if (IS_ERR(pll_post_out_div)) {
+ ret = PTR_ERR(pll_post_out_div);
+ goto fail;
+ }
+
+ /* In CPHY mode, pclk_mux will always have post_out_div as parent,
+ * so don't register a pclk_mux clock and just use post_out_div instead
+ */
+ if (pll_7nm->phy->cphy_mode) {
+ u32 data;
+
+ data = dsi_phy_read(pll_7nm->phy->base + REG_DSI_7nm_PHY_CMN_CLK_CFG1);
+ dsi_phy_write(pll_7nm->phy->base + REG_DSI_7nm_PHY_CMN_CLK_CFG1, data | 3);
+
+ phy_pll_out_dsi_parent = pll_post_out_div;
+ } else {
+ snprintf(clk_name, sizeof(clk_name), "dsi%d_pclk_mux", pll_7nm->phy->id);
+
+ hw = devm_clk_hw_register_mux_parent_hws(dev, clk_name,
+ ((const struct clk_hw *[]){
+ pll_bit,
+ pll_by_2_bit,
+ }), 2, 0, pll_7nm->phy->base +
+ REG_DSI_7nm_PHY_CMN_CLK_CFG1,
+ 0, 1, 0, NULL);
+ if (IS_ERR(hw)) {
+ ret = PTR_ERR(hw);
+ goto fail;
+ }
+
+ phy_pll_out_dsi_parent = hw;
+ }
+
+ snprintf(clk_name, sizeof(clk_name), "dsi%d_phy_pll_out_dsiclk", pll_7nm->phy->id);
+
+ /* PIX CLK DIV: DIV_CTRL_7_4 */
+ hw = devm_clk_hw_register_divider_parent_hw(dev, clk_name,
+ phy_pll_out_dsi_parent, 0,
+ pll_7nm->phy->base + REG_DSI_7nm_PHY_CMN_CLK_CFG0,
+ 4, 4, CLK_DIVIDER_ONE_BASED, &pll_7nm->postdiv_lock);
+ if (IS_ERR(hw)) {
+ ret = PTR_ERR(hw);
+ goto fail;
+ }
+
+ provided_clocks[DSI_PIXEL_PLL_CLK] = hw;
+
+ return 0;
+
+fail:
+ return ret;
+}
+
+static int dsi_pll_7nm_init(struct msm_dsi_phy *phy)
+{
+ struct platform_device *pdev = phy->pdev;
+ struct dsi_pll_7nm *pll_7nm;
+ int ret;
+
+ pll_7nm = devm_kzalloc(&pdev->dev, sizeof(*pll_7nm), GFP_KERNEL);
+ if (!pll_7nm)
+ return -ENOMEM;
+
+ DBG("DSI PLL%d", phy->id);
+
+ pll_7nm_list[phy->id] = pll_7nm;
+
+ spin_lock_init(&pll_7nm->postdiv_lock);
+
+ pll_7nm->phy = phy;
+
+ ret = pll_7nm_register(pll_7nm, phy->provided_clocks->hws);
+ if (ret) {
+ DRM_DEV_ERROR(&pdev->dev, "failed to register PLL: %d\n", ret);
+ return ret;
+ }
+
+ phy->vco_hw = &pll_7nm->clk_hw;
+
+ /* TODO: Remove this when we have proper display handover support */
+ msm_dsi_phy_pll_save_state(phy);
+
+ return 0;
+}
+
+static int dsi_phy_hw_v4_0_is_pll_on(struct msm_dsi_phy *phy)
+{
+ void __iomem *base = phy->base;
+ u32 data = 0;
+
+ data = dsi_phy_read(base + REG_DSI_7nm_PHY_CMN_PLL_CNTRL);
+ mb(); /* make sure read happened */
+
+ return (data & BIT(0));
+}
+
+static void dsi_phy_hw_v4_0_config_lpcdrx(struct msm_dsi_phy *phy, bool enable)
+{
+ void __iomem *lane_base = phy->lane_base;
+ int phy_lane_0 = 0; /* TODO: Support all lane swap configs */
+
+ /*
+ * LPRX and CDRX need to be enabled only for the physical data lane
+ * corresponding to the logical data lane 0
+ */
+ if (enable)
+ dsi_phy_write(lane_base +
+ REG_DSI_7nm_PHY_LN_LPRX_CTRL(phy_lane_0), 0x3);
+ else
+ dsi_phy_write(lane_base +
+ REG_DSI_7nm_PHY_LN_LPRX_CTRL(phy_lane_0), 0);
+}
+
+static void dsi_phy_hw_v4_0_lane_settings(struct msm_dsi_phy *phy)
+{
+ int i;
+ const u8 tx_dctrl_0[] = { 0x00, 0x00, 0x00, 0x04, 0x01 };
+ const u8 tx_dctrl_1[] = { 0x40, 0x40, 0x40, 0x46, 0x41 };
+ const u8 *tx_dctrl = tx_dctrl_0;
+ void __iomem *lane_base = phy->lane_base;
+
+ if (phy->cfg->quirks & DSI_PHY_7NM_QUIRK_V4_1)
+ tx_dctrl = tx_dctrl_1;
+
+ /* Strength ctrl settings */
+ for (i = 0; i < 5; i++) {
+ /*
+ * Disable LPRX and CDRX for all lanes. Later on, they will be
+ * enabled only for the physical data lane corresponding to the
+ * logical data lane 0
+ */
+ dsi_phy_write(lane_base + REG_DSI_7nm_PHY_LN_LPRX_CTRL(i), 0);
+ dsi_phy_write(lane_base + REG_DSI_7nm_PHY_LN_PIN_SWAP(i), 0x0);
+ }
+
+ dsi_phy_hw_v4_0_config_lpcdrx(phy, true);
+
+ /* other settings */
+ for (i = 0; i < 5; i++) {
+ dsi_phy_write(lane_base + REG_DSI_7nm_PHY_LN_CFG0(i), 0x0);
+ dsi_phy_write(lane_base + REG_DSI_7nm_PHY_LN_CFG1(i), 0x0);
+ dsi_phy_write(lane_base + REG_DSI_7nm_PHY_LN_CFG2(i), i == 4 ? 0x8a : 0xa);
+ dsi_phy_write(lane_base + REG_DSI_7nm_PHY_LN_TX_DCTRL(i), tx_dctrl[i]);
+ }
+}
+
+static int dsi_7nm_phy_enable(struct msm_dsi_phy *phy,
+ struct msm_dsi_phy_clk_request *clk_req)
+{
+ int ret;
+ u32 status;
+ u32 const delay_us = 5;
+ u32 const timeout_us = 1000;
+ struct msm_dsi_dphy_timing *timing = &phy->timing;
+ void __iomem *base = phy->base;
+ bool less_than_1500_mhz;
+ u32 vreg_ctrl_0, vreg_ctrl_1, lane_ctrl0;
+ u32 glbl_pemph_ctrl_0;
+ u32 glbl_str_swi_cal_sel_ctrl, glbl_hstx_str_ctrl_0;
+ u32 glbl_rescode_top_ctrl, glbl_rescode_bot_ctrl;
+ u32 data;
+
+ DBG("");
+
+ if (phy->cphy_mode)
+ ret = msm_dsi_cphy_timing_calc_v4(timing, clk_req);
+ else
+ ret = msm_dsi_dphy_timing_calc_v4(timing, clk_req);
+ if (ret) {
+ DRM_DEV_ERROR(&phy->pdev->dev,
+ "%s: PHY timing calculation failed\n", __func__);
+ return -EINVAL;
+ }
+
+ if (dsi_phy_hw_v4_0_is_pll_on(phy))
+ pr_warn("PLL turned on before configuring PHY\n");
+
+ /* wait for REFGEN READY */
+ ret = readl_poll_timeout_atomic(base + REG_DSI_7nm_PHY_CMN_PHY_STATUS,
+ status, (status & BIT(0)),
+ delay_us, timeout_us);
+ if (ret) {
+ pr_err("Ref gen not ready. Aborting\n");
+ return -EINVAL;
+ }
+
+ /* TODO: CPHY enable path (this is for DPHY only) */
+
+ /* Alter PHY configurations if the data rate is less than 1.5 GHz */
+ less_than_1500_mhz = (clk_req->bitclk_rate <= 1500000000);
+
+ if (phy->cfg->quirks & DSI_PHY_7NM_QUIRK_V4_1) {
+ vreg_ctrl_0 = less_than_1500_mhz ? 0x53 : 0x52;
+ if (phy->cphy_mode) {
+ glbl_rescode_top_ctrl = 0x00;
+ glbl_rescode_bot_ctrl = 0x3c;
+ } else {
+ glbl_rescode_top_ctrl = less_than_1500_mhz ? 0x3d : 0x00;
+ glbl_rescode_bot_ctrl = less_than_1500_mhz ? 0x39 : 0x3c;
+ }
+ glbl_str_swi_cal_sel_ctrl = 0x00;
+ glbl_hstx_str_ctrl_0 = 0x88;
+ } else {
+ vreg_ctrl_0 = less_than_1500_mhz ? 0x5B : 0x59;
+ if (phy->cphy_mode) {
+ glbl_str_swi_cal_sel_ctrl = 0x03;
+ glbl_hstx_str_ctrl_0 = 0x66;
+ } else {
+ glbl_str_swi_cal_sel_ctrl = less_than_1500_mhz ? 0x03 : 0x00;
+ glbl_hstx_str_ctrl_0 = less_than_1500_mhz ? 0x66 : 0x88;
+ }
+ glbl_rescode_top_ctrl = 0x03;
+ glbl_rescode_bot_ctrl = 0x3c;
+ }
+
+ if (phy->cphy_mode) {
+ vreg_ctrl_0 = 0x51;
+ vreg_ctrl_1 = 0x55;
+ glbl_pemph_ctrl_0 = 0x11;
+ lane_ctrl0 = 0x17;
+ } else {
+ vreg_ctrl_1 = 0x5c;
+ glbl_pemph_ctrl_0 = 0x00;
+ lane_ctrl0 = 0x1f;
+ }
+
+ /* de-assert digital and pll power down */
+ data = BIT(6) | BIT(5);
+ dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_CTRL_0, data);
+
+ /* Assert PLL core reset */
+ dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_PLL_CNTRL, 0x00);
+
+ /* turn off resync FIFO */
+ dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_RBUF_CTRL, 0x00);
+
+ /* Program CMN_CTRL_4 on minor_ver 2 chipsets (REVISION_ID0 bits [7:4] == 2) */
+ data = dsi_phy_read(base + REG_DSI_7nm_PHY_CMN_REVISION_ID0);
+ data &= 0xf0;
+ if (data == 0x20)
+ dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_CTRL_4, 0x04);
+
+ /* Configure PHY lane swap (TODO: we need to calculate this) */
+ dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_LANE_CFG0, 0x21);
+ dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_LANE_CFG1, 0x84);
+
+ if (phy->cphy_mode)
+ dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_GLBL_CTRL, BIT(6));
+
+ /* Enable LDO */
+ dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_VREG_CTRL_0, vreg_ctrl_0);
+ dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_VREG_CTRL_1, vreg_ctrl_1);
+
+ dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_CTRL_3, 0x00);
+ dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_GLBL_STR_SWI_CAL_SEL_CTRL,
+ glbl_str_swi_cal_sel_ctrl);
+ dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_GLBL_HSTX_STR_CTRL_0,
+ glbl_hstx_str_ctrl_0);
+ dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_GLBL_PEMPH_CTRL_0,
+ glbl_pemph_ctrl_0);
+ if (phy->cphy_mode)
+ dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_GLBL_PEMPH_CTRL_1, 0x01);
+ dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_GLBL_RESCODE_OFFSET_TOP_CTRL,
+ glbl_rescode_top_ctrl);
+ dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_GLBL_RESCODE_OFFSET_BOT_CTRL,
+ glbl_rescode_bot_ctrl);
+ dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_GLBL_LPTX_STR_CTRL, 0x55);
+
+ /* Remove power down from all blocks */
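+ /* 0x7f sets the lane enables in bits [4:0] plus the digital/PLL bits [6:5] de-asserted above */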
+ dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_CTRL_0, 0x7f);
+
+ dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_LANE_CTRL0, lane_ctrl0);
+
+ /* Select full-rate mode */
+ if (!phy->cphy_mode)
+ dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_CTRL_2, 0x40);
+
+ ret = dsi_7nm_set_usecase(phy);
+ if (ret) {
+ DRM_DEV_ERROR(&phy->pdev->dev, "%s: set pll usecase failed, %d\n",
+ __func__, ret);
+ return ret;
+ }
+
+ /* DSI PHY timings */
+ if (phy->cphy_mode) {
+ dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_0, 0x00);
+ dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_4, timing->hs_exit);
+ dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_5,
+ timing->shared_timings.clk_pre);
+ dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_6, timing->clk_prepare);
+ dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_7,
+ timing->shared_timings.clk_post);
+ dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_8, timing->hs_rqst);
+ dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_9, 0x02);
+ dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_10, 0x04);
+ dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_11, 0x00);
+ } else {
+ dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_0, 0x00);
+ dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_1, timing->clk_zero);
+ dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_2, timing->clk_prepare);
+ dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_3, timing->clk_trail);
+ dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_4, timing->hs_exit);
+ dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_5, timing->hs_zero);
+ dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_6, timing->hs_prepare);
+ dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_7, timing->hs_trail);
+ dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_8, timing->hs_rqst);
+ dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_9, 0x02);
+ dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_10, 0x04);
+ dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_11, 0x00);
+ dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_12,
+ timing->shared_timings.clk_pre);
+ dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_13,
+ timing->shared_timings.clk_post);
+ }
+
+ /* DSI lane settings */
+ dsi_phy_hw_v4_0_lane_settings(phy);
+
+ DBG("DSI%d PHY enabled", phy->id);
+
+ return 0;
+}
+
+static bool dsi_7nm_set_continuous_clock(struct msm_dsi_phy *phy, bool enable)
+{
+ void __iomem *base = phy->base;
+ u32 data;
+
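+ /* LANE_CTRL1 bits 5 and 6 keep the clock lane toggling between transfers */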
+ data = dsi_phy_read(base + REG_DSI_7nm_PHY_CMN_LANE_CTRL1);
+ if (enable)
+ data |= BIT(5) | BIT(6);
+ else
+ data &= ~(BIT(5) | BIT(6));
+ dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_LANE_CTRL1, data);
+
+ return enable;
+}
+
+static void dsi_7nm_phy_disable(struct msm_dsi_phy *phy)
+{
+ void __iomem *base = phy->base;
+ u32 data;
+
+ DBG("");
+
+ if (dsi_phy_hw_v4_0_is_pll_on(phy))
+ pr_warn("Turning OFF PHY while PLL is on\n");
+
+ dsi_phy_hw_v4_0_config_lpcdrx(phy, false);
+ data = dsi_phy_read(base + REG_DSI_7nm_PHY_CMN_CTRL_0);
+
+ /* disable all lanes */
+ data &= ~0x1F;
+ dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_CTRL_0, data);
+ dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_LANE_CTRL0, 0);
+
+ /* Turn off all PHY blocks */
+ dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_CTRL_0, 0x00);
+ /* order the power-down writes before we return */
+ wmb();
+
+ DBG("DSI%d PHY disabled", phy->id);
+}
+
+static const struct regulator_bulk_data dsi_phy_7nm_36mA_regulators[] = {
+ { .supply = "vdds", .init_load_uA = 36000 },
+};
+
+static const struct regulator_bulk_data dsi_phy_7nm_37750uA_regulators[] = {
+ { .supply = "vdds", .init_load_uA = 37550 },
+};
+
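+/* Matched via the "qcom,dsi-phy-7nm" compatible in dsi_phy.c (e.g. SM8250) */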
+const struct msm_dsi_phy_cfg dsi_phy_7nm_cfgs = {
+ .has_phy_lane = true,
+ .regulator_data = dsi_phy_7nm_36mA_regulators,
+ .num_regulators = ARRAY_SIZE(dsi_phy_7nm_36mA_regulators),
+ .ops = {
+ .enable = dsi_7nm_phy_enable,
+ .disable = dsi_7nm_phy_disable,
+ .pll_init = dsi_pll_7nm_init,
+ .save_pll_state = dsi_7nm_pll_save_state,
+ .restore_pll_state = dsi_7nm_pll_restore_state,
+ .set_continuous_clock = dsi_7nm_set_continuous_clock,
+ },
+ .min_pll_rate = 600000000UL,
+#ifdef CONFIG_64BIT
+ .max_pll_rate = 5000000000UL,
+#else
+ .max_pll_rate = ULONG_MAX,
+#endif
+ .io_start = { 0xae94400, 0xae96400 },
+ .num_dsi_phy = 2,
+ .quirks = DSI_PHY_7NM_QUIRK_V4_1,
+};
+
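+/* Matched via the "qcom,dsi-phy-7nm-8150" compatible in dsi_phy.c */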
+const struct msm_dsi_phy_cfg dsi_phy_7nm_8150_cfgs = {
+ .has_phy_lane = true,
+ .regulator_data = dsi_phy_7nm_36mA_regulators,
+ .num_regulators = ARRAY_SIZE(dsi_phy_7nm_36mA_regulators),
+ .ops = {
+ .enable = dsi_7nm_phy_enable,
+ .disable = dsi_7nm_phy_disable,
+ .pll_init = dsi_pll_7nm_init,
+ .save_pll_state = dsi_7nm_pll_save_state,
+ .restore_pll_state = dsi_7nm_pll_restore_state,
+ .set_continuous_clock = dsi_7nm_set_continuous_clock,
+ },
+ .min_pll_rate = 1000000000UL,
+ .max_pll_rate = 3500000000UL,
+ .io_start = { 0xae94400, 0xae96400 },
+ .num_dsi_phy = 2,
+};
+
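+/* Matched via the "qcom,sm7280-dsi-phy-7nm" compatible in dsi_phy.c */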
+const struct msm_dsi_phy_cfg dsi_phy_7nm_7280_cfgs = {
+ .has_phy_lane = true,
+ .regulator_data = dsi_phy_7nm_37750uA_regulators,
+ .num_regulators = ARRAY_SIZE(dsi_phy_7nm_37750uA_regulators),
+ .ops = {
+ .enable = dsi_7nm_phy_enable,
+ .disable = dsi_7nm_phy_disable,
+ .pll_init = dsi_pll_7nm_init,
+ .save_pll_state = dsi_7nm_pll_save_state,
+ .restore_pll_state = dsi_7nm_pll_restore_state,
+ },
+ .min_pll_rate = 600000000UL,
+#ifdef CONFIG_64BIT
+ .max_pll_rate = 5000000000UL,
+#else
+ .max_pll_rate = ULONG_MAX,
+#endif
+ .io_start = { 0xae94400 },
+ .num_dsi_phy = 1,
+ .quirks = DSI_PHY_7NM_QUIRK_V4_1,
+};