author    Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-11 08:27:49 +0000
committer Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-11 08:27:49 +0000
commit    ace9429bb58fd418f0c81d4c2835699bddf6bde6 (patch)
tree      b2d64bc10158fdd5497876388cd68142ca374ed3 /drivers/gpu/drm/i915/soc
parent    Initial commit. (diff)
download  linux-ace9429bb58fd418f0c81d4c2835699bddf6bde6.tar.xz
          linux-ace9429bb58fd418f0c81d4c2835699bddf6bde6.zip
Adding upstream version 6.6.15. (tag: upstream/6.6.15)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'drivers/gpu/drm/i915/soc')
-rw-r--r--  drivers/gpu/drm/i915/soc/intel_dram.c  724
-rw-r--r--  drivers/gpu/drm/i915/soc/intel_dram.h   14
-rw-r--r--  drivers/gpu/drm/i915/soc/intel_gmch.c  169
-rw-r--r--  drivers/gpu/drm/i915/soc/intel_gmch.h   18
-rw-r--r--  drivers/gpu/drm/i915/soc/intel_pch.c   281
-rw-r--r--  drivers/gpu/drm/i915/soc/intel_pch.h    91
6 files changed, 1297 insertions(+), 0 deletions(-)
diff --git a/drivers/gpu/drm/i915/soc/intel_dram.c b/drivers/gpu/drm/i915/soc/intel_dram.c
new file mode 100644
index 0000000000..15492b69f6
--- /dev/null
+++ b/drivers/gpu/drm/i915/soc/intel_dram.c
@@ -0,0 +1,724 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+
+#include <linux/string_helpers.h>
+
+#include "i915_drv.h"
+#include "i915_reg.h"
+#include "intel_dram.h"
+#include "intel_mchbar_regs.h"
+#include "intel_pcode.h"
+#include "vlv_sideband.h"
+
+struct dram_dimm_info {
+ u16 size;
+ u8 width, ranks;
+};
+
+struct dram_channel_info {
+ struct dram_dimm_info dimm_l, dimm_s;
+ u8 ranks;
+ bool is_16gb_dimm;
+};
+
+#define DRAM_TYPE_STR(type) [INTEL_DRAM_ ## type] = #type
+
+static const char *intel_dram_type_str(enum intel_dram_type type)
+{
+ static const char * const str[] = {
+ DRAM_TYPE_STR(UNKNOWN),
+ DRAM_TYPE_STR(DDR3),
+ DRAM_TYPE_STR(DDR4),
+ DRAM_TYPE_STR(LPDDR3),
+ DRAM_TYPE_STR(LPDDR4),
+ };
+
+ if (type >= ARRAY_SIZE(str))
+ type = INTEL_DRAM_UNKNOWN;
+
+ return str[type];
+}
+
+#undef DRAM_TYPE_STR
+
+static void pnv_detect_mem_freq(struct drm_i915_private *dev_priv)
+{
+ u32 tmp;
+
+ tmp = intel_uncore_read(&dev_priv->uncore, CLKCFG);
+
+ switch (tmp & CLKCFG_FSB_MASK) {
+ case CLKCFG_FSB_533:
+ dev_priv->fsb_freq = 533; /* 133*4 */
+ break;
+ case CLKCFG_FSB_800:
+ dev_priv->fsb_freq = 800; /* 200*4 */
+ break;
+ case CLKCFG_FSB_667:
+ dev_priv->fsb_freq = 667; /* 167*4 */
+ break;
+ case CLKCFG_FSB_400:
+ dev_priv->fsb_freq = 400; /* 100*4 */
+ break;
+ }
+
+ switch (tmp & CLKCFG_MEM_MASK) {
+ case CLKCFG_MEM_533:
+ dev_priv->mem_freq = 533;
+ break;
+ case CLKCFG_MEM_667:
+ dev_priv->mem_freq = 667;
+ break;
+ case CLKCFG_MEM_800:
+ dev_priv->mem_freq = 800;
+ break;
+ }
+
+ /* detect pineview DDR3 setting */
+ tmp = intel_uncore_read(&dev_priv->uncore, CSHRDDR3CTL);
+ dev_priv->is_ddr3 = (tmp & CSHRDDR3CTL_DDR3) ? 1 : 0;
+}
+
+static void ilk_detect_mem_freq(struct drm_i915_private *dev_priv)
+{
+ u16 ddrpll, csipll;
+
+ ddrpll = intel_uncore_read16(&dev_priv->uncore, DDRMPLL1);
+ switch (ddrpll & 0xff) {
+ case 0xc:
+ dev_priv->mem_freq = 800;
+ break;
+ case 0x10:
+ dev_priv->mem_freq = 1066;
+ break;
+ case 0x14:
+ dev_priv->mem_freq = 1333;
+ break;
+ case 0x18:
+ dev_priv->mem_freq = 1600;
+ break;
+ default:
+ drm_dbg(&dev_priv->drm, "unknown memory frequency 0x%02x\n",
+ ddrpll & 0xff);
+ dev_priv->mem_freq = 0;
+ break;
+ }
+
+ csipll = intel_uncore_read16(&dev_priv->uncore, CSIPLL0);
+ switch (csipll & 0x3ff) {
+ case 0x00c:
+ dev_priv->fsb_freq = 3200;
+ break;
+ case 0x00e:
+ dev_priv->fsb_freq = 3733;
+ break;
+ case 0x010:
+ dev_priv->fsb_freq = 4266;
+ break;
+ case 0x012:
+ dev_priv->fsb_freq = 4800;
+ break;
+ case 0x014:
+ dev_priv->fsb_freq = 5333;
+ break;
+ case 0x016:
+ dev_priv->fsb_freq = 5866;
+ break;
+ case 0x018:
+ dev_priv->fsb_freq = 6400;
+ break;
+ default:
+ drm_dbg(&dev_priv->drm, "unknown fsb frequency 0x%04x\n",
+ csipll & 0x3ff);
+ dev_priv->fsb_freq = 0;
+ break;
+ }
+}
+
+static void chv_detect_mem_freq(struct drm_i915_private *i915)
+{
+ u32 val;
+
+ vlv_iosf_sb_get(i915, BIT(VLV_IOSF_SB_CCK));
+ val = vlv_cck_read(i915, CCK_FUSE_REG);
+ vlv_iosf_sb_put(i915, BIT(VLV_IOSF_SB_CCK));
+
+ switch ((val >> 2) & 0x7) {
+ case 3:
+ i915->mem_freq = 2000;
+ break;
+ default:
+ i915->mem_freq = 1600;
+ break;
+ }
+}
+
+static void vlv_detect_mem_freq(struct drm_i915_private *i915)
+{
+ u32 val;
+
+ vlv_iosf_sb_get(i915, BIT(VLV_IOSF_SB_PUNIT));
+ val = vlv_punit_read(i915, PUNIT_REG_GPU_FREQ_STS);
+ vlv_iosf_sb_put(i915, BIT(VLV_IOSF_SB_PUNIT));
+
+ switch ((val >> 6) & 3) {
+ case 0:
+ case 1:
+ i915->mem_freq = 800;
+ break;
+ case 2:
+ i915->mem_freq = 1066;
+ break;
+ case 3:
+ i915->mem_freq = 1333;
+ break;
+ }
+}
+
+static void detect_mem_freq(struct drm_i915_private *i915)
+{
+ if (IS_PINEVIEW(i915))
+ pnv_detect_mem_freq(i915);
+ else if (GRAPHICS_VER(i915) == 5)
+ ilk_detect_mem_freq(i915);
+ else if (IS_CHERRYVIEW(i915))
+ chv_detect_mem_freq(i915);
+ else if (IS_VALLEYVIEW(i915))
+ vlv_detect_mem_freq(i915);
+
+ if (i915->mem_freq)
+ drm_dbg(&i915->drm, "DDR speed: %d MHz\n", i915->mem_freq);
+}
+
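+/*
+ * Each rank drives a 64-bit wide channel, so a DIMM carries
+ * ranks * 64 / width DRAM devices: e.g. a dual-rank x8 DIMM has
+ * 2 * 64 / 8 = 16 devices.
+ */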
+static int intel_dimm_num_devices(const struct dram_dimm_info *dimm)
+{
+ return dimm->ranks * 64 / (dimm->width ?: 1);
+}
+
+/* Returns total Gb for the whole DIMM */
+static int skl_get_dimm_size(u16 val)
+{
+ return (val & SKL_DRAM_SIZE_MASK) * 8;
+}
+
+static int skl_get_dimm_width(u16 val)
+{
+ if (skl_get_dimm_size(val) == 0)
+ return 0;
+
+ switch (val & SKL_DRAM_WIDTH_MASK) {
+ case SKL_DRAM_WIDTH_X8:
+ case SKL_DRAM_WIDTH_X16:
+ case SKL_DRAM_WIDTH_X32:
+ val = (val & SKL_DRAM_WIDTH_MASK) >> SKL_DRAM_WIDTH_SHIFT;
+ return 8 << val;
+ default:
+ MISSING_CASE(val);
+ return 0;
+ }
+}
+
+static int skl_get_dimm_ranks(u16 val)
+{
+ if (skl_get_dimm_size(val) == 0)
+ return 0;
+
+ val = (val & SKL_DRAM_RANK_MASK) >> SKL_DRAM_RANK_SHIFT;
+
+ return val + 1;
+}
+
+/* Returns total Gb for the whole DIMM */
+static int icl_get_dimm_size(u16 val)
+{
+ return (val & ICL_DRAM_SIZE_MASK) * 8 / 2;
+}
+
+static int icl_get_dimm_width(u16 val)
+{
+ if (icl_get_dimm_size(val) == 0)
+ return 0;
+
+ switch (val & ICL_DRAM_WIDTH_MASK) {
+ case ICL_DRAM_WIDTH_X8:
+ case ICL_DRAM_WIDTH_X16:
+ case ICL_DRAM_WIDTH_X32:
+ val = (val & ICL_DRAM_WIDTH_MASK) >> ICL_DRAM_WIDTH_SHIFT;
+ return 8 << val;
+ default:
+ MISSING_CASE(val);
+ return 0;
+ }
+}
+
+static int icl_get_dimm_ranks(u16 val)
+{
+ if (icl_get_dimm_size(val) == 0)
+ return 0;
+
+ val = (val & ICL_DRAM_RANK_MASK) >> ICL_DRAM_RANK_SHIFT;
+
+ return val + 1;
+}
+
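+/*
+ * "16Gb DIMM" means 16Gb per DRAM device: e.g. a dual-rank x8 DIMM
+ * reporting 256Gb total has 256 / (2 * 64 / 8) = 16Gb devices.
+ */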
+static bool
+skl_is_16gb_dimm(const struct dram_dimm_info *dimm)
+{
+ /* Convert total Gb to Gb per DRAM device */
+ return dimm->size / (intel_dimm_num_devices(dimm) ?: 1) == 16;
+}
+
+static void
+skl_dram_get_dimm_info(struct drm_i915_private *i915,
+ struct dram_dimm_info *dimm,
+ int channel, char dimm_name, u16 val)
+{
+ if (GRAPHICS_VER(i915) >= 11) {
+ dimm->size = icl_get_dimm_size(val);
+ dimm->width = icl_get_dimm_width(val);
+ dimm->ranks = icl_get_dimm_ranks(val);
+ } else {
+ dimm->size = skl_get_dimm_size(val);
+ dimm->width = skl_get_dimm_width(val);
+ dimm->ranks = skl_get_dimm_ranks(val);
+ }
+
+ drm_dbg_kms(&i915->drm,
+ "CH%u DIMM %c size: %u Gb, width: X%u, ranks: %u, 16Gb DIMMs: %s\n",
+ channel, dimm_name, dimm->size, dimm->width, dimm->ranks,
+ str_yes_no(skl_is_16gb_dimm(dimm)));
+}
+
+static int
+skl_dram_get_channel_info(struct drm_i915_private *i915,
+ struct dram_channel_info *ch,
+ int channel, u32 val)
+{
+ skl_dram_get_dimm_info(i915, &ch->dimm_l,
+ channel, 'L', val & 0xffff);
+ skl_dram_get_dimm_info(i915, &ch->dimm_s,
+ channel, 'S', val >> 16);
+
+ if (ch->dimm_l.size == 0 && ch->dimm_s.size == 0) {
+ drm_dbg_kms(&i915->drm, "CH%u not populated\n", channel);
+ return -EINVAL;
+ }
+
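+ /*
+ * The channel is dual rank either if one DIMM is dual rank, or if
+ * both slots are populated with single rank DIMMs (the two DIMMs
+ * together then provide two ranks).
+ */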
+ if (ch->dimm_l.ranks == 2 || ch->dimm_s.ranks == 2)
+ ch->ranks = 2;
+ else if (ch->dimm_l.ranks == 1 && ch->dimm_s.ranks == 1)
+ ch->ranks = 2;
+ else
+ ch->ranks = 1;
+
+ ch->is_16gb_dimm = skl_is_16gb_dimm(&ch->dimm_l) ||
+ skl_is_16gb_dimm(&ch->dimm_s);
+
+ drm_dbg_kms(&i915->drm, "CH%u ranks: %u, 16Gb DIMMs: %s\n",
+ channel, ch->ranks, str_yes_no(ch->is_16gb_dimm));
+
+ return 0;
+}
+
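+/*
+ * Both channels must report identical DIMM info, and within a channel
+ * either only the L slot may be populated or both slots must carry
+ * identical DIMMs.
+ */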
+static bool
+intel_is_dram_symmetric(const struct dram_channel_info *ch0,
+ const struct dram_channel_info *ch1)
+{
+ return !memcmp(ch0, ch1, sizeof(*ch0)) &&
+ (ch0->dimm_s.size == 0 ||
+ !memcmp(&ch0->dimm_l, &ch0->dimm_s, sizeof(ch0->dimm_l)));
+}
+
+static int
+skl_dram_get_channels_info(struct drm_i915_private *i915)
+{
+ struct dram_info *dram_info = &i915->dram_info;
+ struct dram_channel_info ch0 = {}, ch1 = {};
+ u32 val;
+ int ret;
+
+ val = intel_uncore_read(&i915->uncore,
+ SKL_MAD_DIMM_CH0_0_0_0_MCHBAR_MCMAIN);
+ ret = skl_dram_get_channel_info(i915, &ch0, 0, val);
+ if (ret == 0)
+ dram_info->num_channels++;
+
+ val = intel_uncore_read(&i915->uncore,
+ SKL_MAD_DIMM_CH1_0_0_0_MCHBAR_MCMAIN);
+ ret = skl_dram_get_channel_info(i915, &ch1, 1, val);
+ if (ret == 0)
+ dram_info->num_channels++;
+
+ if (dram_info->num_channels == 0) {
+ drm_info(&i915->drm, "Number of memory channels is zero\n");
+ return -EINVAL;
+ }
+
+ if (ch0.ranks == 0 && ch1.ranks == 0) {
+ drm_info(&i915->drm, "couldn't get memory rank information\n");
+ return -EINVAL;
+ }
+
+ dram_info->wm_lv_0_adjust_needed = ch0.is_16gb_dimm || ch1.is_16gb_dimm;
+
+ dram_info->symmetric_memory = intel_is_dram_symmetric(&ch0, &ch1);
+
+ drm_dbg_kms(&i915->drm, "Memory configuration is symmetric? %s\n",
+ str_yes_no(dram_info->symmetric_memory));
+
+ return 0;
+}
+
+static enum intel_dram_type
+skl_get_dram_type(struct drm_i915_private *i915)
+{
+ u32 val;
+
+ val = intel_uncore_read(&i915->uncore,
+ SKL_MAD_INTER_CHANNEL_0_0_0_MCHBAR_MCMAIN);
+
+ switch (val & SKL_DRAM_DDR_TYPE_MASK) {
+ case SKL_DRAM_DDR_TYPE_DDR3:
+ return INTEL_DRAM_DDR3;
+ case SKL_DRAM_DDR_TYPE_DDR4:
+ return INTEL_DRAM_DDR4;
+ case SKL_DRAM_DDR_TYPE_LPDDR3:
+ return INTEL_DRAM_LPDDR3;
+ case SKL_DRAM_DDR_TYPE_LPDDR4:
+ return INTEL_DRAM_LPDDR4;
+ default:
+ MISSING_CASE(val);
+ return INTEL_DRAM_UNKNOWN;
+ }
+}
+
+static int
+skl_get_dram_info(struct drm_i915_private *i915)
+{
+ struct dram_info *dram_info = &i915->dram_info;
+ int ret;
+
+ dram_info->type = skl_get_dram_type(i915);
+ drm_dbg_kms(&i915->drm, "DRAM type: %s\n",
+ intel_dram_type_str(dram_info->type));
+
+ ret = skl_dram_get_channels_info(i915);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+/* Returns Gb per DRAM device */
+static int bxt_get_dimm_size(u32 val)
+{
+ switch (val & BXT_DRAM_SIZE_MASK) {
+ case BXT_DRAM_SIZE_4GBIT:
+ return 4;
+ case BXT_DRAM_SIZE_6GBIT:
+ return 6;
+ case BXT_DRAM_SIZE_8GBIT:
+ return 8;
+ case BXT_DRAM_SIZE_12GBIT:
+ return 12;
+ case BXT_DRAM_SIZE_16GBIT:
+ return 16;
+ default:
+ MISSING_CASE(val);
+ return 0;
+ }
+}
+
+static int bxt_get_dimm_width(u32 val)
+{
+ if (!bxt_get_dimm_size(val))
+ return 0;
+
+ val = (val & BXT_DRAM_WIDTH_MASK) >> BXT_DRAM_WIDTH_SHIFT;
+
+ return 8 << val;
+}
+
+static int bxt_get_dimm_ranks(u32 val)
+{
+ if (!bxt_get_dimm_size(val))
+ return 0;
+
+ switch (val & BXT_DRAM_RANK_MASK) {
+ case BXT_DRAM_RANK_SINGLE:
+ return 1;
+ case BXT_DRAM_RANK_DUAL:
+ return 2;
+ default:
+ MISSING_CASE(val);
+ return 0;
+ }
+}
+
+static enum intel_dram_type bxt_get_dimm_type(u32 val)
+{
+ if (!bxt_get_dimm_size(val))
+ return INTEL_DRAM_UNKNOWN;
+
+ switch (val & BXT_DRAM_TYPE_MASK) {
+ case BXT_DRAM_TYPE_DDR3:
+ return INTEL_DRAM_DDR3;
+ case BXT_DRAM_TYPE_LPDDR3:
+ return INTEL_DRAM_LPDDR3;
+ case BXT_DRAM_TYPE_DDR4:
+ return INTEL_DRAM_DDR4;
+ case BXT_DRAM_TYPE_LPDDR4:
+ return INTEL_DRAM_LPDDR4;
+ default:
+ MISSING_CASE(val);
+ return INTEL_DRAM_UNKNOWN;
+ }
+}
+
+static void bxt_get_dimm_info(struct dram_dimm_info *dimm, u32 val)
+{
+ dimm->width = bxt_get_dimm_width(val);
+ dimm->ranks = bxt_get_dimm_ranks(val);
+
+ /*
+ * Size in register is Gb per DRAM device. Convert to total
+ * Gb to match the way we report this for non-LP platforms.
+ */
+ dimm->size = bxt_get_dimm_size(val) * intel_dimm_num_devices(dimm);
+}
+
+static int bxt_get_dram_info(struct drm_i915_private *i915)
+{
+ struct dram_info *dram_info = &i915->dram_info;
+ u32 val;
+ u8 valid_ranks = 0;
+ int i;
+
+ /*
+ * Now read each DUNIT8/9/10/11 to check the rank of each DIMM.
+ */
+ for (i = BXT_D_CR_DRP0_DUNIT_START; i <= BXT_D_CR_DRP0_DUNIT_END; i++) {
+ struct dram_dimm_info dimm;
+ enum intel_dram_type type;
+
+ val = intel_uncore_read(&i915->uncore, BXT_D_CR_DRP0_DUNIT(i));
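+ /* A DUNIT that reads back as all ones is not populated, skip it */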
+ if (val == 0xFFFFFFFF)
+ continue;
+
+ dram_info->num_channels++;
+
+ bxt_get_dimm_info(&dimm, val);
+ type = bxt_get_dimm_type(val);
+
+ drm_WARN_ON(&i915->drm, type != INTEL_DRAM_UNKNOWN &&
+ dram_info->type != INTEL_DRAM_UNKNOWN &&
+ dram_info->type != type);
+
+ drm_dbg_kms(&i915->drm,
+ "CH%u DIMM size: %u Gb, width: X%u, ranks: %u, type: %s\n",
+ i - BXT_D_CR_DRP0_DUNIT_START,
+ dimm.size, dimm.width, dimm.ranks,
+ intel_dram_type_str(type));
+
+ if (valid_ranks == 0)
+ valid_ranks = dimm.ranks;
+
+ if (type != INTEL_DRAM_UNKNOWN)
+ dram_info->type = type;
+ }
+
+ if (dram_info->type == INTEL_DRAM_UNKNOWN || valid_ranks == 0) {
+ drm_info(&i915->drm, "couldn't get memory information\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int icl_pcode_read_mem_global_info(struct drm_i915_private *dev_priv)
+{
+ struct dram_info *dram_info = &dev_priv->dram_info;
+ u32 val = 0;
+ int ret;
+
+ ret = snb_pcode_read(&dev_priv->uncore, ICL_PCODE_MEM_SUBSYSYSTEM_INFO |
+ ICL_PCODE_MEM_SS_READ_GLOBAL_INFO, &val, NULL);
+ if (ret)
+ return ret;
+
+ if (GRAPHICS_VER(dev_priv) == 12) {
+ switch (val & 0xf) {
+ case 0:
+ dram_info->type = INTEL_DRAM_DDR4;
+ break;
+ case 1:
+ dram_info->type = INTEL_DRAM_DDR5;
+ break;
+ case 2:
+ dram_info->type = INTEL_DRAM_LPDDR5;
+ break;
+ case 3:
+ dram_info->type = INTEL_DRAM_LPDDR4;
+ break;
+ case 4:
+ dram_info->type = INTEL_DRAM_DDR3;
+ break;
+ case 5:
+ dram_info->type = INTEL_DRAM_LPDDR3;
+ break;
+ default:
+ MISSING_CASE(val & 0xf);
+ return -EINVAL;
+ }
+ } else {
+ switch (val & 0xf) {
+ case 0:
+ dram_info->type = INTEL_DRAM_DDR4;
+ break;
+ case 1:
+ dram_info->type = INTEL_DRAM_DDR3;
+ break;
+ case 2:
+ dram_info->type = INTEL_DRAM_LPDDR3;
+ break;
+ case 3:
+ dram_info->type = INTEL_DRAM_LPDDR4;
+ break;
+ default:
+ MISSING_CASE(val & 0xf);
+ return -EINVAL;
+ }
+ }
+
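+ /* Remaining fields: bits 7:4 channel count, 11:8 QGV points, 13:12 PSF GV points */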
+ dram_info->num_channels = (val & 0xf0) >> 4;
+ dram_info->num_qgv_points = (val & 0xf00) >> 8;
+ dram_info->num_psf_gv_points = (val & 0x3000) >> 12;
+
+ return 0;
+}
+
+static int gen11_get_dram_info(struct drm_i915_private *i915)
+{
+ int ret = skl_get_dram_info(i915);
+
+ if (ret)
+ return ret;
+
+ return icl_pcode_read_mem_global_info(i915);
+}
+
+static int gen12_get_dram_info(struct drm_i915_private *i915)
+{
+ i915->dram_info.wm_lv_0_adjust_needed = false;
+
+ return icl_pcode_read_mem_global_info(i915);
+}
+
+static int xelpdp_get_dram_info(struct drm_i915_private *i915)
+{
+ u32 val = intel_uncore_read(&i915->uncore, MTL_MEM_SS_INFO_GLOBAL);
+ struct dram_info *dram_info = &i915->dram_info;
+
+ switch (REG_FIELD_GET(MTL_DDR_TYPE_MASK, val)) {
+ case 0:
+ dram_info->type = INTEL_DRAM_DDR4;
+ break;
+ case 1:
+ dram_info->type = INTEL_DRAM_DDR5;
+ break;
+ case 2:
+ dram_info->type = INTEL_DRAM_LPDDR5;
+ break;
+ case 3:
+ dram_info->type = INTEL_DRAM_LPDDR4;
+ break;
+ case 4:
+ dram_info->type = INTEL_DRAM_DDR3;
+ break;
+ case 5:
+ dram_info->type = INTEL_DRAM_LPDDR3;
+ break;
+ default:
+ MISSING_CASE(val);
+ return -EINVAL;
+ }
+
+ dram_info->num_channels = REG_FIELD_GET(MTL_N_OF_POPULATED_CH_MASK, val);
+ dram_info->num_qgv_points = REG_FIELD_GET(MTL_N_OF_ENABLED_QGV_POINTS_MASK, val);
+ /* PSF GV points not supported in D14+ */
+
+ return 0;
+}
+
+void intel_dram_detect(struct drm_i915_private *i915)
+{
+ struct dram_info *dram_info = &i915->dram_info;
+ int ret;
+
+ detect_mem_freq(i915);
+
+ if (GRAPHICS_VER(i915) < 9 || IS_DG2(i915) || !HAS_DISPLAY(i915))
+ return;
+
+ /*
+ * Assume level 0 watermark latency adjustment is needed until proven
+ * otherwise, this w/a is not needed by bxt/glk.
+ */
+ dram_info->wm_lv_0_adjust_needed = !IS_GEN9_LP(i915);
+
+ if (DISPLAY_VER(i915) >= 14)
+ ret = xelpdp_get_dram_info(i915);
+ else if (GRAPHICS_VER(i915) >= 12)
+ ret = gen12_get_dram_info(i915);
+ else if (GRAPHICS_VER(i915) >= 11)
+ ret = gen11_get_dram_info(i915);
+ else if (IS_GEN9_LP(i915))
+ ret = bxt_get_dram_info(i915);
+ else
+ ret = skl_get_dram_info(i915);
+ if (ret)
+ return;
+
+ drm_dbg_kms(&i915->drm, "DRAM channels: %u\n", dram_info->num_channels);
+
+ drm_dbg_kms(&i915->drm, "Watermark level 0 adjustment needed: %s\n",
+ str_yes_no(dram_info->wm_lv_0_adjust_needed));
+}
+
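+/*
+ * The bank/way/set product yields the size directly in MB, i.e. each
+ * bank/way/set combination contributes 1MB of eDRAM.
+ */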
+static u32 gen9_edram_size_mb(struct drm_i915_private *i915, u32 cap)
+{
+ static const u8 ways[8] = { 4, 8, 12, 16, 16, 16, 16, 16 };
+ static const u8 sets[4] = { 1, 1, 2, 2 };
+
+ return EDRAM_NUM_BANKS(cap) *
+ ways[EDRAM_WAYS_IDX(cap)] *
+ sets[EDRAM_SETS_IDX(cap)];
+}
+
+void intel_dram_edram_detect(struct drm_i915_private *i915)
+{
+ u32 edram_cap = 0;
+
+ if (!(IS_HASWELL(i915) || IS_BROADWELL(i915) || GRAPHICS_VER(i915) >= 9))
+ return;
+
+ edram_cap = intel_uncore_read_fw(&i915->uncore, HSW_EDRAM_CAP);
+
+ /* NB: We can't write IDICR yet because we don't have gt funcs set up */
+
+ if (!(edram_cap & EDRAM_ENABLED))
+ return;
+
+ /*
+ * The capability bits needed for the size calculation are not present
+ * before gen9, so always return 128MB.
+ */
+ if (GRAPHICS_VER(i915) < 9)
+ i915->edram_size_mb = 128;
+ else
+ i915->edram_size_mb = gen9_edram_size_mb(i915, edram_cap);
+
+ drm_info(&i915->drm, "Found %uMB of eDRAM\n", i915->edram_size_mb);
+}
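
A quick sanity check of the SKL DIMM decoding above can be done in user
space. A minimal sketch, assuming the bit layout from intel_mchbar_regs.h
(not part of this diff: size in bits 5:0 in units of 8 Gb, width in bits
9:8, rank in bit 10) and a made-up register word:

#include <stdio.h>
#include <stdint.h>

#define SKL_DRAM_SIZE_MASK   0x3f
#define SKL_DRAM_WIDTH_MASK  (0x3 << 8)
#define SKL_DRAM_WIDTH_SHIFT 8
#define SKL_DRAM_RANK_MASK   (0x1 << 10)
#define SKL_DRAM_RANK_SHIFT  10

int main(void)
{
	uint16_t val = 0x0408; /* made-up: 64 Gb total, x8, dual rank */
	int size = (val & SKL_DRAM_SIZE_MASK) * 8;
	int width = 8 << ((val & SKL_DRAM_WIDTH_MASK) >> SKL_DRAM_WIDTH_SHIFT);
	int ranks = ((val & SKL_DRAM_RANK_MASK) >> SKL_DRAM_RANK_SHIFT) + 1;
	int devices = ranks * 64 / width;

	/* Prints: size 64 Gb, x8, 2 ranks, 16 devices, 4 Gb/device */
	printf("size %d Gb, x%d, %d ranks, %d devices, %d Gb/device\n",
	       size, width, ranks, devices, size / devices);
	return 0;
}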
diff --git a/drivers/gpu/drm/i915/soc/intel_dram.h b/drivers/gpu/drm/i915/soc/intel_dram.h
new file mode 100644
index 0000000000..4ba13c1316
--- /dev/null
+++ b/drivers/gpu/drm/i915/soc/intel_dram.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+
+#ifndef __INTEL_DRAM_H__
+#define __INTEL_DRAM_H__
+
+struct drm_i915_private;
+
+void intel_dram_edram_detect(struct drm_i915_private *i915);
+void intel_dram_detect(struct drm_i915_private *i915);
+
+#endif /* __INTEL_DRAM_H__ */
diff --git a/drivers/gpu/drm/i915/soc/intel_gmch.c b/drivers/gpu/drm/i915/soc/intel_gmch.c
new file mode 100644
index 0000000000..49c7fb16e9
--- /dev/null
+++ b/drivers/gpu/drm/i915/soc/intel_gmch.c
@@ -0,0 +1,169 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#include <linux/pci.h>
+#include <linux/pnp.h>
+
+#include <drm/drm_managed.h>
+#include <drm/i915_drm.h>
+
+#include "i915_drv.h"
+#include "intel_gmch.h"
+#include "intel_pci_config.h"
+
+static void intel_gmch_bridge_release(struct drm_device *dev, void *bridge)
+{
+ pci_dev_put(bridge);
+}
+
+int intel_gmch_bridge_setup(struct drm_i915_private *i915)
+{
+ int domain = pci_domain_nr(to_pci_dev(i915->drm.dev)->bus);
+
+ i915->gmch.pdev = pci_get_domain_bus_and_slot(domain, 0, PCI_DEVFN(0, 0));
+ if (!i915->gmch.pdev) {
+ drm_err(&i915->drm, "bridge device not found\n");
+ return -EIO;
+ }
+
+ return drmm_add_action_or_reset(&i915->drm, intel_gmch_bridge_release,
+ i915->gmch.pdev);
+}
+
+/* Allocate space for the MCH regs if needed, return nonzero on error */
+static int
+intel_alloc_mchbar_resource(struct drm_i915_private *i915)
+{
+ int reg = GRAPHICS_VER(i915) >= 4 ? MCHBAR_I965 : MCHBAR_I915;
+ u32 temp_lo, temp_hi = 0;
+ u64 mchbar_addr;
+ int ret;
+
+ if (GRAPHICS_VER(i915) >= 4)
+ pci_read_config_dword(i915->gmch.pdev, reg + 4, &temp_hi);
+ pci_read_config_dword(i915->gmch.pdev, reg, &temp_lo);
+ mchbar_addr = ((u64)temp_hi << 32) | temp_lo;
+
+ /* If ACPI doesn't have it, assume we need to allocate it ourselves */
+ if (IS_ENABLED(CONFIG_PNP) && mchbar_addr &&
+ pnp_range_reserved(mchbar_addr, mchbar_addr + MCHBAR_SIZE))
+ return 0;
+
+ /* Get some space for it */
+ i915->gmch.mch_res.name = "i915 MCHBAR";
+ i915->gmch.mch_res.flags = IORESOURCE_MEM;
+ ret = pci_bus_alloc_resource(i915->gmch.pdev->bus,
+ &i915->gmch.mch_res,
+ MCHBAR_SIZE, MCHBAR_SIZE,
+ PCIBIOS_MIN_MEM,
+ 0, pcibios_align_resource,
+ i915->gmch.pdev);
+ if (ret) {
+ drm_dbg(&i915->drm, "failed bus alloc: %d\n", ret);
+ i915->gmch.mch_res.start = 0;
+ return ret;
+ }
+
+ if (GRAPHICS_VER(i915) >= 4)
+ pci_write_config_dword(i915->gmch.pdev, reg + 4,
+ upper_32_bits(i915->gmch.mch_res.start));
+
+ pci_write_config_dword(i915->gmch.pdev, reg,
+ lower_32_bits(i915->gmch.mch_res.start));
+ return 0;
+}
+
+/* Set up MCHBAR if possible; flag whether we need to disable it again on teardown */
+void intel_gmch_bar_setup(struct drm_i915_private *i915)
+{
+ int mchbar_reg = GRAPHICS_VER(i915) >= 4 ? MCHBAR_I965 : MCHBAR_I915;
+ u32 temp;
+ bool enabled;
+
+ if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915))
+ return;
+
+ i915->gmch.mchbar_need_disable = false;
+
+ if (IS_I915G(i915) || IS_I915GM(i915)) {
+ pci_read_config_dword(i915->gmch.pdev, DEVEN, &temp);
+ enabled = !!(temp & DEVEN_MCHBAR_EN);
+ } else {
+ pci_read_config_dword(i915->gmch.pdev, mchbar_reg, &temp);
+ enabled = temp & 1;
+ }
+
+ /* If it's already enabled, don't have to do anything */
+ if (enabled)
+ return;
+
+ if (intel_alloc_mchbar_resource(i915))
+ return;
+
+ i915->gmch.mchbar_need_disable = true;
+
+ /* Space is allocated or reserved, so enable it. */
+ if (IS_I915G(i915) || IS_I915GM(i915)) {
+ pci_write_config_dword(i915->gmch.pdev, DEVEN,
+ temp | DEVEN_MCHBAR_EN);
+ } else {
+ pci_read_config_dword(i915->gmch.pdev, mchbar_reg, &temp);
+ pci_write_config_dword(i915->gmch.pdev, mchbar_reg, temp | 1);
+ }
+}
+
+void intel_gmch_bar_teardown(struct drm_i915_private *i915)
+{
+ int mchbar_reg = GRAPHICS_VER(i915) >= 4 ? MCHBAR_I965 : MCHBAR_I915;
+
+ if (i915->gmch.mchbar_need_disable) {
+ if (IS_I915G(i915) || IS_I915GM(i915)) {
+ u32 deven_val;
+
+ pci_read_config_dword(i915->gmch.pdev, DEVEN,
+ &deven_val);
+ deven_val &= ~DEVEN_MCHBAR_EN;
+ pci_write_config_dword(i915->gmch.pdev, DEVEN,
+ deven_val);
+ } else {
+ u32 mchbar_val;
+
+ pci_read_config_dword(i915->gmch.pdev, mchbar_reg,
+ &mchbar_val);
+ mchbar_val &= ~1;
+ pci_write_config_dword(i915->gmch.pdev, mchbar_reg,
+ mchbar_val);
+ }
+ }
+
+ if (i915->gmch.mch_res.start)
+ release_resource(&i915->gmch.mch_res);
+}
+
+int intel_gmch_vga_set_state(struct drm_i915_private *i915, bool enable_decode)
+{
+ unsigned int reg = DISPLAY_VER(i915) >= 6 ? SNB_GMCH_CTRL : INTEL_GMCH_CTRL;
+ u16 gmch_ctrl;
+
+ if (pci_read_config_word(i915->gmch.pdev, reg, &gmch_ctrl)) {
+ drm_err(&i915->drm, "failed to read control word\n");
+ return -EIO;
+ }
+
+ if (!!(gmch_ctrl & INTEL_GMCH_VGA_DISABLE) == !enable_decode)
+ return 0;
+
+ if (enable_decode)
+ gmch_ctrl &= ~INTEL_GMCH_VGA_DISABLE;
+ else
+ gmch_ctrl |= INTEL_GMCH_VGA_DISABLE;
+
+ if (pci_write_config_word(i915->gmch.pdev, reg, gmch_ctrl)) {
+ drm_err(&i915->drm, "failed to write control word\n");
+ return -EIO;
+ }
+
+ return 0;
+}
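
For reference, the gen4+ MCHBAR address assembly above just combines two
32-bit config dwords into one 64-bit base, with bit 0 acting as the
enable bit on non-i915G/GM parts. A minimal sketch with made-up values:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t temp_lo = 0xfed10001; /* made-up: base 0xfed10000, bit 0 = enabled */
	uint32_t temp_hi = 0x00000000;
	uint64_t mchbar_addr = ((uint64_t)temp_hi << 32) | temp_lo;

	/* Prints: MCHBAR base 0xfed10000, enabled: yes */
	printf("MCHBAR base 0x%llx, enabled: %s\n",
	       (unsigned long long)(mchbar_addr & ~1ULL),
	       (temp_lo & 1) ? "yes" : "no");
	return 0;
}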
diff --git a/drivers/gpu/drm/i915/soc/intel_gmch.h b/drivers/gpu/drm/i915/soc/intel_gmch.h
new file mode 100644
index 0000000000..d0133eedc7
--- /dev/null
+++ b/drivers/gpu/drm/i915/soc/intel_gmch.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#ifndef __INTEL_GMCH_H__
+#define __INTEL_GMCH_H__
+
+#include <linux/types.h>
+
+struct drm_i915_private;
+
+int intel_gmch_bridge_setup(struct drm_i915_private *i915);
+void intel_gmch_bar_setup(struct drm_i915_private *i915);
+void intel_gmch_bar_teardown(struct drm_i915_private *i915);
+int intel_gmch_vga_set_state(struct drm_i915_private *i915, bool enable_decode);
+
+#endif /* __INTEL_GMCH_H__ */
diff --git a/drivers/gpu/drm/i915/soc/intel_pch.c b/drivers/gpu/drm/i915/soc/intel_pch.c
new file mode 100644
index 0000000000..19a8f27c40
--- /dev/null
+++ b/drivers/gpu/drm/i915/soc/intel_pch.c
@@ -0,0 +1,281 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright 2019 Intel Corporation.
+ */
+
+#include "i915_drv.h"
+#include "i915_utils.h"
+#include "intel_pch.h"
+
+/* Map PCH device id to PCH type, or PCH_NONE if unknown. */
+static enum intel_pch
+intel_pch_type(const struct drm_i915_private *dev_priv, unsigned short id)
+{
+ switch (id) {
+ case INTEL_PCH_IBX_DEVICE_ID_TYPE:
+ drm_dbg_kms(&dev_priv->drm, "Found Ibex Peak PCH\n");
+ drm_WARN_ON(&dev_priv->drm, GRAPHICS_VER(dev_priv) != 5);
+ return PCH_IBX;
+ case INTEL_PCH_CPT_DEVICE_ID_TYPE:
+ drm_dbg_kms(&dev_priv->drm, "Found CougarPoint PCH\n");
+ drm_WARN_ON(&dev_priv->drm,
+ GRAPHICS_VER(dev_priv) != 6 && !IS_IVYBRIDGE(dev_priv));
+ return PCH_CPT;
+ case INTEL_PCH_PPT_DEVICE_ID_TYPE:
+ drm_dbg_kms(&dev_priv->drm, "Found PantherPoint PCH\n");
+ drm_WARN_ON(&dev_priv->drm,
+ GRAPHICS_VER(dev_priv) != 6 && !IS_IVYBRIDGE(dev_priv));
+ /* PPT is CPT compatible */
+ return PCH_CPT;
+ case INTEL_PCH_LPT_DEVICE_ID_TYPE:
+ drm_dbg_kms(&dev_priv->drm, "Found LynxPoint PCH\n");
+ drm_WARN_ON(&dev_priv->drm,
+ !IS_HASWELL(dev_priv) && !IS_BROADWELL(dev_priv));
+ drm_WARN_ON(&dev_priv->drm,
+ IS_HASWELL_ULT(dev_priv) || IS_BROADWELL_ULT(dev_priv));
+ return PCH_LPT;
+ case INTEL_PCH_LPT_LP_DEVICE_ID_TYPE:
+ drm_dbg_kms(&dev_priv->drm, "Found LynxPoint LP PCH\n");
+ drm_WARN_ON(&dev_priv->drm,
+ !IS_HASWELL(dev_priv) && !IS_BROADWELL(dev_priv));
+ drm_WARN_ON(&dev_priv->drm,
+ !IS_HASWELL_ULT(dev_priv) && !IS_BROADWELL_ULT(dev_priv));
+ return PCH_LPT;
+ case INTEL_PCH_WPT_DEVICE_ID_TYPE:
+ drm_dbg_kms(&dev_priv->drm, "Found WildcatPoint PCH\n");
+ drm_WARN_ON(&dev_priv->drm,
+ !IS_HASWELL(dev_priv) && !IS_BROADWELL(dev_priv));
+ drm_WARN_ON(&dev_priv->drm,
+ IS_HASWELL_ULT(dev_priv) || IS_BROADWELL_ULT(dev_priv));
+ /* WPT is LPT compatible */
+ return PCH_LPT;
+ case INTEL_PCH_WPT_LP_DEVICE_ID_TYPE:
+ drm_dbg_kms(&dev_priv->drm, "Found WildcatPoint LP PCH\n");
+ drm_WARN_ON(&dev_priv->drm,
+ !IS_HASWELL(dev_priv) && !IS_BROADWELL(dev_priv));
+ drm_WARN_ON(&dev_priv->drm,
+ !IS_HASWELL_ULT(dev_priv) && !IS_BROADWELL_ULT(dev_priv));
+ /* WPT is LPT compatible */
+ return PCH_LPT;
+ case INTEL_PCH_SPT_DEVICE_ID_TYPE:
+ drm_dbg_kms(&dev_priv->drm, "Found SunrisePoint PCH\n");
+ drm_WARN_ON(&dev_priv->drm,
+ !IS_SKYLAKE(dev_priv) && !IS_KABYLAKE(dev_priv));
+ return PCH_SPT;
+ case INTEL_PCH_SPT_LP_DEVICE_ID_TYPE:
+ drm_dbg_kms(&dev_priv->drm, "Found SunrisePoint LP PCH\n");
+ drm_WARN_ON(&dev_priv->drm,
+ !IS_SKYLAKE(dev_priv) &&
+ !IS_KABYLAKE(dev_priv) &&
+ !IS_COFFEELAKE(dev_priv) &&
+ !IS_COMETLAKE(dev_priv));
+ return PCH_SPT;
+ case INTEL_PCH_KBP_DEVICE_ID_TYPE:
+ drm_dbg_kms(&dev_priv->drm, "Found Kaby Lake PCH (KBP)\n");
+ drm_WARN_ON(&dev_priv->drm,
+ !IS_SKYLAKE(dev_priv) &&
+ !IS_KABYLAKE(dev_priv) &&
+ !IS_COFFEELAKE(dev_priv) &&
+ !IS_COMETLAKE(dev_priv));
+ /* KBP is SPT compatible */
+ return PCH_SPT;
+ case INTEL_PCH_CNP_DEVICE_ID_TYPE:
+ drm_dbg_kms(&dev_priv->drm, "Found Cannon Lake PCH (CNP)\n");
+ drm_WARN_ON(&dev_priv->drm,
+ !IS_COFFEELAKE(dev_priv) &&
+ !IS_COMETLAKE(dev_priv));
+ return PCH_CNP;
+ case INTEL_PCH_CNP_LP_DEVICE_ID_TYPE:
+ drm_dbg_kms(&dev_priv->drm,
+ "Found Cannon Lake LP PCH (CNP-LP)\n");
+ drm_WARN_ON(&dev_priv->drm,
+ !IS_COFFEELAKE(dev_priv) &&
+ !IS_COMETLAKE(dev_priv));
+ return PCH_CNP;
+ case INTEL_PCH_CMP_DEVICE_ID_TYPE:
+ case INTEL_PCH_CMP2_DEVICE_ID_TYPE:
+ drm_dbg_kms(&dev_priv->drm, "Found Comet Lake PCH (CMP)\n");
+ drm_WARN_ON(&dev_priv->drm,
+ !IS_COFFEELAKE(dev_priv) &&
+ !IS_COMETLAKE(dev_priv) &&
+ !IS_ROCKETLAKE(dev_priv));
+ /* CMP is CNP compatible */
+ return PCH_CNP;
+ case INTEL_PCH_CMP_V_DEVICE_ID_TYPE:
+ drm_dbg_kms(&dev_priv->drm, "Found Comet Lake V PCH (CMP-V)\n");
+ drm_WARN_ON(&dev_priv->drm,
+ !IS_COFFEELAKE(dev_priv) &&
+ !IS_COMETLAKE(dev_priv));
+ /* CMP-V is based on KBP, which is SPT compatible */
+ return PCH_SPT;
+ case INTEL_PCH_ICP_DEVICE_ID_TYPE:
+ case INTEL_PCH_ICP2_DEVICE_ID_TYPE:
+ drm_dbg_kms(&dev_priv->drm, "Found Ice Lake PCH\n");
+ drm_WARN_ON(&dev_priv->drm, !IS_ICELAKE(dev_priv));
+ return PCH_ICP;
+ case INTEL_PCH_MCC_DEVICE_ID_TYPE:
+ drm_dbg_kms(&dev_priv->drm, "Found Mule Creek Canyon PCH\n");
+ drm_WARN_ON(&dev_priv->drm, !(IS_JASPERLAKE(dev_priv) ||
+ IS_ELKHARTLAKE(dev_priv)));
+ /* MCC is TGP compatible */
+ return PCH_TGP;
+ case INTEL_PCH_TGP_DEVICE_ID_TYPE:
+ case INTEL_PCH_TGP2_DEVICE_ID_TYPE:
+ drm_dbg_kms(&dev_priv->drm, "Found Tiger Lake LP PCH\n");
+ drm_WARN_ON(&dev_priv->drm, !IS_TIGERLAKE(dev_priv) &&
+ !IS_ROCKETLAKE(dev_priv) &&
+ !IS_GEN9_BC(dev_priv));
+ return PCH_TGP;
+ case INTEL_PCH_JSP_DEVICE_ID_TYPE:
+ drm_dbg_kms(&dev_priv->drm, "Found Jasper Lake PCH\n");
+ drm_WARN_ON(&dev_priv->drm, !(IS_JASPERLAKE(dev_priv) ||
+ IS_ELKHARTLAKE(dev_priv)));
+ /* JSP is ICP compatible */
+ return PCH_ICP;
+ case INTEL_PCH_ADP_DEVICE_ID_TYPE:
+ case INTEL_PCH_ADP2_DEVICE_ID_TYPE:
+ case INTEL_PCH_ADP3_DEVICE_ID_TYPE:
+ case INTEL_PCH_ADP4_DEVICE_ID_TYPE:
+ drm_dbg_kms(&dev_priv->drm, "Found Alder Lake PCH\n");
+ drm_WARN_ON(&dev_priv->drm, !IS_ALDERLAKE_S(dev_priv) &&
+ !IS_ALDERLAKE_P(dev_priv));
+ return PCH_ADP;
+ case INTEL_PCH_MTP_DEVICE_ID_TYPE:
+ case INTEL_PCH_MTP2_DEVICE_ID_TYPE:
+ drm_dbg_kms(&dev_priv->drm, "Found Meteor Lake PCH\n");
+ drm_WARN_ON(&dev_priv->drm, !IS_METEORLAKE(dev_priv));
+ return PCH_MTP;
+ default:
+ return PCH_NONE;
+ }
+}
+
+static bool intel_is_virt_pch(unsigned short id,
+ unsigned short svendor, unsigned short sdevice)
+{
+ return (id == INTEL_PCH_P2X_DEVICE_ID_TYPE ||
+ id == INTEL_PCH_P3X_DEVICE_ID_TYPE ||
+ (id == INTEL_PCH_QEMU_DEVICE_ID_TYPE &&
+ svendor == PCI_SUBVENDOR_ID_REDHAT_QUMRANET &&
+ sdevice == PCI_SUBDEVICE_ID_QEMU));
+}
+
+static void
+intel_virt_detect_pch(const struct drm_i915_private *dev_priv,
+ unsigned short *pch_id, enum intel_pch *pch_type)
+{
+ unsigned short id = 0;
+
+ /*
+ * In a virtualized passthrough environment we can be in a
+ * setup where the ISA bridge is not able to be passed through.
+ * In this case, a south bridge can be emulated and we have to
+ * make an educated guess as to which PCH is really there.
+ */
+
+ if (IS_METEORLAKE(dev_priv))
+ id = INTEL_PCH_MTP_DEVICE_ID_TYPE;
+ else if (IS_ALDERLAKE_S(dev_priv) || IS_ALDERLAKE_P(dev_priv))
+ id = INTEL_PCH_ADP_DEVICE_ID_TYPE;
+ else if (IS_TIGERLAKE(dev_priv) || IS_ROCKETLAKE(dev_priv))
+ id = INTEL_PCH_TGP_DEVICE_ID_TYPE;
+ else if (IS_JASPERLAKE(dev_priv) || IS_ELKHARTLAKE(dev_priv))
+ id = INTEL_PCH_MCC_DEVICE_ID_TYPE;
+ else if (IS_ICELAKE(dev_priv))
+ id = INTEL_PCH_ICP_DEVICE_ID_TYPE;
+ else if (IS_COFFEELAKE(dev_priv) ||
+ IS_COMETLAKE(dev_priv))
+ id = INTEL_PCH_CNP_DEVICE_ID_TYPE;
+ else if (IS_KABYLAKE(dev_priv) || IS_SKYLAKE(dev_priv))
+ id = INTEL_PCH_SPT_DEVICE_ID_TYPE;
+ else if (IS_HASWELL_ULT(dev_priv) || IS_BROADWELL_ULT(dev_priv))
+ id = INTEL_PCH_LPT_LP_DEVICE_ID_TYPE;
+ else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
+ id = INTEL_PCH_LPT_DEVICE_ID_TYPE;
+ else if (GRAPHICS_VER(dev_priv) == 6 || IS_IVYBRIDGE(dev_priv))
+ id = INTEL_PCH_CPT_DEVICE_ID_TYPE;
+ else if (GRAPHICS_VER(dev_priv) == 5)
+ id = INTEL_PCH_IBX_DEVICE_ID_TYPE;
+
+ if (id)
+ drm_dbg_kms(&dev_priv->drm, "Assuming PCH ID %04x\n", id);
+ else
+ drm_dbg_kms(&dev_priv->drm, "Assuming no PCH\n");
+
+ *pch_type = intel_pch_type(dev_priv, id);
+
+ /* Sanity check virtual PCH id */
+ if (drm_WARN_ON(&dev_priv->drm,
+ id && *pch_type == PCH_NONE))
+ id = 0;
+
+ *pch_id = id;
+}
+
+void intel_detect_pch(struct drm_i915_private *dev_priv)
+{
+ struct pci_dev *pch = NULL;
+ unsigned short id;
+ enum intel_pch pch_type;
+
+ /* DG1 has south engine display on the same PCI device */
+ if (IS_DG1(dev_priv)) {
+ dev_priv->pch_type = PCH_DG1;
+ return;
+ } else if (IS_DG2(dev_priv)) {
+ dev_priv->pch_type = PCH_DG2;
+ return;
+ }
+
+ /*
+ * The reason to probe the ISA bridge instead of Dev31:Fun0 is to
+ * make graphics device passthrough easy for the VMM, which then only
+ * needs to expose the ISA bridge for the driver to learn the real
+ * hardware underneath. This is a requirement from the virtualization
+ * team.
+ *
+ * In some virtualized environments (e.g. XEN), there is an irrelevant
+ * ISA bridge in the system. To work reliably, we should scan through
+ * all the ISA bridge devices and check for the first match, instead
+ * of only checking the first one.
+ */
+ while ((pch = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, pch))) {
+ if (pch->vendor != PCI_VENDOR_ID_INTEL)
+ continue;
+
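+ /*
+ * Only the upper bits of the device id select the PCH family:
+ * e.g. 0x9d43 & 0xff80 = 0x9d00 (SPT-LP).
+ */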
+ id = pch->device & INTEL_PCH_DEVICE_ID_MASK;
+
+ pch_type = intel_pch_type(dev_priv, id);
+ if (pch_type != PCH_NONE) {
+ dev_priv->pch_type = pch_type;
+ dev_priv->pch_id = id;
+ break;
+ } else if (intel_is_virt_pch(id, pch->subsystem_vendor,
+ pch->subsystem_device)) {
+ intel_virt_detect_pch(dev_priv, &id, &pch_type);
+ dev_priv->pch_type = pch_type;
+ dev_priv->pch_id = id;
+ break;
+ }
+ }
+
+ /*
+ * Use PCH_NOP (PCH but no South Display) for PCH platforms without
+ * display.
+ */
+ if (pch && !HAS_DISPLAY(dev_priv)) {
+ drm_dbg_kms(&dev_priv->drm,
+ "Display disabled, reverting to NOP PCH\n");
+ dev_priv->pch_type = PCH_NOP;
+ dev_priv->pch_id = 0;
+ } else if (!pch) {
+ if (i915_run_as_guest() && HAS_DISPLAY(dev_priv)) {
+ intel_virt_detect_pch(dev_priv, &id, &pch_type);
+ dev_priv->pch_type = pch_type;
+ dev_priv->pch_id = id;
+ } else {
+ drm_dbg_kms(&dev_priv->drm, "No PCH found.\n");
+ }
+ }
+
+ pci_dev_put(pch);
+}
diff --git a/drivers/gpu/drm/i915/soc/intel_pch.h b/drivers/gpu/drm/i915/soc/intel_pch.h
new file mode 100644
index 0000000000..32aff5a70d
--- /dev/null
+++ b/drivers/gpu/drm/i915/soc/intel_pch.h
@@ -0,0 +1,91 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright 2019 Intel Corporation.
+ */
+
+#ifndef __INTEL_PCH__
+#define __INTEL_PCH__
+
+struct drm_i915_private;
+
+/*
+ * Sorted by south display engine compatibility.
+ * If the new PCH comes with a south display engine that is not
+ * inherited from the latest item, please do not add it to the
+ * end. Instead, add it right after its "parent" PCH.
+ */
+enum intel_pch {
+ PCH_NOP = -1, /* PCH without south display */
+ PCH_NONE = 0, /* No PCH present */
+ PCH_IBX, /* Ibexpeak PCH */
+ PCH_CPT, /* Cougarpoint/Pantherpoint PCH */
+ PCH_LPT, /* Lynxpoint/Wildcatpoint PCH */
+ PCH_SPT, /* Sunrisepoint/Kaby Lake PCH */
+ PCH_CNP, /* Cannon/Comet Lake PCH */
+ PCH_ICP, /* Ice Lake/Jasper Lake PCH */
+ PCH_TGP, /* Tiger Lake/Mule Creek Canyon PCH */
+ PCH_ADP, /* Alder Lake PCH */
+ PCH_MTP, /* Meteor Lake PCH */
+
+ /* Fake PCHs, functionality handled on the same PCI dev */
+ PCH_DG1 = 1024,
+ PCH_DG2,
+};
+
+#define INTEL_PCH_DEVICE_ID_MASK 0xff80
+#define INTEL_PCH_IBX_DEVICE_ID_TYPE 0x3b00
+#define INTEL_PCH_CPT_DEVICE_ID_TYPE 0x1c00
+#define INTEL_PCH_PPT_DEVICE_ID_TYPE 0x1e00
+#define INTEL_PCH_LPT_DEVICE_ID_TYPE 0x8c00
+#define INTEL_PCH_LPT_LP_DEVICE_ID_TYPE 0x9c00
+#define INTEL_PCH_WPT_DEVICE_ID_TYPE 0x8c80
+#define INTEL_PCH_WPT_LP_DEVICE_ID_TYPE 0x9c80
+#define INTEL_PCH_SPT_DEVICE_ID_TYPE 0xA100
+#define INTEL_PCH_SPT_LP_DEVICE_ID_TYPE 0x9D00
+#define INTEL_PCH_KBP_DEVICE_ID_TYPE 0xA280
+#define INTEL_PCH_CNP_DEVICE_ID_TYPE 0xA300
+#define INTEL_PCH_CNP_LP_DEVICE_ID_TYPE 0x9D80
+#define INTEL_PCH_CMP_DEVICE_ID_TYPE 0x0280
+#define INTEL_PCH_CMP2_DEVICE_ID_TYPE 0x0680
+#define INTEL_PCH_CMP_V_DEVICE_ID_TYPE 0xA380
+#define INTEL_PCH_ICP_DEVICE_ID_TYPE 0x3480
+#define INTEL_PCH_ICP2_DEVICE_ID_TYPE 0x3880
+#define INTEL_PCH_MCC_DEVICE_ID_TYPE 0x4B00
+#define INTEL_PCH_TGP_DEVICE_ID_TYPE 0xA080
+#define INTEL_PCH_TGP2_DEVICE_ID_TYPE 0x4380
+#define INTEL_PCH_JSP_DEVICE_ID_TYPE 0x4D80
+#define INTEL_PCH_ADP_DEVICE_ID_TYPE 0x7A80
+#define INTEL_PCH_ADP2_DEVICE_ID_TYPE 0x5180
+#define INTEL_PCH_ADP3_DEVICE_ID_TYPE 0x7A00
+#define INTEL_PCH_ADP4_DEVICE_ID_TYPE 0x5480
+#define INTEL_PCH_MTP_DEVICE_ID_TYPE 0x7E00
+#define INTEL_PCH_MTP2_DEVICE_ID_TYPE 0xAE00
+#define INTEL_PCH_P2X_DEVICE_ID_TYPE 0x7100
+#define INTEL_PCH_P3X_DEVICE_ID_TYPE 0x7000
+#define INTEL_PCH_QEMU_DEVICE_ID_TYPE 0x2900 /* qemu q35 has 2918 */
+
+#define INTEL_PCH_TYPE(dev_priv) ((dev_priv)->pch_type)
+#define INTEL_PCH_ID(dev_priv) ((dev_priv)->pch_id)
+#define HAS_PCH_MTP(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_MTP)
+#define HAS_PCH_DG2(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_DG2)
+#define HAS_PCH_ADP(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_ADP)
+#define HAS_PCH_DG1(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_DG1)
+#define HAS_PCH_TGP(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_TGP)
+#define HAS_PCH_ICP(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_ICP)
+#define HAS_PCH_CNP(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_CNP)
+#define HAS_PCH_SPT(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_SPT)
+#define HAS_PCH_LPT(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_LPT)
+#define HAS_PCH_LPT_LP(dev_priv) \
+ (INTEL_PCH_ID(dev_priv) == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE || \
+ INTEL_PCH_ID(dev_priv) == INTEL_PCH_WPT_LP_DEVICE_ID_TYPE)
+#define HAS_PCH_LPT_H(dev_priv) \
+ (INTEL_PCH_ID(dev_priv) == INTEL_PCH_LPT_DEVICE_ID_TYPE || \
+ INTEL_PCH_ID(dev_priv) == INTEL_PCH_WPT_DEVICE_ID_TYPE)
+#define HAS_PCH_CPT(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_CPT)
+#define HAS_PCH_IBX(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_IBX)
+#define HAS_PCH_NOP(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_NOP)
+#define HAS_PCH_SPLIT(dev_priv) (INTEL_PCH_TYPE(dev_priv) != PCH_NONE)
+
+void intel_detect_pch(struct drm_i915_private *dev_priv);
+
+#endif /* __INTEL_PCH__ */