Diffstat (limited to 'drivers/gpu/drm/i915/gt/uc')
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/abi/guc_actions_slpc_abi.h  | 21
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/abi/guc_errors_abi.h        |  1
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/abi/guc_klvs_abi.h          |  7
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/intel_gsc_fw.c              |  4
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/intel_gsc_proxy.c           |  3
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/intel_guc.c                 | 22
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/intel_guc.h                 |  2
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c             | 95
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/intel_guc_capture.c         | 12
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c              |  8
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h            |  8
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/intel_guc_hwconfig.c        |  2
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.c            | 17
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.h            |  1
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c      | 57
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/intel_huc.c                 |  4
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/intel_uc.c                  |  4
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c               |  4
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/selftest_guc.c              |  2
19 files changed, 213 insertions, 61 deletions
diff --git a/drivers/gpu/drm/i915/gt/uc/abi/guc_actions_slpc_abi.h b/drivers/gpu/drm/i915/gt/uc/abi/guc_actions_slpc_abi.h
index 811add10c3..c34674e797 100644
--- a/drivers/gpu/drm/i915/gt/uc/abi/guc_actions_slpc_abi.h
+++ b/drivers/gpu/drm/i915/gt/uc/abi/guc_actions_slpc_abi.h
@@ -207,6 +207,27 @@ struct slpc_shared_data {
u8 reserved_mode_definition[4096];
} __packed;
+struct slpc_context_frequency_request {
+ u32 frequency_request:16;
+ u32 reserved:12;
+ u32 is_compute:1;
+ u32 ignore_busyness:1;
+ u32 is_minimum:1;
+ u32 is_predefined:1;
+} __packed;
+
+#define SLPC_CTX_FREQ_REQ_IS_COMPUTE REG_BIT(28)
+
+struct slpc_optimized_strategies {
+ u32 compute:1;
+ u32 async_flip:1;
+ u32 media:1;
+ u32 vsync_flip:1;
+ u32 reserved:28;
+} __packed;
+
+#define SLPC_OPTIMIZED_STRATEGY_COMPUTE REG_BIT(0)
+
/**
* DOC: SLPC H2G MESSAGE FORMAT
*
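
Note on the new defines: SLPC_CTX_FREQ_REQ_IS_COMPUTE at REG_BIT(28) mirrors the position of the is_compute:1 bitfield in struct slpc_context_frequency_request, since frequency_request occupies bits 0..15 and reserved bits 16..27. The sketch below is illustrative only; the helper name and plain stdint types are not part of the ABI or this patch.

    #include <stdint.h>

    /* Illustration only: bit 28 corresponds to the is_compute:1 field above. */
    #define CTX_FREQ_REQ_IS_COMPUTE_SKETCH	(1u << 28)

    static inline uint32_t sketch_ctx_freq_req(uint16_t freq, int is_compute)
    {
    	uint32_t req = freq;		/* frequency_request, bits 0..15 */

    	if (is_compute)
    		req |= CTX_FREQ_REQ_IS_COMPUTE_SKETCH;	/* bit 28 */
    	return req;
    }
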
diff --git a/drivers/gpu/drm/i915/gt/uc/abi/guc_errors_abi.h b/drivers/gpu/drm/i915/gt/uc/abi/guc_errors_abi.h
index dabeaf4f24..00d6402333 100644
--- a/drivers/gpu/drm/i915/gt/uc/abi/guc_errors_abi.h
+++ b/drivers/gpu/drm/i915/gt/uc/abi/guc_errors_abi.h
@@ -36,6 +36,7 @@ enum intel_guc_load_status {
INTEL_GUC_LOAD_STATUS_INVALID_INIT_DATA_RANGE_START,
INTEL_GUC_LOAD_STATUS_MPU_DATA_INVALID = 0x73,
INTEL_GUC_LOAD_STATUS_INIT_MMIO_SAVE_RESTORE_INVALID = 0x74,
+ INTEL_GUC_LOAD_STATUS_KLV_WORKAROUND_INIT_ERROR = 0x75,
INTEL_GUC_LOAD_STATUS_INVALID_INIT_DATA_RANGE_END,
INTEL_GUC_LOAD_STATUS_READY = 0xF0,
diff --git a/drivers/gpu/drm/i915/gt/uc/abi/guc_klvs_abi.h b/drivers/gpu/drm/i915/gt/uc/abi/guc_klvs_abi.h
index 4f4f53c42a..525587cfe1 100644
--- a/drivers/gpu/drm/i915/gt/uc/abi/guc_klvs_abi.h
+++ b/drivers/gpu/drm/i915/gt/uc/abi/guc_klvs_abi.h
@@ -101,4 +101,11 @@ enum {
GUC_CONTEXT_POLICIES_KLV_NUM_IDS = 5,
};
+/*
+ * Workaround keys:
+ */
+enum {
+ GUC_WORKAROUND_KLV_SERIALIZED_RA_MODE = 0x9001,
+};
+
#endif /* _ABI_GUC_KLVS_ABI_H */
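
The new workaround key is consumed as a KLV (key/length/value) entry: the first dword packs the 16-bit key into bits 31:16 and the payload length, in dwords, into bits 15:0, which is what the FIELD_PREP(GUC_KLV_0_KEY, ...) | FIELD_PREP(GUC_KLV_0_LEN, ...) construction later in this patch produces. A stand-alone sketch of that encoding follows; the helper name is hypothetical, not driver code.

    #include <stdint.h>

    /* Sketch: leading KLV dword, key in bits 31:16, length in bits 15:0. */
    static inline uint32_t sketch_klv_header(uint16_t key, uint16_t len_dwords)
    {
    	return ((uint32_t)key << 16) | len_dwords;
    }

    /*
     * GUC_WORKAROUND_KLV_SERIALIZED_RA_MODE carries no payload, so its whole
     * entry is the single dword sketch_klv_header(0x9001, 0) == 0x90010000.
     */
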
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_gsc_fw.c b/drivers/gpu/drm/i915/gt/uc/intel_gsc_fw.c
index e2e42b3e0d..3b69bc6616 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_gsc_fw.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_gsc_fw.c
@@ -298,7 +298,7 @@ static int gsc_fw_load_prepare(struct intel_gsc_uc *gsc)
memcpy_toio(gsc->local_vaddr, src, gsc->fw.size);
memset_io(gsc->local_vaddr + gsc->fw.size, 0, gsc->local->size - gsc->fw.size);
- intel_guc_write_barrier(&gt->uc.guc);
+ intel_guc_write_barrier(gt_to_guc(gt));
i915_gem_object_unpin_map(gsc->fw.obj);
@@ -351,7 +351,7 @@ static int gsc_fw_query_compatibility_version(struct intel_gsc_uc *gsc)
void *vaddr;
int err;
- err = intel_guc_allocate_and_map_vma(&gt->uc.guc, GSC_VER_PKT_SZ * 2,
+ err = intel_guc_allocate_and_map_vma(gt_to_guc(gt), GSC_VER_PKT_SZ * 2,
&vma, &vaddr);
if (err) {
gt_err(gt, "failed to allocate vma for GSC version query\n");
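
The recurring &gt->uc.guc -> gt_to_guc(gt) conversions in this and the following files rely on a small accessor provided by the gt headers; it is presumably no more than the sketch below, shown here only so the converted call sites read in context (it is not part of this diff).

    /* Assumed shape of the accessor used by the converted call sites. */
    static inline struct intel_guc *gt_to_guc(struct intel_gt *gt)
    {
    	return &gt->uc.guc;
    }
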
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_gsc_proxy.c b/drivers/gpu/drm/i915/gt/uc/intel_gsc_proxy.c
index 40817ebcca..a7d5465655 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_gsc_proxy.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_gsc_proxy.c
@@ -358,7 +358,8 @@ static int proxy_channel_alloc(struct intel_gsc_uc *gsc)
void *vaddr;
int err;
- err = intel_guc_allocate_and_map_vma(&gt->uc.guc, GSC_PROXY_CHANNEL_SIZE,
+ err = intel_guc_allocate_and_map_vma(gt_to_guc(gt),
+ GSC_PROXY_CHANNEL_SIZE,
&vma, &vaddr);
if (err)
return err;
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc.c b/drivers/gpu/drm/i915/gt/uc/intel_guc.c
index 2b450c43bb..5e60a34692 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc.c
@@ -286,7 +286,7 @@ static u32 guc_ctl_wa_flags(struct intel_guc *guc)
/* Wa_22012773006:gen11,gen12 < XeHP */
if (GRAPHICS_VER(gt->i915) >= 11 &&
- GRAPHICS_VER_FULL(gt->i915) < IP_VER(12, 50))
+ GRAPHICS_VER_FULL(gt->i915) < IP_VER(12, 55))
flags |= GUC_WA_POLLCS;
/* Wa_14014475959 */
@@ -294,6 +294,11 @@ static u32 guc_ctl_wa_flags(struct intel_guc *guc)
IS_DG2(gt->i915))
flags |= GUC_WA_HOLD_CCS_SWITCHOUT;
+ /* Wa_16019325821 */
+ /* Wa_14019159160 */
+ if (IS_GFX_GT_IP_RANGE(gt, IP_VER(12, 70), IP_VER(12, 71)))
+ flags |= GUC_WA_RCS_CCS_SWITCHOUT;
+
/*
* Wa_14012197797
* Wa_22011391025
@@ -315,15 +320,12 @@ static u32 guc_ctl_wa_flags(struct intel_guc *guc)
if (IS_DG2_G11(gt->i915))
flags |= GUC_WA_CONTEXT_ISOLATION;
- /* Wa_16015675438 */
- if (!RCS_MASK(gt))
- flags |= GUC_WA_RCS_REGS_IN_CCS_REGS_LIST;
-
- /* Wa_14018913170 */
- if (GUC_FIRMWARE_VER(guc) >= MAKE_GUC_VER(70, 7, 0)) {
- if (IS_DG2(gt->i915) || IS_METEORLAKE(gt->i915) || IS_PONTEVECCHIO(gt->i915))
- flags |= GUC_WA_ENABLE_TSC_CHECK_ON_RC6;
- }
+ /*
+ * Wa_14018913170: Applicable to all platforms supported by i915 so
+ * don't bother testing for all X/Y/Z platforms explicitly.
+ */
+ if (GUC_FIRMWARE_VER(guc) >= MAKE_GUC_VER(70, 7, 0))
+ flags |= GUC_WA_ENABLE_TSC_CHECK_ON_RC6;
return flags;
}
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc.h b/drivers/gpu/drm/i915/gt/uc/intel_guc.h
index be70c46604..57b9031327 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc.h
@@ -204,6 +204,8 @@ struct intel_guc {
struct guc_mmio_reg *ads_regset;
/** @ads_golden_ctxt_size: size of the golden contexts in the ADS */
u32 ads_golden_ctxt_size;
+ /** @ads_waklv_size: size of workaround KLVs */
+ u32 ads_waklv_size;
/** @ads_capture_size: size of register lists in the ADS used for error capture */
u32 ads_capture_size;
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c
index f7372f736a..c606bb5e3b 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c
@@ -46,6 +46,10 @@
* +---------------------------------------+
* | padding |
* +---------------------------------------+ <== 4K aligned
+ * | w/a KLVs |
+ * +---------------------------------------+
+ * | padding |
+ * +---------------------------------------+ <== 4K aligned
* | capture lists |
* +---------------------------------------+
* | padding |
@@ -88,6 +92,11 @@ static u32 guc_ads_golden_ctxt_size(struct intel_guc *guc)
return PAGE_ALIGN(guc->ads_golden_ctxt_size);
}
+static u32 guc_ads_waklv_size(struct intel_guc *guc)
+{
+ return PAGE_ALIGN(guc->ads_waklv_size);
+}
+
static u32 guc_ads_capture_size(struct intel_guc *guc)
{
return PAGE_ALIGN(guc->ads_capture_size);
@@ -113,7 +122,7 @@ static u32 guc_ads_golden_ctxt_offset(struct intel_guc *guc)
return PAGE_ALIGN(offset);
}
-static u32 guc_ads_capture_offset(struct intel_guc *guc)
+static u32 guc_ads_waklv_offset(struct intel_guc *guc)
{
u32 offset;
@@ -123,6 +132,16 @@ static u32 guc_ads_capture_offset(struct intel_guc *guc)
return PAGE_ALIGN(offset);
}
+static u32 guc_ads_capture_offset(struct intel_guc *guc)
+{
+ u32 offset;
+
+ offset = guc_ads_waklv_offset(guc) +
+ guc_ads_waklv_size(guc);
+
+ return PAGE_ALIGN(offset);
+}
+
static u32 guc_ads_private_data_offset(struct intel_guc *guc)
{
u32 offset;
@@ -393,7 +412,7 @@ static int guc_mmio_regset_init(struct temp_regset *regset,
/* add in local MOCS registers */
for (i = 0; i < LNCFCMOCS_REG_COUNT; i++)
- if (GRAPHICS_VER_FULL(engine->i915) >= IP_VER(12, 50))
+ if (GRAPHICS_VER_FULL(engine->i915) >= IP_VER(12, 55))
ret |= GUC_MCR_REG_ADD(gt, regset, XEHP_LNCFCMOCS(i), false);
else
ret |= GUC_MMIO_REG_ADD(gt, regset, GEN9_LNCFCMOCS(i), false);
@@ -503,7 +522,7 @@ static void fill_engine_enable_masks(struct intel_gt *gt,
#define LR_HW_CONTEXT_SIZE (80 * sizeof(u32))
#define XEHP_LR_HW_CONTEXT_SIZE (96 * sizeof(u32))
-#define LR_HW_CONTEXT_SZ(i915) (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 50) ? \
+#define LR_HW_CONTEXT_SZ(i915) (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 55) ? \
XEHP_LR_HW_CONTEXT_SIZE : \
LR_HW_CONTEXT_SIZE)
#define LRC_SKIP_SIZE(i915) (LRC_PPHWSP_SZ * PAGE_SIZE + LR_HW_CONTEXT_SZ(i915))
@@ -796,6 +815,65 @@ engine_instance_list:
return PAGE_ALIGN(total_size);
}
+/* Wa_14019159160 */
+static u32 guc_waklv_ra_mode(struct intel_guc *guc, u32 offset, u32 remain)
+{
+ u32 size;
+ u32 klv_entry[] = {
+ /* 16:16 key/length */
+ FIELD_PREP(GUC_KLV_0_KEY, GUC_WORKAROUND_KLV_SERIALIZED_RA_MODE) |
+ FIELD_PREP(GUC_KLV_0_LEN, 0),
+ /* 0 dwords data */
+ };
+
+ size = sizeof(klv_entry);
+ GEM_BUG_ON(remain < size);
+
+ iosys_map_memcpy_to(&guc->ads_map, offset, klv_entry, size);
+
+ return size;
+}
+
+static void guc_waklv_init(struct intel_guc *guc)
+{
+ struct intel_gt *gt = guc_to_gt(guc);
+ u32 offset, addr_ggtt, remain, size;
+
+ if (!intel_uc_uses_guc_submission(&gt->uc))
+ return;
+
+ if (GUC_FIRMWARE_VER(guc) < MAKE_GUC_VER(70, 10, 0))
+ return;
+
+ GEM_BUG_ON(iosys_map_is_null(&guc->ads_map));
+ offset = guc_ads_waklv_offset(guc);
+ remain = guc_ads_waklv_size(guc);
+
+ /* Wa_14019159160 */
+ if (IS_GFX_GT_IP_RANGE(gt, IP_VER(12, 70), IP_VER(12, 71))) {
+ size = guc_waklv_ra_mode(guc, offset, remain);
+ offset += size;
+ remain -= size;
+ }
+
+ size = guc_ads_waklv_size(guc) - remain;
+ if (!size)
+ return;
+
+ offset = guc_ads_waklv_offset(guc);
+ addr_ggtt = intel_guc_ggtt_offset(guc, guc->ads_vma) + offset;
+
+ ads_blob_write(guc, ads.wa_klv_addr_lo, addr_ggtt);
+ ads_blob_write(guc, ads.wa_klv_addr_hi, 0);
+ ads_blob_write(guc, ads.wa_klv_size, size);
+}
+
+static int guc_prep_waklv(struct intel_guc *guc)
+{
+ /* Fudge something chunky for now: */
+ return PAGE_SIZE;
+}
+
static void __guc_ads_init(struct intel_guc *guc)
{
struct intel_gt *gt = guc_to_gt(guc);
@@ -843,6 +921,9 @@ static void __guc_ads_init(struct intel_guc *guc)
/* MMIO save/restore list */
guc_mmio_reg_state_init(guc);
+ /* Workaround KLV list */
+ guc_waklv_init(guc);
+
/* Private Data */
ads_blob_write(guc, ads.private_data, base +
guc_ads_private_data_offset(guc));
@@ -886,6 +967,12 @@ int intel_guc_ads_create(struct intel_guc *guc)
return ret;
guc->ads_capture_size = ret;
+ /* And don't forget the workaround KLVs: */
+ ret = guc_prep_waklv(guc);
+ if (ret < 0)
+ return ret;
+ guc->ads_waklv_size = ret;
+
/* Now the total size can be determined: */
size = guc_ads_blob_size(guc);
@@ -961,7 +1048,7 @@ u32 intel_guc_engine_usage_offset(struct intel_guc *guc)
struct iosys_map intel_guc_engine_usage_record_map(struct intel_engine_cs *engine)
{
- struct intel_guc *guc = &engine->gt->uc.guc;
+ struct intel_guc *guc = gt_to_guc(engine->gt);
u8 guc_class = engine_class_to_guc_class(engine->class);
size_t offset = offsetof(struct __guc_ads_blob,
engine_usage.engines[guc_class][ilog2(engine->logical_mask)]);
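
The new w/a KLV page slots into the existing ADS offset chain: each region begins at the page-aligned end of the previous one, so reserving a page for the KLVs between the golden contexts and the capture lists shifts the later regions while keeping every boundary 4K aligned. A rough, stand-alone sketch of that arithmetic follows; PAGE_SZ, PG_ALIGN and the function are illustrative stand-ins, not driver code.

    #include <stdint.h>

    #define PAGE_SZ	4096u
    #define PG_ALIGN(x)	(((x) + PAGE_SZ - 1) & ~(PAGE_SZ - 1))

    /* Sketch of the ADS region chain after this patch. */
    static void sketch_ads_offsets(uint32_t golden_ctxt_offset,
    			       uint32_t golden_ctxt_size,
    			       uint32_t waklv_size,
    			       uint32_t *waklv_offset,
    			       uint32_t *capture_offset)
    {
    	*waklv_offset   = PG_ALIGN(golden_ctxt_offset + golden_ctxt_size);
    	*capture_offset = PG_ALIGN(*waklv_offset + PG_ALIGN(waklv_size));
    }
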
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_capture.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_capture.c
index a1cd40d805..9547fff672 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_capture.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_capture.c
@@ -51,6 +51,7 @@
{ RING_ESR(0), 0, 0, "ESR" }, \
{ RING_DMA_FADD(0), 0, 0, "RING_DMA_FADD_LDW" }, \
{ RING_DMA_FADD_UDW(0), 0, 0, "RING_DMA_FADD_UDW" }, \
+ { RING_EIR(0), 0, 0, "EIR" }, \
{ RING_IPEIR(0), 0, 0, "IPEIR" }, \
{ RING_IPEHR(0), 0, 0, "IPEHR" }, \
{ RING_INSTPS(0), 0, 0, "INSTPS" }, \
@@ -80,9 +81,6 @@
{ GEN8_RING_PDP_LDW(0, 3), 0, 0, "PDP3_LDW" }, \
{ GEN8_RING_PDP_UDW(0, 3), 0, 0, "PDP3_UDW" }
-#define COMMON_BASE_HAS_EU \
- { EIR, 0, 0, "EIR" }
-
#define COMMON_BASE_RENDER \
{ GEN7_SC_INSTDONE, 0, 0, "GEN7_SC_INSTDONE" }
@@ -105,7 +103,6 @@ static const struct __guc_mmio_reg_descr xe_lp_global_regs[] = {
/* XE_LP Render / Compute Per-Class */
static const struct __guc_mmio_reg_descr xe_lp_rc_class_regs[] = {
- COMMON_BASE_HAS_EU,
COMMON_BASE_RENDER,
COMMON_GEN12BASE_RENDER,
};
@@ -148,7 +145,6 @@ static const struct __guc_mmio_reg_descr gen8_global_regs[] = {
};
static const struct __guc_mmio_reg_descr gen8_rc_class_regs[] = {
- COMMON_BASE_HAS_EU,
COMMON_BASE_RENDER,
};
@@ -1441,7 +1437,7 @@ int intel_guc_capture_print_engine_node(struct drm_i915_error_state_buf *ebuf,
if (!cap || !ee->engine)
return -ENODEV;
- guc = &ee->engine->gt->uc.guc;
+ guc = gt_to_guc(ee->engine->gt);
i915_error_printf(ebuf, "global --- GuC Error Capture on %s command stream:\n",
ee->engine->name);
@@ -1543,7 +1539,7 @@ bool intel_guc_capture_is_matching_engine(struct intel_gt *gt,
if (!gt || !ce || !engine)
return false;
- guc = &gt->uc.guc;
+ guc = gt_to_guc(gt);
if (!guc->capture)
return false;
@@ -1573,7 +1569,7 @@ void intel_guc_capture_get_matching_node(struct intel_gt *gt,
if (!gt || !ee || !ce)
return;
- guc = &gt->uc.guc;
+ guc = gt_to_guc(gt);
if (!guc->capture)
return;
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c
index 52332bb143..23f54c84cb 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c
@@ -26,7 +26,7 @@ static void guc_prepare_xfer(struct intel_gt *gt)
GUC_ENABLE_READ_CACHE_FOR_WOPCM_DATA |
GUC_ENABLE_MIA_CLOCK_GATING;
- if (GRAPHICS_VER_FULL(uncore->i915) < IP_VER(12, 50))
+ if (GRAPHICS_VER_FULL(uncore->i915) < IP_VER(12, 55))
shim_flags |= GUC_DISABLE_SRAM_INIT_TO_ZEROES |
GUC_ENABLE_MIA_CACHING;
@@ -115,6 +115,7 @@ static inline bool guc_load_done(struct intel_uncore *uncore, u32 *status, bool
case INTEL_GUC_LOAD_STATUS_INIT_DATA_INVALID:
case INTEL_GUC_LOAD_STATUS_MPU_DATA_INVALID:
case INTEL_GUC_LOAD_STATUS_INIT_MMIO_SAVE_RESTORE_INVALID:
+ case INTEL_GUC_LOAD_STATUS_KLV_WORKAROUND_INIT_ERROR:
*success = false;
return true;
}
@@ -241,6 +242,11 @@ static int guc_wait_ucode(struct intel_guc *guc)
ret = -EPERM;
break;
+ case INTEL_GUC_LOAD_STATUS_KLV_WORKAROUND_INIT_ERROR:
+ guc_info(guc, "invalid w/a KLV entry\n");
+ ret = -EINVAL;
+ break;
+
case INTEL_GUC_LOAD_STATUS_HWCONFIG_START:
guc_info(guc, "still extracting hwconfig table.\n");
ret = -ETIMEDOUT;
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h b/drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h
index 8ae1846431..14797e80bc 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h
@@ -96,8 +96,9 @@
#define GUC_WA_GAM_CREDITS BIT(10)
#define GUC_WA_DUAL_QUEUE BIT(11)
#define GUC_WA_RCS_RESET_BEFORE_RC6 BIT(13)
-#define GUC_WA_CONTEXT_ISOLATION BIT(15)
#define GUC_WA_PRE_PARSER BIT(14)
+#define GUC_WA_CONTEXT_ISOLATION BIT(15)
+#define GUC_WA_RCS_CCS_SWITCHOUT BIT(16)
#define GUC_WA_HOLD_CCS_SWITCHOUT BIT(17)
#define GUC_WA_POLLCS BIT(18)
#define GUC_WA_RCS_REGS_IN_CCS_REGS_LIST BIT(21)
@@ -430,7 +431,10 @@ struct guc_ads {
u32 capture_instance[GUC_CAPTURE_LIST_INDEX_MAX][GUC_MAX_ENGINE_CLASSES];
u32 capture_class[GUC_CAPTURE_LIST_INDEX_MAX][GUC_MAX_ENGINE_CLASSES];
u32 capture_global[GUC_CAPTURE_LIST_INDEX_MAX];
- u32 reserved[14];
+ u32 wa_klv_addr_lo;
+ u32 wa_klv_addr_hi;
+ u32 wa_klv_size;
+ u32 reserved[11];
} __packed;
/* Engine usage stats */
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_hwconfig.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_hwconfig.c
index cc9569af7f..b67a15f742 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_hwconfig.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_hwconfig.c
@@ -111,7 +111,7 @@ static bool has_table(struct drm_i915_private *i915)
static int guc_hwconfig_init(struct intel_gt *gt)
{
struct intel_hwconfig *hwconfig = &gt->info.hwconfig;
- struct intel_guc *guc = &gt->uc.guc;
+ struct intel_guc *guc = gt_to_guc(gt);
int ret;
if (!has_table(gt->i915))
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.c
index 3e681ab6fb..706fffca69 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.c
@@ -537,6 +537,20 @@ int intel_guc_slpc_get_min_freq(struct intel_guc_slpc *slpc, u32 *val)
return ret;
}
+int intel_guc_slpc_set_strategy(struct intel_guc_slpc *slpc, u32 val)
+{
+ struct drm_i915_private *i915 = slpc_to_i915(slpc);
+ intel_wakeref_t wakeref;
+ int ret = 0;
+
+ with_intel_runtime_pm(&i915->runtime_pm, wakeref)
+ ret = slpc_set_param(slpc,
+ SLPC_PARAM_STRATEGIES,
+ val);
+
+ return ret;
+}
+
int intel_guc_slpc_set_media_ratio_mode(struct intel_guc_slpc *slpc, u32 val)
{
struct drm_i915_private *i915 = slpc_to_i915(slpc);
@@ -711,6 +725,9 @@ int intel_guc_slpc_enable(struct intel_guc_slpc *slpc)
/* Set cached media freq ratio mode */
intel_guc_slpc_set_media_ratio_mode(slpc, slpc->media_ratio_mode);
+ /* Enable SLPC Optimized Strategy for compute */
+ intel_guc_slpc_set_strategy(slpc, SLPC_OPTIMIZED_STRATEGY_COMPUTE);
+
return 0;
}
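
intel_guc_slpc_set_strategy() takes a mask built from the slpc_optimized_strategies bits; this patch only enables the compute strategy, but additional bits could presumably be OR'ed into the same call. The snippet below is a hypothetical usage sketch: the media bit is shown purely for illustration and has no dedicated define in this patch.

    /* Hypothetical: enable compute plus the media strategy in one call. */
    static int sketch_enable_strategies(struct intel_guc_slpc *slpc)
    {
    	return intel_guc_slpc_set_strategy(slpc,
    					   SLPC_OPTIMIZED_STRATEGY_COMPUTE |
    					   REG_BIT(2) /* media bit, per slpc_optimized_strategies */);
    }
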
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.h b/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.h
index 6ac6503c39..1cb5fd44f0 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.h
@@ -45,5 +45,6 @@ void intel_guc_pm_intrmsk_enable(struct intel_gt *gt);
void intel_guc_slpc_boost(struct intel_guc_slpc *slpc);
void intel_guc_slpc_dec_waiters(struct intel_guc_slpc *slpc);
int intel_guc_slpc_set_ignore_eff_freq(struct intel_guc_slpc *slpc, bool val);
+int intel_guc_slpc_set_strategy(struct intel_guc_slpc *slpc, u32 val);
#endif
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
index 0f83c6d437..0eaa106424 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
@@ -398,7 +398,7 @@ static inline void set_context_guc_id_invalid(struct intel_context *ce)
static inline struct intel_guc *ce_to_guc(struct intel_context *ce)
{
- return &ce->engine->gt->uc.guc;
+ return gt_to_guc(ce->engine->gt);
}
static inline struct i915_priolist *to_priolist(struct rb_node *rb)
@@ -1246,7 +1246,7 @@ static void __get_engine_usage_record(struct intel_engine_cs *engine,
static void guc_update_engine_gt_clks(struct intel_engine_cs *engine)
{
struct intel_engine_guc_stats *stats = &engine->stats.guc;
- struct intel_guc *guc = &engine->gt->uc.guc;
+ struct intel_guc *guc = gt_to_guc(engine->gt);
u32 last_switch, ctx_id, total;
lockdep_assert_held(&guc->timestamp.lock);
@@ -1311,7 +1311,7 @@ static ktime_t guc_engine_busyness(struct intel_engine_cs *engine, ktime_t *now)
struct intel_engine_guc_stats stats_saved, *stats = &engine->stats.guc;
struct i915_gpu_error *gpu_error = &engine->i915->gpu_error;
struct intel_gt *gt = engine->gt;
- struct intel_guc *guc = &gt->uc.guc;
+ struct intel_guc *guc = gt_to_guc(gt);
u64 total, gt_stamp_saved;
unsigned long flags;
u32 reset_count;
@@ -1577,7 +1577,7 @@ static void guc_fini_engine_stats(struct intel_guc *guc)
void intel_guc_busyness_park(struct intel_gt *gt)
{
- struct intel_guc *guc = &gt->uc.guc;
+ struct intel_guc *guc = gt_to_guc(gt);
if (!guc_submission_initialized(guc))
return;
@@ -1604,7 +1604,7 @@ void intel_guc_busyness_park(struct intel_gt *gt)
void intel_guc_busyness_unpark(struct intel_gt *gt)
{
- struct intel_guc *guc = &gt->uc.guc;
+ struct intel_guc *guc = gt_to_guc(gt);
unsigned long flags;
ktime_t unused;
@@ -2189,7 +2189,7 @@ static bool need_tasklet(struct intel_guc *guc, struct i915_request *rq)
static void guc_submit_request(struct i915_request *rq)
{
struct i915_sched_engine *sched_engine = rq->engine->sched_engine;
- struct intel_guc *guc = &rq->engine->gt->uc.guc;
+ struct intel_guc *guc = gt_to_guc(rq->engine->gt);
unsigned long flags;
/* Will be called from irq-context when using foreign fences. */
@@ -2215,11 +2215,10 @@ static int new_guc_id(struct intel_guc *guc, struct intel_context *ce)
order_base_2(ce->parallel.number_children
+ 1));
else
- ret = ida_simple_get(&guc->submission_state.guc_ids,
- NUMBER_MULTI_LRC_GUC_ID(guc),
- guc->submission_state.num_guc_ids,
- GFP_KERNEL | __GFP_RETRY_MAYFAIL |
- __GFP_NOWARN);
+ ret = ida_alloc_range(&guc->submission_state.guc_ids,
+ NUMBER_MULTI_LRC_GUC_ID(guc),
+ guc->submission_state.num_guc_ids - 1,
+ GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
if (unlikely(ret < 0))
return ret;
@@ -2242,8 +2241,8 @@ static void __release_guc_id(struct intel_guc *guc, struct intel_context *ce)
+ 1));
} else {
--guc->submission_state.guc_ids_in_use;
- ida_simple_remove(&guc->submission_state.guc_ids,
- ce->guc_id.id);
+ ida_free(&guc->submission_state.guc_ids,
+ ce->guc_id.id);
}
clr_ctx_id_mapping(guc, ce->guc_id.id);
set_context_guc_id_invalid(ce);
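
A note on the guc_id allocator conversion in the hunk above: ida_simple_get() treats its third argument as an exclusive upper bound, whereas ida_alloc_range() takes an inclusive maximum, which is why the replacement passes num_guc_ids - 1. Illustration of the equivalent ranges (not driver code):

    /* These two calls cover the same ID range [base, limit). */
    id = ida_simple_get(&ida, base, limit, GFP_KERNEL);
    id = ida_alloc_range(&ida, base, limit - 1, GFP_KERNEL);
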
@@ -2640,6 +2639,7 @@ MAKE_CONTEXT_POLICY_ADD(execution_quantum, EXECUTION_QUANTUM)
MAKE_CONTEXT_POLICY_ADD(preemption_timeout, PREEMPTION_TIMEOUT)
MAKE_CONTEXT_POLICY_ADD(priority, SCHEDULING_PRIORITY)
MAKE_CONTEXT_POLICY_ADD(preempt_to_idle, PREEMPT_TO_IDLE_ON_QUANTUM_EXPIRY)
+MAKE_CONTEXT_POLICY_ADD(slpc_ctx_freq_req, SLPM_GT_FREQUENCY)
#undef MAKE_CONTEXT_POLICY_ADD
@@ -2655,10 +2655,11 @@ static int __guc_context_set_context_policies(struct intel_guc *guc,
static int guc_context_policy_init_v70(struct intel_context *ce, bool loop)
{
struct intel_engine_cs *engine = ce->engine;
- struct intel_guc *guc = &engine->gt->uc.guc;
+ struct intel_guc *guc = gt_to_guc(engine->gt);
struct context_policy policy;
u32 execution_quantum;
u32 preemption_timeout;
+ u32 slpc_ctx_freq_req = 0;
unsigned long flags;
int ret;
@@ -2670,11 +2671,15 @@ static int guc_context_policy_init_v70(struct intel_context *ce, bool loop)
execution_quantum = engine->props.timeslice_duration_ms * 1000;
preemption_timeout = engine->props.preempt_timeout_ms * 1000;
+ if (ce->flags & BIT(CONTEXT_LOW_LATENCY))
+ slpc_ctx_freq_req |= SLPC_CTX_FREQ_REQ_IS_COMPUTE;
+
__guc_context_policy_start_klv(&policy, ce->guc_id.id);
__guc_context_policy_add_priority(&policy, ce->guc_state.prio);
__guc_context_policy_add_execution_quantum(&policy, execution_quantum);
__guc_context_policy_add_preemption_timeout(&policy, preemption_timeout);
+ __guc_context_policy_add_slpc_ctx_freq_req(&policy, slpc_ctx_freq_req);
if (engine->flags & I915_ENGINE_WANT_FORCED_PREEMPTION)
__guc_context_policy_add_preempt_to_idle(&policy, 1);
@@ -2731,7 +2736,7 @@ static u32 map_guc_prio_to_lrc_desc_prio(u8 prio)
static void prepare_context_registration_info_v69(struct intel_context *ce)
{
struct intel_engine_cs *engine = ce->engine;
- struct intel_guc *guc = &engine->gt->uc.guc;
+ struct intel_guc *guc = gt_to_guc(engine->gt);
u32 ctx_id = ce->guc_id.id;
struct guc_lrc_desc_v69 *desc;
struct intel_context *child;
@@ -2800,7 +2805,7 @@ static void prepare_context_registration_info_v70(struct intel_context *ce,
struct guc_ctxt_registration_info *info)
{
struct intel_engine_cs *engine = ce->engine;
- struct intel_guc *guc = &engine->gt->uc.guc;
+ struct intel_guc *guc = gt_to_guc(engine->gt);
u32 ctx_id = ce->guc_id.id;
GEM_BUG_ON(!engine->mask);
@@ -2863,7 +2868,7 @@ static int try_context_registration(struct intel_context *ce, bool loop)
{
struct intel_engine_cs *engine = ce->engine;
struct intel_runtime_pm *runtime_pm = engine->uncore->rpm;
- struct intel_guc *guc = &engine->gt->uc.guc;
+ struct intel_guc *guc = gt_to_guc(engine->gt);
intel_wakeref_t wakeref;
u32 ctx_id = ce->guc_id.id;
bool context_registered;
@@ -4491,7 +4496,13 @@ static void guc_default_vfuncs(struct intel_engine_cs *engine)
if (engine->class == COMPUTE_CLASS)
if (IS_GFX_GT_IP_STEP(engine->gt, IP_VER(12, 70), STEP_A0, STEP_B0) ||
IS_DG2(engine->i915))
- engine->flags |= I915_ENGINE_USES_WA_HOLD_CCS_SWITCHOUT;
+ engine->flags |= I915_ENGINE_USES_WA_HOLD_SWITCHOUT;
+
+ /* Wa_16019325821 */
+ /* Wa_14019159160 */
+ if ((engine->class == COMPUTE_CLASS || engine->class == RENDER_CLASS) &&
+ IS_GFX_GT_IP_RANGE(engine->gt, IP_VER(12, 70), IP_VER(12, 71)))
+ engine->flags |= I915_ENGINE_USES_WA_HOLD_SWITCHOUT;
/*
* TODO: GuC supports timeslicing and semaphores as well, but they're
@@ -4502,7 +4513,7 @@ static void guc_default_vfuncs(struct intel_engine_cs *engine)
*/
engine->emit_bb_start = gen8_emit_bb_start;
- if (GRAPHICS_VER_FULL(engine->i915) >= IP_VER(12, 50))
+ if (GRAPHICS_VER_FULL(engine->i915) >= IP_VER(12, 55))
engine->emit_bb_start = xehp_emit_bb_start;
}
@@ -4544,7 +4555,7 @@ static void guc_sched_engine_destroy(struct kref *kref)
int intel_guc_submission_setup(struct intel_engine_cs *engine)
{
struct drm_i915_private *i915 = engine->i915;
- struct intel_guc *guc = &engine->gt->uc.guc;
+ struct intel_guc *guc = gt_to_guc(engine->gt);
/*
* The setup relies on several assumptions (e.g. irqs always enabled)
@@ -5303,7 +5314,7 @@ int intel_guc_engine_failure_process_msg(struct intel_guc *guc,
void intel_guc_find_hung_context(struct intel_engine_cs *engine)
{
- struct intel_guc *guc = &engine->gt->uc.guc;
+ struct intel_guc *guc = gt_to_guc(engine->gt);
struct intel_context *ce;
struct i915_request *rq;
unsigned long index;
@@ -5365,7 +5376,7 @@ void intel_guc_dump_active_requests(struct intel_engine_cs *engine,
struct i915_request *hung_rq,
struct drm_printer *m)
{
- struct intel_guc *guc = &engine->gt->uc.guc;
+ struct intel_guc *guc = gt_to_guc(engine->gt);
struct intel_context *ce;
unsigned long index;
unsigned long flags;
@@ -5817,7 +5828,7 @@ guc_create_virtual(struct intel_engine_cs **siblings, unsigned int count,
if (!ve)
return ERR_PTR(-ENOMEM);
- guc = &siblings[0]->gt->uc.guc;
+ guc = gt_to_guc(siblings[0]->gt);
ve->base.i915 = siblings[0]->i915;
ve->base.gt = siblings[0]->gt;
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_huc.c b/drivers/gpu/drm/i915/gt/uc/intel_huc.c
index 0945b177d5..2d9152eb72 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_huc.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_huc.c
@@ -385,7 +385,7 @@ int intel_huc_init(struct intel_huc *huc)
if (HAS_ENGINE(gt, GSC0)) {
struct i915_vma *vma;
- vma = intel_guc_allocate_vma(&gt->uc.guc, PXP43_HUC_AUTH_INOUT_SIZE * 2);
+ vma = intel_guc_allocate_vma(gt_to_guc(gt), PXP43_HUC_AUTH_INOUT_SIZE * 2);
if (IS_ERR(vma)) {
err = PTR_ERR(vma);
huc_info(huc, "Failed to allocate heci pkt\n");
@@ -540,7 +540,7 @@ int intel_huc_wait_for_auth_complete(struct intel_huc *huc,
int intel_huc_auth(struct intel_huc *huc, enum intel_huc_authentication_type type)
{
struct intel_gt *gt = huc_to_gt(huc);
- struct intel_guc *guc = &gt->uc.guc;
+ struct intel_guc *guc = gt_to_guc(gt);
int ret;
if (!intel_uc_fw_is_loaded(&huc->fw))
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc.c b/drivers/gpu/drm/i915/gt/uc/intel_uc.c
index 399bc31918..7a63abf8f6 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_uc.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_uc.c
@@ -50,10 +50,6 @@ static void uc_expand_default_options(struct intel_uc *uc)
/* Default: enable HuC authentication and GuC submission */
i915->params.enable_guc = ENABLE_GUC_LOAD_HUC | ENABLE_GUC_SUBMISSION;
-
- /* XEHPSDV and PVC do not use HuC */
- if (IS_XEHPSDV(i915) || IS_PONTEVECCHIO(i915))
- i915->params.enable_guc &= ~ENABLE_GUC_LOAD_HUC;
}
/* Reset GuC providing us with fresh state for both GuC and HuC.
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c
index 756093eaf2..d80278eb45 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c
@@ -807,7 +807,7 @@ static int try_firmware_load(struct intel_uc_fw *uc_fw, const struct firmware **
static int check_mtl_huc_guc_compatibility(struct intel_gt *gt,
struct intel_uc_fw_file *huc_selected)
{
- struct intel_uc_fw_file *guc_selected = &gt->uc.guc.fw.file_selected;
+ struct intel_uc_fw_file *guc_selected = &gt_to_guc(gt)->fw.file_selected;
struct intel_uc_fw_ver *huc_ver = &huc_selected->ver;
struct intel_uc_fw_ver *guc_ver = &guc_selected->ver;
bool new_huc, new_guc;
@@ -1209,7 +1209,7 @@ static int uc_fw_rsa_data_create(struct intel_uc_fw *uc_fw)
* since its GGTT offset will be GuC accessible.
*/
GEM_BUG_ON(uc_fw->rsa_size > PAGE_SIZE);
- vma = intel_guc_allocate_vma(&gt->uc.guc, PAGE_SIZE);
+ vma = intel_guc_allocate_vma(gt_to_guc(gt), PAGE_SIZE);
if (IS_ERR(vma))
return PTR_ERR(vma);
diff --git a/drivers/gpu/drm/i915/gt/uc/selftest_guc.c b/drivers/gpu/drm/i915/gt/uc/selftest_guc.c
index c900aac85a..68feb55654 100644
--- a/drivers/gpu/drm/i915/gt/uc/selftest_guc.c
+++ b/drivers/gpu/drm/i915/gt/uc/selftest_guc.c
@@ -144,7 +144,7 @@ err:
static int intel_guc_steal_guc_ids(void *arg)
{
struct intel_gt *gt = arg;
- struct intel_guc *guc = &gt->uc.guc;
+ struct intel_guc *guc = gt_to_guc(gt);
int ret, sv, context_index = 0;
intel_wakeref_t wakeref;
struct intel_engine_cs *engine;