author    Daniel Baumann <daniel.baumann@progress-linux.org>  2024-05-18 17:35:05 +0000
committer Daniel Baumann <daniel.baumann@progress-linux.org>  2024-05-18 17:39:31 +0000
commit    85c675d0d09a45a135bddd15d7b385f8758c32fb (patch)
tree      76267dbc9b9a130337be3640948fe397b04ac629 /drivers/gpu/drm/i915/gt/uc
parent    Adding upstream version 6.6.15. (diff)
Adding upstream version 6.7.7. (tag: upstream/6.7.7)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'drivers/gpu/drm/i915/gt/uc')
 drivers/gpu/drm/i915/gt/uc/abi/guc_actions_abi.h          |  33
 drivers/gpu/drm/i915/gt/uc/intel_gsc_uc.c                 |   8
 drivers/gpu/drm/i915/gt/uc/intel_gsc_uc_heci_cmd_submit.c |  20
 drivers/gpu/drm/i915/gt/uc/intel_gsc_uc_heci_cmd_submit.h |   6
 drivers/gpu/drm/i915/gt/uc/intel_guc.c                    |  70
 drivers/gpu/drm/i915/gt/uc/intel_guc.h                    |  39
 drivers/gpu/drm/i915/gt/uc/intel_guc_capture.c            |   4
 drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c                 |  46
 drivers/gpu/drm/i915/gt/uc/intel_guc_ct.h                 |   2
 drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h               |   2
 drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.c               |  63
 drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.h               |   2
 drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c         | 249
 drivers/gpu/drm/i915/gt/uc/intel_uc.c                     |   7
 drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c                  |  40
 15 files changed, 469 insertions(+), 122 deletions(-)
diff --git a/drivers/gpu/drm/i915/gt/uc/abi/guc_actions_abi.h b/drivers/gpu/drm/i915/gt/uc/abi/guc_actions_abi.h
index f359bef046..33f253410d 100644
--- a/drivers/gpu/drm/i915/gt/uc/abi/guc_actions_abi.h
+++ b/drivers/gpu/drm/i915/gt/uc/abi/guc_actions_abi.h
@@ -138,6 +138,8 @@ enum intel_guc_action {
 	INTEL_GUC_ACTION_REGISTER_CONTEXT_MULTI_LRC = 0x4601,
 	INTEL_GUC_ACTION_CLIENT_SOFT_RESET = 0x5507,
 	INTEL_GUC_ACTION_SET_ENG_UTIL_BUFF = 0x550A,
+	INTEL_GUC_ACTION_TLB_INVALIDATION = 0x7000,
+	INTEL_GUC_ACTION_TLB_INVALIDATION_DONE = 0x7001,
 	INTEL_GUC_ACTION_STATE_CAPTURE_NOTIFICATION = 0x8002,
 	INTEL_GUC_ACTION_NOTIFY_FLUSH_LOG_BUFFER_TO_FILE = 0x8003,
 	INTEL_GUC_ACTION_NOTIFY_CRASH_DUMP_POSTED = 0x8004,
@@ -181,4 +183,35 @@ enum intel_guc_state_capture_event_status {
 
 #define INTEL_GUC_STATE_CAPTURE_EVENT_STATUS_MASK 0x000000FF
 
+#define INTEL_GUC_TLB_INVAL_TYPE_MASK	REG_GENMASK(7, 0)
+#define INTEL_GUC_TLB_INVAL_MODE_MASK	REG_GENMASK(11, 8)
+#define INTEL_GUC_TLB_INVAL_FLUSH_CACHE	REG_BIT(31)
+
+enum intel_guc_tlb_invalidation_type {
+	INTEL_GUC_TLB_INVAL_ENGINES = 0x0,
+	INTEL_GUC_TLB_INVAL_GUC = 0x3,
+};
+
+/*
+ * 0: Heavy mode of Invalidation:
+ * The pipeline of the engine(s) for which the invalidation is targeted to is
+ * blocked, and all the in-flight transactions are guaranteed to be Globally
+ * Observed before completing the TLB invalidation
+ * 1: Lite mode of Invalidation:
+ * TLBs of the targeted engine(s) are immediately invalidated.
+ * In-flight transactions are NOT guaranteed to be Globally Observed before
+ * completing TLB invalidation.
+ * Light Invalidation Mode is to be used only when
+ * it can be guaranteed (by SW) that the address translations remain invariant
+ * for the in-flight transactions across the TLB invalidation. In other words,
+ * this mode can be used when the TLB invalidation is intended to clear out the
+ * stale cached translations that are no longer in use. Light Invalidation Mode
+ * is much faster than the Heavy Invalidation Mode, as it does not wait for the
+ * in-flight transactions to be GOd.
+ */
+enum intel_guc_tlb_inval_mode {
+	INTEL_GUC_TLB_INVAL_MODE_HEAVY = 0x0,
+	INTEL_GUC_TLB_INVAL_MODE_LITE = 0x1,
+};
+
 #endif /* _ABI_GUC_ACTIONS_ABI_H */
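The new ABI packs the invalidation type, the invalidation mode, and a flush-cache flag into a single dword of the H2G request. A minimal sketch of how a sender composes that dword from the masks above, assuming the REG_FIELD_PREP()/REG_BIT() helpers from i915_reg_defs.h (this mirrors what guc_send_invalidate_tlb() in intel_guc_submission.c builds further down):

	/* Sketch only: third dword of an INTEL_GUC_ACTION_TLB_INVALIDATION request. */
	u32 desc = REG_FIELD_PREP(INTEL_GUC_TLB_INVAL_TYPE_MASK,
				  INTEL_GUC_TLB_INVAL_ENGINES) |
		   REG_FIELD_PREP(INTEL_GUC_TLB_INVAL_MODE_MASK,
				  INTEL_GUC_TLB_INVAL_MODE_HEAVY) |
		   INTEL_GUC_TLB_INVAL_FLUSH_CACHE;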
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_gsc_uc.c b/drivers/gpu/drm/i915/gt/uc/intel_gsc_uc.c
index 0d3b22a743..453d855dd1 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_gsc_uc.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_gsc_uc.c
@@ -68,8 +68,7 @@ static void gsc_work(struct work_struct *work)
 			 * A proxy failure right after firmware load means the proxy-init
 			 * step has failed so mark GSC as not usable after this
 			 */
-			drm_err(&gt->i915->drm,
-				"GSC proxy handler failed to init\n");
+			gt_err(gt, "GSC proxy handler failed to init\n");
 			intel_uc_fw_change_status(&gsc->fw, INTEL_UC_FIRMWARE_LOAD_FAIL);
 		}
 		goto out_put;
@@ -83,11 +82,10 @@ static void gsc_work(struct work_struct *work)
 		 * status register to check if the proxy init was actually successful
 		 */
		if (intel_gsc_uc_fw_proxy_init_done(gsc, false)) {
-			drm_dbg(&gt->i915->drm, "GSC Proxy initialized\n");
+			gt_dbg(gt, "GSC Proxy initialized\n");
 			intel_uc_fw_change_status(&gsc->fw, INTEL_UC_FIRMWARE_RUNNING);
 		} else {
-			drm_err(&gt->i915->drm,
-				"GSC status reports proxy init not complete\n");
+			gt_err(gt, "GSC status reports proxy init not complete\n");
 			intel_uc_fw_change_status(&gsc->fw, INTEL_UC_FIRMWARE_LOAD_FAIL);
 		}
 	}
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_gsc_uc_heci_cmd_submit.c b/drivers/gpu/drm/i915/gt/uc/intel_gsc_uc_heci_cmd_submit.c
index 89ed5ee9cd..2fde5c360c 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_gsc_uc_heci_cmd_submit.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_gsc_uc_heci_cmd_submit.c
@@ -81,8 +81,17 @@ out_rq:
 	i915_request_add(rq);
 
-	if (!err && i915_request_wait(rq, 0, msecs_to_jiffies(500)) < 0)
-		err = -ETIME;
+	if (!err) {
+		/*
+		 * Start timeout for i915_request_wait only after considering one possible
+		 * pending GSC-HECI submission cycle on the other (non-privileged) path.
+		 */
+		if (wait_for(i915_request_started(rq), GSC_HECI_REPLY_LATENCY_MS))
+			drm_dbg(&gsc_uc_to_gt(gsc)->i915->drm,
+				"Delay in gsc-heci-priv submission to gsccs-hw");
+		if (i915_request_wait(rq, 0, msecs_to_jiffies(GSC_HECI_REPLY_LATENCY_MS)) < 0)
+			err = -ETIME;
+	}
 
 	i915_request_put(rq);
 
@@ -186,6 +195,13 @@ out_rq:
 	i915_request_add(rq);
 
 	if (!err) {
+		/*
+		 * Start timeout for i915_request_wait only after considering one possible
+		 * pending GSC-HECI submission cycle on the other (privileged) path.
+		 */
+		if (wait_for(i915_request_started(rq), GSC_HECI_REPLY_LATENCY_MS))
+			drm_dbg(&gsc_uc_to_gt(gsc)->i915->drm,
+				"Delay in gsc-heci-non-priv submission to gsccs-hw");
 		if (i915_request_wait(rq, I915_WAIT_INTERRUPTIBLE,
 				      msecs_to_jiffies(timeout_ms)) < 0)
 			err = -ETIME;
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_gsc_uc_heci_cmd_submit.h b/drivers/gpu/drm/i915/gt/uc/intel_gsc_uc_heci_cmd_submit.h
index 09d3fbdad0..c4308291c0 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_gsc_uc_heci_cmd_submit.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_gsc_uc_heci_cmd_submit.h
@@ -12,6 +12,12 @@ struct i915_vma;
 struct intel_context;
 struct intel_gsc_uc;
 
+#define GSC_HECI_REPLY_LATENCY_MS 500
+/*
+ * Max FW response time is 500ms, but this should be counted from the time the
+ * command has hit the GSC-CS hardware, not the preceding handoff to GuC CTB.
+ */
+
 struct intel_gsc_mtl_header {
 	u32 validity_marker;
 #define GSC_HECI_VALIDITY_MARKER 0xA578875A
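Taken together with the two-phase wait added in intel_gsc_uc_heci_cmd_submit.c above, the worst-case wall time for one synchronous HECI command is bounded by roughly two reply budgets; a sketch of the arithmetic, assuming nothing else is queued ahead of the request:

	/* Up to one budget for the request to start on the GSC-CS hardware
	 * (absorbing a pending cycle on the other submission path), plus one
	 * budget for the firmware reply itself.
	 */
	unsigned int worst_case_ms = 2 * GSC_HECI_REPLY_LATENCY_MS; /* 1000 ms */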
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc.c b/drivers/gpu/drm/i915/gt/uc/intel_guc.c
index 569b5fe94c..3f3df1166b 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc.c
@@ -159,6 +159,21 @@ static void gen11_disable_guc_interrupts(struct intel_guc *guc)
 	gen11_reset_guc_interrupts(guc);
 }
 
+static void guc_dead_worker_func(struct work_struct *w)
+{
+	struct intel_guc *guc = container_of(w, struct intel_guc, dead_guc_worker);
+	struct intel_gt *gt = guc_to_gt(guc);
+	unsigned long last = guc->last_dead_guc_jiffies;
+	unsigned long delta = jiffies_to_msecs(jiffies - last);
+
+	if (delta < 500) {
+		intel_gt_set_wedged(gt);
+	} else {
+		intel_gt_handle_error(gt, ALL_ENGINES, I915_ERROR_CAPTURE, "dead GuC");
+		guc->last_dead_guc_jiffies = jiffies;
+	}
+}
+
 void intel_guc_init_early(struct intel_guc *guc)
 {
 	struct intel_gt *gt = guc_to_gt(guc);
@@ -171,6 +186,8 @@ void intel_guc_init_early(struct intel_guc *guc)
 	intel_guc_slpc_init_early(&guc->slpc);
 	intel_guc_rc_init_early(guc);
 
+	INIT_WORK(&guc->dead_guc_worker, guc_dead_worker_func);
+
 	mutex_init(&guc->send_mutex);
 	spin_lock_init(&guc->irq_lock);
 	if (GRAPHICS_VER(i915) >= 11) {
@@ -272,18 +289,14 @@ static u32 guc_ctl_wa_flags(struct intel_guc *guc)
 	    GRAPHICS_VER_FULL(gt->i915) < IP_VER(12, 50))
 		flags |= GUC_WA_POLLCS;
 
-	/* Wa_16011759253:dg2_g10:a0 */
-	if (IS_DG2_GRAPHICS_STEP(gt->i915, G10, STEP_A0, STEP_B0))
-		flags |= GUC_WA_GAM_CREDITS;
-
 	/* Wa_14014475959 */
-	if (IS_MTL_GRAPHICS_STEP(gt->i915, M, STEP_A0, STEP_B0) ||
+	if (IS_GFX_GT_IP_STEP(gt, IP_VER(12, 70), STEP_A0, STEP_B0) ||
 	    IS_DG2(gt->i915))
 		flags |= GUC_WA_HOLD_CCS_SWITCHOUT;
 
 	/*
-	 * Wa_14012197797:dg2_g10:a0,dg2_g11:a0
-	 * Wa_22011391025:dg2_g10,dg2_g11,dg2_g12
+	 * Wa_14012197797
+	 * Wa_22011391025
 	 *
 	 * The same WA bit is used for both and 22011391025 is applicable to
 	 * all DG2.
@@ -292,28 +305,26 @@ static u32 guc_ctl_wa_flags(struct intel_guc *guc)
 		flags |= GUC_WA_DUAL_QUEUE;
 
 	/* Wa_22011802037: graphics version 11/12 */
-	if (IS_MTL_GRAPHICS_STEP(gt->i915, M, STEP_A0, STEP_B0) ||
-	    (GRAPHICS_VER(gt->i915) >= 11 &&
-	     GRAPHICS_VER_FULL(gt->i915) < IP_VER(12, 70)))
+	if (intel_engine_reset_needs_wa_22011802037(gt))
 		flags |= GUC_WA_PRE_PARSER;
 
-	/* Wa_16011777198:dg2 */
-	if (IS_DG2_GRAPHICS_STEP(gt->i915, G10, STEP_A0, STEP_C0) ||
-	    IS_DG2_GRAPHICS_STEP(gt->i915, G11, STEP_A0, STEP_B0))
-		flags |= GUC_WA_RCS_RESET_BEFORE_RC6;
-
 	/*
-	 * Wa_22012727170:dg2_g10[a0-c0), dg2_g11[a0..)
-	 * Wa_22012727685:dg2_g11[a0..)
+	 * Wa_22012727170
+	 * Wa_22012727685
 	 */
-	if (IS_DG2_GRAPHICS_STEP(gt->i915, G10, STEP_A0, STEP_C0) ||
-	    IS_DG2_GRAPHICS_STEP(gt->i915, G11, STEP_A0, STEP_FOREVER))
+	if (IS_DG2_G11(gt->i915))
 		flags |= GUC_WA_CONTEXT_ISOLATION;
 
 	/* Wa_16015675438 */
 	if (!RCS_MASK(gt))
 		flags |= GUC_WA_RCS_REGS_IN_CCS_REGS_LIST;
 
+	/* Wa_14018913170 */
+	if (GUC_FIRMWARE_VER(guc) >= MAKE_GUC_VER(70, 7, 0)) {
+		if (IS_DG2(gt->i915) || IS_METEORLAKE(gt->i915) || IS_PONTEVECCHIO(gt->i915))
+			flags |= GUC_WA_ENABLE_TSC_CHECK_ON_RC6;
+	}
+
 	return flags;
 }
 
@@ -461,6 +472,8 @@ void intel_guc_fini(struct intel_guc *guc)
 	if (!intel_uc_fw_is_loadable(&guc->fw))
 		return;
 
+	flush_work(&guc->dead_guc_worker);
+
 	if (intel_guc_slpc_is_used(guc))
 		intel_guc_slpc_fini(&guc->slpc);
 
@@ -585,6 +598,20 @@ out:
 	return ret;
 }
 
+int intel_guc_crash_process_msg(struct intel_guc *guc, u32 action)
+{
+	if (action == INTEL_GUC_ACTION_NOTIFY_CRASH_DUMP_POSTED)
+		guc_err(guc, "Crash dump notification\n");
+	else if (action == INTEL_GUC_ACTION_NOTIFY_EXCEPTION)
+		guc_err(guc, "Exception notification\n");
+	else
+		guc_err(guc, "Unknown crash notification: 0x%04X\n", action);
+
+	queue_work(system_unbound_wq, &guc->dead_guc_worker);
+
+	return 0;
+}
+
 int intel_guc_to_host_process_recv_msg(struct intel_guc *guc,
 				       const u32 *payload, u32 len)
 {
@@ -601,6 +628,9 @@ int intel_guc_to_host_process_recv_msg(struct intel_guc *guc,
 	if (msg & INTEL_GUC_RECV_MSG_EXCEPTION)
 		guc_err(guc, "Received early exception notification!\n");
 
+	if (msg & (INTEL_GUC_RECV_MSG_CRASH_DUMP_POSTED | INTEL_GUC_RECV_MSG_EXCEPTION))
+		queue_work(system_unbound_wq, &guc->dead_guc_worker);
+
 	return 0;
 }
 
@@ -640,6 +670,8 @@ int intel_guc_suspend(struct intel_guc *guc)
 		return 0;
 
 	if (intel_guc_submission_is_used(guc)) {
+		flush_work(&guc->dead_guc_worker);
+
 		/*
 		 * This H2G MMIO command tears down the GuC in two steps. First it will
 		 * generate a G2H CTB for every active context indicating a reset. In
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc.h b/drivers/gpu/drm/i915/gt/uc/intel_guc.h
index 8dc291ff00..2b6dfe62c8 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc.h
@@ -79,6 +79,18 @@ struct intel_guc {
 	 */
 	atomic_t outstanding_submission_g2h;
 
+	/** @tlb_lookup: xarray to store all pending TLB invalidation requests */
+	struct xarray tlb_lookup;
+
+	/**
+	 * @serial_slot: id to the initial waiter created in tlb_lookup,
+	 * which is used only when failed to allocate new waiter.
+	 */
+	u32 serial_slot;
+
+	/** @next_seqno: the next id (sequence number) to allocate. */
+	u32 next_seqno;
+
 	/** @interrupts: pointers to GuC interrupt-managing functions. */
 	struct {
 		bool enabled;
@@ -266,6 +278,20 @@ struct intel_guc {
 		unsigned long last_stat_jiffies;
 	} timestamp;
 
+	/**
+	 * @dead_guc_worker: Asynchronous worker thread for forcing a GuC reset.
+	 * Specifically used when the G2H handler wants to issue a reset. Resets
+	 * require flushing the G2H queue. So, the G2H processing itself must not
+	 * trigger a reset directly. Instead, go via this worker.
+	 */
+	struct work_struct dead_guc_worker;
+	/**
+	 * @last_dead_guc_jiffies: timestamp of previous 'dead guc' occurrence
+	 * used to prevent a fundamentally broken system from continuously
+	 * reloading the GuC.
+	 */
+	unsigned long last_dead_guc_jiffies;
+
 #ifdef CONFIG_DRM_I915_SELFTEST
 	/**
 	 * @number_guc_id_stolen: The number of guc_ids that have been stolen
@@ -274,6 +300,11 @@ struct intel_guc {
 #endif
 };
 
+struct intel_guc_tlb_wait {
+	struct wait_queue_head wq;
+	bool busy;
+};
+
 /*
  * GuC version number components are only 8-bit, so converting to a 32bit 8.8.8
 * integer works.
 */
@@ -281,6 +312,7 @@ struct intel_guc {
 #define MAKE_GUC_VER(maj, min, pat)	(((maj) << 16) | ((min) << 8) | (pat))
 #define MAKE_GUC_VER_STRUCT(ver)	MAKE_GUC_VER((ver).major, (ver).minor, (ver).patch)
 #define GUC_SUBMIT_VER(guc)		MAKE_GUC_VER_STRUCT((guc)->submission_version)
+#define GUC_FIRMWARE_VER(guc)		MAKE_GUC_VER_STRUCT((guc)->fw.file_selected.ver)
 
 static inline struct intel_guc *log_to_guc(struct intel_guc_log *log)
 {
@@ -476,6 +508,7 @@ int intel_guc_engine_failure_process_msg(struct intel_guc *guc,
 					 const u32 *msg, u32 len);
 int intel_guc_error_capture_process_msg(struct intel_guc *guc,
 					const u32 *msg, u32 len);
+int intel_guc_crash_process_msg(struct intel_guc *guc, u32 action);
 
 struct intel_engine_cs *
 intel_guc_lookup_engine(struct intel_guc *guc, u8 guc_class, u8 instance);
@@ -499,4 +532,10 @@ void intel_guc_dump_time_info(struct intel_guc *guc, struct drm_printer *p);
 
 int intel_guc_sched_disable_gucid_threshold_max(struct intel_guc *guc);
 
+bool intel_guc_tlb_invalidation_is_available(struct intel_guc *guc);
+int intel_guc_invalidate_tlb_engines(struct intel_guc *guc);
+int intel_guc_invalidate_tlb_guc(struct intel_guc *guc);
+int intel_guc_tlb_invalidation_done(struct intel_guc *guc,
+				    const u32 *payload, u32 len);
+void wake_up_all_tlb_invalidate(struct intel_guc *guc);
 #endif
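The 8.8.8 packing makes firmware versions directly comparable as plain integers; a worked example (illustrative only) using the macros above:

	/* 70.12.1 -> (70 << 16) | (12 << 8) | 1 == 0x460C01
	 * 70.7.0  -> 0x460700
	 * so MAKE_GUC_VER(70, 12, 1) >= MAKE_GUC_VER(70, 7, 0) holds, which is
	 * exactly how the Wa_14018913170 gate in intel_guc.c tests
	 * GUC_FIRMWARE_VER(guc).
	 */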
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_capture.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_capture.c
index 331cec07c1..a4da0208c8 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_capture.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_capture.c
@@ -1101,8 +1101,8 @@ guc_capture_create_prealloc_nodes(struct intel_guc *guc)
 static int
 guc_capture_extract_reglists(struct intel_guc *guc, struct __guc_capture_bufstate *buf)
 {
-	struct guc_state_capture_group_header_t ghdr = {0};
-	struct guc_state_capture_header_t hdr = {0};
+	struct guc_state_capture_group_header_t ghdr = {};
+	struct guc_state_capture_header_t hdr = {};
 	struct __guc_capture_parsed_output *node = NULL;
 	struct guc_mmio_reg *regs = NULL;
 	int i, numlists, numregs, ret = 0;
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c
index 97eadd0818..89e314b375 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c
@@ -96,13 +96,40 @@ struct ct_request {
 struct ct_incoming_msg {
 	struct list_head link;
 	u32 size;
-	u32 msg[];
+	u32 msg[] __counted_by(size);
 };
 
 enum { CTB_SEND = 0, CTB_RECV = 1 };
 
 enum { CTB_OWNER_HOST = 0 };
 
+/*
+ * Some H2G commands involve a synchronous response that the driver needs
+ * to wait for. In such cases, a timeout is required to prevent the driver
+ * from waiting forever in the case of an error (either no error response
+ * is defined in the protocol or something has died and requires a reset).
+ * The specific command may be defined as having a time bound response but
+ * the CT is a queue and that time guarantee only starts from the point
+ * when the command reaches the head of the queue and is processed by GuC.
+ *
+ * Ideally there would be a helper to report the progress of a given
+ * command through the CT. However, that would require a significant
+ * amount of work in the CT layer. In the meantime, provide a reasonable
+ * estimation of the worst case latency it should take for the entire
+ * queue to drain. And therefore, how long a caller should wait before
+ * giving up on their request. The current estimate is based on empirical
+ * measurement of a test that fills the buffer with context creation and
+ * destruction requests as they seem to be the slowest operation.
+ */
+long intel_guc_ct_max_queue_time_jiffies(void)
+{
+	/*
+	 * A 4KB buffer full of context destroy commands takes a little
+	 * over a second to process so bump that to 2s to be super safe.
+	 */
+	return (CTB_H2G_BUFFER_SIZE * HZ) / SZ_2K;
+}
+
 static void ct_receive_tasklet_func(struct tasklet_struct *t);
 static void ct_incoming_request_worker_func(struct work_struct *w);
 
@@ -1112,12 +1139,11 @@ static int ct_process_request(struct intel_guc_ct *ct, struct ct_incoming_msg *request)
 		ret = 0;
 		break;
 	case INTEL_GUC_ACTION_NOTIFY_CRASH_DUMP_POSTED:
-		CT_ERROR(ct, "Received GuC crash dump notification!\n");
-		ret = 0;
-		break;
 	case INTEL_GUC_ACTION_NOTIFY_EXCEPTION:
-		CT_ERROR(ct, "Received GuC exception notification!\n");
-		ret = 0;
+		ret = intel_guc_crash_process_msg(guc, action);
+		break;
+	case INTEL_GUC_ACTION_TLB_INVALIDATION_DONE:
+		ret = intel_guc_tlb_invalidation_done(guc, payload, len);
 		break;
 	default:
 		ret = -EOPNOTSUPP;
@@ -1190,9 +1216,17 @@ static int ct_handle_event(struct intel_guc_ct *ct, struct ct_incoming_msg *request)
 	switch (action) {
 	case INTEL_GUC_ACTION_SCHED_CONTEXT_MODE_DONE:
 	case INTEL_GUC_ACTION_DEREGISTER_CONTEXT_DONE:
+	case INTEL_GUC_ACTION_TLB_INVALIDATION_DONE:
 		g2h_release_space(ct, request->size);
 	}
 
+	/*
+	 * TLB invalidation responses must be handled immediately as processing
+	 * of other G2H notifications may be blocked by an invalidation request.
+	 */
+	if (action == INTEL_GUC_ACTION_TLB_INVALIDATION_DONE)
+		return ct_process_request(ct, request);
+
 	spin_lock_irqsave(&ct->requests.lock, flags);
 	list_add_tail(&request->link, &ct->requests.incoming);
 	spin_unlock_irqrestore(&ct->requests.lock, flags);
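A worked instance of the budget above, assuming CTB_H2G_BUFFER_SIZE is SZ_4K as the in-code comment implies:

	/* (SZ_4K * HZ) / SZ_2K == 2 * HZ, i.e. callers wait up to two seconds
	 * for the whole H2G queue to drain -- double the ~1s empirically
	 * measured for a buffer full of context-destroy commands.
	 */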
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.h b/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.h
index 58e42901ff..2c4bb9a941 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.h
@@ -104,6 +104,8 @@ struct intel_guc_ct {
 #endif
 };
 
+long intel_guc_ct_max_queue_time_jiffies(void);
+
 void intel_guc_ct_init_early(struct intel_guc_ct *ct);
 int intel_guc_ct_init(struct intel_guc_ct *ct);
 void intel_guc_ct_fini(struct intel_guc_ct *ct);
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h b/drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h
index b4d56eccfb..8ae1846431 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h
@@ -22,6 +22,7 @@
 /* Payload length only i.e. don't include G2H header length */
 #define G2H_LEN_DW_SCHED_CONTEXT_MODE_SET	2
 #define G2H_LEN_DW_DEREGISTER_CONTEXT		1
+#define G2H_LEN_DW_INVALIDATE_TLB		1
 
 #define GUC_CONTEXT_DISABLE		0
 #define GUC_CONTEXT_ENABLE		1
@@ -100,6 +101,7 @@
 #define GUC_WA_HOLD_CCS_SWITCHOUT	BIT(17)
 #define GUC_WA_POLLCS			BIT(18)
 #define GUC_WA_RCS_REGS_IN_CCS_REGS_LIST	BIT(21)
+#define GUC_WA_ENABLE_TSC_CHECK_ON_RC6	BIT(22)
 
 #define GUC_CTL_FEATURE			2
 #define GUC_CTL_ENABLE_SLPC		BIT(2)
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.c
index 477df260ae..2dfb07cc4b 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.c
@@ -138,17 +138,6 @@ static int guc_action_slpc_set_param(struct intel_guc *guc, u8 id, u32 value)
 	return ret > 0 ? -EPROTO : ret;
 }
 
-static int guc_action_slpc_unset_param(struct intel_guc *guc, u8 id)
-{
-	u32 request[] = {
-		GUC_ACTION_HOST2GUC_PC_SLPC_REQUEST,
-		SLPC_EVENT(SLPC_EVENT_PARAMETER_UNSET, 1),
-		id,
-	};
-
-	return intel_guc_send(guc, request, ARRAY_SIZE(request));
-}
-
 static bool slpc_is_running(struct intel_guc_slpc *slpc)
 {
 	return slpc_get_state(slpc) == SLPC_GLOBAL_STATE_RUNNING;
@@ -199,15 +188,6 @@ static int slpc_set_param(struct intel_guc_slpc *slpc, u8 id, u32 value)
 	return ret;
 }
 
-static int slpc_unset_param(struct intel_guc_slpc *slpc, u8 id)
-{
-	struct intel_guc *guc = slpc_to_guc(slpc);
-
-	GEM_BUG_ON(id >= SLPC_MAX_PARAM);
-
-	return guc_action_slpc_unset_param(guc, id);
-}
-
 static int slpc_force_min_freq(struct intel_guc_slpc *slpc, u32 freq)
 {
 	struct intel_guc *guc = slpc_to_guc(slpc);
@@ -672,49 +652,6 @@ static void slpc_get_rp_values(struct intel_guc_slpc *slpc)
 		slpc->boost_freq = slpc->rp0_freq;
 }
 
-/**
- * intel_guc_slpc_override_gucrc_mode() - override GUCRC mode
- * @slpc: pointer to intel_guc_slpc.
- * @mode: new value of the mode.
- *
- * This function will override the GUCRC mode.
- *
- * Return: 0 on success, non-zero error code on failure.
- */
-int intel_guc_slpc_override_gucrc_mode(struct intel_guc_slpc *slpc, u32 mode)
-{
-	int ret;
-	struct drm_i915_private *i915 = slpc_to_i915(slpc);
-	intel_wakeref_t wakeref;
-
-	if (mode >= SLPC_GUCRC_MODE_MAX)
-		return -EINVAL;
-
-	with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
-		ret = slpc_set_param(slpc, SLPC_PARAM_PWRGATE_RC_MODE, mode);
-		if (ret)
-			guc_err(slpc_to_guc(slpc), "Override RC mode %d failed: %pe\n",
-				mode, ERR_PTR(ret));
-	}
-
-	return ret;
-}
-
-int intel_guc_slpc_unset_gucrc_mode(struct intel_guc_slpc *slpc)
-{
-	struct drm_i915_private *i915 = slpc_to_i915(slpc);
-	intel_wakeref_t wakeref;
-	int ret = 0;
-
-	with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
-		ret = slpc_unset_param(slpc, SLPC_PARAM_PWRGATE_RC_MODE);
-		if (ret)
-			guc_err(slpc_to_guc(slpc), "Unsetting RC mode failed: %pe\n", ERR_PTR(ret));
-	}
-
-	return ret;
-}
-
 /*
  * intel_guc_slpc_enable() - Start SLPC
 * @slpc: pointer to intel_guc_slpc.
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.h b/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.h
index 597eb5413d..6ac6503c39 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.h
@@ -44,8 +44,6 @@ int intel_guc_slpc_set_media_ratio_mode(struct intel_guc_slpc *slpc, u32 val);
 void intel_guc_pm_intrmsk_enable(struct intel_gt *gt);
 void intel_guc_slpc_boost(struct intel_guc_slpc *slpc);
 void intel_guc_slpc_dec_waiters(struct intel_guc_slpc *slpc);
-int intel_guc_slpc_unset_gucrc_mode(struct intel_guc_slpc *slpc);
-int intel_guc_slpc_override_gucrc_mode(struct intel_guc_slpc *slpc, u32 mode);
 int intel_guc_slpc_set_ignore_eff_freq(struct intel_guc_slpc *slpc, bool val);
 
 #endif
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
index 836e4d9d65..17df71117c 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
@@ -32,6 +32,7 @@
 
 #include "i915_drv.h"
 #include "i915_reg.h"
+#include "i915_irq.h"
 #include "i915_trace.h"
 
 /**
@@ -1690,9 +1691,7 @@ static void guc_engine_reset_prepare(struct intel_engine_cs *engine)
 	 * Wa_22011802037: In addition to stopping the cs, we need
 	 * to wait for any pending mi force wakeups
 	 */
-	if (IS_MTL_GRAPHICS_STEP(engine->i915, M, STEP_A0, STEP_B0) ||
-	    (GRAPHICS_VER(engine->i915) >= 11 &&
-	     GRAPHICS_VER_FULL(engine->i915) < IP_VER(12, 70))) {
+	if (intel_engine_reset_needs_wa_22011802037(engine->gt)) {
 		intel_engine_stop_cs(engine);
 		intel_engine_wait_for_pending_mi_fw(engine);
 	}
@@ -1798,6 +1797,20 @@ next_context:
 	intel_context_put(parent);
 }
 
+void wake_up_all_tlb_invalidate(struct intel_guc *guc)
+{
+	struct intel_guc_tlb_wait *wait;
+	unsigned long i;
+
+	if (!intel_guc_tlb_invalidation_is_available(guc))
+		return;
+
+	xa_lock_irq(&guc->tlb_lookup);
+	xa_for_each(&guc->tlb_lookup, i, wait)
+		wake_up(&wait->wq);
+	xa_unlock_irq(&guc->tlb_lookup);
+}
+
 void intel_guc_submission_reset(struct intel_guc *guc, intel_engine_mask_t stalled)
 {
 	struct intel_context *ce;
@@ -1923,6 +1936,12 @@ void intel_guc_submission_cancel_requests(struct intel_guc *guc)
 
 	/* GuC is blown away, drop all references to contexts */
 	xa_destroy(&guc->context_lookup);
+
+	/*
+	 * Wedged GT won't respond to any TLB invalidation request. Simply
+	 * release all the blocked waiters.
+	 */
+	wake_up_all_tlb_invalidate(guc);
 }
 
 void intel_guc_submission_reset_finish(struct intel_guc *guc)
@@ -1945,11 +1964,65 @@ void intel_guc_submission_reset_finish(struct intel_guc *guc)
 	intel_guc_global_policies_update(guc);
 	enable_submission(guc);
 	intel_gt_unpark_heartbeats(guc_to_gt(guc));
+
+	/*
+	 * The full GT reset will have cleared the TLB caches and flushed the
+	 * G2H message queue; we can release all the blocked waiters.
+	 */
+	wake_up_all_tlb_invalidate(guc);
 }
 
 static void destroyed_worker_func(struct work_struct *w);
 static void reset_fail_worker_func(struct work_struct *w);
 
+bool intel_guc_tlb_invalidation_is_available(struct intel_guc *guc)
+{
+	return HAS_GUC_TLB_INVALIDATION(guc_to_gt(guc)->i915) &&
+		intel_guc_is_ready(guc);
+}
+
+static int init_tlb_lookup(struct intel_guc *guc)
+{
+	struct intel_guc_tlb_wait *wait;
+	int err;
+
+	if (!HAS_GUC_TLB_INVALIDATION(guc_to_gt(guc)->i915))
+		return 0;
+
+	xa_init_flags(&guc->tlb_lookup, XA_FLAGS_ALLOC);
+
+	wait = kzalloc(sizeof(*wait), GFP_KERNEL);
+	if (!wait)
+		return -ENOMEM;
+
+	init_waitqueue_head(&wait->wq);
+
+	/* Preallocate a shared id for use under memory pressure. */
+	err = xa_alloc_cyclic_irq(&guc->tlb_lookup, &guc->serial_slot, wait,
+				  xa_limit_32b, &guc->next_seqno, GFP_KERNEL);
+	if (err < 0) {
+		kfree(wait);
+		return err;
+	}
+
+	return 0;
+}
+
+static void fini_tlb_lookup(struct intel_guc *guc)
+{
+	struct intel_guc_tlb_wait *wait;
+
+	if (!HAS_GUC_TLB_INVALIDATION(guc_to_gt(guc)->i915))
+		return;
+
+	wait = xa_load(&guc->tlb_lookup, guc->serial_slot);
+	if (wait && wait->busy)
+		guc_err(guc, "Unexpected busy item in tlb_lookup on fini\n");
+	kfree(wait);
+
+	xa_destroy(&guc->tlb_lookup);
+}
+
 /*
  * Set up the memory resources to be shared with the GuC (via the GGTT)
  * at firmware loading time.
@@ -1968,11 +2041,15 @@ int intel_guc_submission_init(struct intel_guc *guc)
 		return ret;
 	}
 
+	ret = init_tlb_lookup(guc);
+	if (ret)
+		goto destroy_pool;
+
 	guc->submission_state.guc_ids_bitmap =
 		bitmap_zalloc(NUMBER_MULTI_LRC_GUC_ID(guc), GFP_KERNEL);
 	if (!guc->submission_state.guc_ids_bitmap) {
 		ret = -ENOMEM;
-		goto destroy_pool;
+		goto destroy_tlb;
 	}
 
 	guc->timestamp.ping_delay = (POLL_TIME_CLKS / gt->clock_frequency + 1) * HZ;
@@ -1981,9 +2058,10 @@ int intel_guc_submission_init(struct intel_guc *guc)
 
 	return 0;
 
+destroy_tlb:
+	fini_tlb_lookup(guc);
 destroy_pool:
 	guc_lrc_desc_pool_destroy_v69(guc);
-
 	return ret;
 }
 
@@ -1996,6 +2074,7 @@ void intel_guc_submission_fini(struct intel_guc *guc)
 	guc_lrc_desc_pool_destroy_v69(guc);
 	i915_sched_engine_put(guc->sched_engine);
 	bitmap_free(guc->submission_state.guc_ids_bitmap);
+	fini_tlb_lookup(guc);
 	guc->submission_initialized = false;
 }
 
@@ -4299,7 +4378,7 @@ static void guc_default_vfuncs(struct intel_engine_cs *engine)
 
 	/* Wa_14014475959:dg2 */
 	if (engine->class == COMPUTE_CLASS)
-		if (IS_MTL_GRAPHICS_STEP(engine->i915, M, STEP_A0, STEP_B0) ||
+		if (IS_GFX_GT_IP_STEP(engine->gt, IP_VER(12, 70), STEP_A0, STEP_B0) ||
 		    IS_DG2(engine->i915))
 			engine->flags |= I915_ENGINE_USES_WA_HOLD_CCS_SWITCHOUT;
 
@@ -4626,6 +4705,154 @@ g2h_context_lookup(struct intel_guc *guc, u32 ctx_id)
 	return ce;
 }
 
+static void wait_wake_outstanding_tlb_g2h(struct intel_guc *guc, u32 seqno)
+{
+	struct intel_guc_tlb_wait *wait;
+	unsigned long flags;
+
+	xa_lock_irqsave(&guc->tlb_lookup, flags);
+	wait = xa_load(&guc->tlb_lookup, seqno);
+
+	if (wait)
+		wake_up(&wait->wq);
+	else
+		guc_dbg(guc,
+			"Stale TLB invalidation response with seqno %d\n", seqno);
+
+	xa_unlock_irqrestore(&guc->tlb_lookup, flags);
+}
+
+int intel_guc_tlb_invalidation_done(struct intel_guc *guc,
+				    const u32 *payload, u32 len)
+{
+	if (len < 1)
+		return -EPROTO;
+
+	wait_wake_outstanding_tlb_g2h(guc, payload[0]);
+	return 0;
+}
+
+static long must_wait_woken(struct wait_queue_entry *wq_entry, long timeout)
+{
+	/*
+	 * This is equivalent to wait_woken() with the exception that
+	 * we do not wake up early if the kthread task has been completed.
+	 * As we are called from page reclaim in any task context,
+	 * we may be invoked from stopped kthreads, but we *must*
+	 * complete the wait from the HW.
+	 */
+	do {
+		set_current_state(TASK_UNINTERRUPTIBLE);
+		if (wq_entry->flags & WQ_FLAG_WOKEN)
+			break;
+
+		timeout = schedule_timeout(timeout);
+	} while (timeout);
+
+	/* See wait_woken() and woken_wake_function() */
+	__set_current_state(TASK_RUNNING);
+	smp_store_mb(wq_entry->flags, wq_entry->flags & ~WQ_FLAG_WOKEN);
+
+	return timeout;
+}
+
+static bool intel_gt_is_enabled(const struct intel_gt *gt)
+{
+	/* Check if GT is wedged or suspended */
+	if (intel_gt_is_wedged(gt) || !intel_irqs_enabled(gt->i915))
+		return false;
+	return true;
+}
+
+static int guc_send_invalidate_tlb(struct intel_guc *guc,
+				   enum intel_guc_tlb_invalidation_type type)
+{
+	struct intel_guc_tlb_wait _wq, *wq = &_wq;
+	struct intel_gt *gt = guc_to_gt(guc);
+	DEFINE_WAIT_FUNC(wait, woken_wake_function);
+	int err;
+	u32 seqno;
+	u32 action[] = {
+		INTEL_GUC_ACTION_TLB_INVALIDATION,
+		0,
+		REG_FIELD_PREP(INTEL_GUC_TLB_INVAL_TYPE_MASK, type) |
+			REG_FIELD_PREP(INTEL_GUC_TLB_INVAL_MODE_MASK,
+				       INTEL_GUC_TLB_INVAL_MODE_HEAVY) |
+			INTEL_GUC_TLB_INVAL_FLUSH_CACHE,
+	};
+	u32 size = ARRAY_SIZE(action);
+
+	/*
+	 * Early guard against GT enablement.  TLB invalidation should not be
+	 * attempted if the GT is disabled due to suspend/wedge.
+	 */
+	if (!intel_gt_is_enabled(gt))
+		return -EINVAL;
+
+	init_waitqueue_head(&_wq.wq);
+
+	if (xa_alloc_cyclic_irq(&guc->tlb_lookup, &seqno, wq,
+				xa_limit_32b, &guc->next_seqno,
+				GFP_ATOMIC | __GFP_NOWARN) < 0) {
+		/* Under severe memory pressure? Serialise TLB allocations */
+		xa_lock_irq(&guc->tlb_lookup);
+		wq = xa_load(&guc->tlb_lookup, guc->serial_slot);
+		wait_event_lock_irq(wq->wq,
+				    !READ_ONCE(wq->busy),
+				    guc->tlb_lookup.xa_lock);
+		/*
+		 * Update wq->busy under lock to ensure only one waiter can
+		 * issue the TLB invalidation command using the serial slot at a
+		 * time. The condition is set to true before releasing the lock
+		 * so that other caller continue to wait until woken up again.
+		 */
+		wq->busy = true;
+		xa_unlock_irq(&guc->tlb_lookup);
+
+		seqno = guc->serial_slot;
+	}
+
+	action[1] = seqno;
+
+	add_wait_queue(&wq->wq, &wait);
+
+	/* This is a critical reclaim path and thus we must loop here. */
+	err = intel_guc_send_busy_loop(guc, action, size, G2H_LEN_DW_INVALIDATE_TLB, true);
+	if (err)
+		goto out;
+
+	/*
+	 * Late guard against GT enablement. It is not an error for the TLB
+	 * invalidation to time out if the GT is disabled during the process
+	 * due to suspend/wedge. In fact, the TLB invalidation is cancelled
+	 * in this case.
+	 */
+	if (!must_wait_woken(&wait, intel_guc_ct_max_queue_time_jiffies()) &&
+	    intel_gt_is_enabled(gt)) {
+		guc_err(guc,
+			"TLB invalidation response timed out for seqno %u\n", seqno);
+		err = -ETIME;
+	}
+out:
+	remove_wait_queue(&wq->wq, &wait);
+	if (seqno != guc->serial_slot)
+		xa_erase_irq(&guc->tlb_lookup, seqno);
+
+	return err;
+}
+
+/* Send a H2G command to invalidate the TLBs at engine level and beyond. */
+int intel_guc_invalidate_tlb_engines(struct intel_guc *guc)
+{
+	return guc_send_invalidate_tlb(guc, INTEL_GUC_TLB_INVAL_ENGINES);
+}
+
+/* Send a H2G command to invalidate the GuC's internal TLB. */
+int intel_guc_invalidate_tlb_guc(struct intel_guc *guc)
+{
+	return guc_send_invalidate_tlb(guc, INTEL_GUC_TLB_INVAL_GUC);
+}
+
 int intel_guc_deregister_done_process_msg(struct intel_guc *guc,
 					  const u32 *msg,
 					  u32 len)
@@ -4805,19 +5032,19 @@ static void guc_context_replay(struct intel_context *ce)
 static void guc_handle_context_reset(struct intel_guc *guc,
 				     struct intel_context *ce)
 {
+	bool capture = intel_context_is_schedulable(ce);
+
 	trace_intel_context_reset(ce);
 
-	guc_dbg(guc, "Got context reset notification: 0x%04X on %s, exiting = %s, banned = %s\n",
+	guc_dbg(guc, "%s context reset notification: 0x%04X on %s, exiting = %s, banned = %s\n",
+		capture ? "Got" : "Ignoring",
 		ce->guc_id.id, ce->engine->name,
 		str_yes_no(intel_context_is_exiting(ce)),
 		str_yes_no(intel_context_is_banned(ce)));
 
-	if (likely(intel_context_is_schedulable(ce))) {
+	if (capture) {
 		capture_error_state(guc, ce);
 		guc_context_replay(ce);
-	} else {
-		guc_info(guc, "Ignoring context reset notification of exiting context 0x%04X on %s",
-			 ce->guc_id.id, ce->engine->name);
 	}
 }
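A hypothetical caller sketch for the new interface (illustrative only; __uc_resume() in intel_uc.c below drives the same two entry points after a power transition):

	/* The call may block until the TLB_INVALIDATION_DONE G2H arrives or
	 * the CT queue drain budget expires.
	 */
	if (intel_guc_tlb_invalidation_is_available(guc)) {
		int err = intel_guc_invalidate_tlb_engines(guc);

		/* -ETIME: no response within intel_guc_ct_max_queue_time_jiffies() */
		if (err)
			guc_dbg(guc, "TLB invalidation failed: %d\n", err);
	}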
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc.c b/drivers/gpu/drm/i915/gt/uc/intel_uc.c
index 98b103375b..27f6561dd7 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_uc.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_uc.c
@@ -688,6 +688,8 @@ void intel_uc_suspend(struct intel_uc *uc)
 	/* flush the GSC worker */
 	intel_gsc_uc_flush_work(&uc->gsc);
 
+	wake_up_all_tlb_invalidate(guc);
+
 	if (!intel_guc_is_ready(guc)) {
 		guc->interrupts.enabled = false;
 		return;
@@ -736,6 +738,11 @@ static int __uc_resume(struct intel_uc *uc, bool enable_communication)
 
 	intel_gsc_uc_resume(&uc->gsc);
 
+	if (intel_guc_tlb_invalidation_is_available(guc)) {
+		intel_guc_invalidate_tlb_engines(guc);
+		intel_guc_invalidate_tlb_guc(guc);
+	}
+
 	return 0;
 }
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c
index 8be005de1d..362639162e 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c
@@ -88,12 +88,12 @@ void intel_uc_fw_change_status(struct intel_uc_fw *uc_fw,
  * security fixes, etc. to be enabled.
  */
 #define INTEL_GUC_FIRMWARE_DEFS(fw_def, guc_maj, guc_mmp) \
-	fw_def(METEORLAKE,   0, guc_maj(mtl,  70, 6, 6)) \
-	fw_def(DG2,          0, guc_maj(dg2,  70, 5, 1)) \
-	fw_def(ALDERLAKE_P,  0, guc_maj(adlp, 70, 5, 1)) \
+	fw_def(METEORLAKE,   0, guc_maj(mtl,  70, 12, 1)) \
+	fw_def(DG2,          0, guc_maj(dg2,  70, 12, 1)) \
+	fw_def(ALDERLAKE_P,  0, guc_maj(adlp, 70, 12, 1)) \
 	fw_def(ALDERLAKE_P,  0, guc_mmp(adlp, 70, 1, 1)) \
 	fw_def(ALDERLAKE_P,  0, guc_mmp(adlp, 69, 0, 3)) \
-	fw_def(ALDERLAKE_S,  0, guc_maj(tgl,  70, 5, 1)) \
+	fw_def(ALDERLAKE_S,  0, guc_maj(tgl,  70, 12, 1)) \
 	fw_def(ALDERLAKE_S,  0, guc_mmp(tgl,  70, 1, 1)) \
 	fw_def(ALDERLAKE_S,  0, guc_mmp(tgl,  69, 0, 3)) \
 	fw_def(DG1,          0, guc_maj(dg1,  70, 5, 1)) \
@@ -132,6 +132,17 @@ void intel_uc_fw_change_status(struct intel_uc_fw *uc_fw,
 	fw_def(SKYLAKE,      0, huc_mmp(skl,  2, 0, 0))
 
 /*
+ * The GSC FW has multiple version (see intel_gsc_uc.h for details); since what
+ * we care about is the interface, we use the compatibility version in the
+ * binary names.
+ * Same as with the GuC, a major version bump indicate a
+ * backward-incompatible change, while a minor version bump indicates a
+ * backward-compatible one, so we use only the former in the file name.
+ */
+#define INTEL_GSC_FIRMWARE_DEFS(fw_def, gsc_def) \
+	fw_def(METEORLAKE,   0, gsc_def(mtl, 1, 0))
+
+/*
  * Set of macros for producing a list of filenames from the above table.
  */
 #define __MAKE_UC_FW_PATH_BLANK(prefix_, name_) \
@@ -166,6 +177,9 @@ void intel_uc_fw_change_status(struct intel_uc_fw *uc_fw,
 #define MAKE_HUC_FW_PATH_MMP(prefix_, major_, minor_, patch_) \
 	__MAKE_UC_FW_PATH_MMP(prefix_, "huc", major_, minor_, patch_)
 
+#define MAKE_GSC_FW_PATH(prefix_, major_, minor_) \
+	__MAKE_UC_FW_PATH_MAJOR(prefix_, "gsc", major_)
+
 /*
  * All blobs need to be declared via MODULE_FIRMWARE().
  * This first expansion of the table macros is solely to provide
@@ -176,6 +190,7 @@ void intel_uc_fw_change_status(struct intel_uc_fw *uc_fw,
  */
 INTEL_GUC_FIRMWARE_DEFS(INTEL_UC_MODULE_FW, MAKE_GUC_FW_PATH_MAJOR, MAKE_GUC_FW_PATH_MMP)
 INTEL_HUC_FIRMWARE_DEFS(INTEL_UC_MODULE_FW, MAKE_HUC_FW_PATH_BLANK, MAKE_HUC_FW_PATH_MMP, MAKE_HUC_FW_PATH_GSC)
+INTEL_GSC_FIRMWARE_DEFS(INTEL_UC_MODULE_FW, MAKE_GSC_FW_PATH)
 
 /*
  * The next expansion of the table macros (in __uc_fw_auto_select below) provides
@@ -225,6 +240,10 @@ struct __packed uc_fw_blob {
 #define HUC_FW_BLOB_GSC(prefix_) \
 	UC_FW_BLOB_NEW(0, 0, 0, true, MAKE_HUC_FW_PATH_GSC(prefix_))
 
+#define GSC_FW_BLOB(prefix_, major_, minor_) \
+	UC_FW_BLOB_NEW(major_, minor_, 0, true, \
+		       MAKE_GSC_FW_PATH(prefix_, major_, minor_))
+
 struct __packed uc_fw_platform_requirement {
 	enum intel_platform p;
 	u8 rev; /* first platform rev using this FW */
@@ -251,9 +270,14 @@ static const struct uc_fw_platform_requirement blobs_huc[] = {
 	INTEL_HUC_FIRMWARE_DEFS(MAKE_FW_LIST, HUC_FW_BLOB, HUC_FW_BLOB_MMP, HUC_FW_BLOB_GSC)
 };
 
+static const struct uc_fw_platform_requirement blobs_gsc[] = {
+	INTEL_GSC_FIRMWARE_DEFS(MAKE_FW_LIST, GSC_FW_BLOB)
+};
+
 static const struct fw_blobs_by_type blobs_all[INTEL_UC_FW_NUM_TYPES] = {
 	[INTEL_UC_FW_TYPE_GUC] = { blobs_guc, ARRAY_SIZE(blobs_guc) },
 	[INTEL_UC_FW_TYPE_HUC] = { blobs_huc, ARRAY_SIZE(blobs_huc) },
+	[INTEL_UC_FW_TYPE_GSC] = { blobs_gsc, ARRAY_SIZE(blobs_gsc) },
 };
 
 static void
@@ -267,14 +291,6 @@ __uc_fw_auto_select(struct drm_i915_private *i915, struct intel_uc_fw *uc_fw)
 	bool found;
 
 	/*
-	 * GSC FW support is still not fully in place, so we're not defining
-	 * the FW blob yet because we don't want the driver to attempt to load
-	 * it until we're ready for it.
-	 */
-	if (uc_fw->type == INTEL_UC_FW_TYPE_GSC)
-		return;
-
-	/*
 	 * The only difference between the ADL GuC FWs is the HWConfig support.
 	 * ADL-N does not support HWConfig, so we should use the same binary as
 	 * ADL-S, otherwise the GuC might attempt to fetch a config table that