Diffstat (limited to 'drivers/gpu/drm/i915/pxp')
-rw-r--r--  drivers/gpu/drm/i915/pxp/intel_pxp.c                   | 502
-rw-r--r--  drivers/gpu/drm/i915/pxp/intel_pxp.h                   |  40
-rw-r--r--  drivers/gpu/drm/i915/pxp/intel_pxp_cmd.c               | 143
-rw-r--r--  drivers/gpu/drm/i915/pxp/intel_pxp_cmd.h               |  15
-rw-r--r--  drivers/gpu/drm/i915/pxp/intel_pxp_cmd_interface_42.h  |  43
-rw-r--r--  drivers/gpu/drm/i915/pxp/intel_pxp_cmd_interface_43.h  |  61
-rw-r--r--  drivers/gpu/drm/i915/pxp/intel_pxp_cmd_interface_cmn.h |  42
-rw-r--r--  drivers/gpu/drm/i915/pxp/intel_pxp_debugfs.c           |  91
-rw-r--r--  drivers/gpu/drm/i915/pxp/intel_pxp_debugfs.h           |  21
-rw-r--r--  drivers/gpu/drm/i915/pxp/intel_pxp_gsccs.c             | 445
-rw-r--r--  drivers/gpu/drm/i915/pxp/intel_pxp_gsccs.h             |  43
-rw-r--r--  drivers/gpu/drm/i915/pxp/intel_pxp_huc.c               |  71
-rw-r--r--  drivers/gpu/drm/i915/pxp/intel_pxp_huc.h               |  13
-rw-r--r--  drivers/gpu/drm/i915/pxp/intel_pxp_irq.c               | 108
-rw-r--r--  drivers/gpu/drm/i915/pxp/intel_pxp_irq.h               |  40
-rw-r--r--  drivers/gpu/drm/i915/pxp/intel_pxp_pm.c                |  64
-rw-r--r--  drivers/gpu/drm/i915/pxp/intel_pxp_pm.h                |  37
-rw-r--r--  drivers/gpu/drm/i915/pxp/intel_pxp_regs.h              |  27
-rw-r--r--  drivers/gpu/drm/i915/pxp/intel_pxp_session.c           | 186
-rw-r--r--  drivers/gpu/drm/i915/pxp/intel_pxp_session.h           |  25
-rw-r--r--  drivers/gpu/drm/i915/pxp/intel_pxp_tee.c               | 406
-rw-r--r--  drivers/gpu/drm/i915/pxp/intel_pxp_tee.h               |  22
-rw-r--r--  drivers/gpu/drm/i915/pxp/intel_pxp_types.h             | 120
23 files changed, 2565 insertions, 0 deletions
diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp.c b/drivers/gpu/drm/i915/pxp/intel_pxp.c
new file mode 100644
index 0000000000..38ec754d0e
--- /dev/null
+++ b/drivers/gpu/drm/i915/pxp/intel_pxp.c
@@ -0,0 +1,502 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright(c) 2020 Intel Corporation.
+ */
+#include <linux/workqueue.h>
+
+#include "gem/i915_gem_context.h"
+
+#include "gt/intel_context.h"
+#include "gt/intel_gt.h"
+
+#include "i915_drv.h"
+
+#include "intel_pxp.h"
+#include "intel_pxp_gsccs.h"
+#include "intel_pxp_irq.h"
+#include "intel_pxp_regs.h"
+#include "intel_pxp_session.h"
+#include "intel_pxp_tee.h"
+#include "intel_pxp_types.h"
+
+/**
+ * DOC: PXP
+ *
+ * PXP (Protected Xe Path) is a feature available in Gen12 and newer platforms.
+ * It allows execution and flip to display of protected (i.e. encrypted)
+ * objects. The SW support is enabled via the CONFIG_DRM_I915_PXP kconfig.
+ *
+ * Objects can opt-in to PXP encryption at creation time via the
+ * I915_GEM_CREATE_EXT_PROTECTED_CONTENT create_ext flag. For objects to be
+ * correctly protected they must be used in conjunction with a context created
+ * with the I915_CONTEXT_PARAM_PROTECTED_CONTENT flag. See the documentation
+ * of those two uapi flags for details and restrictions.
+ *
+ * Protected objects are tied to a pxp session; currently we only support one
+ * session, which i915 manages and whose index is available in the uapi
+ * (I915_PROTECTED_CONTENT_DEFAULT_SESSION) for use in instructions targeting
+ * protected objects.
+ * The session is invalidated by the HW when certain events occur (e.g.
+ * suspend/resume). When this happens, all the objects that were used with the
+ * session are marked as invalid and all contexts marked as using protected
+ * content are banned. Any further attempt at using them in an execbuf call is
+ * rejected, while flips are converted to black frames.
+ *
+ * Some of the PXP setup operations are performed by the Management Engine,
+ * which is handled by the mei driver; communication between i915 and mei is
+ * performed via the mei_pxp component module.
+ */
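For illustration, a minimal userspace sketch of the opt-in described above, assuming the uapi definitions from i915_drm.h and an already-open DRM fd (error handling elided). Note that a protected context also requires I915_CONTEXT_PARAM_RECOVERABLE to be cleared, so that setparam is chained first:

#include <stdint.h>
#include <sys/ioctl.h>
#include <drm/i915_drm.h>

static uint32_t create_protected_bo(int fd)
{
	struct drm_i915_gem_create_ext_protected_content pc = {
		.base.name = I915_GEM_CREATE_EXT_PROTECTED_CONTENT,
	};
	struct drm_i915_gem_create_ext create = {
		.size = 4096,
		.extensions = (uintptr_t)&pc,
	};

	ioctl(fd, DRM_IOCTL_I915_GEM_CREATE_EXT, &create);
	return create.handle;
}

static uint32_t create_protected_context(int fd)
{
	struct drm_i915_gem_context_create_ext_setparam protected_param = {
		.base.name = I915_CONTEXT_CREATE_EXT_SETPARAM,
		.param.param = I915_CONTEXT_PARAM_PROTECTED_CONTENT,
		.param.value = 1,
	};
	struct drm_i915_gem_context_create_ext_setparam recoverable = {
		.base.name = I915_CONTEXT_CREATE_EXT_SETPARAM,
		.base.next_extension = (uintptr_t)&protected_param,
		.param.param = I915_CONTEXT_PARAM_RECOVERABLE,
		.param.value = 0, /* must be cleared before PROTECTED_CONTENT is set */
	};
	struct drm_i915_gem_context_create_ext create = {
		.flags = I915_CONTEXT_CREATE_FLAGS_USE_EXTENSIONS,
		.extensions = (uintptr_t)&recoverable,
	};

	ioctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_CREATE_EXT, &create);
	return create.ctx_id;
}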
+
+bool intel_pxp_is_supported(const struct intel_pxp *pxp)
+{
+ return IS_ENABLED(CONFIG_DRM_I915_PXP) && pxp;
+}
+
+bool intel_pxp_is_enabled(const struct intel_pxp *pxp)
+{
+ return IS_ENABLED(CONFIG_DRM_I915_PXP) && pxp && pxp->ce;
+}
+
+bool intel_pxp_is_active(const struct intel_pxp *pxp)
+{
+ return IS_ENABLED(CONFIG_DRM_I915_PXP) && pxp && pxp->arb_is_valid;
+}
+
+static void kcr_pxp_set_status(const struct intel_pxp *pxp, bool enable)
+{
+ u32 val = enable ? _MASKED_BIT_ENABLE(KCR_INIT_ALLOW_DISPLAY_ME_WRITES) :
+ _MASKED_BIT_DISABLE(KCR_INIT_ALLOW_DISPLAY_ME_WRITES);
+
+ intel_uncore_write(pxp->ctrl_gt->uncore, KCR_INIT(pxp->kcr_base), val);
+}
+
+static void kcr_pxp_enable(const struct intel_pxp *pxp)
+{
+ kcr_pxp_set_status(pxp, true);
+}
+
+static void kcr_pxp_disable(const struct intel_pxp *pxp)
+{
+ kcr_pxp_set_status(pxp, false);
+}
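KCR_INIT is an i915 "masked" register: the upper 16 bits of the written value select which of the lower 16 status bits the write actually modifies, so bit 14 can be set or cleared without a read-modify-write. A simplified sketch of what the two macros used above reduce to (the real, type-checked definitions live elsewhere in the driver):

/* simplified equivalents of the masked-register helpers, for illustration */
#define EXAMPLE_MASKED_BIT_ENABLE(bit)	(((bit) << 16) | (bit))	/* unmask + set */
#define EXAMPLE_MASKED_BIT_DISABLE(bit)	((bit) << 16)		/* unmask + clear */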
+
+static int create_vcs_context(struct intel_pxp *pxp)
+{
+ static struct lock_class_key pxp_lock;
+ struct intel_gt *gt = pxp->ctrl_gt;
+ struct intel_engine_cs *engine;
+ struct intel_context *ce;
+ int i;
+
+ /*
+ * Find the first VCS engine present. We're guaranteed there is one
+ * if we're in this function due to the check in has_pxp
+ */
+ for (i = 0, engine = NULL; !engine; i++)
+ engine = gt->engine_class[VIDEO_DECODE_CLASS][i];
+
+ GEM_BUG_ON(!engine || engine->class != VIDEO_DECODE_CLASS);
+
+ ce = intel_engine_create_pinned_context(engine, engine->gt->vm, SZ_4K,
+ I915_GEM_HWS_PXP_ADDR,
+ &pxp_lock, "pxp_context");
+ if (IS_ERR(ce)) {
+ drm_err(&gt->i915->drm, "failed to create VCS ctx for PXP\n");
+ return PTR_ERR(ce);
+ }
+
+ pxp->ce = ce;
+
+ return 0;
+}
+
+static void destroy_vcs_context(struct intel_pxp *pxp)
+{
+ if (pxp->ce)
+ intel_engine_destroy_pinned_context(fetch_and_zero(&pxp->ce));
+}
+
+static void pxp_init_full(struct intel_pxp *pxp)
+{
+ struct intel_gt *gt = pxp->ctrl_gt;
+ int ret;
+
+ /*
+ * we'll use the completion to check if there is a termination pending,
+ * so we start it as completed and we reinit it when a termination
+ * is triggered.
+ */
+ init_completion(&pxp->termination);
+ complete_all(&pxp->termination);
+
+ if (pxp->ctrl_gt->type == GT_MEDIA)
+ pxp->kcr_base = MTL_KCR_BASE;
+ else
+ pxp->kcr_base = GEN12_KCR_BASE;
+
+ intel_pxp_session_management_init(pxp);
+
+ ret = create_vcs_context(pxp);
+ if (ret)
+ return;
+
+ if (HAS_ENGINE(pxp->ctrl_gt, GSC0))
+ ret = intel_pxp_gsccs_init(pxp);
+ else
+ ret = intel_pxp_tee_component_init(pxp);
+ if (ret)
+ goto out_context;
+
+ drm_info(&gt->i915->drm, "Protected Xe Path (PXP) protected content support initialized\n");
+
+ return;
+
+out_context:
+ destroy_vcs_context(pxp);
+}
+
+static struct intel_gt *find_gt_for_required_teelink(struct drm_i915_private *i915)
+{
+ /*
+ * NOTE: Only certain platforms require PXP-tee-backend dependencies
+	 * for HuC authentication. For now, it's limited to DG2.
+ */
+ if (IS_ENABLED(CONFIG_INTEL_MEI_PXP) && IS_ENABLED(CONFIG_INTEL_MEI_GSC) &&
+ intel_huc_is_loaded_by_gsc(&to_gt(i915)->uc.huc) && intel_uc_uses_huc(&to_gt(i915)->uc))
+ return to_gt(i915);
+
+ return NULL;
+}
+
+static struct intel_gt *find_gt_for_required_protected_content(struct drm_i915_private *i915)
+{
+ if (!IS_ENABLED(CONFIG_DRM_I915_PXP) || !INTEL_INFO(i915)->has_pxp)
+ return NULL;
+
+ /*
+ * For MTL onwards, PXP-controller-GT needs to have a valid GSC engine
+ * on the media GT. NOTE: if we have a media-tile with a GSC-engine,
+ * the VDBOX is already present so skip that check. We also have to
+ * ensure the GSC and HUC firmware are coming online
+ */
+ if (i915->media_gt && HAS_ENGINE(i915->media_gt, GSC0) &&
+ intel_uc_fw_is_loadable(&i915->media_gt->uc.gsc.fw) &&
+ intel_uc_fw_is_loadable(&i915->media_gt->uc.huc.fw))
+ return i915->media_gt;
+
+ /*
+	 * Else we rely on the mei-pxp module, but only on legacy platforms
+	 * prior to having separate media GTs, and only if a valid VDBOX is present.
+ */
+ if (IS_ENABLED(CONFIG_INTEL_MEI_PXP) && !i915->media_gt && VDBOX_MASK(to_gt(i915)))
+ return to_gt(i915);
+
+ return NULL;
+}
+
+int intel_pxp_init(struct drm_i915_private *i915)
+{
+ struct intel_gt *gt;
+ bool is_full_feature = false;
+
+ /*
+ * NOTE: Get the ctrl_gt before checking intel_pxp_is_supported since
+ * we still need it if PXP's backend tee transport is needed.
+ */
+ gt = find_gt_for_required_protected_content(i915);
+ if (gt)
+ is_full_feature = true;
+ else
+ gt = find_gt_for_required_teelink(i915);
+
+ if (!gt)
+ return -ENODEV;
+
+ /*
+ * At this point, we will either enable full featured PXP capabilities
+ * including session and object management, or we will init the backend tee
+ * channel for internal users such as HuC loading by GSC
+ */
+ i915->pxp = kzalloc(sizeof(*i915->pxp), GFP_KERNEL);
+ if (!i915->pxp)
+ return -ENOMEM;
+
+	/* init common info used by all feature-mode usages */
+ i915->pxp->ctrl_gt = gt;
+ mutex_init(&i915->pxp->tee_mutex);
+
+ /*
+ * If full PXP feature is not available but HuC is loaded by GSC on pre-MTL
+ * such as DG2, we can skip the init of the full PXP session/object management
+ * and just init the tee channel.
+ */
+ if (is_full_feature)
+ pxp_init_full(i915->pxp);
+ else
+ intel_pxp_tee_component_init(i915->pxp);
+
+ return 0;
+}
+
+void intel_pxp_fini(struct drm_i915_private *i915)
+{
+ if (!i915->pxp)
+ return;
+
+ i915->pxp->arb_is_valid = false;
+
+ if (HAS_ENGINE(i915->pxp->ctrl_gt, GSC0))
+ intel_pxp_gsccs_fini(i915->pxp);
+ else
+ intel_pxp_tee_component_fini(i915->pxp);
+
+ destroy_vcs_context(i915->pxp);
+
+ kfree(i915->pxp);
+ i915->pxp = NULL;
+}
+
+void intel_pxp_mark_termination_in_progress(struct intel_pxp *pxp)
+{
+ pxp->arb_is_valid = false;
+ reinit_completion(&pxp->termination);
+}
+
+static void pxp_queue_termination(struct intel_pxp *pxp)
+{
+ struct intel_gt *gt = pxp->ctrl_gt;
+
+ /*
+ * We want to get the same effect as if we received a termination
+ * interrupt, so just pretend that we did.
+ */
+ spin_lock_irq(gt->irq_lock);
+ intel_pxp_mark_termination_in_progress(pxp);
+ pxp->session_events |= PXP_TERMINATION_REQUEST;
+ queue_work(system_unbound_wq, &pxp->session_work);
+ spin_unlock_irq(gt->irq_lock);
+}
+
+static bool pxp_component_bound(struct intel_pxp *pxp)
+{
+ bool bound = false;
+
+ mutex_lock(&pxp->tee_mutex);
+ if (pxp->pxp_component)
+ bound = true;
+ mutex_unlock(&pxp->tee_mutex);
+
+ return bound;
+}
+
+int intel_pxp_get_backend_timeout_ms(struct intel_pxp *pxp)
+{
+ if (HAS_ENGINE(pxp->ctrl_gt, GSC0))
+ return GSCFW_MAX_ROUND_TRIP_LATENCY_MS;
+ else
+ return 250;
+}
+
+static int __pxp_global_teardown_final(struct intel_pxp *pxp)
+{
+ int timeout;
+
+ if (!pxp->arb_is_valid)
+ return 0;
+ /*
+ * To ensure synchronous and coherent session teardown completion
+ * in response to suspend or shutdown triggers, don't use a worker.
+ */
+ intel_pxp_mark_termination_in_progress(pxp);
+ intel_pxp_terminate(pxp, false);
+
+ timeout = intel_pxp_get_backend_timeout_ms(pxp);
+
+ if (!wait_for_completion_timeout(&pxp->termination, msecs_to_jiffies(timeout)))
+ return -ETIMEDOUT;
+
+ return 0;
+}
+
+static int __pxp_global_teardown_restart(struct intel_pxp *pxp)
+{
+ int timeout;
+
+ if (pxp->arb_is_valid)
+ return 0;
+ /*
+ * The arb-session is currently inactive and we are doing a reset and restart
+ * due to a runtime event. Use the worker that was designed for this.
+ */
+ pxp_queue_termination(pxp);
+
+ timeout = intel_pxp_get_backend_timeout_ms(pxp);
+
+ if (!wait_for_completion_timeout(&pxp->termination, msecs_to_jiffies(timeout)))
+ return -ETIMEDOUT;
+
+ return 0;
+}
+
+void intel_pxp_end(struct intel_pxp *pxp)
+{
+ struct drm_i915_private *i915 = pxp->ctrl_gt->i915;
+ intel_wakeref_t wakeref;
+
+ if (!intel_pxp_is_enabled(pxp))
+ return;
+
+ wakeref = intel_runtime_pm_get(&i915->runtime_pm);
+
+ mutex_lock(&pxp->arb_mutex);
+
+ if (__pxp_global_teardown_final(pxp))
+ drm_dbg(&i915->drm, "PXP end timed out\n");
+
+ mutex_unlock(&pxp->arb_mutex);
+
+ intel_pxp_fini_hw(pxp);
+ intel_runtime_pm_put(&i915->runtime_pm, wakeref);
+}
+
+/*
+ * This helper is used both by intel_pxp_start and by
+ * the GET_PARAM IOCTL that user space calls. Thus, the
+ * return values here should match the UAPI spec.
+ */
+int intel_pxp_get_readiness_status(struct intel_pxp *pxp)
+{
+ if (!intel_pxp_is_enabled(pxp))
+ return -ENODEV;
+
+ if (HAS_ENGINE(pxp->ctrl_gt, GSC0)) {
+ if (wait_for(intel_pxp_gsccs_is_ready_for_sessions(pxp), 250))
+ return 2;
+ } else {
+ if (wait_for(pxp_component_bound(pxp), 250))
+ return 2;
+ }
+ return 1;
+}
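For illustration, the user-space side of this query, assuming the I915_PARAM_PXP_STATUS uapi parameter and an open DRM fd: a value of 1 means PXP is ready, 2 means not yet ready (the caller may retry later), and the ioctl itself fails when PXP is unsupported.

int pxp_status = 0;
struct drm_i915_getparam gp = {
	.param = I915_PARAM_PXP_STATUS,
	.value = &pxp_status,
};

if (ioctl(fd, DRM_IOCTL_I915_GETPARAM, &gp) == 0 && pxp_status == 1)
	start_protected_work(fd); /* hypothetical caller-side helper */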
+
+/*
+ * the arb session is restarted from the irq work when we receive the
+ * termination completion interrupt
+ */
+int intel_pxp_start(struct intel_pxp *pxp)
+{
+ int ret = 0;
+
+ ret = intel_pxp_get_readiness_status(pxp);
+ if (ret < 0)
+ return ret;
+ else if (ret > 1)
+ return -EIO; /* per UAPI spec, user may retry later */
+
+ mutex_lock(&pxp->arb_mutex);
+
+ ret = __pxp_global_teardown_restart(pxp);
+ if (ret)
+ goto unlock;
+
+ /* make sure the compiler doesn't optimize the double access */
+ barrier();
+
+ if (!pxp->arb_is_valid)
+ ret = -EIO;
+
+unlock:
+ mutex_unlock(&pxp->arb_mutex);
+ return ret;
+}
+
+void intel_pxp_init_hw(struct intel_pxp *pxp)
+{
+ kcr_pxp_enable(pxp);
+ intel_pxp_irq_enable(pxp);
+}
+
+void intel_pxp_fini_hw(struct intel_pxp *pxp)
+{
+ kcr_pxp_disable(pxp);
+ intel_pxp_irq_disable(pxp);
+}
+
+int intel_pxp_key_check(struct intel_pxp *pxp,
+ struct drm_i915_gem_object *obj,
+ bool assign)
+{
+ if (!intel_pxp_is_active(pxp))
+ return -ENODEV;
+
+ if (!i915_gem_object_is_protected(obj))
+ return -EINVAL;
+
+ GEM_BUG_ON(!pxp->key_instance);
+
+ /*
+ * If this is the first time we're using this object, it's not
+ * encrypted yet; it will be encrypted with the current key, so mark it
+ * as such. If the object is already encrypted, check instead if the
+ * used key is still valid.
+ */
+ if (!obj->pxp_key_instance && assign)
+ obj->pxp_key_instance = pxp->key_instance;
+
+ if (obj->pxp_key_instance != pxp->key_instance)
+ return -ENOEXEC;
+
+ return 0;
+}
+
+void intel_pxp_invalidate(struct intel_pxp *pxp)
+{
+ struct drm_i915_private *i915 = pxp->ctrl_gt->i915;
+ struct i915_gem_context *ctx, *cn;
+
+ /* ban all contexts marked as protected */
+ spin_lock_irq(&i915->gem.contexts.lock);
+ list_for_each_entry_safe(ctx, cn, &i915->gem.contexts.list, link) {
+ struct i915_gem_engines_iter it;
+ struct intel_context *ce;
+
+ if (!kref_get_unless_zero(&ctx->ref))
+ continue;
+
+ if (likely(!i915_gem_context_uses_protected_content(ctx))) {
+ i915_gem_context_put(ctx);
+ continue;
+ }
+
+ spin_unlock_irq(&i915->gem.contexts.lock);
+
+ /*
+ * By the time we get here we are either going to suspend with
+ * quiesced execution or the HW keys are already long gone and
+ * in this case it is worthless to attempt to close the context
+ * and wait for its execution. It will hang the GPU if it has
+		 * not already. So, as a fast mitigation, we ban the
+		 * context as quickly as we can. That might race with the
+ * execbuffer, but currently this is the best that can be done.
+ */
+ for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it)
+ intel_context_ban(ce, NULL);
+ i915_gem_context_unlock_engines(ctx);
+
+ /*
+ * The context has been banned, no need to keep the wakeref.
+ * This is safe from races because the only other place this
+ * is touched is context_release and we're holding a ctx ref
+ */
+ if (ctx->pxp_wakeref) {
+ intel_runtime_pm_put(&i915->runtime_pm,
+ ctx->pxp_wakeref);
+ ctx->pxp_wakeref = 0;
+ }
+
+ spin_lock_irq(&i915->gem.contexts.lock);
+ list_safe_reset_next(ctx, cn, link);
+ i915_gem_context_put(ctx);
+ }
+ spin_unlock_irq(&i915->gem.contexts.lock);
+}
diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp.h b/drivers/gpu/drm/i915/pxp/intel_pxp.h
new file mode 100644
index 0000000000..17254c3f12
--- /dev/null
+++ b/drivers/gpu/drm/i915/pxp/intel_pxp.h
@@ -0,0 +1,40 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright(c) 2020, Intel Corporation. All rights reserved.
+ */
+
+#ifndef __INTEL_PXP_H__
+#define __INTEL_PXP_H__
+
+#include <linux/errno.h>
+#include <linux/types.h>
+
+struct drm_i915_gem_object;
+struct drm_i915_private;
+struct intel_pxp;
+
+bool intel_pxp_is_supported(const struct intel_pxp *pxp);
+bool intel_pxp_is_enabled(const struct intel_pxp *pxp);
+bool intel_pxp_is_active(const struct intel_pxp *pxp);
+
+int intel_pxp_init(struct drm_i915_private *i915);
+void intel_pxp_fini(struct drm_i915_private *i915);
+
+void intel_pxp_init_hw(struct intel_pxp *pxp);
+void intel_pxp_fini_hw(struct intel_pxp *pxp);
+
+void intel_pxp_mark_termination_in_progress(struct intel_pxp *pxp);
+void intel_pxp_tee_end_arb_fw_session(struct intel_pxp *pxp, u32 arb_session_id);
+
+int intel_pxp_get_readiness_status(struct intel_pxp *pxp);
+int intel_pxp_get_backend_timeout_ms(struct intel_pxp *pxp);
+int intel_pxp_start(struct intel_pxp *pxp);
+void intel_pxp_end(struct intel_pxp *pxp);
+
+int intel_pxp_key_check(struct intel_pxp *pxp,
+ struct drm_i915_gem_object *obj,
+ bool assign);
+
+void intel_pxp_invalidate(struct intel_pxp *pxp);
+
+#endif /* __INTEL_PXP_H__ */
diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp_cmd.c b/drivers/gpu/drm/i915/pxp/intel_pxp_cmd.c
new file mode 100644
index 0000000000..0eee51c4a7
--- /dev/null
+++ b/drivers/gpu/drm/i915/pxp/intel_pxp_cmd.c
@@ -0,0 +1,143 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright(c) 2020, Intel Corporation. All rights reserved.
+ */
+
+#include "gt/intel_context.h"
+#include "gt/intel_engine_pm.h"
+#include "gt/intel_gpu_commands.h"
+#include "gt/intel_ring.h"
+
+#include "i915_trace.h"
+
+#include "intel_pxp.h"
+#include "intel_pxp_cmd.h"
+#include "intel_pxp_session.h"
+#include "intel_pxp_types.h"
+
+/* stall until prior PXP and MFX/HCP/HUC objects are completed */
+#define MFX_WAIT_PXP (MFX_WAIT | \
+ MFX_WAIT_DW0_PXP_SYNC_CONTROL_FLAG | \
+ MFX_WAIT_DW0_MFX_SYNC_CONTROL_FLAG)
+
+static u32 *pxp_emit_session_selection(u32 *cs, u32 idx)
+{
+ *cs++ = MFX_WAIT_PXP;
+
+ /* pxp off */
+ *cs++ = MI_FLUSH_DW;
+ *cs++ = 0;
+ *cs++ = 0;
+
+ /* select session */
+ *cs++ = MI_SET_APPID | MI_SET_APPID_SESSION_ID(idx);
+
+ *cs++ = MFX_WAIT_PXP;
+
+ /* pxp on */
+ *cs++ = MI_FLUSH_DW | MI_FLUSH_DW_PROTECTED_MEM_EN |
+ MI_FLUSH_DW_OP_STOREDW | MI_FLUSH_DW_STORE_INDEX;
+ *cs++ = I915_GEM_HWS_PXP_ADDR | MI_FLUSH_DW_USE_GTT;
+ *cs++ = 0;
+
+ *cs++ = MFX_WAIT_PXP;
+
+ return cs;
+}
+
+static u32 *pxp_emit_inline_termination(u32 *cs)
+{
+ /* session inline termination */
+ *cs++ = CRYPTO_KEY_EXCHANGE;
+ *cs++ = 0;
+
+ return cs;
+}
+
+static u32 *pxp_emit_session_termination(u32 *cs, u32 idx)
+{
+ cs = pxp_emit_session_selection(cs, idx);
+ cs = pxp_emit_inline_termination(cs);
+
+ return cs;
+}
+
+static u32 *pxp_emit_wait(u32 *cs)
+{
+ /* wait for cmds to go through */
+ *cs++ = MFX_WAIT_PXP;
+ *cs++ = 0;
+
+ return cs;
+}
+
+/*
+ * if we ever need to terminate more than one session, we can submit multiple
+ * selections and terminations back-to-back with a single wait at the end
+ */
+#define SELECTION_LEN 10
+#define TERMINATION_LEN 2
+#define SESSION_TERMINATION_LEN(x) ((SELECTION_LEN + TERMINATION_LEN) * (x))
+#define WAIT_LEN 2
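The dword accounting behind these defines follows directly from the emit helpers above: pxp_emit_session_selection() writes 10 dwords (an MFX_WAIT, a 3-dword MI_FLUSH_DW, an MI_SET_APPID, another MFX_WAIT, a second 3-dword MI_FLUSH_DW and a final MFX_WAIT), pxp_emit_inline_termination() writes 2, and pxp_emit_wait() writes 2, matching SELECTION_LEN, TERMINATION_LEN and WAIT_LEN respectively.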
+
+static void pxp_request_commit(struct i915_request *rq)
+{
+ struct i915_sched_attr attr = { .priority = I915_PRIORITY_MAX };
+ struct intel_timeline * const tl = i915_request_timeline(rq);
+
+ lockdep_unpin_lock(&tl->mutex, rq->cookie);
+
+ trace_i915_request_add(rq);
+ __i915_request_commit(rq);
+ __i915_request_queue(rq, &attr);
+
+ mutex_unlock(&tl->mutex);
+}
+
+int intel_pxp_terminate_session(struct intel_pxp *pxp, u32 id)
+{
+ struct i915_request *rq;
+ struct intel_context *ce = pxp->ce;
+ u32 *cs;
+ int err = 0;
+
+ if (!intel_pxp_is_enabled(pxp))
+ return 0;
+
+ rq = i915_request_create(ce);
+ if (IS_ERR(rq))
+ return PTR_ERR(rq);
+
+ if (ce->engine->emit_init_breadcrumb) {
+ err = ce->engine->emit_init_breadcrumb(rq);
+ if (err)
+ goto out_rq;
+ }
+
+ cs = intel_ring_begin(rq, SESSION_TERMINATION_LEN(1) + WAIT_LEN);
+ if (IS_ERR(cs)) {
+ err = PTR_ERR(cs);
+ goto out_rq;
+ }
+
+ cs = pxp_emit_session_termination(cs, id);
+ cs = pxp_emit_wait(cs);
+
+ intel_ring_advance(rq, cs);
+
+out_rq:
+ i915_request_get(rq);
+
+ if (unlikely(err))
+ i915_request_set_error_once(rq, err);
+
+ pxp_request_commit(rq);
+
+ if (!err && i915_request_wait(rq, 0, HZ / 5) < 0)
+ err = -ETIME;
+
+ i915_request_put(rq);
+
+ return err;
+}
diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp_cmd.h b/drivers/gpu/drm/i915/pxp/intel_pxp_cmd.h
new file mode 100644
index 0000000000..6d62995435
--- /dev/null
+++ b/drivers/gpu/drm/i915/pxp/intel_pxp_cmd.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright(c) 2020, Intel Corporation. All rights reserved.
+ */
+
+#ifndef __INTEL_PXP_CMD_H__
+#define __INTEL_PXP_CMD_H__
+
+#include <linux/types.h>
+
+struct intel_pxp;
+
+int intel_pxp_terminate_session(struct intel_pxp *pxp, u32 idx);
+
+#endif /* __INTEL_PXP_CMD_H__ */
diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp_cmd_interface_42.h b/drivers/gpu/drm/i915/pxp/intel_pxp_cmd_interface_42.h
new file mode 100644
index 0000000000..26f7d9f01b
--- /dev/null
+++ b/drivers/gpu/drm/i915/pxp/intel_pxp_cmd_interface_42.h
@@ -0,0 +1,43 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright(c) 2020, Intel Corporation. All rights reserved.
+ */
+
+#ifndef __INTEL_PXP_FW_INTERFACE_42_H__
+#define __INTEL_PXP_FW_INTERFACE_42_H__
+
+#include <linux/types.h>
+#include "intel_pxp_cmd_interface_cmn.h"
+
+/* PXP-Opcode for Init Session */
+#define PXP42_CMDID_INIT_SESSION 0x1e
+
+/* PXP-Opcode for Invalidate Stream Key */
+#define PXP42_CMDID_INVALIDATE_STREAM_KEY 0x00000007
+
+/* PXP-Input-Packet: Init Session (Arb-Session) */
+struct pxp42_create_arb_in {
+ struct pxp_cmd_header header;
+ u32 protection_mode;
+#define PXP42_ARB_SESSION_MODE_HEAVY 0x2
+ u32 session_id;
+} __packed;
+
+/* PXP-Output-Packet: Init Session */
+struct pxp42_create_arb_out {
+ struct pxp_cmd_header header;
+} __packed;
+
+/* PXP-Input-Packet: Invalidate Stream Key */
+struct pxp42_inv_stream_key_in {
+ struct pxp_cmd_header header;
+ u32 rsvd[3];
+} __packed;
+
+/* PXP-Output-Packet: Invalidate Stream Key */
+struct pxp42_inv_stream_key_out {
+ struct pxp_cmd_header header;
+ u32 rsvd;
+} __packed;
+
+#endif /* __INTEL_PXP_FW_INTERFACE_42_H__ */
diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp_cmd_interface_43.h b/drivers/gpu/drm/i915/pxp/intel_pxp_cmd_interface_43.h
new file mode 100644
index 0000000000..0165d38fbe
--- /dev/null
+++ b/drivers/gpu/drm/i915/pxp/intel_pxp_cmd_interface_43.h
@@ -0,0 +1,61 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright(c) 2022, Intel Corporation. All rights reserved.
+ */
+
+#ifndef __INTEL_PXP_FW_INTERFACE_43_H__
+#define __INTEL_PXP_FW_INTERFACE_43_H__
+
+#include <linux/types.h>
+#include "intel_pxp_cmd_interface_cmn.h"
+
+/* PXP-Cmd-Op definitions */
+#define PXP43_CMDID_START_HUC_AUTH 0x0000003A
+#define PXP43_CMDID_NEW_HUC_AUTH 0x0000003F /* MTL+ */
+#define PXP43_CMDID_INIT_SESSION 0x00000036
+
+/* PXP-Packet sizes for MTL's GSCCS-HECI instruction */
+#define PXP43_MAX_HECI_INOUT_SIZE (SZ_32K)
+
+/* PXP-Packet size for MTL's NEW_HUC_AUTH instruction */
+#define PXP43_HUC_AUTH_INOUT_SIZE (SZ_4K)
+
+/* PXP-Input-Packet: HUC Load and Authentication */
+struct pxp43_start_huc_auth_in {
+ struct pxp_cmd_header header;
+ __le64 huc_base_address;
+} __packed;
+
+/* PXP-Input-Packet: HUC Auth-only */
+struct pxp43_new_huc_auth_in {
+ struct pxp_cmd_header header;
+ u64 huc_base_address;
+ u32 huc_size;
+} __packed;
+
+/* PXP-Output-Packet: HUC Load and Authentication or Auth-only */
+struct pxp43_huc_auth_out {
+ struct pxp_cmd_header header;
+} __packed;
+
+/* PXP-Input-Packet: Init PXP session */
+struct pxp43_create_arb_in {
+ struct pxp_cmd_header header;
+	/* header.stream_id fields for version 4.3 of Init PXP session: */
+ #define PXP43_INIT_SESSION_VALID BIT(0)
+ #define PXP43_INIT_SESSION_APPTYPE BIT(1)
+ #define PXP43_INIT_SESSION_APPID GENMASK(17, 2)
+ u32 protection_mode;
+ #define PXP43_INIT_SESSION_PROTECTION_ARB 0x2
+ u32 sub_session_id;
+ u32 init_flags;
+ u32 rsvd[12];
+} __packed;
+
+/* PXP-Output-Packet: Init PXP session */
+struct pxp43_create_arb_out {
+ struct pxp_cmd_header header;
+ u32 rsvd[8];
+} __packed;
+
+#endif /* __INTEL_PXP_FW_INTERFACE_43_H__ */
diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp_cmd_interface_cmn.h b/drivers/gpu/drm/i915/pxp/intel_pxp_cmd_interface_cmn.h
new file mode 100644
index 0000000000..6f6541d5e4
--- /dev/null
+++ b/drivers/gpu/drm/i915/pxp/intel_pxp_cmd_interface_cmn.h
@@ -0,0 +1,42 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright(c) 2022, Intel Corporation. All rights reserved.
+ */
+
+#ifndef __INTEL_PXP_FW_INTERFACE_CMN_H__
+#define __INTEL_PXP_FW_INTERFACE_CMN_H__
+
+#include <linux/types.h>
+
+#define PXP_APIVER(x, y) (((x) & 0xFFFF) << 16 | ((y) & 0xFFFF))
+
+/*
+ * there are a lot of status codes for PXP, but we only define the cross-API
+ * common ones that we actually can handle in the kernel driver. Other failure
+ * codes should be printed to error msg for debug.
+ */
+enum pxp_status {
+ PXP_STATUS_SUCCESS = 0x0,
+ PXP_STATUS_ERROR_API_VERSION = 0x1002,
+ PXP_STATUS_NOT_READY = 0x100e,
+ PXP_STATUS_PLATFCONFIG_KF1_NOVERIF = 0x101a,
+ PXP_STATUS_PLATFCONFIG_KF1_BAD = 0x101f,
+ PXP_STATUS_OP_NOT_PERMITTED = 0x4013
+};
+
+/* Common PXP FW message header */
+struct pxp_cmd_header {
+ u32 api_version;
+ u32 command_id;
+ union {
+ u32 status; /* out */
+ u32 stream_id; /* in */
+#define PXP_CMDHDR_EXTDATA_SESSION_VALID GENMASK(0, 0)
+#define PXP_CMDHDR_EXTDATA_APP_TYPE GENMASK(1, 1)
+#define PXP_CMDHDR_EXTDATA_SESSION_ID GENMASK(17, 2)
+ };
+ /* Length of the message (excluding the header) */
+ u32 buffer_len;
+} __packed;
+
+#endif /* __INTEL_PXP_FW_INTERFACE_CMN_H__ */
diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp_debugfs.c b/drivers/gpu/drm/i915/pxp/intel_pxp_debugfs.c
new file mode 100644
index 0000000000..e07c5b3807
--- /dev/null
+++ b/drivers/gpu/drm/i915/pxp/intel_pxp_debugfs.c
@@ -0,0 +1,91 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2021 Intel Corporation
+ */
+
+#include <linux/debugfs.h>
+#include <linux/string_helpers.h>
+
+#include <drm/drm_print.h>
+
+#include "gt/intel_gt_debugfs.h"
+
+#include "i915_drv.h"
+
+#include "intel_pxp.h"
+#include "intel_pxp_debugfs.h"
+#include "intel_pxp_gsccs.h"
+#include "intel_pxp_irq.h"
+#include "intel_pxp_types.h"
+
+static int pxp_info_show(struct seq_file *m, void *data)
+{
+ struct intel_pxp *pxp = m->private;
+ struct drm_printer p = drm_seq_file_printer(m);
+
+ if (!intel_pxp_is_enabled(pxp)) {
+ drm_printf(&p, "pxp disabled\n");
+ return 0;
+ }
+
+ drm_printf(&p, "active: %s\n", str_yes_no(intel_pxp_is_active(pxp)));
+ drm_printf(&p, "instance counter: %u\n", pxp->key_instance);
+
+ return 0;
+}
+
+DEFINE_SHOW_ATTRIBUTE(pxp_info);
+
+static int pxp_terminate_get(void *data, u64 *val)
+{
+ /* nothing to read */
+ return -EPERM;
+}
+
+static int pxp_terminate_set(void *data, u64 val)
+{
+ struct intel_pxp *pxp = data;
+ struct intel_gt *gt = pxp->ctrl_gt;
+ int timeout_ms;
+
+ if (!intel_pxp_is_active(pxp))
+ return -ENODEV;
+
+ /* simulate a termination interrupt */
+ spin_lock_irq(gt->irq_lock);
+ intel_pxp_irq_handler(pxp, GEN12_DISPLAY_PXP_STATE_TERMINATED_INTERRUPT);
+ spin_unlock_irq(gt->irq_lock);
+
+ timeout_ms = intel_pxp_get_backend_timeout_ms(pxp);
+
+ if (!wait_for_completion_timeout(&pxp->termination,
+ msecs_to_jiffies(timeout_ms)))
+ return -ETIMEDOUT;
+
+ return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(pxp_terminate_fops, pxp_terminate_get, pxp_terminate_set, "%llx\n");
+
+void intel_pxp_debugfs_register(struct intel_pxp *pxp)
+{
+ struct drm_minor *minor;
+ struct dentry *pxproot;
+
+ if (!intel_pxp_is_supported(pxp))
+ return;
+
+ minor = pxp->ctrl_gt->i915->drm.primary;
+ if (!minor->debugfs_root)
+ return;
+
+ pxproot = debugfs_create_dir("pxp", minor->debugfs_root);
+ if (IS_ERR(pxproot))
+ return;
+
+ debugfs_create_file("info", 0444, pxproot,
+ pxp, &pxp_info_fops);
+
+ debugfs_create_file("terminate_state", 0644, pxproot,
+ pxp, &pxp_terminate_fops);
+}
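With CONFIG_DRM_I915_PXP set, these entries appear under the DRM device's debugfs directory (typically /sys/kernel/debug/dri/<minor>/pxp/, assuming the standard DRM debugfs layout): reading "info" reports the enabled/active state and the key-instance counter, while writing any value to "terminate_state" simulates a termination interrupt and waits for the session teardown to complete.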
diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp_debugfs.h b/drivers/gpu/drm/i915/pxp/intel_pxp_debugfs.h
new file mode 100644
index 0000000000..299382b59e
--- /dev/null
+++ b/drivers/gpu/drm/i915/pxp/intel_pxp_debugfs.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2021 Intel Corporation
+ */
+
+#ifndef __INTEL_PXP_DEBUGFS_H__
+#define __INTEL_PXP_DEBUGFS_H__
+
+struct intel_pxp;
+struct dentry;
+
+#ifdef CONFIG_DRM_I915_PXP
+void intel_pxp_debugfs_register(struct intel_pxp *pxp);
+#else
+static inline void
+intel_pxp_debugfs_register(struct intel_pxp *pxp)
+{
+}
+#endif
+
+#endif /* __INTEL_PXP_DEBUGFS_H__ */
diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp_gsccs.c b/drivers/gpu/drm/i915/pxp/intel_pxp_gsccs.c
new file mode 100644
index 0000000000..2a600184a0
--- /dev/null
+++ b/drivers/gpu/drm/i915/pxp/intel_pxp_gsccs.c
@@ -0,0 +1,445 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright(c) 2023 Intel Corporation.
+ */
+
+#include "gem/i915_gem_internal.h"
+
+#include "gt/intel_context.h"
+#include "gt/intel_gt.h"
+#include "gt/uc/intel_gsc_fw.h"
+#include "gt/uc/intel_gsc_uc_heci_cmd_submit.h"
+
+#include "i915_drv.h"
+#include "intel_pxp.h"
+#include "intel_pxp_cmd_interface_42.h"
+#include "intel_pxp_cmd_interface_43.h"
+#include "intel_pxp_gsccs.h"
+#include "intel_pxp_types.h"
+
+static bool
+is_fw_err_platform_config(u32 type)
+{
+ switch (type) {
+ case PXP_STATUS_ERROR_API_VERSION:
+ case PXP_STATUS_PLATFCONFIG_KF1_NOVERIF:
+ case PXP_STATUS_PLATFCONFIG_KF1_BAD:
+ return true;
+ default:
+ break;
+ }
+ return false;
+}
+
+static const char *
+fw_err_to_string(u32 type)
+{
+ switch (type) {
+ case PXP_STATUS_ERROR_API_VERSION:
+ return "ERR_API_VERSION";
+ case PXP_STATUS_NOT_READY:
+ return "ERR_NOT_READY";
+ case PXP_STATUS_PLATFCONFIG_KF1_NOVERIF:
+ case PXP_STATUS_PLATFCONFIG_KF1_BAD:
+ return "ERR_PLATFORM_CONFIG";
+ default:
+ break;
+ }
+ return NULL;
+}
+
+static int
+gsccs_send_message(struct intel_pxp *pxp,
+ void *msg_in, size_t msg_in_size,
+ void *msg_out, size_t msg_out_size_max,
+ size_t *msg_out_len,
+ u64 *gsc_msg_handle_retry)
+{
+ struct intel_gt *gt = pxp->ctrl_gt;
+ struct drm_i915_private *i915 = gt->i915;
+ struct gsccs_session_resources *exec_res = &pxp->gsccs_res;
+ struct intel_gsc_mtl_header *header = exec_res->pkt_vaddr;
+ struct intel_gsc_heci_non_priv_pkt pkt;
+ size_t max_msg_size;
+ u32 reply_size;
+ int ret;
+
+ if (!exec_res->ce)
+ return -ENODEV;
+
+ max_msg_size = PXP43_MAX_HECI_INOUT_SIZE - sizeof(*header);
+
+ if (msg_in_size > max_msg_size || msg_out_size_max > max_msg_size)
+ return -ENOSPC;
+
+ if (!exec_res->pkt_vma || !exec_res->bb_vma)
+ return -ENOENT;
+
+ GEM_BUG_ON(exec_res->pkt_vma->size < (2 * PXP43_MAX_HECI_INOUT_SIZE));
+
+ mutex_lock(&pxp->tee_mutex);
+
+ memset(header, 0, sizeof(*header));
+ intel_gsc_uc_heci_cmd_emit_mtl_header(header, HECI_MEADDRESS_PXP,
+ msg_in_size + sizeof(*header),
+ exec_res->host_session_handle);
+
+ /* check if this is a host-session-handle cleanup call (empty packet) */
+ if (!msg_in && !msg_out)
+ header->flags |= GSC_INFLAG_MSG_CLEANUP;
+
+ /* copy caller provided gsc message handle if this is polling for a prior msg completion */
+ header->gsc_message_handle = *gsc_msg_handle_retry;
+
+ /* NOTE: zero size packets are used for session-cleanups */
+ if (msg_in && msg_in_size)
+ memcpy(exec_res->pkt_vaddr + sizeof(*header), msg_in, msg_in_size);
+
+ pkt.addr_in = i915_vma_offset(exec_res->pkt_vma);
+ pkt.size_in = header->message_size;
+ pkt.addr_out = pkt.addr_in + PXP43_MAX_HECI_INOUT_SIZE;
+ pkt.size_out = msg_out_size_max + sizeof(*header);
+ pkt.heci_pkt_vma = exec_res->pkt_vma;
+ pkt.bb_vma = exec_res->bb_vma;
+
+ /*
+ * Before submitting, let's clear-out the validity marker on the reply offset.
+	 * We use offset PXP43_MAX_HECI_INOUT_SIZE for the reply location, so point the header there.
+ */
+ header = exec_res->pkt_vaddr + PXP43_MAX_HECI_INOUT_SIZE;
+ header->validity_marker = 0;
+
+ ret = intel_gsc_uc_heci_cmd_submit_nonpriv(&gt->uc.gsc,
+ exec_res->ce, &pkt, exec_res->bb_vaddr,
+ GSC_REPLY_LATENCY_MS);
+ if (ret) {
+ drm_err(&i915->drm, "failed to send gsc PXP msg (%d)\n", ret);
+ goto unlock;
+ }
+
+ /* Response validity marker, status and busyness */
+ if (header->validity_marker != GSC_HECI_VALIDITY_MARKER) {
+ drm_err(&i915->drm, "gsc PXP reply with invalid validity marker\n");
+ ret = -EINVAL;
+ goto unlock;
+ }
+ if (header->status != 0) {
+ drm_dbg(&i915->drm, "gsc PXP reply status has error = 0x%08x\n",
+ header->status);
+ ret = -EINVAL;
+ goto unlock;
+ }
+ if (header->flags & GSC_OUTFLAG_MSG_PENDING) {
+ drm_dbg(&i915->drm, "gsc PXP reply is busy\n");
+ /*
+		 * When the GSC firmware replies with the pending bit set, the
+		 * requested operation has begun but has not yet completed; the
+		 * caller needs to re-send the request with the gsc_message_handle
+		 * returned by the firmware until the pending bit is cleared.
+ */
+ *gsc_msg_handle_retry = header->gsc_message_handle;
+ ret = -EAGAIN;
+ goto unlock;
+ }
+
+ reply_size = header->message_size - sizeof(*header);
+ if (reply_size > msg_out_size_max) {
+ drm_warn(&i915->drm, "caller with insufficient PXP reply size %u (%zu)\n",
+ reply_size, msg_out_size_max);
+ reply_size = msg_out_size_max;
+ }
+
+ if (msg_out)
+ memcpy(msg_out, exec_res->pkt_vaddr + PXP43_MAX_HECI_INOUT_SIZE + sizeof(*header),
+ reply_size);
+ if (msg_out_len)
+ *msg_out_len = reply_size;
+
+unlock:
+ mutex_unlock(&pxp->tee_mutex);
+ return ret;
+}
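For reference, the single pkt_vma allocated by this backend is split into two PXP43_MAX_HECI_INOUT_SIZE (32K) halves, which is why the code above rebases the header pointer before reading the reply; the layout implied by the offsets is:

	offset 0                         : intel_gsc_mtl_header + request payload (pkt.addr_in)
	offset PXP43_MAX_HECI_INOUT_SIZE : intel_gsc_mtl_header + reply payload   (pkt.addr_out)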
+
+static int
+gsccs_send_message_retry_complete(struct intel_pxp *pxp,
+ void *msg_in, size_t msg_in_size,
+ void *msg_out, size_t msg_out_size_max,
+ size_t *msg_out_len)
+{
+ u64 gsc_session_retry = 0;
+ int ret, tries = 0;
+
+ /*
+	 * Keep re-sending the request while the GSC firmware is busy. Based on
+	 * fw specs + sw overhead (and testing) we expect a worst-case pending-bit
+	 * delay of GSC_PENDING_RETRY_MAXCOUNT x GSC_PENDING_RETRY_PAUSE_MS
+	 * (40 x 50 ms = 2 seconds).
+ */
+ do {
+ ret = gsccs_send_message(pxp, msg_in, msg_in_size, msg_out, msg_out_size_max,
+ msg_out_len, &gsc_session_retry);
+ /* Only try again if gsc says so */
+ if (ret != -EAGAIN)
+ break;
+
+ msleep(GSC_PENDING_RETRY_PAUSE_MS);
+ } while (++tries < GSC_PENDING_RETRY_MAXCOUNT);
+
+ return ret;
+}
+
+bool intel_pxp_gsccs_is_ready_for_sessions(struct intel_pxp *pxp)
+{
+ /*
+ * GSC-fw loading, HuC-fw loading, HuC-fw authentication and
+ * GSC-proxy init flow (requiring an mei component driver)
+	 * must all complete before we can start requesting PXP
+	 * sessions. Checking for completion of HuC authentication and
+	 * the gsc-proxy init flow (the last set of dependencies, which
+	 * can complete out of order) will suffice.
+ */
+ if (intel_huc_is_authenticated(&pxp->ctrl_gt->uc.huc, INTEL_HUC_AUTH_BY_GSC) &&
+ intel_gsc_uc_fw_proxy_init_done(&pxp->ctrl_gt->uc.gsc, true))
+ return true;
+
+ return false;
+}
+
+int intel_pxp_gsccs_create_session(struct intel_pxp *pxp,
+ int arb_session_id)
+{
+ struct drm_i915_private *i915 = pxp->ctrl_gt->i915;
+ struct pxp43_create_arb_in msg_in = {0};
+ struct pxp43_create_arb_out msg_out = {0};
+ int ret;
+
+ msg_in.header.api_version = PXP_APIVER(4, 3);
+ msg_in.header.command_id = PXP43_CMDID_INIT_SESSION;
+ msg_in.header.stream_id = (FIELD_PREP(PXP43_INIT_SESSION_APPID, arb_session_id) |
+ FIELD_PREP(PXP43_INIT_SESSION_VALID, 1) |
+ FIELD_PREP(PXP43_INIT_SESSION_APPTYPE, 0));
+ msg_in.header.buffer_len = sizeof(msg_in) - sizeof(msg_in.header);
+ msg_in.protection_mode = PXP43_INIT_SESSION_PROTECTION_ARB;
+
+ ret = gsccs_send_message_retry_complete(pxp,
+ &msg_in, sizeof(msg_in),
+ &msg_out, sizeof(msg_out), NULL);
+ if (ret) {
+ drm_err(&i915->drm, "Failed to init session %d, ret=[%d]\n", arb_session_id, ret);
+ } else if (msg_out.header.status != 0) {
+ if (is_fw_err_platform_config(msg_out.header.status)) {
+ drm_info_once(&i915->drm,
+ "PXP init-session-%d failed due to BIOS/SOC:0x%08x:%s\n",
+ arb_session_id, msg_out.header.status,
+ fw_err_to_string(msg_out.header.status));
+ } else {
+ drm_dbg(&i915->drm, "PXP init-session-%d failed 0x%08x:%st:\n",
+ arb_session_id, msg_out.header.status,
+ fw_err_to_string(msg_out.header.status));
+ drm_dbg(&i915->drm, " cmd-detail: ID=[0x%08x],API-Ver-[0x%08x]\n",
+ msg_in.header.command_id, msg_in.header.api_version);
+ }
+ }
+
+ return ret;
+}
+
+void intel_pxp_gsccs_end_arb_fw_session(struct intel_pxp *pxp, u32 session_id)
+{
+ struct drm_i915_private *i915 = pxp->ctrl_gt->i915;
+ struct pxp42_inv_stream_key_in msg_in = {0};
+ struct pxp42_inv_stream_key_out msg_out = {0};
+ int ret = 0;
+
+ /*
+ * Stream key invalidation reuses the same version 4.2 input/output
+	 * command format, but the firmware requires the 4.3 API version
+ */
+ msg_in.header.api_version = PXP_APIVER(4, 3);
+ msg_in.header.command_id = PXP42_CMDID_INVALIDATE_STREAM_KEY;
+ msg_in.header.buffer_len = sizeof(msg_in) - sizeof(msg_in.header);
+
+ msg_in.header.stream_id = FIELD_PREP(PXP_CMDHDR_EXTDATA_SESSION_VALID, 1);
+ msg_in.header.stream_id |= FIELD_PREP(PXP_CMDHDR_EXTDATA_APP_TYPE, 0);
+ msg_in.header.stream_id |= FIELD_PREP(PXP_CMDHDR_EXTDATA_SESSION_ID, session_id);
+
+ ret = gsccs_send_message_retry_complete(pxp,
+ &msg_in, sizeof(msg_in),
+ &msg_out, sizeof(msg_out), NULL);
+ if (ret) {
+ drm_err(&i915->drm, "Failed to inv-stream-key-%u, ret=[%d]\n",
+ session_id, ret);
+ } else if (msg_out.header.status != 0) {
+ if (is_fw_err_platform_config(msg_out.header.status)) {
+ drm_info_once(&i915->drm,
+ "PXP inv-stream-key-%u failed due to BIOS/SOC :0x%08x:%s\n",
+ session_id, msg_out.header.status,
+ fw_err_to_string(msg_out.header.status));
+ } else {
+ drm_dbg(&i915->drm, "PXP inv-stream-key-%u failed 0x%08x:%s:\n",
+ session_id, msg_out.header.status,
+ fw_err_to_string(msg_out.header.status));
+ drm_dbg(&i915->drm, " cmd-detail: ID=[0x%08x],API-Ver-[0x%08x]\n",
+ msg_in.header.command_id, msg_in.header.api_version);
+ }
+ }
+}
+
+static void
+gsccs_cleanup_fw_host_session_handle(struct intel_pxp *pxp)
+{
+ struct drm_i915_private *i915 = pxp->ctrl_gt->i915;
+ int ret;
+
+ ret = gsccs_send_message_retry_complete(pxp, NULL, 0, NULL, 0, NULL);
+ if (ret)
+ drm_dbg(&i915->drm, "Failed to send gsccs msg host-session-cleanup: ret=[%d]\n",
+ ret);
+}
+
+static void
+gsccs_destroy_execution_resource(struct intel_pxp *pxp)
+{
+ struct gsccs_session_resources *exec_res = &pxp->gsccs_res;
+
+ if (exec_res->host_session_handle)
+ gsccs_cleanup_fw_host_session_handle(pxp);
+ if (exec_res->ce)
+ intel_context_put(exec_res->ce);
+ if (exec_res->bb_vma)
+ i915_vma_unpin_and_release(&exec_res->bb_vma, I915_VMA_RELEASE_MAP);
+ if (exec_res->pkt_vma)
+ i915_vma_unpin_and_release(&exec_res->pkt_vma, I915_VMA_RELEASE_MAP);
+
+ memset(exec_res, 0, sizeof(*exec_res));
+}
+
+static int
+gsccs_create_buffer(struct intel_gt *gt,
+ const char *bufname, size_t size,
+ struct i915_vma **vma, void **map)
+{
+ struct drm_i915_private *i915 = gt->i915;
+ struct drm_i915_gem_object *obj;
+ int err = 0;
+
+ obj = i915_gem_object_create_internal(i915, size);
+ if (IS_ERR(obj)) {
+ drm_err(&i915->drm, "Failed to allocate gsccs backend %s.\n", bufname);
+ err = PTR_ERR(obj);
+ goto out_none;
+ }
+
+ *vma = i915_vma_instance(obj, gt->vm, NULL);
+ if (IS_ERR(*vma)) {
+ drm_err(&i915->drm, "Failed to vma-instance gsccs backend %s.\n", bufname);
+ err = PTR_ERR(*vma);
+ goto out_put;
+ }
+
+ /* return a virtual pointer */
+ *map = i915_gem_object_pin_map_unlocked(obj, intel_gt_coherent_map_type(gt, obj, true));
+ if (IS_ERR(*map)) {
+ drm_err(&i915->drm, "Failed to map gsccs backend %s.\n", bufname);
+ err = PTR_ERR(*map);
+ goto out_put;
+ }
+
+	/* all PXP session commands are treated as non-privileged */
+ err = i915_vma_pin(*vma, 0, 0, PIN_USER);
+ if (err) {
+ drm_err(&i915->drm, "Failed to vma-pin gsccs backend %s.\n", bufname);
+ goto out_unmap;
+ }
+
+ return 0;
+
+out_unmap:
+ i915_gem_object_unpin_map(obj);
+out_put:
+ i915_gem_object_put(obj);
+out_none:
+ *vma = NULL;
+ *map = NULL;
+
+ return err;
+}
+
+static int
+gsccs_allocate_execution_resource(struct intel_pxp *pxp)
+{
+ struct intel_gt *gt = pxp->ctrl_gt;
+ struct gsccs_session_resources *exec_res = &pxp->gsccs_res;
+ struct intel_engine_cs *engine = gt->engine[GSC0];
+ struct intel_context *ce;
+ int err = 0;
+
+ /*
+ * First, ensure the GSC engine is present.
+	 * NOTE: this backend is only ever called with the correct GT.
+ */
+ if (!engine)
+ return -ENODEV;
+
+ /*
+ * Now, allocate, pin and map two objects, one for the heci message packet
+	 * and another for the batch buffer we submit to the GSC engine (which includes the packet).
+ * NOTE: GSC-CS backend is currently only supported on MTL, so we allocate shmem.
+ */
+ err = gsccs_create_buffer(pxp->ctrl_gt, "Heci Packet",
+ 2 * PXP43_MAX_HECI_INOUT_SIZE,
+ &exec_res->pkt_vma, &exec_res->pkt_vaddr);
+ if (err)
+ return err;
+
+ err = gsccs_create_buffer(pxp->ctrl_gt, "Batch Buffer", PAGE_SIZE,
+ &exec_res->bb_vma, &exec_res->bb_vaddr);
+ if (err)
+ goto free_pkt;
+
+ /* Finally, create an intel_context to be used during the submission */
+ ce = intel_context_create(engine);
+ if (IS_ERR(ce)) {
+ drm_err(&gt->i915->drm, "Failed creating gsccs backend ctx\n");
+ err = PTR_ERR(ce);
+ goto free_batch;
+ }
+
+ i915_vm_put(ce->vm);
+ ce->vm = i915_vm_get(pxp->ctrl_gt->vm);
+ exec_res->ce = ce;
+
+ /* initialize host-session-handle (for all i915-to-gsc-firmware PXP cmds) */
+ get_random_bytes(&exec_res->host_session_handle, sizeof(exec_res->host_session_handle));
+
+ return 0;
+
+free_batch:
+ i915_vma_unpin_and_release(&exec_res->bb_vma, I915_VMA_RELEASE_MAP);
+free_pkt:
+ i915_vma_unpin_and_release(&exec_res->pkt_vma, I915_VMA_RELEASE_MAP);
+ memset(exec_res, 0, sizeof(*exec_res));
+
+ return err;
+}
+
+void intel_pxp_gsccs_fini(struct intel_pxp *pxp)
+{
+ intel_wakeref_t wakeref;
+
+ gsccs_destroy_execution_resource(pxp);
+ with_intel_runtime_pm(&pxp->ctrl_gt->i915->runtime_pm, wakeref)
+ intel_pxp_fini_hw(pxp);
+}
+
+int intel_pxp_gsccs_init(struct intel_pxp *pxp)
+{
+ int ret;
+ intel_wakeref_t wakeref;
+
+ ret = gsccs_allocate_execution_resource(pxp);
+ if (!ret) {
+ with_intel_runtime_pm(&pxp->ctrl_gt->i915->runtime_pm, wakeref)
+ intel_pxp_init_hw(pxp);
+ }
+ return ret;
+}
diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp_gsccs.h b/drivers/gpu/drm/i915/pxp/intel_pxp_gsccs.h
new file mode 100644
index 0000000000..298ad38e6c
--- /dev/null
+++ b/drivers/gpu/drm/i915/pxp/intel_pxp_gsccs.h
@@ -0,0 +1,43 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright(c) 2022, Intel Corporation. All rights reserved.
+ */
+
+#ifndef __INTEL_PXP_GSCCS_H__
+#define __INTEL_PXP_GSCCS_H__
+
+#include <linux/types.h>
+
+struct intel_pxp;
+
+#define GSC_REPLY_LATENCY_MS 210
+/*
+ * Max FW response time is 200ms, to which we add 10ms to account for overhead
+ * such as request preparation, GuC submission to hw and pipeline completion times.
+ */
+#define GSC_PENDING_RETRY_MAXCOUNT 40
+#define GSC_PENDING_RETRY_PAUSE_MS 50
+#define GSCFW_MAX_ROUND_TRIP_LATENCY_MS (GSC_PENDING_RETRY_MAXCOUNT * GSC_PENDING_RETRY_PAUSE_MS)
+
+#ifdef CONFIG_DRM_I915_PXP
+void intel_pxp_gsccs_fini(struct intel_pxp *pxp);
+int intel_pxp_gsccs_init(struct intel_pxp *pxp);
+
+int intel_pxp_gsccs_create_session(struct intel_pxp *pxp, int arb_session_id);
+void intel_pxp_gsccs_end_arb_fw_session(struct intel_pxp *pxp, u32 arb_session_id);
+
+#else
+static inline void intel_pxp_gsccs_fini(struct intel_pxp *pxp)
+{
+}
+
+static inline int intel_pxp_gsccs_init(struct intel_pxp *pxp)
+{
+ return 0;
+}
+
+#endif
+
+bool intel_pxp_gsccs_is_ready_for_sessions(struct intel_pxp *pxp);
+
+#endif /*__INTEL_PXP_GSCCS_H__ */
diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp_huc.c b/drivers/gpu/drm/i915/pxp/intel_pxp_huc.c
new file mode 100644
index 0000000000..5eedce9169
--- /dev/null
+++ b/drivers/gpu/drm/i915/pxp/intel_pxp_huc.c
@@ -0,0 +1,71 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright(c) 2021-2022, Intel Corporation. All rights reserved.
+ */
+
+#include "i915_drv.h"
+
+#include "gem/i915_gem_region.h"
+#include "gt/intel_gt.h"
+
+#include "intel_pxp.h"
+#include "intel_pxp_huc.h"
+#include "intel_pxp_tee.h"
+#include "intel_pxp_types.h"
+#include "intel_pxp_cmd_interface_43.h"
+
+int intel_pxp_huc_load_and_auth(struct intel_pxp *pxp)
+{
+ struct intel_gt *gt;
+ struct intel_huc *huc;
+ struct pxp43_start_huc_auth_in huc_in = {0};
+ struct pxp43_huc_auth_out huc_out = {0};
+ dma_addr_t huc_phys_addr;
+ u8 client_id = 0;
+ u8 fence_id = 0;
+ int err;
+
+ if (!pxp || !pxp->pxp_component)
+ return -ENODEV;
+
+ gt = pxp->ctrl_gt;
+ huc = &gt->uc.huc;
+
+ huc_phys_addr = i915_gem_object_get_dma_address(huc->fw.obj, 0);
+
+ /* write the PXP message into the lmem (the sg list) */
+ huc_in.header.api_version = PXP_APIVER(4, 3);
+ huc_in.header.command_id = PXP43_CMDID_START_HUC_AUTH;
+ huc_in.header.status = 0;
+ huc_in.header.buffer_len = sizeof(huc_in.huc_base_address);
+ huc_in.huc_base_address = cpu_to_le64(huc_phys_addr);
+
+ err = intel_pxp_tee_stream_message(pxp, client_id, fence_id,
+ &huc_in, sizeof(huc_in),
+ &huc_out, sizeof(huc_out));
+ if (err < 0) {
+ drm_err(&gt->i915->drm,
+ "Failed to send HuC load and auth command to GSC [%d]!\n",
+ err);
+ return err;
+ }
+
+ /*
+ * HuC does sometimes survive suspend/resume (it depends on how "deep"
+ * a sleep state the device reaches) so we can end up here on resume
+ * with HuC already loaded, in which case the GSC will return
+	 * PXP_STATUS_OP_NOT_PERMITTED. We can therefore consider the HuC
+ * correctly transferred in this scenario; if the same error is ever
+ * returned with HuC not loaded we'll still catch it when we check the
+ * authentication bit later.
+ */
+ if (huc_out.header.status != PXP_STATUS_SUCCESS &&
+ huc_out.header.status != PXP_STATUS_OP_NOT_PERMITTED) {
+ drm_err(&gt->i915->drm,
+ "HuC load failed with GSC error = 0x%x\n",
+ huc_out.header.status);
+ return -EPROTO;
+ }
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp_huc.h b/drivers/gpu/drm/i915/pxp/intel_pxp_huc.h
new file mode 100644
index 0000000000..e40847a91c
--- /dev/null
+++ b/drivers/gpu/drm/i915/pxp/intel_pxp_huc.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright(c) 2021-2022, Intel Corporation. All rights reserved.
+ */
+
+#ifndef __INTEL_PXP_HUC_H__
+#define __INTEL_PXP_HUC_H__
+
+struct intel_pxp;
+
+int intel_pxp_huc_load_and_auth(struct intel_pxp *pxp);
+
+#endif /* __INTEL_PXP_HUC_H__ */
diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp_irq.c b/drivers/gpu/drm/i915/pxp/intel_pxp_irq.c
new file mode 100644
index 0000000000..91e9622c07
--- /dev/null
+++ b/drivers/gpu/drm/i915/pxp/intel_pxp_irq.c
@@ -0,0 +1,108 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright(c) 2020 Intel Corporation.
+ */
+#include <linux/workqueue.h>
+
+#include "gt/intel_gt_irq.h"
+#include "gt/intel_gt_regs.h"
+#include "gt/intel_gt_types.h"
+
+#include "i915_irq.h"
+#include "i915_reg.h"
+
+#include "intel_pxp.h"
+#include "intel_pxp_irq.h"
+#include "intel_pxp_session.h"
+#include "intel_pxp_types.h"
+#include "intel_runtime_pm.h"
+
+/**
+ * intel_pxp_irq_handler - Handles PXP interrupts.
+ * @pxp: pointer to pxp struct
+ * @iir: interrupt vector
+ */
+void intel_pxp_irq_handler(struct intel_pxp *pxp, u16 iir)
+{
+ struct intel_gt *gt;
+
+ if (GEM_WARN_ON(!intel_pxp_is_enabled(pxp)))
+ return;
+
+ gt = pxp->ctrl_gt;
+
+ lockdep_assert_held(gt->irq_lock);
+
+ if (unlikely(!iir))
+ return;
+
+ if (iir & (GEN12_DISPLAY_PXP_STATE_TERMINATED_INTERRUPT |
+ GEN12_DISPLAY_APP_TERMINATED_PER_FW_REQ_INTERRUPT)) {
+ /* immediately mark PXP as inactive on termination */
+ intel_pxp_mark_termination_in_progress(pxp);
+ pxp->session_events |= PXP_TERMINATION_REQUEST | PXP_INVAL_REQUIRED;
+ }
+
+ if (iir & GEN12_DISPLAY_STATE_RESET_COMPLETE_INTERRUPT)
+ pxp->session_events |= PXP_TERMINATION_COMPLETE;
+
+ if (pxp->session_events)
+ queue_work(system_unbound_wq, &pxp->session_work);
+}
+
+static inline void __pxp_set_interrupts(struct intel_gt *gt, u32 interrupts)
+{
+ struct intel_uncore *uncore = gt->uncore;
+ const u32 mask = interrupts << 16;
+
+ intel_uncore_write(uncore, GEN11_CRYPTO_RSVD_INTR_ENABLE, mask);
+ intel_uncore_write(uncore, GEN11_CRYPTO_RSVD_INTR_MASK, ~mask);
+}
+
+static inline void pxp_irq_reset(struct intel_gt *gt)
+{
+ spin_lock_irq(gt->irq_lock);
+ gen11_gt_reset_one_iir(gt, 0, GEN11_KCR);
+ spin_unlock_irq(gt->irq_lock);
+}
+
+void intel_pxp_irq_enable(struct intel_pxp *pxp)
+{
+ struct intel_gt *gt = pxp->ctrl_gt;
+
+ spin_lock_irq(gt->irq_lock);
+
+ if (!pxp->irq_enabled)
+ WARN_ON_ONCE(gen11_gt_reset_one_iir(gt, 0, GEN11_KCR));
+
+ __pxp_set_interrupts(gt, GEN12_PXP_INTERRUPTS);
+ pxp->irq_enabled = true;
+
+ spin_unlock_irq(gt->irq_lock);
+}
+
+void intel_pxp_irq_disable(struct intel_pxp *pxp)
+{
+ struct intel_gt *gt = pxp->ctrl_gt;
+
+ /*
+ * We always need to submit a global termination when we re-enable the
+ * interrupts, so there is no need to make sure that the session state
+ * makes sense at the end of this function. Just make sure this is not
+	 * called in a path where the driver considers the session valid and
+	 * doesn't issue a termination on restart.
+ */
+ GEM_WARN_ON(intel_pxp_is_active(pxp));
+
+ spin_lock_irq(gt->irq_lock);
+
+ pxp->irq_enabled = false;
+ __pxp_set_interrupts(gt, 0);
+
+ spin_unlock_irq(gt->irq_lock);
+ intel_synchronize_irq(gt->i915);
+
+ pxp_irq_reset(gt);
+
+ flush_work(&pxp->session_work);
+}
diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp_irq.h b/drivers/gpu/drm/i915/pxp/intel_pxp_irq.h
new file mode 100644
index 0000000000..8c292dc86f
--- /dev/null
+++ b/drivers/gpu/drm/i915/pxp/intel_pxp_irq.h
@@ -0,0 +1,40 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright(c) 2020, Intel Corporation. All rights reserved.
+ */
+
+#ifndef __INTEL_PXP_IRQ_H__
+#define __INTEL_PXP_IRQ_H__
+
+#include <linux/types.h>
+
+struct intel_pxp;
+
+#define GEN12_DISPLAY_PXP_STATE_TERMINATED_INTERRUPT BIT(1)
+#define GEN12_DISPLAY_APP_TERMINATED_PER_FW_REQ_INTERRUPT BIT(2)
+#define GEN12_DISPLAY_STATE_RESET_COMPLETE_INTERRUPT BIT(3)
+
+#define GEN12_PXP_INTERRUPTS \
+ (GEN12_DISPLAY_PXP_STATE_TERMINATED_INTERRUPT | \
+ GEN12_DISPLAY_APP_TERMINATED_PER_FW_REQ_INTERRUPT | \
+ GEN12_DISPLAY_STATE_RESET_COMPLETE_INTERRUPT)
+
+#ifdef CONFIG_DRM_I915_PXP
+void intel_pxp_irq_enable(struct intel_pxp *pxp);
+void intel_pxp_irq_disable(struct intel_pxp *pxp);
+void intel_pxp_irq_handler(struct intel_pxp *pxp, u16 iir);
+#else
+static inline void intel_pxp_irq_handler(struct intel_pxp *pxp, u16 iir)
+{
+}
+
+static inline void intel_pxp_irq_enable(struct intel_pxp *pxp)
+{
+}
+
+static inline void intel_pxp_irq_disable(struct intel_pxp *pxp)
+{
+}
+#endif
+
+#endif /* __INTEL_PXP_IRQ_H__ */
diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp_pm.c b/drivers/gpu/drm/i915/pxp/intel_pxp_pm.c
new file mode 100644
index 0000000000..1a04067f61
--- /dev/null
+++ b/drivers/gpu/drm/i915/pxp/intel_pxp_pm.c
@@ -0,0 +1,64 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright(c) 2020 Intel Corporation.
+ */
+
+#include "i915_drv.h"
+
+#include "intel_pxp.h"
+#include "intel_pxp_irq.h"
+#include "intel_pxp_pm.h"
+#include "intel_pxp_session.h"
+#include "intel_pxp_types.h"
+
+void intel_pxp_suspend_prepare(struct intel_pxp *pxp)
+{
+ if (!intel_pxp_is_enabled(pxp))
+ return;
+
+ intel_pxp_end(pxp);
+
+ intel_pxp_invalidate(pxp);
+}
+
+void intel_pxp_suspend(struct intel_pxp *pxp)
+{
+ intel_wakeref_t wakeref;
+
+ if (!intel_pxp_is_enabled(pxp))
+ return;
+
+ with_intel_runtime_pm(&pxp->ctrl_gt->i915->runtime_pm, wakeref) {
+ intel_pxp_fini_hw(pxp);
+ pxp->hw_state_invalidated = false;
+ }
+}
+
+void intel_pxp_resume_complete(struct intel_pxp *pxp)
+{
+ if (!intel_pxp_is_enabled(pxp))
+ return;
+
+ /*
+ * The PXP component gets automatically unbound when we go into S3 and
+ * re-bound after we come out, so in that scenario we can defer the
+ * hw init to the bind call.
+ * NOTE: GSC-CS backend doesn't rely on components.
+ */
+ if (!HAS_ENGINE(pxp->ctrl_gt, GSC0) && !pxp->pxp_component)
+ return;
+
+ intel_pxp_init_hw(pxp);
+}
+
+void intel_pxp_runtime_suspend(struct intel_pxp *pxp)
+{
+ if (!intel_pxp_is_enabled(pxp))
+ return;
+
+ pxp->arb_is_valid = false;
+
+ intel_pxp_fini_hw(pxp);
+
+ pxp->hw_state_invalidated = false;
+}
diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp_pm.h b/drivers/gpu/drm/i915/pxp/intel_pxp_pm.h
new file mode 100644
index 0000000000..06b46f535b
--- /dev/null
+++ b/drivers/gpu/drm/i915/pxp/intel_pxp_pm.h
@@ -0,0 +1,37 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright(c) 2020, Intel Corporation. All rights reserved.
+ */
+
+#ifndef __INTEL_PXP_PM_H__
+#define __INTEL_PXP_PM_H__
+
+struct intel_pxp;
+
+#ifdef CONFIG_DRM_I915_PXP
+void intel_pxp_suspend_prepare(struct intel_pxp *pxp);
+void intel_pxp_suspend(struct intel_pxp *pxp);
+void intel_pxp_resume_complete(struct intel_pxp *pxp);
+void intel_pxp_runtime_suspend(struct intel_pxp *pxp);
+#else
+static inline void intel_pxp_suspend_prepare(struct intel_pxp *pxp)
+{
+}
+
+static inline void intel_pxp_suspend(struct intel_pxp *pxp)
+{
+}
+
+static inline void intel_pxp_resume_complete(struct intel_pxp *pxp)
+{
+}
+
+static inline void intel_pxp_runtime_suspend(struct intel_pxp *pxp)
+{
+}
+#endif
+static inline void intel_pxp_runtime_resume(struct intel_pxp *pxp)
+{
+ intel_pxp_resume_complete(pxp);
+}
+#endif /* __INTEL_PXP_PM_H__ */
diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp_regs.h b/drivers/gpu/drm/i915/pxp/intel_pxp_regs.h
new file mode 100644
index 0000000000..a9e7e6efa4
--- /dev/null
+++ b/drivers/gpu/drm/i915/pxp/intel_pxp_regs.h
@@ -0,0 +1,27 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright(c) 2023, Intel Corporation. All rights reserved.
+ */
+
+#ifndef __INTEL_PXP_REGS_H__
+#define __INTEL_PXP_REGS_H__
+
+#include "i915_reg_defs.h"
+
+/* KCR subsystem register base address */
+#define GEN12_KCR_BASE 0x32000
+#define MTL_KCR_BASE 0x386000
+
+/* KCR enable/disable control */
+#define KCR_INIT(base) _MMIO((base) + 0xf0)
+
+/* Setting KCR Init bit is required after system boot */
+#define KCR_INIT_ALLOW_DISPLAY_ME_WRITES REG_BIT(14)
+
+/* KCR hwdrm session in play status 0-31 */
+#define KCR_SIP(base) _MMIO((base) + 0x260)
+
+/* PXP global terminate register for session termination */
+#define KCR_GLOBAL_TERMINATE(base) _MMIO((base) + 0xf8)
+
+#endif /* __INTEL_PXP_REGS_H__ */
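As a worked example of the parameterized base, KCR_INIT resolves to MMIO offset 0x320f0 (0x32000 + 0xf0) on legacy Gen12 and 0x3860f0 (0x386000 + 0xf0) on MTL's standalone media GT.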
diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp_session.c b/drivers/gpu/drm/i915/pxp/intel_pxp_session.c
new file mode 100644
index 0000000000..0a3e66b026
--- /dev/null
+++ b/drivers/gpu/drm/i915/pxp/intel_pxp_session.c
@@ -0,0 +1,186 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright(c) 2020, Intel Corporation. All rights reserved.
+ */
+
+#include "i915_drv.h"
+
+#include "intel_pxp.h"
+#include "intel_pxp_cmd.h"
+#include "intel_pxp_gsccs.h"
+#include "intel_pxp_session.h"
+#include "intel_pxp_tee.h"
+#include "intel_pxp_types.h"
+#include "intel_pxp_regs.h"
+
+#define ARB_SESSION I915_PROTECTED_CONTENT_DEFAULT_SESSION /* shorter define */
+
+static bool intel_pxp_session_is_in_play(struct intel_pxp *pxp, u32 id)
+{
+ struct intel_uncore *uncore = pxp->ctrl_gt->uncore;
+ intel_wakeref_t wakeref;
+ u32 sip = 0;
+
+ /* if we're suspended the session is considered off */
+ with_intel_runtime_pm_if_in_use(uncore->rpm, wakeref)
+ sip = intel_uncore_read(uncore, KCR_SIP(pxp->kcr_base));
+
+ return sip & BIT(id);
+}
+
+static int pxp_wait_for_session_state(struct intel_pxp *pxp, u32 id, bool in_play)
+{
+ struct intel_uncore *uncore = pxp->ctrl_gt->uncore;
+ intel_wakeref_t wakeref;
+ u32 mask = BIT(id);
+ int ret;
+
+ /* if we're suspended the session is considered off */
+ wakeref = intel_runtime_pm_get_if_in_use(uncore->rpm);
+ if (!wakeref)
+ return in_play ? -ENODEV : 0;
+
+ ret = intel_wait_for_register(uncore,
+ KCR_SIP(pxp->kcr_base),
+ mask,
+ in_play ? mask : 0,
+ 250);
+
+ intel_runtime_pm_put(uncore->rpm, wakeref);
+
+ return ret;
+}
+
+static int pxp_create_arb_session(struct intel_pxp *pxp)
+{
+ struct intel_gt *gt = pxp->ctrl_gt;
+ int ret;
+
+ pxp->arb_is_valid = false;
+
+ if (intel_pxp_session_is_in_play(pxp, ARB_SESSION)) {
+ drm_err(&gt->i915->drm, "arb session already in play at creation time\n");
+ return -EEXIST;
+ }
+
+ if (HAS_ENGINE(pxp->ctrl_gt, GSC0))
+ ret = intel_pxp_gsccs_create_session(pxp, ARB_SESSION);
+ else
+ ret = intel_pxp_tee_cmd_create_arb_session(pxp, ARB_SESSION);
+ if (ret) {
+ drm_err(&gt->i915->drm, "tee cmd for arb session creation failed\n");
+ return ret;
+ }
+
+ ret = pxp_wait_for_session_state(pxp, ARB_SESSION, true);
+ if (ret) {
+ drm_dbg(&gt->i915->drm, "arb session failed to go in play\n");
+ return ret;
+ }
+ drm_dbg(&gt->i915->drm, "PXP ARB session is alive\n");
+
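+	/* bump the key instance, skipping 0 which objects use to mean "not encrypted yet" */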
+ if (!++pxp->key_instance)
+ ++pxp->key_instance;
+
+ pxp->arb_is_valid = true;
+
+ return 0;
+}
+
+static int pxp_terminate_arb_session_and_global(struct intel_pxp *pxp)
+{
+ int ret;
+ struct intel_gt *gt = pxp->ctrl_gt;
+
+	/* termination must be marked as in progress before calling this function */
+ GEM_WARN_ON(pxp->arb_is_valid);
+
+ /* terminate the hw sessions */
+ ret = intel_pxp_terminate_session(pxp, ARB_SESSION);
+ if (ret) {
+ drm_err(&gt->i915->drm, "Failed to submit session termination\n");
+ return ret;
+ }
+
+ ret = pxp_wait_for_session_state(pxp, ARB_SESSION, false);
+ if (ret) {
+ drm_err(&gt->i915->drm, "Session state did not clear\n");
+ return ret;
+ }
+
+ intel_uncore_write(gt->uncore, KCR_GLOBAL_TERMINATE(pxp->kcr_base), 1);
+
+ if (HAS_ENGINE(gt, GSC0))
+ intel_pxp_gsccs_end_arb_fw_session(pxp, ARB_SESSION);
+ else
+ intel_pxp_tee_end_arb_fw_session(pxp, ARB_SESSION);
+
+ return ret;
+}
+
+void intel_pxp_terminate(struct intel_pxp *pxp, bool post_invalidation_needs_restart)
+{
+ int ret;
+
+ pxp->hw_state_invalidated = post_invalidation_needs_restart;
+
+ /*
+ * if we fail to submit the termination there is no point in waiting for
+ * it to complete. PXP will be marked as non-active until the next
+ * termination is issued.
+ */
+ ret = pxp_terminate_arb_session_and_global(pxp);
+ if (ret)
+ complete_all(&pxp->termination);
+}
+
+static void pxp_terminate_complete(struct intel_pxp *pxp)
+{
+	/* re-create the arb session after the teardown handling completes */
+ if (fetch_and_zero(&pxp->hw_state_invalidated))
+ pxp_create_arb_session(pxp);
+
+ complete_all(&pxp->termination);
+}
+
+static void pxp_session_work(struct work_struct *work)
+{
+ struct intel_pxp *pxp = container_of(work, typeof(*pxp), session_work);
+ struct intel_gt *gt = pxp->ctrl_gt;
+ intel_wakeref_t wakeref;
+ u32 events = 0;
+
+ spin_lock_irq(gt->irq_lock);
+ events = fetch_and_zero(&pxp->session_events);
+ spin_unlock_irq(gt->irq_lock);
+
+ if (!events)
+ return;
+
+ if (events & PXP_INVAL_REQUIRED)
+ intel_pxp_invalidate(pxp);
+
+ /*
+ * If we're processing an event while suspending then don't bother,
+ * we're going to re-init everything on resume anyway.
+ */
+ wakeref = intel_runtime_pm_get_if_in_use(gt->uncore->rpm);
+ if (!wakeref)
+ return;
+
+ if (events & PXP_TERMINATION_REQUEST) {
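+		/* a new termination request supersedes any stale completion event */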
+ events &= ~PXP_TERMINATION_COMPLETE;
+ intel_pxp_terminate(pxp, true);
+ }
+
+ if (events & PXP_TERMINATION_COMPLETE)
+ pxp_terminate_complete(pxp);
+
+ intel_runtime_pm_put(gt->uncore->rpm, wakeref);
+}
+
+void intel_pxp_session_management_init(struct intel_pxp *pxp)
+{
+ mutex_init(&pxp->arb_mutex);
+ INIT_WORK(&pxp->session_work, pxp_session_work);
+}
diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp_session.h b/drivers/gpu/drm/i915/pxp/intel_pxp_session.h
new file mode 100644
index 0000000000..ba57881271
--- /dev/null
+++ b/drivers/gpu/drm/i915/pxp/intel_pxp_session.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright(c) 2020, Intel Corporation. All rights reserved.
+ */
+
+#ifndef __INTEL_PXP_SESSION_H__
+#define __INTEL_PXP_SESSION_H__
+
+#include <linux/types.h>
+
+struct intel_pxp;
+
+#ifdef CONFIG_DRM_I915_PXP
+void intel_pxp_session_management_init(struct intel_pxp *pxp);
+void intel_pxp_terminate(struct intel_pxp *pxp, bool post_invalidation_needs_restart);
+#else
+static inline void intel_pxp_session_management_init(struct intel_pxp *pxp)
+{
+}
+
+static inline void intel_pxp_terminate(struct intel_pxp *pxp, bool post_invalidation_needs_restart)
+{
+}
+#endif
+#endif /* __INTEL_PXP_SESSION_H__ */
diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp_tee.c b/drivers/gpu/drm/i915/pxp/intel_pxp_tee.c
new file mode 100644
index 0000000000..80bb001898
--- /dev/null
+++ b/drivers/gpu/drm/i915/pxp/intel_pxp_tee.c
@@ -0,0 +1,406 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright(c) 2020 Intel Corporation.
+ */
+
+#include <linux/component.h>
+
+#include <drm/i915_pxp_tee_interface.h>
+#include <drm/i915_component.h>
+
+#include "gem/i915_gem_lmem.h"
+
+#include "i915_drv.h"
+#include "gt/intel_gt.h"
+
+#include "intel_pxp.h"
+#include "intel_pxp_cmd_interface_42.h"
+#include "intel_pxp_huc.h"
+#include "intel_pxp_session.h"
+#include "intel_pxp_tee.h"
+#include "intel_pxp_types.h"
+
+static bool
+is_fw_err_platform_config(u32 type)
+{
+ switch (type) {
+ case PXP_STATUS_ERROR_API_VERSION:
+ case PXP_STATUS_PLATFCONFIG_KF1_NOVERIF:
+ case PXP_STATUS_PLATFCONFIG_KF1_BAD:
+ return true;
+ default:
+ break;
+ }
+ return false;
+}
+
+static const char *
+fw_err_to_string(u32 type)
+{
+ switch (type) {
+ case PXP_STATUS_ERROR_API_VERSION:
+ return "ERR_API_VERSION";
+ case PXP_STATUS_NOT_READY:
+ return "ERR_NOT_READY";
+ case PXP_STATUS_PLATFCONFIG_KF1_NOVERIF:
+ case PXP_STATUS_PLATFCONFIG_KF1_BAD:
+ return "ERR_PLATFORM_CONFIG";
+ default:
+ break;
+ }
+ return NULL;
+}
+
+static int intel_pxp_tee_io_message(struct intel_pxp *pxp,
+ void *msg_in, u32 msg_in_size,
+ void *msg_out, u32 msg_out_max_size,
+ u32 *msg_out_rcv_size)
+{
+ struct drm_i915_private *i915 = pxp->ctrl_gt->i915;
+ struct i915_pxp_component *pxp_component = pxp->pxp_component;
+ int ret = 0;
+
+ mutex_lock(&pxp->tee_mutex);
+
+ /*
+ * The binding of the component is asynchronous from i915 probe, so we
+ * can't be sure it has happened.
+ */
+ if (!pxp_component) {
+ ret = -ENODEV;
+ goto unlock;
+ }
+
+ ret = pxp_component->ops->send(pxp_component->tee_dev, msg_in, msg_in_size);
+ if (ret) {
+ drm_err(&i915->drm, "Failed to send PXP TEE message\n");
+ goto unlock;
+ }
+
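+	/* on success, ->recv() returns the number of bytes copied into msg_out */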
+ ret = pxp_component->ops->recv(pxp_component->tee_dev, msg_out, msg_out_max_size);
+ if (ret < 0) {
+ drm_err(&i915->drm, "Failed to receive PXP TEE message\n");
+ goto unlock;
+ }
+
+ if (ret > msg_out_max_size) {
+ drm_err(&i915->drm,
+ "Failed to receive PXP TEE message due to unexpected output size\n");
+ ret = -ENOSPC;
+ goto unlock;
+ }
+
+ if (msg_out_rcv_size)
+ *msg_out_rcv_size = ret;
+
+ ret = 0;
+unlock:
+ mutex_unlock(&pxp->tee_mutex);
+ return ret;
+}
+
+int intel_pxp_tee_stream_message(struct intel_pxp *pxp,
+ u8 client_id, u32 fence_id,
+ void *msg_in, size_t msg_in_len,
+ void *msg_out, size_t msg_out_len)
+{
+ /* TODO: for bigger objects we need to use a sg of 4k pages */
+ const size_t max_msg_size = PAGE_SIZE;
+ struct drm_i915_private *i915 = pxp->ctrl_gt->i915;
+ struct i915_pxp_component *pxp_component = pxp->pxp_component;
+ unsigned int offset = 0;
+ struct scatterlist *sg;
+ int ret;
+
+ if (msg_in_len > max_msg_size || msg_out_len > max_msg_size)
+ return -ENOSPC;
+
+ mutex_lock(&pxp->tee_mutex);
+
+ if (unlikely(!pxp_component || !pxp_component->ops->gsc_command)) {
+ ret = -ENODEV;
+ goto unlock;
+ }
+
+ GEM_BUG_ON(!pxp->stream_cmd.obj);
+
+ sg = i915_gem_object_get_sg_dma(pxp->stream_cmd.obj, 0, &offset);
+
+ memcpy(pxp->stream_cmd.vaddr, msg_in, msg_in_len);
+
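+	/* the single stream_cmd page backs both directions, so the same sg is passed for input and output */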
+ ret = pxp_component->ops->gsc_command(pxp_component->tee_dev, client_id,
+ fence_id, sg, msg_in_len, sg);
+ if (ret < 0)
+ drm_err(&i915->drm, "Failed to send PXP TEE gsc command\n");
+ else
+ memcpy(msg_out, pxp->stream_cmd.vaddr, msg_out_len);
+
+unlock:
+ mutex_unlock(&pxp->tee_mutex);
+ return ret;
+}
+
+/**
+ * i915_pxp_tee_component_bind - bind function to pass the TEE function pointers to i915
+ * @i915_kdev: pointer to i915 kernel device
+ * @tee_kdev: pointer to tee kernel device
+ * @data: pointer to the i915_pxp_component struct containing the function pointers
+ *
+ * This bind function is called during system boot or on resume from system sleep.
+ *
+ * Return: 0 if successful.
+ */
+static int i915_pxp_tee_component_bind(struct device *i915_kdev,
+ struct device *tee_kdev, void *data)
+{
+ struct drm_i915_private *i915 = kdev_to_i915(i915_kdev);
+ struct intel_pxp *pxp = i915->pxp;
+ struct intel_uc *uc = &pxp->ctrl_gt->uc;
+ intel_wakeref_t wakeref;
+ int ret = 0;
+
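+	/* without HECI-PXP the TEE sits on a separate ME device, so link it to i915 to enforce PM ordering */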
+ if (!HAS_HECI_PXP(i915)) {
+ pxp->dev_link = device_link_add(i915_kdev, tee_kdev, DL_FLAG_STATELESS);
+ if (drm_WARN_ON(&i915->drm, !pxp->dev_link))
+ return -ENODEV;
+ }
+
+ mutex_lock(&pxp->tee_mutex);
+ pxp->pxp_component = data;
+ pxp->pxp_component->tee_dev = tee_kdev;
+ mutex_unlock(&pxp->tee_mutex);
+
+ if (intel_uc_uses_huc(uc) && intel_huc_is_loaded_by_gsc(&uc->huc)) {
+ with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
+ /* load huc via pxp */
+ ret = intel_huc_fw_load_and_auth_via_gsc(&uc->huc);
+ if (ret < 0)
+ drm_err(&i915->drm, "failed to load huc via gsc %d\n", ret);
+ }
+ }
+
+ /* if we are suspended, the HW will be re-initialized on resume */
+ wakeref = intel_runtime_pm_get_if_in_use(&i915->runtime_pm);
+ if (!wakeref)
+ return 0;
+
+ /* the component is required to fully start the PXP HW */
+ if (intel_pxp_is_enabled(pxp))
+ intel_pxp_init_hw(pxp);
+
+ intel_runtime_pm_put(&i915->runtime_pm, wakeref);
+
+ return ret;
+}
+
+static void i915_pxp_tee_component_unbind(struct device *i915_kdev,
+ struct device *tee_kdev, void *data)
+{
+ struct drm_i915_private *i915 = kdev_to_i915(i915_kdev);
+ struct intel_pxp *pxp = i915->pxp;
+ intel_wakeref_t wakeref;
+
+ if (intel_pxp_is_enabled(pxp))
+ with_intel_runtime_pm_if_in_use(&i915->runtime_pm, wakeref)
+ intel_pxp_fini_hw(pxp);
+
+ mutex_lock(&pxp->tee_mutex);
+ pxp->pxp_component = NULL;
+ mutex_unlock(&pxp->tee_mutex);
+
+ if (pxp->dev_link) {
+ device_link_del(pxp->dev_link);
+ pxp->dev_link = NULL;
+ }
+}
+
+static const struct component_ops i915_pxp_tee_component_ops = {
+ .bind = i915_pxp_tee_component_bind,
+ .unbind = i915_pxp_tee_component_unbind,
+};
+
+static int alloc_streaming_command(struct intel_pxp *pxp)
+{
+ struct drm_i915_private *i915 = pxp->ctrl_gt->i915;
+ struct drm_i915_gem_object *obj = NULL;
+ void *cmd;
+ int err;
+
+ pxp->stream_cmd.obj = NULL;
+ pxp->stream_cmd.vaddr = NULL;
+
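+	/* the LMEM command page is only needed on dGfx, where stream messages flow through ->gsc_command() */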
+ if (!IS_DGFX(i915))
+ return 0;
+
+ /* allocate lmem object of one page for PXP command memory and store it */
+ obj = i915_gem_object_create_lmem(i915, PAGE_SIZE, I915_BO_ALLOC_CONTIGUOUS);
+ if (IS_ERR(obj)) {
+ drm_err(&i915->drm, "Failed to allocate pxp streaming command!\n");
+ return PTR_ERR(obj);
+ }
+
+ err = i915_gem_object_pin_pages_unlocked(obj);
+ if (err) {
+ drm_err(&i915->drm, "Failed to pin gsc message page!\n");
+ goto out_put;
+ }
+
+	/* map the lmem object to get a CPU-accessible virtual address */
+ cmd = i915_gem_object_pin_map_unlocked(obj,
+ intel_gt_coherent_map_type(pxp->ctrl_gt,
+ obj, true));
+ if (IS_ERR(cmd)) {
+ drm_err(&i915->drm, "Failed to map gsc message page!\n");
+ err = PTR_ERR(cmd);
+ goto out_unpin;
+ }
+
+ memset(cmd, 0, obj->base.size);
+
+ pxp->stream_cmd.obj = obj;
+ pxp->stream_cmd.vaddr = cmd;
+
+ return 0;
+
+out_unpin:
+ i915_gem_object_unpin_pages(obj);
+out_put:
+ i915_gem_object_put(obj);
+ return err;
+}
+
+static void free_streaming_command(struct intel_pxp *pxp)
+{
+ struct drm_i915_gem_object *obj = fetch_and_zero(&pxp->stream_cmd.obj);
+
+ if (!obj)
+ return;
+
+ i915_gem_object_unpin_map(obj);
+ i915_gem_object_unpin_pages(obj);
+ i915_gem_object_put(obj);
+}
+
+int intel_pxp_tee_component_init(struct intel_pxp *pxp)
+{
+ int ret;
+ struct intel_gt *gt = pxp->ctrl_gt;
+ struct drm_i915_private *i915 = gt->i915;
+
+ ret = alloc_streaming_command(pxp);
+ if (ret)
+ return ret;
+
+ ret = component_add_typed(i915->drm.dev, &i915_pxp_tee_component_ops,
+ I915_COMPONENT_PXP);
+ if (ret < 0) {
+ drm_err(&i915->drm, "Failed to add PXP component (%d)\n", ret);
+ goto out_free;
+ }
+
+ pxp->pxp_component_added = true;
+
+ return 0;
+
+out_free:
+ free_streaming_command(pxp);
+ return ret;
+}
+
+void intel_pxp_tee_component_fini(struct intel_pxp *pxp)
+{
+ struct drm_i915_private *i915 = pxp->ctrl_gt->i915;
+
+ if (!pxp->pxp_component_added)
+ return;
+
+ component_del(i915->drm.dev, &i915_pxp_tee_component_ops);
+ pxp->pxp_component_added = false;
+
+ free_streaming_command(pxp);
+}
+
+int intel_pxp_tee_cmd_create_arb_session(struct intel_pxp *pxp,
+ int arb_session_id)
+{
+ struct drm_i915_private *i915 = pxp->ctrl_gt->i915;
+ struct pxp42_create_arb_in msg_in = {0};
+ struct pxp42_create_arb_out msg_out = {0};
+ int ret;
+
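+	/* buffer_len covers only the payload that follows the fixed header */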
+ msg_in.header.api_version = PXP_APIVER(4, 2);
+ msg_in.header.command_id = PXP42_CMDID_INIT_SESSION;
+ msg_in.header.buffer_len = sizeof(msg_in) - sizeof(msg_in.header);
+ msg_in.protection_mode = PXP42_ARB_SESSION_MODE_HEAVY;
+ msg_in.session_id = arb_session_id;
+
+ ret = intel_pxp_tee_io_message(pxp,
+ &msg_in, sizeof(msg_in),
+ &msg_out, sizeof(msg_out),
+ NULL);
+
+ if (ret) {
+ drm_err(&i915->drm, "Failed to send tee msg init arb session, ret=[%d]\n", ret);
+ } else if (msg_out.header.status != 0) {
+ if (is_fw_err_platform_config(msg_out.header.status)) {
+ drm_info_once(&i915->drm,
+ "PXP init-arb-session-%d failed due to BIOS/SOC:0x%08x:%s\n",
+ arb_session_id, msg_out.header.status,
+ fw_err_to_string(msg_out.header.status));
+ } else {
+			drm_dbg(&i915->drm, "PXP init-arb-session-%d failed 0x%08x:%s:\n",
+ arb_session_id, msg_out.header.status,
+ fw_err_to_string(msg_out.header.status));
+			drm_dbg(&i915->drm, "  cmd-detail: ID=[0x%08x],API-Ver=[0x%08x]\n",
+ msg_in.header.command_id, msg_in.header.api_version);
+ }
+ }
+
+ return ret;
+}
+
+void intel_pxp_tee_end_arb_fw_session(struct intel_pxp *pxp, u32 session_id)
+{
+ struct drm_i915_private *i915 = pxp->ctrl_gt->i915;
+ struct pxp42_inv_stream_key_in msg_in = {0};
+ struct pxp42_inv_stream_key_out msg_out = {0};
+ int ret, trials = 0;
+
+try_again:
+ memset(&msg_in, 0, sizeof(msg_in));
+ memset(&msg_out, 0, sizeof(msg_out));
+ msg_in.header.api_version = PXP_APIVER(4, 2);
+ msg_in.header.command_id = PXP42_CMDID_INVALIDATE_STREAM_KEY;
+ msg_in.header.buffer_len = sizeof(msg_in) - sizeof(msg_in.header);
+
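+	/* the stream_id extdata packs a session-valid flag, the app type and the session id */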
+ msg_in.header.stream_id = FIELD_PREP(PXP_CMDHDR_EXTDATA_SESSION_VALID, 1);
+ msg_in.header.stream_id |= FIELD_PREP(PXP_CMDHDR_EXTDATA_APP_TYPE, 0);
+ msg_in.header.stream_id |= FIELD_PREP(PXP_CMDHDR_EXTDATA_SESSION_ID, session_id);
+
+ ret = intel_pxp_tee_io_message(pxp,
+ &msg_in, sizeof(msg_in),
+ &msg_out, sizeof(msg_out),
+ NULL);
+
+	/* coherent cleanup between the GT and the firmware is critical, so retry if it fails */
+ if ((ret || msg_out.header.status != 0x0) && ++trials < 3)
+ goto try_again;
+
+ if (ret) {
+ drm_err(&i915->drm, "Failed to send tee msg for inv-stream-key-%u, ret=[%d]\n",
+ session_id, ret);
+ } else if (msg_out.header.status != 0) {
+ if (is_fw_err_platform_config(msg_out.header.status)) {
+ drm_info_once(&i915->drm,
+				      "PXP inv-stream-key-%u failed due to BIOS/SOC:0x%08x:%s\n",
+ session_id, msg_out.header.status,
+ fw_err_to_string(msg_out.header.status));
+ } else {
+ drm_dbg(&i915->drm, "PXP inv-stream-key-%u failed 0x%08x:%s:\n",
+ session_id, msg_out.header.status,
+ fw_err_to_string(msg_out.header.status));
+			drm_dbg(&i915->drm, "  cmd-detail: ID=[0x%08x],API-Ver=[0x%08x]\n",
+ msg_in.header.command_id, msg_in.header.api_version);
+ }
+ }
+}
diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp_tee.h b/drivers/gpu/drm/i915/pxp/intel_pxp_tee.h
new file mode 100644
index 0000000000..aeb3dfe7ce
--- /dev/null
+++ b/drivers/gpu/drm/i915/pxp/intel_pxp_tee.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright(c) 2020, Intel Corporation. All rights reserved.
+ */
+
+#ifndef __INTEL_PXP_TEE_H__
+#define __INTEL_PXP_TEE_H__
+
+#include "intel_pxp.h"
+
+int intel_pxp_tee_component_init(struct intel_pxp *pxp);
+void intel_pxp_tee_component_fini(struct intel_pxp *pxp);
+
+int intel_pxp_tee_cmd_create_arb_session(struct intel_pxp *pxp,
+ int arb_session_id);
+
+int intel_pxp_tee_stream_message(struct intel_pxp *pxp,
+ u8 client_id, u32 fence_id,
+ void *msg_in, size_t msg_in_len,
+ void *msg_out, size_t msg_out_len);
+
+#endif /* __INTEL_PXP_TEE_H__ */
diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp_types.h b/drivers/gpu/drm/i915/pxp/intel_pxp_types.h
new file mode 100644
index 0000000000..1a8765866b
--- /dev/null
+++ b/drivers/gpu/drm/i915/pxp/intel_pxp_types.h
@@ -0,0 +1,120 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright(c) 2020, Intel Corporation. All rights reserved.
+ */
+
+#ifndef __INTEL_PXP_TYPES_H__
+#define __INTEL_PXP_TYPES_H__
+
+#include <linux/completion.h>
+#include <linux/mutex.h>
+#include <linux/types.h>
+#include <linux/workqueue.h>
+
+struct intel_context;
+struct intel_gt;
+struct i915_pxp_component;
+struct drm_i915_private;
+
+/**
+ * struct intel_pxp - pxp state
+ */
+struct intel_pxp {
+ /**
+	 * @ctrl_gt: pointer to the tile that owns the controls for the PXP subsystem assets:
+	 * the VDBOX, the KCR engine (and the GSC CS, depending on the platform)
+ */
+ struct intel_gt *ctrl_gt;
+
+ /**
+	 * @kcr_base: base mmio offset for the KCR engine, which differs between legacy
+	 * platforms and newer ones where the KCR is inside the media-tile.
+ */
+ u32 kcr_base;
+
+ /**
+ * @gsccs_res: resources for request submission for platforms that have a GSC engine.
+ */
+ struct gsccs_session_resources {
+ u64 host_session_handle; /* used by firmware to link commands to sessions */
+ struct intel_context *ce; /* context for gsc command submission */
+
+ struct i915_vma *pkt_vma; /* GSC FW cmd packet vma */
+ void *pkt_vaddr; /* GSC FW cmd packet virt pointer */
+
+ struct i915_vma *bb_vma; /* HECI_PKT batch buffer vma */
+ void *bb_vaddr; /* HECI_PKT batch buffer virt pointer */
+ } gsccs_res;
+
+ /**
+ * @pxp_component: i915_pxp_component struct of the bound mei_pxp
+ * module. Only set and cleared inside component bind/unbind functions,
+ * which are protected by &tee_mutex.
+ */
+ struct i915_pxp_component *pxp_component;
+
+ /**
+ * @dev_link: Enforce module relationship for power management ordering.
+ */
+ struct device_link *dev_link;
+ /**
+ * @pxp_component_added: track if the pxp component has been added.
+ * Set and cleared in tee init and fini functions respectively.
+ */
+ bool pxp_component_added;
+
+ /** @ce: kernel-owned context used for PXP operations */
+ struct intel_context *ce;
+
+ /** @arb_mutex: protects arb session start */
+ struct mutex arb_mutex;
+ /**
+ * @arb_is_valid: tracks arb session status.
+ * After a teardown, the arb session can still be in play on the HW
+	 * even if the keys are gone, so we can't rely on the HW state of the
+	 * session to know whether it's valid; instead, we track the status in SW.
+ */
+ bool arb_is_valid;
+
+ /**
+ * @key_instance: tracks which key instance we're on, so we can use it
+ * to determine if an object was created using the current key or a
+ * previous one.
+ */
+ u32 key_instance;
+
+ /** @tee_mutex: protects the tee channel binding and messaging. */
+ struct mutex tee_mutex;
+
+ /** @stream_cmd: LMEM obj used to send stream PXP commands to the GSC */
+ struct {
+ struct drm_i915_gem_object *obj; /* contains PXP command memory */
+ void *vaddr; /* virtual memory for PXP command */
+ } stream_cmd;
+
+ /**
+ * @hw_state_invalidated: if the HW perceives an attack on the integrity
+ * of the encryption it will invalidate the keys and expect SW to
+ * re-initialize the session. We keep track of this state to make sure
+ * we only re-start the arb session when required.
+ */
+ bool hw_state_invalidated;
+
+ /** @irq_enabled: tracks the status of the kcr irqs */
+ bool irq_enabled;
+ /**
+ * @termination: tracks the status of a pending termination. Only
+ * re-initialized under gt->irq_lock and completed in &session_work.
+ */
+ struct completion termination;
+
+ /** @session_work: worker that manages session events. */
+ struct work_struct session_work;
+ /** @session_events: pending session events, protected with gt->irq_lock. */
+ u32 session_events;
+#define PXP_TERMINATION_REQUEST BIT(0)
+#define PXP_TERMINATION_COMPLETE BIT(1)
+#define PXP_INVAL_REQUIRED BIT(2)
+};
+
+#endif /* __INTEL_PXP_TYPES_H__ */