author		Daniel Baumann <daniel.baumann@progress-linux.org>	2024-04-07 18:49:45 +0000
committer	Daniel Baumann <daniel.baumann@progress-linux.org>	2024-04-07 18:49:45 +0000
commit		2c3c1048746a4622d8c89a29670120dc8fab93c4 (patch)
tree		848558de17fb3008cdf4d861b01ac7781903ce39 /drivers/gpu/drm/i915/pxp
parent		Initial commit. (diff)
Adding upstream version 6.1.76. (upstream/6.1.76)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'drivers/gpu/drm/i915/pxp')
-rw-r--r--	drivers/gpu/drm/i915/pxp/intel_pxp.c			| 319
-rw-r--r--	drivers/gpu/drm/i915/pxp/intel_pxp.h			|  67
-rw-r--r--	drivers/gpu/drm/i915/pxp/intel_pxp_cmd.c		| 141
-rw-r--r--	drivers/gpu/drm/i915/pxp/intel_pxp_cmd.h		|  15
-rw-r--r--	drivers/gpu/drm/i915/pxp/intel_pxp_debugfs.c		|  81
-rw-r--r--	drivers/gpu/drm/i915/pxp/intel_pxp_debugfs.h		|  21
-rw-r--r--	drivers/gpu/drm/i915/pxp/intel_pxp_irq.c		| 102
-rw-r--r--	drivers/gpu/drm/i915/pxp/intel_pxp_irq.h		|  32
-rw-r--r--	drivers/gpu/drm/i915/pxp/intel_pxp_pm.c		|  61
-rw-r--r--	drivers/gpu/drm/i915/pxp/intel_pxp_pm.h		|  37
-rw-r--r--	drivers/gpu/drm/i915/pxp/intel_pxp_session.c		| 174
-rw-r--r--	drivers/gpu/drm/i915/pxp/intel_pxp_session.h		|  15
-rw-r--r--	drivers/gpu/drm/i915/pxp/intel_pxp_tee.c		| 179
-rw-r--r--	drivers/gpu/drm/i915/pxp/intel_pxp_tee.h		|  17
-rw-r--r--	drivers/gpu/drm/i915/pxp/intel_pxp_tee_interface.h	|  36
-rw-r--r--	drivers/gpu/drm/i915/pxp/intel_pxp_types.h		|  81
16 files changed, 1378 insertions, 0 deletions
diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp.c b/drivers/gpu/drm/i915/pxp/intel_pxp.c
new file mode 100644
index 000000000..69cdaaddc
--- /dev/null
+++ b/drivers/gpu/drm/i915/pxp/intel_pxp.c
@@ -0,0 +1,319 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright(c) 2020 Intel Corporation.
+ */
+#include <linux/workqueue.h>
+#include "intel_pxp.h"
+#include "intel_pxp_irq.h"
+#include "intel_pxp_session.h"
+#include "intel_pxp_tee.h"
+#include "gem/i915_gem_context.h"
+#include "gt/intel_context.h"
+#include "i915_drv.h"
+
+/**
+ * DOC: PXP
+ *
+ * PXP (Protected Xe Path) is a feature available in Gen12 and newer platforms.
+ * It allows execution and flip to display of protected (i.e. encrypted)
+ * objects. The SW support is enabled via the CONFIG_DRM_I915_PXP kconfig.
+ *
+ * Objects can opt-in to PXP encryption at creation time via the
+ * I915_GEM_CREATE_EXT_PROTECTED_CONTENT create_ext flag. For objects to be
+ * correctly protected they must be used in conjunction with a context created
+ * with the I915_CONTEXT_PARAM_PROTECTED_CONTENT flag. See the documentation
+ * of those two uapi flags for details and restrictions.
+ *
+ * Protected objects are tied to a pxp session; currently we only support one
+ * session, which i915 manages and whose index is available in the uapi
+ * (I915_PROTECTED_CONTENT_DEFAULT_SESSION) for use in instructions targeting
+ * protected objects.
+ * The session is invalidated by the HW when certain events occur (e.g.
+ * suspend/resume). When this happens, all the objects that were used with the
+ * session are marked as invalid and all contexts marked as using protected
+ * content are banned. Any further attempt at using them in an execbuf call is
+ * rejected, while flips are converted to black frames.
+ *
+ * Some of the PXP setup operations are performed by the Management Engine,
+ * which is handled by the mei driver; communication between i915 and mei is
+ * performed via the mei_pxp component module.
+ */
+
+struct intel_gt *pxp_to_gt(const struct intel_pxp *pxp)
+{
+ return container_of(pxp, struct intel_gt, pxp);
+}
+
+bool intel_pxp_is_enabled(const struct intel_pxp *pxp)
+{
+ return pxp->ce;
+}
+
+bool intel_pxp_is_active(const struct intel_pxp *pxp)
+{
+ return pxp->arb_is_valid;
+}
+
+/* KCR register definitions */
+#define KCR_INIT _MMIO(0x320f0)
+/* Setting the KCR Init bit is required after system boot */
+#define KCR_INIT_ALLOW_DISPLAY_ME_WRITES REG_BIT(14)
+
+static void kcr_pxp_enable(struct intel_gt *gt)
+{
+ intel_uncore_write(gt->uncore, KCR_INIT,
+ _MASKED_BIT_ENABLE(KCR_INIT_ALLOW_DISPLAY_ME_WRITES));
+}
+
+static void kcr_pxp_disable(struct intel_gt *gt)
+{
+ intel_uncore_write(gt->uncore, KCR_INIT,
+ _MASKED_BIT_DISABLE(KCR_INIT_ALLOW_DISPLAY_ME_WRITES));
+}
+
+static int create_vcs_context(struct intel_pxp *pxp)
+{
+ static struct lock_class_key pxp_lock;
+ struct intel_gt *gt = pxp_to_gt(pxp);
+ struct intel_engine_cs *engine;
+ struct intel_context *ce;
+ int i;
+
+ /*
+ * Find the first VCS engine present. We're guaranteed there is one
+ * if we're in this function due to the check in has_pxp
+ */
+ for (i = 0, engine = NULL; !engine; i++)
+ engine = gt->engine_class[VIDEO_DECODE_CLASS][i];
+
+ GEM_BUG_ON(!engine || engine->class != VIDEO_DECODE_CLASS);
+
+ ce = intel_engine_create_pinned_context(engine, engine->gt->vm, SZ_4K,
+ I915_GEM_HWS_PXP_ADDR,
+ &pxp_lock, "pxp_context");
+ if (IS_ERR(ce)) {
+ drm_err(&gt->i915->drm, "failed to create VCS ctx for PXP\n");
+ return PTR_ERR(ce);
+ }
+
+ pxp->ce = ce;
+
+ return 0;
+}
+
+static void destroy_vcs_context(struct intel_pxp *pxp)
+{
+ intel_engine_destroy_pinned_context(fetch_and_zero(&pxp->ce));
+}
+
+void intel_pxp_init(struct intel_pxp *pxp)
+{
+ struct intel_gt *gt = pxp_to_gt(pxp);
+ int ret;
+
+ if (!HAS_PXP(gt->i915))
+ return;
+
+ mutex_init(&pxp->tee_mutex);
+
+ /*
+ * we'll use the completion to check if there is a termination pending,
+ * so we start it as completed and we reinit it when a termination
+ * is triggered.
+ */
+ init_completion(&pxp->termination);
+ complete_all(&pxp->termination);
+
+ mutex_init(&pxp->arb_mutex);
+ INIT_WORK(&pxp->session_work, intel_pxp_session_work);
+
+ ret = create_vcs_context(pxp);
+ if (ret)
+ return;
+
+ ret = intel_pxp_tee_component_init(pxp);
+ if (ret)
+ goto out_context;
+
+ drm_info(&gt->i915->drm, "Protected Xe Path (PXP) protected content support initialized\n");
+
+ return;
+
+out_context:
+ destroy_vcs_context(pxp);
+}
+
+void intel_pxp_fini(struct intel_pxp *pxp)
+{
+ if (!intel_pxp_is_enabled(pxp))
+ return;
+
+ pxp->arb_is_valid = false;
+
+ intel_pxp_tee_component_fini(pxp);
+
+ destroy_vcs_context(pxp);
+}
+
+void intel_pxp_mark_termination_in_progress(struct intel_pxp *pxp)
+{
+ pxp->arb_is_valid = false;
+ reinit_completion(&pxp->termination);
+}
+
+static void pxp_queue_termination(struct intel_pxp *pxp)
+{
+ struct intel_gt *gt = pxp_to_gt(pxp);
+
+ /*
+ * We want to get the same effect as if we received a termination
+ * interrupt, so just pretend that we did.
+ */
+ spin_lock_irq(gt->irq_lock);
+ intel_pxp_mark_termination_in_progress(pxp);
+ pxp->session_events |= PXP_TERMINATION_REQUEST;
+ queue_work(system_unbound_wq, &pxp->session_work);
+ spin_unlock_irq(gt->irq_lock);
+}
+
+static bool pxp_component_bound(struct intel_pxp *pxp)
+{
+ bool bound = false;
+
+ mutex_lock(&pxp->tee_mutex);
+ if (pxp->pxp_component)
+ bound = true;
+ mutex_unlock(&pxp->tee_mutex);
+
+ return bound;
+}
+
+/*
+ * the arb session is restarted from the irq work when we receive the
+ * termination completion interrupt
+ */
+int intel_pxp_start(struct intel_pxp *pxp)
+{
+ int ret = 0;
+
+ if (!intel_pxp_is_enabled(pxp))
+ return -ENODEV;
+
+ if (wait_for(pxp_component_bound(pxp), 250))
+ return -ENXIO;
+
+ mutex_lock(&pxp->arb_mutex);
+
+ if (pxp->arb_is_valid)
+ goto unlock;
+
+ pxp_queue_termination(pxp);
+
+ if (!wait_for_completion_timeout(&pxp->termination,
+ msecs_to_jiffies(250))) {
+ ret = -ETIMEDOUT;
+ goto unlock;
+ }
+
+ /* make sure the compiler doesn't optimize the double access */
+ barrier();
+
+ if (!pxp->arb_is_valid)
+ ret = -EIO;
+
+unlock:
+ mutex_unlock(&pxp->arb_mutex);
+ return ret;
+}
+
+void intel_pxp_init_hw(struct intel_pxp *pxp)
+{
+ kcr_pxp_enable(pxp_to_gt(pxp));
+ intel_pxp_irq_enable(pxp);
+}
+
+void intel_pxp_fini_hw(struct intel_pxp *pxp)
+{
+ kcr_pxp_disable(pxp_to_gt(pxp));
+
+ intel_pxp_irq_disable(pxp);
+}
+
+int intel_pxp_key_check(struct intel_pxp *pxp,
+ struct drm_i915_gem_object *obj,
+ bool assign)
+{
+ if (!intel_pxp_is_active(pxp))
+ return -ENODEV;
+
+ if (!i915_gem_object_is_protected(obj))
+ return -EINVAL;
+
+ GEM_BUG_ON(!pxp->key_instance);
+
+ /*
+ * If this is the first time we're using this object, it's not
+ * encrypted yet; it will be encrypted with the current key, so mark it
+ * as such. If the object is already encrypted, check instead if the
+ * used key is still valid.
+ */
+ if (!obj->pxp_key_instance && assign)
+ obj->pxp_key_instance = pxp->key_instance;
+
+ if (obj->pxp_key_instance != pxp->key_instance)
+ return -ENOEXEC;
+
+ return 0;
+}
+
+void intel_pxp_invalidate(struct intel_pxp *pxp)
+{
+ struct drm_i915_private *i915 = pxp_to_gt(pxp)->i915;
+ struct i915_gem_context *ctx, *cn;
+
+ /* ban all contexts marked as protected */
+ spin_lock_irq(&i915->gem.contexts.lock);
+ list_for_each_entry_safe(ctx, cn, &i915->gem.contexts.list, link) {
+ struct i915_gem_engines_iter it;
+ struct intel_context *ce;
+
+ if (!kref_get_unless_zero(&ctx->ref))
+ continue;
+
+ if (likely(!i915_gem_context_uses_protected_content(ctx))) {
+ i915_gem_context_put(ctx);
+ continue;
+ }
+
+ spin_unlock_irq(&i915->gem.contexts.lock);
+
+ /*
+ * By the time we get here we are either going to suspend with
+ * quiesced execution or the HW keys are already long gone and
+ * in this case it is worthless to attempt to close the context
+ * and wait for its execution. It will hang the GPU if it has
+ * not already. So, as a fast mitigation, we can ban the
+ * context as quickly as we can. That might race with the
+ * execbuffer, but currently this is the best that can be done.
+ */
+ for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it)
+ intel_context_ban(ce, NULL);
+ i915_gem_context_unlock_engines(ctx);
+
+ /*
+ * The context has been banned, no need to keep the wakeref.
+ * This is safe from races because the only other place this
+ * is touched is context_release and we're holding a ctx ref
+ */
+ if (ctx->pxp_wakeref) {
+ intel_runtime_pm_put(&i915->runtime_pm,
+ ctx->pxp_wakeref);
+ ctx->pxp_wakeref = 0;
+ }
+
+ spin_lock_irq(&i915->gem.contexts.lock);
+ list_safe_reset_next(ctx, cn, link);
+ i915_gem_context_put(ctx);
+ }
+ spin_unlock_irq(&i915->gem.contexts.lock);
+}
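[Not part of this commit: for readers of the DOC: PXP comment above, a minimal userspace sketch of the opt-in flow it describes, using the uapi flags it references. The function name and the to_user_pointer() macro are illustrative; error handling is omitted, and per the uapi docs a protected context must also be marked non-recoverable.]

#include <stdint.h>
#include <sys/ioctl.h>
#include <drm/i915_drm.h>

#define to_user_pointer(p) ((__u64)(uintptr_t)(p))

static __u32 create_protected_ctx_and_bo(int fd, __u32 *bo_handle)
{
	struct drm_i915_gem_context_create_ext_setparam p_protected = {
		.base = { .name = I915_CONTEXT_CREATE_EXT_SETPARAM },
		.param = {
			.param = I915_CONTEXT_PARAM_PROTECTED_CONTENT,
			.value = 1,
		},
	};
	/* Protected contexts must also opt out of recovery after a reset. */
	struct drm_i915_gem_context_create_ext_setparam p_norecover = {
		.base = {
			.name = I915_CONTEXT_CREATE_EXT_SETPARAM,
			.next_extension = to_user_pointer(&p_protected),
		},
		.param = {
			.param = I915_CONTEXT_PARAM_RECOVERABLE,
			.value = 0,
		},
	};
	struct drm_i915_gem_context_create_ext ctx_create = {
		.flags = I915_CONTEXT_CREATE_FLAGS_USE_EXTENSIONS,
		.extensions = to_user_pointer(&p_norecover),
	};

	/* Objects opt in to PXP encryption at creation time. */
	struct drm_i915_gem_create_ext_protected_content protected_ext = {
		.base = { .name = I915_GEM_CREATE_EXT_PROTECTED_CONTENT },
	};
	struct drm_i915_gem_create_ext bo_create = {
		.size = 4096,
		.extensions = to_user_pointer(&protected_ext),
	};

	ioctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_CREATE_EXT, &ctx_create);
	ioctl(fd, DRM_IOCTL_I915_GEM_CREATE_EXT, &bo_create);

	*bo_handle = bo_create.handle;
	return ctx_create.ctx_id;
}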
diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp.h b/drivers/gpu/drm/i915/pxp/intel_pxp.h
new file mode 100644
index 000000000..73847e535
--- /dev/null
+++ b/drivers/gpu/drm/i915/pxp/intel_pxp.h
@@ -0,0 +1,67 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright(c) 2020, Intel Corporation. All rights reserved.
+ */
+
+#ifndef __INTEL_PXP_H__
+#define __INTEL_PXP_H__
+
+#include <linux/errno.h>
+#include <linux/types.h>
+
+struct intel_pxp;
+struct drm_i915_gem_object;
+
+#ifdef CONFIG_DRM_I915_PXP
+struct intel_gt *pxp_to_gt(const struct intel_pxp *pxp);
+bool intel_pxp_is_enabled(const struct intel_pxp *pxp);
+bool intel_pxp_is_active(const struct intel_pxp *pxp);
+
+void intel_pxp_init(struct intel_pxp *pxp);
+void intel_pxp_fini(struct intel_pxp *pxp);
+
+void intel_pxp_init_hw(struct intel_pxp *pxp);
+void intel_pxp_fini_hw(struct intel_pxp *pxp);
+
+void intel_pxp_mark_termination_in_progress(struct intel_pxp *pxp);
+
+int intel_pxp_start(struct intel_pxp *pxp);
+
+int intel_pxp_key_check(struct intel_pxp *pxp,
+ struct drm_i915_gem_object *obj,
+ bool assign);
+
+void intel_pxp_invalidate(struct intel_pxp *pxp);
+#else
+static inline void intel_pxp_init(struct intel_pxp *pxp)
+{
+}
+
+static inline void intel_pxp_fini(struct intel_pxp *pxp)
+{
+}
+
+static inline int intel_pxp_start(struct intel_pxp *pxp)
+{
+ return -ENODEV;
+}
+
+static inline bool intel_pxp_is_enabled(const struct intel_pxp *pxp)
+{
+ return false;
+}
+
+static inline bool intel_pxp_is_active(const struct intel_pxp *pxp)
+{
+ return false;
+}
+
+static inline int intel_pxp_key_check(struct intel_pxp *pxp,
+ struct drm_i915_gem_object *obj,
+ bool assign)
+{
+ return -ENODEV;
+}
+#endif
+
+#endif /* __INTEL_PXP_H__ */
diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp_cmd.c b/drivers/gpu/drm/i915/pxp/intel_pxp_cmd.c
new file mode 100644
index 000000000..f41e45763
--- /dev/null
+++ b/drivers/gpu/drm/i915/pxp/intel_pxp_cmd.c
@@ -0,0 +1,141 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright(c) 2020, Intel Corporation. All rights reserved.
+ */
+
+#include "intel_pxp.h"
+#include "intel_pxp_cmd.h"
+#include "intel_pxp_session.h"
+#include "gt/intel_context.h"
+#include "gt/intel_engine_pm.h"
+#include "gt/intel_gpu_commands.h"
+#include "gt/intel_ring.h"
+
+#include "i915_trace.h"
+
+/* stall until prior PXP and MFX/HCP/HUC objects are completed */
+#define MFX_WAIT_PXP (MFX_WAIT | \
+ MFX_WAIT_DW0_PXP_SYNC_CONTROL_FLAG | \
+ MFX_WAIT_DW0_MFX_SYNC_CONTROL_FLAG)
+
+static u32 *pxp_emit_session_selection(u32 *cs, u32 idx)
+{
+ *cs++ = MFX_WAIT_PXP;
+
+ /* pxp off */
+ *cs++ = MI_FLUSH_DW;
+ *cs++ = 0;
+ *cs++ = 0;
+
+ /* select session */
+ *cs++ = MI_SET_APPID | MI_SET_APPID_SESSION_ID(idx);
+
+ *cs++ = MFX_WAIT_PXP;
+
+ /* pxp on */
+ *cs++ = MI_FLUSH_DW | MI_FLUSH_DW_PROTECTED_MEM_EN |
+ MI_FLUSH_DW_OP_STOREDW | MI_FLUSH_DW_STORE_INDEX;
+ *cs++ = I915_GEM_HWS_PXP_ADDR | MI_FLUSH_DW_USE_GTT;
+ *cs++ = 0;
+
+ *cs++ = MFX_WAIT_PXP;
+
+ return cs;
+}
+
+static u32 *pxp_emit_inline_termination(u32 *cs)
+{
+ /* session inline termination */
+ *cs++ = CRYPTO_KEY_EXCHANGE;
+ *cs++ = 0;
+
+ return cs;
+}
+
+static u32 *pxp_emit_session_termination(u32 *cs, u32 idx)
+{
+ cs = pxp_emit_session_selection(cs, idx);
+ cs = pxp_emit_inline_termination(cs);
+
+ return cs;
+}
+
+static u32 *pxp_emit_wait(u32 *cs)
+{
+ /* wait for cmds to go through */
+ *cs++ = MFX_WAIT_PXP;
+ *cs++ = 0;
+
+ return cs;
+}
+
+/*
+ * if we ever need to terminate more than one session, we can submit multiple
+ * selections and terminations back-to-back with a single wait at the end
+ */
+#define SELECTION_LEN 10
+#define TERMINATION_LEN 2
+#define SESSION_TERMINATION_LEN(x) ((SELECTION_LEN + TERMINATION_LEN) * (x))
+#define WAIT_LEN 2
+
+static void pxp_request_commit(struct i915_request *rq)
+{
+ struct i915_sched_attr attr = { .priority = I915_PRIORITY_MAX };
+ struct intel_timeline * const tl = i915_request_timeline(rq);
+
+ lockdep_unpin_lock(&tl->mutex, rq->cookie);
+
+ trace_i915_request_add(rq);
+ __i915_request_commit(rq);
+ __i915_request_queue(rq, &attr);
+
+ mutex_unlock(&tl->mutex);
+}
+
+int intel_pxp_terminate_session(struct intel_pxp *pxp, u32 id)
+{
+ struct i915_request *rq;
+ struct intel_context *ce = pxp->ce;
+ u32 *cs;
+ int err = 0;
+
+ if (!intel_pxp_is_enabled(pxp))
+ return 0;
+
+ rq = i915_request_create(ce);
+ if (IS_ERR(rq))
+ return PTR_ERR(rq);
+
+ if (ce->engine->emit_init_breadcrumb) {
+ err = ce->engine->emit_init_breadcrumb(rq);
+ if (err)
+ goto out_rq;
+ }
+
+ cs = intel_ring_begin(rq, SESSION_TERMINATION_LEN(1) + WAIT_LEN);
+ if (IS_ERR(cs)) {
+ err = PTR_ERR(cs);
+ goto out_rq;
+ }
+
+ cs = pxp_emit_session_termination(cs, id);
+ cs = pxp_emit_wait(cs);
+
+ intel_ring_advance(rq, cs);
+
+out_rq:
+ i915_request_get(rq);
+
+ if (unlikely(err))
+ i915_request_set_error_once(rq, err);
+
+ pxp_request_commit(rq);
+
+ if (!err && i915_request_wait(rq, 0, HZ / 5) < 0)
+ err = -ETIME;
+
+ i915_request_put(rq);
+
+ return err;
+}
+
diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp_cmd.h b/drivers/gpu/drm/i915/pxp/intel_pxp_cmd.h
new file mode 100644
index 000000000..6d6299543
--- /dev/null
+++ b/drivers/gpu/drm/i915/pxp/intel_pxp_cmd.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright(c) 2020, Intel Corporation. All rights reserved.
+ */
+
+#ifndef __INTEL_PXP_CMD_H__
+#define __INTEL_PXP_CMD_H__
+
+#include <linux/types.h>
+
+struct intel_pxp;
+
+int intel_pxp_terminate_session(struct intel_pxp *pxp, u32 idx);
+
+#endif /* __INTEL_PXP_CMD_H__ */
diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp_debugfs.c b/drivers/gpu/drm/i915/pxp/intel_pxp_debugfs.c
new file mode 100644
index 000000000..4359e8be4
--- /dev/null
+++ b/drivers/gpu/drm/i915/pxp/intel_pxp_debugfs.c
@@ -0,0 +1,81 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2021 Intel Corporation
+ */
+
+#include <linux/debugfs.h>
+#include <linux/string_helpers.h>
+
+#include <drm/drm_print.h>
+
+#include "gt/intel_gt_debugfs.h"
+#include "i915_drv.h"
+#include "intel_pxp.h"
+#include "intel_pxp_debugfs.h"
+#include "intel_pxp_irq.h"
+
+static int pxp_info_show(struct seq_file *m, void *data)
+{
+ struct intel_pxp *pxp = m->private;
+ struct drm_printer p = drm_seq_file_printer(m);
+ bool enabled = intel_pxp_is_enabled(pxp);
+
+ if (!enabled) {
+ drm_printf(&p, "pxp disabled\n");
+ return 0;
+ }
+
+ drm_printf(&p, "active: %s\n", str_yes_no(intel_pxp_is_active(pxp)));
+ drm_printf(&p, "instance counter: %u\n", pxp->key_instance);
+
+ return 0;
+}
+DEFINE_INTEL_GT_DEBUGFS_ATTRIBUTE(pxp_info);
+
+static int pxp_terminate_get(void *data, u64 *val)
+{
+ /* nothing to read */
+ return -EPERM;
+}
+
+static int pxp_terminate_set(void *data, u64 val)
+{
+ struct intel_pxp *pxp = data;
+ struct intel_gt *gt = pxp_to_gt(pxp);
+
+ if (!intel_pxp_is_active(pxp))
+ return -ENODEV;
+
+ /* simulate a termination interrupt */
+ spin_lock_irq(gt->irq_lock);
+ intel_pxp_irq_handler(pxp, GEN12_DISPLAY_PXP_STATE_TERMINATED_INTERRUPT);
+ spin_unlock_irq(gt->irq_lock);
+
+ if (!wait_for_completion_timeout(&pxp->termination,
+ msecs_to_jiffies(100)))
+ return -ETIMEDOUT;
+
+ return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(pxp_terminate_fops, pxp_terminate_get, pxp_terminate_set, "%llx\n");
+void intel_pxp_debugfs_register(struct intel_pxp *pxp, struct dentry *gt_root)
+{
+ static const struct intel_gt_debugfs_file files[] = {
+ { "info", &pxp_info_fops, NULL },
+ { "terminate_state", &pxp_terminate_fops, NULL },
+ };
+ struct dentry *root;
+
+ if (!gt_root)
+ return;
+
+ if (!HAS_PXP((pxp_to_gt(pxp)->i915)))
+ return;
+
+ root = debugfs_create_dir("pxp", gt_root);
+ if (IS_ERR(root))
+ return;
+
+ intel_gt_debugfs_register_files(root, files, ARRAY_SIZE(files), pxp);
+}
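[Not part of this commit: with CONFIG_DRM_I915_PXP enabled, the nodes registered above land under the gt debugfs directory; the exact path below is an assumption that depends on kernel version and DRM minor number. A sketch of poking terminate_state to simulate a termination:]

#include <fcntl.h>
#include <unistd.h>

static void pxp_simulate_termination(void)
{
	/* Hypothetical path; adjust the DRM minor for your system. */
	int fd = open("/sys/kernel/debug/dri/0/gt/pxp/terminate_state", O_WRONLY);

	if (fd < 0)
		return;

	write(fd, "1", 1); /* any value works: pxp_terminate_set() ignores it */
	close(fd);
}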
diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp_debugfs.h b/drivers/gpu/drm/i915/pxp/intel_pxp_debugfs.h
new file mode 100644
index 000000000..7e0c3d2f5
--- /dev/null
+++ b/drivers/gpu/drm/i915/pxp/intel_pxp_debugfs.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2021 Intel Corporation
+ */
+
+#ifndef __INTEL_PXP_DEBUGFS_H__
+#define __INTEL_PXP_DEBUGFS_H__
+
+struct intel_pxp;
+struct dentry;
+
+#ifdef CONFIG_DRM_I915_PXP
+void intel_pxp_debugfs_register(struct intel_pxp *pxp, struct dentry *root);
+#else
+static inline void
+intel_pxp_debugfs_register(struct intel_pxp *pxp, struct dentry *root)
+{
+}
+#endif
+
+#endif /* __INTEL_PXP_DEBUGFS_H__ */
diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp_irq.c b/drivers/gpu/drm/i915/pxp/intel_pxp_irq.c
new file mode 100644
index 000000000..c28be4307
--- /dev/null
+++ b/drivers/gpu/drm/i915/pxp/intel_pxp_irq.c
@@ -0,0 +1,102 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright(c) 2020 Intel Corporation.
+ */
+#include <linux/workqueue.h>
+#include "intel_pxp.h"
+#include "intel_pxp_irq.h"
+#include "intel_pxp_session.h"
+#include "gt/intel_gt_irq.h"
+#include "gt/intel_gt_regs.h"
+#include "gt/intel_gt_types.h"
+#include "i915_irq.h"
+#include "i915_reg.h"
+#include "intel_runtime_pm.h"
+
+/**
+ * intel_pxp_irq_handler - Handles PXP interrupts.
+ * @pxp: pointer to pxp struct
+ * @iir: interrupt vector
+ */
+void intel_pxp_irq_handler(struct intel_pxp *pxp, u16 iir)
+{
+ struct intel_gt *gt = pxp_to_gt(pxp);
+
+ if (GEM_WARN_ON(!intel_pxp_is_enabled(pxp)))
+ return;
+
+ lockdep_assert_held(gt->irq_lock);
+
+ if (unlikely(!iir))
+ return;
+
+ if (iir & (GEN12_DISPLAY_PXP_STATE_TERMINATED_INTERRUPT |
+ GEN12_DISPLAY_APP_TERMINATED_PER_FW_REQ_INTERRUPT)) {
+ /* immediately mark PXP as inactive on termination */
+ intel_pxp_mark_termination_in_progress(pxp);
+ pxp->session_events |= PXP_TERMINATION_REQUEST | PXP_INVAL_REQUIRED;
+ }
+
+ if (iir & GEN12_DISPLAY_STATE_RESET_COMPLETE_INTERRUPT)
+ pxp->session_events |= PXP_TERMINATION_COMPLETE;
+
+ if (pxp->session_events)
+ queue_work(system_unbound_wq, &pxp->session_work);
+}
+
+static inline void __pxp_set_interrupts(struct intel_gt *gt, u32 interrupts)
+{
+ struct intel_uncore *uncore = gt->uncore;
+ const u32 mask = interrupts << 16;
+
+ intel_uncore_write(uncore, GEN11_CRYPTO_RSVD_INTR_ENABLE, mask);
+ intel_uncore_write(uncore, GEN11_CRYPTO_RSVD_INTR_MASK, ~mask);
+}
+
+static inline void pxp_irq_reset(struct intel_gt *gt)
+{
+ spin_lock_irq(gt->irq_lock);
+ gen11_gt_reset_one_iir(gt, 0, GEN11_KCR);
+ spin_unlock_irq(gt->irq_lock);
+}
+
+void intel_pxp_irq_enable(struct intel_pxp *pxp)
+{
+ struct intel_gt *gt = pxp_to_gt(pxp);
+
+ spin_lock_irq(gt->irq_lock);
+
+ if (!pxp->irq_enabled)
+ WARN_ON_ONCE(gen11_gt_reset_one_iir(gt, 0, GEN11_KCR));
+
+ __pxp_set_interrupts(gt, GEN12_PXP_INTERRUPTS);
+ pxp->irq_enabled = true;
+
+ spin_unlock_irq(gt->irq_lock);
+}
+
+void intel_pxp_irq_disable(struct intel_pxp *pxp)
+{
+ struct intel_gt *gt = pxp_to_gt(pxp);
+
+ /*
+ * We always need to submit a global termination when we re-enable the
+ * interrupts, so there is no need to make sure that the session state
+ * makes sense at the end of this function. Just make sure this is not
+	 * called in a path where the driver considers the session as valid and
+ * doesn't call a termination on restart.
+ */
+ GEM_WARN_ON(intel_pxp_is_active(pxp));
+
+ spin_lock_irq(gt->irq_lock);
+
+ pxp->irq_enabled = false;
+ __pxp_set_interrupts(gt, 0);
+
+ spin_unlock_irq(gt->irq_lock);
+ intel_synchronize_irq(gt->i915);
+
+ pxp_irq_reset(gt);
+
+ flush_work(&pxp->session_work);
+}
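[Not part of this commit: reading __pxp_set_interrupts() above, the KCR/PXP event bits appear to occupy the upper 16 bits of the shared GEN11_CRYPTO_RSVD enable/mask register pair, hence the interrupts << 16 shift; writing the complement to the mask register unmasks exactly the enabled events.]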
diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp_irq.h b/drivers/gpu/drm/i915/pxp/intel_pxp_irq.h
new file mode 100644
index 000000000..8b5793654
--- /dev/null
+++ b/drivers/gpu/drm/i915/pxp/intel_pxp_irq.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright(c) 2020, Intel Corporation. All rights reserved.
+ */
+
+#ifndef __INTEL_PXP_IRQ_H__
+#define __INTEL_PXP_IRQ_H__
+
+#include <linux/types.h>
+
+struct intel_pxp;
+
+#define GEN12_DISPLAY_PXP_STATE_TERMINATED_INTERRUPT BIT(1)
+#define GEN12_DISPLAY_APP_TERMINATED_PER_FW_REQ_INTERRUPT BIT(2)
+#define GEN12_DISPLAY_STATE_RESET_COMPLETE_INTERRUPT BIT(3)
+
+#define GEN12_PXP_INTERRUPTS \
+ (GEN12_DISPLAY_PXP_STATE_TERMINATED_INTERRUPT | \
+ GEN12_DISPLAY_APP_TERMINATED_PER_FW_REQ_INTERRUPT | \
+ GEN12_DISPLAY_STATE_RESET_COMPLETE_INTERRUPT)
+
+#ifdef CONFIG_DRM_I915_PXP
+void intel_pxp_irq_enable(struct intel_pxp *pxp);
+void intel_pxp_irq_disable(struct intel_pxp *pxp);
+void intel_pxp_irq_handler(struct intel_pxp *pxp, u16 iir);
+#else
+static inline void intel_pxp_irq_handler(struct intel_pxp *pxp, u16 iir)
+{
+}
+#endif
+
+#endif /* __INTEL_PXP_IRQ_H__ */
diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp_pm.c b/drivers/gpu/drm/i915/pxp/intel_pxp_pm.c
new file mode 100644
index 000000000..6a7d4e2ee
--- /dev/null
+++ b/drivers/gpu/drm/i915/pxp/intel_pxp_pm.c
@@ -0,0 +1,61 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright(c) 2020 Intel Corporation.
+ */
+
+#include "intel_pxp.h"
+#include "intel_pxp_irq.h"
+#include "intel_pxp_pm.h"
+#include "intel_pxp_session.h"
+#include "i915_drv.h"
+
+void intel_pxp_suspend_prepare(struct intel_pxp *pxp)
+{
+ if (!intel_pxp_is_enabled(pxp))
+ return;
+
+ pxp->arb_is_valid = false;
+
+ intel_pxp_invalidate(pxp);
+}
+
+void intel_pxp_suspend(struct intel_pxp *pxp)
+{
+ intel_wakeref_t wakeref;
+
+ if (!intel_pxp_is_enabled(pxp))
+ return;
+
+ with_intel_runtime_pm(&pxp_to_gt(pxp)->i915->runtime_pm, wakeref) {
+ intel_pxp_fini_hw(pxp);
+ pxp->hw_state_invalidated = false;
+ }
+}
+
+void intel_pxp_resume(struct intel_pxp *pxp)
+{
+ if (!intel_pxp_is_enabled(pxp))
+ return;
+
+ /*
+ * The PXP component gets automatically unbound when we go into S3 and
+ * re-bound after we come out, so in that scenario we can defer the
+ * hw init to the bind call.
+ */
+ if (!pxp->pxp_component)
+ return;
+
+ intel_pxp_init_hw(pxp);
+}
+
+void intel_pxp_runtime_suspend(struct intel_pxp *pxp)
+{
+ if (!intel_pxp_is_enabled(pxp))
+ return;
+
+ pxp->arb_is_valid = false;
+
+ intel_pxp_fini_hw(pxp);
+
+ pxp->hw_state_invalidated = false;
+}
diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp_pm.h b/drivers/gpu/drm/i915/pxp/intel_pxp_pm.h
new file mode 100644
index 000000000..586be7691
--- /dev/null
+++ b/drivers/gpu/drm/i915/pxp/intel_pxp_pm.h
@@ -0,0 +1,37 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright(c) 2020, Intel Corporation. All rights reserved.
+ */
+
+#ifndef __INTEL_PXP_PM_H__
+#define __INTEL_PXP_PM_H__
+
+struct intel_pxp;
+
+#ifdef CONFIG_DRM_I915_PXP
+void intel_pxp_suspend_prepare(struct intel_pxp *pxp);
+void intel_pxp_suspend(struct intel_pxp *pxp);
+void intel_pxp_resume(struct intel_pxp *pxp);
+void intel_pxp_runtime_suspend(struct intel_pxp *pxp);
+#else
+static inline void intel_pxp_suspend_prepare(struct intel_pxp *pxp)
+{
+}
+
+static inline void intel_pxp_suspend(struct intel_pxp *pxp)
+{
+}
+
+static inline void intel_pxp_resume(struct intel_pxp *pxp)
+{
+}
+
+static inline void intel_pxp_runtime_suspend(struct intel_pxp *pxp)
+{
+}
+#endif
+static inline void intel_pxp_runtime_resume(struct intel_pxp *pxp)
+{
+ intel_pxp_resume(pxp);
+}
+#endif /* __INTEL_PXP_PM_H__ */
diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp_session.c b/drivers/gpu/drm/i915/pxp/intel_pxp_session.c
new file mode 100644
index 000000000..1bb5b5249
--- /dev/null
+++ b/drivers/gpu/drm/i915/pxp/intel_pxp_session.c
@@ -0,0 +1,174 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright(c) 2020, Intel Corporation. All rights reserved.
+ */
+
+#include "i915_drv.h"
+
+#include "intel_pxp.h"
+#include "intel_pxp_cmd.h"
+#include "intel_pxp_session.h"
+#include "intel_pxp_tee.h"
+#include "intel_pxp_types.h"
+
+#define ARB_SESSION I915_PROTECTED_CONTENT_DEFAULT_SESSION /* shorter define */
+
+#define GEN12_KCR_SIP _MMIO(0x32260) /* KCR hwdrm session in play 0-31 */
+
+/* PXP global terminate register for session termination */
+#define PXP_GLOBAL_TERMINATE _MMIO(0x320f8)
+
+static bool intel_pxp_session_is_in_play(struct intel_pxp *pxp, u32 id)
+{
+ struct intel_uncore *uncore = pxp_to_gt(pxp)->uncore;
+ intel_wakeref_t wakeref;
+ u32 sip = 0;
+
+ /* if we're suspended the session is considered off */
+ with_intel_runtime_pm_if_in_use(uncore->rpm, wakeref)
+ sip = intel_uncore_read(uncore, GEN12_KCR_SIP);
+
+ return sip & BIT(id);
+}
+
+static int pxp_wait_for_session_state(struct intel_pxp *pxp, u32 id, bool in_play)
+{
+ struct intel_uncore *uncore = pxp_to_gt(pxp)->uncore;
+ intel_wakeref_t wakeref;
+ u32 mask = BIT(id);
+ int ret;
+
+ /* if we're suspended the session is considered off */
+ wakeref = intel_runtime_pm_get_if_in_use(uncore->rpm);
+ if (!wakeref)
+ return in_play ? -ENODEV : 0;
+
+ ret = intel_wait_for_register(uncore,
+ GEN12_KCR_SIP,
+ mask,
+ in_play ? mask : 0,
+ 100);
+
+ intel_runtime_pm_put(uncore->rpm, wakeref);
+
+ return ret;
+}
+
+static int pxp_create_arb_session(struct intel_pxp *pxp)
+{
+ struct intel_gt *gt = pxp_to_gt(pxp);
+ int ret;
+
+ pxp->arb_is_valid = false;
+
+ if (intel_pxp_session_is_in_play(pxp, ARB_SESSION)) {
+ drm_err(&gt->i915->drm, "arb session already in play at creation time\n");
+ return -EEXIST;
+ }
+
+ ret = intel_pxp_tee_cmd_create_arb_session(pxp, ARB_SESSION);
+ if (ret) {
+ drm_err(&gt->i915->drm, "tee cmd for arb session creation failed\n");
+ return ret;
+ }
+
+ ret = pxp_wait_for_session_state(pxp, ARB_SESSION, true);
+ if (ret) {
+ drm_err(&gt->i915->drm, "arb session failed to go in play\n");
+ return ret;
+ }
+
+ if (!++pxp->key_instance)
+ ++pxp->key_instance;
+
+ pxp->arb_is_valid = true;
+
+ return 0;
+}
+
+static int pxp_terminate_arb_session_and_global(struct intel_pxp *pxp)
+{
+ int ret;
+ struct intel_gt *gt = pxp_to_gt(pxp);
+
+	/* must mark termination in progress before calling this function */
+ GEM_WARN_ON(pxp->arb_is_valid);
+
+ /* terminate the hw sessions */
+ ret = intel_pxp_terminate_session(pxp, ARB_SESSION);
+ if (ret) {
+ drm_err(&gt->i915->drm, "Failed to submit session termination\n");
+ return ret;
+ }
+
+ ret = pxp_wait_for_session_state(pxp, ARB_SESSION, false);
+ if (ret) {
+ drm_err(&gt->i915->drm, "Session state did not clear\n");
+ return ret;
+ }
+
+ intel_uncore_write(gt->uncore, PXP_GLOBAL_TERMINATE, 1);
+
+ return ret;
+}
+
+static void pxp_terminate(struct intel_pxp *pxp)
+{
+ int ret;
+
+ pxp->hw_state_invalidated = true;
+
+ /*
+ * if we fail to submit the termination there is no point in waiting for
+ * it to complete. PXP will be marked as non-active until the next
+ * termination is issued.
+ */
+ ret = pxp_terminate_arb_session_and_global(pxp);
+ if (ret)
+ complete_all(&pxp->termination);
+}
+
+static void pxp_terminate_complete(struct intel_pxp *pxp)
+{
+ /* Re-create the arb session after teardown handle complete */
+ if (fetch_and_zero(&pxp->hw_state_invalidated))
+ pxp_create_arb_session(pxp);
+
+ complete_all(&pxp->termination);
+}
+
+void intel_pxp_session_work(struct work_struct *work)
+{
+ struct intel_pxp *pxp = container_of(work, typeof(*pxp), session_work);
+ struct intel_gt *gt = pxp_to_gt(pxp);
+ intel_wakeref_t wakeref;
+ u32 events = 0;
+
+ spin_lock_irq(gt->irq_lock);
+ events = fetch_and_zero(&pxp->session_events);
+ spin_unlock_irq(gt->irq_lock);
+
+ if (!events)
+ return;
+
+ if (events & PXP_INVAL_REQUIRED)
+ intel_pxp_invalidate(pxp);
+
+ /*
+ * If we're processing an event while suspending then don't bother,
+ * we're going to re-init everything on resume anyway.
+ */
+ wakeref = intel_runtime_pm_get_if_in_use(gt->uncore->rpm);
+ if (!wakeref)
+ return;
+
+ if (events & PXP_TERMINATION_REQUEST) {
+ events &= ~PXP_TERMINATION_COMPLETE;
+ pxp_terminate(pxp);
+ }
+
+ if (events & PXP_TERMINATION_COMPLETE)
+ pxp_terminate_complete(pxp);
+
+ intel_runtime_pm_put(gt->uncore->rpm, wakeref);
+}
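[Not part of this commit: tying the worker above to the interrupt handler in intel_pxp_irq.c, the end-to-end flow is: the KCR interrupt sets PXP_TERMINATION_REQUEST, the worker submits the termination, the HW raises the state-reset-complete interrupt which queues the worker again with PXP_TERMINATION_COMPLETE, and pxp_terminate_complete() then re-creates the arb session and completes &pxp->termination for any waiter in intel_pxp_start().]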
diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp_session.h b/drivers/gpu/drm/i915/pxp/intel_pxp_session.h
new file mode 100644
index 000000000..ba4c9d2b9
--- /dev/null
+++ b/drivers/gpu/drm/i915/pxp/intel_pxp_session.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright(c) 2020, Intel Corporation. All rights reserved.
+ */
+
+#ifndef __INTEL_PXP_SESSION_H__
+#define __INTEL_PXP_SESSION_H__
+
+#include <linux/types.h>
+
+struct work_struct;
+
+void intel_pxp_session_work(struct work_struct *work);
+
+#endif /* __INTEL_PXP_SESSION_H__ */
diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp_tee.c b/drivers/gpu/drm/i915/pxp/intel_pxp_tee.c
new file mode 100644
index 000000000..4b6f5655f
--- /dev/null
+++ b/drivers/gpu/drm/i915/pxp/intel_pxp_tee.c
@@ -0,0 +1,179 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright(c) 2020 Intel Corporation.
+ */
+
+#include <linux/component.h>
+
+#include <drm/i915_pxp_tee_interface.h>
+#include <drm/i915_component.h>
+
+#include "i915_drv.h"
+#include "intel_pxp.h"
+#include "intel_pxp_session.h"
+#include "intel_pxp_tee.h"
+#include "intel_pxp_tee_interface.h"
+
+static inline struct intel_pxp *i915_dev_to_pxp(struct device *i915_kdev)
+{
+ struct drm_i915_private *i915 = kdev_to_i915(i915_kdev);
+
+ return &to_gt(i915)->pxp;
+}
+
+static int intel_pxp_tee_io_message(struct intel_pxp *pxp,
+ void *msg_in, u32 msg_in_size,
+ void *msg_out, u32 msg_out_max_size,
+ u32 *msg_out_rcv_size)
+{
+ struct drm_i915_private *i915 = pxp_to_gt(pxp)->i915;
+ struct i915_pxp_component *pxp_component = pxp->pxp_component;
+ int ret = 0;
+
+ mutex_lock(&pxp->tee_mutex);
+
+ /*
+ * The binding of the component is asynchronous from i915 probe, so we
+ * can't be sure it has happened.
+ */
+ if (!pxp_component) {
+ ret = -ENODEV;
+ goto unlock;
+ }
+
+ ret = pxp_component->ops->send(pxp_component->tee_dev, msg_in, msg_in_size);
+ if (ret) {
+ drm_err(&i915->drm, "Failed to send PXP TEE message\n");
+ goto unlock;
+ }
+
+ ret = pxp_component->ops->recv(pxp_component->tee_dev, msg_out, msg_out_max_size);
+ if (ret < 0) {
+ drm_err(&i915->drm, "Failed to receive PXP TEE message\n");
+ goto unlock;
+ }
+
+ if (ret > msg_out_max_size) {
+ drm_err(&i915->drm,
+ "Failed to receive PXP TEE message due to unexpected output size\n");
+ ret = -ENOSPC;
+ goto unlock;
+ }
+
+ if (msg_out_rcv_size)
+ *msg_out_rcv_size = ret;
+
+ ret = 0;
+unlock:
+ mutex_unlock(&pxp->tee_mutex);
+ return ret;
+}
+
+/**
+ * i915_pxp_tee_component_bind - bind function to pass the function pointers to pxp_tee
+ * @i915_kdev: pointer to i915 kernel device
+ * @tee_kdev: pointer to tee kernel device
+ * @data: pointer to the i915_pxp_component struct containing the function pointers
+ *
+ * This bind function is called during the system boot or resume from system sleep.
+ *
+ * Return: return 0 if successful.
+ */
+static int i915_pxp_tee_component_bind(struct device *i915_kdev,
+ struct device *tee_kdev, void *data)
+{
+ struct drm_i915_private *i915 = kdev_to_i915(i915_kdev);
+ struct intel_pxp *pxp = i915_dev_to_pxp(i915_kdev);
+ intel_wakeref_t wakeref;
+
+ mutex_lock(&pxp->tee_mutex);
+ pxp->pxp_component = data;
+ pxp->pxp_component->tee_dev = tee_kdev;
+ mutex_unlock(&pxp->tee_mutex);
+
+ /* if we are suspended, the HW will be re-initialized on resume */
+ wakeref = intel_runtime_pm_get_if_in_use(&i915->runtime_pm);
+ if (!wakeref)
+ return 0;
+
+ /* the component is required to fully start the PXP HW */
+ intel_pxp_init_hw(pxp);
+
+ intel_runtime_pm_put(&i915->runtime_pm, wakeref);
+
+ return 0;
+}
+
+static void i915_pxp_tee_component_unbind(struct device *i915_kdev,
+ struct device *tee_kdev, void *data)
+{
+ struct drm_i915_private *i915 = kdev_to_i915(i915_kdev);
+ struct intel_pxp *pxp = i915_dev_to_pxp(i915_kdev);
+ intel_wakeref_t wakeref;
+
+ with_intel_runtime_pm_if_in_use(&i915->runtime_pm, wakeref)
+ intel_pxp_fini_hw(pxp);
+
+ mutex_lock(&pxp->tee_mutex);
+ pxp->pxp_component = NULL;
+ mutex_unlock(&pxp->tee_mutex);
+}
+
+static const struct component_ops i915_pxp_tee_component_ops = {
+ .bind = i915_pxp_tee_component_bind,
+ .unbind = i915_pxp_tee_component_unbind,
+};
+
+int intel_pxp_tee_component_init(struct intel_pxp *pxp)
+{
+ int ret;
+ struct intel_gt *gt = pxp_to_gt(pxp);
+ struct drm_i915_private *i915 = gt->i915;
+
+ ret = component_add_typed(i915->drm.dev, &i915_pxp_tee_component_ops,
+ I915_COMPONENT_PXP);
+ if (ret < 0) {
+ drm_err(&i915->drm, "Failed to add PXP component (%d)\n", ret);
+ return ret;
+ }
+
+ pxp->pxp_component_added = true;
+
+ return 0;
+}
+
+void intel_pxp_tee_component_fini(struct intel_pxp *pxp)
+{
+ struct drm_i915_private *i915 = pxp_to_gt(pxp)->i915;
+
+ if (!pxp->pxp_component_added)
+ return;
+
+ component_del(i915->drm.dev, &i915_pxp_tee_component_ops);
+ pxp->pxp_component_added = false;
+}
+
+int intel_pxp_tee_cmd_create_arb_session(struct intel_pxp *pxp,
+ int arb_session_id)
+{
+ struct drm_i915_private *i915 = pxp_to_gt(pxp)->i915;
+ struct pxp_tee_create_arb_in msg_in = {0};
+ struct pxp_tee_create_arb_out msg_out = {0};
+ int ret;
+
+ msg_in.header.api_version = PXP_TEE_APIVER;
+ msg_in.header.command_id = PXP_TEE_ARB_CMDID;
+ msg_in.header.buffer_len = sizeof(msg_in) - sizeof(msg_in.header);
+ msg_in.protection_mode = PXP_TEE_ARB_PROTECTION_MODE;
+ msg_in.session_id = arb_session_id;
+
+ ret = intel_pxp_tee_io_message(pxp,
+ &msg_in, sizeof(msg_in),
+ &msg_out, sizeof(msg_out),
+ NULL);
+
+ if (ret)
+ drm_err(&i915->drm, "Failed to send tee msg ret=[%d]\n", ret);
+
+ return ret;
+}
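[Not part of this commit: the ops table that pxp_component->ops->send()/recv() above resolve to is the cross-driver contract that the mei_pxp module fills in at component bind time. Paraphrased from include/drm/i915_pxp_tee_interface.h at this kernel version; treat the exact layout as a sketch.]

struct i915_pxp_component_ops {
	/* The module providing these ops (mei_pxp). */
	struct module *owner;

	int (*send)(struct device *dev, const void *message, size_t size);
	int (*recv)(struct device *dev, void *buffer, size_t size);
};

struct i915_pxp_component {
	struct device *tee_dev;
	const struct i915_pxp_component_ops *ops;

	/* To protect the above members. */
	struct mutex mutex;
};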
diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp_tee.h b/drivers/gpu/drm/i915/pxp/intel_pxp_tee.h
new file mode 100644
index 000000000..c136053ce
--- /dev/null
+++ b/drivers/gpu/drm/i915/pxp/intel_pxp_tee.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright(c) 2020, Intel Corporation. All rights reserved.
+ */
+
+#ifndef __INTEL_PXP_TEE_H__
+#define __INTEL_PXP_TEE_H__
+
+#include "intel_pxp.h"
+
+int intel_pxp_tee_component_init(struct intel_pxp *pxp);
+void intel_pxp_tee_component_fini(struct intel_pxp *pxp);
+
+int intel_pxp_tee_cmd_create_arb_session(struct intel_pxp *pxp,
+ int arb_session_id);
+
+#endif /* __INTEL_PXP_TEE_H__ */
diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp_tee_interface.h b/drivers/gpu/drm/i915/pxp/intel_pxp_tee_interface.h
new file mode 100644
index 000000000..36e9b0868
--- /dev/null
+++ b/drivers/gpu/drm/i915/pxp/intel_pxp_tee_interface.h
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright(c) 2020, Intel Corporation. All rights reserved.
+ */
+
+#ifndef __INTEL_PXP_TEE_INTERFACE_H__
+#define __INTEL_PXP_TEE_INTERFACE_H__
+
+#include <linux/types.h>
+
+#define PXP_TEE_APIVER 0x40002
+#define PXP_TEE_ARB_CMDID 0x1e
+#define PXP_TEE_ARB_PROTECTION_MODE 0x2
+
+/* PXP TEE message header */
+struct pxp_tee_cmd_header {
+ u32 api_version;
+ u32 command_id;
+ u32 status;
+ /* Length of the message (excluding the header) */
+ u32 buffer_len;
+} __packed;
+
+/* PXP TEE message input to create an arbitrary session */
+struct pxp_tee_create_arb_in {
+ struct pxp_tee_cmd_header header;
+ u32 protection_mode;
+ u32 session_id;
+} __packed;
+
+/* PXP TEE message output to create an arbitrary session */
+struct pxp_tee_create_arb_out {
+ struct pxp_tee_cmd_header header;
+} __packed;
+
+#endif /* __INTEL_PXP_TEE_INTERFACE_H__ */
diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp_types.h b/drivers/gpu/drm/i915/pxp/intel_pxp_types.h
new file mode 100644
index 000000000..7ce5f37ee
--- /dev/null
+++ b/drivers/gpu/drm/i915/pxp/intel_pxp_types.h
@@ -0,0 +1,81 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright(c) 2020, Intel Corporation. All rights reserved.
+ */
+
+#ifndef __INTEL_PXP_TYPES_H__
+#define __INTEL_PXP_TYPES_H__
+
+#include <linux/completion.h>
+#include <linux/mutex.h>
+#include <linux/types.h>
+#include <linux/workqueue.h>
+
+struct intel_context;
+struct i915_pxp_component;
+
+/**
+ * struct intel_pxp - pxp state
+ */
+struct intel_pxp {
+ /**
+ * @pxp_component: i915_pxp_component struct of the bound mei_pxp
+ * module. Only set and cleared inside component bind/unbind functions,
+ * which are protected by &tee_mutex.
+ */
+ struct i915_pxp_component *pxp_component;
+ /**
+ * @pxp_component_added: track if the pxp component has been added.
+ * Set and cleared in tee init and fini functions respectively.
+ */
+ bool pxp_component_added;
+
+ /** @ce: kernel-owned context used for PXP operations */
+ struct intel_context *ce;
+
+ /** @arb_mutex: protects arb session start */
+ struct mutex arb_mutex;
+ /**
+ * @arb_is_valid: tracks arb session status.
+ * After a teardown, the arb session can still be in play on the HW
+ * even if the keys are gone, so we can't rely on the HW state of the
+ * session to know if it's valid and need to track the status in SW.
+ */
+ bool arb_is_valid;
+
+ /**
+ * @key_instance: tracks which key instance we're on, so we can use it
+ * to determine if an object was created using the current key or a
+ * previous one.
+ */
+ u32 key_instance;
+
+ /** @tee_mutex: protects the tee channel binding and messaging. */
+ struct mutex tee_mutex;
+
+ /**
+ * @hw_state_invalidated: if the HW perceives an attack on the integrity
+ * of the encryption it will invalidate the keys and expect SW to
+ * re-initialize the session. We keep track of this state to make sure
+ * we only re-start the arb session when required.
+ */
+ bool hw_state_invalidated;
+
+ /** @irq_enabled: tracks the status of the kcr irqs */
+ bool irq_enabled;
+ /**
+ * @termination: tracks the status of a pending termination. Only
+ * re-initialized under gt->irq_lock and completed in &session_work.
+ */
+ struct completion termination;
+
+ /** @session_work: worker that manages session events. */
+ struct work_struct session_work;
+ /** @session_events: pending session events, protected with gt->irq_lock. */
+ u32 session_events;
+#define PXP_TERMINATION_REQUEST BIT(0)
+#define PXP_TERMINATION_COMPLETE BIT(1)
+#define PXP_INVAL_REQUIRED BIT(2)
+};
+
+#endif /* __INTEL_PXP_TYPES_H__ */