From: Daniele Ceraolo Spurio <daniele.ceraolospurio@intel.com>
To: intel-gfx@lists.freedesktop.org
Cc: Alan Previn <alan.previn.teres.alexis@intel.com>,
	dri-devel@lists.freedesktop.org,
	Daniele Ceraolo Spurio <daniele.ceraolospurio@intel.com>,
	Rodrigo Vivi <rodrigo.vivi@intel.com>,
	Tomas Winkler <tomas.winkler@intel.com>,
	Vitaly Lubart <vitaly.lubart@intel.com>
Subject: [PATCH v4 08/15] drm/i915/pxp: implement function for sending tee stream command
Date: Thu,  8 Sep 2022 17:16:05 -0700
Message-ID: <20220909001612.728451-9-daniele.ceraolospurio@intel.com>
In-Reply-To: <20220909001612.728451-1-daniele.ceraolospurio@intel.com>

From: Vitaly Lubart <vitaly.lubart@intel.com>

Commands sent via the stream interface are written to a local memory
page, whose address is then provided to the GSC.
The interface supports providing a full sg with multiple pages for both
input and output messages, but since for now we only aim to support
short, synchronous messages, a single page is enough for both input and
output.
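
As an illustration, here is a minimal sketch of how a caller might use
the new helper; the message layout and the client/fence values below
are hypothetical placeholders, not part of this patch (the real users
of this interface land later in the series):

/* Hypothetical caller sketch: layout and IDs are placeholders only. */
struct example_stream_msg {
	u32 command_id;
	u32 status;
} __packed;

static int example_send_stream_msg(struct intel_pxp *pxp)
{
	struct example_stream_msg msg_in = {
		.command_id = 0x42,	/* placeholder command */
	};
	struct example_stream_msg msg_out = {};
	int ret;

	/* both messages must fit in a single page, or we get -ENOSPC */
	ret = intel_pxp_tee_stream_message(pxp, 0x1 /* client */,
					   0x0 /* fence */,
					   &msg_in, sizeof(msg_in),
					   &msg_out, sizeof(msg_out));
	if (ret < 0)
		return ret;

	return msg_out.status ? -EIO : 0;
}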

Note that the mei interface expects an sg of 4k pages, while our lmem
pages are 64k. If we ever need to support messages bigger than 4k we'll
need to convert between the two page sizes; a TODO comment in the code
records this.
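
Purely for illustration, a sketch of what that future conversion could
look like, assuming the mei side keeps expecting 4k sg entries and the
backing store stays physically contiguous; none of this is in the
patch:

/* Illustrative sketch only: split one contiguous lmem allocation into
 * an sg table of 4k entries, as the TODO anticipates.
 */
static int sketch_build_4k_sg(struct sg_table *sgt, dma_addr_t addr,
			      size_t size)
{
	unsigned int nents = DIV_ROUND_UP(size, SZ_4K);
	struct scatterlist *sg;
	unsigned int i;
	int err;

	err = sg_alloc_table(sgt, nents, GFP_KERNEL);
	if (err)
		return err;

	for_each_sg(sgt->sgl, sg, nents, i) {
		sg_dma_address(sg) = addr + (dma_addr_t)i * SZ_4K;
		sg_dma_len(sg) = min_t(size_t, size - i * SZ_4K, SZ_4K);
	}

	return 0;
}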

Signed-off-by: Vitaly Lubart <vitaly.lubart@intel.com>
Signed-off-by: Tomas Winkler <tomas.winkler@intel.com>
Signed-off-by: Daniele Ceraolo Spurio <daniele.ceraolospurio@intel.com>
Cc: Rodrigo Vivi <rodrigo.vivi@intel.com>
Cc: Alan Previn <alan.previn.teres.alexis@intel.com>
Reviewed-by: Alan Previn <alan.previn.teres.alexis@intel.com>
---
 drivers/gpu/drm/i915/pxp/intel_pxp_tee.c   | 114 ++++++++++++++++++++-
 drivers/gpu/drm/i915/pxp/intel_pxp_tee.h   |   5 +
 drivers/gpu/drm/i915/pxp/intel_pxp_types.h |   6 ++
 3 files changed, 124 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp_tee.c b/drivers/gpu/drm/i915/pxp/intel_pxp_tee.c
index 2c1fc49ecec1..e0d09455a92e 100644
--- a/drivers/gpu/drm/i915/pxp/intel_pxp_tee.c
+++ b/drivers/gpu/drm/i915/pxp/intel_pxp_tee.c
@@ -7,6 +7,7 @@
 
 #include <drm/i915_pxp_tee_interface.h>
 #include <drm/i915_component.h>
+#include "gem/i915_gem_region.h"
 
 #include "i915_drv.h"
 #include "intel_pxp.h"
@@ -69,6 +70,47 @@ static int intel_pxp_tee_io_message(struct intel_pxp *pxp,
 	return ret;
 }
 
+int intel_pxp_tee_stream_message(struct intel_pxp *pxp,
+				 u8 client_id, u32 fence_id,
+				 void *msg_in, size_t msg_in_len,
+				 void *msg_out, size_t msg_out_len)
+{
+	/* TODO: for bigger objects we need to use a sg of 4k pages */
+	const size_t max_msg_size = PAGE_SIZE;
+	struct drm_i915_private *i915 = pxp_to_gt(pxp)->i915;
+	struct i915_pxp_component *pxp_component = pxp->pxp_component;
+	unsigned int offset = 0;
+	struct scatterlist *sg;
+	int ret;
+
+	if (msg_in_len > max_msg_size || msg_out_len > max_msg_size)
+		return -ENOSPC;
+
+	mutex_lock(&pxp->tee_mutex);
+
+	if (unlikely(!pxp_component || !pxp_component->ops->gsc_command)) {
+		ret = -ENODEV;
+		goto unlock;
+	}
+
+	GEM_BUG_ON(!pxp->stream_cmd.obj);
+
+	sg = i915_gem_object_get_sg_dma(pxp->stream_cmd.obj, 0, &offset);
+
+	memcpy(pxp->stream_cmd.vaddr, msg_in, msg_in_len);
+
+	ret = pxp_component->ops->gsc_command(pxp_component->tee_dev, client_id,
+					      fence_id, sg, msg_in_len, sg);
+	if (ret < 0)
+		drm_err(&i915->drm, "Failed to send PXP TEE gsc command\n");
+	else
+		memcpy(msg_out, pxp->stream_cmd.vaddr, msg_out_len);
+
+unlock:
+	mutex_unlock(&pxp->tee_mutex);
+	return ret;
+}
+
 /**
  * i915_pxp_tee_component_bind - bind function to pass the function pointers to pxp_tee
  * @i915_kdev: pointer to i915 kernel device
@@ -126,6 +168,66 @@ static const struct component_ops i915_pxp_tee_component_ops = {
 	.unbind = i915_pxp_tee_component_unbind,
 };
 
+static int alloc_streaming_command(struct intel_pxp *pxp)
+{
+	struct drm_i915_private *i915 = pxp_to_gt(pxp)->i915;
+	struct drm_i915_gem_object *obj = NULL;
+	void *cmd;
+	int err;
+
+	pxp->stream_cmd.obj = NULL;
+	pxp->stream_cmd.vaddr = NULL;
+
+	if (!IS_DGFX(i915))
+		return 0;
+
+	/* allocate lmem object of one page for PXP command memory and store it */
+	obj = i915_gem_object_create_lmem(i915, PAGE_SIZE, I915_BO_ALLOC_CONTIGUOUS);
+	if (IS_ERR(obj)) {
+		drm_err(&i915->drm, "Failed to allocate pxp streaming command!\n");
+		return PTR_ERR(obj);
+	}
+
+	err = i915_gem_object_pin_pages_unlocked(obj);
+	if (err) {
+		drm_err(&i915->drm, "Failed to pin gsc message page!\n");
+		goto out_put;
+	}
+
+	/* map the lmem into the virtual memory pointer */
+	cmd = i915_gem_object_pin_map_unlocked(obj, i915_coherent_map_type(i915, obj, true));
+	if (IS_ERR(cmd)) {
+		drm_err(&i915->drm, "Failed to map gsc message page!\n");
+		err = PTR_ERR(cmd);
+		goto out_unpin;
+	}
+
+	memset(cmd, 0, obj->base.size);
+
+	pxp->stream_cmd.obj = obj;
+	pxp->stream_cmd.vaddr = cmd;
+
+	return 0;
+
+out_unpin:
+	i915_gem_object_unpin_pages(obj);
+out_put:
+	i915_gem_object_put(obj);
+	return err;
+}
+
+static void free_streaming_command(struct intel_pxp *pxp)
+{
+	struct drm_i915_gem_object *obj = fetch_and_zero(&pxp->stream_cmd.obj);
+
+	if (!obj)
+		return;
+
+	i915_gem_object_unpin_map(obj);
+	i915_gem_object_unpin_pages(obj);
+	i915_gem_object_put(obj);
+}
+
 int intel_pxp_tee_component_init(struct intel_pxp *pxp)
 {
 	int ret;
@@ -134,16 +236,24 @@ int intel_pxp_tee_component_init(struct intel_pxp *pxp)
 
 	mutex_init(&pxp->tee_mutex);
 
+	ret = alloc_streaming_command(pxp);
+	if (ret)
+		return ret;
+
 	ret = component_add_typed(i915->drm.dev, &i915_pxp_tee_component_ops,
 				  I915_COMPONENT_PXP);
 	if (ret < 0) {
 		drm_err(&i915->drm, "Failed to add PXP component (%d)\n", ret);
-		return ret;
+		goto out_free;
 	}
 
 	pxp->pxp_component_added = true;
 
 	return 0;
+
+out_free:
+	free_streaming_command(pxp);
+	return ret;
 }
 
 void intel_pxp_tee_component_fini(struct intel_pxp *pxp)
@@ -155,6 +265,8 @@ void intel_pxp_tee_component_fini(struct intel_pxp *pxp)
 
 	component_del(i915->drm.dev, &i915_pxp_tee_component_ops);
 	pxp->pxp_component_added = false;
+
+	free_streaming_command(pxp);
 }
 
 int intel_pxp_tee_cmd_create_arb_session(struct intel_pxp *pxp,
diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp_tee.h b/drivers/gpu/drm/i915/pxp/intel_pxp_tee.h
index c136053ce340..aeb3dfe7ce96 100644
--- a/drivers/gpu/drm/i915/pxp/intel_pxp_tee.h
+++ b/drivers/gpu/drm/i915/pxp/intel_pxp_tee.h
@@ -14,4 +14,9 @@ void intel_pxp_tee_component_fini(struct intel_pxp *pxp);
 int intel_pxp_tee_cmd_create_arb_session(struct intel_pxp *pxp,
 					 int arb_session_id);
 
+int intel_pxp_tee_stream_message(struct intel_pxp *pxp,
+				 u8 client_id, u32 fence_id,
+				 void *msg_in, size_t msg_in_len,
+				 void *msg_out, size_t msg_out_len);
+
 #endif /* __INTEL_PXP_TEE_H__ */
diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp_types.h b/drivers/gpu/drm/i915/pxp/intel_pxp_types.h
index 7ce5f37ee12e..f74b1e11a505 100644
--- a/drivers/gpu/drm/i915/pxp/intel_pxp_types.h
+++ b/drivers/gpu/drm/i915/pxp/intel_pxp_types.h
@@ -53,6 +53,12 @@ struct intel_pxp {
 	/** @tee_mutex: protects the tee channel binding and messaging. */
 	struct mutex tee_mutex;
 
+	/** @stream_cmd: LMEM obj used to send stream PXP commands to the GSC */
+	struct {
+		struct drm_i915_gem_object *obj; /* contains PXP command memory */
+		void *vaddr; /* virtual memory for PXP command */
+	} stream_cmd;
+
 	/**
 	 * @hw_state_invalidated: if the HW perceives an attack on the integrity
 	 * of the encryption it will invalidate the keys and expect SW to
-- 
2.37.2
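
For reference, the component hook this patch relies on has roughly the
shape below, as inferred from the call site in
intel_pxp_tee_stream_message(); the authoritative definition lives in
include/drm/i915_pxp_tee_interface.h (extended earlier in this series),
so treat this as a sketch rather than the exact header:

/* Sketch of the tee component interface assumed above. Return value
 * semantics assumed here: bytes received on success, negative errno
 * on failure.
 */
struct i915_pxp_component_ops {
	struct module *owner;

	int (*send)(struct device *dev, const void *message, size_t size);
	int (*recv)(struct device *dev, void *buffer, size_t size);

	/* stream command: send total_in_len bytes described by sg_in on
	 * behalf of client_id/fence_id; the reply lands in sg_out
	 */
	ssize_t (*gsc_command)(struct device *dev, u8 client_id,
			       u32 fence_id, struct scatterlist *sg_in,
			       size_t total_in_len,
			       struct scatterlist *sg_out);
};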


Thread overview: 37+ messages
2022-09-09  0:15 [PATCH v4 00/15] drm/i915: HuC loading for DG2 Daniele Ceraolo Spurio
2022-09-09  0:15 ` [PATCH v4 01/15] mei: add support to GSC extended header Daniele Ceraolo Spurio
2022-09-09  6:11   ` Greg Kroah-Hartman
2022-09-09  6:29     ` Winkler, Tomas
2022-09-09  0:15 ` [PATCH v4 02/15] mei: bus: enable sending gsc commands Daniele Ceraolo Spurio
2022-09-09  0:16 ` [PATCH v4 03/15] mei: adjust extended header kdocs Daniele Ceraolo Spurio
2022-09-09  6:12   ` Greg Kroah-Hartman
2022-09-09  0:16 ` [PATCH v4 04/15] mei: bus: extend bus API to support command streamer API Daniele Ceraolo Spurio
2022-09-09  0:16 ` [PATCH v4 05/15] mei: pxp: add command streamer API to the PXP driver Daniele Ceraolo Spurio
2022-09-09  6:14   ` Greg Kroah-Hartman
2022-09-09  6:38     ` Winkler, Tomas
2022-09-09  6:42       ` Greg Kroah-Hartman
2022-09-12  9:59         ` Winkler, Tomas
2022-09-12 14:25           ` Greg Kroah-Hartman
2022-09-09  0:16 ` [PATCH v4 06/15] mei: pxp: support matching with a gfx discrete card Daniele Ceraolo Spurio
2022-09-09  6:16   ` Greg Kroah-Hartman
2022-09-09  6:51     ` Winkler, Tomas
2022-09-09  6:59       ` Greg Kroah-Hartman
2022-09-09  9:21         ` Winkler, Tomas
2022-09-09 10:19           ` Greg Kroah-Hartman
2022-09-12 10:05             ` Winkler, Tomas
2022-09-09  0:16 ` [PATCH v4 07/15] drm/i915/pxp: load the pxp module when we have a gsc-loaded huc Daniele Ceraolo Spurio
2022-09-09  0:16 ` Daniele Ceraolo Spurio [this message]
2022-09-09  0:16 ` [PATCH v4 09/15] drm/i915/pxp: add huc authentication and loading command Daniele Ceraolo Spurio
2022-09-09  0:16 ` [PATCH v4 10/15] drm/i915/dg2: setup HuC loading via GSC Daniele Ceraolo Spurio
2022-09-09 20:54   ` Teres Alexis, Alan Previn
2022-09-09  0:16 ` [PATCH v4 11/15] drm/i915/huc: track delayed HuC load with a fence Daniele Ceraolo Spurio
2022-09-09 21:10   ` Teres Alexis, Alan Previn
2022-09-09  0:16 ` [PATCH v4 12/15] drm/i915/huc: stall media submission until HuC is loaded Daniele Ceraolo Spurio
2022-09-09 21:20   ` Ye, Tony
2022-09-09  0:16 ` [PATCH v4 13/15] drm/i915/huc: better define HuC status getparam possible return values Daniele Ceraolo Spurio
2022-09-09  0:16 ` [PATCH v4 14/15] drm/i915/huc: define gsc-compatible HuC fw for DG2 Daniele Ceraolo Spurio
2022-09-09 21:20   ` Ye, Tony
2022-09-10  0:18   ` Teres Alexis, Alan Previn
2022-09-09  0:16 ` [PATCH v4 15/15] HAX: drm/i915: force INTEL_MEI_GSC and INTEL_MEI_PXP on for CI Daniele Ceraolo Spurio
2022-09-09  0:22   ` Ceraolo Spurio, Daniele
2022-09-09  6:01 ` [PATCH v4 00/15] drm/i915: HuC loading for DG2 Winkler, Tomas
