From: Dave Jiang <dave.jiang@intel.com>
To: alex.williamson@redhat.com, kwankhede@nvidia.com,
	tglx@linutronix.de, vkoul@kernel.org, jgg@mellanox.com
Cc: megha.dey@intel.com, jacob.jun.pan@intel.com,
	ashok.raj@intel.com, yi.l.liu@intel.com, baolu.lu@intel.com,
	kevin.tian@intel.com, sanjay.k.kumar@intel.com,
	tony.luck@intel.com, dan.j.williams@intel.com,
	eric.auger@redhat.com, pbonzini@redhat.com,
	dmaengine@vger.kernel.org, linux-kernel@vger.kernel.org,
	kvm@vger.kernel.org
Subject: [PATCH v6 08/20] vfio/mdev: idxd: Add mdev device context initialization
Date: Fri, 21 May 2021 17:19:54 -0700
Message-ID: <162164279478.261970.8966553743790451233.stgit@djiang5-desk3.ch.intel.com>
In-Reply-To: <162164243591.261970.3439987543338120797.stgit@djiang5-desk3.ch.intel.com>

Add support functions to initialize the vdcm context, the PCI config
space region, and the MMIO region. These regions back the config space
and MMIO emulation paths for the mdev.
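
For reference, the expected use is roughly the sketch below. This is an
illustrative sketch only and not part of the patch: the function name
vdcm_idxd_open_example() is a placeholder, and vidxd_init() is simply the
entry point added here, presumably invoked from the mdev setup path later
in the series.

	/* Bring the emulated device to a known initial state. */
	static int vdcm_idxd_open_example(struct vdcm_idxd *vidxd)
	{
		/* Seeds PCI config space, BAR sizes, and BAR0 register state. */
		vidxd_init(vidxd);
		return 0;
	}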


Signed-off-by: Dave Jiang <dave.jiang@intel.com>
---
 drivers/dma/idxd/registers.h  |    3 +
 drivers/vfio/mdev/idxd/mdev.h |    4 +
 drivers/vfio/mdev/idxd/vdev.c |  214 +++++++++++++++++++++++++++++++++++++++++
 3 files changed, 220 insertions(+), 1 deletion(-)

diff --git a/drivers/dma/idxd/registers.h b/drivers/dma/idxd/registers.h
index c2d558e37baf..8ac2be4e174b 100644
--- a/drivers/dma/idxd/registers.h
+++ b/drivers/dma/idxd/registers.h
@@ -88,6 +88,9 @@ struct opcap {
 	u64 bits[4];
 };
 
+#define OPCAP_OFS(op) ((op) - (0x40 * ((op) >> 6)))
+#define OPCAP_BIT(op) BIT_ULL(OPCAP_OFS(op))
+
 #define IDXD_OPCAP_OFFSET		0x40
 
 #define IDXD_TABLE_OFFSET		0x60
diff --git a/drivers/vfio/mdev/idxd/mdev.h b/drivers/vfio/mdev/idxd/mdev.h
index e52b50760ee7..91cb2662abd6 100644
--- a/drivers/vfio/mdev/idxd/mdev.h
+++ b/drivers/vfio/mdev/idxd/mdev.h
@@ -16,6 +16,7 @@
 #define VIDXD_MSIX_TBL_SZ		0x90
 #define VIDXD_MSIX_PERM_TBL_SZ		0x48
 
+#define VIDXD_VERSION_OFFSET		0
 #define VIDXD_MSIX_PERM_OFFSET		0x300
 #define VIDXD_GRPCFG_OFFSET		0x400
 #define VIDXD_WQCFG_OFFSET		0x500
@@ -74,8 +75,9 @@ static inline u8 vidxd_state(struct vdcm_idxd *vidxd)
 
 int idxd_mdev_get_pasid(struct mdev_device *mdev, struct vfio_device *vdev, u32 *pasid);
 
+void vidxd_init(struct vdcm_idxd *vidxd);
 void vidxd_reset(struct vdcm_idxd *vidxd);
-
+void vidxd_mmio_init(struct vdcm_idxd *vidxd);
 int vidxd_cfg_read(struct vdcm_idxd *vidxd, unsigned int pos, void *buf, unsigned int count);
 int vidxd_cfg_write(struct vdcm_idxd *vidxd, unsigned int pos, void *buf, unsigned int size);
 #endif
diff --git a/drivers/vfio/mdev/idxd/vdev.c b/drivers/vfio/mdev/idxd/vdev.c
index 4ead50947047..78cc2377e637 100644
--- a/drivers/vfio/mdev/idxd/vdev.c
+++ b/drivers/vfio/mdev/idxd/vdev.c
@@ -21,6 +21,62 @@
 #include "idxd.h"
 #include "mdev.h"
 
+static u64 idxd_pci_config[] = {
+	0x0010000000008086ULL,
+	0x0080000008800000ULL,
+	0x000000000000000cULL,
+	0x000000000000000cULL,
+	0x0000000000000000ULL,
+	0x2010808600000000ULL,
+	0x0000004000000000ULL,
+	0x000000ff00000000ULL,
+	0x0000060000015011ULL, /* MSI-X capability, hardcoded 2 entries, encoded as N-1 */
+	0x0000070000000000ULL,
+	0x0000000000920010ULL, /* PCIe capability */
+	0x0000000000000000ULL,
+	0x0000000000000000ULL,
+	0x0000000000000000ULL,
+	0x0000000000000000ULL,
+	0x0000000000000000ULL,
+	0x0000000000000000ULL,
+	0x0000000000000000ULL,
+};
+
+static void vidxd_reset_config(struct vdcm_idxd *vidxd)
+{
+	u16 *devid = (u16 *)(vidxd->cfg + PCI_DEVICE_ID);
+	struct idxd_device *idxd = vidxd->idxd;
+
+	memset(vidxd->cfg, 0, VIDXD_MAX_CFG_SPACE_SZ);
+	memcpy(vidxd->cfg, idxd_pci_config, sizeof(idxd_pci_config));
+
+	if (idxd->data->type == IDXD_TYPE_DSA)
+		*devid = PCI_DEVICE_ID_INTEL_DSA_SPR0;
+	else if (idxd->data->type == IDXD_TYPE_IAX)
+		*devid = PCI_DEVICE_ID_INTEL_IAX_SPR0;
+}
+
+static void vidxd_reset_mmio(struct vdcm_idxd *vidxd)
+{
+	memset(vidxd->bar0, 0, VIDXD_MAX_MMIO_SPACE_SZ);
+}
+
+void vidxd_init(struct vdcm_idxd *vidxd)
+{
+	struct idxd_wq *wq = vidxd->wq;
+
+	vidxd_reset_config(vidxd);
+	vidxd_reset_mmio(vidxd);
+
+	vidxd->bar_size[0] = VIDXD_BAR0_SIZE;
+	vidxd->bar_size[1] = VIDXD_BAR2_SIZE;
+
+	vidxd_mmio_init(vidxd);
+
+	if (wq_dedicated(wq) && wq->state == IDXD_WQ_ENABLED)
+		idxd_wq_disable(wq);
+}
+
 void vidxd_send_interrupt(struct vdcm_idxd *vidxd, int vector)
 {
 	struct mdev_device *mdev = vidxd->mdev;
@@ -252,6 +308,163 @@ int vidxd_cfg_write(struct vdcm_idxd *vidxd, unsigned int pos, void *buf, unsign
 	return 0;
 }
 
+static void vidxd_mmio_init_grpcap(struct vdcm_idxd *vidxd)
+{
+	u8 *bar0 = vidxd->bar0;
+	union group_cap_reg *grp_cap = (union group_cap_reg *)(bar0 + IDXD_GRPCAP_OFFSET);
+
+	/* single group for current implementation */
+	grp_cap->num_groups = 1;
+}
+
+static void vidxd_mmio_init_grpcfg(struct vdcm_idxd *vidxd)
+{
+	u8 *bar0 = vidxd->bar0;
+	struct grpcfg *grpcfg = (struct grpcfg *)(bar0 + VIDXD_GRPCFG_OFFSET);
+	struct idxd_wq *wq = vidxd->wq;
+	struct idxd_group *group = wq->group;
+	int i;
+
+	/*
+	 * At this point, we are only exporting a single workqueue for
+	 * each mdev.
+	 */
+	grpcfg->wqs[0] = BIT(0);
+	for (i = 0; i < group->num_engines; i++)
+		grpcfg->engines |= BIT(i);
+	grpcfg->flags.bits = group->grpcfg.flags.bits;
+}
+
+static void vidxd_mmio_init_wqcap(struct vdcm_idxd *vidxd)
+{
+	u8 *bar0 = vidxd->bar0;
+	struct idxd_wq *wq = vidxd->wq;
+	union wq_cap_reg *wq_cap = (union wq_cap_reg *)(bar0 + IDXD_WQCAP_OFFSET);
+
+	wq_cap->total_wq_size = wq->size;
+	wq_cap->num_wqs = 1;
+	wq_cap->dedicated_mode = 1;
+}
+
+static void vidxd_mmio_init_wqcfg(struct vdcm_idxd *vidxd)
+{
+	struct idxd_device *idxd = vidxd->idxd;
+	struct idxd_wq *wq = vidxd->wq;
+	u8 *bar0 = vidxd->bar0;
+	union wqcfg *wqcfg = (union wqcfg *)(bar0 + VIDXD_WQCFG_OFFSET);
+
+	wqcfg->wq_size = wq->size;
+	wqcfg->wq_thresh = wq->threshold;
+	wqcfg->mode = WQCFG_MODE_DEDICATED;
+	wqcfg->priority = wq->priority;
+	wqcfg->max_xfer_shift = idxd->hw.gen_cap.max_xfer_shift;
+	wqcfg->max_batch_shift = idxd->hw.gen_cap.max_batch_shift;
+}
+
+static void vidxd_mmio_init_engcap(struct vdcm_idxd *vidxd)
+{
+	u8 *bar0 = vidxd->bar0;
+	union engine_cap_reg *engcap = (union engine_cap_reg *)(bar0 + IDXD_ENGCAP_OFFSET);
+	struct idxd_wq *wq = vidxd->wq;
+	struct idxd_group *group = wq->group;
+
+	engcap->num_engines = group->num_engines;
+}
+
+static void vidxd_mmio_init_gencap(struct vdcm_idxd *vidxd)
+{
+	struct idxd_device *idxd = vidxd->idxd;
+	u8 *bar0 = vidxd->bar0;
+	union gen_cap_reg *gencap = (union gen_cap_reg *)(bar0 + IDXD_GENCAP_OFFSET);
+
+	gencap->overlap_copy = idxd->hw.gen_cap.overlap_copy;
+	gencap->cache_control_mem = idxd->hw.gen_cap.cache_control_mem;
+	gencap->cache_control_cache = idxd->hw.gen_cap.cache_control_cache;
+	gencap->cmd_cap = 1;
+	gencap->dest_readback = idxd->hw.gen_cap.dest_readback;
+	gencap->drain_readback = idxd->hw.gen_cap.drain_readback;
+	gencap->max_xfer_shift = idxd->hw.gen_cap.max_xfer_shift;
+	gencap->max_batch_shift = idxd->hw.gen_cap.max_batch_shift;
+	gencap->max_descs_per_engine = idxd->hw.gen_cap.max_descs_per_engine;
+}
+
+static void vidxd_mmio_init_cmdcap(struct vdcm_idxd *vidxd)
+{
+	u8 *bar0 = vidxd->bar0;
+	u32 *cmdcap = (u32 *)(bar0 + IDXD_CMDCAP_OFFSET);
+
+	*cmdcap |= BIT(IDXD_CMD_ENABLE_DEVICE) | BIT(IDXD_CMD_DISABLE_DEVICE) |
+		   BIT(IDXD_CMD_DRAIN_ALL) | BIT(IDXD_CMD_ABORT_ALL) |
+		   BIT(IDXD_CMD_RESET_DEVICE) | BIT(IDXD_CMD_ENABLE_WQ) |
+		   BIT(IDXD_CMD_DISABLE_WQ) | BIT(IDXD_CMD_DRAIN_WQ) |
+		   BIT(IDXD_CMD_ABORT_WQ) | BIT(IDXD_CMD_RESET_WQ) |
+		   BIT(IDXD_CMD_DRAIN_PASID) | BIT(IDXD_CMD_ABORT_PASID) |
+		   BIT(IDXD_CMD_REQUEST_INT_HANDLE) | BIT(IDXD_CMD_RELEASE_INT_HANDLE);
+}
+
+static void vidxd_mmio_init_opcap(struct vdcm_idxd *vidxd)
+{
+	struct idxd_device *idxd = vidxd->idxd;
+	u64 opcode;
+	u8 *bar0 = vidxd->bar0;
+	u64 *opcap = (u64 *)(bar0 + IDXD_OPCAP_OFFSET);
+
+	if (idxd->data->type == IDXD_TYPE_DSA) {
+		opcode = BIT_ULL(DSA_OPCODE_NOOP) | BIT_ULL(DSA_OPCODE_BATCH) |
+			 BIT_ULL(DSA_OPCODE_DRAIN) | BIT_ULL(DSA_OPCODE_MEMMOVE) |
+			 BIT_ULL(DSA_OPCODE_MEMFILL) | BIT_ULL(DSA_OPCODE_COMPARE) |
+			 BIT_ULL(DSA_OPCODE_COMPVAL) | BIT_ULL(DSA_OPCODE_CR_DELTA) |
+			 BIT_ULL(DSA_OPCODE_AP_DELTA) | BIT_ULL(DSA_OPCODE_DUALCAST) |
+			 BIT_ULL(DSA_OPCODE_CRCGEN) | BIT_ULL(DSA_OPCODE_COPY_CRC) |
+			 BIT_ULL(DSA_OPCODE_DIF_CHECK) | BIT_ULL(DSA_OPCODE_DIF_INS) |
+			 BIT_ULL(DSA_OPCODE_DIF_STRP) | BIT_ULL(DSA_OPCODE_DIF_UPDT) |
+			 BIT_ULL(DSA_OPCODE_CFLUSH);
+		*opcap = opcode;
+	} else if (idxd->data->type == IDXD_TYPE_IAX) {
+		opcode = BIT_ULL(IAX_OPCODE_NOOP) | BIT_ULL(IAX_OPCODE_DRAIN) |
+			 BIT_ULL(IAX_OPCODE_MEMMOVE);
+		*opcap = opcode;
+		opcap++;
+		opcode = OPCAP_BIT(IAX_OPCODE_DECOMPRESS) |
+			 OPCAP_BIT(IAX_OPCODE_COMPRESS);
+		*opcap = opcode;
+	}
+}
+
+static void vidxd_mmio_init_version(struct vdcm_idxd *vidxd)
+{
+	struct idxd_device *idxd = vidxd->idxd;
+	u32 *version;
+
+	version = (u32 *)(vidxd->bar0 + VIDXD_VERSION_OFFSET);
+	*version = idxd->hw.version;
+}
+
+void vidxd_mmio_init(struct vdcm_idxd *vidxd)
+{
+	u8 *bar0 = vidxd->bar0;
+	union offsets_reg *offsets;
+
+	memset(vidxd->bar0, 0, VIDXD_BAR0_SIZE);
+
+	vidxd_mmio_init_version(vidxd);
+	vidxd_mmio_init_gencap(vidxd);
+	vidxd_mmio_init_wqcap(vidxd);
+	vidxd_mmio_init_grpcap(vidxd);
+	vidxd_mmio_init_engcap(vidxd);
+	vidxd_mmio_init_opcap(vidxd);
+
+	offsets = (union offsets_reg *)(bar0 + IDXD_TABLE_OFFSET);
+	offsets->grpcfg = VIDXD_GRPCFG_OFFSET / 0x100;
+	offsets->wqcfg = VIDXD_WQCFG_OFFSET / 0x100;
+	offsets->msix_perm = VIDXD_MSIX_PERM_OFFSET / 0x100;
+
+	vidxd_mmio_init_cmdcap(vidxd);
+	memset(bar0 + VIDXD_MSIX_PERM_OFFSET, 0, VIDXD_MSIX_PERM_TBL_SZ);
+	vidxd_mmio_init_grpcfg(vidxd);
+	vidxd_mmio_init_wqcfg(vidxd);
+}
+
 static void idxd_complete_command(struct vdcm_idxd *vidxd, enum idxd_cmdsts_err val)
 {
 	u8 *bar0 = vidxd->bar0;
@@ -396,6 +609,7 @@ void vidxd_reset(struct vdcm_idxd *vidxd)
 		}
 	}
 
+	vidxd_mmio_init(vidxd);
 	vwqcfg->wq_state = IDXD_WQ_DISABLED;
 	gensts->state = IDXD_DEVICE_STATE_DISABLED;
 	idxd_complete_command(vidxd, IDXD_CMDSTS_SUCCESS);



Thread overview: 64+ messages
2021-05-22  0:19 [PATCH v6 00/20] Add VFIO mediated device support and DEV-MSI support for the idxd driver Dave Jiang
2021-05-22  0:19 ` [PATCH v6 01/20] vfio/mdev: idxd: add theory of operation documentation for idxd mdev Dave Jiang
2021-05-22  0:19 ` [PATCH v6 02/20] dmaengine: idxd: add external module driver support for dsa_bus_type Dave Jiang
2021-05-22  0:19 ` [PATCH v6 03/20] dmaengine: idxd: add IMS offset and size retrieval code Dave Jiang
2021-05-22  0:19 ` [PATCH v6 04/20] dmaengine: idxd: add portal offset for IMS portals Dave Jiang
2021-05-22  0:19 ` [PATCH v6 05/20] vfio: mdev: common lib code for setting up Interrupt Message Store Dave Jiang
2021-05-24  0:02   ` Jason Gunthorpe
2021-05-28  1:49     ` Dave Jiang
2021-05-28 12:21       ` Jason Gunthorpe
2021-05-28 16:37         ` Dave Jiang
2021-05-28 16:39           ` Jason Gunthorpe
2021-05-31 10:41             ` Thomas Gleixner
2021-05-31 13:48   ` Thomas Gleixner
2021-05-31 15:24     ` Jason Gunthorpe
2021-06-08 15:57     ` Dave Jiang
2021-06-08 17:22       ` Jason Gunthorpe
2021-06-10 13:00       ` Thomas Gleixner
2021-05-22  0:19 ` [PATCH v6 06/20] vfio/mdev: idxd: add PCI config for read/write for mdev Dave Jiang
2021-05-22  0:19 ` [PATCH v6 07/20] vfio/mdev: idxd: Add administrative commands emulation " Dave Jiang
2021-05-22  0:19 ` Dave Jiang [this message]
2021-05-22  0:20 ` [PATCH v6 09/20] vfio/mdev: Add mmio read/write support " Dave Jiang
2021-05-22  0:20 ` [PATCH v6 10/20] vfio/mdev: idxd: add mdev type as a new wq type Dave Jiang
2021-05-22  0:20 ` [PATCH v6 11/20] vfio/mdev: idxd: Add basic driver setup for idxd mdev Dave Jiang
2021-05-23 23:27   ` Jason Gunthorpe
2021-05-23 23:52   ` Jason Gunthorpe
2021-05-22  0:20 ` [PATCH v6 12/20] vfio: move VFIO PCI macros to common header Dave Jiang
2021-06-04  3:47   ` Alex Williamson
2021-05-22  0:20 ` [PATCH v6 13/20] vfio/mdev: idxd: add mdev driver registration and helper functions Dave Jiang
2021-05-23 23:32   ` Jason Gunthorpe
2021-05-23 23:46   ` Jason Gunthorpe
2021-05-22  0:20 ` [PATCH v6 14/20] vfio/mdev: idxd: add 1dwq-v1 mdev type Dave Jiang
2021-05-23 23:47   ` Jason Gunthorpe
2021-05-22  0:20 ` [PATCH v6 15/20] vfio/mdev: idxd: ims domain setup for the vdcm Dave Jiang
2021-05-23 23:50   ` Jason Gunthorpe
2021-05-27  0:22     ` Dave Jiang
2021-05-27  0:54       ` Jason Gunthorpe
2021-05-27  1:15         ` Dave Jiang
2021-05-27  1:41         ` Raj, Ashok
2021-05-27 13:36           ` Jason Gunthorpe
2021-05-31 14:02     ` Thomas Gleixner
2021-05-31 16:57       ` Jason Gunthorpe
2021-05-31 23:55         ` Thomas Gleixner
2021-06-01 12:16           ` Jason Gunthorpe
2021-05-22  0:20 ` [PATCH v6 16/20] vfio/mdev: idxd: add new wq state for mdev Dave Jiang
2021-05-22  0:20 ` [PATCH v6 17/20] vfio/mdev: idxd: add error notification from host driver to mediated device Dave Jiang
2021-05-22  0:20 ` [PATCH v6 18/20] vfio: move vfio_pci_set_ctx_trigger_single to common code Dave Jiang
2021-05-22  0:21 ` [PATCH v6 19/20] vfio: mdev: Add device request interface Dave Jiang
2021-05-23 22:38   ` Jason Gunthorpe
2021-05-22  0:21 ` [PATCH v6 20/20] vfio: mdev: idxd: setup request interrupt Dave Jiang
2021-05-23 23:22 ` [PATCH v6 00/20] Add VFIO mediated device support and DEV-MSI support for the idxd driver Jason Gunthorpe
2021-06-02 15:40   ` Dave Jiang
2021-06-02 23:17     ` Jason Gunthorpe
2021-06-03  1:11       ` Tian, Kevin
2021-06-03  1:49         ` Jason Gunthorpe
2021-06-03  5:52           ` Tian, Kevin
2021-06-03 11:23             ` Jason Gunthorpe
2021-06-04  3:40             ` Alex Williamson
2021-06-07  6:22               ` Tian, Kevin
2021-06-07 18:13                 ` Dave Jiang
2021-06-07 19:11                   ` Jason Gunthorpe
2021-06-08 16:02                     ` Dave Jiang
2021-06-11 18:21                       ` Dave Jiang
2021-06-11 18:29                         ` Jason Gunthorpe
2021-06-07 18:28                 ` Jason Gunthorpe
