From: Ben Widawsky <ben.widawsky@intel.com>
To: linux-cxl@vger.kernel.org
Cc: Ben Widawsky <ben.widawsky@intel.com>,
	linux-kernel@vger.kernel.org, linux-pci@vger.kernel.org,
	linux-acpi@vger.kernel.org, Ira Weiny <ira.weiny@intel.com>,
	Dan Williams <dan.j.williams@intel.com>,
	Vishal Verma <vishal.l.verma@intel.com>,
	"Kelley, Sean V" <sean.v.kelley@intel.com>,
	Rafael Wysocki <rafael.j.wysocki@intel.com>,
	Bjorn Helgaas <helgaas@kernel.org>,
	Jonathan Cameron <Jonathan.Cameron@Huawei.com>,
	Jon Masters <jcm@jonmasters.org>,
	Chris Browy <cbrowy@avery-design.com>,
	Randy Dunlap <rdunlap@infradead.org>,
	Christoph Hellwig <hch@infradead.org>
Subject: [RFC PATCH v2 06/14] cxl/mem: Find device capabilities
Date: Tue,  8 Dec 2020 16:24:10 -0800
Message-ID: <20201209002418.1976362-7-ben.widawsky@intel.com>
In-Reply-To: <20201209002418.1976362-1-ben.widawsky@intel.com>

CXL devices contain an array of capabilities that describe how software
can interact with the device and with firmware running on the device. A
CXL compliant device must implement the device status and the mailbox
capability; a CXL compliant memory device must additionally implement
the memory device capability.
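
For reference, each entry in the capability array carries a small
header identifying the capability and pointing at its register block.
A minimal sketch of that layout, with illustrative struct and field
names (the code below reads the raw dwords directly rather than
defining such a struct):

	struct cxl_cap_header {
		u16 cap_id;	/* e.g. 0x0002 for the primary mailbox */
		u8 version;
		u8 reserved;
		u32 offset;	/* register block offset from the MMIO base */
		u32 length;	/* register block length; unused by this patch */
		u32 reserved2;
	};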

Each capability entry provides an offset within the already-mapped MMIO
region at which software interacts with that part of the CXL device.
Enumerate the capability array and record these offsets for later use.
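
Once a capability's register block has been located, reads and writes
go through the small per-capability accessors generated by the
cxl_reg() macro. A rough usage sketch, assuming the mailbox Payload
Size field encodes the size as 2^n bytes (CXL 2.0 section 8.2.8.4.3):

	u32 mb_caps = cxl_read_mbox_reg32(cxlm, CXLDEV_MB_CAPS_OFFSET);
	size_t payload_size =
		1 << CXL_GET_FIELD(mb_caps, CXLDEV_MB_CAP_PAYLOAD_SIZE);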

Signed-off-by: Ben Widawsky <ben.widawsky@intel.com>
---
 drivers/cxl/cxl.h | 92 ++++++++++++++++++++++++++++++++++++++++++++++-
 drivers/cxl/mem.c | 74 +++++++++++++++++++++++++++++++++++++-
 2 files changed, 164 insertions(+), 2 deletions(-)

diff --git a/drivers/cxl/cxl.h b/drivers/cxl/cxl.h
index d81d0ba4617c..91bb40f73ddc 100644
--- a/drivers/cxl/cxl.h
+++ b/drivers/cxl/cxl.h
@@ -4,6 +4,38 @@
 #ifndef __CXL_H__
 #define __CXL_H__
 
+#include <linux/bitfield.h>
+#include <linux/bitops.h>
+#include <linux/io.h>
+
+#define CXL_SET_FIELD(value, field)                                            \
+	({                                                                     \
+		WARN_ON(!FIELD_FIT(field##_MASK, value));                      \
+		FIELD_PREP(field##_MASK, value);                               \
+	})
+
+#define CXL_GET_FIELD(word, field) FIELD_GET(field##_MASK, word)
+
+/* Device */
+#define CXLDEV_CAP_ARRAY_REG 0x0
+#define CXLDEV_CAP_ARRAY_CAP_ID 0
+#define CXLDEV_CAP_ARRAY_ID(x) ((x) & (0xffff))
+#define CXLDEV_CAP_ARRAY_COUNT(x) (((x) >> 32) & 0xffff)
+
+#define CXL_CAP_CAP_ID_DEVICE_STATUS 0x1
+#define CXL_CAP_CAP_ID_PRIMARY_MAILBOX 0x2
+#define CXL_CAP_CAP_ID_SECONDARY_MAILBOX 0x3
+#define CXL_CAP_CAP_ID_MEMDEV 0x4000
+
+/* Mailbox (CXL 2.0 - 8.2.8.4) */
+#define CXLDEV_MB_CAPS_OFFSET 0x00
+#define   CXLDEV_MB_CAP_PAYLOAD_SIZE_MASK GENMASK(5, 0)
+#define   CXLDEV_MB_CAP_PAYLOAD_SIZE_SHIFT 0
+#define CXLDEV_MB_CTRL_OFFSET 0x04
+#define CXLDEV_MB_CMD_OFFSET 0x08
+#define CXLDEV_MB_STATUS_OFFSET 0x10
+#define CXLDEV_MB_BG_CMD_STATUS_OFFSET 0x18
+
 /**
  * struct cxl_mem - A CXL memory device
  * @pdev: The PCI device associated with this CXL device.
@@ -12,6 +44,64 @@
 struct cxl_mem {
 	struct pci_dev *pdev;
 	void __iomem *regs;
+
+	/* Cap 0001h - CXL_CAP_CAP_ID_DEVICE_STATUS */
+	struct {
+		void __iomem *regs;
+	} status;
+
+	/* Cap 0002h - CXL_CAP_CAP_ID_PRIMARY_MAILBOX */
+	struct {
+		void __iomem *regs;
+		size_t payload_size;
+	} mbox;
+
+	/* Cap 4000h - CXL_CAP_CAP_ID_MEMDEV */
+	struct {
+		void __iomem *regs;
+	} mem;
 };
 
-#endif
+#define cxl_reg(type)                                                          \
+	static inline void cxl_write_##type##_reg32(struct cxl_mem *cxlm,      \
+						    u32 reg, u32 value)        \
+	{                                                                      \
+		void __iomem *reg_addr = cxlm->type.regs;                      \
+		writel(value, reg_addr + reg);                                 \
+	}                                                                      \
+	static inline void cxl_write_##type##_reg64(struct cxl_mem *cxlm,      \
+						    u32 reg, u64 value)        \
+	{                                                                      \
+		void __iomem *reg_addr = cxlm->type.regs;                      \
+		writeq(value, reg_addr + reg);                                 \
+	}                                                                      \
+	static inline u32 cxl_read_##type##_reg32(struct cxl_mem *cxlm,        \
+						  u32 reg)                     \
+	{                                                                      \
+		void __iomem *reg_addr = cxlm->type.regs;                      \
+		return readl(reg_addr + reg);                                  \
+	}                                                                      \
+	static inline u64 cxl_read_##type##_reg64(struct cxl_mem *cxlm,        \
+						  u32 reg)                     \
+	{                                                                      \
+		void __iomem *reg_addr = cxlm->type.regs;                      \
+		return readq(reg_addr + reg);                                  \
+	}
+
+cxl_reg(status);
+cxl_reg(mbox);
+
+static inline u32 __cxl_read_reg32(struct cxl_mem *cxlm, u32 reg)
+{
+	void __iomem *reg_addr = cxlm->regs;
+
+	return readl(reg_addr + reg);
+}
+
+static inline u64 __cxl_read_reg64(struct cxl_mem *cxlm, u32 reg)
+{
+	void __iomem *reg_addr = cxlm->regs;
+
+	return readq(reg_addr + reg);
+}
+#endif /* __CXL_H__ */
diff --git a/drivers/cxl/mem.c b/drivers/cxl/mem.c
index 06113d306cd2..8ea830ffdbba 100644
--- a/drivers/cxl/mem.c
+++ b/drivers/cxl/mem.c
@@ -7,6 +7,75 @@
 #include "pci.h"
 #include "cxl.h"
 
+/**
+ * cxl_mem_setup_regs() - Setup necessary MMIO.
+ * @cxlm: The CXL memory device to communicate with.
+ *
+ * Return: 0 if all required register blocks were found, a negative error
+ * code otherwise.
+ *
+ * A memory device is required by the CXL 2.0 specification to implement a
+ * certain set of MMIO register regions. The purpose of this function is to
+ * enumerate those regions and locate each capability's register block within
+ * the already-mapped MMIO space.
+ */
+static int cxl_mem_setup_regs(struct cxl_mem *cxlm)
+{
+	u64 cap_array;
+	int cap;
+
+	/*
+	 * Register accessors need the mappings set up by this function, so the
+	 * raw versions of the mmio accessors must be used instead.
+	 */
+	cap_array = __cxl_read_reg64(cxlm, CXLDEV_CAP_ARRAY_REG);
+	if (CXLDEV_CAP_ARRAY_ID(cap_array) != CXLDEV_CAP_ARRAY_CAP_ID)
+		return -ENODEV;
+
+	for (cap = 1; cap <= CXLDEV_CAP_ARRAY_COUNT(cap_array); cap++) {
+		void __iomem *register_block;
+		u32 offset;
+		u16 cap_id;
+
+		cap_id = __cxl_read_reg32(cxlm, cap * 0x10) & 0xffff;
+		offset = __cxl_read_reg32(cxlm, cap * 0x10 + 0x4);
+		register_block = cxlm->regs + offset;
+
+		switch (cap_id) {
+		case CXL_CAP_CAP_ID_DEVICE_STATUS:
+			dev_dbg(&cxlm->pdev->dev,
+				"found Status capability (0x%x)\n", offset);
+			cxlm->status.regs = register_block;
+			break;
+		case CXL_CAP_CAP_ID_PRIMARY_MAILBOX:
+			dev_dbg(&cxlm->pdev->dev,
+				"found Mailbox capability (0x%x)\n", offset);
+			cxlm->mbox.regs = register_block;
+			/* Payload Size is encoded as 2^n bytes (CXL 2.0 8.2.8.4.3) */
+			cxlm->mbox.payload_size =
+				1 << CXL_GET_FIELD(cxl_read_mbox_reg32(cxlm,
+							CXLDEV_MB_CAPS_OFFSET),
+						   CXLDEV_MB_CAP_PAYLOAD_SIZE);
+			break;
+		case CXL_CAP_CAP_ID_SECONDARY_MAILBOX:
+			dev_dbg(&cxlm->pdev->dev,
+				"found Secondary Mailbox capability (0x%x)\n",
+				offset);
+			break;
+		case CXL_CAP_CAP_ID_MEMDEV:
+			dev_dbg(&cxlm->pdev->dev,
+				"found Memory Device capability (0x%x)\n",
+				offset);
+			cxlm->mem.regs = register_block;
+			break;
+		default:
+			dev_err(&cxlm->pdev->dev, "Unknown cap ID: %d (0x%x)\n",
+				cap_id, offset);
+			return -ENXIO;
+		}
+	}
+
+	if (!cxlm->status.regs || !cxlm->mbox.regs || !cxlm->mem.regs)
+		return -ENXIO;
+
+	return 0;
+}
+
 /**
  * cxl_mem_create() - Create a new &struct cxl_mem.
  * @pdev: The pci device associated with the new &struct cxl_mem.
@@ -126,7 +195,10 @@ static int cxl_mem_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 		}
 	}
 
-	return rc;
+	if (rc)
+		return rc;
+
+	return cxl_mem_setup_regs(cxlm);
 }
 
 static const struct pci_device_id cxl_mem_pci_tbl[] = {
-- 
2.29.2


Thread overview: 35+ messages
2020-12-09  0:24 [RFC PATCH v2 00/14] CXL 2.0 Support Ben Widawsky
2020-12-09  0:24 ` [RFC PATCH v2 01/14] docs: cxl: Add basic documentation Ben Widawsky
2020-12-09  0:24 ` [RFC PATCH v2 02/14] cxl/acpi: Add an acpi_cxl module for the CXL interconnect Ben Widawsky
2020-12-09  0:24 ` [RFC PATCH v2 03/14] cxl/acpi: add OSC support Ben Widawsky
2020-12-09  0:24 ` [RFC PATCH v2 04/14] cxl/mem: Introduce a driver for CXL-2.0-Type-3 endpoints Ben Widawsky
2020-12-09  0:24 ` [RFC PATCH v2 05/14] cxl/mem: Map memory device registers Ben Widawsky
2020-12-09  0:24 ` Ben Widawsky [this message]
2020-12-09  0:24 ` [RFC PATCH v2 07/14] cxl/mem: Implement polled mode mailbox Ben Widawsky
2021-01-07 19:05   ` Ben Widawsky
2020-12-09  0:24 ` [RFC PATCH v2 08/14] cxl/mem: Register CXL memX devices Ben Widawsky
2020-12-09  0:24 ` [RFC PATCH 09/14] cxl/mem: Add basic IOCTL interface Ben Widawsky
2020-12-09  1:37   ` Dan Williams
2020-12-09  2:12     ` Ben Widawsky
2020-12-09  3:33       ` Dan Williams
2020-12-09 16:23         ` Ben Widawsky
2020-12-10  3:32   ` Randy Dunlap
2020-12-14 17:29     ` Ben Widawsky
2020-12-09  0:24 ` [RFC PATCH 10/14] cxl/mem: Add send command Ben Widawsky
2020-12-09 22:06   ` Dan Williams
2020-12-15 21:43     ` Ben Widawsky
2020-12-15 22:03       ` Dan Williams
2020-12-15 22:17         ` Ben Widawsky
2020-12-09  0:24 ` [RFC PATCH 11/14] cxl/mem: Add a "RAW" " Ben Widawsky
2020-12-09 22:38   ` Dan Williams
2020-12-16 20:42     ` Ben Widawsky
2020-12-09  0:24 ` [RFC PATCH 12/14] cxl: Add basic debugging Ben Widawsky
2020-12-09  1:17   ` Dan Williams
2020-12-09  2:04     ` Ben Widawsky
2020-12-09  3:06       ` Dan Williams
2020-12-16 21:02         ` Ben Widawsky
2020-12-09  0:24 ` [RFC PATCH 13/14] MAINTAINERS: Add maintainers of the CXL driver Ben Widawsky
2020-12-09  0:24 ` [RFC PATCH 14/14] WIP/cxl/mem: Add get firmware for testing Ben Widawsky
2020-12-10  0:42   ` Dan Williams
2020-12-09  0:33 ` [RFC PATCH v2 00/14] CXL 2.0 Support Verma, Vishal L
2020-12-09  0:47 ` Ben Widawsky