From: Dan Williams <dan.j.williams@intel.com>
To: linux-cxl@vger.kernel.org
Cc: Ben Widawsky <ben.widawsky@intel.com>,
	vishal.l.verma@intel.com, alison.schofield@intel.com,
	nvdimm@lists.linux.dev, Jonathan.Cameron@huawei.com,
	ira.weiny@intel.com
Subject: [PATCH v3 21/28] cxl/pmem: Translate NVDIMM label commands to CXL label commands
Date: Tue, 24 Aug 2021 09:07:18 -0700
Message-ID: <162982123813.1124374.3721946437291753388.stgit@dwillia2-desk3.amr.corp.intel.com>
In-Reply-To: <162982112370.1124374.2020303588105269226.stgit@dwillia2-desk3.amr.corp.intel.com>

The LIBNVDIMM IOCTL UAPI calls back to the nvdimm-bus provider to
translate the Linux command payload into the device-native command
format. The LIBNVDIMM commands get-config-size, get-config-data, and
set-config-data map to the CXL memory device commands device-identify,
get-lsa, and set-lsa, respectively, as summarized below. Recall that the
label storage area (LSA) on an NVDIMM device arranges for the
provisioning of namespaces; for CXL, the LSA is additionally used to
provision regions.
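
For reference, the mapping looks as follows (opcode values per the CXL
2.0 specification; listed here as a summary, not definitions added by
this patch):

	get-config-size -> Identify Memory Device (0x4000; served from
	                   cached identify data, no mailbox command)
	get-config-data -> Get LSA (0x4102)
	set-config-data -> Set LSA (0x4103)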

The data from device-identify is already cached in the 'struct cxl_mem'
instance associated with @cxl_nvd, so that payload return is crafted
from the cache and no CXL mailbox command is issued. The conversion for
get-lsa is straightforward, but the conversion for set-lsa requires an
allocation to prepend the set-lsa header to the payload.
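
As a usage sketch, here is roughly how userspace exercises the
get-config-size translation through the LIBNVDIMM ioctl UAPI
(hypothetical /dev/nmem0 device node, error handling elided; not part
of this patch):

/* query the label storage area size from an nvdimm device */
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/ndctl.h>

int main(void)
{
	struct nd_cmd_get_config_size cmd = { 0 };
	int fd = open("/dev/nmem0", O_RDWR);	/* hypothetical device */

	if (fd < 0 || ioctl(fd, ND_IOCTL_GET_CONFIG_SIZE, &cmd) < 0)
		return 1;
	/*
	 * config_size is filled from cxlm->lsa_size and max_xfer from
	 * the mailbox payload size, per cxl_pmem_get_config_size() below
	 */
	printf("LSA size: %u max_xfer: %u\n", cmd.config_size, cmd.max_xfer);
	close(fd);
	return 0;
}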

Acked-by: Ben Widawsky <ben.widawsky@intel.com>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
---
 drivers/cxl/pmem.c |  121 ++++++++++++++++++++++++++++++++++++++++++++++++++--
 1 file changed, 117 insertions(+), 4 deletions(-)

diff --git a/drivers/cxl/pmem.c b/drivers/cxl/pmem.c
index 469b984176a2..6cc76302c8f8 100644
--- a/drivers/cxl/pmem.c
+++ b/drivers/cxl/pmem.c
@@ -52,10 +52,10 @@ static int cxl_nvdimm_probe(struct device *dev)
 {
 	struct cxl_nvdimm *cxl_nvd = to_cxl_nvdimm(dev);
 	struct cxl_memdev *cxlmd = cxl_nvd->cxlmd;
+	unsigned long flags = 0, cmd_mask = 0;
 	struct cxl_mem *cxlm = cxlmd->cxlm;
 	struct cxl_nvdimm_bridge *cxl_nvb;
 	struct nvdimm *nvdimm = NULL;
-	unsigned long flags = 0;
 	int rc = -ENXIO;
 
 	cxl_nvb = cxl_find_nvdimm_bridge();
@@ -69,8 +69,11 @@ static int cxl_nvdimm_probe(struct device *dev)
 	set_exclusive_cxl_commands(cxlm, exclusive_cmds);
 
 	set_bit(NDD_LABELING, &flags);
-	nvdimm = nvdimm_create(cxl_nvb->nvdimm_bus, cxl_nvd, NULL, flags, 0, 0,
-			       NULL);
+	set_bit(ND_CMD_GET_CONFIG_SIZE, &cmd_mask);
+	set_bit(ND_CMD_GET_CONFIG_DATA, &cmd_mask);
+	set_bit(ND_CMD_SET_CONFIG_DATA, &cmd_mask);
+	nvdimm = nvdimm_create(cxl_nvb->nvdimm_bus, cxl_nvd, NULL, flags,
+			       cmd_mask, 0, NULL);
 	dev_set_drvdata(dev, nvdimm);
 	rc = devm_add_action_or_reset(dev, unregister_nvdimm, cxl_nvd);
 out:
@@ -89,11 +92,121 @@ static struct cxl_driver cxl_nvdimm_driver = {
 	.id = CXL_DEVICE_NVDIMM,
 };
 
+static int cxl_pmem_get_config_size(struct cxl_mem *cxlm,
+				    struct nd_cmd_get_config_size *cmd,
+				    unsigned int buf_len, int *cmd_rc)
+{
+	if (sizeof(*cmd) > buf_len)
+		return -EINVAL;
+
+	*cmd = (struct nd_cmd_get_config_size) {
+		 .config_size = cxlm->lsa_size,
+		 .max_xfer = cxlm->payload_size,
+	};
+	*cmd_rc = 0;
+
+	return 0;
+}
+
+static int cxl_pmem_get_config_data(struct cxl_mem *cxlm,
+				    struct nd_cmd_get_config_data_hdr *cmd,
+				    unsigned int buf_len, int *cmd_rc)
+{
+	struct cxl_mbox_get_lsa {
+		u32 offset;
+		u32 length;
+	} get_lsa;
+	int rc;
+
+	if (sizeof(*cmd) > buf_len)
+		return -EINVAL;
+	if (struct_size(cmd, out_buf, cmd->in_length) > buf_len)
+		return -EINVAL;
+
+	get_lsa = (struct cxl_mbox_get_lsa) {
+		.offset = cmd->in_offset,
+		.length = cmd->in_length,
+	};
+
+	rc = cxl_mem_mbox_send_cmd(cxlm, CXL_MBOX_OP_GET_LSA, &get_lsa,
+				   sizeof(get_lsa), cmd->out_buf,
+				   cmd->in_length);
+	cmd->status = 0;
+	*cmd_rc = 0;
+
+	return rc;
+}
+
+static int cxl_pmem_set_config_data(struct cxl_mem *cxlm,
+				    struct nd_cmd_set_config_hdr *cmd,
+				    unsigned int buf_len, int *cmd_rc)
+{
+	struct cxl_mbox_set_lsa {
+		u32 offset;
+		u32 reserved;
+		u8 data[];
+	} *set_lsa;
+	int rc;
+
+	if (sizeof(*cmd) > buf_len)
+		return -EINVAL;
+
+	/* 4-byte status follows the input data in the payload */
+	if (struct_size(cmd, in_buf, cmd->in_length) + 4 > buf_len)
+		return -EINVAL;
+
+	set_lsa =
+		kvzalloc(struct_size(set_lsa, data, cmd->in_length), GFP_KERNEL);
+	if (!set_lsa)
+		return -ENOMEM;
+
+	*set_lsa = (struct cxl_mbox_set_lsa) {
+		.offset = cmd->in_offset,
+	};
+	memcpy(set_lsa->data, cmd->in_buf, cmd->in_length);
+
+	rc = cxl_mem_mbox_send_cmd(cxlm, CXL_MBOX_OP_SET_LSA, set_lsa,
+				   struct_size(set_lsa, data, cmd->in_length),
+				   NULL, 0);
+
+	/* set "firmware" status */
+	*(u32 *) &cmd->in_buf[cmd->in_length] = 0;
+	*cmd_rc = 0;
+	kvfree(set_lsa);
+
+	return rc;
+}
+
+static int cxl_pmem_nvdimm_ctl(struct nvdimm *nvdimm, unsigned int cmd,
+			       void *buf, unsigned int buf_len, int *cmd_rc)
+{
+	struct cxl_nvdimm *cxl_nvd = nvdimm_provider_data(nvdimm);
+	unsigned long cmd_mask = nvdimm_cmd_mask(nvdimm);
+	struct cxl_memdev *cxlmd = cxl_nvd->cxlmd;
+	struct cxl_mem *cxlm = cxlmd->cxlm;
+
+	if (!test_bit(cmd, &cmd_mask))
+		return -ENOTTY;
+
+	switch (cmd) {
+	case ND_CMD_GET_CONFIG_SIZE:
+		return cxl_pmem_get_config_size(cxlm, buf, buf_len, cmd_rc);
+	case ND_CMD_GET_CONFIG_DATA:
+		return cxl_pmem_get_config_data(cxlm, buf, buf_len, cmd_rc);
+	case ND_CMD_SET_CONFIG_DATA:
+		return cxl_pmem_set_config_data(cxlm, buf, buf_len, cmd_rc);
+	default:
+		return -ENOTTY;
+	}
+}
+
 static int cxl_pmem_ctl(struct nvdimm_bus_descriptor *nd_desc,
 			struct nvdimm *nvdimm, unsigned int cmd, void *buf,
 			unsigned int buf_len, int *cmd_rc)
 {
-	return -ENOTTY;
+	if (!nvdimm)
+		return -ENOTTY;
+	return cxl_pmem_nvdimm_ctl(nvdimm, cmd, buf, buf_len, cmd_rc);
 }
 
 static bool online_nvdimm_bus(struct cxl_nvdimm_bridge *cxl_nvb)

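A note on the set-config-data layout handled above: in the LIBNVDIMM
UAPI the 4-byte firmware status trails the variable-length input
buffer, which is why the length check adds 4 and the status write lands
at &cmd->in_buf[cmd->in_length]. A caller-side sketch of that layout
(hypothetical helper, minimal error handling; not part of this patch):

/* write 'len' bytes at 'offset' into the LSA via the LIBNVDIMM UAPI */
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/ndctl.h>

static int set_config_data(int fd, unsigned int offset, const void *buf,
			   unsigned int len)
{
	struct nd_cmd_set_config_hdr *cmd;
	int rc;

	/* header, then in_buf[len], then the trailing 4-byte status */
	cmd = calloc(1, sizeof(*cmd) + len + 4);
	if (!cmd)
		return -1;
	cmd->in_offset = offset;
	cmd->in_length = len;
	memcpy(cmd->in_buf, buf, len);
	rc = ioctl(fd, ND_IOCTL_SET_CONFIG_DATA, cmd);
	/* on success the driver wrote a status word at &cmd->in_buf[len] */
	free(cmd);
	return rc;
}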

Thread overview: 64+ messages
2021-08-24 16:05 [PATCH v3 00/28] cxl_test: Enable CXL Topology and UAPI regression tests Dan Williams
2021-08-24 16:05 ` [PATCH v3 01/28] libnvdimm/labels: Introduce getters for namespace label fields Dan Williams
2021-08-24 16:05 ` [PATCH v3 02/28] libnvdimm/labels: Add isetcookie validation helper Dan Williams
2021-08-24 16:05 ` [PATCH v3 03/28] libnvdimm/labels: Introduce label setter helpers Dan Williams
2021-08-24 16:05 ` [PATCH v3 04/28] libnvdimm/labels: Add a checksum calculation helper Dan Williams
2021-08-24 16:05 ` [PATCH v3 05/28] libnvdimm/labels: Add blk isetcookie set / validation helpers Dan Williams
2021-08-24 16:05 ` [PATCH v3 06/28] libnvdimm/labels: Add blk special cases for nlabel and position helpers Dan Williams
2021-08-24 16:06 ` [PATCH v3 07/28] libnvdimm/labels: Add type-guid helpers Dan Williams
2021-08-24 16:06 ` [PATCH v3 08/28] libnvdimm/labels: Add claim class helpers Dan Williams
2021-08-24 16:06 ` [PATCH v3 09/28] libnvdimm/labels: Add address-abstraction uuid definitions Dan Williams
2021-08-24 16:06 ` [PATCH v3 10/28] libnvdimm/labels: Add uuid helpers Dan Williams
2021-08-24 16:06 ` [PATCH v3 11/28] libnvdimm/label: Add a helper for nlabel validation Dan Williams
2021-09-02 16:37   ` Jonathan Cameron
2021-08-24 16:06 ` [PATCH v3 12/28] libnvdimm/labels: Introduce the concept of multi-range namespace labels Dan Williams
2021-09-02 16:43   ` Jonathan Cameron
2021-08-24 16:06 ` [PATCH v3 13/28] libnvdimm/label: Define CXL region labels Dan Williams
2021-09-02 16:36   ` Jonathan Cameron
2021-09-02 16:41     ` Jonathan Cameron
2021-09-03  3:58       ` Dan Williams
2021-08-24 16:06 ` [PATCH v3 14/28] libnvdimm/labels: Introduce CXL labels Dan Williams
2021-09-03 17:00   ` Dan Williams
2021-08-24 16:06 ` [PATCH v3 15/28] cxl/pci: Make 'struct cxl_mem' device type generic Dan Williams
2021-09-02 16:55   ` Jonathan Cameron
2021-09-02 17:34     ` Dan Williams
2021-08-24 16:06 ` [PATCH v3 16/28] cxl/mbox: Introduce the mbox_send operation Dan Williams
2021-09-02 17:07   ` Jonathan Cameron
2021-08-24 16:06 ` [PATCH v3 17/28] cxl/mbox: Move mailbox and other non-PCI specific infrastructure to the core Dan Williams
2021-09-02 17:56   ` Jonathan Cameron
2021-09-02 18:56     ` Dan Williams
2021-08-24 16:07 ` [PATCH v3 18/28] cxl/pci: Use module_pci_driver Dan Williams
2021-09-02 17:58   ` Jonathan Cameron
2021-08-24 16:07 ` [PATCH v3 19/28] cxl/mbox: Convert 'enabled_cmds' to DECLARE_BITMAP Dan Williams
2021-09-02 17:59   ` Jonathan Cameron
2021-08-24 16:07 ` [PATCH v3 20/28] cxl/mbox: Add exclusive kernel command support Dan Williams
2021-09-02 18:09   ` Jonathan Cameron
2021-09-03 20:47     ` Dan Williams
2021-08-24 16:07 ` Dan Williams [this message]
2021-09-02 18:22   ` [PATCH v3 21/28] cxl/pmem: Translate NVDIMM label commands to CXL label commands Jonathan Cameron
2021-09-03 21:09     ` Dan Williams
2021-08-24 16:07 ` [PATCH v3 22/28] cxl/pmem: Add support for multiple nvdimm-bridge objects Dan Williams
2021-09-03 11:15   ` Jonathan Cameron
2021-08-24 16:07 ` [PATCH v3 23/28] cxl/acpi: Do not add DSDT disabled ACPI0016 host bridge ports Dan Williams
2021-09-02 18:30   ` Jonathan Cameron
2021-09-03 17:51     ` Dan Williams
2021-08-24 16:07 ` [PATCH v3 24/28] tools/testing/cxl: Introduce a mocked-up CXL port hierarchy Dan Williams
2021-09-03 12:52   ` Jonathan Cameron
2021-09-03 21:49     ` Dan Williams
2021-09-06  8:32       ` Jonathan Cameron
2021-09-07 15:57         ` Dan Williams
2021-08-24 16:07 ` [PATCH v3 25/28] cxl/bus: Populate the target list at decoder create Dan Williams
2021-09-03 12:59   ` Jonathan Cameron
2021-09-03 22:43     ` Dan Williams
2021-09-06  8:52       ` Jonathan Cameron
2021-08-24 16:07 ` [PATCH v3 26/28] cxl/mbox: Move command definitions to common location Dan Williams
2021-09-03 13:04   ` Jonathan Cameron
2021-08-24 16:07 ` [PATCH v3 27/28] tools/testing/cxl: Introduce a mock memory device + driver Dan Williams
2021-09-03 13:21   ` Jonathan Cameron
2021-09-03 23:33     ` Dan Williams
2021-09-06  8:57       ` Jonathan Cameron
2021-08-24 16:07 ` [PATCH v3 28/28] cxl/core: Split decoder setup into alloc + add Dan Williams
2021-09-03 13:33   ` Jonathan Cameron
2021-09-03 16:26     ` Dan Williams
2021-09-03 18:01       ` Jonathan Cameron
2021-09-04  0:27         ` Dan Williams
