nvdimm.lists.linux.dev archive mirror
* [PATCH RFC v3] testing/nvdimm: Add test module for non-nfit platforms
@ 2020-10-06  1:00 Santosh Sivaraj
  2020-10-07  4:22 ` [PATCH RFC ndctl 1/9] libndctl: test enablement for non-nfit devices Santosh Sivaraj
  2020-12-07 22:00 ` [PATCH RFC v3] testing/nvdimm: Add test module for non-nfit platforms Dan Williams
  0 siblings, 2 replies; 13+ messages in thread
From: Santosh Sivaraj @ 2020-10-06  1:00 UTC (permalink / raw)
  To: Linux NVDIMM, Vishal Verma, Aneesh Kumar K.V, Vaibhav Jain,
	Shivaprasad G Bhat, Harish Sriram
  Cc: Santosh Sivaraj

The current test module cannot be used for testing (make check) on platforms
that do not have support for NFIT. In order to get the ndctl tests working,
we need a module which can emulate NVDIMM devices without relying on
ACPI/NFIT.

The aim of this proposed module is to implement functionality similar to that
of the existing module, but without the ACPI dependencies. Currently,
interleaving and error injection are not implemented.

Corresponding changes to ndctl are also required, to skip tests that depend
on nfit attributes; those will be sent as a reply to this patch.

Signed-off-by: Santosh Sivaraj <santosh@fossix.org>
---
 tools/testing/nvdimm/config_check.c |   3 +-
 tools/testing/nvdimm/test/Kbuild    |   6 +-
 tools/testing/nvdimm/test/ndtest.c  | 819 ++++++++++++++++++++++++++++
 tools/testing/nvdimm/test/ndtest.h  |  65 +++
 4 files changed, 891 insertions(+), 2 deletions(-)
 create mode 100644 tools/testing/nvdimm/test/ndtest.c
 create mode 100644 tools/testing/nvdimm/test/ndtest.h

diff --git a/tools/testing/nvdimm/config_check.c b/tools/testing/nvdimm/config_check.c
index cac891028cd1..3e3a5f518864 100644
--- a/tools/testing/nvdimm/config_check.c
+++ b/tools/testing/nvdimm/config_check.c
@@ -12,7 +12,8 @@ void check(void)
 	BUILD_BUG_ON(!IS_MODULE(CONFIG_ND_BTT));
 	BUILD_BUG_ON(!IS_MODULE(CONFIG_ND_PFN));
 	BUILD_BUG_ON(!IS_MODULE(CONFIG_ND_BLK));
-	BUILD_BUG_ON(!IS_MODULE(CONFIG_ACPI_NFIT));
+	if (IS_ENABLED(CONFIG_ACPI_NFIT))
+		BUILD_BUG_ON(!IS_MODULE(CONFIG_ACPI_NFIT));
 	BUILD_BUG_ON(!IS_MODULE(CONFIG_DEV_DAX));
 	BUILD_BUG_ON(!IS_MODULE(CONFIG_DEV_DAX_PMEM));
 }
diff --git a/tools/testing/nvdimm/test/Kbuild b/tools/testing/nvdimm/test/Kbuild
index 75baebf8f4ba..197bcb2b7f35 100644
--- a/tools/testing/nvdimm/test/Kbuild
+++ b/tools/testing/nvdimm/test/Kbuild
@@ -5,5 +5,9 @@ ccflags-y += -I$(srctree)/drivers/acpi/nfit/
 obj-m += nfit_test.o
 obj-m += nfit_test_iomap.o
 
-nfit_test-y := nfit.o
+ifeq  ($(CONFIG_ACPI_NFIT),m)
+	nfit_test-y := nfit.o
+else
+	nfit_test-y := ndtest.o
+endif
 nfit_test_iomap-y := iomap.o
diff --git a/tools/testing/nvdimm/test/ndtest.c b/tools/testing/nvdimm/test/ndtest.c
new file mode 100644
index 000000000000..415a40345584
--- /dev/null
+++ b/tools/testing/nvdimm/test/ndtest.c
@@ -0,0 +1,819 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#include <linux/platform_device.h>
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/genalloc.h>
+#include <linux/vmalloc.h>
+#include <linux/dma-mapping.h>
+#include <linux/list_sort.h>
+#include <linux/libnvdimm.h>
+#include <linux/ndctl.h>
+#include <nd-core.h>
+#include <linux/printk.h>
+
+#include "../watermark.h"
+#include "nfit_test.h"
+#include "ndtest.h"
+
+enum {
+	DIMM_SIZE = SZ_32M,
+	LABEL_SIZE = SZ_128K,
+	NUM_INSTANCES = 2,
+	NUM_DCR = 4,
+};
+
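+/*
+ * Pack a (node, socket, imc, chan, dimm) tuple into an NFIT-style DIMM
+ * handle: 12 bits of node id, then 4 bits each for the socket, memory
+ * controller, channel and dimm numbers.
+ */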
+#define NFIT_DIMM_HANDLE(node, socket, imc, chan, dimm)	 \
+	(((node & 0xfff) << 16) | ((socket & 0xf) << 12) \
+	 | ((imc & 0xf) << 8) | ((chan & 0xf) << 4) | (dimm & 0xf))
+
+static struct ndtest_dimm dimm_group1[] = {
+	{
+		.type = NDTEST_REGION_TYPE_BLK | NDTEST_REGION_TYPE_PMEM,
+		.size = DIMM_SIZE,
+		.handle = NFIT_DIMM_HANDLE(0, 0, 0, 0, 0),
+		.uuid_str = "1e5c75d2-b618-11ea-9aa3-507b9ddc0f72",
+		.physical_id = 0,
+	},
+	{
+		.type = NDTEST_REGION_TYPE_PMEM,
+		.size = DIMM_SIZE,
+		.handle = NFIT_DIMM_HANDLE(0, 0, 0, 0, 1),
+		.uuid_str = "1c4d43ac-b618-11ea-be80-507b9ddc0f72",
+		.physical_id = 1,
+	},
+	{
+		.type = NDTEST_REGION_TYPE_PMEM,
+		.size = DIMM_SIZE * 2,
+		.handle = NFIT_DIMM_HANDLE(0, 0, 1, 0, 0),
+		.uuid_str = "a9f17ffc-b618-11ea-b36d-507b9ddc0f72",
+		.physical_id = 2,
+	},
+	{
+		.type = NDTEST_REGION_TYPE_BLK,
+		.size = DIMM_SIZE,
+		.handle = NFIT_DIMM_HANDLE(0, 0, 1, 0, 1),
+		.uuid_str = "b6b83b22-b618-11ea-8aae-507b9ddc0f72",
+		.physical_id = 3,
+	},
+	{
+		.type = NDTEST_REGION_TYPE_PMEM,
+		.size = DIMM_SIZE,
+		.handle = NFIT_DIMM_HANDLE(0, 1, 0, 0, 0),
+		.uuid_str = "bf9baaee-b618-11ea-b181-507b9ddc0f72",
+		.physical_id = 4,
+	},
+};
+
+static struct ndtest_dimm dimm_group2[] = {
+	{
+		.type = NDTEST_REGION_TYPE_PMEM,
+		.size = DIMM_SIZE,
+		.handle = NFIT_DIMM_HANDLE(1, 0, 0, 0, 0),
+		.uuid_str = "ca0817e2-b618-11ea-9db3-507b9ddc0f72",
+		.physical_id = 0,
+	},
+};
+
+static struct ndtest_config bus_configs[NUM_INSTANCES] = {
+	/* bus 1 */
+	{
+		.dimm_start = 0,
+		.dimm_count = ARRAY_SIZE(dimm_group1),
+		.dimm = dimm_group1,
+	},
+	/* bus 2 */
+	{
+		.dimm_start = ARRAY_SIZE(dimm_group1),
+		.dimm_count = ARRAY_SIZE(dimm_group2),
+		.dimm = dimm_group2,
+	},
+};
+
+static DEFINE_SPINLOCK(ndtest_lock);
+static struct ndtest_priv *instances[NUM_INSTANCES];
+static struct class *ndtest_dimm_class;
+static struct gen_pool *ndtest_pool;
+
+static inline struct ndtest_priv *to_ndtest_priv(struct device *dev)
+{
+	struct platform_device *pdev = to_platform_device(dev);
+
+	return container_of(pdev, struct ndtest_priv, pdev);
+}
+
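+/*
+ * Label storage area accessors: reads and writes are clamped to
+ * LABEL_SIZE, and (mirroring the nfit_test convention) the return
+ * value is the number of bytes of the buffer left unconsumed.
+ */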
+static int ndtest_config_get(struct ndtest_dimm *p, unsigned int buf_len,
+			     struct nd_cmd_get_config_data_hdr *hdr)
+{
+	unsigned int len;
+
+	if ((hdr->in_offset + hdr->in_length) > LABEL_SIZE)
+		return -EINVAL;
+
+	hdr->status = 0;
+	len = min(hdr->in_length, LABEL_SIZE - hdr->in_offset);
+	memcpy(hdr->out_buf, p->label_area + hdr->in_offset, len);
+
+	return buf_len - len;
+}
+
+static int ndtest_config_set(struct ndtest_dimm *p, unsigned int buf_len,
+			     struct nd_cmd_set_config_hdr *hdr)
+{
+	unsigned int len;
+
+	if ((hdr->in_offset + hdr->in_length) > LABEL_SIZE)
+		return -EINVAL;
+
+	len = min(hdr->in_length, LABEL_SIZE - hdr->in_offset);
+	memcpy(p->label_area + hdr->in_offset, hdr->in_buf, len);
+
+	return buf_len - len;
+}
+
+static int ndtest_ctl(struct nvdimm_bus_descriptor *nd_desc,
+		     struct nvdimm *nvdimm, unsigned int cmd, void *buf,
+		     unsigned int buf_len, int *cmd_rc)
+{
+	struct nd_cmd_get_config_size *size;
+	struct ndtest_dimm *p;
+	int _cmd_rc;
+
+	if (!cmd_rc)
+		cmd_rc = &_cmd_rc;
+
+	*cmd_rc = 0;
+
+	if (!nvdimm)
+		return -EINVAL;
+
+	p = nvdimm_provider_data(nvdimm);
+	if (!p)
+		return -EINVAL;
+
+	/*
+	 * Failures for a DIMM can be injected using fail_cmd and
+	 * fail_cmd_code; see the device attributes below.
+	 */
+	if (p->fail_cmd)
+		return p->fail_cmd_code ? p->fail_cmd_code : -EIO;
+
+	switch (cmd) {
+	case ND_CMD_GET_CONFIG_SIZE:
+		size = (struct nd_cmd_get_config_size *) buf;
+		size->status = 0;
+		size->max_xfer = 8;
+		size->config_size = p->config_size;
+		*cmd_rc = 0;
+		break;
+
+	case ND_CMD_GET_CONFIG_DATA:
+		*cmd_rc = ndtest_config_get(p, buf_len, buf);
+		break;
+
+	case ND_CMD_SET_CONFIG_DATA:
+		*cmd_rc = ndtest_config_set(p, buf_len, buf);
+		break;
+	default:
+		dev_dbg(p->dev, "invalid command %u\n", cmd);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static ssize_t handle_show(struct device *dev, struct device_attribute *attr,
+		char *buf)
+{
+	struct ndtest_dimm *dimm = dev_get_drvdata(dev);
+
+	return sprintf(buf, "%#x\n", dimm->handle);
+}
+static DEVICE_ATTR_RO(handle);
+
+static ssize_t fail_cmd_show(struct device *dev, struct device_attribute *attr,
+		char *buf)
+{
+	struct ndtest_dimm *dimm = dev_get_drvdata(dev);
+
+	return sprintf(buf, "%#lx\n", dimm->fail_cmd);
+}
+
+static ssize_t fail_cmd_store(struct device *dev, struct device_attribute *attr,
+		const char *buf, size_t size)
+{
+	struct ndtest_dimm *dimm = dev_get_drvdata(dev);
+	long val;
+	ssize_t rc;
+
+	rc = kstrtol(buf, 0, &val);
+	if (rc)
+		return rc;
+
+	dimm->fail_cmd = val;
+	return size;
+}
+static DEVICE_ATTR_RW(fail_cmd);
+
+static ssize_t fail_cmd_code_show(struct device *dev, struct device_attribute *attr,
+		char *buf)
+{
+	struct ndtest_dimm *dimm = dev_get_drvdata(dev);
+
+	return sprintf(buf, "%d\n", dimm->fail_cmd_code);
+}
+
+static ssize_t fail_cmd_code_store(struct device *dev, struct device_attribute *attr,
+		const char *buf, size_t size)
+{
+	struct ndtest_dimm *dimm = dev_get_drvdata(dev);
+	long val;
+	ssize_t rc;
+
+	rc = kstrtol(buf, 0, &val);
+	if (rc)
+		return rc;
+
+	dimm->fail_cmd_code = val;
+	return size;
+}
+static DEVICE_ATTR_RW(fail_cmd_code);
+
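+/*
+ * Usage sketch (the device name is illustrative): writing any non-zero
+ * value to fail_cmd makes every subsequent command to this DIMM fail,
+ * returning fail_cmd_code if it is set, -EIO otherwise:
+ *
+ *   echo 1 > /sys/class/nfit_test_dimm/test_dimm0/fail_cmd
+ *   echo -22 > /sys/class/nfit_test_dimm/test_dimm0/fail_cmd_code
+ */
+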
+static struct attribute *dimm_attributes[] = {
+	&dev_attr_handle.attr,
+	&dev_attr_fail_cmd.attr,
+	&dev_attr_fail_cmd_code.attr,
+	NULL,
+};
+
+static struct attribute_group dimm_attribute_group = {
+	.attrs = dimm_attributes,
+};
+
+static const struct attribute_group *dimm_attribute_groups[] = {
+	&dimm_attribute_group,
+	NULL,
+};
+
+static void put_dimms(void *data)
+{
+	struct ndtest_priv *p = data;
+	int i;
+
+	for (i = 0; i < p->config->dimm_count; i++)
+		if (p->config->dimm[i].dev) {
+			device_unregister(p->config->dimm[i].dev);
+			p->config->dimm[i].dev = NULL;
+		}
+}
+
+#define NDTEST_SCM_DIMM_CMD_MASK	   \
+	((1ul << ND_CMD_GET_CONFIG_SIZE) | \
+	 (1ul << ND_CMD_GET_CONFIG_DATA) | \
+	 (1ul << ND_CMD_SET_CONFIG_DATA))
+
+static ssize_t phys_id_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct nvdimm *nvdimm = to_nvdimm(dev);
+	struct ndtest_dimm *dimm = nvdimm_provider_data(nvdimm);
+
+	return sprintf(buf, "%#x\n", dimm->physical_id);
+}
+static DEVICE_ATTR_RO(phys_id);
+
+static ssize_t vendor_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	return sprintf(buf, "0x1234567\n");
+}
+static DEVICE_ATTR_RO(vendor);
+
+static ssize_t id_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct nvdimm *nvdimm = to_nvdimm(dev);
+	struct ndtest_dimm *dimm = nvdimm_provider_data(nvdimm);
+
+	return sprintf(buf, "%04x-%02x-%04x-%08x\n", 0xabcd,
+		       0xa, 2016, ~(dimm->handle));
+}
+static DEVICE_ATTR_RO(id);
+
+static ssize_t nvdimm_handle_show(struct device *dev,
+				  struct device_attribute *attr, char *buf)
+{
+	struct nvdimm *nvdimm = to_nvdimm(dev);
+	struct ndtest_dimm *dimm = nvdimm_provider_data(nvdimm);
+
+	return sprintf(buf, "%#x\n", dimm->handle);
+}
+
+static struct device_attribute dev_attr_nvdimm_show_handle =  {
+	.attr	= { .name = "handle", .mode = 0444 },
+	.show	= nvdimm_handle_show,
+};
+
+static ssize_t subsystem_vendor_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	return sprintf(buf, "0x%04x\n", 0);
+}
+static DEVICE_ATTR_RO(subsystem_vendor);
+
+static ssize_t dirty_shutdown_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	return sprintf(buf, "%d\n", 42);
+}
+static DEVICE_ATTR_RO(dirty_shutdown);
+
+static struct attribute *ndtest_nvdimm_attributes[] = {
+	&dev_attr_nvdimm_show_handle.attr,
+	&dev_attr_vendor.attr,
+	&dev_attr_id.attr,
+	&dev_attr_phys_id.attr,
+	&dev_attr_subsystem_vendor.attr,
+	&dev_attr_dirty_shutdown.attr,
+	NULL,
+};
+
+static umode_t ndtest_nvdimm_attr_visible(struct kobject *kobj,
+					struct attribute *a, int n)
+{
+	return a->mode;
+}
+
+static const struct attribute_group ndtest_nvdimm_attribute_group = {
+	.attrs = ndtest_nvdimm_attributes,
+	.is_visible = ndtest_nvdimm_attr_visible,
+};
+
+static const struct attribute_group *ndtest_nvdimm_attribute_groups[] = {
+	&ndtest_nvdimm_attribute_group,
+	NULL,
+};
+
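+/*
+ * BLK aperture I/O for the emulated DIMM: the "aperture" is plain
+ * memory mapped at region enable time, so a write is a memcpy into the
+ * mapping and a read is a memcpy out (plus a cache invalidate), both
+ * serialized with the usual region lane locking.
+ */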
+static int ndtest_blk_do_io(struct nd_blk_region *ndbr, resource_size_t dpa,
+		void *iobuf, u64 len, int rw)
+{
+	struct ndtest_dimm *dimm = ndbr->blk_provider_data;
+	struct ndtest_blk_mmio *mmio = dimm->mmio;
+	struct nd_region *nd_region = &ndbr->nd_region;
+	unsigned int lane;
+
+	lane = nd_region_acquire_lane(nd_region);
+
+	if (rw)
+		memcpy(mmio->base + dpa, iobuf, len);
+	else {
+		memcpy(iobuf, mmio->base + dpa, len);
+		arch_invalidate_pmem(mmio->base + dpa, len);
+	}
+
+	nd_region_release_lane(nd_region, lane);
+
+	return 0;
+}
+
+static int ndtest_blk_region_enable(struct nvdimm_bus *nvdimm_bus,
+				    struct device *dev)
+{
+	struct nd_blk_region *ndbr = to_nd_blk_region(dev);
+	struct nvdimm *nvdimm;
+	struct ndtest_dimm *p;
+	struct ndtest_blk_mmio *mmio;
+
+	nvdimm = nd_blk_region_to_dimm(ndbr);
+	p = nvdimm_provider_data(nvdimm);
+
+	nd_blk_region_set_provider_data(ndbr, p);
+	p->region = to_nd_region(dev);
+
+	mmio = devm_kzalloc(dev, sizeof(struct ndtest_blk_mmio), GFP_KERNEL);
+	if (!mmio)
+		return -ENOMEM;
+
+	mmio->base = devm_nvdimm_memremap(dev, p->address, 12,
+					 nd_blk_memremap_flags(ndbr));
+	if (!mmio->base) {
+		dev_err(dev, "%s failed to map blk dimm\n", nvdimm_name(nvdimm));
+		return -ENOMEM;
+	}
+
+	p->mmio = mmio;
+
+	return 0;
+}
+
+static struct nfit_test_resource *ndtest_resource_lookup(resource_size_t addr)
+{
+	int i;
+
+	for (i = 0; i < NUM_INSTANCES; i++) {
+		struct nfit_test_resource *n, *nfit_res = NULL;
+		struct ndtest_priv *t = instances[i];
+
+		if (!t)
+			continue;
+		spin_lock(&ndtest_lock);
+		list_for_each_entry(n, &t->resources, list) {
+			if (addr >= n->res.start && (addr < n->res.start
+						+ resource_size(&n->res))) {
+				nfit_res = n;
+				break;
+			} else if (addr >= (unsigned long) n->buf
+					&& (addr < (unsigned long) n->buf
+						+ resource_size(&n->res))) {
+				nfit_res = n;
+				break;
+			}
+		}
+		spin_unlock(&ndtest_lock);
+		if (nfit_res)
+			return nfit_res;
+	}
+
+	pr_warn("Failed to get resource\n");
+
+	return NULL;
+}
+
+static void ndtest_release_resource(void *data)
+{
+	struct nfit_test_resource *res  = data;
+
+	spin_lock(&ndtest_lock);
+	list_del(&res->list);
+	spin_unlock(&ndtest_lock);
+
+	if (resource_size(&res->res) >= DIMM_SIZE)
+		gen_pool_free(ndtest_pool, res->res.start,
+				resource_size(&res->res));
+	vfree(res->buf);
+	kfree(res);
+}
+
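+/*
+ * Allocate simulated backing store for a DIMM or label area.  DIMM-sized
+ * allocations get a fake physical address carved out of the gen_pool
+ * seeded at SZ_4G, so the libnvdimm core sees aligned, non-overlapping
+ * ranges; smaller (label) allocations simply reuse the vmalloc address.
+ */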
+static void *ndtest_alloc_resource(struct ndtest_priv *p, size_t size,
+				   dma_addr_t *dma)
+{
+	dma_addr_t __dma = 0;
+	void *buf;
+	struct nfit_test_resource *res;
+	struct genpool_data_align data = {
+		.align = SZ_128M,
+	};
+
+	res = kzalloc(sizeof(*res), GFP_KERNEL);
+	if (!res)
+		return NULL;
+
+	buf = vmalloc(size);
+	if (!buf)
+		goto buf_err;
+
+	if (size >= DIMM_SIZE)
+		__dma = gen_pool_alloc_algo(ndtest_pool, size,
+					  gen_pool_first_fit_align, &data);
+	else
+		__dma = (unsigned long) buf;
+
+	if (!__dma)
+		goto buf_err;
+
+	INIT_LIST_HEAD(&res->list);
+	res->dev = &p->pdev.dev;
+	res->buf = buf;
+	res->res.start = __dma;
+	res->res.end = __dma + size - 1;
+	res->res.name = "NFIT";
+	spin_lock_init(&res->lock);
+	INIT_LIST_HEAD(&res->requests);
+	spin_lock(&ndtest_lock);
+	list_add(&res->list, &p->resources);
+	spin_unlock(&ndtest_lock);
+
+	if (dma)
+		*dma = __dma;
+
+	if (!devm_add_action(&p->pdev.dev, ndtest_release_resource, res))
+		return res->buf;
+
+buf_err:
+	if (__dma && size >= DIMM_SIZE)
+		gen_pool_free(ndtest_pool, __dma, size);
+	if (buf)
+		vfree(buf);
+	kfree(res);
+
+	return NULL;
+}
+
+static int ndtest_dimm_register(struct ndtest_priv *priv,
+				struct ndtest_dimm *dimm, int id)
+{
+	struct device *dev = &priv->pdev.dev;
+	struct nd_mapping_desc mapping;
+	struct nd_region_desc *ndr_desc;
+	struct nd_blk_region_desc ndbr_desc;
+	unsigned long dimm_flags = 0;
+
+	if (dimm->type == NDTEST_REGION_TYPE_PMEM) {
+		set_bit(NDD_ALIASING, &dimm_flags);
+		if (priv->pdev.id == 0)
+			set_bit(NDD_LABELING, &dimm_flags);
+	}
+
+	dimm->nvdimm = nvdimm_create(priv->bus, dimm,
+				    ndtest_nvdimm_attribute_groups, dimm_flags,
+				    NDTEST_SCM_DIMM_CMD_MASK, 0, NULL);
+	if (!dimm->nvdimm) {
+		dev_err(dev, "Error creating DIMM object for %pOF\n", priv->dn);
+		return -ENXIO;
+	}
+
+	memset(&ndbr_desc, 0, sizeof(ndbr_desc));
+
+	/* now add the region */
+	memset(&mapping, 0, sizeof(mapping));
+	mapping.nvdimm = dimm->nvdimm;
+	mapping.start = dimm->res.start;
+	mapping.size = dimm->size;
+
+	ndr_desc = &ndbr_desc.ndr_desc;
+	memset(ndr_desc, 0, sizeof(*ndr_desc));
+	ndr_desc->res = &dimm->res;
+	ndr_desc->provider_data = dimm;
+	ndr_desc->mapping = &mapping;
+	ndr_desc->num_mappings = 1;
+	ndr_desc->nd_set = &dimm->nd_set;
+	ndr_desc->num_lanes = 1;
+
+	if (dimm->type & NDTEST_REGION_TYPE_BLK) {
+		ndbr_desc.enable = ndtest_blk_region_enable;
+		ndbr_desc.do_io = ndtest_blk_do_io;
+		dimm->region = nvdimm_blk_region_create(priv->bus, ndr_desc);
+	} else
+		dimm->region = nvdimm_pmem_region_create(priv->bus, ndr_desc);
+
+	if (!dimm->region) {
+		dev_err(dev, "Error registering region %pR\n", ndr_desc->res);
+		return -ENXIO;
+	}
+
+	dimm->dev = device_create_with_groups(ndtest_dimm_class,
+					     &priv->pdev.dev,
+					     0, dimm, dimm_attribute_groups,
+					     "test_dimm%d", id);
+	if (!dimm->dev)
+		return -ENOMEM;
+
+	return 0;
+}
+
+static int ndtest_nvdimm_init(struct ndtest_priv *p)
+{
+	struct ndtest_dimm *d;
+	u64 uuid[2];
+	void *res;
+	int i, id;
+
+	for (i = 0; i < p->config->dimm_count; i++) {
+		d = &p->config->dimm[i];
+		d->id = id = p->config->dimm_start + i;
+		res = ndtest_alloc_resource(p, LABEL_SIZE, NULL);
+		if (!res)
+			return -ENOMEM;
+
+		d->label_area = res;
+		sprintf(d->label_area, "label%d", id);
+		d->config_size = LABEL_SIZE;
+		d->res.name = p->pdev.name;
+
+		if (uuid_parse(d->uuid_str, (uuid_t *) uuid))
+			pr_err("failed to parse UUID\n");
+
+		d->nd_set.cookie1 = cpu_to_le64(uuid[0]);
+		d->nd_set.cookie2 = cpu_to_le64(uuid[1]);
+
+		switch (d->type) {
+		case NDTEST_REGION_TYPE_PMEM:
+			/* setup the resource */
+			res = ndtest_alloc_resource(p, d->size,
+						    &d->res.start);
+			if (!res)
+				return -ENOMEM;
+
+			d->res.end = d->res.start + d->size - 1;
+			break;
+		case NDTEST_REGION_TYPE_BLK:
+			WARN_ON(p->nblks > NUM_DCR);
+
+			if (!ndtest_alloc_resource(p, d->size,
+						   &p->dimm_dma[p->nblks]))
+				return -ENOMEM;
+
+			if (!ndtest_alloc_resource(p, LABEL_SIZE,
+				    &p->label_dma[p->nblks]))
+				return -ENOMEM;
+
+			if (!ndtest_alloc_resource(p, LABEL_SIZE,
+				    &p->dcr_dma[p->nblks]))
+				return -ENOMEM;
+
+			d->address = p->dimm_dma[p->nblks];
+			p->nblks++;
+
+			break;
+		}
+
+		ndtest_dimm_register(p, d, id);
+	}
+
+	return 0;
+}
+
+static int ndtest_bus_register(struct ndtest_priv *p,
+			       struct ndtest_config *config)
+{
+	p->config = &config[p->pdev.id];
+
+	p->bus_desc.ndctl = ndtest_ctl;
+	p->bus_desc.module = THIS_MODULE;
+	p->bus_desc.provider_name = NULL;
+	p->bus_desc.cmd_mask =
+		1UL << ND_CMD_ARS_CAP | 1UL << ND_CMD_ARS_START |
+		1UL << ND_CMD_ARS_STATUS | 1UL << ND_CMD_CLEAR_ERROR |
+		1UL << ND_CMD_CALL;
+
+	p->bus = nvdimm_bus_register(&p->pdev.dev, &p->bus_desc);
+	if (!p->bus) {
+		dev_err(&p->pdev.dev, "Error creating nvdimm bus %pOF\n", p->dn);
+		return -ENOMEM;
+	}
+
+	return 0;
+}
+
+static int ndtest_probe(struct platform_device *pdev)
+{
+	struct ndtest_priv *p;
+	int rc;
+
+	p = to_ndtest_priv(&pdev->dev);
+	rc = ndtest_bus_register(p, bus_configs);
+	if (rc)
+		return rc;
+
+	p->dcr_dma = devm_kcalloc(&p->pdev.dev, NUM_DCR,
+				 sizeof(dma_addr_t), GFP_KERNEL);
+	p->label_dma = devm_kcalloc(&p->pdev.dev, NUM_DCR,
+				   sizeof(dma_addr_t), GFP_KERNEL);
+	p->dimm_dma = devm_kcalloc(&p->pdev.dev, NUM_DCR,
+				  sizeof(dma_addr_t), GFP_KERNEL);
+
+	if (!p->dcr_dma || !p->label_dma || !p->dimm_dma) {
+		rc = -ENOMEM;
+		goto err;
+	}
+
+	rc = ndtest_nvdimm_init(p);
+	if (rc)
+		goto err;
+
+	rc = devm_add_action_or_reset(&pdev->dev, put_dimms, p);
+	if (rc)
+		goto err;
+
+	platform_set_drvdata(pdev, p);
+
+	return 0;
+
+err:
+	nvdimm_bus_unregister(p->bus);
+	return rc;
+}
+
+static int ndtest_remove(struct platform_device *pdev)
+{
+	struct ndtest_priv *p = to_ndtest_priv(&pdev->dev);
+
+	nvdimm_bus_unregister(p->bus);
+	return 0;
+}
+
+static const struct platform_device_id ndtest_id[] = {
+	{ KBUILD_MODNAME },
+	{ },
+};
+
+static struct platform_driver ndtest_driver = {
+	.probe = ndtest_probe,
+	.remove = ndtest_remove,
+	.driver = {
+		.name = KBUILD_MODNAME,
+	},
+	.id_table = ndtest_id,
+};
+
+static void ndtest_release(struct device *dev)
+{
+	struct ndtest_priv *p = to_ndtest_priv(dev);
+
+	kfree(p);
+}
+
+static __init int ndtest_init(void)
+{
+	int rc, i;
+
+	pmem_test();
+	libnvdimm_test();
+	device_dax_test();
+	dax_pmem_test();
+	dax_pmem_core_test();
+#ifdef CONFIG_DEV_DAX_PMEM_COMPAT
+	dax_pmem_compat_test();
+#endif
+
+	nfit_test_setup(ndtest_resource_lookup, NULL);
+
+	ndtest_dimm_class = class_create(THIS_MODULE, "nfit_test_dimm");
+	if (IS_ERR(ndtest_dimm_class)) {
+		rc = PTR_ERR(ndtest_dimm_class);
+		goto err_register;
+	}
+
+	ndtest_pool = gen_pool_create(ilog2(SZ_4M), NUMA_NO_NODE);
+	if (!ndtest_pool) {
+		rc = -ENOMEM;
+		goto err_register;
+	}
+
+	if (gen_pool_add(ndtest_pool, SZ_4G, SZ_4G, NUMA_NO_NODE)) {
+		rc = -ENOMEM;
+		goto err_register;
+	}
+
+	/* Each instance can be taken as a bus, which can have multiple dimms */
+	for (i = 0; i < NUM_INSTANCES; i++) {
+		struct ndtest_priv *priv;
+		struct platform_device *pdev;
+
+		priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+		if (!priv) {
+			rc = -ENOMEM;
+			goto err_register;
+		}
+
+		INIT_LIST_HEAD(&priv->resources);
+		pdev = &priv->pdev;
+		pdev->name = KBUILD_MODNAME;
+		pdev->id = i;
+		pdev->dev.release = ndtest_release;
+		rc = platform_device_register(pdev);
+		if (rc) {
+			put_device(&pdev->dev);
+			goto err_register;
+		}
+		get_device(&pdev->dev);
+
+		instances[i] = priv;
+	}
+
+	rc = platform_driver_register(&ndtest_driver);
+	if (rc)
+		goto err_register;
+
+	return 0;
+
+err_register:
+	pr_err("ndtest initialization failed\n");
+	if (ndtest_pool)
+		gen_pool_destroy(ndtest_pool);
+
+	for (i = 0; i < NUM_INSTANCES; i++)
+		if (instances[i])
+			platform_device_unregister(&instances[i]->pdev);
+
+	nfit_test_teardown();
+	for (i = 0; i < NUM_INSTANCES; i++)
+		if (instances[i])
+			put_device(&instances[i]->pdev.dev);
+
+	return rc;
+}
+
+static __exit void ndtest_exit(void)
+{
+	int i;
+
+	for (i = 0; i < NUM_INSTANCES; i++)
+		platform_device_unregister(&instances[i]->pdev);
+
+	platform_driver_unregister(&ndtest_driver);
+	nfit_test_teardown();
+
+	gen_pool_destroy(ndtest_pool);
+
+	for (i = 0; i < NUM_INSTANCES; i++)
+		put_device(&instances[i]->pdev.dev);
+	class_destroy(ndtest_dimm_class);
+}
+
+module_init(ndtest_init);
+module_exit(ndtest_exit);
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("IBM Corporation");
diff --git a/tools/testing/nvdimm/test/ndtest.h b/tools/testing/nvdimm/test/ndtest.h
new file mode 100644
index 000000000000..2e8ff749e2f4
--- /dev/null
+++ b/tools/testing/nvdimm/test/ndtest.h
@@ -0,0 +1,65 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+#ifndef NDTEST_H
+#define NDTEST_H
+
+#include <linux/platform_device.h>
+#include <linux/libnvdimm.h>
+
+enum dimm_type {
+	NDTEST_REGION_TYPE_PMEM = 0x0,
+	NDTEST_REGION_TYPE_BLK = 0x1,
+};
+
+struct ndtest_priv {
+	struct platform_device pdev;
+	struct device_node *dn;
+	struct list_head resources;
+	struct nvdimm_bus_descriptor bus_desc;
+	struct nvdimm_bus *bus;
+	struct ndtest_config *config;
+
+	dma_addr_t *dcr_dma;
+	dma_addr_t *label_dma;
+	dma_addr_t *dimm_dma;
+	bool is_volatile;
+	unsigned int flags;
+	unsigned int nblks;
+};
+
+struct ndtest_blk_mmio {
+	void __iomem *base;
+	u64 size;
+	u64 base_offset;
+	u32 line_size;
+	u32 num_lines;
+	u32 table_size;
+};
+
+struct ndtest_dimm {
+	struct resource res;
+	struct device *dev;
+	struct nvdimm *nvdimm;
+	struct nd_region *region;
+	struct nd_interleave_set nd_set;
+	struct ndtest_blk_mmio *mmio;
+
+	dma_addr_t address;
+	unsigned long config_size;
+	unsigned long fail_cmd;
+	void *label_area;
+	char *uuid_str;
+	enum dimm_type type;
+	unsigned int size;
+	unsigned int handle;
+	unsigned int physical_id;
+	int id;
+	int fail_cmd_code;
+};
+
+struct ndtest_config {
+	unsigned int dimm_count;
+	unsigned int dimm_start;
+	struct ndtest_dimm *dimm;
+};
+
+#endif /* NDTEST_H */
-- 
2.26.2

* [PATCH RFC ndctl 1/9] libndctl: test enablement for non-nfit devices
  2020-10-06  1:00 [PATCH RFC v3] testing/nvdimm: Add test module for non-nfit platforms Santosh Sivaraj
@ 2020-10-07  4:22 ` Santosh Sivaraj
  2020-10-07  4:22   ` [PATCH RFC ndctl 2/9] test/core: Don't fail if nfit module is missing Santosh Sivaraj
                     ` (7 more replies)
  2020-12-07 22:00 ` [PATCH RFC v3] testing/nvdimm: Add test module for non-nfit platforms Dan Williams
  1 sibling, 8 replies; 13+ messages in thread
From: Santosh Sivaraj @ 2020-10-07  4:22 UTC (permalink / raw)
  To: Linux NVDIMM, Vishal Verma, Vaibhav Jain, Aneesh Kumar K.V,
	Harish Sriram, Shivaprasad G Bhat
  Cc: Santosh Sivaraj

Add platform-independent attribute parsing for generic dimms, such as the
test dimms, that do not have nfit-specific attributes.
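
For example (values illustrative; attribute paths as the proposed ndtest
module exposes them under each dimm's sysfs directory), the new parser
reads:

  <dimm_base>/phys_id           -> 0x2
  <dimm_base>/handle            -> 0x100
  <dimm_base>/vendor            -> 0x1234567
  <dimm_base>/id                -> abcd-0a-07e0-fffffffe
  <dimm_base>/subsystem_vendor  -> 0x0000
  <dimm_base>/dirty_shutdown    -> 42

The manufacturing date (0x07e0) and location (0x0a) are scanned out of
the id string.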

Signed-off-by: Santosh Sivaraj <santosh@fossix.org>
---
 ndctl/lib/libndctl.c | 51 ++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 51 insertions(+)

diff --git a/ndctl/lib/libndctl.c b/ndctl/lib/libndctl.c
index 952192c..852cb4d 100644
--- a/ndctl/lib/libndctl.c
+++ b/ndctl/lib/libndctl.c
@@ -1619,6 +1619,53 @@ static int add_nfit_dimm(struct ndctl_dimm *dimm, const char *dimm_base)
 	free(path);
 	return rc;
 }
+
+static void populate_dimm_attributes(struct ndctl_dimm *dimm,
+				     const char *dimm_base)
+{
+	char buf[SYSFS_ATTR_SIZE];
+	struct ndctl_ctx *ctx = dimm->bus->ctx;
+	char *path = calloc(1, strlen(dimm_base) + 100);
+
+	if (!path)
+		return;
+
+	sprintf(path, "%s/phys_id", dimm_base);
+	if (sysfs_read_attr(ctx, path, buf) < 0)
+		goto err_read;
+	dimm->phys_id = strtoul(buf, NULL, 0);
+
+	sprintf(path, "%s/handle", dimm_base);
+	if (sysfs_read_attr(ctx, path, buf) < 0)
+		goto err_read;
+	dimm->handle = strtoul(buf, NULL, 0);
+
+	sprintf(path, "%s/vendor", dimm_base);
+	if (sysfs_read_attr(ctx, path, buf) == 0)
+		dimm->vendor_id = strtoul(buf, NULL, 0);
+
+	sprintf(path, "%s/id", dimm_base);
+	if (sysfs_read_attr(ctx, path, buf) == 0) {
+		unsigned int b[9];
+
+		dimm->unique_id = strdup(buf);
+		if (!dimm->unique_id)
+			goto err_read;
+		if (sscanf(dimm->unique_id, "%02x%02x-%02x-%02x%02x-%02x%02x%02x%02x",
+					&b[0], &b[1], &b[2], &b[3], &b[4],
+					&b[5], &b[6], &b[7], &b[8]) == 9) {
+			dimm->manufacturing_date = b[3] << 8 | b[4];
+			dimm->manufacturing_location = b[2];
+		}
+	}
+	sprintf(path, "%s/subsystem_vendor", dimm_base);
+	if (sysfs_read_attr(ctx, path, buf) == 0)
+		dimm->subsystem_vendor_id = strtoul(buf, NULL, 0);
+
+	sprintf(path, "%s/dirty_shutdown", dimm_base);
+	if (sysfs_read_attr(ctx, path, buf) == 0)
+		dimm->dirty_shutdown = strtoll(buf, NULL, 0);
+
+err_read:
+	free(path);
+}
 
 static void *add_dimm(void *parent, int id, const char *dimm_base)
 {
@@ -1694,6 +1741,10 @@ static void *add_dimm(void *parent, int id, const char *dimm_base)
 	} else
 		parse_dimm_flags(dimm, buf);
 
+	/* add the available dimm attributes, the platform can override or add
+	 * additional attributes later */
+	populate_dimm_attributes(dimm, dimm_base);
+
 	/* Check if the given dimm supports nfit */
 	if (ndctl_bus_has_nfit(bus)) {
 		dimm->formats = formats;
-- 
2.26.2

* [PATCH RFC ndctl 2/9] test/core: Don't fail if nfit module is missing
  2020-10-07  4:22 ` [PATCH RFC ndctl 1/9] libndctl: test enablement for non-nfit devices Santosh Sivaraj
@ 2020-10-07  4:22   ` Santosh Sivaraj
  2020-10-07  4:22   ` [PATCH RFC ndctl 3/9] test/libndctl: Don't compare phys-id if no-interleave support Santosh Sivaraj
                     ` (6 subsequent siblings)
  7 siblings, 0 replies; 13+ messages in thread
From: Santosh Sivaraj @ 2020-10-07  4:22 UTC (permalink / raw)
  To: Linux NVDIMM, Vishal Verma, Vaibhav Jain, Aneesh Kumar K.V,
	Harish Sriram, Shivaprasad G Bhat
  Cc: Santosh Sivaraj

This will happen on platforms that don't have ACPI support.

Signed-off-by: Santosh Sivaraj <santosh@fossix.org>
---
 test/core.c | 5 +++++
 1 file changed, 5 insertions(+)

diff --git a/test/core.c b/test/core.c
index 5118d86..e3e93ff 100644
--- a/test/core.c
+++ b/test/core.c
@@ -195,6 +195,11 @@ retry:
 
 		path = kmod_module_get_path(*mod);
 		if (!path) {
+			/* For non-nfit platforms it's ok if the nfit
+			 * module is missing */
+			if (strcmp(name, "nfit") == 0)
+				continue;
+
 			log_err(&log_ctx, "%s.ko: failed to get path\n", name);
 			break;
 		}
-- 
2.26.2

* [PATCH RFC ndctl 3/9] test/libndctl: Don't compare phys-id if no-interleave support
  2020-10-07  4:22 ` [PATCH RFC ndctl 1/9] libndctl: test enablement for non-nfit devices Santosh Sivaraj
  2020-10-07  4:22   ` [PATCH RFC ndctl 2/9] test/core: Don't fail if nfit module is missing Santosh Sivaraj
@ 2020-10-07  4:22   ` Santosh Sivaraj
  2020-10-07  4:22   ` [PATCH RFC ndctl 4/9] test/libndctl: search by handle instead of range index Santosh Sivaraj
                     ` (5 subsequent siblings)
  7 siblings, 0 replies; 13+ messages in thread
From: Santosh Sivaraj @ 2020-10-07  4:22 UTC (permalink / raw)
  To: Linux NVDIMM, Vishal Verma, Vaibhav Jain, Aneesh Kumar K.V,
	Harish Sriram, Shivaprasad G Bhat
  Cc: Santosh Sivaraj

The tests expect some regions to have the same physical id, but that
will not be the case if there is no interleave support.

Signed-off-by: Santosh Sivaraj <santosh@fossix.org>
---
 test/libndctl.c | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/test/libndctl.c b/test/libndctl.c
index 994e0fa..d508948 100644
--- a/test/libndctl.c
+++ b/test/libndctl.c
@@ -2484,7 +2484,8 @@ static int check_dimms(struct ndctl_bus *bus, struct dimm *dimms, int n,
 			return -ENXIO;
 		}
 
-		if (ndctl_dimm_get_phys_id(dimm) != dimms[i].phys_id) {
+		if (ndctl_bus_has_nfit(bus) &&
+		    ndctl_dimm_get_phys_id(dimm) != dimms[i].phys_id) {
 			fprintf(stderr, "dimm%d expected phys_id: %d got: %d\n",
 					i, dimms[i].phys_id,
 					ndctl_dimm_get_phys_id(dimm));
-- 
2.26.2

* [PATCH RFC ndctl 4/9] test/libndctl: search by handle instead of range index
  2020-10-07  4:22 ` [PATCH RFC ndctl 1/9] libndctl: test enablement for non-nfit devices Santosh Sivaraj
  2020-10-07  4:22   ` [PATCH RFC ndctl 2/9] test/core: Don't fail if nfit module is missing Santosh Sivaraj
  2020-10-07  4:22   ` [PATCH RFC ndctl 3/9] test/libndctl: Don't compare phys-id if no-interleave support Santosh Sivaraj
@ 2020-10-07  4:22   ` Santosh Sivaraj
  2020-10-07  4:22   ` [PATCH RFC ndctl 5/9] test/libndctl: skip smart tests for non-nfit platforms Santosh Sivaraj
                     ` (4 subsequent siblings)
  7 siblings, 0 replies; 13+ messages in thread
From: Santosh Sivaraj @ 2020-10-07  4:22 UTC (permalink / raw)
  To: Linux NVDIMM, Vishal Verma, Vaibhav Jain, Aneesh Kumar K.V,
	Harish Sriram, Shivaprasad G Bhat
  Cc: Santosh Sivaraj

When there is no interleave support, there is no range index.

Signed-off-by: Santosh Sivaraj <santosh@fossix.org>
---
 test/libndctl.c | 32 ++++++++++++++++++++++++++++++--
 1 file changed, 30 insertions(+), 2 deletions(-)

diff --git a/test/libndctl.c b/test/libndctl.c
index d508948..1a5a267 100644
--- a/test/libndctl.c
+++ b/test/libndctl.c
@@ -495,6 +495,26 @@ static struct ndctl_region *get_pmem_region_by_range_index(struct ndctl_bus *bus
 	return NULL;
 }
 
+static struct ndctl_region *get_pmem_region_by_dimm_handle(struct ndctl_bus *bus,
+		unsigned int handle)
+{
+	struct ndctl_region *region;
+
+	ndctl_region_foreach(bus, region) {
+		struct ndctl_mapping *map;
+
+		if (ndctl_region_get_type(region) != ND_DEVICE_REGION_PMEM)
+			continue;
+		ndctl_mapping_foreach(region, map) {
+			struct ndctl_dimm *dimm = ndctl_mapping_get_dimm(map);
+
+			if (ndctl_dimm_get_handle(dimm) == handle)
+				return region;
+		}
+	}
+	return NULL;
+}
+
 static struct ndctl_region *get_blk_region_by_dimm_handle(struct ndctl_bus *bus,
 		unsigned int handle)
 {
@@ -532,8 +552,12 @@ static int check_regions(struct ndctl_bus *bus, struct region *regions, int n,
 		struct ndctl_interleave_set *iset;
 		char devname[50];
 
-		if (strcmp(regions[i].type, "pmem") == 0)
-			region = get_pmem_region_by_range_index(bus, regions[i].range_index);
+		if (strcmp(regions[i].type, "pmem") == 0) {
+			if (ndctl_bus_has_nfit(bus))
+				region = get_pmem_region_by_range_index(bus, regions[i].range_index);
+			else
+				region = get_pmem_region_by_dimm_handle(bus, regions[i].handle);
+		}
 		else
 			region = get_blk_region_by_dimm_handle(bus, regions[i].handle);
 
@@ -2668,6 +2692,10 @@ static int do_test1(struct ndctl_ctx *ctx, struct ndctl_test *test)
 	if (ndctl_test_attempt(test, KERNEL_VERSION(4, 10, 0)))
 		dimms1[0].handle = DIMM_HANDLE(1, 0, 0, 0, 0);
 
+	if (!ndctl_bus_has_nfit(bus))
+		regions1[0].handle = DIMM_HANDLE(1, 0, 0, 0, 0);
+
+
 	rc = check_dimms(bus, dimms1, ARRAY_SIZE(dimms1), 0, 0, test);
 	if (rc)
 		return rc;
-- 
2.26.2

* [PATCH RFC ndctl 5/9] test/libndctl: skip smart tests for non-nfit platforms
  2020-10-07  4:22 ` [PATCH RFC ndctl 1/9] libndctl: test enablement for non-nfit devices Santosh Sivaraj
                     ` (2 preceding siblings ...)
  2020-10-07  4:22   ` [PATCH RFC ndctl 4/9] test/libndctl: search by handle instead of range index Santosh Sivaraj
@ 2020-10-07  4:22   ` Santosh Sivaraj
  2020-10-07  4:22   ` [PATCH RFC ndctl 6/9] test/libndctl: Don't check for two formats on a dimm Santosh Sivaraj
                     ` (3 subsequent siblings)
  7 siblings, 0 replies; 13+ messages in thread
From: Santosh Sivaraj @ 2020-10-07  4:22 UTC (permalink / raw)
  To: Linux NVDIMM, Vishal Verma, Vaibhav Jain, Aneesh Kumar K.V,
	Harish Sriram, Shivaprasad G Bhat
  Cc: Santosh Sivaraj

Signed-off-by: Santosh Sivaraj <santosh@fossix.org>
---
 test/libndctl.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/test/libndctl.c b/test/libndctl.c
index 1a5a267..93cbc7a 100644
--- a/test/libndctl.c
+++ b/test/libndctl.c
@@ -2451,7 +2451,7 @@ static int check_commands(struct ndctl_bus *bus, struct ndctl_dimm *dimm,
 	 * The kernel did not start emulating v1.2 namespace spec smart data
 	 * until 4.9.
 	 */
-	if (!ndctl_test_attempt(test, KERNEL_VERSION(4, 9, 0)))
+	if (!ndctl_test_attempt(test, KERNEL_VERSION(4, 9, 0)) || !ndctl_bus_has_nfit(bus))
 		dimm_commands &= ~((1 << ND_CMD_SMART)
 				| (1 << ND_CMD_SMART_THRESHOLD));
 
-- 
2.26.2

* [PATCH RFC ndctl 6/9] test/libndctl: Don't check for two formats on a dimm
  2020-10-07  4:22 ` [PATCH RFC ndctl 1/9] libndctl: test enablement for non-nfit devices Santosh Sivaraj
                     ` (3 preceding siblings ...)
  2020-10-07  4:22   ` [PATCH RFC ndctl 5/9] test/libndctl: skip smart tests for non-nfit platforms Santosh Sivaraj
@ 2020-10-07  4:22   ` Santosh Sivaraj
  2020-10-07  4:22   ` [PATCH RFC ndctl 7/9] test/libndctl: Don't check for error flags on non-nfit dimms Santosh Sivaraj
                     ` (2 subsequent siblings)
  7 siblings, 0 replies; 13+ messages in thread
From: Santosh Sivaraj @ 2020-10-07  4:22 UTC (permalink / raw)
  To: Linux NVDIMM, Vishal Verma, Vaibhav Jain, Aneesh Kumar K.V,
	Harish Sriram, Shivaprasad G Bhat
  Cc: Santosh Sivaraj

The number of formats is populated only when an nfit bus is present,
so skip this sub-test on all other platforms.

Signed-off-by: Santosh Sivaraj <santosh@fossix.org>
---
 test/libndctl.c | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/test/libndctl.c b/test/libndctl.c
index 93cbc7a..aaa72dc 100644
--- a/test/libndctl.c
+++ b/test/libndctl.c
@@ -2543,7 +2543,8 @@ static int check_dimms(struct ndctl_bus *bus, struct dimm *dimms, int n,
 			return -ENXIO;
 		}
 
-		if (ndctl_test_attempt(test, KERNEL_VERSION(4, 7, 0))) {
+		if (ndctl_test_attempt(test, KERNEL_VERSION(4, 7, 0)) &&
+		    ndctl_bus_has_nfit(bus)) {
 			if (ndctl_dimm_get_formats(dimm) != dimms[i].formats) {
 				fprintf(stderr, "dimm%d expected formats: %d got: %d\n",
 						i, dimms[i].formats,
-- 
2.26.2

* [PATCH RFC ndctl 7/9] test/libndctl: Don't check for error flags on non-nfit dimms
  2020-10-07  4:22 ` [PATCH RFC ndctl 1/9] libndctl: test enablement for non-nfit devices Santosh Sivaraj
                     ` (4 preceding siblings ...)
  2020-10-07  4:22   ` [PATCH RFC ndctl 6/9] test/libndctl: Don't check for two formats on a dimm Santosh Sivaraj
@ 2020-10-07  4:22   ` Santosh Sivaraj
  2020-10-07  4:22   ` [PATCH RFC ndctl 8/9] test/multi-pmem: fix for no-interleave support Santosh Sivaraj
  2020-10-07  4:22   ` [PATCH RFC ndctl 9/9] test: Disable paths which are possibly wrong Santosh Sivaraj
  7 siblings, 0 replies; 13+ messages in thread
From: Santosh Sivaraj @ 2020-10-07  4:22 UTC (permalink / raw)
  To: Linux NVDIMM, Vishal Verma, Vaibhav Jain, Aneesh Kumar K.V,
	Harish Sriram, Shivaprasad G Bhat
  Cc: Santosh Sivaraj

Signed-off-by: Santosh Sivaraj <santosh@fossix.org>
---
 test/libndctl.c | 61 +++++++++++++++++++++++++++----------------------
 1 file changed, 34 insertions(+), 27 deletions(-)

diff --git a/test/libndctl.c b/test/libndctl.c
index aaa72dc..ae87807 100644
--- a/test/libndctl.c
+++ b/test/libndctl.c
@@ -575,7 +575,8 @@ static int check_regions(struct ndctl_bus *bus, struct region *regions, int n,
 					ndctl_region_get_type_name(region));
 			return -ENXIO;
 		}
-		if (ndctl_region_get_interleave_ways(region) != regions[i].interleave_ways) {
+		if (ndctl_bus_has_nfit(bus) &&
+		    ndctl_region_get_interleave_ways(region) != regions[i].interleave_ways) {
 			fprintf(stderr, "%s: expected interleave_ways: %d got: %d\n",
 					devname, regions[i].interleave_ways,
 					ndctl_region_get_interleave_ways(region));
@@ -2516,20 +2517,21 @@ static int check_dimms(struct ndctl_bus *bus, struct dimm *dimms, int n,
 			return -ENXIO;
 		}
 
-		if (ndctl_dimm_has_errors(dimm) != !!dimms[i].flags) {
-			fprintf(stderr, "bus: %s dimm%d %s expected%s errors\n",
+		if (ndctl_bus_has_nfit(bus)) {
+			if (ndctl_dimm_has_errors(dimm) != !!dimms[i].flags) {
+				fprintf(stderr, "bus: %s dimm%d %s expected%s errors\n",
 					ndctl_bus_get_provider(bus), i,
 					ndctl_dimm_get_devname(dimm),
 					dimms[i].flags ? "" : " no");
-			return -ENXIO;
-		}
+				return -ENXIO;
+			}
 
-		if (ndctl_dimm_failed_save(dimm) != dimms[i].f_save
-				|| ndctl_dimm_failed_arm(dimm) != dimms[i].f_arm
-				|| ndctl_dimm_failed_restore(dimm) != dimms[i].f_restore
-				|| ndctl_dimm_smart_pending(dimm) != dimms[i].f_smart
-				|| ndctl_dimm_failed_flush(dimm) != dimms[i].f_flush) {
-			fprintf(stderr, "expected: %s%s%s%s%sgot: %s%s%s%s%s\n",
+			if (ndctl_dimm_failed_save(dimm) != dimms[i].f_save
+			    || ndctl_dimm_failed_arm(dimm) != dimms[i].f_arm
+			    || ndctl_dimm_failed_restore(dimm) != dimms[i].f_restore
+			    || ndctl_dimm_smart_pending(dimm) != dimms[i].f_smart
+			    || ndctl_dimm_failed_flush(dimm) != dimms[i].f_flush) {
+				fprintf(stderr, "expected: %s%s%s%s%sgot: %s%s%s%s%s\n",
 					dimms[i].f_save ? "save_fail " : "",
 					dimms[i].f_arm ? "not_armed " : "",
 					dimms[i].f_restore ? "restore_fail " : "",
@@ -2540,24 +2542,25 @@ static int check_dimms(struct ndctl_bus *bus, struct dimm *dimms, int n,
 					ndctl_dimm_failed_restore(dimm) ? "restore_fail " : "",
 					ndctl_dimm_smart_pending(dimm) ? "smart_event " : "",
 					ndctl_dimm_failed_flush(dimm) ? "flush_fail " : "");
-			return -ENXIO;
-		}
+				return -ENXIO;
+			}
 
-		if (ndctl_test_attempt(test, KERNEL_VERSION(4, 7, 0)) &&
-		    ndctl_bus_has_nfit(bus)) {
-			if (ndctl_dimm_get_formats(dimm) != dimms[i].formats) {
-				fprintf(stderr, "dimm%d expected formats: %d got: %d\n",
+			if (ndctl_test_attempt(test, KERNEL_VERSION(4, 7, 0))) {
+				if (ndctl_dimm_get_formats(dimm) != dimms[i].formats) {
+					fprintf(stderr, "dimm%d expected formats: %d got: %d\n",
 						i, dimms[i].formats,
 						ndctl_dimm_get_formats(dimm));
-				return -ENXIO;
-			}
-			for (j = 0; j < dimms[i].formats; j++) {
-				if (ndctl_dimm_get_formatN(dimm, j) != dimms[i].format[j]) {
-					fprintf(stderr,
-						"dimm%d expected format[%d]: %d got: %d\n",
+					return -ENXIO;
+				}
+				for (j = 0; j < dimms[i].formats; j++) {
+					if (ndctl_dimm_get_formatN(dimm, j) !=
+					    dimms[i].format[j]) {
+						fprintf(stderr,
+							"dimm%d expected format[%d]: %d got: %d\n",
 							i, j, dimms[i].format[j],
 							ndctl_dimm_get_formatN(dimm, j));
-					return -ENXIO;
+						return -ENXIO;
+					}
 				}
 			}
 		}
@@ -2623,6 +2626,7 @@ static int do_test0(struct ndctl_ctx *ctx, struct ndctl_test *test)
 	struct ndctl_bus *bus = ndctl_bus_get_by_provider(ctx, NFIT_PROVIDER0);
 	struct ndctl_region *region;
 	struct ndctl_dimm *dimm;
+	unsigned num_regions = ARRAY_SIZE(regions0);
 	int rc;
 
 	if (!bus)
@@ -2658,22 +2662,25 @@ static int do_test0(struct ndctl_ctx *ctx, struct ndctl_test *test)
 				* ndctl_region_get_interleave_ways(region));
 	}
 
+	if (!ndctl_bus_has_nfit(bus))
+		num_regions = 1;
+
 	/* pfn and dax tests require vmalloc-enabled nfit_test */
 	if (ndctl_test_attempt(test, KERNEL_VERSION(4, 8, 0))) {
-		rc = check_regions(bus, regions0, ARRAY_SIZE(regions0), DAX);
+		rc = check_regions(bus, regions0, num_regions, DAX);
 		if (rc)
 			return rc;
 		reset_bus(bus);
 	}
 
 	if (ndctl_test_attempt(test, KERNEL_VERSION(4, 8, 0))) {
-		rc = check_regions(bus, regions0, ARRAY_SIZE(regions0), PFN);
+		rc = check_regions(bus, regions0, num_regions, PFN);
 		if (rc)
 			return rc;
 		reset_bus(bus);
 	}
 
-	return check_regions(bus, regions0, ARRAY_SIZE(regions0), BTT);
+	return check_regions(bus, regions0, num_regions, BTT);
 }
 
 static int do_test1(struct ndctl_ctx *ctx, struct ndctl_test *test)
-- 
2.26.2

* [PATCH RFC ndctl 8/9] test/multi-pmem: fix for no-interleave support
  2020-10-07  4:22 ` [PATCH RFC ndctl 1/9] libndctl: test enablement for non-nfit devices Santosh Sivaraj
                     ` (5 preceding siblings ...)
  2020-10-07  4:22   ` [PATCH RFC ndctl 7/9] test/libndctl: Don't check for error flags on non-nfit dimms Santosh Sivaraj
@ 2020-10-07  4:22   ` Santosh Sivaraj
  2020-10-07  4:22   ` [PATCH RFC ndctl 9/9] test: Disable paths which are possibly wrong Santosh Sivaraj
  7 siblings, 0 replies; 13+ messages in thread
From: Santosh Sivaraj @ 2020-10-07  4:22 UTC (permalink / raw)
  To: Linux NVDIMM, Vishal Verma, Vaibhav Jain, Aneesh Kumar K.V,
	Harish Sriram, Shivaprasad G Bhat
  Cc: Santosh Sivaraj

Signed-off-by: Santosh Sivaraj <santosh@fossix.org>
---
 test/multi-pmem.c | 9 +++++++++
 1 file changed, 9 insertions(+)

diff --git a/test/multi-pmem.c b/test/multi-pmem.c
index 668662c..ef4fec0 100644
--- a/test/multi-pmem.c
+++ b/test/multi-pmem.c
@@ -208,6 +208,14 @@ static int do_multi_pmem(struct ndctl_ctx *ctx, struct ndctl_test *test)
 			break;
 	}
 
+	/* FIXME: the above expects a blk region to be available on the
+	 * previously identified dimm, which won't be the case if we don't
+	 * have interleave support
+	 */
+
+	if (!region)
+		goto no_interleave;
+
 	blk_avail_orig = ndctl_region_get_available_size(region);
 	for (i = 1; i < NUM_NAMESPACES - 1; i++) {
 		ndns = namespaces[i];
@@ -239,6 +247,7 @@ static int do_multi_pmem(struct ndctl_ctx *ctx, struct ndctl_test *test)
 	if (check_deleted(target, devname, test) != 0)
 		return -ENXIO;
 
+no_interleave:
 	ndctl_bus_foreach(ctx, bus) {
 		if (strncmp(ndctl_bus_get_provider(bus), "nfit_test", 9) != 0)
 			continue;
-- 
2.26.2

* [PATCH RFC ndctl 9/9] test: Disable paths which are possibly wrong
  2020-10-07  4:22 ` [PATCH RFC ndctl 1/9] libndctl: test enablement for non-nfit devices Santosh Sivaraj
                     ` (6 preceding siblings ...)
  2020-10-07  4:22   ` [PATCH RFC ndctl 8/9] test/multi-pmem: fix for no-interleave support Santosh Sivaraj
@ 2020-10-07  4:22   ` Santosh Sivaraj
  7 siblings, 0 replies; 13+ messages in thread
From: Santosh Sivaraj @ 2020-10-07  4:22 UTC (permalink / raw)
  To: Linux NVDIMM, Vishal Verma, Vaibhav Jain, Aneesh Kumar K.V,
	Harish Sriram, Shivaprasad G Bhat
  Cc: Santosh Sivaraj

Signed-off-by: Santosh Sivaraj <santosh@fossix.org>
---
 test/dpa-alloc.c | 9 ++++++++-
 test/dsm-fail.c  | 8 +++++++-
 2 files changed, 15 insertions(+), 2 deletions(-)

diff --git a/test/dpa-alloc.c b/test/dpa-alloc.c
index b757b9a..a933b54 100644
--- a/test/dpa-alloc.c
+++ b/test/dpa-alloc.c
@@ -277,11 +277,18 @@ static int do_test(struct ndctl_ctx *ctx, struct ndctl_test *test)
 		return -ENXIO;
 	}
 
+	/* FIXME: there should be a delete here, to remove the last namespace;
+	 * otherwise the comparison below (available vs default) should fail.
+	 * But it's not clear why it isn't failing with the nfit code. What
+	 * am I missing? */
+#if 0
 	available_slots = ndctl_dimm_get_available_labels(dimm);
 	if (available_slots != default_available_slots - 1) {
-		fprintf(stderr, "mishandled slot count\n");
+		fprintf(stderr, "mishandled slot count (available: %u, default: %u)\n",
+			available_slots, default_available_slots - 1);
 		return -ENXIO;
 	}
+#endif
 
 	ndctl_region_foreach(bus, region)
 		ndctl_region_disable_invalidate(region);
diff --git a/test/dsm-fail.c b/test/dsm-fail.c
index b2c51db..f303f09 100644
--- a/test/dsm-fail.c
+++ b/test/dsm-fail.c
@@ -290,6 +290,12 @@ static int do_test(struct ndctl_ctx *ctx, struct ndctl_test *test)
 		goto out;
 
 
+	/* The test below will fail: if -EACCES is returned for getting the
+	 * config size, how would the dimm be enabled at all? With the nfit
+	 * driver, the dimm is enabled anyway and only the command fails with
+	 * the return code -- is that the right behaviour?
+	 */
+#if 0
 	rc = set_dimm_response(DIMM_PATH, ND_CMD_GET_CONFIG_SIZE, -EACCES,
 			&log_ctx);
 	if (rc)
@@ -309,7 +315,7 @@ static int do_test(struct ndctl_ctx *ctx, struct ndctl_test *test)
 	rc = dimms_disable(bus);
 	if (rc)
 		goto out;
-
+#endif
 	rc = set_dimm_response(DIMM_PATH, ND_CMD_GET_CONFIG_DATA, -EACCES,
 			&log_ctx);
 	if (rc)
-- 
2.26.2

* Re: [PATCH RFC v3] testing/nvdimm: Add test module for non-nfit platforms
  2020-10-06  1:00 [PATCH RFC v3] testing/nvdimm: Add test module for non-nfit platforms Santosh Sivaraj
  2020-10-07  4:22 ` [PATCH RFC ndctl 1/9] libndctl: test enablement for non-nfit devices Santosh Sivaraj
@ 2020-12-07 22:00 ` Dan Williams
  2020-12-09  4:17   ` Aneesh Kumar K.V
  1 sibling, 1 reply; 13+ messages in thread
From: Dan Williams @ 2020-12-07 22:00 UTC (permalink / raw)
  To: Santosh Sivaraj
  Cc: Linux NVDIMM, Aneesh Kumar K.V, Vaibhav Jain, Shivaprasad G Bhat,
	Harish Sriram

On Mon, Oct 5, 2020 at 6:01 PM Santosh Sivaraj <santosh@fossix.org> wrote:
>
> The current test module cannot be used for testing (make check) on platforms
> that do not have support for NFIT. In order to get the ndctl tests working,
> we need a module which can emulate NVDIMM devices without relying on
> ACPI/NFIT.
>
> The aim of this proposed module is to implement functionality similar to that
> of the existing module, but without the ACPI dependencies. Currently,
> interleaving and error injection are not implemented.
>
> Corresponding changes to ndctl are also required, to skip tests that depend
> on nfit attributes; those will be sent as a reply to this patch.

Looks pretty good to me. Some minor comments below. Probably the
biggest feedback is that I'd like to be able to run tests against
nfit_test.ko and ndtest.ko without reconfiguring/recompiling, i.e. on
x86 run both tests; on non-ACPI-NFIT platforms, run just ndtest.

>
> Signed-off-by: Santosh Sivaraj <santosh@fossix.org>
> ---
>  tools/testing/nvdimm/config_check.c |   3 +-
>  tools/testing/nvdimm/test/Kbuild    |   6 +-
>  tools/testing/nvdimm/test/ndtest.c  | 819 ++++++++++++++++++++++++++++
>  tools/testing/nvdimm/test/ndtest.h  |  65 +++
>  4 files changed, 891 insertions(+), 2 deletions(-)
>  create mode 100644 tools/testing/nvdimm/test/ndtest.c
>  create mode 100644 tools/testing/nvdimm/test/ndtest.h
>
> diff --git a/tools/testing/nvdimm/config_check.c b/tools/testing/nvdimm/config_check.c
> index cac891028cd1..3e3a5f518864 100644
> --- a/tools/testing/nvdimm/config_check.c
> +++ b/tools/testing/nvdimm/config_check.c
> @@ -12,7 +12,8 @@ void check(void)
>         BUILD_BUG_ON(!IS_MODULE(CONFIG_ND_BTT));
>         BUILD_BUG_ON(!IS_MODULE(CONFIG_ND_PFN));
>         BUILD_BUG_ON(!IS_MODULE(CONFIG_ND_BLK));
> -       BUILD_BUG_ON(!IS_MODULE(CONFIG_ACPI_NFIT));
> +       if (IS_ENABLED(CONFIG_ACPI_NFIT))
> +               BUILD_BUG_ON(!IS_MODULE(CONFIG_ACPI_NFIT));

This defeats the purpose of having a safety check for nfit_test.ko.
So, I think this wants to be split into a config_check for
nfit_test.ko builds and a separate one for ndtest.ko builds.
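
Something like the following, perhaps (an untested sketch; the
ndtest_config_check.c name is just an illustration of the idea):

  /* config_check.c, built into nfit_test.ko only */
  void check(void)
  {
          BUILD_BUG_ON(!IS_MODULE(CONFIG_ND_BTT));
          BUILD_BUG_ON(!IS_MODULE(CONFIG_ND_PFN));
          BUILD_BUG_ON(!IS_MODULE(CONFIG_ND_BLK));
          BUILD_BUG_ON(!IS_MODULE(CONFIG_ACPI_NFIT));
          BUILD_BUG_ON(!IS_MODULE(CONFIG_DEV_DAX));
          BUILD_BUG_ON(!IS_MODULE(CONFIG_DEV_DAX_PMEM));
  }

  /* ndtest_config_check.c, built into ndtest.ko: the same checks,
   * minus the CONFIG_ACPI_NFIT assertion */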

>         BUILD_BUG_ON(!IS_MODULE(CONFIG_DEV_DAX));
>         BUILD_BUG_ON(!IS_MODULE(CONFIG_DEV_DAX_PMEM));
>  }
> diff --git a/tools/testing/nvdimm/test/Kbuild b/tools/testing/nvdimm/test/Kbuild
> index 75baebf8f4ba..197bcb2b7f35 100644
> --- a/tools/testing/nvdimm/test/Kbuild
> +++ b/tools/testing/nvdimm/test/Kbuild
> @@ -5,5 +5,9 @@ ccflags-y += -I$(srctree)/drivers/acpi/nfit/
>  obj-m += nfit_test.o
>  obj-m += nfit_test_iomap.o
>
> -nfit_test-y := nfit.o
> +ifeq  ($(CONFIG_ACPI_NFIT),m)
> +       nfit_test-y := nfit.o
> +else
> +       nfit_test-y := ndtest.o
> +endif

Rather than an if-else I'd prefer to always build ndtest.ko and
optionally build nfit_test.ko.
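
E.g. (an untested sketch):

  obj-m += ndtest.o
  obj-m += nfit_test_iomap.o
  ifeq ($(CONFIG_ACPI_NFIT),m)
  obj-m += nfit_test.o
  nfit_test-y := nfit.o
  endif
  nfit_test_iomap-y := iomap.o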

>  nfit_test_iomap-y := iomap.o
> diff --git a/tools/testing/nvdimm/test/ndtest.c b/tools/testing/nvdimm/test/ndtest.c
> new file mode 100644
> index 000000000000..415a40345584
> --- /dev/null
> +++ b/tools/testing/nvdimm/test/ndtest.c
> @@ -0,0 +1,819 @@
> +// SPDX-License-Identifier: GPL-2.0-only
> +#include <linux/platform_device.h>
> +#include <linux/device.h>
> +#include <linux/module.h>
> +#include <linux/genalloc.h>
> +#include <linux/vmalloc.h>
> +#include <linux/dma-mapping.h>
> +#include <linux/list_sort.h>
> +#include <linux/libnvdimm.h>
> +#include <linux/ndctl.h>
> +#include <nd-core.h>
> +#include <linux/printk.h>
> +
> +#include "../watermark.h"
> +#include "nfit_test.h"
> +#include "ndtest.h"
> +
> +enum {
> +       DIMM_SIZE = SZ_32M,
> +       LABEL_SIZE = SZ_128K,
> +       NUM_INSTANCES = 2,
> +       NUM_DCR = 4,
> +};
> +
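> +/*
> + * Pack a (node, socket, imc, chan, dimm) tuple into an NFIT-style DIMM
> + * handle: 12 bits of node id, then 4 bits each for the socket, memory
> + * controller, channel and dimm numbers.
> + */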
> +#define NFIT_DIMM_HANDLE(node, socket, imc, chan, dimm)         \
> +       (((node & 0xfff) << 16) | ((socket & 0xf) << 12) \
> +        | ((imc & 0xf) << 8) | ((chan & 0xf) << 4) | (dimm & 0xf))
> +
> +static struct ndtest_dimm dimm_group1[] = {
> +       {
> +               .type = NDTEST_REGION_TYPE_BLK | NDTEST_REGION_TYPE_PMEM,
> +               .size = DIMM_SIZE,
> +               .handle = NFIT_DIMM_HANDLE(0, 0, 0, 0, 0),
> +               .uuid_str = "1e5c75d2-b618-11ea-9aa3-507b9ddc0f72",
> +               .physical_id = 0,
> +       },
> +       {
> +               .type = NDTEST_REGION_TYPE_PMEM,
> +               .size = DIMM_SIZE,
> +               .handle = NFIT_DIMM_HANDLE(0, 0, 0, 0, 1),
> +               .uuid_str = "1c4d43ac-b618-11ea-be80-507b9ddc0f72",
> +               .physical_id = 1,
> +       },
> +       {
> +               .type = NDTEST_REGION_TYPE_PMEM,
> +               .size = DIMM_SIZE * 2,
> +               .handle = NFIT_DIMM_HANDLE(0, 0, 1, 0, 0),
> +               .uuid_str = "a9f17ffc-b618-11ea-b36d-507b9ddc0f72",
> +               .physical_id = 2,
> +       },
> +       {
> +               .type = NDTEST_REGION_TYPE_BLK,
> +               .size = DIMM_SIZE,
> +               .handle = NFIT_DIMM_HANDLE(0, 0, 1, 0, 1),
> +               .uuid_str = "b6b83b22-b618-11ea-8aae-507b9ddc0f72",
> +               .physical_id = 3,
> +       },
> +       {
> +               .type = NDTEST_REGION_TYPE_PMEM,
> +               .size = DIMM_SIZE,
> +               .handle = NFIT_DIMM_HANDLE(0, 1, 0, 0, 0),
> +               .uuid_str = "bf9baaee-b618-11ea-b181-507b9ddc0f72",
> +               .physical_id = 4,
> +       },
> +};
> +
> +static struct ndtest_dimm dimm_group2[] = {
> +       {
> +               .type = NDTEST_REGION_TYPE_PMEM,
> +               .size = DIMM_SIZE,
> +               .handle = NFIT_DIMM_HANDLE(1, 0, 0, 0, 0),
> +               .uuid_str = "ca0817e2-b618-11ea-9db3-507b9ddc0f72",
> +               .physical_id = 0,
> +       },
> +};
> +
> +static struct ndtest_config bus_configs[NUM_INSTANCES] = {
> +       /* bus 1 */
> +       {
> +               .dimm_start = 0,
> +               .dimm_count = ARRAY_SIZE(dimm_group1),
> +               .dimm = dimm_group1,
> +       },
> +       /* bus 2 */
> +       {
> +               .dimm_start = ARRAY_SIZE(dimm_group1),
> +               .dimm_count = ARRAY_SIZE(dimm_group2),
> +               .dimm = dimm_group2,
> +       },
> +};
> +
> +static DEFINE_SPINLOCK(ndtest_lock);
> +static struct ndtest_priv *instances[NUM_INSTANCES];
> +static struct class *ndtest_dimm_class;
> +static struct gen_pool *ndtest_pool;
> +
> +static inline struct ndtest_priv *to_ndtest_priv(struct device *dev)
> +{
> +       struct platform_device *pdev = to_platform_device(dev);
> +
> +       return container_of(pdev, struct ndtest_priv, pdev);
> +}
> +
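> +/*
> + * Label storage area accessors: reads and writes are clamped to
> + * LABEL_SIZE, and (mirroring the nfit_test convention) the return
> + * value is the number of bytes of the buffer left unconsumed.
> + */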
> +static int ndtest_config_get(struct ndtest_dimm *p, unsigned int buf_len,
> +                            struct nd_cmd_get_config_data_hdr *hdr)
> +{
> +       unsigned int len;
> +
> +       if ((hdr->in_offset + hdr->in_length) > LABEL_SIZE)
> +               return -EINVAL;
> +
> +       hdr->status = 0;
> +       len = min(hdr->in_length, LABEL_SIZE - hdr->in_offset);
> +       memcpy(hdr->out_buf, p->label_area + hdr->in_offset, len);
> +
> +       return buf_len - len;
> +}
> +
> +static int ndtest_config_set(struct ndtest_dimm *p, unsigned int buf_len,
> +                            struct nd_cmd_set_config_hdr *hdr)
> +{
> +       unsigned int len;
> +
> +       if ((hdr->in_offset + hdr->in_length) > LABEL_SIZE)
> +               return -EINVAL;
> +
> +       len = min(hdr->in_length, LABEL_SIZE - hdr->in_offset);
> +       memcpy(p->label_area + hdr->in_offset, hdr->in_buf, len);
> +
> +       return buf_len - len;
> +}
> +
> +static int ndtest_ctl(struct nvdimm_bus_descriptor *nd_desc,
> +                    struct nvdimm *nvdimm, unsigned int cmd, void *buf,
> +                    unsigned int buf_len, int *cmd_rc)
> +{
> +       struct nd_cmd_get_config_size *size;
> +       struct ndtest_dimm *p;
> +       int _cmd_rc;
> +
> +       if (!cmd_rc)
> +               cmd_rc = &_cmd_rc;
> +
> +       *cmd_rc = 0;
> +
> +       if (!nvdimm)
> +               return -EINVAL;
> +
> +       p = nvdimm_provider_data(nvdimm);
> +       if (!p)
> +               return -EINVAL;
> +
> +       /*
> +        * Failures for a DIMM can be injected using fail_cmd and
> +        * fail_cmd_code; see the device attributes below.
> +        */
> +       if (p->fail_cmd)
> +               return p->fail_cmd_code ? p->fail_cmd_code : -EIO;
> +
> +       switch (cmd) {
> +       case ND_CMD_GET_CONFIG_SIZE:
> +               if (buf_len < sizeof(*size))
> +                       return -EINVAL;
> +
> +               size = (struct nd_cmd_get_config_size *) buf;
> +               size->status = 0;
> +               size->max_xfer = 8;
> +               size->config_size = p->config_size;
> +               *cmd_rc = 0;
> +               break;
> +
> +       case ND_CMD_GET_CONFIG_DATA:
> +               *cmd_rc = ndtest_config_get(p, buf_len, buf);
> +               break;
> +
> +       case ND_CMD_SET_CONFIG_DATA:
> +               *cmd_rc = ndtest_config_set(p, buf_len, buf);
> +               break;
> +       default:
> +               dev_dbg(p->dev, "invalid command %u\n", cmd);
> +               return -EINVAL;
> +       }
> +
> +       return 0;
> +}
> +
> +static ssize_t handle_show(struct device *dev, struct device_attribute *attr,
> +               char *buf)
> +{
> +       struct ndtest_dimm *dimm = dev_get_drvdata(dev);
> +
> +       return sprintf(buf, "%#x\n", dimm->handle);
> +}
> +static DEVICE_ATTR_RO(handle);
> +
> +static ssize_t fail_cmd_show(struct device *dev, struct device_attribute *attr,
> +               char *buf)
> +{
> +       struct ndtest_dimm *dimm = dev_get_drvdata(dev);
> +
> +       return sprintf(buf, "%#lx\n", dimm->fail_cmd);
> +}
> +
> +static ssize_t fail_cmd_store(struct device *dev, struct device_attribute *attr,
> +               const char *buf, size_t size)
> +{
> +       struct ndtest_dimm *dimm = dev_get_drvdata(dev);
> +       unsigned long val;
> +       ssize_t rc;
> +
> +       rc = kstrtoul(buf, 0, &val);
> +       if (rc)
> +               return rc;
> +
> +       dimm->fail_cmd = val;
> +       return size;
> +}
> +static DEVICE_ATTR_RW(fail_cmd);
> +
> +static ssize_t fail_cmd_code_show(struct device *dev, struct device_attribute *attr,
> +               char *buf)
> +{
> +       struct ndtest_dimm *dimm = dev_get_drvdata(dev);
> +
> +       return sprintf(buf, "%d\n", dimm->fail_cmd_code);
> +}
> +
> +static ssize_t fail_cmd_code_store(struct device *dev, struct device_attribute *attr,
> +               const char *buf, size_t size)
> +{
> +       struct ndtest_dimm *dimm = dev_get_drvdata(dev);
> +       unsigned long val;
> +       ssize_t rc;
> +
> +       rc = kstrtoul(buf, 0, &val);
> +       if (rc)
> +               return rc;
> +
> +       dimm->fail_cmd_code = val;
> +       return size;
> +}
> +static DEVICE_ATTR_RW(fail_cmd_code);
> +
> +static struct attribute *dimm_attributes[] = {
> +       &dev_attr_handle.attr,
> +       &dev_attr_fail_cmd.attr,
> +       &dev_attr_fail_cmd_code.attr,
> +       NULL,
> +};
> +
> +static struct attribute_group dimm_attribute_group = {
> +       .attrs = dimm_attributes,
> +};
> +
> +static const struct attribute_group *dimm_attribute_groups[] = {
> +       &dimm_attribute_group,
> +       NULL,
> +};
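
A quick note on the failure-injection knobs above: writing a non-zero
value to fail_cmd makes every subsequent ctl command on that dimm fail,
with fail_cmd_code (or -EIO when it is zero) as the result. A minimal
userspace sketch, using the "nfit_test_dimm" class and "test_dimm%d"
device names created later in this patch (the helper name is
illustrative, error handling kept minimal):

    #include <fcntl.h>
    #include <unistd.h>

    /* force every ctl command on test_dimm0 to fail with -EIO */
    static int inject_cmd_failure(void)
    {
            int fd = open("/sys/class/nfit_test_dimm/test_dimm0/fail_cmd",
                          O_WRONLY);

            if (fd < 0)
                    return -1;
            write(fd, "1", 1);      /* any non-zero value trips the failure */
            close(fd);
            return 0;
    }
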
> +
> +static void put_dimms(void *data)
> +{
> +       struct ndtest_priv *p = data;
> +       int i;
> +
> +       for (i = 0; i < p->config->dimm_count; i++)
> +               if (p->config->dimm[i].dev) {
> +                       device_unregister(p->config->dimm[i].dev);
> +                       p->config->dimm[i].dev = NULL;
> +               }
> +}
> +
> +#define NDTEST_SCM_DIMM_CMD_MASK          \
> +       ((1ul << ND_CMD_GET_CONFIG_SIZE) | \
> +        (1ul << ND_CMD_GET_CONFIG_DATA) | \
> +        (1ul << ND_CMD_SET_CONFIG_DATA))
> +
> +static ssize_t phys_id_show(struct device *dev,
> +               struct device_attribute *attr, char *buf)
> +{
> +       struct nvdimm *nvdimm = to_nvdimm(dev);
> +       struct ndtest_dimm *dimm = nvdimm_provider_data(nvdimm);
> +
> +       return sprintf(buf, "%#x\n", dimm->physical_id);
> +}
> +static DEVICE_ATTR_RO(phys_id);
> +
> +static ssize_t vendor_show(struct device *dev,
> +               struct device_attribute *attr, char *buf)
> +{
> +       return sprintf(buf, "0x1234567\n");
> +}
> +static DEVICE_ATTR_RO(vendor);
> +
> +static ssize_t id_show(struct device *dev,
> +               struct device_attribute *attr, char *buf)
> +{
> +       struct nvdimm *nvdimm = to_nvdimm(dev);
> +       struct ndtest_dimm *dimm = nvdimm_provider_data(nvdimm);
> +
> +       return sprintf(buf, "%04x-%02x-%04x-%08x", 0xabcd,
> +                      0xa, 2016, ~(dimm->handle));
> +}
> +static DEVICE_ATTR_RO(id);
> +
> +static ssize_t nvdimm_handle_show(struct device *dev,
> +                                 struct device_attribute *attr, char *buf)
> +{
> +       struct nvdimm *nvdimm = to_nvdimm(dev);
> +       struct ndtest_dimm *dimm = nvdimm_provider_data(nvdimm);
> +
> +       return sprintf(buf, "%#x\n", dimm->handle);
> +}
> +
> +static struct device_attribute dev_attr_nvdimm_show_handle =  {
> +       .attr   = { .name = "handle", .mode = 0444 },
> +       .show   = nvdimm_handle_show,
> +};
> +
> +static ssize_t subsystem_vendor_show(struct device *dev,
> +               struct device_attribute *attr, char *buf)
> +{
> +       return sprintf(buf, "0x%04x\n", 0);
> +}
> +static DEVICE_ATTR_RO(subsystem_vendor);
> +
> +static ssize_t dirty_shutdown_show(struct device *dev,
> +               struct device_attribute *attr, char *buf)
> +{
> +       return sprintf(buf, "%d\n", 42);
> +}
> +static DEVICE_ATTR_RO(dirty_shutdown);
> +
> +static struct attribute *ndtest_nvdimm_attributes[] = {
> +       &dev_attr_nvdimm_show_handle.attr,
> +       &dev_attr_vendor.attr,
> +       &dev_attr_id.attr,
> +       &dev_attr_phys_id.attr,
> +       &dev_attr_subsystem_vendor.attr,
> +       &dev_attr_dirty_shutdown.attr,
> +       NULL,
> +};
> +
> +static umode_t ndtest_nvdimm_attr_visible(struct kobject *kobj,
> +                                       struct attribute *a, int n)
> +{
> +       return a->mode;
> +}
> +
> +static const struct attribute_group ndtest_nvdimm_attribute_group = {
> +       .attrs = ndtest_nvdimm_attributes,
> +       .is_visible = ndtest_nvdimm_attr_visible,
> +};
> +
> +static const struct attribute_group *ndtest_nvdimm_attribute_groups[] = {
> +       &ndtest_nvdimm_attribute_group,
> +       NULL,
> +};
> +
> +static int ndtest_blk_do_io(struct nd_blk_region *ndbr, resource_size_t dpa,
> +               void *iobuf, u64 len, int rw)
> +{
> +       struct ndtest_dimm *dimm = ndbr->blk_provider_data;
> +       struct ndtest_blk_mmio *mmio = dimm->mmio;
> +       struct nd_region *nd_region = &ndbr->nd_region;
> +       unsigned int lane;
> +
> +       lane = nd_region_acquire_lane(nd_region);
> +
> +       if (rw) {
> +               memcpy(mmio->base + dpa, iobuf, len);
> +       } else {
> +               memcpy(iobuf, mmio->base + dpa, len);
> +               arch_invalidate_pmem(mmio->base + dpa, len);
> +       }
> +
> +       nd_region_release_lane(nd_region, lane);
> +
> +       return 0;
> +}
> +
> +static int ndtest_blk_region_enable(struct nvdimm_bus *nvdimm_bus,
> +                                   struct device *dev)
> +{
> +       struct nd_blk_region *ndbr = to_nd_blk_region(dev);
> +       struct nvdimm *nvdimm;
> +       struct ndtest_dimm *p;
> +       struct ndtest_blk_mmio *mmio;
> +
> +       nvdimm = nd_blk_region_to_dimm(ndbr);
> +       p = nvdimm_provider_data(nvdimm);
> +
> +       nd_blk_region_set_provider_data(ndbr, p);
> +       p->region = to_nd_region(dev);
> +
> +       mmio = devm_kzalloc(dev, sizeof(struct ndtest_blk_mmio), GFP_KERNEL);
> +       if (!mmio)
> +               return -ENOMEM;
> +
> +       mmio->base = devm_nvdimm_memremap(dev, p->address, 12,
> +                                        nd_blk_memremap_flags(ndbr));
> +       if (!mmio->base) {
> +               dev_err(dev, "%s failed to map blk dimm\n", nvdimm_name(nvdimm));
> +               return -ENOMEM;
> +       }
> +
> +       p->mmio = mmio;
> +
> +       return 0;
> +}

Are there any ppc nvdimms that will use BLK mode? As far as I know,
BLK mode is only an abandoned mechanism in the ACPI specification, not
anything that has made it into a shipping implementation. I'd prefer
not to extend it if it's not necessary.

> +
> +static struct nfit_test_resource *ndtest_resource_lookup(resource_size_t addr)
> +{
> +       int i;
> +
> +       for (i = 0; i < NUM_INSTANCES; i++) {
> +               struct nfit_test_resource *n, *nfit_res = NULL;
> +               struct ndtest_priv *t = instances[i];
> +
> +               if (!t)
> +                       continue;
> +               spin_lock(&ndtest_lock);
> +               list_for_each_entry(n, &t->resources, list) {
> +                       if (addr >= n->res.start && (addr < n->res.start
> +                                               + resource_size(&n->res))) {
> +                               nfit_res = n;
> +                               break;
> +                       } else if (addr >= (unsigned long) n->buf
> +                                       && (addr < (unsigned long) n->buf
> +                                               + resource_size(&n->res))) {
> +                               nfit_res = n;
> +                               break;
> +                       }
> +               }
> +               spin_unlock(&ndtest_lock);
> +               if (nfit_res)
> +                       return nfit_res;
> +       }
> +
> +       pr_warn("Failed to get resource\n");
> +
> +       return NULL;
> +}
> +
> +static void ndtest_release_resource(void *data)
> +{
> +       struct nfit_test_resource *res  = data;
> +
> +       spin_lock(&ndtest_lock);
> +       list_del(&res->list);
> +       spin_unlock(&ndtest_lock);
> +
> +       if (resource_size(&res->res) >= DIMM_SIZE)
> +               gen_pool_free(ndtest_pool, res->res.start,
> +                               resource_size(&res->res));
> +       vfree(res->buf);
> +       kfree(res);
> +}
> +
> +static void *ndtest_alloc_resource(struct ndtest_priv *p, size_t size,
> +                                  dma_addr_t *dma)
> +{
> +       dma_addr_t __dma = 0;
> +       void *buf;
> +       struct nfit_test_resource *res;
> +       struct genpool_data_align data = {
> +               .align = SZ_128M,
> +       };
> +
> +       res = kzalloc(sizeof(*res), GFP_KERNEL);
> +       if (!res)
> +               return NULL;
> +
> +       buf = vmalloc(size);
> +       if (!buf)
> +               goto buf_err;
> +
> +       if (size >= DIMM_SIZE)
> +               __dma = gen_pool_alloc_algo(ndtest_pool, size,
> +                                         gen_pool_first_fit_align, &data);
> +       else
> +               __dma = (unsigned long) buf;
> +
> +       if (!__dma)
> +               goto buf_err;
> +
> +       INIT_LIST_HEAD(&res->list);
> +       res->dev = &p->pdev.dev;
> +       res->buf = buf;
> +       res->res.start = __dma;
> +       res->res.end = __dma + size - 1;
> +       res->res.name = "NFIT";
> +       spin_lock_init(&res->lock);
> +       INIT_LIST_HEAD(&res->requests);
> +       spin_lock(&ndtest_lock);
> +       list_add(&res->list, &p->resources);
> +       spin_unlock(&ndtest_lock);
> +
> +       if (dma)
> +               *dma = __dma;
> +
> +       if (devm_add_action_or_reset(&p->pdev.dev, ndtest_release_resource,
> +                                    res))
> +               return NULL;
> +
> +       return res->buf;
> +
> +buf_err:
> +       if (__dma && size >= DIMM_SIZE)
> +               gen_pool_free(ndtest_pool, __dma, size);
> +       if (buf)
> +               vfree(buf);
> +       kfree(res);
> +
> +       return NULL;
> +}
> +
> +static int ndtest_dimm_register(struct ndtest_priv *priv,
> +                               struct ndtest_dimm *dimm, int id)
> +{
> +       struct device *dev = &priv->pdev.dev;
> +       struct nd_mapping_desc mapping;
> +       struct nd_region_desc *ndr_desc;
> +       struct nd_blk_region_desc ndbr_desc;
> +       unsigned long dimm_flags = 0;
> +
> +       if (dimm->type == NDTEST_REGION_TYPE_PMEM) {
> +               set_bit(NDD_ALIASING, &dimm_flags);
> +               if (priv->pdev.id == 0)
> +                       set_bit(NDD_LABELING, &dimm_flags);
> +       }
> +
> +       dimm->nvdimm = nvdimm_create(priv->bus, dimm,
> +                                   ndtest_nvdimm_attribute_groups, dimm_flags,
> +                                   NDTEST_SCM_DIMM_CMD_MASK, 0, NULL);
> +       if (!dimm->nvdimm) {
> +               dev_err(dev, "Error creating DIMM object for %pOF\n", priv->dn);
> +               return -ENXIO;
> +       }
> +
> +       memset(&ndbr_desc, 0, sizeof(ndbr_desc));
> +
> +       /* now add the region */
> +       memset(&mapping, 0, sizeof(mapping));
> +       mapping.nvdimm = dimm->nvdimm;
> +       mapping.start = dimm->res.start;
> +       mapping.size = dimm->size;
> +
> +       ndr_desc = &ndbr_desc.ndr_desc;
> +       memset(ndr_desc, 0, sizeof(*ndr_desc));
> +       ndr_desc->res = &dimm->res;
> +       ndr_desc->provider_data = dimm;
> +       ndr_desc->mapping = &mapping;
> +       ndr_desc->num_mappings = 1;
> +       ndr_desc->nd_set = &dimm->nd_set;
> +       ndr_desc->num_lanes = 1;
> +
> +       if (dimm->type & NDTEST_REGION_TYPE_BLK) {
> +               ndbr_desc.enable = ndtest_blk_region_enable;
> +               ndbr_desc.do_io = ndtest_blk_do_io;
> +               dimm->region = nvdimm_blk_region_create(priv->bus, ndr_desc);
> +       } else
> +               dimm->region = nvdimm_pmem_region_create(priv->bus, ndr_desc);
> +
> +       if (!dimm->region) {
> +               dev_err(dev, "Error registering region %pR\n", ndr_desc->res);
> +               return -ENXIO;
> +       }
> +
> +       dimm->dev = device_create_with_groups(ndtest_dimm_class,
> +                                            &priv->pdev.dev,
> +                                            0, dimm, dimm_attribute_groups,
> +                                            "test_dimm%d", id);
> +       if (IS_ERR(dimm->dev))
> +               return PTR_ERR(dimm->dev);
> +
> +       return 0;
> +}
> +
> +static int ndtest_nvdimm_init(struct ndtest_priv *p)
> +{
> +       struct ndtest_dimm *d;
> +       u64 uuid[2];
> +       void *res;
> +       int i, id, rc;
> +
> +       for (i = 0; i < p->config->dimm_count; i++) {
> +               d = &p->config->dimm[i];
> +               d->id = id = p->config->dimm_start + i;
> +               res = ndtest_alloc_resource(p, LABEL_SIZE, NULL);
> +               if (!res)
> +                       return -ENOMEM;
> +
> +               d->label_area = res;
> +               sprintf(d->label_area, "label%d", id);
> +               d->config_size = LABEL_SIZE;
> +               d->res.name = p->pdev.name;
> +
> +               if (uuid_parse(d->uuid_str, (uuid_t *) uuid))
> +                       pr_err("failed to parse UUID\n");
> +
> +               d->nd_set.cookie1 = cpu_to_le64(uuid[0]);
> +               d->nd_set.cookie2 = cpu_to_le64(uuid[1]);
> +
> +               switch (d->type) {
> +               case NDTEST_REGION_TYPE_PMEM:
> +                       /* setup the resource */
> +                       res = ndtest_alloc_resource(p, d->size,
> +                                                   &d->res.start);
> +                       if (!res)
> +                               return -ENOMEM;
> +
> +                       d->res.end = d->res.start + d->size - 1;
> +                       break;
> +               case NDTEST_REGION_TYPE_BLK:
> +                       if (WARN_ON(p->nblks >= NUM_DCR))
> +                               return -EINVAL;
> +
> +                       if (!ndtest_alloc_resource(p, d->size,
> +                                                  &p->dimm_dma[p->nblks]))
> +                               return -ENOMEM;
> +
> +                       if (!ndtest_alloc_resource(p, LABEL_SIZE,
> +                                   &p->label_dma[p->nblks]))
> +                               return -ENOMEM;
> +
> +                       if (!ndtest_alloc_resource(p, LABEL_SIZE,
> +                                   &p->dcr_dma[p->nblks]))
> +                               return -ENOMEM;
> +
> +                       d->address = p->dimm_dma[p->nblks];
> +                       p->nblks++;
> +
> +                       break;
> +               }
> +
> +               rc = ndtest_dimm_register(p, d, id);
> +               if (rc)
> +                       return rc;
> +       }
> +
> +       return 0;
> +}
> +
> +static int ndtest_bus_register(struct ndtest_priv *p,
> +                              struct ndtest_config *config)
> +{
> +       p->config = &config[p->pdev.id];
> +
> +       p->bus_desc.ndctl = ndtest_ctl;
> +       p->bus_desc.module = THIS_MODULE;
> +       p->bus_desc.provider_name = NULL;
> +       p->bus_desc.cmd_mask =
> +               1UL << ND_CMD_ARS_CAP | 1UL << ND_CMD_ARS_START |
> +               1UL << ND_CMD_ARS_STATUS | 1UL << ND_CMD_CLEAR_ERROR |
> +               1UL << ND_CMD_CALL;
> +
> +       p->bus = nvdimm_bus_register(&p->pdev.dev, &p->bus_desc);
> +       if (!p->bus) {
> +               dev_err(&p->pdev.dev, "Error creating nvdimm bus %pOF\n", p->dn);
> +               return -ENOMEM;
> +       }
> +
> +       return 0;
> +}
> +
> +static int ndtest_probe(struct platform_device *pdev)
> +{
> +       struct ndtest_priv *p;
> +       int rc;
> +
> +       p = to_ndtest_priv(&pdev->dev);
> +       rc = ndtest_bus_register(p, bus_configs);
> +       if (rc)
> +               return rc;
> +
> +       p->dcr_dma = devm_kcalloc(&p->pdev.dev, NUM_DCR,
> +                                sizeof(dma_addr_t), GFP_KERNEL);
> +       p->label_dma = devm_kcalloc(&p->pdev.dev, NUM_DCR,
> +                                  sizeof(dma_addr_t), GFP_KERNEL);
> +       p->dimm_dma = devm_kcalloc(&p->pdev.dev, NUM_DCR,
> +                                 sizeof(dma_addr_t), GFP_KERNEL);
> +       if (!p->dcr_dma || !p->label_dma || !p->dimm_dma) {
> +               rc = -ENOMEM;
> +               goto err;
> +       }
> +
> +       rc = ndtest_nvdimm_init(p);
> +       if (rc)
> +               goto err;
> +
> +       rc = devm_add_action_or_reset(&pdev->dev, put_dimms, p);
> +       if (rc)
> +               goto err;
> +
> +       platform_set_drvdata(pdev, p);
> +
> +       return 0;
> +
> +err:
> +       nvdimm_bus_unregister(p->bus);
> +       return rc;
> +}
> +
> +static int ndtest_remove(struct platform_device *pdev)
> +{
> +       struct ndtest_priv *p = to_ndtest_priv(&pdev->dev);
> +
> +       nvdimm_bus_unregister(p->bus);
> +       return 0;
> +}
> +
> +static const struct platform_device_id ndtest_id[] = {
> +       { KBUILD_MODNAME },
> +       { },
> +};
> +
> +static struct platform_driver ndtest_driver = {
> +       .probe = ndtest_probe,
> +       .remove = ndtest_remove,
> +       .driver = {
> +               .name = KBUILD_MODNAME,
> +       },
> +       .id_table = ndtest_id,
> +};
> +
> +static void ndtest_release(struct device *dev)
> +{
> +       struct ndtest_priv *p = to_ndtest_priv(dev);
> +
> +       kfree(p);
> +}
> +
> +static __init int ndtest_init(void)
> +{
> +       int rc, i;
> +
> +       pmem_test();
> +       libnvdimm_test();
> +       device_dax_test();
> +       dax_pmem_test();
> +       dax_pmem_core_test();
> +#ifdef CONFIG_DEV_DAX_PMEM_COMPAT
> +       dax_pmem_compat_test();
> +#endif
> +
> +       nfit_test_setup(ndtest_resource_lookup, NULL);

Since this is now generic shared infrastructure between the two
implementations, nfit_test_setup() wants an ndtest_setup() rename.
Anything that is shared between ndtest and nfit_test can just be
called ndtest.
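
For concreteness, a sketch of that rename, assuming the setup/teardown
pair exported by the iomap helper keeps its current lookup/evaluate-DSM
shape; the static hook names below are illustrative, and the
nfit_test_* typedefs would presumably follow suit:

    static nfit_test_lookup_fn get_resource;
    static nfit_test_evaluate_dsm_fn evaluate_dsm;

    /* shared by ndtest and nfit_test, hence the ndtest_ prefix */
    void ndtest_setup(nfit_test_lookup_fn lookup,
                      nfit_test_evaluate_dsm_fn evaluate)
    {
            get_resource = lookup;
            evaluate_dsm = evaluate;
    }
    EXPORT_SYMBOL(ndtest_setup);

    void ndtest_teardown(void)
    {
            get_resource = NULL;
            evaluate_dsm = NULL;
    }
    EXPORT_SYMBOL(ndtest_teardown);
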

> +
> +       ndtest_dimm_class = class_create(THIS_MODULE, "nfit_test_dimm");
> +       if (IS_ERR(ndtest_dimm_class)) {
> +               rc = PTR_ERR(ndtest_dimm_class);
> +               goto err_register;
> +       }
> +
> +       ndtest_pool = gen_pool_create(ilog2(SZ_4M), NUMA_NO_NODE);
> +       if (!ndtest_pool) {
> +               rc = -ENOMEM;
> +               goto err_register;
> +       }
> +
> +       if (gen_pool_add(ndtest_pool, SZ_4G, SZ_4G, NUMA_NO_NODE)) {
> +               rc = -ENOMEM;
> +               goto err_register;
> +       }
> +
> +       /* Each instance acts as a separate bus, and a bus can have multiple dimms */
> +       for (i = 0; i < NUM_INSTANCES; i++) {
> +               struct ndtest_priv *priv;
> +               struct platform_device *pdev;
> +
> +               priv = kzalloc(sizeof(*priv), GFP_KERNEL);
> +               if (!priv) {
> +                       rc = -ENOMEM;
> +                       goto err_register;
> +               }
> +
> +               INIT_LIST_HEAD(&priv->resources);
> +               pdev = &priv->pdev;
> +               pdev->name = KBUILD_MODNAME;
> +               pdev->id = i;
> +               pdev->dev.release = ndtest_release;
> +               rc = platform_device_register(pdev);
> +               if (rc) {
> +                       put_device(&pdev->dev);
> +                       goto err_register;
> +               }
> +               get_device(&pdev->dev);
> +
> +               instances[i] = priv;
> +       }
> +
> +       rc = platform_driver_register(&ndtest_driver);
> +       if (rc)
> +               goto err_register;
> +
> +       return 0;
> +
> +err_register:
> +       pr_err("Failed to initialize the ndtest module\n");
> +       if (ndtest_pool)
> +               gen_pool_destroy(ndtest_pool);
> +
> +       for (i = 0; i < NUM_INSTANCES; i++)
> +               if (instances[i])
> +                       platform_device_unregister(&instances[i]->pdev);
> +
> +       nfit_test_teardown();
> +       for (i = 0; i < NUM_INSTANCES; i++)
> +               if (instances[i])
> +                       put_device(&instances[i]->pdev.dev);
> +
> +       return rc;
> +}
> +
> +static __exit void ndtest_exit(void)
> +{
> +       int i;
> +
> +       for (i = 0; i < NUM_INSTANCES; i++)
> +               platform_device_unregister(&instances[i]->pdev);
> +
> +       platform_driver_unregister(&ndtest_driver);
> +       nfit_test_teardown();
> +
> +       gen_pool_destroy(ndtest_pool);
> +
> +       for (i = 0; i < NUM_INSTANCES; i++)
> +               put_device(&instances[i]->pdev.dev);
> +       class_destroy(ndtest_dimm_class);
> +}
> +
> +module_init(ndtest_init);
> +module_exit(ndtest_exit);
> +MODULE_LICENSE("GPL");
> +MODULE_AUTHOR("IBM Corporation");
> diff --git a/tools/testing/nvdimm/test/ndtest.h b/tools/testing/nvdimm/test/ndtest.h
> new file mode 100644
> index 000000000000..2e8ff749e2f4
> --- /dev/null
> +++ b/tools/testing/nvdimm/test/ndtest.h
> @@ -0,0 +1,65 @@
> +/* SPDX-License-Identifier: GPL-2.0-only */
> +#ifndef NDTEST_H
> +#define NDTEST_H
> +
> +#include <linux/platform_device.h>
> +#include <linux/libnvdimm.h>
> +
> +enum dimm_type {
> +       NDTEST_REGION_TYPE_PMEM = 0x0,
> +       NDTEST_REGION_TYPE_BLK = 0x1,
> +};
> +
> +struct ndtest_priv {
> +       struct platform_device pdev;
> +       struct device_node *dn;
> +       struct list_head resources;
> +       struct nvdimm_bus_descriptor bus_desc;
> +       struct nvdimm_bus *bus;
> +       struct ndtest_config *config;
> +
> +       dma_addr_t *dcr_dma;
> +       dma_addr_t *label_dma;
> +       dma_addr_t *dimm_dma;
> +       bool is_volatile;
> +       unsigned int flags;
> +       unsigned int nblks;
> +};
> +
> +struct ndtest_blk_mmio {
> +       void __iomem *base;
> +       u64 size;
> +       u64 base_offset;
> +       u32 line_size;
> +       u32 num_lines;
> +       u32 table_size;
> +};
> +
> +struct ndtest_dimm {
> +       struct resource res;
> +       struct device *dev;
> +       struct nvdimm *nvdimm;
> +       struct nd_region *region;
> +       struct nd_interleave_set nd_set;
> +       struct ndtest_blk_mmio *mmio;
> +
> +       dma_addr_t address;
> +       unsigned long config_size;
> +       unsigned long fail_cmd;
> +       void *label_area;
> +       char *uuid_str;

I'm curious, what is the role of this UUID?

> +       enum dimm_type type;
> +       unsigned int size;
> +       unsigned int handle;
> +       unsigned int physical_id;
> +       int id;
> +       int fail_cmd_code;
> +};
> +
> +struct ndtest_config {
> +       unsigned int dimm_count;
> +       unsigned int dimm_start;
> +       struct ndtest_dimm *dimm;
> +};
> +
> +#endif /* NDTEST_H */
> --
> 2.26.2

* Re: [PATCH RFC v3] testing/nvdimm: Add test module for non-nfit platforms
  2020-12-07 22:00 ` [PATCH RFC v3] testing/nvdimm: Add test module for non-nfit platforms Dan Williams
@ 2020-12-09  4:17   ` Aneesh Kumar K.V
  2020-12-09  4:43     ` Dan Williams
  0 siblings, 1 reply; 13+ messages in thread
From: Aneesh Kumar K.V @ 2020-12-09  4:17 UTC (permalink / raw)
  To: Dan Williams, Santosh Sivaraj
  Cc: Linux NVDIMM, Vaibhav Jain, Shivaprasad G Bhat, Harish Sriram

On 12/8/20 3:30 AM, Dan Williams wrote:
> On Mon, Oct 5, 2020 at 6:01 PM Santosh Sivaraj <santosh@fossix.org> wrote:
>

...

>> [ ndtest_blk_do_io() and ndtest_blk_region_enable() elided; quoted in
>> full in the patch above ]
> 
> > Are there any ppc nvdimms that will use BLK mode? As far as I know,
> > BLK mode is only an abandoned mechanism in the ACPI specification, not
> > anything that has made it into a shipping implementation. I'd prefer
> > not to extend it if it's not necessary.
> 
That is correct: there is no BLK mode/type usage on ppc64. But IIUC, we
also had difficulty restricting the BLK tests to ACPI systems; the test
code had dependencies, and splitting those out was making it complex.


-aneesh

* Re: [PATCH RFC v3] testing/nvdimm: Add test module for non-nfit platforms
  2020-12-09  4:17   ` Aneesh Kumar K.V
@ 2020-12-09  4:43     ` Dan Williams
  0 siblings, 0 replies; 13+ messages in thread
From: Dan Williams @ 2020-12-09  4:43 UTC (permalink / raw)
  To: Aneesh Kumar K.V
  Cc: Linux NVDIMM, Vaibhav Jain, Shivaprasad G Bhat, Harish Sriram

On Tue, Dec 8, 2020 at 8:17 PM Aneesh Kumar K.V
<aneesh.kumar@linux.ibm.com> wrote:
>
> On 12/8/20 3:30 AM, Dan Williams wrote:
> > On Mon, Oct 5, 2020 at 6:01 PM Santosh Sivaraj <santosh@fossix.org> wrote:
> >
>
> ...
>
> >> [ ndtest_blk_do_io() and ndtest_blk_region_enable() elided; quoted in
> >> full in the patch above ]
> >
> > Are there any ppc nvdimms that will use BLK mode? As far as I know,
> > BLK mode is only an abandoned mechanism in the ACPI specification, not
> > anything that has made it into a shipping implementation. I'd prefer
> > not to extend it if it's not necessary.
> >
> That is correct: there is no BLK mode/type usage on ppc64. But IIUC, we
> also had difficulty restricting the BLK tests to ACPI systems; the test
> code had dependencies, and splitting those out was making it complex.

I wouldn't be opposed to an "if (nfit_test)" gate for the BLK tests.
Whatever is easiest for you, because I'm just thrilled to be able to
regression test the ppc infrastructure, i.e., no need to spend extra
effort making the tests perfect.
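
For illustration, one way such a gate could look on the ndctl side,
assuming the tests use libndctl's ndctl_bus_get_provider() to tell the
two emulated buses apart; the helper name and the prefix check below
are illustrative, not existing ndctl code:

    #include <stdbool.h>
    #include <string.h>
    #include <ndctl/libndctl.h>

    /* BLK regions are only emulated by the ACPI-backed nfit_test buses */
    static bool bus_is_nfit_test(struct ndctl_bus *bus)
    {
            const char *provider = ndctl_bus_get_provider(bus);

            return provider && strncmp(provider, "nfit_test", 9) == 0;
    }

A BLK test could then return the conventional "77" skip status when
bus_is_nfit_test() is false, leaving the pmem paths exercised on ppc.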