From: Dan Williams <dan.j.williams@intel.com>
To: akpm@linux-foundation.org
Cc: Vishal Verma <vishal.l.verma@intel.com>,
	peterz@infradead.org, dave.hansen@linux.intel.com,
	ard.biesheuvel@linaro.org, vishal.l.verma@intel.com,
	linux-mm@kvack.org, linux-nvdimm@lists.01.org,
	joao.m.martins@oracle.com, linux-kernel@vger.kernel.org,
	linux-acpi@vger.kernel.org, dri-devel@lists.freedesktop.org
Subject: [PATCH v4 15/23] device-dax: Add resize support
Date: Sun, 02 Aug 2020 22:03:46 -0700
Message-ID: <159643102625.4062302.7431838945566033852.stgit@dwillia2-desk3.amr.corp.intel.com>
In-Reply-To: <159643094279.4062302.17779410714418721328.stgit@dwillia2-desk3.amr.corp.intel.com>

Make the device-dax 'size' attribute writable to allow capacity to be
split between multiple instances in a region. The intended consumers of
this capability are users that want to split a scarce memory resource
between device-dax and System-RAM access, or users that want to have
multiple security domains for a large region.
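
In its simplest form the interface is a byte count written to the
instance's 'size' attribute. A quick sketch of the constraints this patch
enforces (the required alignment is max(region align,
memremap_compat_align()), commonly 2MiB on x86):

    cd /sys/bus/dax/devices
    cat dax0.0/size                    # current capacity in bytes
    echo $new_size > dax0.0/size       # fails with -EBUSY while a driver is bound
    echo dax0.0 > dax0.0/driver/unbind
    echo $new_size > dax0.0/size       # must be aligned, else -EINVAL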

By default the hmem instance provider allocates an entire region to the
first instance. The process of creating a new instance (assuming a
region-id of 0) is to free capacity from that first instance, find the
region, and trigger the 'create' attribute, which yields an empty instance
to configure. For example:

    cd /sys/bus/dax/devices
    echo dax0.0 > dax0.0/driver/unbind
    echo $new_size > dax0.0/size
    echo 1 > $(readlink -f dax0.0)/../dax_region/create
    seed=$(cat $(readlink -f dax0.0)/../dax_region/seed)
    echo $new_size > $seed/size
    echo dax0.0 > ../drivers/{device_dax,kmem}/bind
    echo dax0.1 > ../drivers/{device_dax,kmem}/bind
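
The remaining capacity of a region can be checked before picking
$new_size. A minimal sketch, assuming the dax_region 'size' and
'available_size' attributes (the latter added earlier in this series):

    region=$(readlink -f dax0.0)/../dax_region
    cat $region/size               # total region capacity
    cat $region/available_size     # capacity not yet assigned to an instance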

Instances can be destroyed by:

    echo $device > $(readlink -f $device)/../dax_region/delete
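
For completeness, a possible end-to-end teardown of a dynamically created
instance. This sketch assumes, per the create/seed/delete interfaces
introduced earlier in this series, that the instance must be unbound and
shrunk to zero size before 'delete' will accept it:

    echo dax0.1 > dax0.1/driver/unbind    # skip if not currently bound
    echo 0 > dax0.1/size
    echo dax0.1 > $(readlink -f dax0.1)/../dax_region/delete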

Cc: Vishal Verma <vishal.l.verma@intel.com>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
---
 drivers/dax/bus.c |  161 ++++++++++++++++++++++++++++++++++++++++++++++++++---
 1 file changed, 152 insertions(+), 9 deletions(-)

diff --git a/drivers/dax/bus.c b/drivers/dax/bus.c
index dce9413a4394..53d07f2f1285 100644
--- a/drivers/dax/bus.c
+++ b/drivers/dax/bus.c
@@ -6,6 +6,7 @@
 #include <linux/list.h>
 #include <linux/slab.h>
 #include <linux/dax.h>
+#include <linux/io.h>
 #include "dax-private.h"
 #include "bus.h"
 
@@ -562,7 +563,8 @@ struct dax_region *alloc_dax_region(struct device *parent, int region_id,
 }
 EXPORT_SYMBOL_GPL(alloc_dax_region);
 
-static int alloc_dev_dax_range(struct dev_dax *dev_dax, resource_size_t size)
+static int alloc_dev_dax_range(struct dev_dax *dev_dax, u64 start,
+		resource_size_t size)
 {
 	struct dax_region *dax_region = dev_dax->region;
 	struct resource *res = &dax_region->res;
@@ -580,12 +582,7 @@ static int alloc_dev_dax_range(struct dev_dax *dev_dax, resource_size_t size)
 		return 0;
 	}
 
-	/* TODO: handle multiple allocations per region */
-	if (res->child)
-		return -ENOMEM;
-
-	alloc = __request_region(res, res->start, size, dev_name(dev), 0);
-
+	alloc = __request_region(res, start, size, dev_name(dev), 0);
 	if (!alloc)
 		return -ENOMEM;
 
@@ -597,6 +594,29 @@ static int alloc_dev_dax_range(struct dev_dax *dev_dax, resource_size_t size)
 	return 0;
 }
 
+static int adjust_dev_dax_range(struct dev_dax *dev_dax, struct resource *res, resource_size_t size)
+{
+	struct dax_region *dax_region = dev_dax->region;
+	struct range *range = &dev_dax->range;
+	int rc = 0;
+
+	device_lock_assert(dax_region->dev);
+
+	if (size)
+		rc = adjust_resource(res, range->start, size);
+	else
+		__release_region(&dax_region->res, range->start, range_len(range));
+	if (rc)
+		return rc;
+
+	dev_dax->range = (struct range) {
+		.start = range->start,
+		.end = range->start + size - 1,
+	};
+
+	return 0;
+}
+
 static ssize_t size_show(struct device *dev,
 		struct device_attribute *attr, char *buf)
 {
@@ -605,7 +625,127 @@ static ssize_t size_show(struct device *dev,
 
 	return sprintf(buf, "%llu\n", size);
 }
-static DEVICE_ATTR_RO(size);
+
+static bool alloc_is_aligned(struct dax_region *dax_region,
+		resource_size_t size)
+{
+	/*
+	 * The minimum mapping granularity for a device instance is a
+	 * single subsection, unless the arch says otherwise.
+	 */
+	return IS_ALIGNED(size, max_t(unsigned long, dax_region->align,
+				memremap_compat_align()));
+}
+
+static int dev_dax_shrink(struct dev_dax *dev_dax, resource_size_t size)
+{
+	struct dax_region *dax_region = dev_dax->region;
+	struct range *range = &dev_dax->range;
+	struct resource *res, *adjust = NULL;
+	struct device *dev = &dev_dax->dev;
+
+	for_each_dax_region_resource(dax_region, res)
+		if (strcmp(res->name, dev_name(dev)) == 0
+				&& res->start == range->start) {
+			adjust = res;
+			break;
+		}
+
+	if (dev_WARN_ONCE(dev, !adjust, "failed to find matching resource\n"))
+		return -ENXIO;
+	return adjust_dev_dax_range(dev_dax, adjust, size);
+}
+
+static ssize_t dev_dax_resize(struct dax_region *dax_region,
+		struct dev_dax *dev_dax, resource_size_t size)
+{
+	resource_size_t avail = dax_region_avail_size(dax_region), to_alloc;
+	resource_size_t dev_size = range_len(&dev_dax->range);
+	struct resource *region_res = &dax_region->res;
+	struct device *dev = &dev_dax->dev;
+	const char *name = dev_name(dev);
+	struct resource *res, *first;
+
+	if (dev->driver)
+		return -EBUSY;
+	if (size == dev_size)
+		return 0;
+	if (size > dev_size && size - dev_size > avail)
+		return -ENOSPC;
+	if (size < dev_size)
+		return dev_dax_shrink(dev_dax, size);
+
+	to_alloc = size - dev_size;
+	if (dev_WARN_ONCE(dev, !alloc_is_aligned(dax_region, to_alloc),
+			"resize of %pa misaligned\n", &to_alloc))
+		return -ENXIO;
+
+	/*
+	 * Expand the device into the unused portion of the region. This
+	 * may involve adjusting the end of an existing resource, or
+	 * allocating a new resource.
+	 */
+	first = region_res->child;
+	if (!first)
+		return alloc_dev_dax_range(dev_dax, dax_region->res.start, to_alloc);
+	for (res = first; to_alloc && res; res = res->sibling) {
+		struct resource *next = res->sibling;
+		resource_size_t free;
+
+		/* space at the beginning of the region */
+		free = 0;
+		if (res == first && res->start > dax_region->res.start)
+			free = res->start - dax_region->res.start;
+		if (free >= to_alloc && dev_size == 0)
+			return alloc_dev_dax_range(dev_dax, dax_region->res.start, to_alloc);
+
+		free = 0;
+		/* space between allocations */
+		if (next && next->start > res->end + 1)
+			free = next->start - (res->end + 1);
+
+		/* space at the end of the region */
+		if (free < to_alloc && !next && res->end < region_res->end)
+			free = region_res->end - res->end;
+
+		if (free >= to_alloc && strcmp(name, res->name) == 0)
+			return adjust_dev_dax_range(dev_dax, res, resource_size(res) + to_alloc);
+		else if (free >= to_alloc && dev_size == 0)
+			return alloc_dev_dax_range(dev_dax, res->end + 1, to_alloc);
+	}
+	return -ENOSPC;
+}
+
+static ssize_t size_store(struct device *dev, struct device_attribute *attr,
+		const char *buf, size_t len)
+{
+	ssize_t rc;
+	unsigned long long val;
+	struct dev_dax *dev_dax = to_dev_dax(dev);
+	struct dax_region *dax_region = dev_dax->region;
+
+	rc = kstrtoull(buf, 0, &val);
+	if (rc)
+		return rc;
+
+	if (!alloc_is_aligned(dax_region, val)) {
+		dev_dbg(dev, "%s: size: %lld misaligned\n", __func__, val);
+		return -EINVAL;
+	}
+
+	device_lock(dax_region->dev);
+	if (!dax_region->dev->driver) {
+		device_unlock(dax_region->dev);
+		return -ENXIO;
+	}
+	device_lock(dev);
+	rc = dev_dax_resize(dax_region, dev_dax, val);
+	device_unlock(dev);
+	device_unlock(dax_region->dev);
+
+	return rc == 0 ? len : rc;
+}
+static DEVICE_ATTR_RW(size);
 
 static int dev_dax_target_node(struct dev_dax *dev_dax)
 {
@@ -654,11 +794,14 @@ static umode_t dev_dax_visible(struct kobject *kobj, struct attribute *a, int n)
 {
 	struct device *dev = container_of(kobj, struct device, kobj);
 	struct dev_dax *dev_dax = to_dev_dax(dev);
+	struct dax_region *dax_region = dev_dax->region;
 
 	if (a == &dev_attr_target_node.attr && dev_dax_target_node(dev_dax) < 0)
 		return 0;
 	if (a == &dev_attr_numa_node.attr && !IS_ENABLED(CONFIG_NUMA))
 		return 0;
+	if (a == &dev_attr_size.attr && is_static(dax_region))
+		return 0444;
 	return a->mode;
 }
 
@@ -739,7 +882,7 @@ struct dev_dax *devm_create_dev_dax(struct dev_dax_data *data)
 	device_initialize(dev);
 	dev_set_name(dev, "dax%d.%d", dax_region->id, dev_dax->id);
 
-	rc = alloc_dev_dax_range(dev_dax, data->size);
+	rc = alloc_dev_dax_range(dev_dax, dax_region->res.start, data->size);
 	if (rc)
 		goto err_range;
 

