From: Dan Williams <dan.j.williams@intel.com>
To: linux-mm@kvack.org
Cc: dave.hansen@linux.intel.com, hch@lst.de,
linux-nvdimm@lists.01.org, linux-kernel@vger.kernel.org
Subject: [PATCH 08/12] device-dax: Add resize support
Date: Mon, 23 Mar 2020 16:55:18 -0700
Message-ID: <158500771845.2088294.637621783660044227.stgit@dwillia2-desk3.amr.corp.intel.com>
In-Reply-To: <158500767138.2088294.17131646259803932461.stgit@dwillia2-desk3.amr.corp.intel.com>
Make the device-dax 'size' attribute writable to allow capacity to be
split between multiple instances in a region. The intended consumers of
this capability are users that want to split a scarce memory resource
between device-dax and System-RAM access, or users that want to have
multiple security domains for a large region.
By default the hmem instance provider allocates an entire region to the
first instance. The process of creating a new instance (assuming a
region-id of 0) is to find the region and trigger the 'create'
attribute, which yields an empty instance to configure. For example:
    cd /sys/bus/dax/devices
    echo dax0.0 > dax0.0/driver/unbind
    echo $new_size > dax0.0/size
    echo 1 > $(readlink -f dax0.0)/../dax_region/create
    seed=$(cat $(readlink -f dax0.0)/../dax_region/seed)
    echo $new_size > $seed/size
    echo dax0.0 > ../drivers/{device_dax,kmem}/bind
    echo dax0.1 > ../drivers/{device_dax,kmem}/bind
Instances can be destroyed by:
    echo $device > $(readlink -f $device)/../dax_region/delete
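Putting it together, a rough sketch of splitting a region between
device-dax and System-RAM (kmem) access might look like the following.
The device names, the $smaller_size placeholder, and the use of the
region's 'available_size' attribute (added earlier in this series) are
illustrative; sizes must satisfy the region / memremap_compat_align()
alignment:

    cd /sys/bus/dax/devices
    region=$(readlink -f dax0.0)/../dax_region

    # release dax0.0 and shrink it to free up capacity
    echo dax0.0 > dax0.0/driver/unbind
    echo $smaller_size > dax0.0/size

    # create a seed instance and hand it the freed capacity
    echo 1 > $region/create
    seed=$(cat $region/seed)
    echo $(cat $region/available_size) > $seed/size

    # keep dax0.0 as device-dax, route the new instance to System-RAM
    echo dax0.0 > ../drivers/device_dax/bind
    echo $seed > ../drivers/kmem/bind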
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
---
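A quick way to exercise the new writable 'size' attribute is a
shrink/grow round-trip on an idle (unbound) instance; writes fail with
-EBUSY while a driver is attached. The names and the 16MiB size below
are illustrative and assume they satisfy the region alignment:

    cd /sys/bus/dax/devices
    echo dax0.1 > dax0.1/driver/unbind
    echo 0 > dax0.1/size                 # return all capacity to the region
    echo $((16 << 20)) > dax0.1/size     # re-allocate 16MiB from free space
    echo dax0.1 > ../drivers/device_dax/bind
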
drivers/dax/bus.c | 186 ++++++++++++++++++++++++++++++++++++++++++++++++++---
1 file changed, 176 insertions(+), 10 deletions(-)
diff --git a/drivers/dax/bus.c b/drivers/dax/bus.c
index 8db771feed3d..6eb77127bb7d 100644
--- a/drivers/dax/bus.c
+++ b/drivers/dax/bus.c
@@ -6,6 +6,7 @@
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/dax.h>
+#include <linux/io.h>
#include "dax-private.h"
#include "bus.h"
@@ -541,7 +542,8 @@ struct dax_region *alloc_dax_region(struct device *parent, int region_id,
}
EXPORT_SYMBOL_GPL(alloc_dax_region);
-static int alloc_dev_dax_range(struct dev_dax *dev_dax, resource_size_t size)
+static int __alloc_dev_dax_range(struct dev_dax *dev_dax, u64 start,
+ resource_size_t size)
{
struct dax_region *dax_region = dev_dax->region;
struct resource *res = &dax_region->res;
@@ -550,8 +552,34 @@ static int alloc_dev_dax_range(struct dev_dax *dev_dax, resource_size_t size)
device_lock_assert(dax_region->dev);
+ if (dev_WARN_ONCE(&dev_dax->dev, !size, "non-zero size required\n"))
+ return -EINVAL;
+
+ /* allow default @start when the resource tree is empty */
+ if (start == U64_MAX && !res->child)
+ start = res->start;
+ if (start == U64_MAX)
+ return -EINVAL;
+
+ alloc = __request_region(res, start, size, dev_name(dev), 0);
+ if (!alloc)
+ return -ENOMEM;
+
+ dev_dax->range = (struct range) {
+ .start = alloc->start,
+ .end = alloc->end,
+ };
+
+ return 0;
+}
+
+static int alloc_dev_dax_range(struct dev_dax *dev_dax, resource_size_t size)
+{
/* handle the seed alloc special case */
if (!size) {
+ struct dax_region *dax_region = dev_dax->region;
+ struct resource *res = &dax_region->res;
+
dev_dax->range = (struct range) {
.start = res->start,
.end = res->start - 1,
@@ -559,18 +587,29 @@ static int alloc_dev_dax_range(struct dev_dax *dev_dax, resource_size_t size)
return 0;
}
- /* TODO: handle multiple allocations per region */
- if (res->child)
- return -ENOMEM;
+ return __alloc_dev_dax_range(dev_dax, U64_MAX, size);
+}
- alloc = __request_region(res, res->start, size, dev_name(dev), 0);
+static int __adjust_dev_dax_range(struct dev_dax *dev_dax, struct resource *res,
+ resource_size_t size)
+{
+ struct dax_region *dax_region = dev_dax->region;
+ struct range *range = &dev_dax->range;
+ int rc = 0;
- if (!alloc)
- return -ENOMEM;
+ device_lock_assert(dax_region->dev);
+
+ if (size)
+ rc = adjust_resource(res, range->start, size);
+ else
+ __release_region(&dax_region->res, range->start,
+ range_len(range));
+ if (rc)
+ return rc;
dev_dax->range = (struct range) {
- .start = alloc->start,
- .end = alloc->end,
+ .start = range->start,
+ .end = range->start + size - 1,
};
return 0;
@@ -584,7 +623,131 @@ static ssize_t size_show(struct device *dev,
return sprintf(buf, "%llu\n", size);
}
-static DEVICE_ATTR_RO(size);
+
+static bool alloc_is_aligned(struct dax_region *dax_region,
+ resource_size_t size)
+{
+ /*
+ * The minimum mapping granularity for a device instance is a
+ * single subsection, unless the arch says otherwise.
+ */
+ return IS_ALIGNED(size, max_t(unsigned long, dax_region->align,
+ memremap_compat_align()));
+}
+
+static int dev_dax_shrink(struct dev_dax *dev_dax, resource_size_t size)
+{
+ struct dax_region *dax_region = dev_dax->region;
+ struct range *range = &dev_dax->range;
+ struct resource *res, *adjust = NULL;
+ struct device *dev = &dev_dax->dev;
+
+ for_each_dax_region_resource(dax_region, res)
+ if (strcmp(res->name, dev_name(dev)) == 0
+ && res->start == range->start) {
+ adjust = res;
+ break;
+ }
+
+ if (dev_WARN_ONCE(dev, !adjust, "failed to find matching resource\n"))
+ return -ENXIO;
+ return __adjust_dev_dax_range(dev_dax, adjust, size);
+}
+
+static ssize_t dev_dax_resize(struct dax_region *dax_region,
+ struct dev_dax *dev_dax, resource_size_t size)
+{
+ resource_size_t avail = dax_region_avail_size(dax_region), to_alloc;
+ resource_size_t dev_size = range_len(&dev_dax->range);
+ struct resource *region_res = &dax_region->res;
+ struct device *dev = &dev_dax->dev;
+ const char *name = dev_name(dev);
+ struct resource *res, *first;
+
+ if (dev->driver)
+ return -EBUSY;
+ if (size == dev_size)
+ return 0;
+ if (size > dev_size && size - dev_size > avail)
+ return -ENOSPC;
+ if (size < dev_size)
+ return dev_dax_shrink(dev_dax, size);
+
+ to_alloc = size - dev_size;
+ if (dev_WARN_ONCE(dev, !alloc_is_aligned(dax_region, to_alloc),
+ "resize of %pa misaligned\n", &to_alloc))
+ return -ENXIO;
+
+ /*
+ * Expand the device into the unused portion of the region. This
+ * may involve adjusting the end of an existing resource, or
+ * allocating a new resource.
+ */
+ first = region_res->child;
+ if (!first)
+ return __alloc_dev_dax_range(dev_dax, dax_region->res.start,
+ to_alloc);
+ for (res = first; to_alloc && res; res = res->sibling) {
+ struct resource *next = res->sibling;
+ resource_size_t free;
+
+ /* space at the beginning of the region */
+ free = 0;
+ if (res == first && res->start > dax_region->res.start)
+ free = res->start - dax_region->res.start;
+ if (free >= to_alloc && dev_size == 0)
+ return __alloc_dev_dax_range(dev_dax,
+ dax_region->res.start, to_alloc);
+
+ free = 0;
+ /* space between allocations */
+ if (next && next->start > res->end + 1)
+ free = next->start - res->end - 1;
+
+ /* space at the end of the region */
+ if (free < to_alloc && !next && res->end < region_res->end)
+ free = region_res->end - res->end;
+
+ if (free >= to_alloc && strcmp(name, res->name) == 0)
+ return __adjust_dev_dax_range(dev_dax, res,
+ resource_size(res) + to_alloc);
+ else if (free >= to_alloc && dev_size == 0)
+ return __alloc_dev_dax_range(dev_dax, res->end + 1,
+ to_alloc);
+ }
+ return -ENOSPC;
+}
+
+static ssize_t size_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ ssize_t rc;
+ unsigned long long val;
+ struct dev_dax *dev_dax = to_dev_dax(dev);
+ struct dax_region *dax_region = dev_dax->region;
+
+ rc = kstrtoull(buf, 0, &val);
+ if (rc)
+ return rc;
+
+ if (!alloc_is_aligned(dax_region, val)) {
+ dev_dbg(dev, "%s: size: %lld misaligned\n", __func__, val);
+ return -EINVAL;
+ }
+
+ device_lock(dax_region->dev);
+ if (!dax_region->dev->driver) {
+ device_unlock(dax_region->dev);
+ return -ENXIO;
+ }
+ device_lock(dev);
+ rc = dev_dax_resize(dax_region, dev_dax, val);
+ device_unlock(dev);
+ device_unlock(dax_region->dev);
+
+ return rc == 0 ? len : rc;
+}
+static DEVICE_ATTR_RW(size);
static int dev_dax_target_node(struct dev_dax *dev_dax)
{
@@ -633,11 +796,14 @@ static umode_t dev_dax_visible(struct kobject *kobj, struct attribute *a, int n)
{
struct device *dev = container_of(kobj, struct device, kobj);
struct dev_dax *dev_dax = to_dev_dax(dev);
+ struct dax_region *dax_region = dev_dax->region;
if (a == &dev_attr_target_node.attr && dev_dax_target_node(dev_dax) < 0)
return 0;
if (a == &dev_attr_numa_node.attr && !IS_ENABLED(CONFIG_NUMA))
return 0;
+ if (a == &dev_attr_size.attr && is_static(dax_region))
+ return 0444;
return a->mode;
}