From: Ben Widawsky <ben.widawsky@intel.com>
To: linux-cxl@vger.kernel.org, nvdimm@lists.linux.dev
Cc: patches@lists.linux.dev, Ben Widawsky <ben.widawsky@intel.com>,
Alison Schofield <alison.schofield@intel.com>,
Dan Williams <dan.j.williams@intel.com>,
Ira Weiny <ira.weiny@intel.com>,
Jonathan Cameron <Jonathan.Cameron@huawei.com>,
Vishal Verma <vishal.l.verma@intel.com>
Subject: [RFC PATCH 08/15] cxl/core/hdm: Allocate resources from the media
Date: Wed, 13 Apr 2022 11:37:13 -0700 [thread overview]
Message-ID: <20220413183720.2444089-9-ben.widawsky@intel.com> (raw)
In-Reply-To: <20220413183720.2444089-1-ben.widawsky@intel.com>
Similar to how decoders consume address space for the root decoder, they
also consume space on the device's physical media. For future
allocations, it's required to mark those as used/busy.
The CXL specification requires that HDM decoders are programmed in
ascending physical address order. The device's address space can
therefore be managed by a simple allocator. Fragmentation may occur if
devices are taken in and out of active decoding. Fixing this is left to
userspace to handle.
Signed-off-by: Ben Widawsky <ben.widawsky@intel.com>
---
drivers/cxl/core/core.h | 3 +++
drivers/cxl/core/hdm.c | 26 +++++++++++++++++++++++++-
drivers/cxl/core/port.c | 9 ++++++++-
drivers/cxl/cxl.h | 10 ++++++++++
4 files changed, 46 insertions(+), 2 deletions(-)
diff --git a/drivers/cxl/core/core.h b/drivers/cxl/core/core.h
index 1a50c0fc399c..a507a2502127 100644
--- a/drivers/cxl/core/core.h
+++ b/drivers/cxl/core/core.h
@@ -9,6 +9,9 @@ extern const struct device_type cxl_nvdimm_type;
extern struct attribute_group cxl_base_attribute_group;
+extern struct device_attribute dev_attr_create_pmem_region;
+extern struct device_attribute dev_attr_delete_region;
+
struct cxl_send_command;
struct cxl_mem_query_commands;
int cxl_query_cmd(struct cxl_memdev *cxlmd,
diff --git a/drivers/cxl/core/hdm.c b/drivers/cxl/core/hdm.c
index 37c09c77e9a7..5326a2cd6968 100644
--- a/drivers/cxl/core/hdm.c
+++ b/drivers/cxl/core/hdm.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright(c) 2022 Intel Corporation. All rights reserved. */
#include <linux/io-64-nonatomic-hi-lo.h>
+#include <linux/genalloc.h>
#include <linux/device.h>
#include <linux/delay.h>
@@ -198,8 +199,11 @@ static int init_hdm_decoder(struct cxl_port *port, struct cxl_decoder *cxld,
else
cxld->target_type = CXL_DECODER_ACCELERATOR;
- if (is_endpoint_decoder(&cxld->dev))
+ if (is_endpoint_decoder(&cxld->dev)) {
+ to_cxl_endpoint_decoder(cxld)->skip =
+ ioread64_hi_lo(hdm + CXL_HDM_DECODER0_TL_LOW(which));
return 0;
+ }
target_list.value =
ioread64_hi_lo(hdm + CXL_HDM_DECODER0_TL_LOW(which));
@@ -218,6 +222,7 @@ int devm_cxl_enumerate_decoders(struct cxl_hdm *cxlhdm)
void __iomem *hdm = cxlhdm->regs.hdm_decoder;
struct cxl_port *port = cxlhdm->port;
int i, committed, failed;
+ u64 base = 0;
u32 ctrl;
/*
@@ -240,6 +245,7 @@ int devm_cxl_enumerate_decoders(struct cxl_hdm *cxlhdm)
for (i = 0, failed = 0; i < cxlhdm->decoder_count; i++) {
int target_map[CXL_DECODER_MAX_INTERLEAVE] = { 0 };
int rc, target_count = cxlhdm->target_count;
+ struct cxl_endpoint_decoder *cxled;
struct cxl_decoder *cxld;
if (is_cxl_endpoint(port))
@@ -267,6 +273,24 @@ int devm_cxl_enumerate_decoders(struct cxl_hdm *cxlhdm)
"Failed to add decoder to port\n");
return rc;
}
+
+ if (!is_cxl_endpoint(port))
+ continue;
+
+ cxled = to_cxl_endpoint_decoder(cxld);
+ cxled->drange = (struct range) {
+ .start = base,
+ .end = base + range_len(&cxld->range) - 1,
+ };
+
+ if (!range_len(&cxld->range))
+ continue;
+
+ dev_dbg(&cxld->dev,
+ "Enumerated decoder with DPA range %#llx-%#llx\n", base,
+ base + range_len(&cxled->drange));
+ base += cxled->skip + range_len(&cxld->range);
+ port->last_cxled = cxled;
}
if (failed == cxlhdm->decoder_count) {
diff --git a/drivers/cxl/core/port.c b/drivers/cxl/core/port.c
index 0d946711685b..9ef8d69dbfa5 100644
--- a/drivers/cxl/core/port.c
+++ b/drivers/cxl/core/port.c
@@ -84,7 +84,14 @@ static ssize_t size_show(struct device *dev, struct device_attribute *attr,
{
struct cxl_decoder *cxld = to_cxl_decoder(dev);
- return sysfs_emit(buf, "%#llx\n", range_len(&cxld->range));
+ if (is_endpoint_decoder(dev)) {
+ struct cxl_endpoint_decoder *cxled;
+
+ cxled = to_cxl_endpoint_decoder(cxld);
+ return sysfs_emit(buf, "%#llx\n", range_len(&cxled->drange));
+ } else {
+ return sysfs_emit(buf, "%#llx\n", range_len(&cxld->range));
+ }
}
static DEVICE_ATTR_RO(size);
diff --git a/drivers/cxl/cxl.h b/drivers/cxl/cxl.h
index 52295548a071..33f8a55f2f84 100644
--- a/drivers/cxl/cxl.h
+++ b/drivers/cxl/cxl.h
@@ -228,9 +228,13 @@ struct cxl_decoder {
/**
* struct cxl_endpoint_decoder - An decoder residing in a CXL endpoint.
* @base: Base class decoder
+ * @drange: Device physical address space this decoder is using
+ * @skip: The skip count as specified in the CXL specification.
*/
struct cxl_endpoint_decoder {
struct cxl_decoder base;
+ struct range drange;
+ u64 skip;
};
/**
@@ -248,11 +252,15 @@ struct cxl_switch_decoder {
* @base: Base class decoder
* @window: host address space allocator
* @targets: Downstream targets (ie. hostbridges).
+ * @next_region_id: The pre-cached next region id.
+ * @id_lock: Protects next_region_id
*/
struct cxl_root_decoder {
struct cxl_decoder base;
struct gen_pool *window;
struct cxl_decoder_targets *targets;
+ int next_region_id;
+ struct mutex id_lock; /* synchronizes access to next_region_id */
};
#define _to_cxl_decoder(x) \
@@ -312,6 +320,7 @@ struct cxl_nvdimm {
* @capacity: How much total storage the media can hold (endpoint only)
* @pmem_offset: Partition dividing volatile, [0, pmem_offset -1 ], and persistent
* [pmem_offset, capacity - 1] addresses.
+ * @last_cxled: Last active decoder doing decode (endpoint only)
*/
struct cxl_port {
struct device dev;
@@ -326,6 +335,7 @@ struct cxl_port {
u64 capacity;
u64 pmem_offset;
+ struct cxl_endpoint_decoder *last_cxled;
};
/**
--
2.35.1
next prev parent reply other threads:[~2022-04-13 18:37 UTC|newest]
Thread overview: 53+ messages / expand[flat|nested] mbox.gz Atom feed top
2022-04-13 18:37 [RFC PATCH 00/15] Region driver Ben Widawsky
2022-04-13 18:37 ` [RFC PATCH 01/15] cxl/core: Use is_endpoint_decoder Ben Widawsky
2022-04-13 21:22 ` Dan Williams
[not found] ` <CGME20220415205052uscas1p209e03abf95b9c80b2ba1f287c82dfd80@uscas1p2.samsung.com>
2022-04-15 20:50 ` Adam Manzanares
2022-04-13 18:37 ` [RFC PATCH 02/15] cxl/core/hdm: Bail on endpoint init fail Ben Widawsky
2022-04-13 21:31 ` Dan Williams
[not found] ` <CGME20220418163713uscas1p17b3b1b45c7d27e54e3ecb62eb8af2469@uscas1p1.samsung.com>
2022-04-18 16:37 ` Adam Manzanares
2022-05-12 15:50 ` Ben Widawsky
2022-05-12 17:27 ` Luis Chamberlain
2022-05-13 12:09 ` Jonathan Cameron
2022-05-13 15:03 ` Dan Williams
2022-05-13 15:12 ` Luis Chamberlain
2022-05-13 19:14 ` Dan Williams
2022-05-13 19:31 ` Luis Chamberlain
2022-05-19 5:09 ` Dan Williams
2022-04-13 18:37 ` [RFC PATCH 03/15] Revert "cxl/core: Convert decoder range to resource" Ben Widawsky
2022-04-13 21:43 ` Dan Williams
2022-05-12 16:09 ` Ben Widawsky
2022-04-13 18:37 ` [RFC PATCH 04/15] cxl/core: Create distinct decoder structs Ben Widawsky
2022-04-15 1:45 ` Dan Williams
2022-04-18 20:43 ` Dan Williams
2022-04-13 18:37 ` [RFC PATCH 05/15] cxl/acpi: Reserve CXL resources from request_free_mem_region Ben Widawsky
2022-04-18 16:42 ` Dan Williams
2022-04-19 16:43 ` Jason Gunthorpe
2022-04-19 21:50 ` Dan Williams
2022-04-19 21:59 ` Dan Williams
2022-04-19 23:04 ` Jason Gunthorpe
2022-04-20 0:47 ` Dan Williams
2022-04-20 14:34 ` Jason Gunthorpe
2022-04-20 15:32 ` Dan Williams
2022-04-13 18:37 ` [RFC PATCH 06/15] cxl/acpi: Manage root decoder's address space Ben Widawsky
2022-04-18 22:15 ` Dan Williams
2022-05-12 19:18 ` Ben Widawsky
2022-04-13 18:37 ` [RFC PATCH 07/15] cxl/port: Surface ram and pmem resources Ben Widawsky
2022-04-13 18:37 ` Ben Widawsky [this message]
2022-04-13 18:37 ` [RFC PATCH 09/15] cxl/core/port: Add attrs for size and volatility Ben Widawsky
2022-04-13 18:37 ` [RFC PATCH 10/15] cxl/core: Extract IW/IG decoding Ben Widawsky
2022-04-13 18:37 ` [RFC PATCH 11/15] cxl/acpi: Use common " Ben Widawsky
2022-04-13 18:37 ` [RFC PATCH 12/15] cxl/region: Add region creation ABI Ben Widawsky
2022-05-04 22:56 ` Verma, Vishal L
2022-05-05 5:17 ` Dan Williams
2022-05-12 15:54 ` Ben Widawsky
2022-04-13 18:37 ` [RFC PATCH 13/15] cxl/core/port: Add attrs for root ways & granularity Ben Widawsky
2022-04-13 18:37 ` [RFC PATCH 14/15] cxl/region: Introduce configuration Ben Widawsky
2022-04-13 18:37 ` [RFC PATCH 15/15] cxl/region: Introduce a cxl_region driver Ben Widawsky
2022-05-20 16:23 ` [RFC PATCH 00/15] Region driver Jonathan Cameron
2022-05-20 16:41 ` Dan Williams
2022-05-31 12:21 ` Jonathan Cameron
2022-06-23 5:40 ` Dan Williams
2022-06-23 15:08 ` Jonathan Cameron
2022-06-23 17:33 ` Dan Williams
2022-06-23 23:44 ` Dan Williams
2022-06-24 9:08 ` Jonathan Cameron
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20220413183720.2444089-9-ben.widawsky@intel.com \
--to=ben.widawsky@intel.com \
--cc=Jonathan.Cameron@huawei.com \
--cc=alison.schofield@intel.com \
--cc=dan.j.williams@intel.com \
--cc=ira.weiny@intel.com \
--cc=linux-cxl@vger.kernel.org \
--cc=nvdimm@lists.linux.dev \
--cc=patches@lists.linux.dev \
--cc=vishal.l.verma@intel.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).