From: Dan Williams <dan.j.williams@intel.com>
To: linux-kernel@vger.kernel.org
Cc: linux-nvdimm@lists.01.org, x86@kernel.org,
	"Rafael J. Wysocki" <rjw@rjwysocki.net>,
	linux-mm@kvack.org,
	Jonathan Cameron <Jonathan.Cameron@huawei.com>,
	Len Brown <lenb@kernel.org>
Subject: [RFC PATCH 3/5] acpi/hmat: Track target address ranges
Date: Thu, 04 Apr 2019 12:08:44 -0700
Message-ID: <155440492414.3190322.12683374224345847860.stgit@dwillia2-desk3.amr.corp.intel.com>
In-Reply-To: <155440490809.3190322.15060922240602775809.stgit@dwillia2-desk3.amr.corp.intel.com>

As of ACPI 6.3 the HMAT no longer advertises a physical memory address
range for its entries. Instead, the expectation is that the corresponding
SRAT entry is looked up by the target proximity domain.

Given that there may be multiple distinct address ranges sharing the same
performance profile (a sparse address space), find_mem_target() is updated
to also consider the start address of the memory range. Target property
updates are likewise adjusted to loop over all 'struct memory_target'
instances that may share the same proximity domain.
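
For illustration only, a minimal user-space sketch (names are hypothetical,
not the kernel code in the patch below) of why the lookup needs both the
proximity domain and the range start once a single domain can span several
sparse ranges:

#include <stdint.h>
#include <stdio.h>

/* Stand-in for the kernel's 'struct memory_target' below. */
struct target {
	unsigned int pxm;	/* proximity domain from SRAT */
	uint64_t start, size;	/* physical range from SRAT */
};

/* Two distinct ranges sharing one proximity domain (sparse layout). */
static struct target targets[] = {
	{ .pxm = 1, .start = 0x100000000ULL, .size = 0x80000000ULL },
	{ .pxm = 1, .start = 0x300000000ULL, .size = 0x80000000ULL },
};

/*
 * Matching on pxm alone would always return the first entry; matching on
 * (pxm, start) distinguishes the two ranges, which is what the updated
 * find_mem_target() does.
 */
static struct target *find_target(unsigned int pxm, uint64_t start)
{
	for (size_t i = 0; i < sizeof(targets) / sizeof(targets[0]); i++)
		if (targets[i].pxm == pxm && targets[i].start == start)
			return &targets[i];
	return NULL;
}

int main(void)
{
	struct target *t = find_target(1, 0x300000000ULL);

	if (t)
		printf("pxm %u range [%#llx, %#llx)\n", t->pxm,
		       (unsigned long long)t->start,
		       (unsigned long long)(t->start + t->size));
	return 0;
}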

Cc: "Rafael J. Wysocki" <rjw@rjwysocki.net>
Cc: Len Brown <lenb@kernel.org>
Cc: Keith Busch <keith.busch@intel.com>
Cc: Jonathan Cameron <Jonathan.Cameron@huawei.com>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
---
 drivers/acpi/hmat/hmat.c |   77 ++++++++++++++++++++++++++++++++--------------
 1 file changed, 53 insertions(+), 24 deletions(-)

diff --git a/drivers/acpi/hmat/hmat.c b/drivers/acpi/hmat/hmat.c
index b275016ff648..e7ae44c8d359 100644
--- a/drivers/acpi/hmat/hmat.c
+++ b/drivers/acpi/hmat/hmat.c
@@ -38,6 +38,7 @@ static struct memory_locality *localities_types[4];
 
 struct memory_target {
 	struct list_head node;
+	u64 start, size;
 	unsigned int memory_pxm;
 	unsigned int processor_pxm;
 	struct node_hmem_attrs hmem_attrs;
@@ -63,12 +64,13 @@ static __init struct memory_initiator *find_mem_initiator(unsigned int cpu_pxm)
 	return NULL;
 }
 
-static __init struct memory_target *find_mem_target(unsigned int mem_pxm)
+static __init struct memory_target *find_mem_target(unsigned int mem_pxm,
+		u64 start)
 {
 	struct memory_target *target;
 
 	list_for_each_entry(target, &targets, node)
-		if (target->memory_pxm == mem_pxm)
+		if (target->memory_pxm == mem_pxm && target->start == start)
 			return target;
 	return NULL;
 }
@@ -92,14 +94,15 @@ static __init void alloc_memory_initiator(unsigned int cpu_pxm)
 	list_add_tail(&initiator->node, &initiators);
 }
 
-static __init void alloc_memory_target(unsigned int mem_pxm)
+static __init void alloc_memory_target(unsigned int mem_pxm,
+		u64 start, u64 size)
 {
 	struct memory_target *target;
 
 	if (pxm_to_node(mem_pxm) == NUMA_NO_NODE)
 		return;
 
-	target = find_mem_target(mem_pxm);
+	target = find_mem_target(mem_pxm, start);
 	if (target)
 		return;
 
@@ -109,6 +112,8 @@ static __init void alloc_memory_target(unsigned int mem_pxm)
 
 	target->memory_pxm = mem_pxm;
 	target->processor_pxm = PXM_INVAL;
+	target->start = start;
+	target->size = size;
 	list_add_tail(&target->node, &targets);
 }
 
@@ -183,8 +188,8 @@ static __init u32 hmat_normalize(u16 entry, u64 base, u8 type)
 	return value;
 }
 
-static __init void hmat_update_target_access(struct memory_target *target,
-					     u8 type, u32 value)
+static __init void __hmat_update_target_access(struct memory_target *target,
+		u8 type, u32 value)
 {
 	switch (type) {
 	case ACPI_HMAT_ACCESS_LATENCY:
@@ -212,6 +217,20 @@ static __init void hmat_update_target_access(struct memory_target *target,
 	}
 }
 
+static __init void hmat_update_target_access(int memory_pxm, int processor_pxm,
+		u8 type, u32 value)
+{
+	struct memory_target *target;
+
+	list_for_each_entry(target, &targets, node) {
+		if (target->processor_pxm != processor_pxm)
+			continue;
+		if (target->memory_pxm != memory_pxm)
+			continue;
+		__hmat_update_target_access(target, type, value);
+	}
+}
+
 static __init void hmat_add_locality(struct acpi_hmat_locality *hmat_loc)
 {
 	struct memory_locality *loc;
@@ -255,7 +274,6 @@ static __init int hmat_parse_locality(union acpi_subtable_headers *header,
 				      const unsigned long end)
 {
 	struct acpi_hmat_locality *hmat_loc = (void *)header;
-	struct memory_target *target;
 	unsigned int init, targ, total_size, ipds, tpds;
 	u32 *inits, *targs, value;
 	u16 *entries;
@@ -296,11 +314,9 @@ static __init int hmat_parse_locality(union acpi_subtable_headers *header,
 				inits[init], targs[targ], value,
 				hmat_data_type_suffix(type));
 
-			if (mem_hier == ACPI_HMAT_MEMORY) {
-				target = find_mem_target(targs[targ]);
-				if (target && target->processor_pxm == inits[init])
-					hmat_update_target_access(target, type, value);
-			}
+			if (mem_hier == ACPI_HMAT_MEMORY)
+				hmat_update_target_access(targs[targ],
+						inits[init], type, value);
 		}
 	}
 
@@ -367,6 +383,7 @@ static int __init hmat_parse_proximity_domain(union acpi_subtable_headers *heade
 {
 	struct acpi_hmat_proximity_domain *p = (void *)header;
 	struct memory_target *target = NULL;
+	bool found = false;
 
 	if (p->header.length != sizeof(*p)) {
 		pr_notice("HMAT: Unexpected address range header length: %d\n",
@@ -382,23 +399,34 @@ static int __init hmat_parse_proximity_domain(union acpi_subtable_headers *heade
 		pr_info("HMAT: Memory Flags:%04x Processor Domain:%d Memory Domain:%d\n",
 			p->flags, p->processor_PD, p->memory_PD);
 
-	if (p->flags & ACPI_HMAT_MEMORY_PD_VALID) {
-		target = find_mem_target(p->memory_PD);
-		if (!target) {
-			pr_debug("HMAT: Memory Domain missing from SRAT\n");
-			return -EINVAL;
-		}
-	}
-	if (target && p->flags & ACPI_HMAT_PROCESSOR_PD_VALID) {
-		int p_node = pxm_to_node(p->processor_PD);
+	if ((p->flags & ACPI_HMAT_MEMORY_PD_VALID) == 0)
+		return 0;
+
+	list_for_each_entry(target, &targets, node) {
+		int p_node;
+
+		if (target->memory_pxm != p->memory_PD)
+			continue;
+		found = true;
 
+		if ((p->flags & ACPI_HMAT_PROCESSOR_PD_VALID) == 0)
+			continue;
+
+		p_node = pxm_to_node(p->processor_PD);
 		if (p_node == NUMA_NO_NODE) {
-			pr_debug("HMAT: Invalid Processor Domain\n");
+			pr_debug("HMAT: Invalid Processor Domain: %d\n",
+					p->processor_PD);
 			return -EINVAL;
 		}
+
 		target->processor_pxm = p_node;
 	}
 
+	if (!found) {
+		pr_debug("HMAT: Memory Domain missing from SRAT for pxm: %d\n",
+				p->memory_PD);
+		return -EINVAL;
+	}
 	return 0;
 }
 
@@ -431,7 +459,7 @@ static __init int srat_parse_mem_affinity(union acpi_subtable_headers *header,
 		return -EINVAL;
 	if (!(ma->flags & ACPI_SRAT_MEM_ENABLED))
 		return 0;
-	alloc_memory_target(ma->proximity_domain);
+	alloc_memory_target(ma->proximity_domain, ma->base_address, ma->length);
 	return 0;
 }
 
@@ -568,7 +596,8 @@ static __init void hmat_register_target_initiators(struct memory_target *target)
 				clear_bit(initiator->processor_pxm, p_nodes);
 		}
 		if (best)
-			hmat_update_target_access(target, loc->hmat_loc->data_type, best);
+			__hmat_update_target_access(target,
+					loc->hmat_loc->data_type, best);
 	}
 
 	for_each_set_bit(i, p_nodes, MAX_NUMNODES) {
