linux-acpi.vger.kernel.org archive mirror
* [PATCHv2 1/2] hmat: Register memory-side cache after parsing
@ 2019-05-15 21:54 Keith Busch
  2019-05-15 21:54 ` [PATCHv2 2/2] hmat: Register attributes for memory hot add Keith Busch
                   ` (2 more replies)
  0 siblings, 3 replies; 9+ messages in thread
From: Keith Busch @ 2019-05-15 21:54 UTC (permalink / raw)
  To: linux-kernel, linux-acpi, Rafael Wysocki
  Cc: Dan Williams, Dave Hansen, Brice Goglin, Keith Busch

Instead of registering the hmat cache attributes in line with parsing
the table, save the attributes in the memory target and register them
after parsing completes. This will make it easier to register the
attributes later when hot add is supported.

Signed-off-by: Keith Busch <keith.busch@intel.com>
---
v1 -> v2:

  Fixed multi-level caches, and no caches. v1 incorrectly assumed only a level
  1 always existed (Brice).

 drivers/acpi/hmat/hmat.c | 70 +++++++++++++++++++++++++++++++++++++-----------
 1 file changed, 55 insertions(+), 15 deletions(-)

diff --git a/drivers/acpi/hmat/hmat.c b/drivers/acpi/hmat/hmat.c
index 96b7d39a97c6..bf23c9a27958 100644
--- a/drivers/acpi/hmat/hmat.c
+++ b/drivers/acpi/hmat/hmat.c
@@ -36,11 +36,17 @@ enum locality_types {
 
 static struct memory_locality *localities_types[4];
 
+struct target_cache {
+	struct list_head node;
+	struct node_cache_attrs cache_attrs;
+};
+
 struct memory_target {
 	struct list_head node;
 	unsigned int memory_pxm;
 	unsigned int processor_pxm;
 	struct node_hmem_attrs hmem_attrs;
+	struct list_head caches;
 };
 
 struct memory_initiator {
@@ -110,6 +116,7 @@ static __init void alloc_memory_target(unsigned int mem_pxm)
 	target->memory_pxm = mem_pxm;
 	target->processor_pxm = PXM_INVAL;
 	list_add_tail(&target->node, &targets);
+	INIT_LIST_HEAD(&target->caches);
 }
 
 static __init const char *hmat_data_type(u8 type)
@@ -314,7 +321,8 @@ static __init int hmat_parse_cache(union acpi_subtable_headers *header,
 				   const unsigned long end)
 {
 	struct acpi_hmat_cache *cache = (void *)header;
-	struct node_cache_attrs cache_attrs;
+	struct memory_target *target;
+	struct target_cache *tcache;
 	u32 attrs;
 
 	if (cache->header.length < sizeof(*cache)) {
@@ -328,37 +336,47 @@ static __init int hmat_parse_cache(union acpi_subtable_headers *header,
 		cache->memory_PD, cache->cache_size, attrs,
 		cache->number_of_SMBIOShandles);
 
-	cache_attrs.size = cache->cache_size;
-	cache_attrs.level = (attrs & ACPI_HMAT_CACHE_LEVEL) >> 4;
-	cache_attrs.line_size = (attrs & ACPI_HMAT_CACHE_LINE_SIZE) >> 16;
+	target = find_mem_target(cache->memory_PD);
+	if (!target)
+		return 0;
+
+	tcache = kzalloc(sizeof(*tcache), GFP_KERNEL);
+	if (!tcache) {
+		pr_notice_once("Failed to allocate HMAT cache info\n");
+		return 0;
+	}
+
+	tcache->cache_attrs.size = cache->cache_size;
+	tcache->cache_attrs.level = (attrs & ACPI_HMAT_CACHE_LEVEL) >> 4;
+	tcache->cache_attrs.line_size = (attrs & ACPI_HMAT_CACHE_LINE_SIZE) >> 16;
 
 	switch ((attrs & ACPI_HMAT_CACHE_ASSOCIATIVITY) >> 8) {
 	case ACPI_HMAT_CA_DIRECT_MAPPED:
-		cache_attrs.indexing = NODE_CACHE_DIRECT_MAP;
+		tcache->cache_attrs.indexing = NODE_CACHE_DIRECT_MAP;
 		break;
 	case ACPI_HMAT_CA_COMPLEX_CACHE_INDEXING:
-		cache_attrs.indexing = NODE_CACHE_INDEXED;
+		tcache->cache_attrs.indexing = NODE_CACHE_INDEXED;
 		break;
 	case ACPI_HMAT_CA_NONE:
 	default:
-		cache_attrs.indexing = NODE_CACHE_OTHER;
+		tcache->cache_attrs.indexing = NODE_CACHE_OTHER;
 		break;
 	}
 
 	switch ((attrs & ACPI_HMAT_WRITE_POLICY) >> 12) {
 	case ACPI_HMAT_CP_WB:
-		cache_attrs.write_policy = NODE_CACHE_WRITE_BACK;
+		tcache->cache_attrs.write_policy = NODE_CACHE_WRITE_BACK;
 		break;
 	case ACPI_HMAT_CP_WT:
-		cache_attrs.write_policy = NODE_CACHE_WRITE_THROUGH;
+		tcache->cache_attrs.write_policy = NODE_CACHE_WRITE_THROUGH;
 		break;
 	case ACPI_HMAT_CP_NONE:
 	default:
-		cache_attrs.write_policy = NODE_CACHE_WRITE_OTHER;
+		tcache->cache_attrs.write_policy = NODE_CACHE_WRITE_OTHER;
 		break;
 	}
+	list_add_tail(&tcache->node, &target->caches);
 
-	node_add_cache(pxm_to_node(cache->memory_PD), &cache_attrs);
 	return 0;
 }
 
@@ -577,20 +595,37 @@ static __init void hmat_register_target_initiators(struct memory_target *target)
 	}
 }
 
+static __init void hmat_register_target_cache(struct memory_target *target)
+{
+	unsigned mem_nid = pxm_to_node(target->memory_pxm);
+	struct target_cache *tcache;
+
+	list_for_each_entry(tcache, &target->caches, node)
+		node_add_cache(mem_nid, &tcache->cache_attrs);
+}
+
 static __init void hmat_register_target_perf(struct memory_target *target)
 {
 	unsigned mem_nid = pxm_to_node(target->memory_pxm);
 	node_set_perf_attrs(mem_nid, &target->hmem_attrs, 0);
 }
 
+static __init void hmat_register_target(struct memory_target *target)
+{
+	if (!node_online(pxm_to_node(target->memory_pxm)))
+		return;
+
+	hmat_register_target_initiators(target);
+	hmat_register_target_cache(target);
+	hmat_register_target_perf(target);
+}
+
 static __init void hmat_register_targets(void)
 {
 	struct memory_target *target;
 
-	list_for_each_entry(target, &targets, node) {
-		hmat_register_target_initiators(target);
-		hmat_register_target_perf(target);
-	}
+	list_for_each_entry(target, &targets, node)
+		hmat_register_target(target);
 }
 
 static __init void hmat_free_structures(void)
@@ -598,8 +633,13 @@ static __init void hmat_free_structures(void)
 	struct memory_target *target, *tnext;
 	struct memory_locality *loc, *lnext;
 	struct memory_initiator *initiator, *inext;
+	struct target_cache *tcache, *cnext;
 
 	list_for_each_entry_safe(target, tnext, &targets, node) {
+		list_for_each_entry_safe(tcache, cnext, &target->caches, node) {
+			list_del(&tcache->node);
+			kfree(tcache);
+		}
 		list_del(&target->node);
 		kfree(target);
 	}
-- 
2.14.4
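
As an aside for readers following the bit shifts in hmat_parse_cache() above:
the HMAT "Cache Attributes" DWORD packs its fields as cache level [7:4],
associativity [11:8], write policy [15:12], and line size [31:16] (per the
ACPI HMAT definition that the ACPI_HMAT_* masks reflect). A worked decode
with an illustrative value, not one taken from this thread:

  attrs = 0x00401111
  cache level   = (attrs >> 4)  & 0xF    = 1
  associativity = (attrs >> 8)  & 0xF    = 1  (direct mapped)
  write policy  = (attrs >> 12) & 0xF    = 1  (write-back)
  line size     = (attrs >> 16) & 0xFFFF = 64 bytes

Once node_add_cache() runs, these attributes are exposed under
/sys/devices/system/node/nodeX/memory_side_cache/indexY/ as size, line_size,
indexing, and write_policy (see Documentation/admin-guide/mm/numaperf.rst).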



* [PATCHv2 2/2] hmat: Register attributes for memory hot add
  2019-05-15 21:54 [PATCHv2 1/2] hmat: Register memory-side cache after parsing Keith Busch
@ 2019-05-15 21:54 ` Keith Busch
  2019-06-13 20:27 ` [PATCHv2 1/2] hmat: Register memory-side cache after parsing Rafael J. Wysocki
  2019-07-01  8:33 ` Brice Goglin
  2 siblings, 0 replies; 9+ messages in thread
From: Keith Busch @ 2019-05-15 21:54 UTC (permalink / raw)
  To: linux-kernel, linux-acpi, Rafael Wysocki
  Cc: Dan Williams, Dave Hansen, Brice Goglin, Keith Busch

Some of the memory nodes described in HMAT may not be online at the
time the hmat subsystem parses their attributes. Should a node be brought
online later, as can happen when using PMEM as RAM after boot, it will be
missing its initiator links and performance attributes.

Register a memory notifier callback and register the memory attributes
the first time the target's node is brought online, if they were not
already registered.

Signed-off-by: Keith Busch <keith.busch@intel.com>
---
v1 -> v2:

  Fixed an unintended __init attribute that generated compiler warnings
  (Brice).

 drivers/acpi/hmat/hmat.c | 75 ++++++++++++++++++++++++++++++++++++------------
 1 file changed, 57 insertions(+), 18 deletions(-)

diff --git a/drivers/acpi/hmat/hmat.c b/drivers/acpi/hmat/hmat.c
index bf23c9a27958..f86fe7130736 100644
--- a/drivers/acpi/hmat/hmat.c
+++ b/drivers/acpi/hmat/hmat.c
@@ -14,14 +14,18 @@
 #include <linux/init.h>
 #include <linux/list.h>
 #include <linux/list_sort.h>
+#include <linux/memory.h>
+#include <linux/mutex.h>
 #include <linux/node.h>
 #include <linux/sysfs.h>
 
-static __initdata u8 hmat_revision;
+static u8 hmat_revision;
 
-static __initdata LIST_HEAD(targets);
-static __initdata LIST_HEAD(initiators);
-static __initdata LIST_HEAD(localities);
+static LIST_HEAD(targets);
+static LIST_HEAD(initiators);
+static LIST_HEAD(localities);
+
+static DEFINE_MUTEX(target_lock);
 
 /*
  * The defined enum order is used to prioritize attributes to break ties when
@@ -47,6 +51,8 @@ struct memory_target {
 	unsigned int processor_pxm;
 	struct node_hmem_attrs hmem_attrs;
 	struct list_head caches;
+	struct node_cache_attrs cache_attrs;
+	bool registered;
 };
 
 struct memory_initiator {
@@ -59,7 +65,7 @@ struct memory_locality {
 	struct acpi_hmat_locality *hmat_loc;
 };
 
-static __init struct memory_initiator *find_mem_initiator(unsigned int cpu_pxm)
+static struct memory_initiator *find_mem_initiator(unsigned int cpu_pxm)
 {
 	struct memory_initiator *initiator;
 
@@ -69,7 +75,7 @@ static __init struct memory_initiator *find_mem_initiator(unsigned int cpu_pxm)
 	return NULL;
 }
 
-static __init struct memory_target *find_mem_target(unsigned int mem_pxm)
+static struct memory_target *find_mem_target(unsigned int mem_pxm)
 {
 	struct memory_target *target;
 
@@ -155,7 +161,7 @@ static __init const char *hmat_data_type_suffix(u8 type)
 	}
 }
 
-static __init u32 hmat_normalize(u16 entry, u64 base, u8 type)
+static u32 hmat_normalize(u16 entry, u64 base, u8 type)
 {
 	u32 value;
 
@@ -190,7 +196,7 @@ static __init u32 hmat_normalize(u16 entry, u64 base, u8 type)
 	return value;
 }
 
-static __init void hmat_update_target_access(struct memory_target *target,
+static void hmat_update_target_access(struct memory_target *target,
 					     u8 type, u32 value)
 {
 	switch (type) {
@@ -453,7 +459,7 @@ static __init int srat_parse_mem_affinity(union acpi_subtable_headers *header,
 	return 0;
 }
 
-static __init u32 hmat_initiator_perf(struct memory_target *target,
+static u32 hmat_initiator_perf(struct memory_target *target,
 			       struct memory_initiator *initiator,
 			       struct acpi_hmat_locality *hmat_loc)
 {
@@ -491,7 +497,7 @@ static __init u32 hmat_initiator_perf(struct memory_target *target,
 			      hmat_loc->data_type);
 }
 
-static __init bool hmat_update_best(u8 type, u32 value, u32 *best)
+static bool hmat_update_best(u8 type, u32 value, u32 *best)
 {
 	bool updated = false;
 
@@ -535,7 +541,7 @@ static int initiator_cmp(void *priv, struct list_head *a, struct list_head *b)
 	return ia->processor_pxm - ib->processor_pxm;
 }
 
-static __init void hmat_register_target_initiators(struct memory_target *target)
+static void hmat_register_target_initiators(struct memory_target *target)
 {
 	static DECLARE_BITMAP(p_nodes, MAX_NUMNODES);
 	struct memory_initiator *initiator;
@@ -595,7 +601,7 @@ static __init void hmat_register_target_initiators(struct memory_target *target)
 	}
 }
 
-static __init void hmat_register_target_cache(struct memory_target *target)
+static void hmat_register_target_cache(struct memory_target *target)
 {
 	unsigned mem_nid = pxm_to_node(target->memory_pxm);
 	struct target_cache *tcache;
@@ -604,23 +610,28 @@ static __init void hmat_register_target_cache(struct memory_target *target)
 		node_add_cache(mem_nid, &tcache->cache_attrs);
 }
 
-static __init void hmat_register_target_perf(struct memory_target *target)
+static void hmat_register_target_perf(struct memory_target *target)
 {
 	unsigned mem_nid = pxm_to_node(target->memory_pxm);
 	node_set_perf_attrs(mem_nid, &target->hmem_attrs, 0);
 }
 
-static __init void hmat_register_target(struct memory_target *target)
+static void hmat_register_target(struct memory_target *target)
 {
 	if (!node_online(pxm_to_node(target->memory_pxm)))
 		return;
 
-	hmat_register_target_initiators(target);
-	hmat_register_target_cache(target);
-	hmat_register_target_perf(target);
+	mutex_lock(&target_lock);
+	if (!target->registered) {
+		hmat_register_target_initiators(target);
+		hmat_register_target_cache(target);
+		hmat_register_target_perf(target);
+		target->registered = true;
+	}
+	mutex_unlock(&target_lock);
 }
 
-static __init void hmat_register_targets(void)
+static void hmat_register_targets(void)
 {
 	struct memory_target *target;
 
@@ -628,6 +639,30 @@ static __init void hmat_register_targets(void)
 		hmat_register_target(target);
 }
 
+static int hmat_callback(struct notifier_block *self,
+			 unsigned long action, void *arg)
+{
+	struct memory_target *target;
+	struct memory_notify *mnb = arg;
+	int pxm, nid = mnb->status_change_nid;
+
+	if (nid == NUMA_NO_NODE || action != MEM_ONLINE)
+		return NOTIFY_OK;
+
+	pxm = node_to_pxm(nid);
+	target = find_mem_target(pxm);
+	if (!target)
+		return NOTIFY_OK;
+
+	hmat_register_target(target);
+	return NOTIFY_OK;
+}
+
+static struct notifier_block hmat_callback_nb = {
+	.notifier_call = hmat_callback,
+	.priority = 2,
+};
+
 static __init void hmat_free_structures(void)
 {
 	struct memory_target *target, *tnext;
@@ -698,6 +733,10 @@ static __init int hmat_init(void)
 		}
 	}
 	hmat_register_targets();
+
+	/* Keep the table and structures if the notifier may use them */
+	if (!register_hotmemory_notifier(&hmat_callback_nb))
+		return 0;
 out_put:
 	hmat_free_structures();
 	acpi_put_table(tbl);
-- 
2.14.4
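
For context on when hmat_callback() fires: the "PMEM as RAM" case mentioned
in the changelog is normally driven from userspace through the dax_kmem
driver. A hedged sketch of that flow (assuming a daxctl recent enough to
support system-ram mode and an existing device-dax instance named dax0.0):

  # rebind the device-dax instance to the kmem driver and online it as RAM
  daxctl reconfigure-device dax0.0 --mode=system-ram

Onlining the new memory blocks raises MEM_ONLINE, which reaches
hmat_callback() through the hotmemory notifier chain and performs the
deferred registration guarded by target->registered.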



* Re: [PATCHv2 1/2] hmat: Register memory-side cache after parsing
  2019-05-15 21:54 [PATCHv2 1/2] hmat: Register memory-side cache after parsing Keith Busch
  2019-05-15 21:54 ` [PATCHv2 2/2] hmat: Register attributes for memory hot add Keith Busch
@ 2019-06-13 20:27 ` Rafael J. Wysocki
  2019-07-24 15:32   ` Keith Busch
  2019-07-01  8:33 ` Brice Goglin
  2 siblings, 1 reply; 9+ messages in thread
From: Rafael J. Wysocki @ 2019-06-13 20:27 UTC (permalink / raw)
  To: Keith Busch
  Cc: linux-kernel, linux-acpi, Rafael Wysocki, Dan Williams,
	Dave Hansen, Brice Goglin

On Wednesday, May 15, 2019 11:54:43 PM CEST Keith Busch wrote:
> Instead of registering the hmat cache attributes in line with parsing
> the table, save the attributes in the memory target and register them
> after parsing completes. This will make it easier to register the
> attributes later when hot add is supported.
> 
> Signed-off-by: Keith Busch <keith.busch@intel.com>
> ---
> v1 -> v2:
> 
>   Fixed multi-level caches, and no caches. v1 incorrectly assumed only a level
>   1 always existed (Brice).
> 
>  drivers/acpi/hmat/hmat.c | 70 +++++++++++++++++++++++++++++++++++++-----------
>  1 file changed, 55 insertions(+), 15 deletions(-)
> 
> diff --git a/drivers/acpi/hmat/hmat.c b/drivers/acpi/hmat/hmat.c
> index 96b7d39a97c6..bf23c9a27958 100644
> --- a/drivers/acpi/hmat/hmat.c
> +++ b/drivers/acpi/hmat/hmat.c
> @@ -36,11 +36,17 @@ enum locality_types {
>  
>  static struct memory_locality *localities_types[4];
>  
> +struct target_cache {
> +	struct list_head node;
> +	struct node_cache_attrs cache_attrs;
> +};
> +
>  struct memory_target {
>  	struct list_head node;
>  	unsigned int memory_pxm;
>  	unsigned int processor_pxm;
>  	struct node_hmem_attrs hmem_attrs;
> +	struct list_head caches;
>  };
>  
>  struct memory_initiator {
> @@ -110,6 +116,7 @@ static __init void alloc_memory_target(unsigned int mem_pxm)
>  	target->memory_pxm = mem_pxm;
>  	target->processor_pxm = PXM_INVAL;
>  	list_add_tail(&target->node, &targets);
> +	INIT_LIST_HEAD(&target->caches);
>  }
>  
>  static __init const char *hmat_data_type(u8 type)
> @@ -314,7 +321,8 @@ static __init int hmat_parse_cache(union acpi_subtable_headers *header,
>  				   const unsigned long end)
>  {
>  	struct acpi_hmat_cache *cache = (void *)header;
> -	struct node_cache_attrs cache_attrs;
> +	struct memory_target *target;
> +	struct target_cache *tcache;
>  	u32 attrs;
>  
>  	if (cache->header.length < sizeof(*cache)) {
> @@ -328,37 +336,47 @@ static __init int hmat_parse_cache(union acpi_subtable_headers *header,
>  		cache->memory_PD, cache->cache_size, attrs,
>  		cache->number_of_SMBIOShandles);
>  
> -	cache_attrs.size = cache->cache_size;
> -	cache_attrs.level = (attrs & ACPI_HMAT_CACHE_LEVEL) >> 4;
> -	cache_attrs.line_size = (attrs & ACPI_HMAT_CACHE_LINE_SIZE) >> 16;
> +	target = find_mem_target(cache->memory_PD);
> +	if (!target)
> +		return 0;
> +
> +	tcache = kzalloc(sizeof(*tcache), GFP_KERNEL);
> +	if (!tcache) {
> +		pr_notice_once("Failed to allocate HMAT cache info\n");
> +		return 0;
> +	}
> +
> +	tcache->cache_attrs.size = cache->cache_size;
> +	tcache->cache_attrs.level = (attrs & ACPI_HMAT_CACHE_LEVEL) >> 4;
> +	tcache->cache_attrs.line_size = (attrs & ACPI_HMAT_CACHE_LINE_SIZE) >> 16;
>  
>  	switch ((attrs & ACPI_HMAT_CACHE_ASSOCIATIVITY) >> 8) {
>  	case ACPI_HMAT_CA_DIRECT_MAPPED:
> -		cache_attrs.indexing = NODE_CACHE_DIRECT_MAP;
> +		tcache->cache_attrs.indexing = NODE_CACHE_DIRECT_MAP;
>  		break;
>  	case ACPI_HMAT_CA_COMPLEX_CACHE_INDEXING:
> -		cache_attrs.indexing = NODE_CACHE_INDEXED;
> +		tcache->cache_attrs.indexing = NODE_CACHE_INDEXED;
>  		break;
>  	case ACPI_HMAT_CA_NONE:
>  	default:
> -		cache_attrs.indexing = NODE_CACHE_OTHER;
> +		tcache->cache_attrs.indexing = NODE_CACHE_OTHER;
>  		break;
>  	}
>  
>  	switch ((attrs & ACPI_HMAT_WRITE_POLICY) >> 12) {
>  	case ACPI_HMAT_CP_WB:
> -		cache_attrs.write_policy = NODE_CACHE_WRITE_BACK;
> +		tcache->cache_attrs.write_policy = NODE_CACHE_WRITE_BACK;
>  		break;
>  	case ACPI_HMAT_CP_WT:
> -		cache_attrs.write_policy = NODE_CACHE_WRITE_THROUGH;
> +		tcache->cache_attrs.write_policy = NODE_CACHE_WRITE_THROUGH;
>  		break;
>  	case ACPI_HMAT_CP_NONE:
>  	default:
> -		cache_attrs.write_policy = NODE_CACHE_WRITE_OTHER;
> +		tcache->cache_attrs.write_policy = NODE_CACHE_WRITE_OTHER;
>  		break;
>  	}
> +	list_add_tail(&tcache->node, &target->caches);
>  
> -	node_add_cache(pxm_to_node(cache->memory_PD), &cache_attrs);
>  	return 0;
>  }
>  
> @@ -577,20 +595,37 @@ static __init void hmat_register_target_initiators(struct memory_target *target)
>  	}
>  }
>  
> +static __init void hmat_register_target_cache(struct memory_target *target)
> +{
> +	unsigned mem_nid = pxm_to_node(target->memory_pxm);
> +	struct target_cache *tcache;
> +
> +	list_for_each_entry(tcache, &target->caches, node)
> +		node_add_cache(mem_nid, &tcache->cache_attrs);
> +}
> +
>  static __init void hmat_register_target_perf(struct memory_target *target)
>  {
>  	unsigned mem_nid = pxm_to_node(target->memory_pxm);
>  	node_set_perf_attrs(mem_nid, &target->hmem_attrs, 0);
>  }
>  
> +static __init void hmat_register_target(struct memory_target *target)
> +{
> +	if (!node_online(pxm_to_node(target->memory_pxm)))
> +		return;
> +
> +	hmat_register_target_initiators(target);
> +	hmat_register_target_cache(target);
> +	hmat_register_target_perf(target);
> +}
> +
>  static __init void hmat_register_targets(void)
>  {
>  	struct memory_target *target;
>  
> -	list_for_each_entry(target, &targets, node) {
> -		hmat_register_target_initiators(target);
> -		hmat_register_target_perf(target);
> -	}
> +	list_for_each_entry(target, &targets, node)
> +		hmat_register_target(target);
>  }
>  
>  static __init void hmat_free_structures(void)
> @@ -598,8 +633,13 @@ static __init void hmat_free_structures(void)
>  	struct memory_target *target, *tnext;
>  	struct memory_locality *loc, *lnext;
>  	struct memory_initiator *initiator, *inext;
> +	struct target_cache *tcache, *cnext;
>  
>  	list_for_each_entry_safe(target, tnext, &targets, node) {
> +		list_for_each_entry_safe(tcache, cnext, &target->caches, node) {
> +			list_del(&tcache->node);
> +			kfree(tcache);
> +		}
>  		list_del(&target->node);
>  		kfree(target);
>  	}
> 

Not sure what to do with this patch and the next one in the series.

FWIW, they both are fine by me.

Also ISTR seeing them in a series from Dan. (?)

* Re: [PATCHv2 1/2] hmat: Register memory-side cache after parsing
  2019-05-15 21:54 [PATCHv2 1/2] hmat: Register memory-side cache after parsing Keith Busch
  2019-05-15 21:54 ` [PATCHv2 2/2] hmat: Register attributes for memory hot add Keith Busch
  2019-06-13 20:27 ` [PATCHv2 1/2] hmat: Register memory-side cache after parsing Rafael J. Wysocki
@ 2019-07-01  8:33 ` Brice Goglin
  2 siblings, 0 replies; 9+ messages in thread
From: Brice Goglin @ 2019-07-01  8:33 UTC (permalink / raw)
  To: Keith Busch, linux-kernel, linux-acpi, Rafael Wysocki
  Cc: Dan Williams, Dave Hansen

On 15/05/2019 at 23:54, Keith Busch wrote:
> Instead of registering the hmat cache attributes in line with parsing
> the table, save the attributes in the memory target and register them
> after parsing completes. This will make it easier to register the
> attributes later when hot add is supported.
>
> Signed-off-by: Keith Busch <keith.busch@intel.com>


Sorry for the delay; I finally managed to test these two patches, and they
work fine (tested with a fake HMAT declaring that a kmem-hotplug NVDIMM node
is local to 2 of 4 total initiators, and that there are 2 memory-side caches
in front of that NVDIMM node).

Tested-by: Brice Goglin <Brice.Goglin@inria.fr>
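
One common way to set up such a fake HMAT is the ACPI initrd table override
mechanism (CONFIG_ACPI_TABLE_UPGRADE; see
Documentation/admin-guide/acpi/initrd_table_override.rst). A hedged sketch,
assuming the table has already been compiled to hmat.aml:

  # the override cpio must be uncompressed and prepended to the normal initrd
  mkdir -p kernel/firmware/acpi
  cp hmat.aml kernel/firmware/acpi/
  find kernel | cpio -H newc --create > tables.cpio
  cat tables.cpio /boot/initrd.img > /boot/initrd.img.new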


> ---
> v1 -> v2:
>
>   Fixed multi-level caches, and no caches. v1 incorrectly assumed only a level
>   1 always existed (Brice).
>
>  drivers/acpi/hmat/hmat.c | 70 +++++++++++++++++++++++++++++++++++++-----------
>  1 file changed, 55 insertions(+), 15 deletions(-)
>
> diff --git a/drivers/acpi/hmat/hmat.c b/drivers/acpi/hmat/hmat.c
> index 96b7d39a97c6..bf23c9a27958 100644
> --- a/drivers/acpi/hmat/hmat.c
> +++ b/drivers/acpi/hmat/hmat.c
> @@ -36,11 +36,17 @@ enum locality_types {
>  
>  static struct memory_locality *localities_types[4];
>  
> +struct target_cache {
> +	struct list_head node;
> +	struct node_cache_attrs cache_attrs;
> +};
> +
>  struct memory_target {
>  	struct list_head node;
>  	unsigned int memory_pxm;
>  	unsigned int processor_pxm;
>  	struct node_hmem_attrs hmem_attrs;
> +	struct list_head caches;
>  };
>  
>  struct memory_initiator {
> @@ -110,6 +116,7 @@ static __init void alloc_memory_target(unsigned int mem_pxm)
>  	target->memory_pxm = mem_pxm;
>  	target->processor_pxm = PXM_INVAL;
>  	list_add_tail(&target->node, &targets);
> +	INIT_LIST_HEAD(&target->caches);
>  }
>  
>  static __init const char *hmat_data_type(u8 type)
> @@ -314,7 +321,8 @@ static __init int hmat_parse_cache(union acpi_subtable_headers *header,
>  				   const unsigned long end)
>  {
>  	struct acpi_hmat_cache *cache = (void *)header;
> -	struct node_cache_attrs cache_attrs;
> +	struct memory_target *target;
> +	struct target_cache *tcache;
>  	u32 attrs;
>  
>  	if (cache->header.length < sizeof(*cache)) {
> @@ -328,37 +336,47 @@ static __init int hmat_parse_cache(union acpi_subtable_headers *header,
>  		cache->memory_PD, cache->cache_size, attrs,
>  		cache->number_of_SMBIOShandles);
>  
> -	cache_attrs.size = cache->cache_size;
> -	cache_attrs.level = (attrs & ACPI_HMAT_CACHE_LEVEL) >> 4;
> -	cache_attrs.line_size = (attrs & ACPI_HMAT_CACHE_LINE_SIZE) >> 16;
> +	target = find_mem_target(cache->memory_PD);
> +	if (!target)
> +		return 0;
> +
> +	tcache = kzalloc(sizeof(*tcache), GFP_KERNEL);
> +	if (!tcache) {
> +		pr_notice_once("Failed to allocate HMAT cache info\n");
> +		return 0;
> +	}
> +
> +	tcache->cache_attrs.size = cache->cache_size;
> +	tcache->cache_attrs.level = (attrs & ACPI_HMAT_CACHE_LEVEL) >> 4;
> +	tcache->cache_attrs.line_size = (attrs & ACPI_HMAT_CACHE_LINE_SIZE) >> 16;
>  
>  	switch ((attrs & ACPI_HMAT_CACHE_ASSOCIATIVITY) >> 8) {
>  	case ACPI_HMAT_CA_DIRECT_MAPPED:
> -		cache_attrs.indexing = NODE_CACHE_DIRECT_MAP;
> +		tcache->cache_attrs.indexing = NODE_CACHE_DIRECT_MAP;
>  		break;
>  	case ACPI_HMAT_CA_COMPLEX_CACHE_INDEXING:
> -		cache_attrs.indexing = NODE_CACHE_INDEXED;
> +		tcache->cache_attrs.indexing = NODE_CACHE_INDEXED;
>  		break;
>  	case ACPI_HMAT_CA_NONE:
>  	default:
> -		cache_attrs.indexing = NODE_CACHE_OTHER;
> +		tcache->cache_attrs.indexing = NODE_CACHE_OTHER;
>  		break;
>  	}
>  
>  	switch ((attrs & ACPI_HMAT_WRITE_POLICY) >> 12) {
>  	case ACPI_HMAT_CP_WB:
> -		cache_attrs.write_policy = NODE_CACHE_WRITE_BACK;
> +		tcache->cache_attrs.write_policy = NODE_CACHE_WRITE_BACK;
>  		break;
>  	case ACPI_HMAT_CP_WT:
> -		cache_attrs.write_policy = NODE_CACHE_WRITE_THROUGH;
> +		tcache->cache_attrs.write_policy = NODE_CACHE_WRITE_THROUGH;
>  		break;
>  	case ACPI_HMAT_CP_NONE:
>  	default:
> -		cache_attrs.write_policy = NODE_CACHE_WRITE_OTHER;
> +		tcache->cache_attrs.write_policy = NODE_CACHE_WRITE_OTHER;
>  		break;
>  	}
> +	list_add_tail(&tcache->node, &target->caches);
>  
> -	node_add_cache(pxm_to_node(cache->memory_PD), &cache_attrs);
>  	return 0;
>  }
>  
> @@ -577,20 +595,37 @@ static __init void hmat_register_target_initiators(struct memory_target *target)
>  	}
>  }
>  
> +static __init void hmat_register_target_cache(struct memory_target *target)
> +{
> +	unsigned mem_nid = pxm_to_node(target->memory_pxm);
> +	struct target_cache *tcache;
> +
> +	list_for_each_entry(tcache, &target->caches, node)
> +		node_add_cache(mem_nid, &tcache->cache_attrs);
> +}
> +
>  static __init void hmat_register_target_perf(struct memory_target *target)
>  {
>  	unsigned mem_nid = pxm_to_node(target->memory_pxm);
>  	node_set_perf_attrs(mem_nid, &target->hmem_attrs, 0);
>  }
>  
> +static __init void hmat_register_target(struct memory_target *target)
> +{
> +	if (!node_online(pxm_to_node(target->memory_pxm)))
> +		return;
> +
> +	hmat_register_target_initiators(target);
> +	hmat_register_target_cache(target);
> +	hmat_register_target_perf(target);
> +}
> +
>  static __init void hmat_register_targets(void)
>  {
>  	struct memory_target *target;
>  
> -	list_for_each_entry(target, &targets, node) {
> -		hmat_register_target_initiators(target);
> -		hmat_register_target_perf(target);
> -	}
> +	list_for_each_entry(target, &targets, node)
> +		hmat_register_target(target);
>  }
>  
>  static __init void hmat_free_structures(void)
> @@ -598,8 +633,13 @@ static __init void hmat_free_structures(void)
>  	struct memory_target *target, *tnext;
>  	struct memory_locality *loc, *lnext;
>  	struct memory_initiator *initiator, *inext;
> +	struct target_cache *tcache, *cnext;
>  
>  	list_for_each_entry_safe(target, tnext, &targets, node) {
> +		list_for_each_entry_safe(tcache, cnext, &target->caches, node) {
> +			list_del(&tcache->node);
> +			kfree(tcache);
> +		}
>  		list_del(&target->node);
>  		kfree(target);
>  	}


* Re: [PATCHv2 1/2] hmat: Register memory-side cache after parsing
  2019-06-13 20:27 ` [PATCHv2 1/2] hmat: Register memory-side cache after parsing Rafael J. Wysocki
@ 2019-07-24 15:32   ` Keith Busch
  0 siblings, 0 replies; 9+ messages in thread
From: Keith Busch @ 2019-07-24 15:32 UTC (permalink / raw)
  To: Rafael J. Wysocki
  Cc: Busch, Keith, linux-kernel, linux-acpi, Rafael Wysocki, Williams,
	Dan J, Hansen, Dave, Brice Goglin

On Thu, Jun 13, 2019 at 01:27:05PM -0700, Rafael J. Wysocki wrote:
> On Wednesday, May 15, 2019 11:54:43 PM CEST Keith Busch wrote:
> > Instead of registering the hmat cache attributes in line with parsing
> > the table, save the attributes in the memory target and register them
> > after parsing completes. This will make it easier to register the
> > attributes later when hot add is supported.
> > 
> > Signed-off-by: Keith Busch <keith.busch@intel.com>
> > ---
> > v1 -> v2:
> > 
> >   Fixed multi-level caches, and no caches. v1 incorrectly assumed only a level
> >   1 always existed (Brice).
> > 
> >  drivers/acpi/hmat/hmat.c | 70 +++++++++++++++++++++++++++++++++++++-----------
> >  1 file changed, 55 insertions(+), 15 deletions(-)
> > 
> > diff --git a/drivers/acpi/hmat/hmat.c b/drivers/acpi/hmat/hmat.c
> > index 96b7d39a97c6..bf23c9a27958 100644
> > --- a/drivers/acpi/hmat/hmat.c
> > +++ b/drivers/acpi/hmat/hmat.c
> > @@ -36,11 +36,17 @@ enum locality_types {
> >  
> >  static struct memory_locality *localities_types[4];
> >  
> > +struct target_cache {
> > +	struct list_head node;
> > +	struct node_cache_attrs cache_attrs;
> > +};
> > +
> >  struct memory_target {
> >  	struct list_head node;
> >  	unsigned int memory_pxm;
> >  	unsigned int processor_pxm;
> >  	struct node_hmem_attrs hmem_attrs;
> > +	struct list_head caches;
> >  };
> >  
> >  struct memory_initiator {
> > @@ -110,6 +116,7 @@ static __init void alloc_memory_target(unsigned int mem_pxm)
> >  	target->memory_pxm = mem_pxm;
> >  	target->processor_pxm = PXM_INVAL;
> >  	list_add_tail(&target->node, &targets);
> > +	INIT_LIST_HEAD(&target->caches);
> >  }
> >  
> >  static __init const char *hmat_data_type(u8 type)
> > @@ -314,7 +321,8 @@ static __init int hmat_parse_cache(union acpi_subtable_headers *header,
> >  				   const unsigned long end)
> >  {
> >  	struct acpi_hmat_cache *cache = (void *)header;
> > -	struct node_cache_attrs cache_attrs;
> > +	struct memory_target *target;
> > +	struct target_cache *tcache;
> >  	u32 attrs;
> >  
> >  	if (cache->header.length < sizeof(*cache)) {
> > @@ -328,37 +336,47 @@ static __init int hmat_parse_cache(union acpi_subtable_headers *header,
> >  		cache->memory_PD, cache->cache_size, attrs,
> >  		cache->number_of_SMBIOShandles);
> >  
> > -	cache_attrs.size = cache->cache_size;
> > -	cache_attrs.level = (attrs & ACPI_HMAT_CACHE_LEVEL) >> 4;
> > -	cache_attrs.line_size = (attrs & ACPI_HMAT_CACHE_LINE_SIZE) >> 16;
> > +	target = find_mem_target(cache->memory_PD);
> > +	if (!target)
> > +		return 0;
> > +
> > +	tcache = kzalloc(sizeof(*tcache), GFP_KERNEL);
> > +	if (!tcache) {
> > +		pr_notice_once("Failed to allocate HMAT cache info\n");
> > +		return 0;
> > +	}
> > +
> > +	tcache->cache_attrs.size = cache->cache_size;
> > +	tcache->cache_attrs.level = (attrs & ACPI_HMAT_CACHE_LEVEL) >> 4;
> > +	tcache->cache_attrs.line_size = (attrs & ACPI_HMAT_CACHE_LINE_SIZE) >> 16;
> >  
> >  	switch ((attrs & ACPI_HMAT_CACHE_ASSOCIATIVITY) >> 8) {
> >  	case ACPI_HMAT_CA_DIRECT_MAPPED:
> > -		cache_attrs.indexing = NODE_CACHE_DIRECT_MAP;
> > +		tcache->cache_attrs.indexing = NODE_CACHE_DIRECT_MAP;
> >  		break;
> >  	case ACPI_HMAT_CA_COMPLEX_CACHE_INDEXING:
> > -		cache_attrs.indexing = NODE_CACHE_INDEXED;
> > +		tcache->cache_attrs.indexing = NODE_CACHE_INDEXED;
> >  		break;
> >  	case ACPI_HMAT_CA_NONE:
> >  	default:
> > -		cache_attrs.indexing = NODE_CACHE_OTHER;
> > +		tcache->cache_attrs.indexing = NODE_CACHE_OTHER;
> >  		break;
> >  	}
> >  
> >  	switch ((attrs & ACPI_HMAT_WRITE_POLICY) >> 12) {
> >  	case ACPI_HMAT_CP_WB:
> > -		cache_attrs.write_policy = NODE_CACHE_WRITE_BACK;
> > +		tcache->cache_attrs.write_policy = NODE_CACHE_WRITE_BACK;
> >  		break;
> >  	case ACPI_HMAT_CP_WT:
> > -		cache_attrs.write_policy = NODE_CACHE_WRITE_THROUGH;
> > +		tcache->cache_attrs.write_policy = NODE_CACHE_WRITE_THROUGH;
> >  		break;
> >  	case ACPI_HMAT_CP_NONE:
> >  	default:
> > -		cache_attrs.write_policy = NODE_CACHE_WRITE_OTHER;
> > +		tcache->cache_attrs.write_policy = NODE_CACHE_WRITE_OTHER;
> >  		break;
> >  	}
> > +	list_add_tail(&tcache->node, &target->caches);
> >  
> > -	node_add_cache(pxm_to_node(cache->memory_PD), &cache_attrs);
> >  	return 0;
> >  }
> >  
> > @@ -577,20 +595,37 @@ static __init void hmat_register_target_initiators(struct memory_target *target)
> >  	}
> >  }
> >  
> > +static __init void hmat_register_target_cache(struct memory_target *target)
> > +{
> > +	unsigned mem_nid = pxm_to_node(target->memory_pxm);
> > +	struct target_cache *tcache;
> > +
> > +	list_for_each_entry(tcache, &target->caches, node)
> > +		node_add_cache(mem_nid, &tcache->cache_attrs);
> > +}
> > +
> >  static __init void hmat_register_target_perf(struct memory_target *target)
> >  {
> >  	unsigned mem_nid = pxm_to_node(target->memory_pxm);
> >  	node_set_perf_attrs(mem_nid, &target->hmem_attrs, 0);
> >  }
> >  
> > +static __init void hmat_register_target(struct memory_target *target)
> > +{
> > +	if (!node_online(pxm_to_node(target->memory_pxm)))
> > +		return;
> > +
> > +	hmat_register_target_initiators(target);
> > +	hmat_register_target_cache(target);
> > +	hmat_register_target_perf(target);
> > +}
> > +
> >  static __init void hmat_register_targets(void)
> >  {
> >  	struct memory_target *target;
> >  
> > -	list_for_each_entry(target, &targets, node) {
> > -		hmat_register_target_initiators(target);
> > -		hmat_register_target_perf(target);
> > -	}
> > +	list_for_each_entry(target, &targets, node)
> > +		hmat_register_target(target);
> >  }
> >  
> >  static __init void hmat_free_structures(void)
> > @@ -598,8 +633,13 @@ static __init void hmat_free_structures(void)
> >  	struct memory_target *target, *tnext;
> >  	struct memory_locality *loc, *lnext;
> >  	struct memory_initiator *initiator, *inext;
> > +	struct target_cache *tcache, *cnext;
> >  
> >  	list_for_each_entry_safe(target, tnext, &targets, node) {
> > +		list_for_each_entry_safe(tcache, cnext, &target->caches, node) {
> > +			list_del(&tcache->node);
> > +			kfree(tcache);
> > +		}
> >  		list_del(&target->node);
> >  		kfree(target);
> >  	}
> > 
> 
> Not sure what to do with this patch and the next one in the series.
> 
> FWIW, they both are fine by me.
> 
> Also ISTR seeing them in a series from Dan. (?)

I see Dan provided a series for EFI specific purpose memory support, but I
didn't find anything else touching this part of the code. FWIW, Dan's EFI
series looks good to me, and I can rebase the hot-add support this series
provides on top of that.


* Re: [PATCHv2 2/2] hmat: Register attributes for memory hot add
       [not found]   ` <9f130b73-e5ae-0529-69a1-28bd2ca29581@inria.fr>
@ 2019-04-16 15:01     ` Keith Busch
  2019-04-16 15:01       ` Keith Busch
  0 siblings, 1 reply; 9+ messages in thread
From: Keith Busch @ 2019-04-16 15:01 UTC (permalink / raw)
  To: Brice Goglin
  Cc: linux-kernel, linux-acpi, linux-mm, Rafael Wysocki, Dave Hansen,
	Dan Williams

On Tue, Apr 16, 2019 at 04:55:21PM +0200, Brice Goglin wrote:
> Hello Keith
> 
> Several issues:
> 
> * We always get a memory_side_cache, even if nothing was found in ACPI.
>   You should at least ignore the cache if size==0?
> 
> * Your code seems to only work with a single level of cache, since
>   there's a single cache_attrs entry in each target structure.
> 
> * I was getting a section mismatch warning and a crash on PMEM node
>   hotplug until I applied the patch below.
> 
> WARNING: vmlinux.o(.text+0x47d3f7): Section mismatch in reference from the function hmat_callback() to the function .init.text:hmat_register_target()
> The function hmat_callback() references
> the function __init hmat_register_target().
> This is often because hmat_callback lacks a __init 
> annotation or the annotation of hmat_register_target is wrong.
> 
> Thanks
> 
> Brice

Oh, thanks for the notice. I'll add multi-level and no-cache configurations
to my test, as I had it fixed at a single level. Will need to respin this
series.

* [PATCHv2 2/2] hmat: Register attributes for memory hot add
  2019-04-15 15:16 [PATCHv2 0/2] HMAT memory hotplug support Keith Busch
@ 2019-04-15 15:16 ` Keith Busch
  2019-04-15 15:16   ` Keith Busch
       [not found]   ` <9f130b73-e5ae-0529-69a1-28bd2ca29581@inria.fr>
  0 siblings, 2 replies; 9+ messages in thread
From: Keith Busch @ 2019-04-15 15:16 UTC (permalink / raw)
  To: linux-kernel, linux-acpi, linux-mm
  Cc: Rafael Wysocki, Dave Hansen, Dan Williams, Brice Goglin, Keith Busch

Some memory nodes described in HMAT may not be online at the time we
parse the subtables. Should a node be brought online later, as can
happen when using PMEM as RAM after boot, it will be missing its
initiator links and performance attributes.

Register a memory notifier callback and register the memory attributes
the first time the target's node is brought online, ensuring a node's
attributes are registered only once.

Reported-by: Brice Goglin <Brice.Goglin@inria.fr>
Signed-off-by: Keith Busch <keith.busch@intel.com>
---
 drivers/acpi/hmat/hmat.c | 72 ++++++++++++++++++++++++++++++++++++------------
 1 file changed, 55 insertions(+), 17 deletions(-)

diff --git a/drivers/acpi/hmat/hmat.c b/drivers/acpi/hmat/hmat.c
index bdb167c026ff..4fcfad6c2181 100644
--- a/drivers/acpi/hmat/hmat.c
+++ b/drivers/acpi/hmat/hmat.c
@@ -14,14 +14,18 @@
 #include <linux/init.h>
 #include <linux/list.h>
 #include <linux/list_sort.h>
+#include <linux/memory.h>
+#include <linux/mutex.h>
 #include <linux/node.h>
 #include <linux/sysfs.h>
 
-static __initdata u8 hmat_revision;
+static u8 hmat_revision;
 
-static __initdata LIST_HEAD(targets);
-static __initdata LIST_HEAD(initiators);
-static __initdata LIST_HEAD(localities);
+static LIST_HEAD(targets);
+static LIST_HEAD(initiators);
+static LIST_HEAD(localities);
+
+static DEFINE_MUTEX(target_lock);
 
 /*
  * The defined enum order is used to prioritize attributes to break ties when
@@ -42,6 +46,7 @@ struct memory_target {
 	unsigned int processor_pxm;
 	struct node_hmem_attrs hmem_attrs;
 	struct node_cache_attrs cache_attrs;
+	bool registered;
 };
 
 struct memory_initiator {
@@ -54,7 +59,7 @@ struct memory_locality {
 	struct acpi_hmat_locality *hmat_loc;
 };
 
-static __init struct memory_initiator *find_mem_initiator(unsigned int cpu_pxm)
+static struct memory_initiator *find_mem_initiator(unsigned int cpu_pxm)
 {
 	struct memory_initiator *initiator;
 
@@ -64,7 +69,7 @@ static __init struct memory_initiator *find_mem_initiator(unsigned int cpu_pxm)
 	return NULL;
 }
 
-static __init struct memory_target *find_mem_target(unsigned int mem_pxm)
+static struct memory_target *find_mem_target(unsigned int mem_pxm)
 {
 	struct memory_target *target;
 
@@ -149,7 +154,7 @@ static __init const char *hmat_data_type_suffix(u8 type)
 	}
 }
 
-static __init u32 hmat_normalize(u16 entry, u64 base, u8 type)
+static u32 hmat_normalize(u16 entry, u64 base, u8 type)
 {
 	u32 value;
 
@@ -184,7 +189,7 @@ static __init u32 hmat_normalize(u16 entry, u64 base, u8 type)
 	return value;
 }
 
-static __init void hmat_update_target_access(struct memory_target *target,
+static void hmat_update_target_access(struct memory_target *target,
 					     u8 type, u32 value)
 {
 	switch (type) {
@@ -439,7 +444,7 @@ static __init int srat_parse_mem_affinity(union acpi_subtable_headers *header,
 	return 0;
 }
 
-static __init u32 hmat_initiator_perf(struct memory_target *target,
+static u32 hmat_initiator_perf(struct memory_target *target,
 			       struct memory_initiator *initiator,
 			       struct acpi_hmat_locality *hmat_loc)
 {
@@ -477,7 +482,7 @@ static __init u32 hmat_initiator_perf(struct memory_target *target,
 			      hmat_loc->data_type);
 }
 
-static __init bool hmat_update_best(u8 type, u32 value, u32 *best)
+static bool hmat_update_best(u8 type, u32 value, u32 *best)
 {
 	bool updated = false;
 
@@ -521,7 +526,7 @@ static int initiator_cmp(void *priv, struct list_head *a, struct list_head *b)
 	return ia->processor_pxm - ib->processor_pxm;
 }
 
-static __init void hmat_register_target_initiators(struct memory_target *target)
+static void hmat_register_target_initiators(struct memory_target *target)
 {
 	static DECLARE_BITMAP(p_nodes, MAX_NUMNODES);
 	struct memory_initiator *initiator;
@@ -581,13 +586,13 @@ static __init void hmat_register_target_initiators(struct memory_target *target)
 	}
 }
 
-static __init void hmat_register_target_cache(struct memory_target *target)
+static void hmat_register_target_cache(struct memory_target *target)
 {
 	unsigned mem_nid = pxm_to_node(target->memory_pxm);
 	node_add_cache(mem_nid, &target->cache_attrs);
 }
 
-static __init void hmat_register_target_perf(struct memory_target *target)
+static void hmat_register_target_perf(struct memory_target *target)
 {
 	unsigned mem_nid = pxm_to_node(target->memory_pxm);
 	node_set_perf_attrs(mem_nid, &target->hmem_attrs, 0);
@@ -598,12 +603,17 @@ static __init void hmat_register_target(struct memory_target *target)
 	if (!node_online(pxm_to_node(target->memory_pxm)))
 		return;
 
-	hmat_register_target_initiators(target);
-	hmat_register_target_cache(target);
-	hmat_register_target_perf(target);
+	mutex_lock(&target_lock);
+	if (!target->registered) {
+		hmat_register_target_initiators(target);
+		hmat_register_target_cache(target);
+		hmat_register_target_perf(target);
+		target->registered = true;
+	}
+	mutex_unlock(&target_lock);
 }
 
-static __init void hmat_register_targets(void)
+static void hmat_register_targets(void)
 {
 	struct memory_target *target;
 
@@ -611,6 +621,30 @@ static __init void hmat_register_targets(void)
 		hmat_register_target(target);
 }
 
+static int hmat_callback(struct notifier_block *self,
+			 unsigned long action, void *arg)
+{
+	struct memory_target *target;
+	struct memory_notify *mnb = arg;
+	int pxm, nid = mnb->status_change_nid;
+
+	if (nid == NUMA_NO_NODE || action != MEM_ONLINE)
+		return NOTIFY_OK;
+
+	pxm = node_to_pxm(nid);
+	target = find_mem_target(pxm);
+	if (!target)
+		return NOTIFY_OK;
+
+	hmat_register_target(target);
+	return NOTIFY_OK;
+}
+
+static struct notifier_block hmat_callback_nb = {
+	.notifier_call = hmat_callback,
+	.priority = 2,
+};
+
 static __init void hmat_free_structures(void)
 {
 	struct memory_target *target, *tnext;
@@ -676,6 +710,10 @@ static __init int hmat_init(void)
 		}
 	}
 	hmat_register_targets();
+
+	/* Keep the table and structures if the notifier may use them */
+	if (!register_hotmemory_notifier(&hmat_callback_nb))
+		return 0;
 out_put:
 	hmat_free_structures();
 	acpi_put_table(tbl);
-- 
2.14.4


end of thread

Thread overview: 9+ messages
2019-05-15 21:54 [PATCHv2 1/2] hmat: Register memory-side cache after parsing Keith Busch
2019-05-15 21:54 ` [PATCHv2 2/2] hmat: Register attributes for memory hot add Keith Busch
2019-06-13 20:27 ` [PATCHv2 1/2] hmat: Register memory-side cache after parsing Rafael J. Wysocki
2019-07-24 15:32   ` Keith Busch
2019-07-01  8:33 ` Brice Goglin
  -- strict thread matches above, loose matches on Subject: below --
2019-04-15 15:16 [PATCHv2 0/2] HMAT memory hotplug support Keith Busch
2019-04-15 15:16 ` [PATCHv2 2/2] hmat: Register attributes for memory hot add Keith Busch
     [not found]   ` <9f130b73-e5ae-0529-69a1-28bd2ca29581@inria.fr>
2019-04-16 15:01     ` Keith Busch
