* [PATCH 0/4] powerpc/pseries: Refactor code to centralize drmem feature
@ 2018-11-27 20:31 Michael Bringmann
2018-11-27 20:32 ` [PATCH 1/4] powerpc/pseries: Relocate drmem.c to pseries Michael Bringmann
` (2 more replies)
0 siblings, 3 replies; 4+ messages in thread
From: Michael Bringmann @ 2018-11-27 20:31 UTC (permalink / raw)
To: linuxppc-dev, linuxppc-dev; +Cc: mwb, minkim, tlfalcon, tyreld
The pseries-specific dynamic memory features are currently
implemented in several non-pseries-specific files.
This patch set moves the device-tree parsing code for the
properties ibm,dynamic-memory and ibm,dynamic-memory-v2, along
with its in-kernel representation, into the platform-specific
directory for the pseries platform.
Signed-off-by: Michael Bringmann <mwb@linux.vnet.ibm.com>
Michael Bringmann (4):
powerpc/pseries: Relocate drmem.c to pseries
powerpc/pseries: Move DRMEM processing out of prom.c
powerpc/pseries: Move DRMEM processing out of numa.c
powerpc/pseries: Relocate drmem.h to pseries
^ permalink raw reply [flat|nested] 4+ messages in thread
* [PATCH 1/4] powerpc/pseries: Relocate drmem.c to pseries
2018-11-27 20:31 [PATCH 0/4] powerpc/pseries: Refactor code to centralize drmem feature Michael Bringmann
@ 2018-11-27 20:32 ` Michael Bringmann
2018-11-27 20:37 ` [PATCH 4/4] powerpc/pseries: Move DRMEM processing out of numa.c Michael Bringmann
2018-11-27 20:39 ` [PATCH 4/4] powerpc/pseries: Relocate drmem.h to pseries Michael Bringmann
2 siblings, 0 replies; 4+ messages in thread
From: Michael Bringmann @ 2018-11-27 20:32 UTC (permalink / raw)
To: linuxppc-dev, linuxppc-dev; +Cc: mwb, minkim, tlfalcon, tyreld
The pseries-specific dynamic memory features are currently
implemented in several non-pseries-specific files.
This patch set moves the device-tree parsing code for the
properties ibm,dynamic-memory and ibm,dynamic-memory-v2, along
with its in-kernel representation, into the platform-specific
directory for the pseries platform.
This patch moves drmem.c from the kernel directory arch/powerpc/mm
to arch/powerpc/platforms/pseries.
Signed-off-by: Michael Bringmann <mwb@linux.vnet.ibm.com>
---
arch/powerpc/mm/Makefile | 2
arch/powerpc/mm/drmem.c | 447 -------------------------------
arch/powerpc/platforms/pseries/Makefile | 3
arch/powerpc/platforms/pseries/drmem.c | 447 +++++++++++++++++++++++++++++++
4 files changed, 450 insertions(+), 449 deletions(-)
delete mode 100644 arch/powerpc/mm/drmem.c
create mode 100644 arch/powerpc/platforms/pseries/drmem.c
diff --git a/arch/powerpc/mm/Makefile b/arch/powerpc/mm/Makefile
index ca96e7b..06281e0f 100644
--- a/arch/powerpc/mm/Makefile
+++ b/arch/powerpc/mm/Makefile
@@ -9,7 +9,7 @@ CFLAGS_REMOVE_slb.o = $(CC_FLAGS_FTRACE)
obj-y := fault.o mem.o pgtable.o mmap.o \
init_$(BITS).o pgtable_$(BITS).o \
- init-common.o mmu_context.o drmem.o
+ init-common.o mmu_context.o
obj-$(CONFIG_PPC_MMU_NOHASH) += mmu_context_nohash.o tlb_nohash.o \
tlb_nohash_low.o
obj-$(CONFIG_PPC_BOOK3E) += tlb_low_$(BITS)e.o
diff --git a/arch/powerpc/mm/drmem.c b/arch/powerpc/mm/drmem.c
deleted file mode 100644
index 3f18036..0000000
--- a/arch/powerpc/mm/drmem.c
+++ /dev/null
@@ -1,447 +0,0 @@
-/*
- * Dynamic reconfiguration memory support
- *
- * Copyright 2017 IBM Corporation
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- */
-
-#define pr_fmt(fmt) "drmem: " fmt
-
-#include <linux/kernel.h>
-#include <linux/of.h>
-#include <linux/of_fdt.h>
-#include <linux/memblock.h>
-#include <asm/prom.h>
-#include <asm/drmem.h>
-
-static struct drmem_lmb_info __drmem_info;
-struct drmem_lmb_info *drmem_info = &__drmem_info;
-
-u64 drmem_lmb_memory_max(void)
-{
- struct drmem_lmb *last_lmb;
-
- last_lmb = &drmem_info->lmbs[drmem_info->n_lmbs - 1];
- return last_lmb->base_addr + drmem_lmb_size();
-}
-
-static u32 drmem_lmb_flags(struct drmem_lmb *lmb)
-{
- /*
- * Return the value of the lmb flags field minus the reserved
- * bit used internally for hotplug processing.
- */
- return lmb->flags & ~DRMEM_LMB_RESERVED;
-}
-
-static struct property *clone_property(struct property *prop, u32 prop_sz)
-{
- struct property *new_prop;
-
- new_prop = kzalloc(sizeof(*new_prop), GFP_KERNEL);
- if (!new_prop)
- return NULL;
-
- new_prop->name = kstrdup(prop->name, GFP_KERNEL);
- new_prop->value = kzalloc(prop_sz, GFP_KERNEL);
- if (!new_prop->name || !new_prop->value) {
- kfree(new_prop->name);
- kfree(new_prop->value);
- kfree(new_prop);
- return NULL;
- }
-
- new_prop->length = prop_sz;
-#if defined(CONFIG_OF_DYNAMIC)
- of_property_set_flag(new_prop, OF_DYNAMIC);
-#endif
- return new_prop;
-}
-
-static int drmem_update_dt_v1(struct device_node *memory,
- struct property *prop)
-{
- struct property *new_prop;
- struct of_drconf_cell_v1 *dr_cell;
- struct drmem_lmb *lmb;
- u32 *p;
-
- new_prop = clone_property(prop, prop->length);
- if (!new_prop)
- return -1;
-
- p = new_prop->value;
- *p++ = cpu_to_be32(drmem_info->n_lmbs);
-
- dr_cell = (struct of_drconf_cell_v1 *)p;
-
- for_each_drmem_lmb(lmb) {
- dr_cell->base_addr = cpu_to_be64(lmb->base_addr);
- dr_cell->drc_index = cpu_to_be32(lmb->drc_index);
- dr_cell->aa_index = cpu_to_be32(lmb->aa_index);
- dr_cell->flags = cpu_to_be32(drmem_lmb_flags(lmb));
-
- dr_cell++;
- }
-
- of_update_property(memory, new_prop);
- return 0;
-}
-
-static void init_drconf_v2_cell(struct of_drconf_cell_v2 *dr_cell,
- struct drmem_lmb *lmb)
-{
- dr_cell->base_addr = cpu_to_be64(lmb->base_addr);
- dr_cell->drc_index = cpu_to_be32(lmb->drc_index);
- dr_cell->aa_index = cpu_to_be32(lmb->aa_index);
- dr_cell->flags = cpu_to_be32(drmem_lmb_flags(lmb));
-}
-
-static int drmem_update_dt_v2(struct device_node *memory,
- struct property *prop)
-{
- struct property *new_prop;
- struct of_drconf_cell_v2 *dr_cell;
- struct drmem_lmb *lmb, *prev_lmb;
- u32 lmb_sets, prop_sz, seq_lmbs;
- u32 *p;
-
- /* First pass, determine how many LMB sets are needed. */
- lmb_sets = 0;
- prev_lmb = NULL;
- for_each_drmem_lmb(lmb) {
- if (!prev_lmb) {
- prev_lmb = lmb;
- lmb_sets++;
- continue;
- }
-
- if (prev_lmb->aa_index != lmb->aa_index ||
- drmem_lmb_flags(prev_lmb) != drmem_lmb_flags(lmb))
- lmb_sets++;
-
- prev_lmb = lmb;
- }
-
- prop_sz = lmb_sets * sizeof(*dr_cell) + sizeof(__be32);
- new_prop = clone_property(prop, prop_sz);
- if (!new_prop)
- return -1;
-
- p = new_prop->value;
- *p++ = cpu_to_be32(lmb_sets);
-
- dr_cell = (struct of_drconf_cell_v2 *)p;
-
- /* Second pass, populate the LMB set data */
- prev_lmb = NULL;
- seq_lmbs = 0;
- for_each_drmem_lmb(lmb) {
- if (prev_lmb == NULL) {
- /* Start of first LMB set */
- prev_lmb = lmb;
- init_drconf_v2_cell(dr_cell, lmb);
- seq_lmbs++;
- continue;
- }
-
- if (prev_lmb->aa_index != lmb->aa_index ||
- drmem_lmb_flags(prev_lmb) != drmem_lmb_flags(lmb)) {
- /* end of one set, start of another */
- dr_cell->seq_lmbs = cpu_to_be32(seq_lmbs);
- dr_cell++;
-
- init_drconf_v2_cell(dr_cell, lmb);
- seq_lmbs = 1;
- } else {
- seq_lmbs++;
- }
-
- prev_lmb = lmb;
- }
-
- /* close out last LMB set */
- dr_cell->seq_lmbs = cpu_to_be32(seq_lmbs);
- of_update_property(memory, new_prop);
- return 0;
-}
-
-int drmem_update_dt(void)
-{
- struct device_node *memory;
- struct property *prop;
- int rc = -1;
-
- memory = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
- if (!memory)
- return -1;
-
- prop = of_find_property(memory, "ibm,dynamic-memory", NULL);
- if (prop) {
- rc = drmem_update_dt_v1(memory, prop);
- } else {
- prop = of_find_property(memory, "ibm,dynamic-memory-v2", NULL);
- if (prop)
- rc = drmem_update_dt_v2(memory, prop);
- }
-
- of_node_put(memory);
- return rc;
-}
-
-static void __init read_drconf_v1_cell(struct drmem_lmb *lmb,
- const __be32 **prop)
-{
- const __be32 *p = *prop;
-
- lmb->base_addr = dt_mem_next_cell(dt_root_addr_cells, &p);
- lmb->drc_index = of_read_number(p++, 1);
-
- p++; /* skip reserved field */
-
- lmb->aa_index = of_read_number(p++, 1);
- lmb->flags = of_read_number(p++, 1);
-
- *prop = p;
-}
-
-static void __init __walk_drmem_v1_lmbs(const __be32 *prop, const __be32 *usm,
- void (*func)(struct drmem_lmb *, const __be32 **))
-{
- struct drmem_lmb lmb;
- u32 i, n_lmbs;
-
- n_lmbs = of_read_number(prop++, 1);
- if (n_lmbs == 0)
- return;
-
- for (i = 0; i < n_lmbs; i++) {
- read_drconf_v1_cell(&lmb, &prop);
- func(&lmb, &usm);
- }
-}
-
-static void __init read_drconf_v2_cell(struct of_drconf_cell_v2 *dr_cell,
- const __be32 **prop)
-{
- const __be32 *p = *prop;
-
- dr_cell->seq_lmbs = of_read_number(p++, 1);
- dr_cell->base_addr = dt_mem_next_cell(dt_root_addr_cells, &p);
- dr_cell->drc_index = of_read_number(p++, 1);
- dr_cell->aa_index = of_read_number(p++, 1);
- dr_cell->flags = of_read_number(p++, 1);
-
- *prop = p;
-}
-
-static void __init __walk_drmem_v2_lmbs(const __be32 *prop, const __be32 *usm,
- void (*func)(struct drmem_lmb *, const __be32 **))
-{
- struct of_drconf_cell_v2 dr_cell;
- struct drmem_lmb lmb;
- u32 i, j, lmb_sets;
-
- lmb_sets = of_read_number(prop++, 1);
- if (lmb_sets == 0)
- return;
-
- for (i = 0; i < lmb_sets; i++) {
- read_drconf_v2_cell(&dr_cell, &prop);
-
- for (j = 0; j < dr_cell.seq_lmbs; j++) {
- lmb.base_addr = dr_cell.base_addr;
- dr_cell.base_addr += drmem_lmb_size();
-
- lmb.drc_index = dr_cell.drc_index;
- dr_cell.drc_index++;
-
- lmb.aa_index = dr_cell.aa_index;
- lmb.flags = dr_cell.flags;
-
- func(&lmb, &usm);
- }
- }
-}
-
-#ifdef CONFIG_PPC_PSERIES
-void __init walk_drmem_lmbs_early(unsigned long node,
- void (*func)(struct drmem_lmb *, const __be32 **))
-{
- const __be32 *prop, *usm;
- int len;
-
- prop = of_get_flat_dt_prop(node, "ibm,lmb-size", &len);
- if (!prop || len < dt_root_size_cells * sizeof(__be32))
- return;
-
- drmem_info->lmb_size = dt_mem_next_cell(dt_root_size_cells, &prop);
-
- usm = of_get_flat_dt_prop(node, "linux,drconf-usable-memory", &len);
-
- prop = of_get_flat_dt_prop(node, "ibm,dynamic-memory", &len);
- if (prop) {
- __walk_drmem_v1_lmbs(prop, usm, func);
- } else {
- prop = of_get_flat_dt_prop(node, "ibm,dynamic-memory-v2",
- &len);
- if (prop)
- __walk_drmem_v2_lmbs(prop, usm, func);
- }
-
- memblock_dump_all();
-}
-
-#endif
-
-static int __init init_drmem_lmb_size(struct device_node *dn)
-{
- const __be32 *prop;
- int len;
-
- if (drmem_info->lmb_size)
- return 0;
-
- prop = of_get_property(dn, "ibm,lmb-size", &len);
- if (!prop || len < dt_root_size_cells * sizeof(__be32)) {
- pr_info("Could not determine LMB size\n");
- return -1;
- }
-
- drmem_info->lmb_size = dt_mem_next_cell(dt_root_size_cells, &prop);
- return 0;
-}
-
-/*
- * Returns the property linux,drconf-usable-memory if
- * it exists (the property exists only in kexec/kdump kernels,
- * added by kexec-tools)
- */
-static const __be32 *of_get_usable_memory(struct device_node *dn)
-{
- const __be32 *prop;
- u32 len;
-
- prop = of_get_property(dn, "linux,drconf-usable-memory", &len);
- if (!prop || len < sizeof(unsigned int))
- return NULL;
-
- return prop;
-}
-
-void __init walk_drmem_lmbs(struct device_node *dn,
- void (*func)(struct drmem_lmb *, const __be32 **))
-{
- const __be32 *prop, *usm;
-
- if (init_drmem_lmb_size(dn))
- return;
-
- usm = of_get_usable_memory(dn);
-
- prop = of_get_property(dn, "ibm,dynamic-memory", NULL);
- if (prop) {
- __walk_drmem_v1_lmbs(prop, usm, func);
- } else {
- prop = of_get_property(dn, "ibm,dynamic-memory-v2", NULL);
- if (prop)
- __walk_drmem_v2_lmbs(prop, usm, func);
- }
-}
-
-static void __init init_drmem_v1_lmbs(const __be32 *prop)
-{
- struct drmem_lmb *lmb;
-
- drmem_info->n_lmbs = of_read_number(prop++, 1);
- if (drmem_info->n_lmbs == 0)
- return;
-
- drmem_info->lmbs = kcalloc(drmem_info->n_lmbs, sizeof(*lmb),
- GFP_KERNEL);
- if (!drmem_info->lmbs)
- return;
-
- for_each_drmem_lmb(lmb)
- read_drconf_v1_cell(lmb, &prop);
-}
-
-static void __init init_drmem_v2_lmbs(const __be32 *prop)
-{
- struct drmem_lmb *lmb;
- struct of_drconf_cell_v2 dr_cell;
- const __be32 *p;
- u32 i, j, lmb_sets;
- int lmb_index;
-
- lmb_sets = of_read_number(prop++, 1);
- if (lmb_sets == 0)
- return;
-
- /* first pass, calculate the number of LMBs */
- p = prop;
- for (i = 0; i < lmb_sets; i++) {
- read_drconf_v2_cell(&dr_cell, &p);
- drmem_info->n_lmbs += dr_cell.seq_lmbs;
- }
-
- drmem_info->lmbs = kcalloc(drmem_info->n_lmbs, sizeof(*lmb),
- GFP_KERNEL);
- if (!drmem_info->lmbs)
- return;
-
- /* second pass, read in the LMB information */
- lmb_index = 0;
- p = prop;
-
- for (i = 0; i < lmb_sets; i++) {
- read_drconf_v2_cell(&dr_cell, &p);
-
- for (j = 0; j < dr_cell.seq_lmbs; j++) {
- lmb = &drmem_info->lmbs[lmb_index++];
-
- lmb->base_addr = dr_cell.base_addr;
- dr_cell.base_addr += drmem_info->lmb_size;
-
- lmb->drc_index = dr_cell.drc_index;
- dr_cell.drc_index++;
-
- lmb->aa_index = dr_cell.aa_index;
- lmb->flags = dr_cell.flags;
- }
- }
-}
-
-static int __init drmem_init(void)
-{
- struct device_node *dn;
- const __be32 *prop;
-
- dn = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
- if (!dn) {
- pr_info("No dynamic reconfiguration memory found\n");
- return 0;
- }
-
- if (init_drmem_lmb_size(dn)) {
- of_node_put(dn);
- return 0;
- }
-
- prop = of_get_property(dn, "ibm,dynamic-memory", NULL);
- if (prop) {
- init_drmem_v1_lmbs(prop);
- } else {
- prop = of_get_property(dn, "ibm,dynamic-memory-v2", NULL);
- if (prop)
- init_drmem_v2_lmbs(prop);
- }
-
- of_node_put(dn);
- return 0;
-}
-late_initcall(drmem_init);
diff --git a/arch/powerpc/platforms/pseries/Makefile b/arch/powerpc/platforms/pseries/Makefile
index a43ec84..4278690 100644
--- a/arch/powerpc/platforms/pseries/Makefile
+++ b/arch/powerpc/platforms/pseries/Makefile
@@ -6,7 +6,8 @@ obj-y := lpar.o hvCall.o nvram.o reconfig.o \
of_helpers.o \
setup.o iommu.o event_sources.o ras.o \
firmware.o power.o dlpar.o mobility.o rng.o \
- pci.o pci_dlpar.o eeh_pseries.o msi.o
+ pci.o pci_dlpar.o eeh_pseries.o msi.o \
+ drmem.o
obj-$(CONFIG_SMP) += smp.o
obj-$(CONFIG_SCANLOG) += scanlog.o
obj-$(CONFIG_KEXEC_CORE) += kexec.o
diff --git a/arch/powerpc/platforms/pseries/drmem.c b/arch/powerpc/platforms/pseries/drmem.c
new file mode 100644
index 0000000..3f18036
--- /dev/null
+++ b/arch/powerpc/platforms/pseries/drmem.c
@@ -0,0 +1,447 @@
+/*
+ * Dynamic reconfiguration memory support
+ *
+ * Copyright 2017 IBM Corporation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#define pr_fmt(fmt) "drmem: " fmt
+
+#include <linux/kernel.h>
+#include <linux/of.h>
+#include <linux/of_fdt.h>
+#include <linux/memblock.h>
+#include <asm/prom.h>
+#include <asm/drmem.h>
+
+static struct drmem_lmb_info __drmem_info;
+struct drmem_lmb_info *drmem_info = &__drmem_info;
+
+u64 drmem_lmb_memory_max(void)
+{
+ struct drmem_lmb *last_lmb;
+
+ last_lmb = &drmem_info->lmbs[drmem_info->n_lmbs - 1];
+ return last_lmb->base_addr + drmem_lmb_size();
+}
+
+static u32 drmem_lmb_flags(struct drmem_lmb *lmb)
+{
+ /*
+ * Return the value of the lmb flags field minus the reserved
+ * bit used internally for hotplug processing.
+ */
+ return lmb->flags & ~DRMEM_LMB_RESERVED;
+}
+
+static struct property *clone_property(struct property *prop, u32 prop_sz)
+{
+ struct property *new_prop;
+
+ new_prop = kzalloc(sizeof(*new_prop), GFP_KERNEL);
+ if (!new_prop)
+ return NULL;
+
+ new_prop->name = kstrdup(prop->name, GFP_KERNEL);
+ new_prop->value = kzalloc(prop_sz, GFP_KERNEL);
+ if (!new_prop->name || !new_prop->value) {
+ kfree(new_prop->name);
+ kfree(new_prop->value);
+ kfree(new_prop);
+ return NULL;
+ }
+
+ new_prop->length = prop_sz;
+#if defined(CONFIG_OF_DYNAMIC)
+ of_property_set_flag(new_prop, OF_DYNAMIC);
+#endif
+ return new_prop;
+}
+
+static int drmem_update_dt_v1(struct device_node *memory,
+ struct property *prop)
+{
+ struct property *new_prop;
+ struct of_drconf_cell_v1 *dr_cell;
+ struct drmem_lmb *lmb;
+ u32 *p;
+
+ new_prop = clone_property(prop, prop->length);
+ if (!new_prop)
+ return -1;
+
+ p = new_prop->value;
+ *p++ = cpu_to_be32(drmem_info->n_lmbs);
+
+ dr_cell = (struct of_drconf_cell_v1 *)p;
+
+ for_each_drmem_lmb(lmb) {
+ dr_cell->base_addr = cpu_to_be64(lmb->base_addr);
+ dr_cell->drc_index = cpu_to_be32(lmb->drc_index);
+ dr_cell->aa_index = cpu_to_be32(lmb->aa_index);
+ dr_cell->flags = cpu_to_be32(drmem_lmb_flags(lmb));
+
+ dr_cell++;
+ }
+
+ of_update_property(memory, new_prop);
+ return 0;
+}
+
+static void init_drconf_v2_cell(struct of_drconf_cell_v2 *dr_cell,
+ struct drmem_lmb *lmb)
+{
+ dr_cell->base_addr = cpu_to_be64(lmb->base_addr);
+ dr_cell->drc_index = cpu_to_be32(lmb->drc_index);
+ dr_cell->aa_index = cpu_to_be32(lmb->aa_index);
+ dr_cell->flags = cpu_to_be32(drmem_lmb_flags(lmb));
+}
+
+static int drmem_update_dt_v2(struct device_node *memory,
+ struct property *prop)
+{
+ struct property *new_prop;
+ struct of_drconf_cell_v2 *dr_cell;
+ struct drmem_lmb *lmb, *prev_lmb;
+ u32 lmb_sets, prop_sz, seq_lmbs;
+ u32 *p;
+
+ /* First pass, determine how many LMB sets are needed. */
+ lmb_sets = 0;
+ prev_lmb = NULL;
+ for_each_drmem_lmb(lmb) {
+ if (!prev_lmb) {
+ prev_lmb = lmb;
+ lmb_sets++;
+ continue;
+ }
+
+ if (prev_lmb->aa_index != lmb->aa_index ||
+ drmem_lmb_flags(prev_lmb) != drmem_lmb_flags(lmb))
+ lmb_sets++;
+
+ prev_lmb = lmb;
+ }
+
+ prop_sz = lmb_sets * sizeof(*dr_cell) + sizeof(__be32);
+ new_prop = clone_property(prop, prop_sz);
+ if (!new_prop)
+ return -1;
+
+ p = new_prop->value;
+ *p++ = cpu_to_be32(lmb_sets);
+
+ dr_cell = (struct of_drconf_cell_v2 *)p;
+
+ /* Second pass, populate the LMB set data */
+ prev_lmb = NULL;
+ seq_lmbs = 0;
+ for_each_drmem_lmb(lmb) {
+ if (prev_lmb == NULL) {
+ /* Start of first LMB set */
+ prev_lmb = lmb;
+ init_drconf_v2_cell(dr_cell, lmb);
+ seq_lmbs++;
+ continue;
+ }
+
+ if (prev_lmb->aa_index != lmb->aa_index ||
+ drmem_lmb_flags(prev_lmb) != drmem_lmb_flags(lmb)) {
+ /* end of one set, start of another */
+ dr_cell->seq_lmbs = cpu_to_be32(seq_lmbs);
+ dr_cell++;
+
+ init_drconf_v2_cell(dr_cell, lmb);
+ seq_lmbs = 1;
+ } else {
+ seq_lmbs++;
+ }
+
+ prev_lmb = lmb;
+ }
+
+ /* close out last LMB set */
+ dr_cell->seq_lmbs = cpu_to_be32(seq_lmbs);
+ of_update_property(memory, new_prop);
+ return 0;
+}
+
+int drmem_update_dt(void)
+{
+ struct device_node *memory;
+ struct property *prop;
+ int rc = -1;
+
+ memory = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
+ if (!memory)
+ return -1;
+
+ prop = of_find_property(memory, "ibm,dynamic-memory", NULL);
+ if (prop) {
+ rc = drmem_update_dt_v1(memory, prop);
+ } else {
+ prop = of_find_property(memory, "ibm,dynamic-memory-v2", NULL);
+ if (prop)
+ rc = drmem_update_dt_v2(memory, prop);
+ }
+
+ of_node_put(memory);
+ return rc;
+}
+
+static void __init read_drconf_v1_cell(struct drmem_lmb *lmb,
+ const __be32 **prop)
+{
+ const __be32 *p = *prop;
+
+ lmb->base_addr = dt_mem_next_cell(dt_root_addr_cells, &p);
+ lmb->drc_index = of_read_number(p++, 1);
+
+ p++; /* skip reserved field */
+
+ lmb->aa_index = of_read_number(p++, 1);
+ lmb->flags = of_read_number(p++, 1);
+
+ *prop = p;
+}
+
+static void __init __walk_drmem_v1_lmbs(const __be32 *prop, const __be32 *usm,
+ void (*func)(struct drmem_lmb *, const __be32 **))
+{
+ struct drmem_lmb lmb;
+ u32 i, n_lmbs;
+
+ n_lmbs = of_read_number(prop++, 1);
+ if (n_lmbs == 0)
+ return;
+
+ for (i = 0; i < n_lmbs; i++) {
+ read_drconf_v1_cell(&lmb, &prop);
+ func(&lmb, &usm);
+ }
+}
+
+static void __init read_drconf_v2_cell(struct of_drconf_cell_v2 *dr_cell,
+ const __be32 **prop)
+{
+ const __be32 *p = *prop;
+
+ dr_cell->seq_lmbs = of_read_number(p++, 1);
+ dr_cell->base_addr = dt_mem_next_cell(dt_root_addr_cells, &p);
+ dr_cell->drc_index = of_read_number(p++, 1);
+ dr_cell->aa_index = of_read_number(p++, 1);
+ dr_cell->flags = of_read_number(p++, 1);
+
+ *prop = p;
+}
+
+static void __init __walk_drmem_v2_lmbs(const __be32 *prop, const __be32 *usm,
+ void (*func)(struct drmem_lmb *, const __be32 **))
+{
+ struct of_drconf_cell_v2 dr_cell;
+ struct drmem_lmb lmb;
+ u32 i, j, lmb_sets;
+
+ lmb_sets = of_read_number(prop++, 1);
+ if (lmb_sets == 0)
+ return;
+
+ for (i = 0; i < lmb_sets; i++) {
+ read_drconf_v2_cell(&dr_cell, &prop);
+
+ for (j = 0; j < dr_cell.seq_lmbs; j++) {
+ lmb.base_addr = dr_cell.base_addr;
+ dr_cell.base_addr += drmem_lmb_size();
+
+ lmb.drc_index = dr_cell.drc_index;
+ dr_cell.drc_index++;
+
+ lmb.aa_index = dr_cell.aa_index;
+ lmb.flags = dr_cell.flags;
+
+ func(&lmb, &usm);
+ }
+ }
+}
+
+#ifdef CONFIG_PPC_PSERIES
+void __init walk_drmem_lmbs_early(unsigned long node,
+ void (*func)(struct drmem_lmb *, const __be32 **))
+{
+ const __be32 *prop, *usm;
+ int len;
+
+ prop = of_get_flat_dt_prop(node, "ibm,lmb-size", &len);
+ if (!prop || len < dt_root_size_cells * sizeof(__be32))
+ return;
+
+ drmem_info->lmb_size = dt_mem_next_cell(dt_root_size_cells, &prop);
+
+ usm = of_get_flat_dt_prop(node, "linux,drconf-usable-memory", &len);
+
+ prop = of_get_flat_dt_prop(node, "ibm,dynamic-memory", &len);
+ if (prop) {
+ __walk_drmem_v1_lmbs(prop, usm, func);
+ } else {
+ prop = of_get_flat_dt_prop(node, "ibm,dynamic-memory-v2",
+ &len);
+ if (prop)
+ __walk_drmem_v2_lmbs(prop, usm, func);
+ }
+
+ memblock_dump_all();
+}
+
+#endif
+
+static int __init init_drmem_lmb_size(struct device_node *dn)
+{
+ const __be32 *prop;
+ int len;
+
+ if (drmem_info->lmb_size)
+ return 0;
+
+ prop = of_get_property(dn, "ibm,lmb-size", &len);
+ if (!prop || len < dt_root_size_cells * sizeof(__be32)) {
+ pr_info("Could not determine LMB size\n");
+ return -1;
+ }
+
+ drmem_info->lmb_size = dt_mem_next_cell(dt_root_size_cells, &prop);
+ return 0;
+}
+
+/*
+ * Returns the property linux,drconf-usable-memory if
+ * it exists (the property exists only in kexec/kdump kernels,
+ * added by kexec-tools)
+ */
+static const __be32 *of_get_usable_memory(struct device_node *dn)
+{
+ const __be32 *prop;
+ u32 len;
+
+ prop = of_get_property(dn, "linux,drconf-usable-memory", &len);
+ if (!prop || len < sizeof(unsigned int))
+ return NULL;
+
+ return prop;
+}
+
+void __init walk_drmem_lmbs(struct device_node *dn,
+ void (*func)(struct drmem_lmb *, const __be32 **))
+{
+ const __be32 *prop, *usm;
+
+ if (init_drmem_lmb_size(dn))
+ return;
+
+ usm = of_get_usable_memory(dn);
+
+ prop = of_get_property(dn, "ibm,dynamic-memory", NULL);
+ if (prop) {
+ __walk_drmem_v1_lmbs(prop, usm, func);
+ } else {
+ prop = of_get_property(dn, "ibm,dynamic-memory-v2", NULL);
+ if (prop)
+ __walk_drmem_v2_lmbs(prop, usm, func);
+ }
+}
+
+static void __init init_drmem_v1_lmbs(const __be32 *prop)
+{
+ struct drmem_lmb *lmb;
+
+ drmem_info->n_lmbs = of_read_number(prop++, 1);
+ if (drmem_info->n_lmbs == 0)
+ return;
+
+ drmem_info->lmbs = kcalloc(drmem_info->n_lmbs, sizeof(*lmb),
+ GFP_KERNEL);
+ if (!drmem_info->lmbs)
+ return;
+
+ for_each_drmem_lmb(lmb)
+ read_drconf_v1_cell(lmb, &prop);
+}
+
+static void __init init_drmem_v2_lmbs(const __be32 *prop)
+{
+ struct drmem_lmb *lmb;
+ struct of_drconf_cell_v2 dr_cell;
+ const __be32 *p;
+ u32 i, j, lmb_sets;
+ int lmb_index;
+
+ lmb_sets = of_read_number(prop++, 1);
+ if (lmb_sets == 0)
+ return;
+
+ /* first pass, calculate the number of LMBs */
+ p = prop;
+ for (i = 0; i < lmb_sets; i++) {
+ read_drconf_v2_cell(&dr_cell, &p);
+ drmem_info->n_lmbs += dr_cell.seq_lmbs;
+ }
+
+ drmem_info->lmbs = kcalloc(drmem_info->n_lmbs, sizeof(*lmb),
+ GFP_KERNEL);
+ if (!drmem_info->lmbs)
+ return;
+
+ /* second pass, read in the LMB information */
+ lmb_index = 0;
+ p = prop;
+
+ for (i = 0; i < lmb_sets; i++) {
+ read_drconf_v2_cell(&dr_cell, &p);
+
+ for (j = 0; j < dr_cell.seq_lmbs; j++) {
+ lmb = &drmem_info->lmbs[lmb_index++];
+
+ lmb->base_addr = dr_cell.base_addr;
+ dr_cell.base_addr += drmem_info->lmb_size;
+
+ lmb->drc_index = dr_cell.drc_index;
+ dr_cell.drc_index++;
+
+ lmb->aa_index = dr_cell.aa_index;
+ lmb->flags = dr_cell.flags;
+ }
+ }
+}
+
+static int __init drmem_init(void)
+{
+ struct device_node *dn;
+ const __be32 *prop;
+
+ dn = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
+ if (!dn) {
+ pr_info("No dynamic reconfiguration memory found\n");
+ return 0;
+ }
+
+ if (init_drmem_lmb_size(dn)) {
+ of_node_put(dn);
+ return 0;
+ }
+
+ prop = of_get_property(dn, "ibm,dynamic-memory", NULL);
+ if (prop) {
+ init_drmem_v1_lmbs(prop);
+ } else {
+ prop = of_get_property(dn, "ibm,dynamic-memory-v2", NULL);
+ if (prop)
+ init_drmem_v2_lmbs(prop);
+ }
+
+ of_node_put(dn);
+ return 0;
+}
+late_initcall(drmem_init);
^ permalink raw reply related [flat|nested] 4+ messages in thread
* [PATCH 4/4] powerpc/pseries: Move DRMEM processing out of numa.c
2018-11-27 20:31 [PATCH 0/4] powerpc/pseries: Refactor code to centralize drmem feature Michael Bringmann
2018-11-27 20:32 ` [PATCH 1/4] powerpc/pseries: Relocate drmem.c to pseries Michael Bringmann
@ 2018-11-27 20:37 ` Michael Bringmann
2018-11-27 20:39 ` [PATCH 4/4] powerpc/pseries: Relocate drmem.h to pseries Michael Bringmann
2 siblings, 0 replies; 4+ messages in thread
From: Michael Bringmann @ 2018-11-27 20:37 UTC (permalink / raw)
To: linuxppc-dev; +Cc: Michael Bringmann, Juliet Kim, Thomas Falcon, Tyrel Datwyler
The pseries-specific dynamic memory features are currently
implemented in several non-pseries-specific files.
This patch set moves the device-tree parsing code for the
properties ibm,dynamic-memory and ibm,dynamic-memory-v2, along
with its in-kernel representation, into the platform-specific
directory for the pseries platform.
This patch refactors references to drmem features out of numa.c so
that they can be moved to drmem.c. The changes include exporting a
few support functions from numa.c via powerpc/include/asm/topology.h,
and creating a new platform function, platform_parse_numa_properties,
which any powerpc platform may implement.
Signed-off-by: Michael Bringmann <mwb@linux.vnet.ibm.com>
---
arch/powerpc/include/asm/topology.h | 13 +
arch/powerpc/mm/numa.c | 238 +++--------------------
arch/powerpc/platforms/pseries/drmem.c | 330 ++++++++++++++++++++++++++++----
3 files changed, 329 insertions(+), 252 deletions(-)
diff --git a/arch/powerpc/include/asm/topology.h b/arch/powerpc/include/asm/topology.h
index a4a718d..0c1ad7e 100644
--- a/arch/powerpc/include/asm/topology.h
+++ b/arch/powerpc/include/asm/topology.h
@@ -135,5 +135,18 @@ static inline void shared_proc_topology_init(void) {}
#endif
#endif
+extern unsigned long numa_enforce_memory_limit(unsigned long start,
+ unsigned long size);
+extern void initialize_distance_lookup_table(int nid,
+ const __be32 *associativity);
+extern int fake_numa_create_new_node(unsigned long end_pfn,
+ unsigned int *nid);
+
+struct assoc_arrays {
+ u32 n_arrays;
+ u32 array_sz;
+ const __be32 *arrays;
+};
+
#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_TOPOLOGY_H */
diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
index 3a048e9..6c982df 100644
--- a/arch/powerpc/mm/numa.c
+++ b/arch/powerpc/mm/numa.c
@@ -39,7 +39,6 @@
#include <asm/hvcall.h>
#include <asm/setup.h>
#include <asm/vdso.h>
-#include <asm/drmem.h>
static int numa_enabled = 1;
@@ -87,8 +86,8 @@ static void __init setup_node_to_cpumask_map(void)
dbg("Node to cpumask map for %d nodes\n", nr_node_ids);
}
-static int __init fake_numa_create_new_node(unsigned long end_pfn,
- unsigned int *nid)
+int __init fake_numa_create_new_node(unsigned long end_pfn,
+ unsigned int *nid)
{
unsigned long long mem;
char *p = cmdline;
@@ -194,7 +193,7 @@ int __node_distance(int a, int b)
}
EXPORT_SYMBOL(__node_distance);
-static void initialize_distance_lookup_table(int nid,
+void initialize_distance_lookup_table(int nid,
const __be32 *associativity)
{
int i;
@@ -209,6 +208,7 @@ static void initialize_distance_lookup_table(int nid,
distance_lookup_table[nid][i] = of_read_number(entry, 1);
}
}
+EXPORT_SYMBOL(initialize_distance_lookup_table);
/* Returns nid in the range [0..MAX_NUMNODES-1], or -1 if no useful numa
* info is found.
@@ -356,98 +356,6 @@ static void __init get_n_mem_cells(int *n_addr_cells, int *n_size_cells)
of_node_put(memory);
}
-static unsigned long read_n_cells(int n, const __be32 **buf)
-{
- unsigned long result = 0;
-
- while (n--) {
- result = (result << 32) | of_read_number(*buf, 1);
- (*buf)++;
- }
- return result;
-}
-
-struct assoc_arrays {
- u32 n_arrays;
- u32 array_sz;
- const __be32 *arrays;
-};
-
-/*
- * Retrieve and validate the list of associativity arrays for drconf
- * memory from the ibm,associativity-lookup-arrays property of the
- * device tree..
- *
- * The layout of the ibm,associativity-lookup-arrays property is a number N
- * indicating the number of associativity arrays, followed by a number M
- * indicating the size of each associativity array, followed by a list
- * of N associativity arrays.
- */
-static int of_get_assoc_arrays(struct assoc_arrays *aa)
-{
- struct device_node *memory;
- const __be32 *prop;
- u32 len;
-
- memory = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
- if (!memory)
- return -1;
-
- prop = of_get_property(memory, "ibm,associativity-lookup-arrays", &len);
- if (!prop || len < 2 * sizeof(unsigned int)) {
- of_node_put(memory);
- return -1;
- }
-
- aa->n_arrays = of_read_number(prop++, 1);
- aa->array_sz = of_read_number(prop++, 1);
-
- of_node_put(memory);
-
- /* Now that we know the number of arrays and size of each array,
- * revalidate the size of the property read in.
- */
- if (len < (aa->n_arrays * aa->array_sz + 2) * sizeof(unsigned int))
- return -1;
-
- aa->arrays = prop;
- return 0;
-}
-
-/*
- * This is like of_node_to_nid_single() for memory represented in the
- * ibm,dynamic-reconfiguration-memory node.
- */
-static int of_drconf_to_nid_single(struct drmem_lmb *lmb)
-{
- struct assoc_arrays aa = { .arrays = NULL };
- int default_nid = 0;
- int nid = default_nid;
- int rc, index;
-
- rc = of_get_assoc_arrays(&aa);
- if (rc)
- return default_nid;
-
- if (min_common_depth > 0 && min_common_depth <= aa.array_sz &&
- !(lmb->flags & DRCONF_MEM_AI_INVALID) &&
- lmb->aa_index < aa.n_arrays) {
- index = lmb->aa_index * aa.array_sz + min_common_depth - 1;
- nid = of_read_number(&aa.arrays[index], 1);
-
- if (nid == 0xffff || nid >= MAX_NUMNODES)
- nid = default_nid;
-
- if (nid > 0) {
- index = lmb->aa_index * aa.array_sz;
- initialize_distance_lookup_table(nid,
- &aa.arrays[index]);
- }
- }
-
- return nid;
-}
-
/*
* Figure out to which domain a cpu belongs and stick it there.
* Return the id of the domain used.
@@ -536,7 +444,7 @@ static int ppc_numa_cpu_dead(unsigned int cpu)
* or zero. If the returned value of size is 0 the region should be
* discarded as it lies wholly above the memory limit.
*/
-static unsigned long __init numa_enforce_memory_limit(unsigned long start,
+unsigned long __init numa_enforce_memory_limit(unsigned long start,
unsigned long size)
{
/*
@@ -555,67 +463,20 @@ static unsigned long __init numa_enforce_memory_limit(unsigned long start,
return memblock_end_of_DRAM() - start;
}
-/*
- * Reads the counter for a given entry in
- * linux,drconf-usable-memory property
- */
-static inline int __init read_usm_ranges(const __be32 **usm)
+static inline unsigned long read_n_cells(int n, const __be32 **buf)
{
- /*
- * For each lmb in ibm,dynamic-memory a corresponding
- * entry in linux,drconf-usable-memory property contains
- * a counter followed by that many (base, size) duple.
- * read the counter from linux,drconf-usable-memory
- */
- return read_n_cells(n_mem_size_cells, usm);
-}
-
-/*
- * Extract NUMA information from the ibm,dynamic-reconfiguration-memory
- * node. This assumes n_mem_{addr,size}_cells have been set.
- */
-static void __init numa_setup_drmem_lmb(struct drmem_lmb *lmb,
- const __be32 **usm)
-{
- unsigned int ranges, is_kexec_kdump = 0;
- unsigned long base, size, sz;
- int nid;
-
- /*
- * Skip this block if the reserved bit is set in flags (0x80)
- * or if the block is not assigned to this partition (0x8)
- */
- if ((lmb->flags & DRCONF_MEM_RESERVED)
- || !(lmb->flags & DRCONF_MEM_ASSIGNED))
- return;
-
- if (*usm)
- is_kexec_kdump = 1;
-
- base = lmb->base_addr;
- size = drmem_lmb_size();
- ranges = 1;
+ unsigned long result = 0;
- if (is_kexec_kdump) {
- ranges = read_usm_ranges(usm);
- if (!ranges) /* there are no (base, size) duple */
- return;
+ while (n--) {
+ result = (result << 32) | of_read_number(*buf, 1);
+ (*buf)++;
}
+ return result;
+}
- do {
- if (is_kexec_kdump) {
- base = read_n_cells(n_mem_addr_cells, usm);
- size = read_n_cells(n_mem_size_cells, usm);
- }
-
- nid = of_drconf_to_nid_single(lmb);
- fake_numa_create_new_node(((base + size) >> PAGE_SHIFT),
- &nid);
- node_set_online(nid);
- sz = numa_enforce_memory_limit(base, size);
- if (sz)
- memblock_set_node(base, sz, &memblock.memory, nid);
- } while (--ranges);
+int __weak platform_parse_numa_properties(int min_common_depth)
+{
+ return min_common_depth;
}
static int __init parse_numa_properties(void)
@@ -704,16 +565,7 @@ static int __init parse_numa_properties(void)
goto new_range;
}
- /*
- * Now do the same thing for each MEMBLOCK listed in the
- * ibm,dynamic-memory property in the
- * ibm,dynamic-reconfiguration-memory node.
- */
- memory = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
- if (memory) {
- walk_drmem_lmbs(memory, numa_setup_drmem_lmb);
- of_node_put(memory);
- }
+ min_common_depth = platform_parse_numa_properties(min_common_depth);
return 0;
}
@@ -922,37 +774,6 @@ static int __init early_topology_updates(char *p)
#ifdef CONFIG_MEMORY_HOTPLUG
/*
- * Find the node associated with a hot added memory section for
- * memory represented in the device tree by the property
- * ibm,dynamic-reconfiguration-memory/ibm,dynamic-memory.
- */
-static int hot_add_drconf_scn_to_nid(unsigned long scn_addr)
-{
- struct drmem_lmb *lmb;
- unsigned long lmb_size;
- int nid = -1;
-
- lmb_size = drmem_lmb_size();
-
- for_each_drmem_lmb(lmb) {
- /* skip this block if it is reserved or not assigned to
- * this partition */
- if ((lmb->flags & DRCONF_MEM_RESERVED)
- || !(lmb->flags & DRCONF_MEM_ASSIGNED))
- continue;
-
- if ((scn_addr < lmb->base_addr)
- || (scn_addr >= (lmb->base_addr + lmb_size)))
- continue;
-
- nid = of_drconf_to_nid_single(lmb);
- break;
- }
-
- return nid;
-}
-
-/*
* Find the node associated with a hot added memory section for memory
* represented in the device tree as a node (i.e. memory@XXXX) for
* each memblock.
@@ -995,6 +816,11 @@ static int hot_add_node_scn_to_nid(unsigned long scn_addr)
return nid;
}
+int __weak platform_hot_add_scn_to_nid(unsigned long scn_addr)
+{
+ return NUMA_NO_NODE;
+}
+
/*
* Find the node associated with a hot added memory section. Section
* corresponds to a SPARSEMEM section, not an MEMBLOCK. It is assumed that
@@ -1002,17 +828,14 @@ static int hot_add_node_scn_to_nid(unsigned long scn_addr)
*/
int hot_add_scn_to_nid(unsigned long scn_addr)
{
- struct device_node *memory = NULL;
int nid;
if (!numa_enabled || (min_common_depth < 0))
return first_online_node;
- memory = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
- if (memory) {
- nid = hot_add_drconf_scn_to_nid(scn_addr);
- of_node_put(memory);
- } else {
+ nid = platform_hot_add_scn_to_nid(scn_addr);
+ if (nid != NUMA_NO_NODE)
+ {
nid = hot_add_node_scn_to_nid(scn_addr);
}
@@ -1022,9 +845,13 @@ int hot_add_scn_to_nid(unsigned long scn_addr)
return nid;
}
+u64 __weak platform_hot_add_drconf_memory_max(void)
+{
+ return 0;
+}
+
static u64 hot_add_drconf_memory_max(void)
{
- struct device_node *memory = NULL;
struct device_node *dn = NULL;
const __be64 *lrdr = NULL;
@@ -1036,12 +863,7 @@ static u64 hot_add_drconf_memory_max(void)
return be64_to_cpup(lrdr);
}
- memory = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
- if (memory) {
- of_node_put(memory);
- return drmem_lmb_memory_max();
- }
- return 0;
+ return platform_hot_add_drconf_memory_max();
}
/*
diff --git a/arch/powerpc/platforms/pseries/drmem.c b/arch/powerpc/platforms/pseries/drmem.c
index ccb0d3b..01ac651 100644
--- a/arch/powerpc/platforms/pseries/drmem.c
+++ b/arch/powerpc/platforms/pseries/drmem.c
@@ -16,8 +16,8 @@
#include <linux/of_fdt.h>
#include <linux/memblock.h>
#include <asm/prom.h>
+#include <asm/iommu.h>
#include <asm/drmem.h>
-#include <asm/platform.h>
static struct drmem_lmb_info __drmem_info;
struct drmem_lmb_info *drmem_info = &__drmem_info;
@@ -297,6 +297,76 @@ void __init walk_drmem_lmbs_early(unsigned long node,
memblock_dump_all();
}
+/*
+ * Interpret the ibm dynamic reconfiguration memory LMBs.
+ * This contains a list of memory blocks along with NUMA affinity
+ * information.
+ */
+static void __init early_init_drmem_lmb(struct drmem_lmb *lmb,
+ const __be32 **usm)
+{
+ u64 base, size;
+ int is_kexec_kdump = 0, rngs;
+
+ base = lmb->base_addr;
+ size = drmem_lmb_size();
+ rngs = 1;
+
+ /*
+ * Skip this block if the reserved bit is set in flags
+ * or if the block is not assigned to this partition.
+ */
+ if ((lmb->flags & DRCONF_MEM_RESERVED) ||
+ !(lmb->flags & DRCONF_MEM_ASSIGNED))
+ return;
+
+ if (*usm)
+ is_kexec_kdump = 1;
+
+ if (is_kexec_kdump) {
+ /*
+ * For each memblock in ibm,dynamic-memory, a
+ * corresponding entry in linux,drconf-usable-memory
+ * property contains a counter 'p' followed by 'p'
+ * (base, size) duple. Now read the counter from
+ * linux,drconf-usable-memory property
+ */
+ rngs = dt_mem_next_cell(dt_root_size_cells, usm);
+ if (!rngs) /* there are no (base, size) duple */
+ return;
+ }
+
+ do {
+ if (is_kexec_kdump) {
+ base = dt_mem_next_cell(dt_root_addr_cells, usm);
+ size = dt_mem_next_cell(dt_root_size_cells, usm);
+ }
+
+ if (iommu_is_off) {
+ if (base >= 0x80000000ul)
+ continue;
+ if ((base + size) > 0x80000000ul)
+ size = 0x80000000ul - base;
+ }
+
+ pr_debug("Adding: %llx -> %llx\n", base, size);
+ if (validate_mem_limit(base, &size))
+ memblock_add(base, size);
+ } while (--rngs);
+}
+
+int __init platform_early_init_dt_scan_memory_ppc(unsigned long node,
+ const char *uname,
+ int depth, void *data)
+{
+ if (depth == 1 &&
+ strcmp(uname, "ibm,dynamic-reconfiguration-memory") == 0) {
+ walk_drmem_lmbs_early(node, early_init_drmem_lmb);
+ return 0;
+ }
+
+ return -ENODEV;
+}
#endif
static int __init init_drmem_lmb_size(struct device_node *dn)
@@ -447,74 +517,246 @@ static int __init drmem_init(void)
}
late_initcall(drmem_init);
+/*
+ * Retrieve and validate the list of associativity arrays for drconf
+ * memory from the ibm,associativity-lookup-arrays property of the
+ * device tree..
+ *
+ * The layout of the ibm,associativity-lookup-arrays property is a number N
+ * indicating the number of associativity arrays, followed by a number M
+ * indicating the size of each associativity array, followed by a list
+ * of N associativity arrays.
+ */
+static int of_get_assoc_arrays(struct assoc_arrays *aa)
+{
+ struct device_node *memory;
+ const __be32 *prop;
+ u32 len;
+
+ memory = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
+ if (!memory)
+ return -1;
+
+ prop = of_get_property(memory, "ibm,associativity-lookup-arrays", &len);
+ if (!prop || len < 2 * sizeof(unsigned int)) {
+ of_node_put(memory);
+ return -1;
+ }
+
+ aa->n_arrays = of_read_number(prop++, 1);
+ aa->array_sz = of_read_number(prop++, 1);
+
+ of_node_put(memory);
+
+ /* Now that we know the number of arrays and size of each array,
+ * revalidate the size of the property read in.
+ */
+ if (len < (aa->n_arrays * aa->array_sz + 2) * sizeof(unsigned int))
+ return -1;
+
+ aa->arrays = prop;
+ return 0;
+}
+
+static int current_min_common_depth;
+static int n_mem_addr_cells, n_mem_size_cells;
/*
- * Interpret the ibm dynamic reconfiguration memory LMBs.
- * This contains a list of memory blocks along with NUMA affinity
- * information.
+ * This is like numa.c:of_node_to_nid_single() for memory represented
+ * in the ibm,dynamic-reconfiguration-memory node.
*/
-static void __init early_init_drmem_lmb(struct drmem_lmb *lmb,
- const __be32 **usm)
+static int of_drconf_to_nid_single(struct drmem_lmb *lmb)
{
- u64 base, size;
- int is_kexec_kdump = 0, rngs;
+ struct assoc_arrays aa = { .arrays = NULL };
+ int default_nid = 0;
+ int nid = default_nid;
+ int rc, index;
+
+ rc = of_get_assoc_arrays(&aa);
+ if (rc)
+ return default_nid;
+
+ if (current_min_common_depth > 0 && current_min_common_depth <= aa.array_sz &&
+ !(lmb->flags & DRCONF_MEM_AI_INVALID) &&
+ lmb->aa_index < aa.n_arrays) {
+ index = lmb->aa_index * aa.array_sz + current_min_common_depth - 1;
+ nid = of_read_number(&aa.arrays[index], 1);
+
+ if (nid == 0xffff || nid >= MAX_NUMNODES)
+ nid = default_nid;
+
+ if (nid > 0) {
+ index = lmb->aa_index * aa.array_sz;
+ initialize_distance_lookup_table(nid,
+ &aa.arrays[index]);
+ }
+ }
- base = lmb->base_addr;
- size = drmem_lmb_size();
- rngs = 1;
+ return nid;
+}
+static void __init get_n_mem_cells(int *n_addr_cells, int *n_size_cells)
+{
+ struct device_node *memory = NULL;
+
+ memory = of_find_node_by_type(memory, "memory");
+ if (!memory)
+ panic("numa.c: No memory nodes found!");
+
+ *n_addr_cells = of_n_addr_cells(memory);
+ *n_size_cells = of_n_size_cells(memory);
+ of_node_put(memory);
+}
+
+static inline unsigned long read_n_cells(int n, const __be32 **buf)
+{
+ unsigned long result = 0;
+
+ while (n--) {
+ result = (result << 32) | of_read_number(*buf, 1);
+ (*buf)++;
+ }
+ return result;
+}
+
+/*
+ * Reads the counter for a given entry in
+ * linux,drconf-usable-memory property
+ */
+static inline int __init read_usm_ranges(const __be32 **usm)
+{
/*
- * Skip this block if the reserved bit is set in flags
- * or if the block is not assigned to this partition.
+ * For each lmb in ibm,dynamic-memory a corresponding
+ * entry in linux,drconf-usable-memory property contains
+ * a counter followed by that many (base, size) duple.
+ * read the counter from linux,drconf-usable-memory
*/
- if ((lmb->flags & DRCONF_MEM_RESERVED) ||
- !(lmb->flags & DRCONF_MEM_ASSIGNED))
+ return read_n_cells(n_mem_size_cells, usm);
+}
+
+/*
+ * Extract NUMA information from the ibm,dynamic-reconfiguration-memory
+ * node. This assumes n_mem_{addr,size}_cells have been set.
+ */
+static void __init numa_setup_drmem_lmb(struct drmem_lmb *lmb,
+ const __be32 **usm)
+{
+ unsigned int ranges, is_kexec_kdump = 0;
+ unsigned long base, size, sz;
+ int nid;
+
+ /*
+ * Skip this block if the reserved bit is set in flags (0x80)
+ * or if the block is not assigned to this partition (0x8)
+ */
+ if ((lmb->flags & DRCONF_MEM_RESERVED)
+ || !(lmb->flags & DRCONF_MEM_ASSIGNED))
return;
if (*usm)
is_kexec_kdump = 1;
+ base = lmb->base_addr;
+ size = drmem_lmb_size();
+ ranges = 1;
+
if (is_kexec_kdump) {
- /*
- * For each memblock in ibm,dynamic-memory, a
- * corresponding entry in linux,drconf-usable-memory
- * property contains a counter 'p' followed by 'p'
- * (base, size) duple. Now read the counter from
- * linux,drconf-usable-memory property
- */
- rngs = dt_mem_next_cell(dt_root_size_cells, usm);
- if (!rngs) /* there are no (base, size) duple */
+ ranges = read_usm_ranges(usm);
+ if (!ranges) /* there are no (base, size) duple */
return;
}
+ get_n_mem_cells(&n_mem_addr_cells, &n_mem_size_cells);
+
do {
if (is_kexec_kdump) {
- base = dt_mem_next_cell(dt_root_addr_cells, usm);
- size = dt_mem_next_cell(dt_root_size_cells, usm);
+ base = read_n_cells(n_mem_addr_cells, usm);
+ size = read_n_cells(n_mem_size_cells, usm);
}
- if (iommu_is_off) {
- if (base >= 0x80000000ul)
- continue;
- if ((base + size) > 0x80000000ul)
- size = 0x80000000ul - base;
- }
+ nid = of_drconf_to_nid_single(lmb);
+ fake_numa_create_new_node(((base + size) >> PAGE_SHIFT),
+ &nid);
+ node_set_online(nid);
+ sz = numa_enforce_memory_limit(base, size);
+ if (sz)
+ memblock_set_node(base, sz, &memblock.memory, nid);
+ } while (--ranges);
+}
- DBG("Adding: %llx -> %llx\n", base, size);
- if (validate_mem_limit(base, &size))
- memblock_add(base, size);
- } while (--rngs);
+int __init platform_parse_numa_properties(int min_common_depth)
+{
+ struct device_node *memory;
+
+ /*
+ * Now do the same thing for each MEMBLOCK listed in the
+ * ibm,dynamic-memory property in the
+ * ibm,dynamic-reconfiguration-memory node.
+ */
+ current_min_common_depth = min_common_depth;
+ memory = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
+ if (memory) {
+ walk_drmem_lmbs(memory, numa_setup_drmem_lmb);
+ of_node_put(memory);
+ }
+ return current_min_common_depth;
}
-int __init platform_early_init_dt_scan_memory_ppc(unsigned long node,
- const char *uname,
- int depth, void *data)
+u64 platform_hot_add_drconf_memory_max(void)
{
- if (depth == 1 &&
- strcmp(uname, "ibm,dynamic-reconfiguration-memory") == 0) {
- walk_drmem_lmbs_early(node, early_init_drmem_lmb);
- return 0;
+ struct device_node *memory = NULL;
+
+ memory = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
+ if (memory) {
+ of_node_put(memory);
+ return drmem_lmb_memory_max();
+ }
+ return 0;
+}
+
+
+/*
+ * Find the node associated with a hot added memory section for
+ * memory represented in the device tree by the property
+ * ibm,dynamic-reconfiguration-memory/ibm,dynamic-memory.
+ */
+static int hot_add_drconf_scn_to_nid(unsigned long scn_addr)
+{
+ struct drmem_lmb *lmb;
+ unsigned long lmb_size;
+ int nid = -1;
+
+ lmb_size = drmem_lmb_size();
+
+ for_each_drmem_lmb(lmb) {
+ /* skip this block if it is reserved or not assigned to
+ * this partition */
+ if ((lmb->flags & DRCONF_MEM_RESERVED)
+ || !(lmb->flags & DRCONF_MEM_ASSIGNED))
+ continue;
+
+ if ((scn_addr < lmb->base_addr)
+ || (scn_addr >= (lmb->base_addr + lmb_size)))
+ continue;
+
+ nid = of_drconf_to_nid_single(lmb);
+ break;
}
- return -ENODEV;
+ return nid;
+}
+
+int platform_hot_add_scn_to_nid(unsigned long scn_addr)
+{
+ struct device_node *memory = NULL;
+ int nid;
+
+ memory = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
+ if (memory) {
+ nid = hot_add_drconf_scn_to_nid(scn_addr);
+ of_node_put(memory);
+ return nid;
+ } else {
+ return NUMA_NO_NODE;
+ }
}
^ permalink raw reply related [flat|nested] 4+ messages in thread
* [PATCH 4/4] powerpc/pseries: Relocate drmem.h to pseries
2018-11-27 20:31 [PATCH 0/4] powerpc/pseries: Refactor code to centralize drmem feature Michael Bringmann
2018-11-27 20:32 ` [PATCH 1/4] powerpc/pseries: Relocate drmem.c to pseries Michael Bringmann
2018-11-27 20:37 ` [PATCH 4/4] powerpc/pseries: Move DRMEM processing out of numa.c Michael Bringmann
@ 2018-11-27 20:39 ` Michael Bringmann
2 siblings, 0 replies; 4+ messages in thread
From: Michael Bringmann @ 2018-11-27 20:39 UTC (permalink / raw)
To: linuxppc-dev; +Cc: Michael Bringmann, Juliet Kim, Thomas Falcon, Tyrel Datwyler
The implementation of the pseries-specific dynamic memory features
is currently spread across several non-pseries-specific files.
This patch set moves the device-tree parsing code for the
properties ibm,dynamic-memory and ibm,dynamic-memory-v2, along
with their in-kernel representation, into the pseries
platform-specific directory.
This patch moves drmem.h from the directory arch/powerpc/include/asm
to arch/powerpc/platforms/pseries, and fixes the include-file
references in the pseries files accordingly.
Signed-off-by: Michael Bringmann <mwb@linux.vnet.ibm.com>
---
arch/powerpc/include/asm/drmem.h | 107 -----------------------
arch/powerpc/platforms/pseries/drmem.c | 2
arch/powerpc/platforms/pseries/drmem.h | 107 +++++++++++++++++++++++
arch/powerpc/platforms/pseries/hotplug-memory.c | 2
arch/powerpc/platforms/pseries/lparcfg.c | 2
5 files changed, 110 insertions(+), 110 deletions(-)
delete mode 100644 arch/powerpc/include/asm/drmem.h
create mode 100644 arch/powerpc/platforms/pseries/drmem.h
diff --git a/arch/powerpc/include/asm/drmem.h b/arch/powerpc/include/asm/drmem.h
deleted file mode 100644
index 7c1d8e7..0000000
--- a/arch/powerpc/include/asm/drmem.h
+++ /dev/null
@@ -1,107 +0,0 @@
-/*
- * drmem.h: Power specific logical memory block representation
- *
- * Copyright 2017 IBM Corporation
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- */
-
-#ifndef _ASM_POWERPC_LMB_H
-#define _ASM_POWERPC_LMB_H
-
-struct drmem_lmb {
- u64 base_addr;
- u32 drc_index;
- u32 aa_index;
- u32 flags;
-};
-
-struct drmem_lmb_info {
- struct drmem_lmb *lmbs;
- int n_lmbs;
- u32 lmb_size;
-};
-
-extern struct drmem_lmb_info *drmem_info;
-
-#define for_each_drmem_lmb_in_range(lmb, start, end) \
- for ((lmb) = (start); (lmb) <= (end); (lmb)++)
-
-#define for_each_drmem_lmb(lmb) \
- for_each_drmem_lmb_in_range((lmb), \
- &drmem_info->lmbs[0], \
- &drmem_info->lmbs[drmem_info->n_lmbs - 1])
-
-/*
- * The of_drconf_cell_v1 struct defines the layout of the LMB data
- * specified in the ibm,dynamic-memory device tree property.
- * The property itself is a 32-bit value specifying the number of
- * LMBs followed by an array of of_drconf_cell_v1 entries, one
- * per LMB.
- */
-struct of_drconf_cell_v1 {
- __be64 base_addr;
- __be32 drc_index;
- __be32 reserved;
- __be32 aa_index;
- __be32 flags;
-};
-
-/*
- * Version 2 of the ibm,dynamic-memory property is defined as a
- * 32-bit value specifying the number of LMB sets followed by an
- * array of of_drconf_cell_v2 entries, one per LMB set.
- */
-struct of_drconf_cell_v2 {
- u32 seq_lmbs;
- u64 base_addr;
- u32 drc_index;
- u32 aa_index;
- u32 flags;
-} __packed;
-
-#define DRCONF_MEM_ASSIGNED 0x00000008
-#define DRCONF_MEM_AI_INVALID 0x00000040
-#define DRCONF_MEM_RESERVED 0x00000080
-
-static inline u32 drmem_lmb_size(void)
-{
- return drmem_info->lmb_size;
-}
-
-#define DRMEM_LMB_RESERVED 0x80000000
-
-static inline void drmem_mark_lmb_reserved(struct drmem_lmb *lmb)
-{
- lmb->flags |= DRMEM_LMB_RESERVED;
-}
-
-static inline void drmem_remove_lmb_reservation(struct drmem_lmb *lmb)
-{
- lmb->flags &= ~DRMEM_LMB_RESERVED;
-}
-
-static inline bool drmem_lmb_reserved(struct drmem_lmb *lmb)
-{
- return lmb->flags & DRMEM_LMB_RESERVED;
-}
-
-u64 drmem_lmb_memory_max(void);
-void __init walk_drmem_lmbs(struct device_node *dn,
- void (*func)(struct drmem_lmb *, const __be32 **));
-int drmem_update_dt(void);
-
-#ifdef CONFIG_PPC_PSERIES
-void __init walk_drmem_lmbs_early(unsigned long node,
- void (*func)(struct drmem_lmb *, const __be32 **));
-#endif
-
-static inline void invalidate_lmb_associativity_index(struct drmem_lmb *lmb)
-{
- lmb->aa_index = 0xffffffff;
-}
-
-#endif /* _ASM_POWERPC_LMB_H */
diff --git a/arch/powerpc/platforms/pseries/drmem.c b/arch/powerpc/platforms/pseries/drmem.c
index 01ac651..a52f10e 100644
--- a/arch/powerpc/platforms/pseries/drmem.c
+++ b/arch/powerpc/platforms/pseries/drmem.c
@@ -17,7 +17,7 @@
#include <linux/memblock.h>
#include <asm/prom.h>
#include <asm/iommu.h>
-#include <asm/drmem.h>
+#include "drmem.h"
static struct drmem_lmb_info __drmem_info;
struct drmem_lmb_info *drmem_info = &__drmem_info;
diff --git a/arch/powerpc/platforms/pseries/drmem.h b/arch/powerpc/platforms/pseries/drmem.h
new file mode 100644
index 0000000..7c1d8e7
--- /dev/null
+++ b/arch/powerpc/platforms/pseries/drmem.h
@@ -0,0 +1,107 @@
+/*
+ * drmem.h: Power specific logical memory block representation
+ *
+ * Copyright 2017 IBM Corporation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#ifndef _ASM_POWERPC_LMB_H
+#define _ASM_POWERPC_LMB_H
+
+struct drmem_lmb {
+ u64 base_addr;
+ u32 drc_index;
+ u32 aa_index;
+ u32 flags;
+};
+
+struct drmem_lmb_info {
+ struct drmem_lmb *lmbs;
+ int n_lmbs;
+ u32 lmb_size;
+};
+
+extern struct drmem_lmb_info *drmem_info;
+
+#define for_each_drmem_lmb_in_range(lmb, start, end) \
+ for ((lmb) = (start); (lmb) <= (end); (lmb)++)
+
+#define for_each_drmem_lmb(lmb) \
+ for_each_drmem_lmb_in_range((lmb), \
+ &drmem_info->lmbs[0], \
+ &drmem_info->lmbs[drmem_info->n_lmbs - 1])
+
+/*
+ * The of_drconf_cell_v1 struct defines the layout of the LMB data
+ * specified in the ibm,dynamic-memory device tree property.
+ * The property itself is a 32-bit value specifying the number of
+ * LMBs followed by an array of of_drconf_cell_v1 entries, one
+ * per LMB.
+ */
+struct of_drconf_cell_v1 {
+ __be64 base_addr;
+ __be32 drc_index;
+ __be32 reserved;
+ __be32 aa_index;
+ __be32 flags;
+};
+
+/*
+ * Version 2 of the ibm,dynamic-memory property is defined as a
+ * 32-bit value specifying the number of LMB sets followed by an
+ * array of of_drconf_cell_v2 entries, one per LMB set.
+ */
+struct of_drconf_cell_v2 {
+ u32 seq_lmbs;
+ u64 base_addr;
+ u32 drc_index;
+ u32 aa_index;
+ u32 flags;
+} __packed;
+
+#define DRCONF_MEM_ASSIGNED 0x00000008
+#define DRCONF_MEM_AI_INVALID 0x00000040
+#define DRCONF_MEM_RESERVED 0x00000080
+
+static inline u32 drmem_lmb_size(void)
+{
+ return drmem_info->lmb_size;
+}
+
+#define DRMEM_LMB_RESERVED 0x80000000
+
+static inline void drmem_mark_lmb_reserved(struct drmem_lmb *lmb)
+{
+ lmb->flags |= DRMEM_LMB_RESERVED;
+}
+
+static inline void drmem_remove_lmb_reservation(struct drmem_lmb *lmb)
+{
+ lmb->flags &= ~DRMEM_LMB_RESERVED;
+}
+
+static inline bool drmem_lmb_reserved(struct drmem_lmb *lmb)
+{
+ return lmb->flags & DRMEM_LMB_RESERVED;
+}
+
+u64 drmem_lmb_memory_max(void);
+void __init walk_drmem_lmbs(struct device_node *dn,
+ void (*func)(struct drmem_lmb *, const __be32 **));
+int drmem_update_dt(void);
+
+#ifdef CONFIG_PPC_PSERIES
+void __init walk_drmem_lmbs_early(unsigned long node,
+ void (*func)(struct drmem_lmb *, const __be32 **));
+#endif
+
+static inline void invalidate_lmb_associativity_index(struct drmem_lmb *lmb)
+{
+ lmb->aa_index = 0xffffffff;
+}
+
+#endif /* _ASM_POWERPC_LMB_H */
diff --git a/arch/powerpc/platforms/pseries/hotplug-memory.c b/arch/powerpc/platforms/pseries/hotplug-memory.c
index 2a983b5..988d67e4 100644
--- a/arch/powerpc/platforms/pseries/hotplug-memory.c
+++ b/arch/powerpc/platforms/pseries/hotplug-memory.c
@@ -23,7 +23,7 @@
#include <asm/prom.h>
#include <asm/sparsemem.h>
#include <asm/fadump.h>
-#include <asm/drmem.h>
+#include "drmem.h"
#include "pseries.h"
static bool rtas_hp_event;
diff --git a/arch/powerpc/platforms/pseries/lparcfg.c b/arch/powerpc/platforms/pseries/lparcfg.c
index 7944873..28d931e 100644
--- a/arch/powerpc/platforms/pseries/lparcfg.c
+++ b/arch/powerpc/platforms/pseries/lparcfg.c
@@ -37,8 +37,8 @@
#include <asm/vio.h>
#include <asm/mmu.h>
#include <asm/machdep.h>
-#include <asm/drmem.h>
+#include "drmem.h"
#include "pseries.h"
/*
^ permalink raw reply related [flat|nested] 4+ messages in thread
end of thread, other threads:[~2018-11-27 20:43 UTC | newest]
Thread overview: 4+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2018-11-27 20:31 [PATCH 0/4] powerpc/pseries: Refactor code to centralize drmem feature Michael Bringmann
2018-11-27 20:32 ` [PATCH 1/4] powerpc/pseries: Relocate drmem.c to pseries Michael Bringmann
2018-11-27 20:37 ` [PATCH 4/4] powerpc/pseries: Move DRMEM processing out of numa.c Michael Bringmann
2018-11-27 20:39 ` [PATCH 4/4] powerpc/pseries: Relocate drmem.h to pseries Michael Bringmann
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).