* [PATCH 0/7] x86/platform/UV: UV Update PatchSet 2
@ 2017-01-25 16:10 'Mike Travis, '
  2017-01-25 16:10 ` [PATCH 1/7] x86/platform/UV: Add Support for UV4 Hubless systems 'Mike Travis, '
                   ` (6 more replies)
  0 siblings, 7 replies; 9+ messages in thread
From: 'Mike Travis, ' @ 2017-01-25 16:10 UTC (permalink / raw)
  To: Ingo Molnar, Thomas Gleixner, H. Peter Anvin, Russ Anderson
  Cc: x86, linux-kernel


Revised (V2) second UV Update PatchSet containing:

    * Fix the panic where KEXEC'd kernel does not have access to
      EFI runtime mappings. (already accepted)

    * Fix the panic with 2 socket configs. (already accepted)

    * A style cleanup patch from Ingo.

    * Recognition and initialization of UV Hubless systems.

    * Addition of UV Hubless support for RMC system NMI command.

    * Addition of a basic CPU health check using NMI.

    * Verify new NMI action is valid.

    * Default NMI action standardized on "dump" (stack/regs).

    * Move the check for "is UV system" from native_smp_prepare_cpus() to
      uv_system_init(), since it now needs to check for hubless systems as
      well.

    * A style cleanup patch for the UV NMI code.

-- 


* [PATCH 1/7] x86/platform/UV: Add Support for UV4 Hubless systems
  2017-01-25 16:10 [PATCH 0/7] x86/platform/UV: UV Update PatchSet 2 'Mike Travis, '
@ 2017-01-25 16:10 ` 'Mike Travis, '
  2017-01-25 16:10 ` [PATCH 2/7] x86/platform/UV: Add Support for UV4 Hubless NMIs 'Mike Travis, '
                   ` (5 subsequent siblings)
  6 siblings, 0 replies; 9+ messages in thread
From: 'Mike Travis, ' @ 2017-01-25 16:10 UTC (permalink / raw)
  To: Ingo Molnar, Thomas Gleixner, H. Peter Anvin, Russ Anderson
  Cc: x86, linux-kernel

[-- Attachment #1: uv4_add-hubless-support --]
[-- Type: text/plain, Size: 2793 bytes --]

Add recognition and support for UV4 hubless systems.

Signed-off-by: Mike Travis <travis@sgi.com>
###Acked-by: Dimitri Sivanich <sivanich@hpe.com>
###Reviewed-by: Russ Anderson <rja@hpe.com>
---
 arch/x86/include/asm/uv/uv.h       |    2 ++
 arch/x86/kernel/apic/x2apic_uv_x.c |   30 ++++++++++++++++++++++++++++--
 2 files changed, 30 insertions(+), 2 deletions(-)

--- linux.orig/arch/x86/include/asm/uv/uv.h
+++ linux/arch/x86/include/asm/uv/uv.h
@@ -10,6 +10,7 @@ struct mm_struct;
 
 extern enum uv_system_type get_uv_system_type(void);
 extern int is_uv_system(void);
+extern int is_uv_hubless(void);
 extern void uv_cpu_init(void);
 extern void uv_nmi_init(void);
 extern void uv_system_init(void);
@@ -23,6 +24,7 @@ extern const struct cpumask *uv_flush_tl
 
 static inline enum uv_system_type get_uv_system_type(void) { return UV_NONE; }
 static inline int is_uv_system(void)	{ return 0; }
+static inline int is_uv_hubless(void)	{ return 0; }
 static inline void uv_cpu_init(void)	{ }
 static inline void uv_system_init(void)	{ }
 static inline const struct cpumask *
--- linux.orig/arch/x86/kernel/apic/x2apic_uv_x.c
+++ linux/arch/x86/kernel/apic/x2apic_uv_x.c
@@ -42,6 +42,7 @@
 DEFINE_PER_CPU(int, x2apic_extra_bits);
 
 static enum uv_system_type	uv_system_type;
+static bool			uv_hubless_system;
 static u64			gru_start_paddr, gru_end_paddr;
 static u64			gru_dist_base, gru_first_node_paddr = -1LL, gru_last_node_paddr;
 static u64			gru_dist_lmask, gru_dist_umask;
@@ -225,8 +226,14 @@ static int __init uv_acpi_madt_oem_check
 	int pnodeid;
 	int uv_apic;
 
-	if (strncmp(oem_id, "SGI", 3) != 0)
+	if (strncmp(oem_id, "SGI", 3) != 0) {
+		if (strncmp(oem_id, "NSGI", 4) == 0) {
+			uv_hubless_system = true;
+			pr_info("UV: OEM IDs %s/%s, HUBLESS\n",
+				oem_id, oem_table_id);
+		}
 		return 0;
+	}
 
 	if (numa_off) {
 		pr_err("UV: NUMA is off, disabling UV support\n");
@@ -300,6 +307,12 @@ int is_uv_system(void)
 }
 EXPORT_SYMBOL_GPL(is_uv_system);
 
+int is_uv_hubless(void)
+{
+	return uv_hubless_system;
+}
+EXPORT_SYMBOL_GPL(is_uv_hubless);
+
 void **__uv_hub_info_list;
 EXPORT_SYMBOL_GPL(__uv_hub_info_list);
 
@@ -1353,7 +1366,7 @@ static void __init build_socket_tables(v
 	}
 }
 
-void __init uv_system_init(void)
+static void __init uv_system_init_hub(void)
 {
 	struct uv_hub_info_s hub_info = {0};
 	int bytes, cpu, nodeid;
@@ -1490,4 +1503,17 @@ void __init uv_system_init(void)
 		reboot_type = BOOT_ACPI;
 }
 
+/*
+ * There is a small amount of UV specific code needed to initialize a
+ * UV system that does not have a "UV HUB" (referred to as "hubless").
+ */
+void __init uv_system_init(void)
+{
+	if (likely(!is_uv_system() && !is_uv_hubless()))
+		return;
+
+	if (is_uv_system())
+		uv_system_init_hub();
+}
+
 apic_driver(apic_x2apic_uv_x);
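
For reference, a minimal sketch of how platform code outside this file could
gate on the new interface.  Only is_uv_system() and is_uv_hubless() come from
this patch; the probe function itself is hypothetical:

#include <linux/init.h>
#include <linux/errno.h>
#include <asm/uv/uv.h>

static int __init example_uv_probe(void)
{
	/* Run on any UV system, hubbed or hubless; bail out otherwise. */
	if (!is_uv_system() && !is_uv_hubless())
		return -ENODEV;

	/* UV-specific setup would go here. */
	return 0;
}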

-- 


* [PATCH 2/7] x86/platform/UV: Add Support for UV4 Hubless NMIs
  2017-01-25 16:10 [PATCH 0/7] x86/platform/UV: UV Update PatchSet 2 'Mike Travis, '
  2017-01-25 16:10 ` [PATCH 1/7] x86/platform/UV: Add Support for UV4 Hubless systems 'Mike Travis, '
@ 2017-01-25 16:10 ` 'Mike Travis, '
  2017-01-25 16:10 ` [PATCH 3/7] x86/platform/UV: Add basic CPU NMI health check 'Mike Travis, '
                   ` (4 subsequent siblings)
  6 siblings, 0 replies; 9+ messages in thread
From: 'Mike Travis, ' @ 2017-01-25 16:10 UTC (permalink / raw)
  To: Ingo Molnar, Thomas Gleixner, H. Peter Anvin, Russ Anderson
  Cc: x86, linux-kernel

[-- Attachment #1: uv4_add-hubless-nmi --]
[-- Type: text/plain, Size: 11023 bytes --]

Merge new UV Hubless NMI support into existing UV NMI handler.

Signed-off-by: Mike Travis <travis@sgi.com>
###Acked-by: Dimitri Sivanich <sivanich@hpe.com>
###Reviewed-by: Russ Anderson <rja@hpe.com>
---
 arch/x86/include/asm/uv/uv_hub.h   |    3 
 arch/x86/kernel/apic/x2apic_uv_x.c |    2 
 arch/x86/platform/uv/uv_nmi.c      |  193 ++++++++++++++++++++++++++++++++-----
 3 files changed, 176 insertions(+), 22 deletions(-)

--- linux.orig/arch/x86/include/asm/uv/uv_hub.h
+++ linux/arch/x86/include/asm/uv/uv_hub.h
@@ -772,6 +772,7 @@ static inline int uv_num_possible_blades
 
 /* Per Hub NMI support */
 extern void uv_nmi_setup(void);
+extern void uv_nmi_setup_hubless(void);
 
 /* BMC sets a bit this MMR non-zero before sending an NMI */
 #define UVH_NMI_MMR		UVH_SCRATCH5
@@ -799,6 +800,8 @@ struct uv_hub_nmi_s {
 	atomic_t	read_mmr_count;	/* count of MMR reads */
 	atomic_t	nmi_count;	/* count of true UV NMIs */
 	unsigned long	nmi_value;	/* last value read from NMI MMR */
+	bool		hub_present;	/* false means UV hubless system */
+	bool		pch_owner;	/* indicates this hub owns PCH */
 };
 
 struct uv_cpu_nmi_s {
--- linux.orig/arch/x86/kernel/apic/x2apic_uv_x.c
+++ linux/arch/x86/kernel/apic/x2apic_uv_x.c
@@ -1516,6 +1516,8 @@ void __init uv_system_init(void)
 
 	if (is_uv_system())
 		uv_system_init_hub();
+	else
+		uv_nmi_setup_hubless();
 }
 
 apic_driver(apic_x2apic_uv_x);
--- linux.orig/arch/x86/platform/uv/uv_nmi.c
+++ linux/arch/x86/platform/uv/uv_nmi.c
@@ -67,6 +67,18 @@ static struct uv_hub_nmi_s **uv_hub_nmi_
 DEFINE_PER_CPU(struct uv_cpu_nmi_s, uv_cpu_nmi);
 EXPORT_PER_CPU_SYMBOL_GPL(uv_cpu_nmi);
 
+/* UV hubless values */
+#define NMI_CONTROL_PORT	0x70
+#define NMI_DUMMY_PORT		0x71
+#define GPI_NMI_STS_GPP_D_0	0x164
+#define GPI_NMI_ENA_GPP_D_0	0x174
+#define STS_GPP_D_0_MASK	0x1
+#define PAD_CFG_DW0_GPP_D_0	0x4c0
+#define GPIROUTNMI		(1ul << 17)
+#define PCH_PCR_GPIO_1_BASE	0xfdae0000ul
+#define PCH_PCR_GPIO_ADDRESS(offset) (int *)((u64)(pch_base) | (u64)(offset))
+
+static u64 *pch_base;
 static unsigned long nmi_mmr;
 static unsigned long nmi_mmr_clear;
 static unsigned long nmi_mmr_pending;
@@ -144,6 +156,19 @@ module_param_named(wait_count, uv_nmi_wa
 static int uv_nmi_retry_count = 500;
 module_param_named(retry_count, uv_nmi_retry_count, int, 0644);
 
+static bool uv_pch_intr_enable = true;
+static bool uv_pch_intr_now_enabled;
+module_param_named(pch_intr_enable, uv_pch_intr_enable, bool, 0644);
+
+static int uv_nmi_debug;
+module_param_named(debug, uv_nmi_debug, int, 0644);
+
+#define nmi_debug(fmt, ...)				\
+	do {						\
+		if (uv_nmi_debug)			\
+			pr_info(fmt, ##__VA_ARGS__);	\
+	} while (0)
+
 /*
  * Valid NMI Actions:
  *  "dump"	- dump process stack for each cpu
@@ -192,6 +217,77 @@ static inline void uv_local_mmr_clear_nm
 }
 
 /*
+ * UV hubless NMI handler functions
+ */
+static inline void uv_reassert_nmi(void)
+{
+	/* (from arch/x86/include/asm/mach_traps.h) */
+	outb(0x8f, NMI_CONTROL_PORT);
+	inb(NMI_DUMMY_PORT);		/* dummy read */
+	outb(0x0f, NMI_CONTROL_PORT);
+	inb(NMI_DUMMY_PORT);		/* dummy read */
+}
+
+static void uv_init_hubless_pch_io(int offset, int mask, int data)
+{
+	int *addr = PCH_PCR_GPIO_ADDRESS(offset);
+	int readd = readl(addr);
+
+	if (mask) {			/* OR in new data */
+		int writed = (readd & ~mask) | data;
+
+		nmi_debug("UV:PCH: %p = %x & %x | %x (%x)\n",
+			addr, readd, ~mask, data, writed);
+		writel(writed, addr);
+	} else if (readd & data) {	/* clear status bit */
+		nmi_debug("UV:PCH: %p = %x\n", addr, data);
+		writel(data, addr);
+	}
+
+	(void)readl(addr);		/* flush write data */
+}
+
+static void uv_nmi_setup_hubless_intr(void)
+{
+	uv_pch_intr_now_enabled = uv_pch_intr_enable;
+
+	uv_init_hubless_pch_io(
+		PAD_CFG_DW0_GPP_D_0, GPIROUTNMI,
+		uv_pch_intr_now_enabled ? GPIROUTNMI : 0);
+
+	nmi_debug("UV:NMI: GPP_D_0 interrupt %s\n",
+		uv_pch_intr_now_enabled ? "enabled" : "disabled");
+}
+
+static int uv_nmi_test_hubless(struct uv_hub_nmi_s *hub_nmi)
+{
+	int *pstat = PCH_PCR_GPIO_ADDRESS(GPI_NMI_STS_GPP_D_0);
+	int status = *pstat;
+
+	hub_nmi->nmi_value = status;
+	atomic_inc(&hub_nmi->read_mmr_count);
+
+	if (!(status & STS_GPP_D_0_MASK))	/* Not a UV external NMI */
+		return 0;
+
+	*pstat = STS_GPP_D_0_MASK;	/* Is a UV NMI: clear GPP_D_0 status */
+	(void)*pstat;			/* flush write */
+
+	return 1;
+}
+
+static int uv_test_nmi(struct uv_hub_nmi_s *hub_nmi)
+{
+	if (hub_nmi->hub_present)
+		return uv_nmi_test_mmr(hub_nmi);
+
+	if (hub_nmi->pch_owner)		/* Only PCH owner can check status */
+		return uv_nmi_test_hubless(hub_nmi);
+
+	return -1;
+}
+
+/*
  * If first cpu in on this hub, set hub_nmi "in_nmi" and "owner" values and
  * return true.  If first cpu in on the system, set global "in_nmi" flag.
  */
@@ -214,6 +310,7 @@ static int uv_check_nmi(struct uv_hub_nm
 {
 	int cpu = smp_processor_id();
 	int nmi = 0;
+	int nmi_detected = 0;
 
 	local64_inc(&uv_nmi_count);
 	this_cpu_inc(uv_cpu_nmi.queries);
@@ -224,20 +321,26 @@ static int uv_check_nmi(struct uv_hub_nm
 			break;
 
 		if (raw_spin_trylock(&hub_nmi->nmi_lock)) {
+			nmi_detected = uv_test_nmi(hub_nmi);
 
-			/* check hub MMR NMI flag */
-			if (uv_nmi_test_mmr(hub_nmi)) {
+			/* check flag for UV external NMI */
+			if (nmi_detected > 0) {
 				uv_set_in_nmi(cpu, hub_nmi);
 				nmi = 1;
 				break;
 			}
 
-			/* MMR NMI flag is clear */
+			/* A non-PCH node in a hubless system waits for NMI */
+			else if (nmi_detected < 0)
+				goto slave_wait;
+
+			/* MMR/PCH NMI flag is clear */
 			raw_spin_unlock(&hub_nmi->nmi_lock);
 
 		} else {
-			/* wait a moment for the hub nmi locker to set flag */
-			cpu_relax();
+
+			/* Wait a moment for the HUB NMI locker to set flag */
+slave_wait:		cpu_relax();
 			udelay(uv_nmi_slave_delay);
 
 			/* re-check hub in_nmi flag */
@@ -246,13 +349,20 @@ static int uv_check_nmi(struct uv_hub_nm
 				break;
 		}
 
-		/* check if this BMC missed setting the MMR NMI flag */
+		/*
+		 * Check if this BMC missed setting the MMR NMI flag (or)
+		 * UV hubless system where only PCH owner can check flag
+		 */
 		if (!nmi) {
 			nmi = atomic_read(&uv_in_nmi);
 			if (nmi)
 				uv_set_in_nmi(cpu, hub_nmi);
 		}
 
+		/* If we're holding the hub lock, release it now */
+		if (nmi_detected < 0)
+			raw_spin_unlock(&hub_nmi->nmi_lock);
+
 	} while (0);
 
 	if (!nmi)
@@ -269,7 +379,10 @@ static inline void uv_clear_nmi(int cpu)
 	if (cpu == atomic_read(&hub_nmi->cpu_owner)) {
 		atomic_set(&hub_nmi->cpu_owner, -1);
 		atomic_set(&hub_nmi->in_nmi, 0);
-		uv_local_mmr_clear_nmi();
+		if (hub_nmi->hub_present)
+			uv_local_mmr_clear_nmi();
+		else
+			uv_reassert_nmi();
 		raw_spin_unlock(&hub_nmi->nmi_lock);
 	}
 }
@@ -297,11 +410,12 @@ static void uv_nmi_cleanup_mask(void)
 	}
 }
 
-/* Loop waiting as cpus enter nmi handler */
+/* Loop waiting as cpus enter NMI handler */
 static int uv_nmi_wait_cpus(int first)
 {
 	int i, j, k, n = num_online_cpus();
 	int last_k = 0, waiting = 0;
+	int cpu = smp_processor_id();
 
 	if (first) {
 		cpumask_copy(uv_nmi_cpu_mask, cpu_online_mask);
@@ -310,6 +424,12 @@ static int uv_nmi_wait_cpus(int first)
 		k = n - cpumask_weight(uv_nmi_cpu_mask);
 	}
 
+	/* PCH NMI causes only one cpu to respond */
+	if (first && uv_pch_intr_now_enabled) {
+		cpumask_clear_cpu(cpu, uv_nmi_cpu_mask);
+		return n - k - 1;
+	}
+
 	udelay(uv_nmi_initial_delay);
 	for (i = 0; i < uv_nmi_retry_count; i++) {
 		int loop_delay = uv_nmi_loop_delay;
@@ -358,7 +478,7 @@ static void uv_nmi_wait(int master)
 			break;
 
 		/* if not all made it in, send IPI NMI to them */
-		pr_alert("UV: Sending NMI IPI to %d non-responding CPUs: %*pbl\n",
+		pr_alert("UV: Sending NMI IPI to %d CPUs: %*pbl\n",
 			 cpumask_weight(uv_nmi_cpu_mask),
 			 cpumask_pr_args(uv_nmi_cpu_mask));
 
@@ -538,7 +658,7 @@ static inline int uv_nmi_kdb_reason(void
 #else /* !CONFIG_KGDB_KDB */
 static inline int uv_nmi_kdb_reason(void)
 {
-	/* Insure user is expecting to attach gdb remote */
+	/* Ensure user is expecting to attach gdb remote */
 	if (uv_nmi_action_is("kgdb"))
 		return 0;
 
@@ -626,15 +746,18 @@ int uv_handle_nmi(unsigned int reason, s
 	/* Pause as all cpus enter the NMI handler */
 	uv_nmi_wait(master);
 
-	/* Dump state of each cpu */
-	if (uv_nmi_action_is("ips") || uv_nmi_action_is("dump"))
+	/* Process actions other than "kdump": */
+	if (uv_nmi_action_is("ips") || uv_nmi_action_is("dump")) {
 		uv_nmi_dump_state(cpu, regs, master);
-
-	/* Call KGDB/KDB if enabled */
-	else if (uv_nmi_action_is("kdb") || uv_nmi_action_is("kgdb"))
+	} else if (uv_nmi_action_is("kdb") || uv_nmi_action_is("kgdb")) {
 		uv_call_kgdb_kdb(cpu, regs, master);
+	} else {
+		if (master)
+			pr_alert("UV: unknown NMI action: %s\n", uv_nmi_action);
+		uv_nmi_sync_exit(master);
+	}
 
-	/* Clear per_cpu "in nmi" flag */
+	/* Clear per_cpu "in_nmi" flag */
 	this_cpu_write(uv_cpu_nmi.state, UV_NMI_STATE_OUT);
 
 	/* Clear MMR NMI flag on each hub */
@@ -648,6 +771,7 @@ int uv_handle_nmi(unsigned int reason, s
 		atomic_set(&uv_nmi_cpu, -1);
 		atomic_set(&uv_in_nmi, 0);
 		atomic_set(&uv_nmi_kexec_failed, 0);
+		atomic_set(&uv_nmi_slave_continue, SLAVE_CLEAR);
 	}
 
 	uv_nmi_touch_watchdogs();
@@ -697,28 +821,53 @@ void uv_nmi_init(void)
 	apic_write(APIC_LVT1, value);
 }
 
-void uv_nmi_setup(void)
+/* Setup HUB NMI info */
+void __init uv_nmi_setup_common(bool hubbed)
 {
 	int size = sizeof(void *) * (1 << NODES_SHIFT);
-	int cpu, nid;
+	int cpu;
 
-	/* Setup hub nmi info */
-	uv_nmi_setup_mmrs();
 	uv_hub_nmi_list = kzalloc(size, GFP_KERNEL);
-	pr_info("UV: NMI hub list @ 0x%p (%d)\n", uv_hub_nmi_list, size);
+	nmi_debug("UV: NMI hub list @ 0x%p (%d)\n", uv_hub_nmi_list, size);
 	BUG_ON(!uv_hub_nmi_list);
 	size = sizeof(struct uv_hub_nmi_s);
 	for_each_present_cpu(cpu) {
-		nid = cpu_to_node(cpu);
+		int nid = cpu_to_node(cpu);
 		if (uv_hub_nmi_list[nid] == NULL) {
 			uv_hub_nmi_list[nid] = kzalloc_node(size,
 							    GFP_KERNEL, nid);
 			BUG_ON(!uv_hub_nmi_list[nid]);
 			raw_spin_lock_init(&(uv_hub_nmi_list[nid]->nmi_lock));
 			atomic_set(&uv_hub_nmi_list[nid]->cpu_owner, -1);
+			uv_hub_nmi_list[nid]->hub_present = hubbed;
+			uv_hub_nmi_list[nid]->pch_owner = (nid == 0);
 		}
 		uv_hub_nmi_per(cpu) = uv_hub_nmi_list[nid];
 	}
 	BUG_ON(!alloc_cpumask_var(&uv_nmi_cpu_mask, GFP_KERNEL));
+}
+
+/* Setup for UV Hub systems */
+void __init uv_nmi_setup(void)
+{
+	uv_nmi_setup_mmrs();
+	uv_nmi_setup_common(true);
+	uv_register_nmi_notifier();
+	pr_info("UV: Hub NMI enabled\n");
+}
+
+/* Setup for UV Hubless systems */
+void __init uv_nmi_setup_hubless(void)
+{
+	uv_nmi_setup_common(false);
+	pch_base = xlate_dev_mem_ptr(PCH_PCR_GPIO_1_BASE);
+	nmi_debug("UV: PCH base:%p from 0x%lx, GPP_D_0\n",
+		pch_base, PCH_PCR_GPIO_1_BASE);
+	uv_init_hubless_pch_io(GPI_NMI_ENA_GPP_D_0,
+				STS_GPP_D_0_MASK, STS_GPP_D_0_MASK);
+	uv_nmi_setup_hubless_intr();
+	/* Ensure NMI enabled in Processor Interface Reg: */
+	uv_reassert_nmi();
 	uv_register_nmi_notifier();
+	pr_info("UV: Hubless NMI enabled\n");
 }
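
To spell out the read-modify-write rule used by uv_init_hubless_pch_io()
above (a stand-alone restatement, not additional kernel code): a non-zero
mask replaces the masked field with 'data', while a zero mask treats 'data'
as a write-one-to-clear status bit that is only written back if currently set:

/* Value that uv_init_hubless_pch_io() writes back to the PCH register. */
static unsigned int pch_io_new_value(unsigned int old, unsigned int mask,
				     unsigned int data)
{
	if (mask)
		return (old & ~mask) | data;	/* replace the masked field */
	return data;				/* write-1-to-clear status bit */
}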

-- 


* [PATCH 3/7] x86/platform/UV: Add basic CPU NMI health check
  2017-01-25 16:10 [PATCH 0/7] x86/platform/UV: UV Update PatchSet 2 'Mike Travis, '
  2017-01-25 16:10 ` [PATCH 1/7] x86/platform/UV: Add Support for UV4 Hubless systems 'Mike Travis, '
  2017-01-25 16:10 ` [PATCH 2/7] x86/platform/UV: Add Support for UV4 Hubless NMIs 'Mike Travis, '
@ 2017-01-25 16:10 ` 'Mike Travis, '
  2017-01-25 16:10 ` [PATCH 4/7] x86/platform/UV: Verify NMI action is valid, default is standard 'Mike Travis, '
                   ` (3 subsequent siblings)
  6 siblings, 0 replies; 9+ messages in thread
From: 'Mike Travis, ' @ 2017-01-25 16:10 UTC (permalink / raw)
  To: Ingo Molnar, Thomas Gleixner, H. Peter Anvin, Russ Anderson
  Cc: x86, linux-kernel

[-- Attachment #1: uv4_add-health-check --]
[-- Type: text/plain, Size: 2007 bytes --]

Add a low-impact health check, triggered by the system NMI command,
that essentially checks which CPUs are responding to external NMIs.

Signed-off-by: Mike Travis <travis@sgi.com>
###Acked-by: Dimitri Sivanich <sivanich@hpe.com>
###Reviewed-by: Russ Anderson <rja@hpe.com>
###Reviewed-by: Alex Thorlton <athorlton@sgi.com>
---
 arch/x86/platform/uv/uv_nmi.c |   21 ++++++++++++++++++++-
 1 file changed, 20 insertions(+), 1 deletion(-)

--- linux.orig/arch/x86/platform/uv/uv_nmi.c
+++ linux/arch/x86/platform/uv/uv_nmi.c
@@ -176,6 +176,7 @@ module_param_named(debug, uv_nmi_debug,
  *  "kdump"	- do crash dump
  *  "kdb"	- enter KDB (default)
  *  "kgdb"	- enter KGDB
+ *  "health"	- check if CPUs respond to NMI
  */
 static char uv_nmi_action[8] = "kdb";
 module_param_string(action, uv_nmi_action, sizeof(uv_nmi_action), 0644);
@@ -571,6 +572,22 @@ static void uv_nmi_sync_exit(int master)
 	}
 }
 
+/* Current "health" check is to check which CPU's are responsive */
+static void uv_nmi_action_health(int cpu, struct pt_regs *regs, int master)
+{
+	if (master) {
+		int in = atomic_read(&uv_nmi_cpus_in_nmi);
+		int out = num_online_cpus() - in;
+
+		pr_alert("UV: NMI CPU health check (non-responding:%d)\n", out);
+		atomic_set(&uv_nmi_slave_continue, SLAVE_EXIT);
+	} else {
+		while (!atomic_read(&uv_nmi_slave_continue))
+			cpu_relax();
+	}
+	uv_nmi_sync_exit(master);
+}
+
 /* Walk through cpu list and dump state of each */
 static void uv_nmi_dump_state(int cpu, struct pt_regs *regs, int master)
 {
@@ -747,7 +764,9 @@ int uv_handle_nmi(unsigned int reason, s
 	uv_nmi_wait(master);
 
 	/* Process actions other than "kdump": */
-	if (uv_nmi_action_is("ips") || uv_nmi_action_is("dump")) {
+	if (uv_nmi_action_is("health")) {
+		uv_nmi_action_health(cpu, regs, master);
+	} else if (uv_nmi_action_is("ips") || uv_nmi_action_is("dump")) {
 		uv_nmi_dump_state(cpu, regs, master);
 	} else if (uv_nmi_action_is("kdb") || uv_nmi_action_is("kgdb")) {
 		uv_call_kgdb_kdb(cpu, regs, master);
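
The health action reuses the master/slave rendezvous used throughout this
file: the master publishes how many CPUs made it into the handler and then
releases the slaves, which spin until released.  A stand-alone illustration
of that pattern in plain C11 (the kernel code uses atomic_t, cpu_relax() and
uv_nmi_sync_exit() instead):

#include <stdatomic.h>
#include <stdio.h>

static atomic_int cpus_in_nmi;		/* incremented by each CPU on entry */
static atomic_int slave_continue;	/* 0 = hold slaves, non-zero = release */

static void health_check(int master, int online_cpus)
{
	if (master) {
		int in = atomic_load(&cpus_in_nmi);

		printf("non-responding CPUs: %d\n", online_cpus - in);
		atomic_store(&slave_continue, 1);	/* release slaves */
	} else {
		while (!atomic_load(&slave_continue))
			;				/* spin until released */
	}
}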

-- 


* [PATCH 4/7] x86/platform/UV: Verify NMI action is valid, default is standard
  2017-01-25 16:10 [PATCH 0/7] x86/platform/UV: UV Update PatchSet 2 'Mike Travis, '
                   ` (2 preceding siblings ...)
  2017-01-25 16:10 ` [PATCH 3/7] x86/platform/UV: Add basic CPU NMI health check 'Mike Travis, '
@ 2017-01-25 16:10 ` 'Mike Travis, '
  2017-01-25 16:10 ` [PATCH 5/7] x86/platform/UV: Initialize PCH GPP_D_0 NMI Pin to be NMI source 'Mike Travis, '
                   ` (2 subsequent siblings)
  6 siblings, 0 replies; 9+ messages in thread
From: 'Mike Travis, ' @ 2017-01-25 16:10 UTC (permalink / raw)
  To: Ingo Molnar, Thomas Gleixner, H. Peter Anvin, Russ Anderson
  Cc: x86, linux-kernel

[-- Attachment #1: uv4_add-check-action --]
[-- Type: text/plain, Size: 2998 bytes --]

Verify that the NMI action being set is valid.  The default NMI action
changes from the non-standard 'kdb' to the more standard 'dump'.

Signed-off-by: Mike Travis <travis@sgi.com>
###Acked-by: Dimitri Sivanich <sivanich@hpe.com>
###Reviewed-by: Russ Anderson <rja@hpe.com>
###Reviewed-by: Alex Thorlton <athorlton@sgi.com>
---
Note: this patch fails checkpatch with the following error:
WARNING: do not add new typedefs
#44: FILE: arch/x86/platform/uv/uv_nmi.c:186:
+typedef char action_t[ACTION_LEN];

But I do not know another way to create the correct parameter for this check:
+#define param_check_action(name, p) __param_check(name, p, action_t)
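
(For context: __param_check() exists purely as a compile-time type check of
the variable handed to module_param_named().  Paraphrasing its definition in
include/linux/moduleparam.h, it expands to roughly:

#define __param_check(name, p, type) \
	static inline type *__check_##name(void) { return (p); }

so it needs a single type token for "type", which is why the action_t
typedef is introduced.)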
---
 arch/x86/platform/uv/uv_nmi.c |   69 +++++++++++++++++++++++++++++++++++-------
 1 file changed, 58 insertions(+), 11 deletions(-)

--- linux.orig/arch/x86/platform/uv/uv_nmi.c
+++ linux/arch/x86/platform/uv/uv_nmi.c
@@ -170,17 +170,64 @@ module_param_named(debug, uv_nmi_debug,
 			pr_info(fmt, ##__VA_ARGS__);	\
 	} while (0)
 
-/*
- * Valid NMI Actions:
- *  "dump"	- dump process stack for each cpu
- *  "ips"	- dump IP info for each cpu
- *  "kdump"	- do crash dump
- *  "kdb"	- enter KDB (default)
- *  "kgdb"	- enter KGDB
- *  "health"	- check if CPUs respond to NMI
- */
-static char uv_nmi_action[8] = "kdb";
-module_param_string(action, uv_nmi_action, sizeof(uv_nmi_action), 0644);
+/* Valid NMI Actions */
+#define	ACTION_LEN	16
+static struct nmi_action {
+	char	*action;
+	char	*desc;
+} valid_acts[] = {
+	{	"kdump",	"do kernel crash dump"			},
+	{	"dump",		"dump process stack for each cpu"	},
+	{	"ips",		"dump Inst Ptr info for each cpu"	},
+	{	"kdb",		"enter KDB (needs kgdboc= assignment)"	},
+	{	"kgdb",		"enter KGDB (needs gdb target remote)"	},
+	{	"health",	"check if CPUs respond to NMI"		},
+};
+typedef char action_t[ACTION_LEN];
+static action_t uv_nmi_action = { "dump" };
+
+static int param_get_action(char *buffer, const struct kernel_param *kp)
+{
+	return sprintf(buffer, "%s\n", uv_nmi_action);
+}
+
+static int param_set_action(const char *val, const struct kernel_param *kp)
+{
+	int i;
+	int n = ARRAY_SIZE(valid_acts);
+	char arg[ACTION_LEN], *p;
+
+	/* (remove possible '\n') */
+	strncpy(arg, val, ACTION_LEN - 1);
+	arg[ACTION_LEN - 1] = '\0';
+	p = strchr(arg, '\n');
+	if (p)
+		*p = '\0';
+
+	for (i = 0; i < n; i++)
+		if (!strcmp(arg, valid_acts[i].action))
+			break;
+
+	if (i < n) {
+		strcpy(uv_nmi_action, arg);
+		pr_info("UV: New NMI action:%s\n", uv_nmi_action);
+		return 0;
+	}
+
+	pr_err("UV: Invalid NMI action:%s, valid actions are:\n", arg);
+	for (i = 0; i < n; i++)
+		pr_err("UV: %-8s - %s\n",
+			valid_acts[i].action, valid_acts[i].desc);
+	return -EINVAL;
+}
+
+static const struct kernel_param_ops param_ops_action = {
+	.get = param_get_action,
+	.set = param_set_action,
+};
+#define param_check_action(name, p) __param_check(name, p, action_t)
+
+module_param_named(action, uv_nmi_action, action, 0644);
 
 static inline bool uv_nmi_action_is(const char *action)
 {

-- 


* [PATCH 5/7] x86/platform/UV: Initialize PCH GPP_D_0 NMI Pin to be NMI source
  2017-01-25 16:10 [PATCH 0/7] x86/platform/UV: UV Update PatchSet 2 'Mike Travis, '
                   ` (3 preceding siblings ...)
  2017-01-25 16:10 ` [PATCH 4/7] x86/platform/UV: Verify NMI action is valid, default is standard 'Mike Travis, '
@ 2017-01-25 16:10 ` 'Mike Travis, '
  2017-01-25 16:10 ` [PATCH 6/7] x86/platform/UV: Insure uv_system_init is called when necessary 'Mike Travis, '
  2017-01-25 16:10 ` [PATCH 7/7] x86/platform/UV: Clean up NMI code to match current coding style 'Mike Travis, '
  6 siblings, 0 replies; 9+ messages in thread
From: 'Mike Travis, ' @ 2017-01-25 16:10 UTC (permalink / raw)
  To: Ingo Molnar, Thomas Gleixner, H. Peter Anvin, Russ Anderson
  Cc: x86, linux-kernel

[-- Attachment #1: uv4_add-hubless-nmi-init-pch --]
[-- Type: text/plain, Size: 4907 bytes --]

The PCH NMI I/O initialization function is kept separate because it may
be moved into BIOS for security reasons.  The function detects whether
the PCH NMI config has already been done and, if not, initializes the
PCH here.

Signed-off-by: Mike Travis <travis@sgi.com>
###Acked-by: Dimitri Sivanich <sivanich@hpe.com>
###Reviewed-by: Russ Anderson <rja@hpe.com>
---
 arch/x86/platform/uv/uv_nmi.c |  127 ++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 127 insertions(+)

--- linux.orig/arch/x86/platform/uv/uv_nmi.c
+++ linux/arch/x86/platform/uv/uv_nmi.c
@@ -71,6 +71,7 @@ EXPORT_PER_CPU_SYMBOL_GPL(uv_cpu_nmi);
 static int *pch_base;
 #define NMI_CONTROL_PORT	0x70
 #define NMI_DUMMY_PORT		0x71
+#define PAD_OWN_GPP_D_0		0x2c
 #define GPI_NMI_STS_GPP_D_0	0x164
 #define GPI_NMI_ENA_GPP_D_0	0x174
 #define STS_GPP_D_0_MASK	0x1
@@ -161,6 +162,9 @@ static bool uv_pch_intr_enable = true;
 static bool uv_pch_intr_now_enabled;
 module_param_named(pch_intr_enable, uv_pch_intr_enable, bool, 0644);
 
+static bool uv_pch_init_enable = true;
+module_param_named(pch_init_enable, uv_pch_init_enable, bool, 0644);
+
 static int uv_nmi_debug;
 module_param_named(debug, uv_nmi_debug, int, 0644);
 
@@ -308,6 +312,127 @@ static void uv_nmi_setup_hubless_intr(vo
 		uv_pch_intr_now_enabled ? "enabled" : "disabled");
 }
 
+static struct init_nmi {
+	unsigned int	offset;
+	unsigned int	mask;
+	unsigned int	data;
+} init_nmi[] = {
+	{	/* HOSTSW_OWN_GPP_D_0 */
+	.offset = 0x84,
+	.mask = 0x1,
+	.data = 0x0,	/* ACPI Mode */
+	},
+
+/* clear status */
+	{	/* GPI_INT_STS_GPP_D_0 */
+	.offset = 0x104,
+	.mask = 0x0,
+	.data = 0x1,	/* Clear Status */
+	},
+	{	/* GPI_GPE_STS_GPP_D_0 */
+	.offset = 0x124,
+	.mask = 0x0,
+	.data = 0x1,	/* Clear Status */
+	},
+	{	/* GPI_SMI_STS_GPP_D_0 */
+	.offset = 0x144,
+	.mask = 0x0,
+	.data = 0x1,	/* Clear Status */
+	},
+	{	/* GPI_NMI_STS_GPP_D_0 */
+	.offset = 0x164,
+	.mask = 0x0,
+	.data = 0x1,	/* Clear Status */
+	},
+
+/* disable interrupts */
+	{	/* GPI_INT_EN_GPP_D_0 */
+	.offset = 0x114,
+	.mask = 0x1,
+	.data = 0x0,	/* disable interrupt generation */
+	},
+	{	/* GPI_GPE_EN_GPP_D_0 */
+	.offset = 0x134,
+	.mask = 0x1,
+	.data = 0x0,	/* disable interrupt generation */
+	},
+	{	/* GPI_SMI_EN_GPP_D_0 */
+	.offset = 0x154,
+	.mask = 0x1,
+	.data = 0x0,	/* disable interrupt generation */
+	},
+	{	/* GPI_NMI_EN_GPP_D_0 */
+	.offset = 0x174,
+	.mask = 0x1,
+	.data = 0x0,	/* disable interrupt generation */
+	},
+
+/* setup GPP_D_0 Pad Config */
+	{	/* PAD_CFG_DW0_GPP_D_0 */
+	.offset = 0x4c0,
+	.mask = 0xffffffff,
+	.data = 0x82020100,
+/*
+ *  31:30 Pad Reset Config (PADRSTCFG): = 2h  # PLTRST# (default)
+ *
+ *  29    RX Pad State Select (RXPADSTSEL): = 0 # Raw RX pad state directly
+ *                                                from RX buffer (default)
+ *
+ *  28    RX Raw Override to '1' (RXRAW1): = 0 # No Override
+ *
+ *  26:25 RX Level/Edge Configuration (RXEVCFG):
+ *      = 0h # Level
+ *      = 1h # Edge
+ *
+ *  23    RX Invert (RXINV): = 0 # No Inversion (signal active high)
+ *
+ *  20    GPIO Input Route IOxAPIC (GPIROUTIOXAPIC):
+ * = 0 # Routing does not cause peripheral IRQ...
+ *     # (we want an NMI not an IRQ)
+ *
+ *  19    GPIO Input Route SCI (GPIROUTSCI): = 0 # Routing does not cause SCI.
+ *  18    GPIO Input Route SMI (GPIROUTSMI): = 0 # Routing does not cause SMI.
+ *  17    GPIO Input Route NMI (GPIROUTNMI): = 1 # Routing can cause NMI.
+ *
+ *  11:10 Pad Mode (PMODE1/0): = 0h = GPIO control the Pad.
+ *   9    GPIO RX Disable (GPIORXDIS):
+ * = 0 # Enable the input buffer (active low enable)
+ *
+ *   8    GPIO TX Disable (GPIOTXDIS):
+ * = 1 # Disable the output buffer; i.e. Hi-Z
+ *
+ *   1 GPIO RX State (GPIORXSTATE): This is the current internal RX pad state..
+ *   0 GPIO TX State (GPIOTXSTATE):
+ * = 0 # (Leave at default)
+ */
+	},
+
+/* Pad Config DW1 */
+	{	/* PAD_CFG_DW1_GPP_D_0 */
+	.offset = 0x4c4,
+	.mask = 0x3c00,
+	.data = 0,	/* Termination = none (default) */
+	},
+};
+
+static void uv_init_hubless_pch_d0(void)
+{
+	int i, read;
+
+	read = *PCH_PCR_GPIO_ADDRESS(PAD_OWN_GPP_D_0);
+	if (read != 0) {
+		pr_info("UV: Hubless NMI already configured\n");
+		return;
+	}
+
+	nmi_debug("UV: Initializing UV Hubless NMI on PCH\n");
+	for (i = 0; i < ARRAY_SIZE(init_nmi); i++) {
+		uv_init_hubless_pch_io(init_nmi[i].offset,
+					init_nmi[i].mask,
+					init_nmi[i].data);
+	}
+}
+
 static int uv_nmi_test_hubless(struct uv_hub_nmi_s *hub_nmi)
 {
 	int *pstat = PCH_PCR_GPIO_ADDRESS(GPI_NMI_STS_GPP_D_0);
@@ -936,6 +1061,8 @@ void __init uv_nmi_setup_hubless(void)
 	pch_base = xlate_dev_mem_ptr(PCH_PCR_GPIO_1_BASE);
 	nmi_debug("UV: PCH base:%p from 0x%lx, GPP_D_0\n",
 		pch_base, PCH_PCR_GPIO_1_BASE);
+	if (uv_pch_init_enable)
+		uv_init_hubless_pch_d0();
 	uv_init_hubless_pch_io(GPI_NMI_ENA_GPP_D_0,
 				STS_GPP_D_0_MASK, STS_GPP_D_0_MASK);
 	uv_nmi_setup_hubless_intr();
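
As a cross-check of the PAD_CFG_DW0_GPP_D_0 value documented field by field
in the table above, the same number can be assembled from the bits called
out there (the macro name below is only illustrative):

/* PADRSTCFG=2 (bits 31:30), RXEVCFG=1 (bits 26:25, edge),
 * GPIROUTNMI=1 (bit 17), GPIOTXDIS=1 (bit 8), all other fields 0.
 */
#define GPP_D_0_DW0_EXAMPLE	((2u << 30) | (1u << 25) | (1u << 17) | (1u << 8))
/* GPP_D_0_DW0_EXAMPLE == 0x82020100, matching init_nmi[].data above. */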

-- 


* [PATCH 6/7] x86/platform/UV: Insure uv_system_init is called when necessary
  2017-01-25 16:10 [PATCH 0/7] x86/platform/UV: UV Update PatchSet 2 'Mike Travis, '
                   ` (4 preceding siblings ...)
  2017-01-25 16:10 ` [PATCH 5/7] x86/platform/UV: Initialize PCH GPP_D_0 NMI Pin to be NMI source 'Mike Travis, '
@ 2017-01-25 16:10 ` 'Mike Travis, '
  2017-01-25 16:10 ` [PATCH 7/7] x86/platform/UV: Clean up NMI code to match current coding style 'Mike Travis, '
  6 siblings, 0 replies; 9+ messages in thread
From: 'Mike Travis, ' @ 2017-01-25 16:10 UTC (permalink / raw)
  To: Ingo Molnar, Thomas Gleixner, H. Peter Anvin, Russ Anderson
  Cc: x86, linux-kernel

[-- Attachment #1: uv4_call_uv_system_init --]
[-- Type: text/plain, Size: 970 bytes --]

Move the check of whether this is a UV system that needs initialization
from the is_uv_system() call site into the internal uv_system_init()
function.  This is because on a UV system without a hub, is_uv_system()
returns false, but some UV-specific initialization is still needed.  See
the change to uv_system_init() for the quick check of whether UV is
applicable.  This change should not add overhead, since the
is_uv_system() call it replaces also called into this same code.

Signed-off-by: Mike Travis <travis@sgi.com>
###Acked-by: Dimitri Sivanich <sivanich@hpe.com>
###Reviewed-by: Russ Anderson <rja@hpe.com>
---
 arch/x86/kernel/smpboot.c |    3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

--- linux.orig/arch/x86/kernel/smpboot.c
+++ linux/arch/x86/kernel/smpboot.c
@@ -1341,8 +1341,7 @@ void __init native_smp_prepare_cpus(unsi
 	pr_info("CPU0: ");
 	print_cpu_info(&cpu_data(0));
 
-	if (is_uv_system())
-		uv_system_init();
+	uv_system_init();
 
 	set_mtrr_aps_delayed_init();
 

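The overhead claim holds because of how uv_system_init() is declared for
each configuration; both fragments below are quoted from the tree as
modified by patches 1 and 2 of this series:

/* Non-UV configs (arch/x86/include/asm/uv/uv.h): the call is a no-op stub. */
static inline void uv_system_init(void)	{ }

/* UV configs (arch/x86/kernel/apic/x2apic_uv_x.c): quick check, then dispatch. */
void __init uv_system_init(void)
{
	if (likely(!is_uv_system() && !is_uv_hubless()))
		return;

	if (is_uv_system())
		uv_system_init_hub();
	else
		uv_nmi_setup_hubless();
}
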
-- 


* [PATCH 7/7] x86/platform/UV: Clean up NMI code to match current coding style
  2017-01-25 16:10 [PATCH 0/7] x86/platform/UV: UV Update PatchSet 2 'Mike Travis, '
                   ` (5 preceding siblings ...)
  2017-01-25 16:10 ` [PATCH 6/7] x86/platform/UV: Insure uv_system_init is called when necessary 'Mike Travis, '
@ 2017-01-25 16:10 ` 'Mike Travis, '
  6 siblings, 0 replies; 9+ messages in thread
From: 'Mike Travis, ' @ 2017-01-25 16:10 UTC (permalink / raw)
  To: Ingo Molnar, Thomas Gleixner, H. Peter Anvin, Russ Anderson
  Cc: x86, linux-kernel

[-- Attachment #1: uv_clean_up_nmi_code --]
[-- Type: text/plain, Size: 9205 bytes --]

Update UV NMI to current coding style.

Signed-off-by: Mike Travis <travis@sgi.com>
---
 arch/x86/platform/uv/uv_nmi.c |   74 +++++++++++++++++++++---------------------
 1 file changed, 37 insertions(+), 37 deletions(-)

--- linux.orig/arch/x86/platform/uv/uv_nmi.c
+++ linux/arch/x86/platform/uv/uv_nmi.c
@@ -45,8 +45,8 @@
  *
  * Handle system-wide NMI events generated by the global 'power nmi' command.
  *
- * Basic operation is to field the NMI interrupt on each cpu and wait
- * until all cpus have arrived into the nmi handler.  If some cpus do not
+ * Basic operation is to field the NMI interrupt on each CPU and wait
+ * until all CPU's have arrived into the nmi handler.  If some CPU's do not
  * make it into the handler, try and force them in with the IPI(NMI) signal.
  *
  * We also have to lessen UV Hub MMR accesses as much as possible as this
@@ -56,7 +56,7 @@
  * To do this we register our primary NMI notifier on the NMI_UNKNOWN
  * chain.  This reduces the number of false NMI calls when the perf
  * tools are running which generate an enormous number of NMIs per
- * second (~4M/s for 1024 cpu threads).  Our secondary NMI handler is
+ * second (~4M/s for 1024 CPU threads).  Our secondary NMI handler is
  * very short as it only checks that if it has been "pinged" with the
  * IPI(NMI) signal as mentioned above, and does not read the UV Hub's MMR.
  *
@@ -113,7 +113,7 @@ static int param_get_local64(char *buffe
 
 static int param_set_local64(const char *val, const struct kernel_param *kp)
 {
-	/* clear on any write */
+	/* Clear on any write */
 	local64_set((local64_t *)kp->arg, 0);
 	return 0;
 }
@@ -322,7 +322,7 @@ static struct init_nmi {
 	.data = 0x0,	/* ACPI Mode */
 	},
 
-/* clear status */
+/* Clear status: */
 	{	/* GPI_INT_STS_GPP_D_0 */
 	.offset = 0x104,
 	.mask = 0x0,
@@ -344,29 +344,29 @@ static struct init_nmi {
 	.data = 0x1,	/* Clear Status */
 	},
 
-/* disable interrupts */
+/* Disable interrupts: */
 	{	/* GPI_INT_EN_GPP_D_0 */
 	.offset = 0x114,
 	.mask = 0x1,
-	.data = 0x0,	/* disable interrupt generation */
+	.data = 0x0,	/* Disable interrupt generation */
 	},
 	{	/* GPI_GPE_EN_GPP_D_0 */
 	.offset = 0x134,
 	.mask = 0x1,
-	.data = 0x0,	/* disable interrupt generation */
+	.data = 0x0,	/* Disable interrupt generation */
 	},
 	{	/* GPI_SMI_EN_GPP_D_0 */
 	.offset = 0x154,
 	.mask = 0x1,
-	.data = 0x0,	/* disable interrupt generation */
+	.data = 0x0,	/* Disable interrupt generation */
 	},
 	{	/* GPI_NMI_EN_GPP_D_0 */
 	.offset = 0x174,
 	.mask = 0x1,
-	.data = 0x0,	/* disable interrupt generation */
+	.data = 0x0,	/* Disable interrupt generation */
 	},
 
-/* setup GPP_D_0 Pad Config */
+/* Setup GPP_D_0 Pad Config: */
 	{	/* PAD_CFG_DW0_GPP_D_0 */
 	.offset = 0x4c0,
 	.mask = 0xffffffff,
@@ -444,7 +444,7 @@ static int uv_nmi_test_hubless(struct uv
 		return 0;
 
 	*pstat = STS_GPP_D_0_MASK;	/* Is a UV NMI: clear GPP_D_0 status */
-	(void)*pstat;			/* flush write */
+	(void)*pstat;			/* Flush write */
 
 	return 1;
 }
@@ -461,8 +461,8 @@ static int uv_test_nmi(struct uv_hub_nmi
 }
 
 /*
- * If first cpu in on this hub, set hub_nmi "in_nmi" and "owner" values and
- * return true.  If first cpu in on the system, set global "in_nmi" flag.
+ * If first CPU in on this hub, set hub_nmi "in_nmi" and "owner" values and
+ * return true.  If first CPU in on the system, set global "in_nmi" flag.
  */
 static int uv_set_in_nmi(int cpu, struct uv_hub_nmi_s *hub_nmi)
 {
@@ -496,7 +496,7 @@ static int uv_check_nmi(struct uv_hub_nm
 		if (raw_spin_trylock(&hub_nmi->nmi_lock)) {
 			nmi_detected = uv_test_nmi(hub_nmi);
 
-			/* check flag for UV external NMI */
+			/* Check flag for UV external NMI */
 			if (nmi_detected > 0) {
 				uv_set_in_nmi(cpu, hub_nmi);
 				nmi = 1;
@@ -516,7 +516,7 @@ static int uv_check_nmi(struct uv_hub_nm
 slave_wait:		cpu_relax();
 			udelay(uv_nmi_slave_delay);
 
-			/* re-check hub in_nmi flag */
+			/* Re-check hub in_nmi flag */
 			nmi = atomic_read(&hub_nmi->in_nmi);
 			if (nmi)
 				break;
@@ -560,7 +560,7 @@ static inline void uv_clear_nmi(int cpu)
 	}
 }
 
-/* Ping non-responding cpus attemping to force them into the NMI handler */
+/* Ping non-responding CPU's attemping to force them into the NMI handler */
 static void uv_nmi_nr_cpus_ping(void)
 {
 	int cpu;
@@ -571,7 +571,7 @@ static void uv_nmi_nr_cpus_ping(void)
 	apic->send_IPI_mask(uv_nmi_cpu_mask, APIC_DM_NMI);
 }
 
-/* Clean up flags for cpus that ignored both NMI and ping */
+/* Clean up flags for CPU's that ignored both NMI and ping */
 static void uv_nmi_cleanup_mask(void)
 {
 	int cpu;
@@ -583,7 +583,7 @@ static void uv_nmi_cleanup_mask(void)
 	}
 }
 
-/* Loop waiting as cpus enter NMI handler */
+/* Loop waiting as CPU's enter NMI handler */
 static int uv_nmi_wait_cpus(int first)
 {
 	int i, j, k, n = num_online_cpus();
@@ -597,7 +597,7 @@ static int uv_nmi_wait_cpus(int first)
 		k = n - cpumask_weight(uv_nmi_cpu_mask);
 	}
 
-	/* PCH NMI causes only one cpu to respond */
+	/* PCH NMI causes only one CPU to respond */
 	if (first && uv_pch_intr_now_enabled) {
 		cpumask_clear_cpu(cpu, uv_nmi_cpu_mask);
 		return n - k - 1;
@@ -618,13 +618,13 @@ static int uv_nmi_wait_cpus(int first)
 			k = n;
 			break;
 		}
-		if (last_k != k) {	/* abort if no new cpus coming in */
+		if (last_k != k) {	/* abort if no new CPU's coming in */
 			last_k = k;
 			waiting = 0;
 		} else if (++waiting > uv_nmi_wait_count)
 			break;
 
-		/* extend delay if waiting only for cpu 0 */
+		/* Extend delay if waiting only for CPU 0: */
 		if (waiting && (n - k) == 1 &&
 		    cpumask_test_cpu(0, uv_nmi_cpu_mask))
 			loop_delay *= 100;
@@ -635,29 +635,29 @@ static int uv_nmi_wait_cpus(int first)
 	return n - k;
 }
 
-/* Wait until all slave cpus have entered UV NMI handler */
+/* Wait until all slave CPU's have entered UV NMI handler */
 static void uv_nmi_wait(int master)
 {
-	/* indicate this cpu is in */
+	/* Indicate this CPU is in: */
 	this_cpu_write(uv_cpu_nmi.state, UV_NMI_STATE_IN);
 
-	/* if not the first cpu in (the master), then we are a slave cpu */
+	/* If not the first CPU in (the master), then we are a slave CPU */
 	if (!master)
 		return;
 
 	do {
-		/* wait for all other cpus to gather here */
+		/* Wait for all other CPU's to gather here */
 		if (!uv_nmi_wait_cpus(1))
 			break;
 
-		/* if not all made it in, send IPI NMI to them */
+		/* If not all made it in, send IPI NMI to them */
 		pr_alert("UV: Sending NMI IPI to %d CPUs: %*pbl\n",
 			 cpumask_weight(uv_nmi_cpu_mask),
 			 cpumask_pr_args(uv_nmi_cpu_mask));
 
 		uv_nmi_nr_cpus_ping();
 
-		/* if all cpus are in, then done */
+		/* If all CPU's are in, then done */
 		if (!uv_nmi_wait_cpus(0))
 			break;
 
@@ -709,7 +709,7 @@ static void uv_nmi_dump_state_cpu(int cp
 	this_cpu_write(uv_cpu_nmi.state, UV_NMI_STATE_DUMP_DONE);
 }
 
-/* Trigger a slave cpu to dump it's state */
+/* Trigger a slave CPU to dump it's state */
 static void uv_nmi_trigger_dump(int cpu)
 {
 	int retry = uv_nmi_trigger_delay;
@@ -730,7 +730,7 @@ static void uv_nmi_trigger_dump(int cpu)
 	uv_cpu_nmi_per(cpu).state = UV_NMI_STATE_DUMP_DONE;
 }
 
-/* Wait until all cpus ready to exit */
+/* Wait until all CPU's ready to exit */
 static void uv_nmi_sync_exit(int master)
 {
 	atomic_dec(&uv_nmi_cpus_in_nmi);
@@ -760,7 +760,7 @@ static void uv_nmi_action_health(int cpu
 	uv_nmi_sync_exit(master);
 }
 
-/* Walk through cpu list and dump state of each */
+/* Walk through CPU list and dump state of each */
 static void uv_nmi_dump_state(int cpu, struct pt_regs *regs, int master)
 {
 	if (master) {
@@ -872,7 +872,7 @@ static void uv_call_kgdb_kdb(int cpu, st
 		if (reason < 0)
 			return;
 
-		/* call KGDB NMI handler as MASTER */
+		/* Call KGDB NMI handler as MASTER */
 		ret = kgdb_nmicallin(cpu, X86_TRAP_NMI, regs, reason,
 				&uv_nmi_slave_continue);
 		if (ret) {
@@ -880,7 +880,7 @@ static void uv_call_kgdb_kdb(int cpu, st
 			atomic_set(&uv_nmi_slave_continue, SLAVE_EXIT);
 		}
 	} else {
-		/* wait for KGDB signal that it's ready for slaves to enter */
+		/* Wait for KGDB signal that it's ready for slaves to enter */
 		int sig;
 
 		do {
@@ -888,7 +888,7 @@ static void uv_call_kgdb_kdb(int cpu, st
 			sig = atomic_read(&uv_nmi_slave_continue);
 		} while (!sig);
 
-		/* call KGDB as slave */
+		/* Call KGDB as slave */
 		if (sig == SLAVE_CONTINUE)
 			kgdb_nmicallback(cpu, regs);
 	}
@@ -932,7 +932,7 @@ int uv_handle_nmi(unsigned int reason, s
 			strncpy(uv_nmi_action, "dump", strlen(uv_nmi_action));
 	}
 
-	/* Pause as all cpus enter the NMI handler */
+	/* Pause as all CPU's enter the NMI handler */
 	uv_nmi_wait(master);
 
 	/* Process actions other than "kdump": */
@@ -972,7 +972,7 @@ int uv_handle_nmi(unsigned int reason, s
 }
 
 /*
- * NMI handler for pulling in CPUs when perf events are grabbing our NMI
+ * NMI handler for pulling in CPU's when perf events are grabbing our NMI
  */
 static int uv_handle_nmi_ping(unsigned int reason, struct pt_regs *regs)
 {
@@ -1005,7 +1005,7 @@ void uv_nmi_init(void)
 	unsigned int value;
 
 	/*
-	 * Unmask NMI on all cpus
+	 * Unmask NMI on all CPU's
 	 */
 	value = apic_read(APIC_LVT1) | APIC_DM_NMI;
 	value &= ~APIC_LVT_MASKED;

-- 


* [PATCH 6/7] x86/platform/UV: Insure uv_system_init is called when necessary
  2017-01-25 16:35 [PATCH 0/7] x86/platform/UV: UV Update PatchSet 2 'Mike Travis, '
@ 2017-01-25 16:35 ` 'Mike Travis, '
  0 siblings, 0 replies; 9+ messages in thread
From: 'Mike Travis, ' @ 2017-01-25 16:35 UTC (permalink / raw)
  To: Ingo Molnar, Thomas Gleixner, H. Peter Anvin, Russ Anderson
  Cc: x86, linux-kernel, Dimitri Sivanich

[-- Attachment #1: uv4_call_uv_system_init --]
[-- Type: text/plain, Size: 964 bytes --]

Move the check of whether this is a UV system that needs initialization
from the is_uv_system() call site into the internal uv_system_init()
function.  This is because on a UV system without a hub, is_uv_system()
returns false, but some UV-specific initialization is still needed.  See
the change to uv_system_init() for the quick check of whether UV is
applicable.  This change should not add overhead, since the
is_uv_system() call it replaces also called into this same code.

Signed-off-by: Mike Travis <travis@sgi.com>
Acked-by: Dimitri Sivanich <sivanich@hpe.com>
Reviewed-by: Russ Anderson <rja@hpe.com>
---
 arch/x86/kernel/smpboot.c |    3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

--- linux.orig/arch/x86/kernel/smpboot.c
+++ linux/arch/x86/kernel/smpboot.c
@@ -1341,8 +1341,7 @@ void __init native_smp_prepare_cpus(unsi
 	pr_info("CPU0: ");
 	print_cpu_info(&cpu_data(0));
 
-	if (is_uv_system())
-		uv_system_init();
+	uv_system_init();
 
 	set_mtrr_aps_delayed_init();
 

-- 


