* [PATCH][RFC] Discontigmem support for the x440
@ 2003-02-06  7:10 Patricia Gaughen
  2003-02-08 19:23 ` Martin J. Bligh
  0 siblings, 1 reply; 5+ messages in thread
From: Patricia Gaughen @ 2003-02-06  7:10 UTC (permalink / raw)
  To: linux-kernel; +Cc: chandra.sekharan, cleverdj, johnstul


This patch provides discontigmem support for the IBM x440.  This code has 
passed through the hands of several developers:  Chandra Seetharaman, James 
Cleverdon, John Stultz, and last to touch it, me :-)  This patch requires full 
acpi support.

I've tested this patch on an 8-way x440 with 16 GB of RAM, with and without HT 
(acpi=off).

Any and all feedback regarding this patch is greatly appreciated.

Thanks,
Pat

-- 
Patricia Gaughen (gone@us.ibm.com)
IBM Linux Technology Center
http://www.ibm.com/linux/ltc/

diff -Nru a/arch/i386/Kconfig b/arch/i386/Kconfig
--- a/arch/i386/Kconfig	Wed Feb  5 19:15:58 2003
+++ b/arch/i386/Kconfig	Wed Feb  5 19:15:58 2003
@@ -474,7 +474,7 @@
 # Common NUMA Features
 config NUMA
 	bool "Numa Memory Allocation Support"
-	depends on X86_NUMAQ
+	depends on (X86_NUMAQ || X86_SUMMIT)
 
 config DISCONTIGMEM
 	bool
diff -Nru a/arch/i386/kernel/Makefile b/arch/i386/kernel/Makefile
--- a/arch/i386/kernel/Makefile	Wed Feb  5 19:15:58 2003
+++ b/arch/i386/kernel/Makefile	Wed Feb  5 19:15:58 2003
@@ -31,6 +31,9 @@
 obj-$(CONFIG_EDD)             	+= edd.o
 obj-$(CONFIG_MODULES)		+= module.o
 obj-y				+= sysenter.o
+ifdef CONFIG_NUMA
+obj-$(CONFIG_X86_SUMMIT) 	+= srat.o
+endif
 
 EXTRA_AFLAGS   := -traditional
 
diff -Nru a/arch/i386/kernel/srat.c b/arch/i386/kernel/srat.c
--- /dev/null	Wed Dec 31 16:00:00 1969
+++ b/arch/i386/kernel/srat.c	Wed Feb  5 19:15:58 2003
@@ -0,0 +1,411 @@
+/*
+ * Some of the code in this file has been gleaned from the 64 bit 
+ * discontigmem support code base.
+ *
+ * Copyright (C) 2002, IBM Corp.
+ *
+ * All rights reserved.          
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT.  See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * Send feedback to Pat Gaughen <gone@us.ibm.com>
+ */
+
+/*
+ * ACPI 2.0 SRAT Table
+ * http://www.microsoft.com/HWDEV/design/SRAT.htm
+ * Processor and Memory affinity information
+ */
+
+#include <linux/config.h>
+#include <linux/mm.h>
+#include <linux/bootmem.h>
+#include <linux/mmzone.h>
+#include <linux/acpi.h>
+#include <asm/tlbflush.h>
+#include <asm/srat.h>
+
+#define NUM_KLUDGE_PAGES	4	/* Size of page descriptor kludge */
+#define PAGE_KLUDGE_START	((u32 *)empty_zero_page - NUM_KLUDGE_PAGES)
+
+/*
+ * proximity macros and definitions
+ */
+#define NODE_ARRAY_INDEX(x)	((x) / 8)	/* 8 bits/char */
+#define NODE_ARRAY_OFFSET(x)	((x) % 8)	/* 8 bits/char */
+#define BMAP_SET(bmap, bit)	((bmap)[NODE_ARRAY_INDEX(bit)] |= 1 << NODE_ARRAY_OFFSET(bit))
+#define BMAP_TEST(bmap, bit)	((bmap)[NODE_ARRAY_INDEX(bit)] & (1 << NODE_ARRAY_OFFSET(bit)))
+#define MAX_PXM_DOMAINS		256	/* 1 byte and no promises about values */
+/* bitmap length; _PXM is at most 255 */
+#define PXM_BITMAP_LEN (MAX_PXM_DOMAINS / 8) 
+static u8 pxm_bitmap[PXM_BITMAP_LEN];	/* bitmap of proximity domains */
+
+struct node_memory_chunk_s node_memory_chunk[MAXCLUMPS];
+
+static int num_memory_chunks;		/* total number of memory chunks */
+static unsigned long zholes_size[MAX_NUMNODES];
+
+unsigned long node_start_pfn[MAX_NUMNODES];
+unsigned long node_end_pfn[MAX_NUMNODES];
+
+/* extern unsigned char acpi_checksum(void *buffer, int length); */
+
+/* Identify which cnode a physical address resides on */
+int pfn_to_nid(unsigned long pfn)
+{
+	int	i;
+	struct node_memory_chunk_s *nmcp;
+
+	/* We've got a sorted list.  Binary search here?  Do we care?? */
+	nmcp = node_memory_chunk;
+	for (i = num_memory_chunks; --i >= 0; nmcp++)
+		if (pfn >= nmcp->start_pfn && pfn <= nmcp->end_pfn)
+			return (int)nmcp->nid;
+
+	return -1;
+}
+
+/* Identify CPU proximity domains */
+
+static void __init parse_cpu_affinity_structure(char *p)
+{
+	struct acpi_table_processor_affinity *cpu_affinity = 
+				(struct acpi_table_processor_affinity *) p;
+
+	if (!cpu_affinity->flags.enabled)
+		return;		/* empty entry */
+
+	/* mark this node as "seen" in node bitmap */
+	BMAP_SET(pxm_bitmap, cpu_affinity->proximity_domain);
+
+	printk("CPU 0x%02X in proximity domain 0x%02X\n",
+		cpu_affinity->apic_id, cpu_affinity->proximity_domain);
+}
+
+/*
+ * Identify memory proximity domains and hot-remove capabilities.
+ * Fill node memory chunk list structure.
+ */
+
+static void __init parse_memory_affinity_structure (char *sratp)
+{
+	unsigned long long paddr, size;
+	unsigned long start_pfn, end_pfn; 
+	u8 pxm;
+	struct node_memory_chunk_s *p, *q, *pend;
+	struct acpi_table_memory_affinity *memory_affinity =
+			(struct acpi_table_memory_affinity *) sratp;
+
+	if (!memory_affinity->flags.enabled)
+		return;		/* empty entry */
+
+	/* mark this node as "seen" in node bitmap */
+	BMAP_SET(pxm_bitmap, memory_affinity->proximity_domain);
+
+	/* calculate info for memory chunk structure */
+	paddr = memory_affinity->base_addr_hi;
+	paddr = (paddr << 32) | memory_affinity->base_addr_lo;
+	size = memory_affinity->length_hi;
+	size = (size << 32) | memory_affinity->length_lo;
+	
+	start_pfn = paddr >> PAGE_SHIFT;
+	end_pfn = (paddr + size) >> PAGE_SHIFT;
+	
+	pxm = memory_affinity->proximity_domain;
+
+	if (num_memory_chunks >= MAXCLUMPS) {
+		printk("Too many mem chunks in SRAT. Ignoring %lld MBytes at %llx\n",
+			size/(1024*1024), paddr);
+		return;
+	}
+
+	/* Insertion sort based on base address */
+	pend = &node_memory_chunk[num_memory_chunks];
+	for (p = &node_memory_chunk[0]; p < pend; p++) {
+		if (start_pfn < p->start_pfn)
+			break;
+	}
+	if (p < pend) {
+		for (q = pend; q >= p; q--)
+			*(q + 1) = *q;
+	}
+	p->start_pfn = start_pfn;
+	p->end_pfn = end_pfn;
+	p->pxm = pxm;
+
+	num_memory_chunks++;
+
+
+	printk("Memory range 0x%lX to 0x%lX (type 0x%X) in proximity domain 0x%02X %s\n",
+		start_pfn, end_pfn,
+		memory_affinity->memory_type,
+		memory_affinity->proximity_domain,
+		(memory_affinity->flags.hot_pluggable ?
+		 "enabled and removable" : "enabled" ) );
+}
+
+
+/* Parse the ACPI Static Resource Affinity Table */
+static int __init acpi20_parse_srat(struct acpi_table_srat *sratp)
+{
+	u8 *start, *end, *p;
+	int i, j, nid;
+	u8 pxm_to_nid_map[MAX_PXM_DOMAINS];/* _PXM to logical node ID map */
+	u8 nid_to_pxm_map[MAX_NUMNODES];/* logical node ID to _PXM map */
+
+	start = (u8 *)(&(sratp->reserved) + 1);	/* skip header */
+	p = start;
+	end = (u8 *)sratp + sratp->header.length;
+
+	memset(pxm_bitmap, 0, sizeof(pxm_bitmap));	/* init proximity domain bitmap */
+	memset(node_memory_chunk, 0, sizeof(node_memory_chunk));
+	memset(zholes_size, 0, sizeof(zholes_size));
+
+	/* -1 in these maps means not available */
+	memset(pxm_to_nid_map, -1, sizeof(pxm_to_nid_map));
+	memset(nid_to_pxm_map, -1, sizeof(nid_to_pxm_map));
+
+	num_memory_chunks = 0;
+	while (p < end) {
+		switch (*p) {
+		case ACPI_SRAT_PROCESSOR_AFFINITY:
+			parse_cpu_affinity_structure(p);
+			break;
+		case ACPI_SRAT_MEMORY_AFFINITY:
+			parse_memory_affinity_structure(p);
+			break;
+		default:
+			printk("ACPI 2.0 SRAT: unknown entry skipped: type=0x%02X, len=%d\n", p[0], p[1]);
+			break;
+		}
+		if (p[1] == 0) {
+			printk("acpi20_parse_srat: Entry length value is zero;"
+				" can't parse any further!\n");
+			break;
+		}
+		p += p[1];
+	}
+
+	/* Calculate total number of nodes in system from PXM bitmap and create
+	 * a set of sequential node IDs starting at zero.  (ACPI doesn't seem
+	 * to specify the range of _PXM values.)
+	 */
+	numnodes = 0;		/* init total nodes in system */
+	for (i = 0; i < MAX_PXM_DOMAINS; i++) {
+		if (BMAP_TEST(pxm_bitmap, i)) {
+			pxm_to_nid_map[i] = numnodes;
+			nid_to_pxm_map[numnodes] = i;
+			node_set_online(numnodes);
+			++numnodes;
+		}
+	}
+
+	if (numnodes == 0)
+		BUG();
+
+	/* set cnode id in memory chunk structure */
+	for (i = 0; i < num_memory_chunks; i++)
+		node_memory_chunk[i].nid = pxm_to_nid_map[node_memory_chunk[i].pxm];
+
+	printk("pxm bitmap: ");
+	for (i = 0; i < sizeof(pxm_bitmap); i++) {
+		printk("%02X ", pxm_bitmap[i]);
+	}
+	printk("\n");
+	printk("Number of logical nodes in system = %d\n", numnodes);
+	printk("Number of memory chunks in system = %d\n", num_memory_chunks);
+
+	for (j = 0; j < num_memory_chunks; j++){
+		printk("chunk %d nid %d start_pfn %08lx end_pfn %08lx\n",
+		       j, node_memory_chunk[j].nid,
+		       node_memory_chunk[j].start_pfn,
+		       node_memory_chunk[j].end_pfn);
+	}
+ 
+	/*calculate node_start_pfn/node_end_pfn arrays*/
+	for (nid = 0; nid < numnodes; nid++) {
+		int been_here_before = 0;
+
+		for (j = 0; j < num_memory_chunks; j++){
+			if (node_memory_chunk[j].nid == nid) {
+				if (been_here_before == 0) {
+					node_start_pfn[nid] = node_memory_chunk[j].start_pfn;
+					node_end_pfn[nid] = node_memory_chunk[j].end_pfn;
+					been_here_before = 1;
+				} else { /* We've found another chunk of memory for the node */
+					if (node_start_pfn[nid] < node_memory_chunk[j].start_pfn) {
+						printk("found a another chunk on nid %d, chunk %d\n", nid, j);
+
+						zholes_size[nid] = zholes_size[nid] +
+							(node_memory_chunk[j].start_pfn
+							 - node_end_pfn[nid]);
+						node_end_pfn[nid] = node_memory_chunk[j].end_pfn;
+					}
+				}
+			}
+		}
+	}
+	return 0;
+}
+
+#define kludge_to_virt(idx) \
+	(PAGE_SIZE * ((unsigned long)((u32 *)empty_zero_page - (u32 *)pg0) - \
+         NUM_KLUDGE_PAGES + (unsigned long)(idx)) )
+
+#define pde_kludge(idx, phys) \
+	(PAGE_KLUDGE_START[idx] = ((phys) & ~(PAGE_SIZE - 1)) | \
+        (_PAGE_PRESENT | _PAGE_USER | _PAGE_DIRTY | _PAGE_ACCESSED))
+
+/*
+ * Temporarily use the virtual area starting from PAGE_KLUDGE_START,
+ * to map the target physical address.  By using this area, we can
+ * map up to NUM_KLUDGE_PAGES pages temporarily, i.e. until the next
+ * page_kludge() call.
+ */
+static __init void * page_kludge(unsigned long phys, unsigned long size)
+{
+	unsigned long base, offset, mapped_size;
+	int idx;
+
+	offset = phys & (PAGE_SIZE - 1);
+	mapped_size = PAGE_SIZE - offset;
+	pde_kludge(0, phys);
+	base = kludge_to_virt(0);
+	__flush_tlb_one(base);
+	wbinvd();
+
+	printk("page_kludge(0x%lx, 0x%lx): idx=%d mapped at %lx\n", phys, size,
+		FIX_IO_APIC_BASE_END, base);
+
+	/*
+	 * Most cases can be covered by the below.
+	 */
+	idx = 0;
+	while (mapped_size < size) {
+		++idx;	/* slot 0 already maps the first page */
+		if (idx >= NUM_KLUDGE_PAGES)
+			return NULL;	/* cannot handle this */
+		phys += PAGE_SIZE;
+		pde_kludge(idx, phys);
+		__flush_tlb_one(kludge_to_virt(idx));
+		mapped_size += PAGE_SIZE;
+	}
+
+	return((void *)(base + offset));
+}
+
+
+void __init get_memcfg_from_srat(void)
+{
+	struct acpi_table_header *header = NULL;
+	struct acpi_table_rsdp *rsdp = NULL;
+	struct acpi_table_rsdt *rsdt = NULL;
+	struct acpi_pointer rsdp_address;
+	struct acpi_table_rsdt saved_rsdt;
+	int tables = 0;
+	int i = 0;
+	u32 pde_save[NUM_KLUDGE_PAGES];
+
+	acpi_find_root_pointer(ACPI_PHYSICAL_ADDRESSING, &rsdp_address);
+
+	if (rsdp_address.pointer_type == ACPI_PHYSICAL_POINTER) {
+		printk("%s: assigning address to rsdp\n", __FUNCTION__);
+		rsdp = (struct acpi_table_rsdp *)rsdp_address.pointer.physical;
+	} else {
+		printk("%s: rsdp_address is not a physical pointer\n", __FUNCTION__);
+		return;
+	}
+	if (!rsdp) {
+		printk("%s: Didn't find ACPI root!\n", __FUNCTION__);
+		return;
+	}
+
+	printk(KERN_INFO "%.8s v%d [%.6s]\n", rsdp->signature, rsdp->revision,
+		rsdp->oem_id);
+
+	if (strncmp(rsdp->signature, RSDP_SIG,strlen(RSDP_SIG))) {
+		printk(KERN_WARNING "%s: RSDP table signature incorrect\n", __FUNCTION__);
+		return;
+	}
+
+	rsdt = (struct acpi_table_rsdt *)
+	    page_kludge(rsdp->rsdt_address, sizeof(struct acpi_table_rsdt));
+
+	if (!rsdt) {
+		printk(KERN_WARNING
+		       "%s: ACPI: Invalid root system description tables (RSDT)\n",
+		       __FUNCTION__);
+		return;
+	}
+
+	header = & rsdt->header;
+
+	if (strncmp(header->signature, RSDT_SIG, strlen(RSDT_SIG))) {
+		printk(KERN_WARNING "ACPI: RSDT signature incorrect\n");
+		return;
+	}
+
+	/* 
+	 * The number of tables is computed by taking the 
+	 * size of all entries (total size of RSDT minus the 
+	 * header size) divided by the size of each entry
+	 * (4-byte table pointers).
+	 */
+	tables = (header->length - sizeof(struct acpi_table_header)) / 4;
+
+	memcpy(&saved_rsdt, rsdt, sizeof(saved_rsdt));
+
+	if (saved_rsdt.header.length > sizeof(saved_rsdt)) {
+		printk(KERN_WARNING "ACPI: Too big length in RSDT: %d\n",
+		       saved_rsdt.header.length);
+		return;
+	}
+
+printk("Begin table scan....\n");
+	memcpy(pde_save, PAGE_KLUDGE_START, sizeof(pde_save));
+
+	for (i = 0; i < tables; i++) {
+		/* Map in header, then map in full table length. */
+		header = (struct acpi_table_header *)
+			page_kludge(saved_rsdt.entry[i], sizeof(struct acpi_table_header));
+		if (!header)
+			break;
+		header = (struct acpi_table_header *)
+			page_kludge(saved_rsdt.entry[i], header->length);
+		if (!header)
+			break;
+
+		if (strncmp((char *) &header->signature, "SRAT", 4))
+			continue;
+		acpi20_parse_srat((struct acpi_table_srat *)header);
+		goto out;
+	}
+
+	printk("get_memcfg_from_srat:  no SRAT found!\n");
+ out:
+	/* Undo page kludge. */
+	memcpy(PAGE_KLUDGE_START, pde_save, sizeof(pde_save));
+	__flush_tlb();
+	wbinvd();
+}
+
+unsigned long __init get_zholes_size(int nid)
+{
+	if((nid >= numnodes) | (nid >= MAX_NUMNODES))
+		printk("%s: nid = %d is invalid. numnodes = %d",
+		       __FUNCTION__, nid, numnodes);
+	return zholes_size[nid];
+}
diff -Nru a/arch/i386/mm/discontig.c b/arch/i386/mm/discontig.c
--- a/arch/i386/mm/discontig.c	Wed Feb  5 19:15:58 2003
+++ b/arch/i386/mm/discontig.c	Wed Feb  5 19:15:58 2003
@@ -284,6 +284,7 @@
 
 	for (nid = 0; nid < numnodes; nid++) {
 		unsigned long zones_size[MAX_NR_ZONES] = {0, 0, 0};
+		unsigned long zholes_size;
 		unsigned int max_dma;
 
 		unsigned long low = max_low_pfn;
@@ -307,6 +308,7 @@
 #endif
 			}
 		}
+		zholes_size = get_zholes_size(nid);
 		/*
 		 * We let the lmem_map for node 0 be allocated from the
 		 * normal bootmem allocator, but other nodes come from the
@@ -315,10 +317,10 @@
 		if (nid)
 			free_area_init_node(nid, NODE_DATA(nid), 
 				node_remap_start_vaddr[nid], zones_size, 
-				start, 0);
+				start, (unsigned long *)zholes_size);
 		else
 			free_area_init_node(nid, NODE_DATA(nid), 0, 
-				zones_size, start, 0);
+				zones_size, start, (unsigned long *)zholes_size);
 	}
 	return;
 }
diff -Nru a/drivers/acpi/events/evevent.c b/drivers/acpi/events/evevent.c
--- a/drivers/acpi/events/evevent.c	Wed Feb  5 19:15:58 2003
+++ b/drivers/acpi/events/evevent.c	Wed Feb  5 19:15:58 2003
@@ -104,6 +104,7 @@
 
 	ACPI_FUNCTION_TRACE ("ev_handler_initialize");
 
+	return_ACPI_STATUS (0);
 
 	/* Install the SCI handler */
 
diff -Nru a/include/asm-i386/mmzone.h b/include/asm-i386/mmzone.h
--- a/include/asm-i386/mmzone.h	Wed Feb  5 19:15:58 2003
+++ b/include/asm-i386/mmzone.h	Wed Feb  5 19:15:58 2003
@@ -12,6 +12,8 @@
 
 #ifdef CONFIG_X86_NUMAQ
 #include <asm/numaq.h>
+#elif CONFIG_X86_SUMMIT
+#include <asm/srat.h>
 #else
 #define pfn_to_nid(pfn)		(0)
 #endif /* CONFIG_X86_NUMAQ */
diff -Nru a/include/asm-i386/numaq.h b/include/asm-i386/numaq.h
--- a/include/asm-i386/numaq.h	Wed Feb  5 19:15:58 2003
+++ b/include/asm-i386/numaq.h	Wed Feb  5 19:15:58 2003
@@ -168,6 +168,10 @@
         struct	eachquadmem eq[MAX_NUMNODES];	/* indexed by quad id */
 };
 
+static inline unsigned long get_zholes_size(int nid)
+{
+	return 0;
+}
 #endif /* CONFIG_X86_NUMAQ */
 #endif /* NUMAQ_H */
 
diff -Nru a/include/asm-i386/numnodes.h b/include/asm-i386/numnodes.h
--- a/include/asm-i386/numnodes.h	Wed Feb  5 19:15:58 2003
+++ b/include/asm-i386/numnodes.h	Wed Feb  5 19:15:58 2003
@@ -5,6 +5,8 @@
 
 #ifdef CONFIG_X86_NUMAQ
 #include <asm/numaq.h>
+#elif CONFIG_X86_SUMMIT
+#include <asm/srat.h>
 #else
 #define MAX_NUMNODES	1
 #endif /* CONFIG_X86_NUMAQ */
diff -Nru a/include/asm-i386/srat.h b/include/asm-i386/srat.h
--- /dev/null	Wed Dec 31 16:00:00 1969
+++ b/include/asm-i386/srat.h	Wed Feb  5 19:15:58 2003
@@ -0,0 +1,52 @@
+/*
+ * Some of the code in this file has been gleaned from the 64 bit 
+ * discontigmem support code base.
+ *
+ * Copyright (C) 2002, IBM Corp.
+ *
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT.  See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * Send feedback to Pat Gaughen <gone@us.ibm.com>
+ */
+
+#ifndef _ASM_SRAT_H_
+#define _ASM_SRAT_H_
+
+#define PHYSADDR_TO_NID(pa) pfn_to_nid(pa >> PAGE_SHIFT)
+#define pfn_to_pgdat(pfn) NODE_DATA(pfn_to_nid(pfn))
+#define MAX_NUMNODES		8
+#define MAX_CLUMPS_PER_NODE	4
+#define MAXCLUMPS		(MAX_CLUMPS_PER_NODE * MAX_NUMNODES)
+extern int pfn_to_nid(unsigned long);
+extern void get_memcfg_from_srat(void);
+extern unsigned long get_zholes_size(int);
+#define get_memcfg_numa() get_memcfg_from_srat()
+
+/*
+ * memory -> pxm_domain structure
+ */
+struct node_memory_chunk_s {
+	unsigned long	start_pfn;
+	unsigned long	end_pfn;
+	u8	pxm;		// proximity domain of node
+	u8	nid;		// which cnode contains this chunk?
+	u8	bank;		// which mem bank on this node
+};
+extern struct node_memory_chunk_s node_memory_chunk[];
+
+#endif /* _ASM_SRAT_H_ */
diff -Nru a/include/linux/acpi.h b/include/linux/acpi.h
--- a/include/linux/acpi.h	Wed Feb  5 19:15:58 2003
+++ b/include/linux/acpi.h	Wed Feb  5 19:15:58 2003
@@ -82,7 +82,7 @@
 
 struct acpi_table_rsdt {
 	struct acpi_table_header header;
-	u32			entry[1];
+	u32			entry[8];
 } __attribute__ ((packed));
 
 /* Extended System Description Table (XSDT) */






* Re: [PATCH][RFC] Discontigmem support for the x440
  2003-02-06  7:10 [PATCH][RFC] Discontigmem support for the x440 Patricia Gaughen
@ 2003-02-08 19:23 ` Martin J. Bligh
  0 siblings, 0 replies; 5+ messages in thread
From: Martin J. Bligh @ 2003-02-08 19:23 UTC (permalink / raw)
  To: gone, linux-kernel; +Cc: chandra.sekharan, cleverdj, johnstul

> +/* Identify which cnode a physical address resides on */
> +int pfn_to_nid(unsigned long pfn)
> +{
> +	int	i;
> +	struct node_memory_chunk_s *nmcp;
> +
> +	/* We've got a sorted list.  Binary search here?  Do we care?? */
> +	nmcp = node_memory_chunk;
> +	for (i = num_memory_chunks; --i >= 0; nmcp++)
> +		if (pfn >= nmcp->start_pfn && pfn <= nmcp->end_pfn)
> +			return (int)nmcp->nid;
> +
> +	return -1;
> +}

This is called a lot, and it's large and inefficient. Can you turn it 
into an array lookup like the NUMA-Q implementation, and inline it? 
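
Something like this, say (just a sketch -- the table name, granularity
and sizes below are my assumptions, not code from either
implementation):

	/* One signed byte of node id per 256MB of physical address
	 * space: 256 entries cover 64GB.  Built once at boot by
	 * walking the sorted node_memory_chunk[] list.
	 */
	#define MEM_MAP_SHIFT		28	/* 256MB granularity */
	#define PAGES_PER_ELEMENT	(1UL << (MEM_MAP_SHIFT - PAGE_SHIFT))
	#define MAX_ELEMENTS		256

	extern s8 physnode_map[MAX_ELEMENTS];

	static inline int pfn_to_nid(unsigned long pfn)
	{
		return physnode_map[pfn / PAGES_PER_ELEMENT];
	}

That makes every lookup one shift and one load, at the cost of a fixed
granularity on node boundaries.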

All the clumps and chunks stuff can go, I think.

> diff -Nru a/drivers/acpi/events/evevent.c b/drivers/acpi/events/evevent.c
> --- a/drivers/acpi/events/evevent.c	Wed Feb  5 19:15:58 2003
> +++ b/drivers/acpi/events/evevent.c	Wed Feb  5 19:15:58 2003
> @@ -104,6 +104,7 @@
>  
>  	ACPI_FUNCTION_TRACE ("ev_handler_initialize");
>  
> +	return_ACPI_STATUS (0);
>  
>  	/* Install the SCI handler */

That used to be wrapped in ifdef CONFIG_SUMMIT, which seems much safer
to me ... any reason for the change?
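
i.e. something along these lines (a sketch -- I'm guessing at the
config symbol, and this may not match the hunk that was dropped):

	#ifdef CONFIG_X86_SUMMIT
		/* Skip SCI handler installation; these boxes only
		 * need ACPI for the static tables at boot.
		 */
		return_ACPI_STATUS (0);
	#endif

so the early return only hits the machines that actually need it.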

And all the kludge stuff needs to go, but you know that already ;-)

M.



* Re: [PATCH][RFC] Discontigmem support for the x440
  2003-02-18 22:39 Andy Whitcroft
@ 2003-02-18 22:55 ` Patricia Gaughen
  0 siblings, 0 replies; 5+ messages in thread
From: Patricia Gaughen @ 2003-02-18 22:55 UTC (permalink / raw)
  To: Andy Whitcroft; +Cc: linux-kernel


Hey Andy - Thanks.  Martin had said that you had thought it was wrong, so I 
had been looking it over this morning and realized my error :-)  Hadn't worked 
up a solution just yet, though... so I'm glad to see some code with your 
comment.  I'll review your changes, and if it works I'll add it to my patch.  
:-)  I really appreciate your feedback!

Thanks,
Pat

  > [Andy Whitcroft's message and patch were quoted here in full;
  > the clean copy appears in the next message below.]




* Re: [PATCH][RFC] Discontigmem support for the x440
@ 2003-02-18 22:39 Andy Whitcroft
  2003-02-18 22:55 ` Patricia Gaughen
  0 siblings, 1 reply; 5+ messages in thread
From: Andy Whitcroft @ 2003-02-18 22:39 UTC (permalink / raw)
  To: gone; +Cc: linux-kernel, apw

[-- Attachment #1: Type: text/plain, Size: 2078 bytes --]

Pat,

Whilst looking at the Summit NUMA support I believe I have found a bug
in the memory hole handling.  Specifically, there appears to be a type
mismatch between get_zholes_size() returning a single long and
free_area_init_core() requiring a long array.  What I cannot adequately
explain is why this does not lead to a panic during boot.

Attached is a patch against 2.5.59-mjb6 which I believe should correct
this.  It has been tested in isolation and compile tested, but as I
don't have access to a test machine I cannot be sure it works.  I
believe some investigation is needed to understand why this bug does not
prevent booting, or lead to a large disparity in the zone free page
counts; perhaps the e820 map is helping here.

[gory details for the interested]
Under NUMA support, when constructing the memory map, we call
free_area_init_node() to initialise the pglist_data and allocate the
memory map structures. As part of this we supply a per node, per memory
zone page count and a per node, per memory zone missing page count. 
These are used in free_area_init_core() to determine the true number of
pages per node, per zone.  In the existing Summit code we parse the SRAT
in order to locate and size the inter-chunk gaps, on a per node basis. 
Later this is queried via get_zholes_size() from zone_sizes_init(). 
Unfortunately, get_zholes_size() is returning a single long representing
the per node total holes, whilst zone_sizes_init() requires an array of
longs, one per zone (long[MAX_NR_ZONES]). In the zero holes case this
happens to be safe: when there are zero pages of hole we pass an
apparently null pointer to free_area_init_node(), which is interpreted
as having no holes.  In the presence of any such holes a low-memory
reference would be passed, potentially leading to an oops.
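
To make the mismatch concrete, the caller currently does, in effect
(paraphrased from the patch under discussion, not new code):

	unsigned long zholes_size;		/* scalar, not an array */

	zholes_size = get_zholes_size(nid);	/* per-node hole total */
	free_area_init_node(nid, NODE_DATA(nid),
		(struct page *)lmem_map, zones_size,
		start, (unsigned long *)zholes_size);	/* scalar cast to pointer */

free_area_init_core() then indexes that "pointer" per zone, as
zholes_size[ZONE_DMA], zholes_size[ZONE_NORMAL] and so on, so a zero
total is quietly read as a NULL "no holes" array, while any non-zero
total becomes a bogus low-memory address.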

The attached patch modifies the memory chunk hole scan such that each
hole is allocated to one or more zones using the calculated zone
boundaries, converting zholes_size[] from a per node count to a per node,
per zone count in a similar form to the associated zones[] array.

Cheers.

-apw


[-- Attachment #2: patch.mjb6-zholes --]
[-- Type: text/x-patch, Size: 5706 bytes --]

diff -X /home/apw/bin/makediff.excl -rupN linux-2.5.59-mjb6/arch/i386/kernel/srat.c linux-2.5.59-mjb6-zholes/arch/i386/kernel/srat.c
--- linux-2.5.59-mjb6/arch/i386/kernel/srat.c	2003-02-12 11:28:54.000000000 +0000
+++ linux-2.5.59-mjb6-zholes/arch/i386/kernel/srat.c	2003-02-13 13:17:08.000000000 +0000
@@ -50,7 +50,8 @@ static u8 pxm_bitmap[PXM_BITMAP_LEN];	/*
 struct node_memory_chunk_s node_memory_chunk[MAXCLUMPS];
 
 static int num_memory_chunks;		/* total number of memory chunks */
-static unsigned long zholes_size[MAX_NUMNODES];
+static int zholes_size_init;
+static unsigned long zholes_size[MAX_NUMNODES * MAX_NR_ZONES];
 
 unsigned long node_start_pfn[MAX_NUMNODES];
 unsigned long node_end_pfn[MAX_NUMNODES];
@@ -151,6 +152,49 @@ static void __init parse_memory_affinity
 		 "enabled and removable" : "enabled" ) );
 }
 
+#if MAX_NR_ZONES != 3
+#error "MAX_NR_ZONES != 3, chunk_to_zone requires review"
+#endif
+/* Take a chunk of pages from page frame cstart to cend and count the number
+ * of pages in each zone, returned via zones[].
+ */
+static __init void chunk_to_zones(unsigned long cstart, unsigned long cend, 
+		unsigned long *zones)
+{
+	unsigned long max_dma;
+	extern unsigned long max_low_pfn;
+
+	int z;
+	unsigned long rend;
+
+	/* FIXME: MAX_DMA_ADDRESS and max_low_pfn are trying to provide
+	 * similarly scoped information and should be handled in a consistent
+	 * manner.
+	 */
+	max_dma = virt_to_phys((char *)MAX_DMA_ADDRESS) >> PAGE_SHIFT;
+
+	/* Split the hole into the zones in which it falls.  Repeatedly
+	 * take the segment in which the remaining hole starts, round it
+	 * to the end of that zone.
+	 */
+	memset(zones, 0, MAX_NR_ZONES * sizeof(long));
+	while (cstart < cend) {
+		if (cstart < max_dma) {
+			z = ZONE_DMA;
+			rend = (cend < max_dma)? cend : max_dma;
+
+		} else if (cstart < max_low_pfn) {
+			z = ZONE_NORMAL;
+			rend = (cend < max_low_pfn)? cend : max_low_pfn;
+
+		} else {
+			z = ZONE_HIGHMEM;
+			rend = cend;
+		}
+		zones[z] += rend - cstart;
+		cstart = rend;
+	}
+}
 
 /* Parse the ACPI Static Resource Affinity Table */
 static int __init acpi20_parse_srat(struct acpi_table_srat *sratp)
@@ -242,10 +286,6 @@ static int __init acpi20_parse_srat(stru
 				} else { /* We've found another chunk of memory for the node */
 					if (node_start_pfn[nid] < node_memory_chunk[j].start_pfn) {
 						printk("found a another chunk on nid %d, chunk %d\n", nid, j);
-
-						zholes_size[nid] = zholes_size[nid] +
-							(node_memory_chunk[j].start_pfn
-							 - node_end_pfn[nid]);
 						node_end_pfn[nid] = node_memory_chunk[j].end_pfn;
 					}
 				}
@@ -396,10 +436,53 @@ printk("Begin table scan....\n");
 	wbinvd();
 }
 
-unsigned long __init get_zholes_size(int nid)
+/* For each node run the memory list to determine whether there are
+ * any memory holes.  For each hole determine which ZONE they fall
+ * into.
+ *
+ * NOTE#1: this requires knowledge of the zone boundaries and so
+ * _cannot_ be performed before those are calculated in setup_memory.
+ * 
+ * NOTE#2: we rely on the fact that the memory chunks are ordered by
+ * start pfn number during setup.
+ */
+static void __init get_zholes_init(void)
 {
+	int nid;
+	int c;
+	int first;
+	unsigned long end = 0;
+
+	for (nid = 0; nid < numnodes; nid++) {
+		first = 1;
+		for (c = 0; c < num_memory_chunks; c++){
+			if (node_memory_chunk[c].nid == nid) {
+				if (first) {
+					end = node_memory_chunk[c].end_pfn;
+					first = 0;
+
+				} else {
+					/* Record any gap between this chunk
+					 * and the previous chunk on this node
+					 * against the zones it spans.
+					 */
+					chunk_to_zones(end,
+						node_memory_chunk[c].start_pfn,
+						&zholes_size[nid * MAX_NR_ZONES]);
+				}
+			}
+		}
+	}
+}
+
+unsigned long * __init get_zholes_size(int nid)
+{
+	if (!zholes_size_init) {
+		zholes_size_init++;
+		get_zholes_init();
+	}
 	if((nid >= numnodes) | (nid >= MAX_NUMNODES))
 		printk("%s: nid = %d is invalid. numnodes = %d",
 		       __FUNCTION__, nid, numnodes);
-	return zholes_size[nid];
+	return &zholes_size[nid * MAX_NR_ZONES];
 }
diff -X /home/apw/bin/makediff.excl -rupN linux-2.5.59-mjb6/arch/i386/mm/discontig.c linux-2.5.59-mjb6-zholes/arch/i386/mm/discontig.c
--- linux-2.5.59-mjb6/arch/i386/mm/discontig.c	2003-02-12 11:28:54.000000000 +0000
+++ linux-2.5.59-mjb6-zholes/arch/i386/mm/discontig.c	2003-02-13 13:03:34.000000000 +0000
@@ -290,7 +290,7 @@ void __init zone_sizes_init(void)
 
 	for (nid = 0; nid < numnodes; nid++) {
 		unsigned long zones_size[MAX_NR_ZONES] = {0, 0, 0};
-		unsigned long zholes_size;
+		unsigned long *zholes_size;
 		unsigned int max_dma;
 
 		unsigned long low = max_low_pfn;
@@ -331,7 +331,7 @@ void __init zone_sizes_init(void)
 			lmem_map &= PAGE_MASK;
 			free_area_init_node(nid, NODE_DATA(nid), 
 				(struct page *)lmem_map, zones_size, 
-				start, (unsigned long *)zholes_size);
+				start, zholes_size);
 		}
 	}
 	return;
diff -X /home/apw/bin/makediff.excl -rupN linux-2.5.59-mjb6/include/asm-i386/srat.h linux-2.5.59-mjb6-zholes/include/asm-i386/srat.h
--- linux-2.5.59-mjb6/include/asm-i386/srat.h	2003-02-12 11:28:55.000000000 +0000
+++ linux-2.5.59-mjb6-zholes/include/asm-i386/srat.h	2003-02-13 11:02:43.000000000 +0000
@@ -34,7 +34,7 @@
 #define MAXCLUMPS		(MAX_CLUMPS_PER_NODE * MAX_NUMNODES)
 extern int pfn_to_nid(unsigned long);
 extern void get_memcfg_from_srat(void);
-extern unsigned long get_zholes_size(int);
+extern unsigned long *get_zholes_size(int);
 #define get_memcfg_numa() get_memcfg_from_srat()
 
 /*


* RE: [PATCH][RFC] Discontigmem support for the x440
@ 2003-02-06 20:16 Grover, Andrew
  0 siblings, 0 replies; 5+ messages in thread
From: Grover, Andrew @ 2003-02-06 20:16 UTC (permalink / raw)
  To: gone, linux-kernel; +Cc: chandra.sekharan, cleverdj, johnstul

> From: Patricia Gaughen [mailto:gone@us.ibm.com] 
> This patch provides discontigmem support for the IBM x440.  
> This code has 
> passed through the hands of several developers:  Chandra 
> Seetharaman, James 
> Cleverdon, John Stultz, and last to touch it, me :-)  This 
> patch requires full 
> acpi support.
> 
> I've tested this patch on an 8-way x440 with 16 GB of RAM, with and 
> without HT 
> (acpi=off).
> 
> Any and all feedback regarding this patch is greatly appreciated.
> --- a/drivers/acpi/events/evevent.c	Wed Feb  5 19:15:58 2003
> +++ b/drivers/acpi/events/evevent.c	Wed Feb  5 19:15:58 2003
> @@ -104,6 +104,7 @@
>  
>  	ACPI_FUNCTION_TRACE ("ev_handler_initialize");
>  
> +	return_ACPI_STATUS (0);
>  
>  	/* Install the SCI handler */
>  

This part breaks ACPI event handling.

I'm guessing you just stuck that in there to get things working, but we
all need to figure out why this is an issue, and fix things
properly.

Other than that, thumbs up. SRAT support is a good thing to have.

Regards -- Andy
