* [PATCH 1/7] x86: count errors in testmmiotrace.ko
From: Pekka Paalanen @ 2009-03-01 14:09 UTC (permalink / raw)
  To: Steven Rostedt, Ingo Molnar; +Cc: Pekka Paalanen, Stuart Bennett, linux-kernel

From e4f0af96ad7507fce1229d46dc68cbc09a5b61d3 Mon Sep 17 00:00:00 2001
From: Pekka Paalanen <pq@iki.fi>
Date: Sun, 22 Feb 2009 13:35:56 +0200
Subject: [PATCH] x86: count errors in testmmiotrace.ko

Check the values read back against the values written in the MMIO
read/write test. This shows whether the given MMIO test area really
behaves like memory, which is a prerequisite for a successful
mmiotrace test.

Signed-off-by: Pekka Paalanen <pq@iki.fi>
---
 arch/x86/mm/testmmiotrace.c |   35 +++++++++++++++++++++++++++++------
 1 files changed, 29 insertions(+), 6 deletions(-)

diff --git a/arch/x86/mm/testmmiotrace.c b/arch/x86/mm/testmmiotrace.c
index ab50a8d..4b29f3b 100644
--- a/arch/x86/mm/testmmiotrace.c
+++ b/arch/x86/mm/testmmiotrace.c
@@ -1,5 +1,5 @@
 /*
- * Written by Pekka Paalanen, 2008 <pq@iki.fi>
+ * Written by Pekka Paalanen, 2008-2009 <pq@iki.fi>
  */
 #include <linux/module.h>
 #include <linux/io.h>
@@ -11,28 +11,51 @@ static unsigned long mmio_address;
 module_param(mmio_address, ulong, 0);
 MODULE_PARM_DESC(mmio_address, "Start address of the mapping of 16 kB.");
 
+static unsigned v16(unsigned i)
+{
+	return i * 12 + 7;
+}
+
+static unsigned v32(unsigned i)
+{
+	return i * 212371 + 13;
+}
+
 static void do_write_test(void __iomem *p)
 {
 	unsigned int i;
 	mmiotrace_printk("Write test.\n");
+
 	for (i = 0; i < 256; i++)
 		iowrite8(i, p + i);
+
 	for (i = 1024; i < (5 * 1024); i += 2)
-		iowrite16(i * 12 + 7, p + i);
+		iowrite16(v16(i), p + i);
+
 	for (i = (5 * 1024); i < (16 * 1024); i += 4)
-		iowrite32(i * 212371 + 13, p + i);
+		iowrite32(v32(i), p + i);
 }
 
 static void do_read_test(void __iomem *p)
 {
 	unsigned int i;
+	unsigned errs[3] = { 0 };
 	mmiotrace_printk("Read test.\n");
+
 	for (i = 0; i < 256; i++)
-		ioread8(p + i);
+		if (ioread8(p + i) != i)
+			++errs[0];
+
 	for (i = 1024; i < (5 * 1024); i += 2)
-		ioread16(p + i);
+		if (ioread16(p + i) != v16(i))
+			++errs[1];
+
 	for (i = (5 * 1024); i < (16 * 1024); i += 4)
-		ioread32(p + i);
+		if (ioread32(p + i) != v32(i))
+			++errs[2];
+
+	mmiotrace_printk("Read errors: 8-bit %d, 16-bit %d, 32-bit %d.\n",
+						errs[0], errs[1], errs[2]);
 }
 
 static void do_test(void)
-- 
1.6.0.6
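
For readability, this is roughly how the read test looks once the patch
is applied (a sketch assembled from the hunks above, not a verbatim copy
of arch/x86/mm/testmmiotrace.c). The v16()/v32() helpers simply
regenerate the values the write test stored at each offset:

static unsigned v16(unsigned i)
{
        return i * 12 + 7;      /* value iowrite16() stored at offset i */
}

static unsigned v32(unsigned i)
{
        return i * 212371 + 13; /* value iowrite32() stored at offset i */
}

static void do_read_test(void __iomem *p)
{
        unsigned int i;
        unsigned errs[3] = { 0 };       /* 8-, 16- and 32-bit mismatches */
        mmiotrace_printk("Read test.\n");

        for (i = 0; i < 256; i++)
                if (ioread8(p + i) != i)
                        ++errs[0];

        for (i = 1024; i < (5 * 1024); i += 2)
                if (ioread16(p + i) != v16(i))
                        ++errs[1];

        for (i = (5 * 1024); i < (16 * 1024); i += 4)
                if (ioread32(p + i) != v32(i))
                        ++errs[2];

        mmiotrace_printk("Read errors: 8-bit %d, 16-bit %d, 32-bit %d.\n",
                                                errs[0], errs[1], errs[2]);
}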



* [PATCH 2/7] x86: add far read test to testmmiotrace
From: Pekka Paalanen @ 2009-03-01 14:10 UTC (permalink / raw)
  To: Steven Rostedt, Ingo Molnar; +Cc: Pekka Paalanen, Stuart Bennett, linux-kernel

From d3f1d5f4a087c6e57cc1d2aa8fc2c82c23a5096a Mon Sep 17 00:00:00 2001
From: Pekka Paalanen <pq@iki.fi>
Date: Sun, 22 Feb 2009 14:09:25 +0200
Subject: [PATCH] x86: add far read test to testmmiotrace

Apparently, pages far into an ioremapped region might not actually be
mapped during ioremap(). Add an optional read test that tries to
trigger an MMIO access which faults more than once. Also add more
messages to the kernel log to help with debugging.

This patch is based on a patch suggested by
Stuart Bennett <stuart@freedesktop.org>,
who discovered bugs in mmiotrace related to normal kernel-space faults.

Signed-off-by: Pekka Paalanen <pq@iki.fi>
---
 arch/x86/mm/testmmiotrace.c |   35 ++++++++++++++++++++++++++++-------
 1 files changed, 28 insertions(+), 7 deletions(-)

diff --git a/arch/x86/mm/testmmiotrace.c b/arch/x86/mm/testmmiotrace.c
index 4b29f3b..427fd1b 100644
--- a/arch/x86/mm/testmmiotrace.c
+++ b/arch/x86/mm/testmmiotrace.c
@@ -9,7 +9,13 @@
 
 static unsigned long mmio_address;
 module_param(mmio_address, ulong, 0);
-MODULE_PARM_DESC(mmio_address, "Start address of the mapping of 16 kB.");
+MODULE_PARM_DESC(mmio_address, " Start address of the mapping of 16 kB "
+				"(or 8 MB if read_far is non-zero).");
+
+static unsigned long read_far = 0x400100;
+module_param(read_far, ulong, 0);
+MODULE_PARM_DESC(read_far, " Offset of a 32-bit read within 8 MB "
+				"(default: 0x400100).");
 
 static unsigned v16(unsigned i)
 {
@@ -24,6 +30,7 @@ static unsigned v32(unsigned i)
 static void do_write_test(void __iomem *p)
 {
 	unsigned int i;
+	pr_info(MODULE_NAME ": write test.\n");
 	mmiotrace_printk("Write test.\n");
 
 	for (i = 0; i < 256; i++)
@@ -40,6 +47,7 @@ static void do_read_test(void __iomem *p)
 {
 	unsigned int i;
 	unsigned errs[3] = { 0 };
+	pr_info(MODULE_NAME ": read test.\n");
 	mmiotrace_printk("Read test.\n");
 
 	for (i = 0; i < 256; i++)
@@ -58,9 +66,17 @@ static void do_read_test(void __iomem *p)
 						errs[0], errs[1], errs[2]);
 }
 
-static void do_test(void)
+static void do_read_far_test(void __iomem *p)
+{
+	pr_info(MODULE_NAME ": read far test.\n");
+	mmiotrace_printk("Read far test.\n");
+
+	ioread32(p + read_far);
+}
+
+static void do_test(unsigned long size)
 {
-	void __iomem *p = ioremap_nocache(mmio_address, 0x4000);
+	void __iomem *p = ioremap_nocache(mmio_address, size);
 	if (!p) {
 		pr_err(MODULE_NAME ": could not ioremap, aborting.\n");
 		return;
@@ -68,11 +84,15 @@ static void do_test(void)
 	mmiotrace_printk("ioremap returned %p.\n", p);
 	do_write_test(p);
 	do_read_test(p);
+	if (read_far && read_far < size - 4)
+		do_read_far_test(p);
 	iounmap(p);
 }
 
 static int __init init(void)
 {
+	unsigned long size = (read_far) ? (8 << 20) : (16 << 10);
+
 	if (mmio_address == 0) {
 		pr_err(MODULE_NAME ": you have to use the module argument "
 							"mmio_address.\n");
@@ -81,10 +101,11 @@ static int __init init(void)
 		return -ENXIO;
 	}
 
-	pr_warning(MODULE_NAME ": WARNING: mapping 16 kB @ 0x%08lx "
-					"in PCI address space, and writing "
-					"rubbish in there.\n", mmio_address);
-	do_test();
+	pr_warning(MODULE_NAME ": WARNING: mapping %lu kB @ 0x%08lx in PCI "
+		"address space, and writing 16 kB of rubbish in there.\n",
+		 size >> 10, mmio_address);
+	do_test(size);
+	pr_info(MODULE_NAME ": All done.\n");
 	return 0;
 }
 
-- 
1.6.0.6
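
Assembled from the hunks above, the resulting test flow looks roughly
like this (a sketch, not the verbatim file). With a non-zero read_far
the module maps 8 MB instead of 16 kB and issues one extra 32-bit read
far into the mapping:

static void do_read_far_test(void __iomem *p)
{
        pr_info(MODULE_NAME ": read far test.\n");
        mmiotrace_printk("Read far test.\n");

        ioread32(p + read_far); /* the access meant to fault more than once */
}

static void do_test(unsigned long size)
{
        void __iomem *p = ioremap_nocache(mmio_address, size);
        if (!p) {
                pr_err(MODULE_NAME ": could not ioremap, aborting.\n");
                return;
        }
        mmiotrace_printk("ioremap returned %p.\n", p);
        do_write_test(p);
        do_read_test(p);
        if (read_far && read_far < size - 4)    /* keep it inside the mapping */
                do_read_far_test(p);
        iounmap(p);
}

init() picks the mapping size as (read_far) ? (8 << 20) : (16 << 10),
that is 8 MB when a far read is requested and 16 kB otherwise.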



* [PATCH 3/7] x86 mmiotrace: WARN_ONCE if dis/arming a page fails
From: Pekka Paalanen @ 2009-03-01 14:11 UTC (permalink / raw)
  To: Steven Rostedt, Ingo Molnar; +Cc: Pekka Paalanen, Stuart Bennett, linux-kernel

From 2f1f600e019955a0485452e4da676837bc8c91bd Mon Sep 17 00:00:00 2001
From: Stuart Bennett <stuart@freedesktop.org>
Date: Fri, 30 Jan 2009 17:38:59 +0000
Subject: [PATCH] x86 mmiotrace: WARN_ONCE if dis/arming a page fails

Print a full warning once if arming or disarming a page fails.

Also, if initial arming fails, do not handle the page further. This
avoids the possibility of a page failing to arm and then later claiming
to have handled any fault on that page.

WARN_ONCE added by Pekka Paalanen.

Signed-off-by: Stuart Bennett <stuart@freedesktop.org>
Signed-off-by: Pekka Paalanen <pq@iki.fi>
---
 arch/x86/mm/kmmio.c |   25 +++++++++++++++++--------
 1 files changed, 17 insertions(+), 8 deletions(-)

diff --git a/arch/x86/mm/kmmio.c b/arch/x86/mm/kmmio.c
index 93d8203..fb1f115 100644
--- a/arch/x86/mm/kmmio.c
+++ b/arch/x86/mm/kmmio.c
@@ -105,7 +105,7 @@ static struct kmmio_fault_page *get_kmmio_fault_page(unsigned long page)
 	return NULL;
 }
 
-static void set_page_present(unsigned long addr, bool present,
+static int set_page_present(unsigned long addr, bool present,
 							unsigned int *pglevel)
 {
 	pteval_t pteval;
@@ -116,7 +116,7 @@ static void set_page_present(unsigned long addr, bool present,
 
 	if (!pte) {
 		pr_err("kmmio: no pte for page 0x%08lx\n", addr);
-		return;
+		return -1;
 	}
 
 	if (pglevel)
@@ -140,22 +140,27 @@ static void set_page_present(unsigned long addr, bool present,
 
 	default:
 		pr_err("kmmio: unexpected page level 0x%x.\n", level);
-		return;
+		return -1;
 	}
 
 	__flush_tlb_one(addr);
+
+	return 0;
 }
 
 /** Mark the given page as not present. Access to it will trigger a fault. */
-static void arm_kmmio_fault_page(unsigned long page, unsigned int *pglevel)
+static int arm_kmmio_fault_page(unsigned long page, unsigned int *pglevel)
 {
-	set_page_present(page & PAGE_MASK, false, pglevel);
+	int ret = set_page_present(page & PAGE_MASK, false, pglevel);
+	WARN_ONCE(ret < 0, KERN_ERR "kmmio arming 0x%08lx failed.\n", page);
+	return ret;
 }
 
 /** Mark the given page as present. */
 static void disarm_kmmio_fault_page(unsigned long page, unsigned int *pglevel)
 {
-	set_page_present(page & PAGE_MASK, true, pglevel);
+	int ret = set_page_present(page & PAGE_MASK, true, pglevel);
+	WARN_ONCE(ret < 0, KERN_ERR "kmmio disarming 0x%08lx failed.\n", page);
 }
 
 /*
@@ -326,9 +331,13 @@ static int add_kmmio_fault_page(unsigned long page)
 
 	f->count = 1;
 	f->page = page;
-	list_add_rcu(&f->list, kmmio_page_list(f->page));
 
-	arm_kmmio_fault_page(f->page, NULL);
+	if (arm_kmmio_fault_page(f->page, NULL)) {
+		kfree(f);
+		return -1;
+	}
+
+	list_add_rcu(&f->list, kmmio_page_list(f->page));
 
 	return 0;
 }
-- 
1.6.0.6
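
The resulting registration path, assembled from the hunks above (a
sketch, not a verbatim copy): the page is armed first and linked into
the page table only if arming succeeded, so a page that failed to arm
can never claim to have handled a fault on that page.

static int arm_kmmio_fault_page(unsigned long page, unsigned int *pglevel)
{
        int ret = set_page_present(page & PAGE_MASK, false, pglevel);
        WARN_ONCE(ret < 0, KERN_ERR "kmmio arming 0x%08lx failed.\n", page);
        return ret;
}

/* ...and the tail of add_kmmio_fault_page(): */

        f->count = 1;
        f->page = page;

        if (arm_kmmio_fault_page(f->page, NULL)) {
                kfree(f);       /* arming failed, forget the page entirely */
                return -1;
        }

        list_add_rcu(&f->list, kmmio_page_list(f->page));

        return 0;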



* [PATCH 4/7] x86 mmiotrace: fix save/restore page table state
From: Pekka Paalanen @ 2009-03-01 14:11 UTC (permalink / raw)
  To: Steven Rostedt, Ingo Molnar; +Cc: Pekka Paalanen, Stuart Bennett, linux-kernel

From baa99e2b32449ec7bf147c234adfa444caecac8a Mon Sep 17 00:00:00 2001
From: Pekka Paalanen <pq@iki.fi>
Date: Sun, 22 Feb 2009 20:02:43 +0200
Subject: [PATCH] x86 mmiotrace: fix save/restore page table state

Blindly setting _PAGE_PRESENT in disarm_kmmio_fault_page() overlooks
the possibility that the page was not present when it was armed.

Make arm_kmmio_fault_page() store the previous page presence in struct
kmmio_fault_page and use it on disarm.

This patch was originally written by Stuart Bennett, but Pekka Paalanen
rewrote it a little differently.

Cc: Stuart Bennett <stuart@freedesktop.org>
Signed-off-by: Pekka Paalanen <pq@iki.fi>
---
 arch/x86/mm/kmmio.c |   66 ++++++++++++++++++++++++++++++++++-----------------
 1 files changed, 44 insertions(+), 22 deletions(-)

diff --git a/arch/x86/mm/kmmio.c b/arch/x86/mm/kmmio.c
index fb1f115..be361eb 100644
--- a/arch/x86/mm/kmmio.c
+++ b/arch/x86/mm/kmmio.c
@@ -32,6 +32,8 @@ struct kmmio_fault_page {
 	struct list_head list;
 	struct kmmio_fault_page *release_next;
 	unsigned long page; /* location of the fault page */
+	bool old_presence; /* page presence prior to arming */
+	bool armed;
 
 	/*
 	 * Number of times this page has been registered as a part
@@ -105,8 +107,7 @@ static struct kmmio_fault_page *get_kmmio_fault_page(unsigned long page)
 	return NULL;
 }
 
-static int set_page_present(unsigned long addr, bool present,
-							unsigned int *pglevel)
+static int set_page_presence(unsigned long addr, bool present, bool *old)
 {
 	pteval_t pteval;
 	pmdval_t pmdval;
@@ -119,20 +120,21 @@ static int set_page_present(unsigned long addr, bool present,
 		return -1;
 	}
 
-	if (pglevel)
-		*pglevel = level;
-
 	switch (level) {
 	case PG_LEVEL_2M:
 		pmd = (pmd_t *)pte;
-		pmdval = pmd_val(*pmd) & ~_PAGE_PRESENT;
+		pmdval = pmd_val(*pmd);
+		*old = !!(pmdval & _PAGE_PRESENT);
+		pmdval &= ~_PAGE_PRESENT;
 		if (present)
 			pmdval |= _PAGE_PRESENT;
 		set_pmd(pmd, __pmd(pmdval));
 		break;
 
 	case PG_LEVEL_4K:
-		pteval = pte_val(*pte) & ~_PAGE_PRESENT;
+		pteval = pte_val(*pte);
+		*old = !!(pteval & _PAGE_PRESENT);
+		pteval &= ~_PAGE_PRESENT;
 		if (present)
 			pteval |= _PAGE_PRESENT;
 		set_pte_atomic(pte, __pte(pteval));
@@ -148,19 +150,39 @@ static int set_page_present(unsigned long addr, bool present,
 	return 0;
 }
 
-/** Mark the given page as not present. Access to it will trigger a fault. */
-static int arm_kmmio_fault_page(unsigned long page, unsigned int *pglevel)
+/*
+ * Mark the given page as not present. Access to it will trigger a fault.
+ *
+ * Struct kmmio_fault_page is protected by RCU and kmmio_lock, but the
+ * protection is ignored here. RCU read lock is assumed held, so the struct
+ * will not disappear unexpectedly. Furthermore, the caller must guarantee,
+ * that double arming the same virtual address (page) cannot occur.
+ *
+ * Double disarming on the other hand is allowed, and may occur when a fault
+ * and mmiotrace shutdown happen simultaneously.
+ */
+static int arm_kmmio_fault_page(struct kmmio_fault_page *f)
 {
-	int ret = set_page_present(page & PAGE_MASK, false, pglevel);
-	WARN_ONCE(ret < 0, KERN_ERR "kmmio arming 0x%08lx failed.\n", page);
+	int ret;
+	WARN_ONCE(f->armed, KERN_ERR "kmmio page already armed.\n");
+	if (f->armed) {
+		pr_warning("kmmio double-arm: page 0x%08lx, ref %d, old %d\n",
+					f->page, f->count, f->old_presence);
+	}
+	ret = set_page_presence(f->page, false, &f->old_presence);
+	WARN_ONCE(ret < 0, KERN_ERR "kmmio arming 0x%08lx failed.\n", f->page);
+	f->armed = true;
 	return ret;
 }
 
-/** Mark the given page as present. */
-static void disarm_kmmio_fault_page(unsigned long page, unsigned int *pglevel)
+/** Restore the given page to saved presence state. */
+static void disarm_kmmio_fault_page(struct kmmio_fault_page *f)
 {
-	int ret = set_page_present(page & PAGE_MASK, true, pglevel);
-	WARN_ONCE(ret < 0, KERN_ERR "kmmio disarming 0x%08lx failed.\n", page);
+	bool tmp;
+	int ret = set_page_presence(f->page, f->old_presence, &tmp);
+	WARN_ONCE(ret < 0,
+			KERN_ERR "kmmio disarming 0x%08lx failed.\n", f->page);
+	f->armed = false;
 }
 
 /*
@@ -207,7 +229,7 @@ int kmmio_handler(struct pt_regs *regs, unsigned long addr)
 
 	ctx = &get_cpu_var(kmmio_ctx);
 	if (ctx->active) {
-		disarm_kmmio_fault_page(faultpage->page, NULL);
+		disarm_kmmio_fault_page(faultpage);
 		if (addr == ctx->addr) {
 			/*
 			 * On SMP we sometimes get recursive probe hits on the
@@ -249,7 +271,7 @@ int kmmio_handler(struct pt_regs *regs, unsigned long addr)
 	regs->flags &= ~X86_EFLAGS_IF;
 
 	/* Now we set present bit in PTE and single step. */
-	disarm_kmmio_fault_page(ctx->fpage->page, NULL);
+	disarm_kmmio_fault_page(ctx->fpage);
 
 	/*
 	 * If another cpu accesses the same page while we are stepping,
@@ -288,7 +310,7 @@ static int post_kmmio_handler(unsigned long condition, struct pt_regs *regs)
 	if (ctx->probe && ctx->probe->post_handler)
 		ctx->probe->post_handler(ctx->probe, condition, regs);
 
-	arm_kmmio_fault_page(ctx->fpage->page, NULL);
+	arm_kmmio_fault_page(ctx->fpage);
 
 	regs->flags &= ~X86_EFLAGS_TF;
 	regs->flags |= ctx->saved_flags;
@@ -320,19 +342,19 @@ static int add_kmmio_fault_page(unsigned long page)
 	f = get_kmmio_fault_page(page);
 	if (f) {
 		if (!f->count)
-			arm_kmmio_fault_page(f->page, NULL);
+			arm_kmmio_fault_page(f);
 		f->count++;
 		return 0;
 	}
 
-	f = kmalloc(sizeof(*f), GFP_ATOMIC);
+	f = kzalloc(sizeof(*f), GFP_ATOMIC);
 	if (!f)
 		return -1;
 
 	f->count = 1;
 	f->page = page;
 
-	if (arm_kmmio_fault_page(f->page, NULL)) {
+	if (arm_kmmio_fault_page(f)) {
 		kfree(f);
 		return -1;
 	}
@@ -356,7 +378,7 @@ static void release_kmmio_fault_page(unsigned long page,
 	f->count--;
 	BUG_ON(f->count < 0);
 	if (!f->count) {
-		disarm_kmmio_fault_page(f->page, NULL);
+		disarm_kmmio_fault_page(f);
 		f->release_next = *release_list;
 		*release_list = f;
 	}
-- 
1.6.0.6
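
Put together from the hunks above, the arm/disarm pair after this patch
looks roughly like this (a sketch, not a verbatim copy). Arming records
the page's presence bit in old_presence; disarming writes that saved
state back instead of forcing _PAGE_PRESENT on:

static int arm_kmmio_fault_page(struct kmmio_fault_page *f)
{
        int ret;
        WARN_ONCE(f->armed, KERN_ERR "kmmio page already armed.\n");
        if (f->armed) {
                pr_warning("kmmio double-arm: page 0x%08lx, ref %d, old %d\n",
                                        f->page, f->count, f->old_presence);
        }
        /* Clear _PAGE_PRESENT and remember whether it was set. */
        ret = set_page_presence(f->page, false, &f->old_presence);
        WARN_ONCE(ret < 0, KERN_ERR "kmmio arming 0x%08lx failed.\n", f->page);
        f->armed = true;
        return ret;
}

/** Restore the given page to saved presence state. */
static void disarm_kmmio_fault_page(struct kmmio_fault_page *f)
{
        bool tmp;
        int ret = set_page_presence(f->page, f->old_presence, &tmp);
        WARN_ONCE(ret < 0,
                        KERN_ERR "kmmio disarming 0x%08lx failed.\n", f->page);
        f->armed = false;
}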



* [PATCH 5/7] x86 mmiotrace: split set_page_presence()
From: Pekka Paalanen @ 2009-03-01 14:12 UTC (permalink / raw)
  To: Steven Rostedt, Ingo Molnar; +Cc: Pekka Paalanen, Stuart Bennett, linux-kernel

From 36772dcb6ffbbb68254cbfc379a103acd2fbfefc Mon Sep 17 00:00:00 2001
From: Pekka Paalanen <pq@iki.fi>
Date: Sat, 28 Feb 2009 21:34:59 +0200
Subject: [PATCH] x86 mmiotrace: split set_page_presence()

Split set_page_presence() in kmmio.c into two new functions,
set_pmd_presence() and set_pte_presence(). This is purely a code
reorganization; there are no functional changes.

Signed-off-by: Pekka Paalanen <pq@iki.fi>
---
 arch/x86/mm/kmmio.c |   41 ++++++++++++++++++++++-------------------
 1 files changed, 22 insertions(+), 19 deletions(-)

diff --git a/arch/x86/mm/kmmio.c b/arch/x86/mm/kmmio.c
index be361eb..d297775 100644
--- a/arch/x86/mm/kmmio.c
+++ b/arch/x86/mm/kmmio.c
@@ -107,12 +107,29 @@ static struct kmmio_fault_page *get_kmmio_fault_page(unsigned long page)
 	return NULL;
 }
 
+static void set_pmd_presence(pmd_t *pmd, bool present, bool *old)
+{
+	pmdval_t v = pmd_val(*pmd);
+	*old = !!(v & _PAGE_PRESENT);
+	v &= ~_PAGE_PRESENT;
+	if (present)
+		v |= _PAGE_PRESENT;
+	set_pmd(pmd, __pmd(v));
+}
+
+static void set_pte_presence(pte_t *pte, bool present, bool *old)
+{
+	pteval_t v = pte_val(*pte);
+	*old = !!(v & _PAGE_PRESENT);
+	v &= ~_PAGE_PRESENT;
+	if (present)
+		v |= _PAGE_PRESENT;
+	set_pte_atomic(pte, __pte(v));
+}
+
 static int set_page_presence(unsigned long addr, bool present, bool *old)
 {
-	pteval_t pteval;
-	pmdval_t pmdval;
 	unsigned int level;
-	pmd_t *pmd;
 	pte_t *pte = lookup_address(addr, &level);
 
 	if (!pte) {
@@ -122,31 +139,17 @@ static int set_page_presence(unsigned long addr, bool present, bool *old)
 
 	switch (level) {
 	case PG_LEVEL_2M:
-		pmd = (pmd_t *)pte;
-		pmdval = pmd_val(*pmd);
-		*old = !!(pmdval & _PAGE_PRESENT);
-		pmdval &= ~_PAGE_PRESENT;
-		if (present)
-			pmdval |= _PAGE_PRESENT;
-		set_pmd(pmd, __pmd(pmdval));
+		set_pmd_presence((pmd_t *)pte, present, old);
 		break;
-
 	case PG_LEVEL_4K:
-		pteval = pte_val(*pte);
-		*old = !!(pteval & _PAGE_PRESENT);
-		pteval &= ~_PAGE_PRESENT;
-		if (present)
-			pteval |= _PAGE_PRESENT;
-		set_pte_atomic(pte, __pte(pteval));
+		set_pte_presence(pte, present, old);
 		break;
-
 	default:
 		pr_err("kmmio: unexpected page level 0x%x.\n", level);
 		return -1;
 	}
 
 	__flush_tlb_one(addr);
-
 	return 0;
 }
 
-- 
1.6.0.6



* [PATCH 6/7] x86 mmiotrace: improve handling of secondary faults
From: Pekka Paalanen @ 2009-03-01 14:13 UTC (permalink / raw)
  To: Steven Rostedt, Ingo Molnar; +Cc: Pekka Paalanen, Stuart Bennett, linux-kernel

From bea20d8a6f5444286372d102bb6abbf7313aca6e Mon Sep 17 00:00:00 2001
From: Stuart Bennett <stuart@freedesktop.org>
Date: Thu, 5 Feb 2009 11:02:02 +0000
Subject: [PATCH] x86 mmiotrace: improve handling of secondary faults

Upgrade some kmmio.c debug messages to warnings.
Allow secondary faults on probed pages to fall through, and only log
secondary faults that are not due to non-present pages.

Patch edited by Pekka Paalanen.

Signed-off-by: Stuart Bennett <stuart@freedesktop.org>
Signed-off-by: Pekka Paalanen <pq@iki.fi>
---
 arch/x86/mm/kmmio.c |   40 ++++++++++++++++++++++------------------
 1 files changed, 22 insertions(+), 18 deletions(-)

diff --git a/arch/x86/mm/kmmio.c b/arch/x86/mm/kmmio.c
index d297775..4c66bd3 100644
--- a/arch/x86/mm/kmmio.c
+++ b/arch/x86/mm/kmmio.c
@@ -232,28 +232,32 @@ int kmmio_handler(struct pt_regs *regs, unsigned long addr)
 
 	ctx = &get_cpu_var(kmmio_ctx);
 	if (ctx->active) {
-		disarm_kmmio_fault_page(faultpage);
 		if (addr == ctx->addr) {
 			/*
-			 * On SMP we sometimes get recursive probe hits on the
-			 * same address. Context is already saved, fall out.
+			 * A second fault on the same page means some other
+			 * condition needs handling by do_page_fault(), the
+			 * page really not being present is the most common.
 			 */
-			pr_debug("kmmio: duplicate probe hit on CPU %d, for "
-						"address 0x%08lx.\n",
-						smp_processor_id(), addr);
-			ret = 1;
-			goto no_kmmio_ctx;
-		}
-		/*
-		 * Prevent overwriting already in-flight context.
-		 * This should not happen, let's hope disarming at least
-		 * prevents a panic.
-		 */
-		pr_emerg("kmmio: recursive probe hit on CPU %d, "
+			pr_debug("kmmio: secondary hit for 0x%08lx CPU %d.\n",
+					addr, smp_processor_id());
+
+			if (!faultpage->old_presence)
+				pr_info("kmmio: unexpected secondary hit for "
+					"address 0x%08lx on CPU %d.\n", addr,
+					smp_processor_id());
+		} else {
+			/*
+			 * Prevent overwriting already in-flight context.
+			 * This should not happen, let's hope disarming at
+			 * least prevents a panic.
+			 */
+			pr_emerg("kmmio: recursive probe hit on CPU %d, "
 					"for address 0x%08lx. Ignoring.\n",
 					smp_processor_id(), addr);
-		pr_emerg("kmmio: previous hit was at 0x%08lx.\n",
-					ctx->addr);
+			pr_emerg("kmmio: previous hit was at 0x%08lx.\n",
+						ctx->addr);
+			disarm_kmmio_fault_page(faultpage);
+		}
 		goto no_kmmio_ctx;
 	}
 	ctx->active++;
@@ -305,7 +309,7 @@ static int post_kmmio_handler(unsigned long condition, struct pt_regs *regs)
 	struct kmmio_context *ctx = &get_cpu_var(kmmio_ctx);
 
 	if (!ctx->active) {
-		pr_debug("kmmio: spurious debug trap on CPU %d.\n",
+		pr_warning("kmmio: spurious debug trap on CPU %d.\n",
 							smp_processor_id());
 		goto out;
 	}
-- 
1.6.0.6
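
Because the hunk above interleaves removed and added lines, here is the
resulting ctx->active branch of kmmio_handler(), assembled from it (a
sketch, not a verbatim copy). A secondary fault on the same address now
falls through to do_page_fault(); only a recursive hit on a different
address still disarms the page:

        if (ctx->active) {
                if (addr == ctx->addr) {
                        /*
                         * A second fault on the same page means some other
                         * condition needs handling by do_page_fault(), the
                         * page really not being present is the most common.
                         */
                        pr_debug("kmmio: secondary hit for 0x%08lx CPU %d.\n",
                                        addr, smp_processor_id());

                        if (!faultpage->old_presence)
                                pr_info("kmmio: unexpected secondary hit for "
                                        "address 0x%08lx on CPU %d.\n", addr,
                                        smp_processor_id());
                } else {
                        /*
                         * Prevent overwriting already in-flight context.
                         * This should not happen, let's hope disarming at
                         * least prevents a panic.
                         */
                        pr_emerg("kmmio: recursive probe hit on CPU %d, "
                                        "for address 0x%08lx. Ignoring.\n",
                                        smp_processor_id(), addr);
                        pr_emerg("kmmio: previous hit was at 0x%08lx.\n",
                                                ctx->addr);
                        disarm_kmmio_fault_page(faultpage);
                }
                goto no_kmmio_ctx;
        }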



* [PATCH 7/7] x86 mmiotrace: fix race with release_kmmio_fault_page()
From: Pekka Paalanen @ 2009-03-01 14:14 UTC (permalink / raw)
  To: Steven Rostedt, Ingo Molnar; +Cc: Pekka Paalanen, Stuart Bennett, linux-kernel

From c7bc3125d414938b3340a87516ecf47cd27efea4 Mon Sep 17 00:00:00 2001
From: Pekka Paalanen <pq@iki.fi>
Date: Tue, 24 Feb 2009 21:44:15 +0200
Subject: [PATCH] x86 mmiotrace: fix race with release_kmmio_fault_page()

There was a theoretical possibility of a race between arming a page in
post_kmmio_handler() and disarming the page in
release_kmmio_fault_page():

cpu0                             cpu1
------------------------------------------------------------------
mmiotrace shutdown
enter release_kmmio_fault_page
                                 fault on the page
                                 disarm the page
disarm the page
                                 handle the MMIO access
                                 re-arm the page
put the page on release list
remove_kmmio_fault_pages()
                                 fault on the page
                                 page not known to mmiotrace
                                 fall back to do_page_fault()
                                 *KABOOM*

(This scenario also shows the double-disarm case, which is allowed.)

Fixed by acquiring kmmio_lock in post_kmmio_handler() and checking
whether the page is being released by mmiotrace.

Signed-off-by: Pekka Paalanen <pq@iki.fi>
---
 arch/x86/mm/kmmio.c |    9 +++++++--
 1 files changed, 7 insertions(+), 2 deletions(-)

diff --git a/arch/x86/mm/kmmio.c b/arch/x86/mm/kmmio.c
index 4c66bd3..9f20503 100644
--- a/arch/x86/mm/kmmio.c
+++ b/arch/x86/mm/kmmio.c
@@ -38,7 +38,8 @@ struct kmmio_fault_page {
 	/*
 	 * Number of times this page has been registered as a part
 	 * of a probe. If zero, page is disarmed and this may be freed.
-	 * Used only by writers (RCU).
+	 * Used only by writers (RCU) and post_kmmio_handler().
+	 * Protected by kmmio_lock, when linked into kmmio_page_table.
 	 */
 	int count;
 };
@@ -317,7 +318,11 @@ static int post_kmmio_handler(unsigned long condition, struct pt_regs *regs)
 	if (ctx->probe && ctx->probe->post_handler)
 		ctx->probe->post_handler(ctx->probe, condition, regs);
 
-	arm_kmmio_fault_page(ctx->fpage);
+	/* Prevent racing against release_kmmio_fault_page(). */
+	spin_lock(&kmmio_lock);
+	if (ctx->fpage->count)
+		arm_kmmio_fault_page(ctx->fpage);
+	spin_unlock(&kmmio_lock);
 
 	regs->flags &= ~X86_EFLAGS_TF;
 	regs->flags |= ctx->saved_flags;
-- 
1.6.0.6
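
The change itself is small: after this patch the re-arm in
post_kmmio_handler() runs under kmmio_lock and is skipped when the
page's count has already dropped to zero, which is the case when
release_kmmio_fault_page() has queued the page for release. A sketch of
the resulting excerpt, assembled from the hunk above:

        if (ctx->probe && ctx->probe->post_handler)
                ctx->probe->post_handler(ctx->probe, condition, regs);

        /* Prevent racing against release_kmmio_fault_page(). */
        spin_lock(&kmmio_lock);
        if (ctx->fpage->count)          /* still registered? then re-arm */
                arm_kmmio_fault_page(ctx->fpage);
        spin_unlock(&kmmio_lock);

        regs->flags &= ~X86_EFLAGS_TF;
        regs->flags |= ctx->saved_flags;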



* Re: [PATCH 0/7] mmiotrace: enhance01
From: Ingo Molnar @ 2009-03-02  9:23 UTC (permalink / raw)
  To: Pekka Paalanen; +Cc: Steven Rostedt, Stuart Bennett, linux-kernel


* Pekka Paalanen <pq@iki.fi> wrote:

> Hi all,
> 
> the following patch set consists mostly of mmiotrace stability and bug fixes.
> It is meant for 2.6.30, has been developed on linus/master (post 2.6.29-rc6),
> and applies cleanly to tip/master.
> 
> It has been tested on Athlon64 3000+ (x86_64) by testmmiotrace.ko and
> tracing the Nouveau driver, and also on Core 2 Duo (Thinkpad T61, x86_64)
> by testmmiotrace.ko and tracing the Nvidia proprietary driver.
> 
> Special thanks to Stuart, who provided most of the fixes.
> 
>  arch/x86/mm/kmmio.c         |  149 +++++++++++++++++++++++++++---------------
>  arch/x86/mm/testmmiotrace.c |   70 ++++++++++++++++----
>  2 files changed, 153 insertions(+), 66 deletions(-)

Applied to tip:tracing/mmiotrace, thanks guys!

	Ingo

