* [PATCH] kmap tracking
From: Randy.Dunlap @ 2006-05-18 22:53 UTC
  To: lkml; +Cc: akpm

From: Randy Dunlap <rdunlap@xenotime.net>

Track kmap/kunmap call history, storing caller function address,
action, and time (jiffies), if CONFIG_DEBUG_KMAP is enabled.
Based on a patch to 2.4.21 by Zach Brown that was used successfully
at Oracle to track down some kmap/kunmap problems.
Built with CONFIG_DEBUG_KMAP =y and =n.

Creates "kmap-history" in debugfs.
Any write to kmap-history clears the history table.

Output format is (all tab-separated):
table_index action_index next_flag caller_function+offset/size refcount jiffies

Example:
99      0       -       do_execve+0x133/0x1bf   2       39340
99      1       -       do_execve+0x133/0x1bf   2       39340
99      2       +       proc_pid_cmdline+0x53/0xeb      2       4294946079
99      3       -       proc_pid_cmdline+0x53/0xeb      2       4294946079
99      4       -       copy_strings_kernel+0x24/0x2b   2       39340
99      5       -       copy_strings_kernel+0x24/0x2b   2       39340
99      6       -       do_execve+0x11d/0x1bf   2       39340
99      7       -       do_execve+0x11d/0x1bf   2       39340

'+' = next: the next entry for this table_index will be written here.

Each history table row tracks up to 8 actions on it.  This is a
circular history table per kmap address row, so the 8 most recent
actions are stored and older actions on each row are lost.
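
For reference, a userspace sketch (not part of this patch) that re-prints
each row in oldest-to-newest order using the '+' marker.  The debugfs
mount point (/sys/kernel/debug) and the parsing details are illustrative
assumptions only:

#include <stdio.h>
#include <string.h>

#define NUM_ACTIONS 8	/* actions kept per table row */

int main(void)
{
	/* the '+' slot is where the next entry will be written, so it
	 * holds the oldest surviving (possibly still empty) entry; the
	 * slot just before it is the newest */
	FILE *f = fopen("/sys/kernel/debug/kmap-history", "r");
	char lines[NUM_ACTIONS][512], buf[512];
	unsigned int row, ind, plus = 0, i;
	char flag;

	if (!f) {
		perror("kmap-history");
		return 1;
	}
	while (fgets(buf, sizeof(buf), f)) {
		if (sscanf(buf, "%u\t%u\t%c", &row, &ind, &flag) != 3 ||
		    ind >= NUM_ACTIONS)
			continue;
		strcpy(lines[ind], buf);
		if (flag == '+')
			plus = ind;
		if (ind == NUM_ACTIONS - 1)	/* row complete, dump it */
			for (i = 0; i < NUM_ACTIONS; i++)
				fputs(lines[(plus + i) % NUM_ACTIONS], stdout);
	}
	fclose(f);
	return 0;
}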

Todo:  add tracking for kmap* variants (atomic, pfn, etc.).

Signed-off-by: Randy Dunlap <rdunlap@xenotime.net>
---
 arch/i386/Kconfig.debug    |    8 +
 arch/i386/mm/highmem.c     |   19 ----
 include/asm-i386/highmem.h |   49 +++++++++++
 lib/Kconfig.debug          |    2 
 mm/highmem.c               |  187 +++++++++++++++++++++++++++++++++++++++++++++
 5 files changed, 242 insertions(+), 23 deletions(-)

--- linux-2.6.17-rc4.orig/mm/highmem.c
+++ linux-2.6.17-rc4/mm/highmem.c
@@ -27,8 +27,50 @@
 #include <linux/hash.h>
 #include <linux/highmem.h>
 #include <linux/blktrace_api.h>
+#include <linux/jiffies.h>
+#include <linux/kallsyms.h>
 #include <asm/tlbflush.h>
 
+#ifdef CONFIG_DEBUG_KMAP
+
+#define NUM_ACTIONS 8
+#define NUM_ACTIONS_MASK (NUM_ACTIONS - 1)
+
+struct kmap_action {
+	unsigned long caller;	/* caller's return address */
+	int what;	/* add, decrement, etc. */
+	int count;	/* reference count */
+	unsigned long jiffies;
+};
+
+enum {
+	KMAP_FIRST = 1,
+	KMAP_ADDREF,
+	KMAP_DECREF,
+	KMAP_LAST,
+};
+
+struct kmap_hist {
+	unsigned int next;
+	struct kmap_action acts[NUM_ACTIONS];
+} kh_running[LAST_PKMAP];
+
+static inline void kmap_record_action(unsigned int nr, int action,
+				int refcount, void *retaddr)
+{
+	struct kmap_hist *kh = &kh_running[nr];
+	struct kmap_action *act = &kh->acts[kh->next];
+
+	act->caller = (unsigned long)retaddr;
+	act->what = action;
+	act->count = refcount;
+	act->jiffies = jiffies;
+	kh->next = (kh->next + 1) & NUM_ACTIONS_MASK;
+}
+#else
+#define kmap_record_action(nr, action, refcount, retaddr) do {} while (0)
+#endif
+
 static mempool_t *page_pool, *isa_page_pool;
 
 static void *mempool_alloc_pages_isa(gfp_t gfp_mask, void *data)
@@ -142,7 +184,11 @@ start:
 	return vaddr;
 }
 
+#ifdef CONFIG_DEBUG_KMAP
+void fastcall *kmap_high(struct page *page, void *retaddr)
+#else
 void fastcall *kmap_high(struct page *page)
+#endif
 {
 	unsigned long vaddr;
 
@@ -157,6 +203,8 @@ void fastcall *kmap_high(struct page *pa
 	if (!vaddr)
 		vaddr = map_new_virtual(page);
 	pkmap_count[PKMAP_NR(vaddr)]++;
+	kmap_record_action(PKMAP_NR(vaddr), KMAP_ADDREF,
+			pkmap_count[PKMAP_NR(vaddr)], retaddr);
 	BUG_ON(pkmap_count[PKMAP_NR(vaddr)] < 2);
 	spin_unlock(&kmap_lock);
 	return (void*) vaddr;
@@ -164,7 +212,11 @@ void fastcall *kmap_high(struct page *pa
 
 EXPORT_SYMBOL(kmap_high);
 
+#ifdef CONFIG_DEBUG_KMAP
+void fastcall kunmap_high(struct page *page, void *retaddr)
+#else
 void fastcall kunmap_high(struct page *page)
+#endif
 {
 	unsigned long vaddr;
 	unsigned long nr;
@@ -175,6 +227,13 @@ void fastcall kunmap_high(struct page *p
 	BUG_ON(!vaddr);
 	nr = PKMAP_NR(vaddr);
 
+	if (pkmap_count[nr] == 2)
+		kmap_record_action(PKMAP_NR(vaddr), KMAP_LAST,
+				pkmap_count[nr], retaddr);
+	else
+		kmap_record_action(PKMAP_NR(vaddr), KMAP_DECREF,
+				pkmap_count[nr], retaddr);
+
 	/*
 	 * A count must never go down to zero
 	 * without a TLB flush!
@@ -600,3 +659,131 @@ void __init page_address_init(void)
 }
 
 #endif	/* defined(CONFIG_HIGHMEM) && !defined(WANT_PAGE_VIRTUAL) */
+
+/* --------------------------------------------------- */
+
+#ifdef CONFIG_DEBUG_KMAP
+
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+
+static void *kmap_seq_start(struct seq_file *seq, loff_t *pos)
+{
+	struct kmap_hist *kha = seq->private;
+
+	if (*pos < 0 || *pos >= LAST_PKMAP)
+		return NULL;
+	return &kha[*pos];
+}
+
+static void *kmap_seq_next(struct seq_file *seq, void *v, loff_t *pos)
+{
+	(*pos)++;
+	return kmap_seq_start(seq, pos);
+}
+
+static int kmap_seq_show(struct seq_file *seq, void *v)
+{
+	struct kmap_hist *khbase = seq->private;
+	struct kmap_hist *kh = v;
+	struct kmap_action *act;
+	unsigned int ind;
+	const char *name;
+	char *modname;
+	unsigned long offset, size;
+	char namebuf[KSYM_NAME_LEN + 1];
+	char caller[256];
+
+	spin_lock(&kmap_lock);
+	for (ind = 0; ind < NUM_ACTIONS; ind++) {
+		act = &kh->acts[ind];
+		name = kallsyms_lookup(act->caller, &size, &offset,
+			&modname, namebuf);
+		if (!name)
+			sprintf(caller, "0x%lx", act->caller);
+		else if (modname)
+			sprintf(caller, "%s+%#lx/%#lx [%s]", name, offset,
+				size, modname);
+		else
+			sprintf(caller, "%s+%#lx/%#lx", name, offset, size);
+		seq_printf(seq, "%u\t%u\t%c\t%s\t%d\t%lu\n",
+				kh - &khbase[0], ind,
+				ind == kh->next ? '+' : '-',
+				caller, act->count,
+				act->jiffies);
+	}
+	spin_unlock(&kmap_lock);
+
+	return 0;
+}
+
+static void kmap_seq_stop(struct seq_file *seq, void *v)
+{
+}
+
+static struct seq_operations kmap_seq_ops = {
+	.start  = kmap_seq_start,
+	.next   = kmap_seq_next,
+	.stop   = kmap_seq_stop,
+	.show   = kmap_seq_show,
+};
+
+static int kmap_shared_open(struct inode *inode, struct file *file,
+			  struct kmap_hist *source)
+{
+	int ret;
+
+	ret = seq_open(file, &kmap_seq_ops);
+	if (!ret) {
+		struct seq_file *seq = file->private_data;
+		seq->private = source;
+	}
+	return ret;
+}
+
+/* *any* write here clears the kmap history tables */
+static ssize_t kmap_reset_write(struct file *file, const char __user *buf,
+				size_t count, loff_t *pos)
+{
+	spin_lock(&kmap_lock);
+	memset(kh_running, 0, sizeof(kh_running));
+	spin_unlock(&kmap_lock);
+	return count;
+}
+
+static int kmap_running_fop_open(struct inode *inode, struct file *file)
+{
+	return kmap_shared_open(inode, file, kh_running);
+}
+
+struct file_operations kmap_running_seq_fops = {
+	.open = kmap_running_fop_open,
+	.read = seq_read,
+	.write = kmap_reset_write,
+	.llseek = seq_lseek,
+	.release = seq_release,
+};
+
+static struct dentry *kmap_history_file;
+
+static __init int kmap_history_init(void)
+{
+	kmap_history_file = debugfs_create_file("kmap-history", 0644, NULL,
+			kh_running, &kmap_running_seq_fops);
+	if (!kmap_history_file)
+		goto out1;
+
+	return 0;
+
+out1:
+	return -ENOMEM;
+}
+
+static void kmap_history_exit(void)
+{
+	debugfs_remove(kmap_history_file);
+}
+
+subsys_initcall(kmap_history_init);
+module_exit(kmap_history_exit);
+#endif /* CONFIG_DEBUG_KMAP */
--- linux-2.6.17-rc4.orig/arch/i386/mm/highmem.c
+++ linux-2.6.17-rc4/arch/i386/mm/highmem.c
@@ -1,23 +1,6 @@
 #include <linux/highmem.h>
 #include <linux/module.h>
 
-void *kmap(struct page *page)
-{
-	might_sleep();
-	if (!PageHighMem(page))
-		return page_address(page);
-	return kmap_high(page);
-}
-
-void kunmap(struct page *page)
-{
-	if (in_interrupt())
-		BUG();
-	if (!PageHighMem(page))
-		return;
-	kunmap_high(page);
-}
-
 /*
  * kmap_atomic/kunmap_atomic is significantly faster than kmap/kunmap because
  * no global lock is needed and because the kmap code must perform a global TLB
@@ -106,8 +89,6 @@ struct page *kmap_atomic_to_page(void *p
 	return pte_page(*pte);
 }
 
-EXPORT_SYMBOL(kmap);
-EXPORT_SYMBOL(kunmap);
 EXPORT_SYMBOL(kmap_atomic);
 EXPORT_SYMBOL(kunmap_atomic);
 EXPORT_SYMBOL(kmap_atomic_to_page);
--- linux-2.6.17-rc4.orig/include/asm-i386/highmem.h
+++ linux-2.6.17-rc4/include/asm-i386/highmem.h
@@ -63,11 +63,54 @@ extern pte_t *pkmap_page_table;
 #define PKMAP_NR(virt)  ((virt-PKMAP_BASE) >> PAGE_SHIFT)
 #define PKMAP_ADDR(nr)  (PKMAP_BASE + ((nr) << PAGE_SHIFT))
 
-extern void * FASTCALL(kmap_high(struct page *page));
+#ifndef CONFIG_DEBUG_KMAP
+extern void *FASTCALL(kmap_high(struct page *page));
 extern void FASTCALL(kunmap_high(struct page *page));
 
-void *kmap(struct page *page);
-void kunmap(struct page *page);
+#define kmap(page)	__kmap(page)
+#define kunmap(page)	__kunmap(page)
+
+static inline void *__kmap(struct page *page)
+{
+	might_sleep();
+	if (!PageHighMem(page))
+		return page_address(page);
+	return kmap_high(page);
+}
+
+static inline void __kunmap(struct page *page)
+{
+	if (in_interrupt())
+		BUG();
+	if (!PageHighMem(page))
+		return;
+	kunmap_high(page);
+}
+#else
+extern void *FASTCALL(kmap_high(struct page *page, void *retaddr));
+extern void FASTCALL(kunmap_high(struct page *page, void *retaddr));
+
+#define kmap(page)	__kmap(page, __builtin_return_address(0))
+#define kunmap(page)	__kunmap(page, __builtin_return_address(0))
+
+static inline void *__kmap(struct page *page, void *retaddr)
+{
+	might_sleep();
+	if (!PageHighMem(page))
+		return page_address(page);
+	return kmap_high(page, retaddr);
+}
+
+static inline void __kunmap(struct page *page, void *retaddr)
+{
+	if (in_interrupt())
+		BUG();
+	if (!PageHighMem(page))
+		return;
+	kunmap_high(page, retaddr);
+}
+#endif
+
 void *kmap_atomic(struct page *page, enum km_type type);
 void kunmap_atomic(void *kvaddr, enum km_type type);
 void *kmap_atomic_pfn(unsigned long pfn, enum km_type type);
--- linux-2.6.17-rc4.orig/arch/i386/Kconfig.debug
+++ linux-2.6.17-rc4/arch/i386/Kconfig.debug
@@ -51,6 +51,14 @@ config DEBUG_PAGEALLOC
 	  This results in a large slowdown, but helps to find certain types
 	  of memory corruptions.
 
+config DEBUG_KMAP
+	bool "Keep history of kmap/kunmap actions"
+	depends on DEBUG_KERNEL && HIGHMEM
+	help
+	  Keep a table of kmap/kunmap actions for developer debugging.
+	  This table is accessible via debugfs, file "kmap-history".
+	  The data in it can be cleared (reset) by any write to it.
+
 config DEBUG_RODATA
 	bool "Write protect kernel read-only data structures"
 	depends on DEBUG_KERNEL
--- linux-2.6.17-rc4.orig/lib/Kconfig.debug
+++ linux-2.6.17-rc4/lib/Kconfig.debug
@@ -134,7 +134,7 @@ config DEBUG_HIGHMEM
 	bool "Highmem debugging"
 	depends on DEBUG_KERNEL && HIGHMEM
 	help
-	  This options enables addition error checking for high memory systems.
+	  Enables additional error checking for high memory systems.
 	  Disable for production systems.
 
 config DEBUG_BUGVERBOSE


---


* Re: [PATCH] kmap tracking
From: Zach Brown @ 2006-05-22 16:43 UTC
  To: Randy.Dunlap; +Cc: lkml, akpm

Randy.Dunlap wrote:
> From: Randy Dunlap <rdunlap@xenotime.net>
> 
> Track kmap/kunmap call history, storing caller function address,
> action, and time (jiffies), if CONFIG_DEBUG_KMAP is enabled.
> Based on a patch to 2.4.21 by Zach Brown that was used successfully
> at Oracle to track down some kmap/kunmap problems.

Thanks for bringing this to 2.6.. sorry for the lag in reviewing.

> +enum {
> +	KMAP_FIRST = 1,
> +	KMAP_ADDREF,
> +	KMAP_DECREF,
> +	KMAP_LAST,
> +};

I trust you got rid of these in the most recent version :)

> +#else
> +#define kmap_record_action(nr, action, refcount, retaddr) do {} while (0)
> +#endif

Make this an inline, please, so that we don't introduce unused var warnings.
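
Something like this, say (untested sketch, argument types copied from the
debug version) -- an empty static inline still evaluates and typechecks
its arguments, so nothing a caller sets up only for the debug call gets
flagged as unused when the option is off:

static inline void kmap_record_action(unsigned int nr, int action,
				int refcount, void *retaddr)
{
}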

> +static __init int kmap_history_init(void)
> +{
> +	kmap_history_file = debugfs_create_file("kmap-history", 0644, NULL,
> +			kh_running, &kmap_running_seq_fops);
> +	if (!kmap_history_file)
> +		goto out1;
> +
> +	return 0;
> +
> +out1:
> +	return -ENOMEM;

That seems noisy.. return -ENOMEM is probably fine for such a trivial
function :).

> +#define kmap(page)	__kmap(page, __builtin_return_address(0))
> +#define kunmap(page)	__kunmap(page, __builtin_return_address(0))

Hmm, I was hoping we wouldn't have to do this.  Can we use
__builtin_return_address(1) from within the debug paths instead of
passing down (0)?  Then we wouldn't have to ifdef around the declarations..
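
Roughly this, inside kmap_high()/kunmap_high() with their prototypes left
alone (untested sketch; it assumes kmap() itself remains a real call frame
so that one level up is the interesting caller):

 	pkmap_count[PKMAP_NR(vaddr)]++;
-	kmap_record_action(PKMAP_NR(vaddr), KMAP_ADDREF,
-			pkmap_count[PKMAP_NR(vaddr)], retaddr);
+	kmap_record_action(PKMAP_NR(vaddr), KMAP_ADDREF,
+			pkmap_count[PKMAP_NR(vaddr)],
+			__builtin_return_address(1));
 	BUG_ON(pkmap_count[PKMAP_NR(vaddr)] < 2);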

- z


* Re: [PATCH] kmap tracking
From: Andrew Morton @ 2006-05-22 17:39 UTC
  To: Zach Brown; +Cc: rdunlap, linux-kernel

Zach Brown <zach.brown@oracle.com> wrote:
>
>  Randy.Dunlap wrote:
>  > From: Randy Dunlap <rdunlap@xenotime.net>
>  > 
>  > Track kmap/kunmap call history, storing caller function address,
>  > action, and time (jiffies), if CONFIG_DEBUG_KMAP is enabled.
>  > Based on a patch to 2.4.21 by Zach Brown that was used successfully
>  > at Oracle to track down some kmap/kunmap problems.
> 
>  Thanks for bringing this to 2.6.. sorry for the lag in reviewing.

I was scratching my head over this patch trying to think of any bug in
recent years which it would have detected.  I failed.

So...  what's the motivator here?


* Re: [PATCH] kmap tracking
From: Zach Brown @ 2006-05-22 17:58 UTC
  To: Andrew Morton; +Cc: rdunlap, linux-kernel


> I was scratching my head over this patch trying to think of any bug in
> recent years which it would have detected.  I failed.

2.4 nfs used to require that it be able to kmap entire RPCs for them to
make forward progress.  Its file->write() required RPC forward progress
before it would return.  And some callers were holding kmaps across
file->write() calls.  So with enough concurrent callers doing that the
system would get stuck.
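
The shape of it, schematically (illustrative only -- the __user /
set_fs(KERNEL_DS) details a real kernel-buffer write needs are glossed
over, and the function name is made up):

/* Pins a pkmap slot for the whole duration of ->write(), while the
 * write path needs kmap()s of its own to make progress.  Enough
 * concurrent callers exhaust LAST_PKMAP and everyone sleeps in
 * map_new_virtual() waiting for slots that are never released.
 */
static ssize_t write_kmapped_page(struct file *file, struct page *page,
				  size_t len, loff_t *pos)
{
	char *kaddr = kmap(page);		/* takes a pkmap entry */
	ssize_t ret = file->f_op->write(file, kaddr, len, pos);

	kunmap(page);				/* only released here */
	return ret;
}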

We used the patch to see who the callers were when the system got into
that state.

One of them was core dumping, believe it or not.  2.6 elf_core_dump()
still holds a kmap across file->write(), which seems unwise, but I
haven't gotten to seeing if it's worth worrying about or not.

So maybe these days the kmap story is less dreadful and it isn't as
helpful, but that's what we used it for.

- z


* Re: [PATCH] kmap tracking
From: Randy.Dunlap @ 2006-05-22 18:11 UTC
  To: Zach Brown; +Cc: akpm, linux-kernel

On Mon, 22 May 2006 10:58:54 -0700 Zach Brown wrote:

> 
> > I was scratching my head over this patch trying to think of any bug in
> > recent years which it would have detected.  I failed.
> 
> 2.4 nfs used to require that it be able to kmap entire RPCs for them to
> make forward progress.  Its file->write() required RPC forward progress
> before it would return.  And some callers were holding kmaps across
> file->write() calls.  So with enough concurrent callers doing that the
> system would get stuck.
> 
> We used the patch to see who the callers were when the system got into
> that state.
> 
> One of them was core dumping, believe it or not.  2.6 elf_core_dump()
> still holds a kmap across file->write(), which seems unwise, but I
> haven't gotten to seeing if it's worth worrying about or not.
> 
> So maybe these days the kmap story is less dreadful and it isn't as
> helpful, but that's what we used it for.

I was planning to add kmap_atomic* variants to the patch.
I could see it being useful for those callers, but maybe problems
with them would be more obvious anyway and wouldn't need such
a patch.

---
~Randy


* Re: [PATCH] kmap tracking
From: Randy.Dunlap @ 2006-05-23 18:42 UTC
  To: Zach Brown; +Cc: linux-kernel, akpm

On Mon, 22 May 2006 09:43:24 -0700 Zach Brown wrote:

> Randy.Dunlap wrote:
> > From: Randy Dunlap <rdunlap@xenotime.net>
> > 
> > Track kmap/kunmap call history, storing caller function address,
> > action, and time (jiffies), if CONFIG_DEBUG_KMAP is enabled.
> > Based on a patch to 2.4.21 by Zach Brown that was used successfully
> > at Oracle to track down some kmap/kunmap problems.
> 
> Thanks for bringing this to 2.6.. sorry for the lag in reviewing.
> 
> > +enum {
> > +	KMAP_FIRST = 1,
> > +	KMAP_ADDREF,
> > +	KMAP_DECREF,
> > +	KMAP_LAST,
> > +};
> 
> I trust you got rid of these in the most recent version :)

Yes.

> > +#else
> > +#define kmap_record_action(nr, action, refcount, retaddr) do {} while (0)
> > +#endif
> 
> Make this an inline, please, so that we don't introduce unused var warnings.

What warnings?  I built the config option =y and =n and didn't see
any warnings.  (not that I mind changing it)
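
Presumably it would only show up if a caller declared something purely
for the debug call -- hypothetical example, not from the current patch:

	void *where = __builtin_return_address(0);	/* only consumed below */

	kmap_record_action(nr, KMAP_ADDREF, pkmap_count[nr], where);
	/* with CONFIG_DEBUG_KMAP=n the do {} while (0) macro never
	 * evaluates its arguments, so gcc flags 'where' as unused;
	 * a static inline stub would still consume it */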

> > +static __init int kmap_history_init(void)
> > +{
> > +	kmap_history_file = debugfs_create_file("kmap-history", 0644, NULL,
> > +			kh_running, &kmap_running_seq_fops);
> > +	if (!kmap_history_file)
> > +		goto out1;
> > +
> > +	return 0;
> > +
> > +out1:
> > +	return -ENOMEM;
> 
> That seems noisy.. return -ENOMEM is probably fine for such a trivial
> function :).

noisy how/where?

> > +#define kmap(page)	__kmap(page, __builtin_return_address(0))
> > +#define kunmap(page)	__kunmap(page, __builtin_return_address(0))
> 
> Hmm, I was hoping we wouldn't have to do this.  Can we use
> __builtin_return_address(1) from within the debug paths instead of
> passing down (0)?  Then we wouldn't have to ifdef around the declarations..

I dunno.  I'll test it some. I thought that there were some
problems with __builtin_return_address(1), but I don't know for sure.

---
~Randy


* Re: [PATCH] kmap tracking
From: Randy.Dunlap @ 2006-05-24  1:59 UTC
  To: lkml; +Cc: zach.brown, akpm

On Tue, 23 May 2006 11:42:41 -0700 Randy.Dunlap wrote:

> On Mon, 22 May 2006 09:43:24 -0700 Zach Brown wrote:
> 
> > Randy.Dunlap wrote:
> > > From: Randy Dunlap <rdunlap@xenotime.net>
> > > 
> > > Track kmap/kunmap call history, storing caller function address,
> > > action, and time (jiffies), if CONFIG_DEBUG_KMAP is enabled.
> > > Based on a patch to 2.4.21 by Zach Brown that was used successfully
> > > at Oracle to track down some kmap/kunmap problems.
> > 
> > Thanks for bringing this to 2.6.. sorry for the lag in reviewing.
> > 
> > > +enum {
> > > +	KMAP_FIRST = 1,
> > > +	KMAP_ADDREF,
> > > +	KMAP_DECREF,
> > > +	KMAP_LAST,
> > > +};
> > 
> > I trust you got rid of these in the most recent version :)
> 
> Yes.
Gone.

> > > +#else
> > > +#define kmap_record_action(nr, action, refcount, retaddr) do {} while (0)
> > > +#endif
> > 
> > Make this an inline, please, so that we don't introduce unused var warnings.
> 
> What warnings?  I built the config option =y and =n and didn't see
> any warnings.  (not that I mind changing it)

I got syntax errors when I changed it to inline, so it's back to
being a macro.

> > > +#define kmap(page)	__kmap(page, __builtin_return_address(0))
> > > +#define kunmap(page)	__kunmap(page, __builtin_return_address(0))
> > 
> > Hmm, I was hoping we wouldn't have to do this.  Can we use
> > __builtin_return_address(1) from within the debug paths instead of
> > passing down (0)?  Then we wouldn't have to ifdef around the declarations..

I have little confidence in the reliability of __builtin_return_address(1).
I see emails about __builtin_return_address() not working unless
CONFIG_FRAME_POINTER is enabled, and sometimes not even then.

Here's an updated kmap tracking patch.  All I have done is eliminate
the unused enums that you mentioned.  And I don't plan
to add support for kmap_atomic variants unless someone finds a need
for that.

---
From: Randy Dunlap <rdunlap@xenotime.net>

Track kmap/kunmap call history, storing caller function address,
action, and time (jiffies), if CONFIG_DEBUG_KMAP is enabled.
Based on a patch to 2.4.21 by Zach Brown.
Built with CONFIG_DEBUG_KMAP =y and =n.

Creates "kmap-history" in debugfs.
Any write to kmap-history clears the history table.

Output format is (all tab-separated):
table_index action_index next_flag caller_function+offset/size refcount jiffies

Example:
99      0       -       do_execve+0x133/0x1bf   2       39340
99      1       -       do_execve+0x133/0x1bf   2       39340
99      2       +       proc_pid_cmdline+0x53/0xeb      2       4294946079
99      3       -       proc_pid_cmdline+0x53/0xeb      2       4294946079
99      4       -       copy_strings_kernel+0x24/0x2b   2       39340
99      5       -       copy_strings_kernel+0x24/0x2b   2       39340
99      6       -       do_execve+0x11d/0x1bf   2       39340
99      7       -       do_execve+0x11d/0x1bf   2       39340

'+' = next: the next entry for this table_index will be written here.

Each history table row tracks up to 8 actions on it.  This is a
circular history table per kmap address row, so the 8 most recent
actions are stored and older actions on each row are lost.

I don't plan to add tracking for kmap* variants (atomic, pfn, etc.)
unless a need arises for that.

Signed-off-by: Randy Dunlap <rdunlap@xenotime.net>
---
 arch/i386/Kconfig.debug    |    8 ++
 arch/i386/mm/highmem.c     |   19 ----
 include/asm-i386/highmem.h |   49 +++++++++++-
 lib/Kconfig.debug          |    2 
 mm/highmem.c               |  174 +++++++++++++++++++++++++++++++++++++++++++++
 5 files changed, 229 insertions(+), 23 deletions(-)

--- linux-2617-rc4.orig/mm/highmem.c
+++ linux-2617-rc4/mm/highmem.c
@@ -27,8 +27,41 @@
 #include <linux/hash.h>
 #include <linux/highmem.h>
 #include <linux/blktrace_api.h>
+#include <linux/jiffies.h>
+#include <linux/kallsyms.h>
 #include <asm/tlbflush.h>
 
+#ifdef CONFIG_DEBUG_KMAP
+
+#define NUM_ACTIONS 8
+#define NUM_ACTIONS_MASK (NUM_ACTIONS - 1)
+
+struct kmap_action {
+	unsigned long caller;	/* caller's return address */
+	unsigned long jiffies;
+	int count;	/* reference count */
+};
+
+struct kmap_hist {
+	unsigned int next;
+	struct kmap_action acts[NUM_ACTIONS];
+} kh_running[LAST_PKMAP];
+
+static inline void kmap_record_action(unsigned int nr,
+				int refcount, void *retaddr)
+{
+	struct kmap_hist *kh = &kh_running[nr];
+	struct kmap_action *act = &kh->acts[kh->next];
+
+	act->caller = (unsigned long)retaddr;
+	act->count = refcount;
+	act->jiffies = jiffies;
+	kh->next = (kh->next + 1) & NUM_ACTIONS_MASK;
+}
+#else
+#define kmap_record_action(nr, refcount, retaddr) do { } while(0)
+#endif
+
 static mempool_t *page_pool, *isa_page_pool;
 
 static void *mempool_alloc_pages_isa(gfp_t gfp_mask, void *data)
@@ -142,7 +175,11 @@ start:
 	return vaddr;
 }
 
+#ifdef CONFIG_DEBUG_KMAP
+void fastcall *kmap_high(struct page *page, void *retaddr)
+#else
 void fastcall *kmap_high(struct page *page)
+#endif
 {
 	unsigned long vaddr;
 
@@ -157,6 +194,8 @@ void fastcall *kmap_high(struct page *pa
 	if (!vaddr)
 		vaddr = map_new_virtual(page);
 	pkmap_count[PKMAP_NR(vaddr)]++;
+	kmap_record_action(PKMAP_NR(vaddr),
+			pkmap_count[PKMAP_NR(vaddr)], retaddr);
 	BUG_ON(pkmap_count[PKMAP_NR(vaddr)] < 2);
 	spin_unlock(&kmap_lock);
 	return (void*) vaddr;
@@ -164,7 +203,11 @@ void fastcall *kmap_high(struct page *pa
 
 EXPORT_SYMBOL(kmap_high);
 
+#ifdef CONFIG_DEBUG_KMAP
+void fastcall kunmap_high(struct page *page, void *retaddr)
+#else
 void fastcall kunmap_high(struct page *page)
+#endif
 {
 	unsigned long vaddr;
 	unsigned long nr;
@@ -175,6 +218,9 @@ void fastcall kunmap_high(struct page *p
 	BUG_ON(!vaddr);
 	nr = PKMAP_NR(vaddr);
 
+	kmap_record_action(PKMAP_NR(vaddr),
+			pkmap_count[nr], retaddr);
+
 	/*
 	 * A count must never go down to zero
 	 * without a TLB flush!
@@ -600,3 +646,131 @@ void __init page_address_init(void)
 }
 
 #endif	/* defined(CONFIG_HIGHMEM) && !defined(WANT_PAGE_VIRTUAL) */
+
+/* --------------------------------------------------- */
+
+#ifdef CONFIG_DEBUG_KMAP
+
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+
+static void *kmap_seq_start(struct seq_file *seq, loff_t *pos)
+{
+	struct kmap_hist *kha = seq->private;
+
+	if (*pos < 0 || *pos >= LAST_PKMAP)
+		return NULL;
+	return &kha[*pos];
+}
+
+static void *kmap_seq_next(struct seq_file *seq, void *v, loff_t *pos)
+{
+	(*pos)++;
+	return kmap_seq_start(seq, pos);
+}
+
+static int kmap_seq_show(struct seq_file *seq, void *v)
+{
+	struct kmap_hist *khbase = seq->private;
+	struct kmap_hist *kh = v;
+	struct kmap_action *act;
+	unsigned int ind;
+	const char *name;
+	char *modname;
+	unsigned long offset, size;
+	char namebuf[KSYM_NAME_LEN + 1];
+	char caller[256];
+
+	spin_lock(&kmap_lock);
+	for (ind = 0; ind < NUM_ACTIONS; ind++) {
+		act = &kh->acts[ind];
+		name = kallsyms_lookup(act->caller, &size, &offset,
+			&modname, namebuf);
+		if (!name)
+			sprintf(caller, "0x%lx", act->caller);
+		else if (modname)
+			sprintf(caller, "%s+%#lx/%#lx [%s]", name, offset,
+				size, modname);
+		else
+			sprintf(caller, "%s+%#lx/%#lx", name, offset, size);
+		seq_printf(seq, "%u\t%u\t%c\t%s\t%d\t%lu\n",
+				kh - &khbase[0], ind,
+				ind == kh->next ? '+' : '-',
+				caller, act->count,
+				act->jiffies);
+	}
+	spin_unlock(&kmap_lock);
+
+	return 0;
+}
+
+static void kmap_seq_stop(struct seq_file *seq, void *v)
+{
+}
+
+static struct seq_operations kmap_seq_ops = {
+	.start  = kmap_seq_start,
+	.next   = kmap_seq_next,
+	.stop   = kmap_seq_stop,
+	.show   = kmap_seq_show,
+};
+
+static int kmap_shared_open(struct inode *inode, struct file *file,
+			  struct kmap_hist *source)
+{
+	int ret;
+
+	ret = seq_open(file, &kmap_seq_ops);
+	if (!ret) {
+		struct seq_file *seq = file->private_data;
+		seq->private = source;
+	}
+	return ret;
+}
+
+/* *any* write here clears the kmap history tables */
+static ssize_t kmap_reset_write(struct file *file, const char __user *buf,
+				size_t count, loff_t *pos)
+{
+	spin_lock(&kmap_lock);
+	memset(kh_running, 0, sizeof(kh_running));
+	spin_unlock(&kmap_lock);
+	return count;
+}
+
+static int kmap_running_fop_open(struct inode *inode, struct file *file)
+{
+	return kmap_shared_open(inode, file, kh_running);
+}
+
+struct file_operations kmap_running_seq_fops = {
+	.open = kmap_running_fop_open,
+	.read = seq_read,
+	.write = kmap_reset_write,
+	.llseek = seq_lseek,
+	.release = seq_release,
+};
+
+static struct dentry *kmap_history_file;
+
+static __init int kmap_history_init(void)
+{
+	kmap_history_file = debugfs_create_file("kmap-history", 0644, NULL,
+			kh_running, &kmap_running_seq_fops);
+	if (!kmap_history_file)
+		goto out1;
+
+	return 0;
+
+out1:
+	return -ENOMEM;
+}
+
+static void kmap_history_exit(void)
+{
+	debugfs_remove(kmap_history_file);
+}
+
+subsys_initcall(kmap_history_init);
+module_exit(kmap_history_exit);
+#endif /* CONFIG_DEBUG_KMAP */
--- linux-2617-rc4.orig/arch/i386/mm/highmem.c
+++ linux-2617-rc4/arch/i386/mm/highmem.c
@@ -1,23 +1,6 @@
 #include <linux/highmem.h>
 #include <linux/module.h>
 
-void *kmap(struct page *page)
-{
-	might_sleep();
-	if (!PageHighMem(page))
-		return page_address(page);
-	return kmap_high(page);
-}
-
-void kunmap(struct page *page)
-{
-	if (in_interrupt())
-		BUG();
-	if (!PageHighMem(page))
-		return;
-	kunmap_high(page);
-}
-
 /*
  * kmap_atomic/kunmap_atomic is significantly faster than kmap/kunmap because
  * no global lock is needed and because the kmap code must perform a global TLB
@@ -106,8 +89,6 @@ struct page *kmap_atomic_to_page(void *p
 	return pte_page(*pte);
 }
 
-EXPORT_SYMBOL(kmap);
-EXPORT_SYMBOL(kunmap);
 EXPORT_SYMBOL(kmap_atomic);
 EXPORT_SYMBOL(kunmap_atomic);
 EXPORT_SYMBOL(kmap_atomic_to_page);
--- linux-2617-rc4.orig/include/asm-i386/highmem.h
+++ linux-2617-rc4/include/asm-i386/highmem.h
@@ -63,11 +63,54 @@ extern pte_t *pkmap_page_table;
 #define PKMAP_NR(virt)  ((virt-PKMAP_BASE) >> PAGE_SHIFT)
 #define PKMAP_ADDR(nr)  (PKMAP_BASE + ((nr) << PAGE_SHIFT))
 
-extern void * FASTCALL(kmap_high(struct page *page));
+#ifndef CONFIG_DEBUG_KMAP
+extern void *FASTCALL(kmap_high(struct page *page));
 extern void FASTCALL(kunmap_high(struct page *page));
 
-void *kmap(struct page *page);
-void kunmap(struct page *page);
+#define kmap(page)	__kmap(page)
+#define kunmap(page)	__kunmap(page)
+
+static inline void *__kmap(struct page *page)
+{
+	might_sleep();
+	if (!PageHighMem(page))
+		return page_address(page);
+	return kmap_high(page);
+}
+
+static inline void __kunmap(struct page *page)
+{
+	if (in_interrupt())
+		BUG();
+	if (!PageHighMem(page))
+		return;
+	kunmap_high(page);
+}
+#else
+extern void *FASTCALL(kmap_high(struct page *page, void *retaddr));
+extern void FASTCALL(kunmap_high(struct page *page, void *retaddr));
+
+#define kmap(page)	__kmap(page, __builtin_return_address(0))
+#define kunmap(page)	__kunmap(page, __builtin_return_address(0))
+
+static inline void *__kmap(struct page *page, void *retaddr)
+{
+	might_sleep();
+	if (!PageHighMem(page))
+		return page_address(page);
+	return kmap_high(page, retaddr);
+}
+
+static inline void __kunmap(struct page *page, void *retaddr)
+{
+	if (in_interrupt())
+		BUG();
+	if (!PageHighMem(page))
+		return;
+	kunmap_high(page, retaddr);
+}
+#endif
+
 void *kmap_atomic(struct page *page, enum km_type type);
 void kunmap_atomic(void *kvaddr, enum km_type type);
 void *kmap_atomic_pfn(unsigned long pfn, enum km_type type);
--- linux-2617-rc4.orig/arch/i386/Kconfig.debug
+++ linux-2617-rc4/arch/i386/Kconfig.debug
@@ -51,6 +51,14 @@ config DEBUG_PAGEALLOC
 	  This results in a large slowdown, but helps to find certain types
 	  of memory corruptions.
 
+config DEBUG_KMAP
+	bool "Keep history of kmap/kunmap actions"
+	depends on DEBUG_KERNEL && HIGHMEM
+	help
+	  Keep a table of kmap/kunmap actions for developer debugging.
+	  This table is accessible via debugfs, file "kmap-history".
+	  The data in it can be cleared (reset) by any write to it.
+
 config DEBUG_RODATA
 	bool "Write protect kernel read-only data structures"
 	depends on DEBUG_KERNEL
--- linux-2617-rc4.orig/lib/Kconfig.debug
+++ linux-2617-rc4/lib/Kconfig.debug
@@ -134,7 +134,7 @@ config DEBUG_HIGHMEM
 	bool "Highmem debugging"
 	depends on DEBUG_KERNEL && HIGHMEM
 	help
-	  This options enables addition error checking for high memory systems.
+	  Enables additional error checking for high memory systems.
 	  Disable for production systems.
 
 config DEBUG_BUGVERBOSE

