All of lore.kernel.org
 help / color / mirror / Atom feed
* [PATCH 2.6.19 00/10] Kernel memory leak detector 0.12
@ 2006-11-30 22:59 Catalin Marinas
  2006-11-30 22:59 ` [PATCH 2.6.19 01/10] Base support for kmemleak Catalin Marinas
                   ` (9 more replies)
  0 siblings, 10 replies; 11+ messages in thread
From: Catalin Marinas @ 2006-11-30 22:59 UTC (permalink / raw)
  To: linux-kernel

This series of patches represent version 0.12 of the kernel memory
leak detector. See the Documentation/kmemleak.txt file for a more
detailed description. The patches are downloadable from (the whole
patch or the broken-out series) http://www.procode.org/kmemleak/

What's new in this version:

- updated to Linux 2.6.19
- fixed a series of false positives caused by not scanning all the
  data sections in modules (like .data.read_mostly)
- reduced the transient false positives by imposing a configurable
  threshold on the number of times an object needs to be detected as
  a leak before being reported

-- 
Catalin

^ permalink raw reply	[flat|nested] 11+ messages in thread

* [PATCH 2.6.19 01/10] Base support for kmemleak
  2006-11-30 22:59 [PATCH 2.6.19 00/10] Kernel memory leak detector 0.12 Catalin Marinas
@ 2006-11-30 22:59 ` Catalin Marinas
  2006-11-30 23:00 ` [PATCH 2.6.19 02/10] Kmemleak documentation Catalin Marinas
                   ` (8 subsequent siblings)
  9 siblings, 0 replies; 11+ messages in thread
From: Catalin Marinas @ 2006-11-30 22:59 UTC (permalink / raw)
  To: linux-kernel

This patch adds the base support for the kernel memory leak detector. It
traces the memory allocation/freeing in a way similar to Boehm's
conservative garbage collector, the difference being that the orphan
pointers are not freed but only shown in /proc/memleak. Enabling this
feature would introduce an overhead to memory allocations.

Signed-off-by: Catalin Marinas <catalin.marinas@gmail.com>
---

 include/linux/kernel.h  |    7 
 include/linux/memleak.h |  111 ++++
 init/main.c             |    3 
 lib/Kconfig.debug       |  114 ++++
 mm/Makefile             |    1 
 mm/memleak.c            | 1477 +++++++++++++++++++++++++++++++++++++++++++++++
 6 files changed, 1711 insertions(+), 2 deletions(-)

diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index b9b5e4b..936a2ce 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -13,6 +13,7 @@
 #include <linux/types.h>
 #include <linux/compiler.h>
 #include <linux/bitops.h>
+#include <linux/memleak.h>
 #include <asm/byteorder.h>
 #include <asm/bug.h>
 
@@ -300,9 +301,13 @@ static inline int __attribute__ ((format
  * @member:	the name of the member within the struct.
  *
  */
-#define container_of(ptr, type, member) ({			\
+#define __container_of(ptr, type, member) ({			\
         const typeof( ((type *)0)->member ) *__mptr = (ptr);	\
         (type *)( (char *)__mptr - offsetof(type,member) );})
+#define container_of(ptr, type, member) ({			\
+	DECLARE_MEMLEAK_OFFSET(container_of, type, member);	\
+	__container_of(ptr, type, member);			\
+})
 
 /*
  * Check at compile time that something is of a particular type.
diff --git a/include/linux/memleak.h b/include/linux/memleak.h
new file mode 100644
index 0000000..39669bf
--- /dev/null
+++ b/include/linux/memleak.h
@@ -0,0 +1,111 @@
+/*
+ * include/linux/memleak.h
+ *
+ * Copyright (C) 2006 ARM Limited
+ * Written by Catalin Marinas <catalin.marinas@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#ifndef __MEMLEAK_H
+#define __MEMLEAK_H
+
+#include <linux/stddef.h>
+
+struct memleak_offset {
+	unsigned long type_id;
+	unsigned long member_type_id;
+	unsigned long offset;
+};
+
+/* type id approximation */
+#define ml_guess_typeid(size)	((unsigned long)(size))
+#define ml_typeid(type)		ml_guess_typeid(sizeof(type))
+#define ml_sizeof(typeid)	((size_t)(typeid))
+
+#ifdef CONFIG_DEBUG_MEMLEAK
+
+/* if offsetof(type, member) is not a constant known at compile time,
+ * just use 0 instead since we cannot add it to the
+ * .init.memleak_offsets section
+ */
+#define memleak_offsetof(type, member)				\
+	(__builtin_constant_p(offsetof(type, member)) ?		\
+	 offsetof(type, member) : 0)
+
+#define DECLARE_MEMLEAK_OFFSET(name, type, member)		\
+	static const struct memleak_offset			\
+	__attribute__ ((__section__ (".init.memleak_offsets")))	\
+	__attribute_used__ __memleak_offset__##name = {		\
+		ml_typeid(type),				\
+		ml_typeid(typeof(((type *)0)->member)),		\
+		memleak_offsetof(type, member)			\
+	}
+
+extern void memleak_init(void);
+extern void memleak_alloc(const void *ptr, size_t size, int ref_count);
+extern void memleak_free(const void *ptr);
+extern void memleak_padding(const void *ptr, unsigned long offset, size_t size);
+extern void memleak_not_leak(const void *ptr);
+extern void memleak_ignore(const void *ptr);
+extern void memleak_scan_area(const void *ptr, unsigned long offset, size_t length);
+extern void memleak_insert_aliases(struct memleak_offset *ml_off_start,
+				   struct memleak_offset *ml_off_end);
+
+static inline void memleak_erase(void **ptr)
+{
+	*ptr = NULL;
+}
+
+#define memleak_container(type, member)	{			\
+	DECLARE_MEMLEAK_OFFSET(container_of, type, member);	\
+}
+
+extern void memleak_typeid_raw(const void *ptr, unsigned long type_id);
+#define memleak_typeid(ptr, type) \
+	memleak_typeid_raw(ptr, ml_typeid(type))
+
+#else
+
+#define DECLARE_MEMLEAK_OFFSET(name, type, member)
+
+static inline void memleak_init(void)
+{ }
+static inline void memleak_alloc(const void *ptr, size_t size, int ref_count)
+{ }
+static inline void memleak_free(const void *ptr)
+{ }
+static inline void memleak_padding(const void *ptr, unsigned long offset, size_t size)
+{ }
+static inline void memleak_not_leak(const void *ptr)
+{ }
+static inline void memleak_ignore(const void *ptr)
+{ }
+static inline void memleak_scan_area(const void *ptr, unsigned long offset, size_t length)
+{ }
+static inline void memleak_insert_aliases(struct memleak_offset *ml_off_start,
+					  struct memleak_offset *ml_off_end)
+{ }
+static inline void memleak_erase(void **ptr)
+{ }
+
+#define memleak_container(type, member)
+
+static inline void memleak_typeid_raw(const void *ptr, unsigned long type_id)
+{ }
+#define memleak_typeid(ptr, type)
+
+#endif	/* CONFIG_DEBUG_MEMLEAK */
+
+#endif	/* __MEMLEAK_H */
diff --git a/init/main.c b/init/main.c
index 36f608a..39be647 100644
--- a/init/main.c
+++ b/init/main.c
@@ -571,6 +571,8 @@ asmlinkage void __init start_kernel(void
 	cpuset_init_early();
 	mem_init();
 	kmem_cache_init();
+	radix_tree_init();
+	memleak_init();
 	setup_per_cpu_pageset();
 	numa_policy_init();
 	if (late_time_init)
@@ -591,7 +593,6 @@ asmlinkage void __init start_kernel(void
 	key_init();
 	security_init();
 	vfs_caches_init(num_physpages);
-	radix_tree_init();
 	signals_init();
 	/* rootfs populating might need page-writeback */
 	page_writeback_init();
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index d367910..2dda93b 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -112,6 +112,120 @@ config DEBUG_SLAB_LEAK
 	bool "Memory leak debugging"
 	depends on DEBUG_SLAB
 
+menuconfig DEBUG_MEMLEAK
+	bool "Kernel memory leak detector"
+	default n
+	depends on EXPERIMENTAL && DEBUG_SLAB
+	select DEBUG_FS
+	select STACKTRACE
+	select FRAME_POINTER
+	select KALLSYMS
+	help
+	  Say Y here if you want to enable the memory leak
+	  detector. The memory allocation/freeing is traced in a way
+	  similar to Boehm's conservative garbage collector, the
+	  difference being that the orphan objects are not freed but
+	  only shown in /sys/kernel/debug/memleak. Enabling this
+	  feature will introduce an overhead to memory
+	  allocations. See Documentation/kmemleak.txt for more
+	  details.
+
+	  In order to access the memleak file, debugfs needs to be
+	  mounted (usually at /sys/kernel/debug).
+
+config DEBUG_MEMLEAK_HASH_BITS
+	int "Pointer hash bits"
+	default 16
+	depends on DEBUG_MEMLEAK
+	help
+	  This option sets the number of bits used for the pointer
+	  hash table. Higher values give better memory scanning
+	  performance but also lead to bigger RAM usage. The size of
+	  the allocated hash table is (sizeof(void*) * 2^hash_bits).
+
+	  The minimum recommended value is 16. A maximum value of
+	  around 20 should be sufficient.
+
+config DEBUG_MEMLEAK_TRACE_LENGTH
+	int "Stack trace length"
+	default 4
+	depends on DEBUG_MEMLEAK && FRAME_POINTER
+	help
+	  This option sets the length of the stack trace for the
+	  allocated objects tracked by kmemleak.
+
+config DEBUG_MEMLEAK_PREINIT_OBJECTS
+	int "Pre-init actions buffer size"
+	default 512
+	depends on DEBUG_MEMLEAK
+	help
+	  This is the buffer for storing the memory allocation/freeing
+	  calls before kmemleak is fully initialized. Each element in
+	  the buffer takes 24 bytes on a 32 bit architecture. This
+	  buffer will be freed once the system initialization is
+	  completed.
+
+config DEBUG_MEMLEAK_SECONDARY_ALIASES
+	bool "Create secondary level pointer aliases"
+	default y
+	depends on DEBUG_MEMLEAK
+	help
+	  This option creates aliases for container_of(container_of(member))
+	  access to objects. Disabling this option reduces the chances of
+	  false negatives but it can slightly increase the number of false
+	  positives.
+
+config DEBUG_MEMLEAK_TASK_STACKS
+	bool "Scan task kernel stacks"
+	default y
+	depends on DEBUG_MEMLEAK
+	help
+	  This option enables the scanning of the task kernel
+	  stacks. This option can introduce false negatives because of
+	  the randomness of stacks content.
+
+	  If unsure, say Y.
+
+config DEBUG_MEMLEAK_ORPHAN_FREEING
+	bool "Notify when freeing orphan objects"
+	default n
+	depends on DEBUG_MEMLEAK
+	help
+	  This option enables the notification when objects
+	  considered leaks are freed. The stack dump and the object
+	  information displayed allow an easier identification of
+	  false positives. Use this mainly for debugging kmemleak.
+
+	  If unsure, say N.
+
+config DEBUG_MEMLEAK_REPORT_THLD
+	int "Unreferenced reporting threshold"
+	default 1
+	depends on DEBUG_MEMLEAK
+	help
+	  This option sets the number of times an object needs to be
+	  detected as unreferenced before being reported as a memory
+	  leak. A value of 0 means that the object will be reported
+	  the first time it is found as unreferenced. Other positive
+	  values mean that the object needs to be found as
+	  unreferenced a specified number of times prior to being
+	  reported.
+
+	  This is useful to avoid the reporting of transient false
+	  positives where the pointers might be held in CPU registers,
+	  especially on SMP systems, or on the stack when the stack
+	  scanning option is disabled.
+
+config DEBUG_MEMLEAK_REPORTS_NR
+	int "Maximum number of reported leaks"
+	default 100
+	depends on DEBUG_MEMLEAK
+	help
+	  This option sets the maximum number of leaks reported. If
+	  this number is too big and there are leaks to be reported,
+	  reading the /sys/kernel/debug/memleak file could lead to
+	  some soft-locks.
+
 config DEBUG_PREEMPT
 	bool "Debug preemptible kernel"
 	depends on DEBUG_KERNEL && PREEMPT && TRACE_IRQFLAGS_SUPPORT
diff --git a/mm/Makefile b/mm/Makefile
index f3c077e..aafe03f 100644
--- a/mm/Makefile
+++ b/mm/Makefile
@@ -29,3 +29,4 @@ obj-$(CONFIG_MEMORY_HOTPLUG) += memory_h
 obj-$(CONFIG_FS_XIP) += filemap_xip.o
 obj-$(CONFIG_MIGRATION) += migrate.o
 obj-$(CONFIG_SMP) += allocpercpu.o
+obj-$(CONFIG_DEBUG_MEMLEAK) += memleak.o
diff --git a/mm/memleak.c b/mm/memleak.c
new file mode 100644
index 0000000..db62a1d
--- /dev/null
+++ b/mm/memleak.c
@@ -0,0 +1,1477 @@
+/*
+ * mm/memleak.c
+ *
+ * Copyright (C) 2006 ARM Limited
+ * Written by Catalin Marinas <catalin.marinas@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ *
+ * Notes on locking
+ *
+ * Kmemleak needs to allocate/free memory for its own data structures:
+ * the memleak_object, the pointer hash and aliases radix trees. The
+ * memleak_free hook can be called from mm/slab.c with the list_lock
+ * held (i.e. when releasing off-slab management structures) and it
+ * will acquire the memleak_lock. To avoid deadlocks caused by
+ * locking dependency, the list_lock must not be acquired while
+ * memleak_lock is held. This is ensured by not allocating/freeing
+ * memory while any of the kmemleak locks are held.
+ *
+ * The kmemleak hooks cannot be called concurrently on the same
+ * memleak_object (this is due to the way they were inserted in the
+ * kernel).
+ *
+ * The following locks are present in kmemleak:
+ *
+ * - alias_tree_lock - rwlock for accessing the radix tree holding the
+ *   objects type information
+ *
+ * - memleak_lock - global kmemleak lock; protects object_list,
+ *   last_object, pointer_hash and memleak_object structures
+ *
+ * Locking dependencies:
+ *
+ * - alias_tree_lock --> l3->list_lock
+ * - l3->list_lock --> memleak_lock
+ */
+
+/* #define DEBUG */
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/radix-tree.h>
+#include <linux/gfp.h>
+#include <linux/errno.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/kallsyms.h>
+#include <linux/mman.h>
+#include <linux/nodemask.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+#include <linux/cpumask.h>
+#include <linux/spinlock.h>
+#include <linux/rcupdate.h>
+#include <linux/hash.h>
+#include <linux/stacktrace.h>
+
+#include <asm/bitops.h>
+#include <asm/sections.h>
+#include <asm/percpu.h>
+#include <asm/processor.h>
+#include <asm/thread_info.h>
+#include <asm/atomic.h>
+
+#include <linux/memleak.h>
+
+#ifdef CONFIG_DEBUG_SPINLOCK
+#define BUG_ON_LOCKING(cond)	BUG_ON(cond)
+#else
+#define BUG_ON_LOCKING(cond)
+#endif
+
+#define MAX_TRACE		CONFIG_DEBUG_MEMLEAK_TRACE_LENGTH
+#define SCAN_BLOCK_SIZE		4096		/* maximum scan length with interrupts disabled */
+#define PREINIT_OBJECTS		CONFIG_DEBUG_MEMLEAK_PREINIT_OBJECTS
+#define HASH_BITS		CONFIG_DEBUG_MEMLEAK_HASH_BITS
+#define BYTES_PER_WORD		sizeof(void *)
+
+extern struct memleak_offset __memleak_offsets_start[];
+extern struct memleak_offset __memleak_offsets_end[];
+
+struct memleak_alias {
+	struct hlist_node node;
+	unsigned long offset;
+};
+
+struct memleak_scan_area {
+	struct hlist_node node;
+	unsigned long offset;
+	size_t length;
+};
+
+struct memleak_object {
+	unsigned long flags;
+	struct list_head object_list;
+	struct list_head gray_list;
+	struct rcu_head rcu;
+	int use_count;
+	unsigned long pointer;
+	unsigned long offset;		/* padding */
+	size_t size;
+	unsigned long type_id;
+	int ref_count;			/* the minimum encounters of the value */
+	int count;			/* the encounters of the value */
+	int report_thld;		/* the unreferenced reporting threshold */
+	struct hlist_head *alias_list;
+	struct hlist_head area_list;	/* areas to be scanned (or empty for all) */
+	unsigned long trace[MAX_TRACE];
+	unsigned int trace_len;
+};
+
+struct hash_node {
+	struct hlist_node node;
+	unsigned long val;
+	void *object;
+};
+
+enum memleak_action {
+	MEMLEAK_ALLOC,
+	MEMLEAK_FREE,
+	MEMLEAK_PADDING,
+	MEMLEAK_NOT_LEAK,
+	MEMLEAK_IGNORE,
+	MEMLEAK_SCAN_AREA,
+	MEMLEAK_TYPEID
+};
+
+struct memleak_preinit_object {
+	enum memleak_action type;
+	const void *pointer;
+	unsigned long offset;
+	size_t size;
+	unsigned long type_id;
+	int ref_count;
+};
+
+/* Tree storing the pointer aliases indexed by size */
+static RADIX_TREE(alias_tree, GFP_ATOMIC);
+static DEFINE_RWLOCK(alias_tree_lock);
+/* Hash storing all the possible objects, indexed by the pointer value */
+static struct hlist_head *pointer_hash;
+/* The list of all allocated objects */
+static LIST_HEAD(object_list);
+/* The list of the gray objects */
+static LIST_HEAD(gray_list);
+
+static struct kmem_cache *object_cache;
+/* The main lock for protecting the object lists and radix trees */
+static DEFINE_SPINLOCK(memleak_lock);
+static cpumask_t memleak_cpu_mask = CPU_MASK_NONE;
+static atomic_t memleak_initialized = ATOMIC_INIT(0);
+static int __initdata preinit_pos;
+static struct memleak_preinit_object __initdata preinit_objects[PREINIT_OBJECTS];
+/* last allocated object (optimization); protected by memleak_lock */
+static struct memleak_object *last_object;
+static int reported_leaks;
+
+/* object flags */
+#define OBJECT_ALLOCATED	0x1
+#define OBJECT_TYPE_GUESSED	0x2
+
+/* Hash functions */
+static void hash_init(void)
+{
+	unsigned int i;
+	unsigned int hash_size = sizeof(*pointer_hash) * (1 << HASH_BITS);
+	unsigned int hash_order = fls(hash_size) - 1;
+
+	/* hash_size not a power of 2 */
+	if (hash_size & ((1 << hash_order) - 1))
+		hash_order += 1;
+	if (hash_order < PAGE_SHIFT)
+		hash_order = PAGE_SHIFT;
+
+	pointer_hash = (struct hlist_head *)
+		__get_free_pages(GFP_ATOMIC, hash_order - PAGE_SHIFT);
+	if (!pointer_hash)
+		panic("kmemleak: cannot allocate the pointer hash\n");
+
+	for (i = 0; i < (1 << HASH_BITS); i++)
+		INIT_HLIST_HEAD(&pointer_hash[i]);
+}
+
+static struct hash_node *__hash_lookup_node(unsigned long val)
+{
+	struct hlist_node *elem;
+	struct hash_node *hnode;
+	unsigned long index = hash_long(val, HASH_BITS);
+
+	hlist_for_each_entry(hnode, elem, &pointer_hash[index], node) {
+		if (hnode->val == val)
+			return hnode;
+	}
+	return NULL;
+}
+
+static int hash_insert(unsigned long val, void *object)
+{
+	unsigned long flags;
+	unsigned long index = hash_long(val, HASH_BITS);
+	struct hash_node *hnode = kmalloc(sizeof(*hnode), GFP_ATOMIC);
+
+	if (!hnode)
+		return -ENOMEM;
+	INIT_HLIST_NODE(&hnode->node);
+	hnode->val = val;
+	hnode->object = object;
+
+	spin_lock_irqsave(&memleak_lock, flags);
+	hlist_add_head(&hnode->node, &pointer_hash[index]);
+	spin_unlock_irqrestore(&memleak_lock, flags);
+
+	return 0;
+}
+
+static void *hash_delete(unsigned long val)
+{
+	unsigned long flags;
+	void *object = NULL;
+	struct hash_node *hnode;
+
+	spin_lock_irqsave(&memleak_lock, flags);
+	hnode = __hash_lookup_node(val);
+	if (hnode) {
+		object = hnode->object;
+		hlist_del(&hnode->node);
+	}
+	spin_unlock_irqrestore(&memleak_lock, flags);
+
+	kfree(hnode);
+	return object;
+}
+
+/* memleak_lock held by the calling function and interrupts disabled */
+static void *hash_lookup(unsigned long val)
+{
+	struct hash_node *hnode;
+
+	BUG_ON_LOCKING(!irqs_disabled());
+	BUG_ON_LOCKING(!spin_is_locked(&memleak_lock));
+
+	hnode = __hash_lookup_node(val);
+	if (hnode)
+		return hnode->object;
+	return NULL;
+}
+
+/* helper macros to avoid recursive calls. After disabling the
+ * interrupts, the only calls to this function on the same CPU should
+ * be from kmemleak itself and we can either ignore them or
+ * panic. Calls from other CPU's should be protected by spinlocks */
+#define recursive_enter(cpu_id, flags)	({		\
+	local_irq_save(flags);				\
+	cpu_id = get_cpu();				\
+	cpu_test_and_set(cpu_id, memleak_cpu_mask);	\
+})
+
+#define recursive_clear(cpu_id)		do {		\
+	cpu_clear(cpu_id, memleak_cpu_mask);		\
+} while (0)
+
+#define recursive_exit(flags)		do {		\
+	put_cpu_no_resched();				\
+	local_irq_restore(flags);			\
+} while (0)
+
+/* Object colors, encoded with count and ref_count:
+ *   - white - orphan object, i.e. not enough references to it (ref_count >= 1)
+ *   - gray  - referred at least once and therefore non-orphan (ref_count == 0)
+ *   - black - ignore; it doesn't contain references (text section) (ref_count == -1) */
+static inline int color_white(const struct memleak_object *object)
+{
+	return object->count != -1 && object->count < object->ref_count;
+}
+
+static inline int color_gray(const struct memleak_object *object)
+{
+	return object->ref_count != -1 && object->count >= object->ref_count;
+}
+
+static inline int color_black(const struct memleak_object *object)
+{
+	return object->ref_count == -1;
+}
+
+#ifdef DEBUG
+static inline void dump_object_internals(struct memleak_object *object)
+{
+	struct memleak_alias *alias;
+	struct hlist_node *elem;
+
+	printk(KERN_NOTICE "  size = %d\n", object->size);
+	printk(KERN_NOTICE "  ref_count = %d\n", object->ref_count);
+	printk(KERN_NOTICE "  count = %d\n", object->count);
+	printk(KERN_NOTICE "  aliases:\n");
+	if (object->alias_list) {
+		hlist_for_each_entry(alias, elem, object->alias_list, node)
+			printk(KERN_NOTICE "    0x%lx\n", alias->offset);
+	}
+}
+#else
+static inline void dump_object_internals(struct memleak_object *object)
+{ }
+#endif
+
+static void dump_object_info(struct memleak_object *object)
+{
+	struct stack_trace trace;
+
+	trace.nr_entries = object->trace_len;
+	trace.entries = object->trace;
+
+	printk(KERN_NOTICE "kmemleak: object 0x%08lx:\n", object->pointer);
+	dump_object_internals(object);
+	printk(KERN_NOTICE "  trace:\n");
+	print_stack_trace(&trace, 4);
+}
+
+/* Insert an element into the aliases radix tree.
+ * Return 0 on success. */
+static int insert_alias(unsigned long type_id, unsigned long offset)
+{
+	int ret = 0;
+	struct hlist_head *alias_list;
+	struct hlist_node *elem;
+	struct memleak_alias *alias;
+	unsigned long flags;
+	unsigned int cpu_id;
+
+	if (type_id == 0 || offset == 0 || offset >= ml_sizeof(type_id))
+		return -EINVAL;
+
+	if (recursive_enter(cpu_id, flags))
+		BUG();
+	write_lock(&alias_tree_lock);
+
+	offset &= ~(BYTES_PER_WORD - 1);
+
+	alias_list = radix_tree_lookup(&alias_tree, type_id);
+	if (!alias_list) {
+		/* no alias list for this type id. Allocate list_head
+		 * and insert into the radix tree */
+		alias_list = kmalloc(sizeof(*alias_list), GFP_ATOMIC);
+		if (!alias_list)
+			panic("kmemleak: cannot allocate alias_list\n");
+		INIT_HLIST_HEAD(alias_list);
+
+		ret = radix_tree_insert(&alias_tree, type_id, alias_list);
+		if (ret)
+			panic("kmemleak: cannot insert into the aliases radix tree: %d\n", ret);
+	}
+
+	hlist_for_each_entry(alias, elem, alias_list, node) {
+		if (alias->offset == offset) {
+			ret = -EEXIST;
+			goto out;
+		}
+	}
+
+	alias = kmalloc(sizeof(*alias), GFP_ATOMIC);
+	if (!alias)
+		panic("kmemleak: cannot allocate initial memory\n");
+	INIT_HLIST_NODE(&alias->node);
+	alias->offset = offset;
+
+	hlist_add_head_rcu(&alias->node, alias_list);
+
+ out:
+	write_unlock(&alias_tree_lock);
+	recursive_clear(cpu_id);
+	recursive_exit(flags);
+
+	return ret;
+}
+
+/* Insert pointer aliases from the given array */
+void memleak_insert_aliases(struct memleak_offset *ml_off_start,
+			    struct memleak_offset *ml_off_end)
+{
+	struct memleak_offset *ml_off;
+	int i = 0;
+#ifdef CONFIG_DEBUG_MEMLEAK_SECONDARY_ALIASES
+	unsigned long flags;
+#endif
+
+	pr_debug("%s(0x%p, 0x%p)\n", __FUNCTION__, ml_off_start, ml_off_end);
+
+	/* primary aliases - container_of(member) */
+	for (ml_off = ml_off_start; ml_off < ml_off_end; ml_off++)
+		if (!insert_alias(ml_off->type_id, ml_off->offset))
+			i++;
+	pr_debug("kmemleak: found %d primary alias(es)\n", i);
+
+#ifdef CONFIG_DEBUG_MEMLEAK_SECONDARY_ALIASES
+	/* secondary aliases - container_of(container_of(member)) */
+	for (ml_off = ml_off_start; ml_off < ml_off_end; ml_off++) {
+		struct hlist_head *alias_list;
+		struct memleak_alias *alias;
+		struct hlist_node *elem;
+
+		/* with imprecise type identification, if the member
+		 * id is the same as the outer structure id, just
+		 * ignore as any potential aliases are already in the
+		 * tree */
+		if (ml_off->member_type_id == ml_off->type_id)
+			continue;
+
+		read_lock_irqsave(&alias_tree_lock, flags);
+		alias_list = radix_tree_lookup(&alias_tree, ml_off->member_type_id);
+		read_unlock_irqrestore(&alias_tree_lock, flags);
+		if (!alias_list)
+			continue;
+
+		rcu_read_lock();
+		hlist_for_each_entry_rcu(alias, elem, alias_list, node)
+			if (!insert_alias(ml_off->type_id, ml_off->offset + alias->offset))
+				i++;
+		rcu_read_unlock();
+	}
+	pr_debug("kmemleak: found %d alias(es)\n", i);
+#endif
+}
+EXPORT_SYMBOL_GPL(memleak_insert_aliases);
+
+/* called with interrupts disabled */
+static inline struct memleak_object *get_cached_object(unsigned long ptr)
+{
+	struct memleak_object *object;
+
+	BUG_ON_LOCKING(!irqs_disabled());
+
+	spin_lock(&memleak_lock);
+	if (!last_object || ptr != last_object->pointer)
+		last_object = hash_lookup(ptr);
+	object = last_object;
+	spin_unlock(&memleak_lock);
+
+	return object;
+}
+
+/* no need for atomic operations since memleak_lock is already held
+ * and interrupts disabled. Return 1 if successful or 0 otherwise */
+static inline int get_object(struct memleak_object *object)
+{
+	BUG_ON_LOCKING(!irqs_disabled());
+	BUG_ON_LOCKING(!spin_is_locked(&memleak_lock));
+
+	if (object->use_count != 0)
+		object->use_count++;
+	return object->use_count != 0;
+}
+
+static void free_object_rcu(struct rcu_head *rcu)
+{
+	unsigned long flags;
+	unsigned int cpu_id;
+	struct hlist_node *elem, *tmp;
+	struct memleak_scan_area *area;
+	struct memleak_object *object =
+		container_of(rcu, struct memleak_object, rcu);
+
+	if (recursive_enter(cpu_id, flags))
+		BUG();
+
+	/* once use_count is 0, there is no code accessing the object */
+	hlist_for_each_entry_safe(area, elem, tmp, &object->area_list, node) {
+		hlist_del(elem);
+		kfree(area);
+	}
+	kmem_cache_free(object_cache, object);
+
+	recursive_clear(cpu_id);
+	recursive_exit(flags);
+}
+
+/* called without memleak_lock held */
+static void put_object(struct memleak_object *object)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&memleak_lock, flags);
+
+	if (--object->use_count > 0)
+		goto out;
+
+	/* should only get here after delete_object was called */
+	BUG_ON(object->flags & OBJECT_ALLOCATED);
+
+	/* the last reference to this object */
+	list_del_rcu(&object->object_list);
+	call_rcu(&object->rcu, free_object_rcu);
+
+ out:
+	spin_unlock_irqrestore(&memleak_lock, flags);
+}
+
+/* called with interrupts disabled (no need to hold the memleak_lock
+ * as the pointer aliases functions cannot be called concurrently
+ * on the same object) */
+static void delete_pointer_aliases(struct memleak_object *object)
+{
+	struct memleak_alias *alias;
+	struct hlist_node *elem;
+
+	BUG_ON_LOCKING(!irqs_disabled());
+
+	if (object->offset)
+		hash_delete(object->pointer + object->offset);
+
+	if (object->alias_list) {
+		rcu_read_lock();
+		hlist_for_each_entry_rcu(alias, elem, object->alias_list, node)
+			hash_delete(object->pointer
+				    + object->offset + alias->offset);
+		rcu_read_unlock();
+		object->alias_list = NULL;
+	}
+}
+
+/* called with interrupts disabled (see above for why memleak_lock
+ * doesn't need to be held) */
+static void create_pointer_aliases(struct memleak_object *object)
+{
+	struct memleak_alias *alias;
+	struct hlist_node *elem;
+	int err;
+
+	BUG_ON_LOCKING(!irqs_disabled());
+
+	if (object->offset) {
+		err = hash_insert(object->pointer + object->offset, object);
+		if (err) {
+			dump_stack();
+			panic("kmemleak: cannot insert offset into the pointer hash table: %d\n", err);
+		}
+	}
+
+	read_lock(&alias_tree_lock);
+	object->alias_list = radix_tree_lookup(&alias_tree, object->type_id);
+	read_unlock(&alias_tree_lock);
+
+	if (object->alias_list) {
+		rcu_read_lock();
+		hlist_for_each_entry_rcu(alias, elem, object->alias_list, node) {
+			err = hash_insert(object->pointer + object->offset
+					  + alias->offset, object);
+			if (err) {
+				dump_stack();
+				panic("kmemleak: cannot insert alias into the pointer hash table: %d\n", err);
+			}
+		}
+		rcu_read_unlock();
+	}
+}
+
+/* Insert a pointer and its aliases into the pointer hash table */
+static inline void create_object(unsigned long ptr, size_t size, int ref_count)
+{
+	struct memleak_object *object;
+	int err;
+	struct stack_trace trace;
+
+	BUG_ON_LOCKING(!irqs_disabled());
+
+	object = kmem_cache_alloc(object_cache, SLAB_ATOMIC);
+	if (!object)
+		panic("kmemleak: cannot allocate a memleak_object structure\n");
+
+	INIT_LIST_HEAD(&object->object_list);
+	INIT_LIST_HEAD(&object->gray_list);
+	INIT_HLIST_HEAD(&object->area_list);
+	object->flags = OBJECT_TYPE_GUESSED;
+	object->use_count = 1;
+	object->pointer = ptr;
+	object->offset = 0;
+	object->size = size;
+	object->type_id = ml_guess_typeid(size);	/* type id approximation */
+	object->ref_count = ref_count;
+	object->count = -1;
+	object->report_thld = CONFIG_DEBUG_MEMLEAK_REPORT_THLD;
+	object->alias_list = NULL;
+
+	trace.max_entries = MAX_TRACE;
+	trace.nr_entries = 0;
+	trace.entries = object->trace;
+	trace.skip = 2;
+	trace.all_contexts = 0;
+	save_stack_trace(&trace, NULL);
+
+	object->trace_len = trace.nr_entries;
+
+	spin_lock(&memleak_lock);
+	/* object->use_count already set to 1 */
+	list_add_tail_rcu(&object->object_list, &object_list);
+	spin_unlock(&memleak_lock);
+
+	err = hash_insert(ptr, object);
+	if (err) {
+		dump_stack();
+		if (err == -EEXIST) {
+			printk(KERN_NOTICE "Existing pointer:\n");
+			spin_lock(&memleak_lock);
+			object = hash_lookup(ptr);
+			dump_object_info(object);
+			spin_unlock(&memleak_lock);
+		}
+		panic("kmemleak: cannot insert 0x%lx into the pointer hash table: %d\n",
+		      ptr, err);
+	}
+
+	create_pointer_aliases(object);
+
+	/* everything completed fine, just mark the object as allocated */
+	spin_lock(&memleak_lock);
+	object->flags |= OBJECT_ALLOCATED;
+	last_object = object;
+	spin_unlock(&memleak_lock);
+}
+
+/* Remove a pointer and its aliases from the pointer hash table */
+static inline void delete_object(unsigned long ptr)
+{
+	struct memleak_object *object;
+
+	BUG_ON_LOCKING(!irqs_disabled());
+
+	object = hash_delete(ptr);
+	if (!object) {
+		dump_stack();
+		printk(KERN_WARNING "kmemleak: freeing unknown object at 0x%08lx\n", ptr);
+		return;
+	}
+
+	spin_lock(&memleak_lock);
+
+	if (object->pointer != ptr) {
+		dump_stack();
+		dump_object_info(object);
+		panic("kmemleak: freeing object by alias 0x%08lx\n", ptr);
+	}
+	BUG_ON(!(object->flags & OBJECT_ALLOCATED));
+
+	object->flags &= ~OBJECT_ALLOCATED;
+
+	/* deleting the cached object */
+	if (last_object && ptr == last_object->pointer)
+		last_object = NULL;
+
+#ifdef CONFIG_DEBUG_MEMLEAK_ORPHAN_FREEING
+	if (color_white(object)) {
+		dump_stack();
+		dump_object_info(object);
+		printk(KERN_WARNING "kmemleak: freeing orphan object 0x%08lx\n", ptr);
+	}
+#endif
+
+	spin_unlock(&memleak_lock);
+
+	delete_pointer_aliases(object);
+	object->pointer = 0;
+	put_object(object);
+}
+
+/* Re-create the pointer aliases according to the new size/offset
+ * information.
+ *
+ * Used when an allocator pads the object: the tracked pointer stays
+ * the same but the useful data lives at [ptr+offset, ptr+offset+size).
+ * Must be called with interrupts disabled; panics on any
+ * inconsistency (unknown pointer, alias, or out-of-bounds range). */
+static inline void unpad_object(unsigned long ptr, unsigned long offset,
+				size_t size)
+{
+	struct memleak_object *object;
+
+	BUG_ON_LOCKING(!irqs_disabled());
+
+	object = get_cached_object(ptr);
+	if (!object) {
+		dump_stack();
+		panic("kmemleak: resizing unknown object at 0x%08lx\n", ptr);
+	}
+	if (object->pointer != ptr) {
+		dump_stack();
+		dump_object_info(object);
+		panic("kmemleak: resizing object by alias 0x%08lx\n", ptr);
+	}
+	/* the new window must fit inside the originally tracked block */
+	if (offset + size > object->size) {
+		dump_stack();
+		dump_object_info(object);
+		panic("kmemleak: new boundaries exceed object 0x%08lx\n", ptr);
+	}
+
+	/* nothing changed */
+	if (offset == object->offset && size == object->size)
+		return;
+
+	/* re-create the pointer aliases */
+	delete_pointer_aliases(object);
+
+	spin_lock(&memleak_lock);
+	object->offset = offset;
+	object->size = size;
+	/* a guessed type id depends on the size, so refresh the guess */
+	if (object->flags & OBJECT_TYPE_GUESSED)
+		object->type_id = ml_guess_typeid(size);
+	spin_unlock(&memleak_lock);
+
+	create_pointer_aliases(object);
+}
+
+/* Make an object permanently gray (known false positive).
+ *
+ * Setting ref_count to 0 means no references are required for the
+ * object to be considered "in use", so it is never reported as a
+ * leak. Must be called with interrupts disabled; panics if the
+ * pointer is unknown or is an alias. */
+static inline void make_gray_object(unsigned long ptr)
+{
+	struct memleak_object *object;
+
+	BUG_ON_LOCKING(!irqs_disabled());
+
+	object = get_cached_object(ptr);
+	if (!object) {
+		dump_stack();
+		panic("kmemleak: graying unknown object at 0x%08lx\n", ptr);
+	}
+	if (object->pointer != ptr) {
+		dump_stack();
+		dump_object_info(object);
+		panic("kmemleak: graying object by alias 0x%08lx\n", ptr);
+	}
+
+	spin_lock(&memleak_lock);
+	object->ref_count = 0;
+	spin_unlock(&memleak_lock);
+}
+
+/* Mark the object as black: neither scanned for pointers nor
+ * reported as a leak (ref_count == -1 is the "ignore" sentinel).
+ * Must be called with interrupts disabled; panics if the pointer is
+ * unknown or is an alias. */
+static inline void make_black_object(unsigned long ptr)
+{
+	struct memleak_object *object;
+
+	BUG_ON_LOCKING(!irqs_disabled());
+
+	object = get_cached_object(ptr);
+	if (!object) {
+		dump_stack();
+		panic("kmemleak: blacking unknown object at 0x%08lx\n", ptr);
+	}
+	if (object->pointer != ptr) {
+		dump_stack();
+		dump_object_info(object);
+		panic("kmemleak: blacking object by alias 0x%08lx\n", ptr);
+	}
+
+	spin_lock(&memleak_lock);
+	object->ref_count = -1;
+	spin_unlock(&memleak_lock);
+}
+
+/* Add a scanning area to the object.
+ *
+ * When an object has one or more scan areas, scan_object() only
+ * scans those sub-ranges instead of the whole block, reducing false
+ * negatives from non-pointer data. Must be called with interrupts
+ * disabled. The area is allocated with GFP_ATOMIC before validation;
+ * all failure paths panic, so the early allocation cannot leak. */
+static inline void add_scan_area(unsigned long ptr, unsigned long offset, size_t length)
+{
+	struct memleak_object *object;
+	struct memleak_scan_area *area;
+
+	BUG_ON_LOCKING(!irqs_disabled());
+
+	area = kmalloc(sizeof(*area), GFP_ATOMIC);
+	if (!area)
+		panic("kmemleak: cannot allocate a scan area\n");
+
+	INIT_HLIST_NODE(&area->node);
+	area->offset = offset;
+	area->length = length;
+
+	object = get_cached_object(ptr);
+	if (!object) {
+		dump_stack();
+		panic("kmemleak: adding scan area to unknown object at 0x%08lx\n", ptr);
+	}
+	if (object->pointer != ptr) {
+		dump_stack();
+		dump_object_info(object);
+		panic("kmemleak: adding scan area to object by alias 0x%08lx\n", ptr);
+	}
+	/* the scan area must lie inside the tracked block */
+	if (offset + length > object->size) {
+		dump_stack();
+		dump_object_info(object);
+		panic("kmemleak: scan area larger than object 0x%08lx\n", ptr);
+	}
+
+	spin_lock(&memleak_lock);
+	hlist_add_head(&area->node, &object->area_list);
+	spin_unlock(&memleak_lock);
+}
+
+/* Re-create the pointer aliases according to the new type id.
+ *
+ * Must be called with interrupts disabled; panics on inconsistencies
+ * (unknown pointer, alias, or a type larger than the object). If the
+ * type id is unchanged, only the OBJECT_TYPE_GUESSED flag is cleared
+ * and the aliases are left alone.
+ *
+ * Note: the original code assigned object->type_id before comparing
+ * it against type_id, so the "no change" test was always true and
+ * the aliases were never rebuilt; the comparison now uses the old
+ * value. */
+static inline void change_type_id(unsigned long ptr, unsigned long type_id)
+{
+	struct memleak_object *object;
+
+	BUG_ON_LOCKING(!irqs_disabled());
+
+	object = get_cached_object(ptr);
+	if (!object) {
+		dump_stack();
+		panic("kmemleak: changing type of unknown object at 0x%08lx\n", ptr);
+	}
+	if (object->pointer != ptr) {
+		dump_stack();
+		dump_object_info(object);
+		panic("kmemleak: changing type of object by alias 0x%08lx\n", ptr);
+	}
+	if (ml_sizeof(type_id) > object->size) {
+		dump_stack();
+		dump_object_info(object);
+		panic("kmemleak: new type larger than object 0x%08lx\n", ptr);
+	}
+
+	spin_lock(&memleak_lock);
+	/* the type is now known precisely rather than guessed from size */
+	object->flags &= ~OBJECT_TYPE_GUESSED;
+	if (type_id == object->type_id) {
+		/* same type - the existing aliases are still valid */
+		spin_unlock(&memleak_lock);
+		return;
+	}
+	object->type_id = type_id;
+	spin_unlock(&memleak_lock);
+
+	/* the aliases depend on the type id, so rebuild them */
+	delete_pointer_aliases(object);
+	create_pointer_aliases(object);
+}
+
+/* Allocation function hook.
+ *
+ * @ptr:       address of the allocated block (NULL is ignored)
+ * @size:      size of the block in bytes
+ * @ref_count: number of internal references expected to the block
+ *
+ * Before the detector is initialized, the action is buffered in
+ * preinit_objects and replayed later by memleak_init(). Note: size_t
+ * is printed with %zu - the original %d was a format/type mismatch. */
+void memleak_alloc(const void *ptr, size_t size, int ref_count)
+{
+	unsigned long flags;
+	unsigned int cpu_id;
+
+	if (!ptr)
+		return;
+
+	/* avoid recursing into ourselves from our own allocations */
+	if (recursive_enter(cpu_id, flags))
+		goto out;
+
+	pr_debug("%s(0x%p, %zu, %d)\n", __FUNCTION__, ptr, size, ref_count);
+
+	if (!atomic_read(&memleak_initialized)) {
+		/* no need for SMP locking since this object is
+		 * executed before the other CPUs are started */
+		struct memleak_preinit_object *object;
+
+		BUG_ON(cpu_id != 0);
+
+		/* overflow of the buffer is detected in memleak_init() */
+		if (preinit_pos < PREINIT_OBJECTS) {
+			object = &preinit_objects[preinit_pos];
+
+			object->type = MEMLEAK_ALLOC;
+			object->pointer = ptr;
+			object->size = size;
+			object->ref_count = ref_count;
+		}
+		preinit_pos++;
+
+		goto clear;
+	}
+
+	create_object((unsigned long)ptr, size, ref_count);
+
+ clear:
+	recursive_clear(cpu_id);
+ out:
+	recursive_exit(flags);
+}
+EXPORT_SYMBOL_GPL(memleak_alloc);
+
+/* Freeing function hook: forget a previously tracked block. NULL
+ * pointers are silently ignored. Calls made before the detector is
+ * initialized are buffered and replayed by memleak_init(). */
+void memleak_free(const void *ptr)
+{
+	unsigned long flags;
+	unsigned int cpu_id;
+
+	if (!ptr)
+		return;
+
+	/* guard against re-entering from our own internal frees */
+	if (recursive_enter(cpu_id, flags))
+		goto out;
+
+	pr_debug("%s(0x%p)\n", __FUNCTION__, ptr);
+
+	if (atomic_read(&memleak_initialized)) {
+		delete_object((unsigned long)ptr);
+	} else {
+		/* detector not up yet - record the action for later
+		 * replay; only CPU 0 runs this early, so no locking */
+		struct memleak_preinit_object *object;
+
+		BUG_ON(cpu_id != 0);
+
+		if (preinit_pos < PREINIT_OBJECTS) {
+			object = &preinit_objects[preinit_pos];
+			object->type = MEMLEAK_FREE;
+			object->pointer = ptr;
+		}
+		preinit_pos++;
+	}
+
+	recursive_clear(cpu_id);
+ out:
+	recursive_exit(flags);
+}
+EXPORT_SYMBOL_GPL(memleak_free);
+
+/* Change the size and location information of an allocated memory
+ * object (this is needed for allocations padding the object).
+ *
+ * @ptr:    tracked block address (NULL is ignored)
+ * @offset: start of the useful data within the block
+ * @size:   size of the useful data
+ *
+ * Note: the original pr_debug omitted @offset and printed size_t
+ * with %d; both are fixed here. */
+void memleak_padding(const void *ptr, unsigned long offset, size_t size)
+{
+	unsigned long flags;
+	unsigned int cpu_id;
+
+	if (!ptr)
+		return;
+
+	if (recursive_enter(cpu_id, flags))
+		goto out;
+
+	pr_debug("%s(0x%p, %lu, %zu)\n", __FUNCTION__, ptr, offset, size);
+
+	if (!atomic_read(&memleak_initialized)) {
+		/* buffered for replay by memleak_init() */
+		struct memleak_preinit_object *object;
+
+		BUG_ON(cpu_id != 0);
+
+		if (preinit_pos < PREINIT_OBJECTS) {
+			object = &preinit_objects[preinit_pos];
+
+			object->type = MEMLEAK_PADDING;
+			object->pointer = ptr;
+			object->offset = offset;
+			object->size = size;
+		}
+		preinit_pos++;
+
+		goto clear;
+	}
+
+	unpad_object((unsigned long)ptr, offset, size);
+
+ clear:
+	recursive_clear(cpu_id);
+ out:
+	recursive_exit(flags);
+}
+EXPORT_SYMBOL(memleak_padding);
+
+/* Mark an object as a known false positive: it will be made
+ * permanently gray and never reported as a leak. */
+void memleak_not_leak(const void *ptr)
+{
+	unsigned long flags;
+	unsigned int cpu_id;
+
+	if (!ptr)
+		return;
+
+	/* bail out if called from within kmemleak itself */
+	if (recursive_enter(cpu_id, flags))
+		goto out;
+
+	pr_debug("%s(0x%p)\n", __FUNCTION__, ptr);
+
+	if (atomic_read(&memleak_initialized)) {
+		make_gray_object((unsigned long)ptr);
+	} else {
+		/* too early - queue the request for memleak_init();
+		 * single-CPU at this point, so no locking needed */
+		struct memleak_preinit_object *object;
+
+		BUG_ON(cpu_id != 0);
+
+		if (preinit_pos < PREINIT_OBJECTS) {
+			object = &preinit_objects[preinit_pos];
+			object->type = MEMLEAK_NOT_LEAK;
+			object->pointer = ptr;
+		}
+		preinit_pos++;
+	}
+
+	recursive_clear(cpu_id);
+ out:
+	recursive_exit(flags);
+}
+EXPORT_SYMBOL(memleak_not_leak);
+
+/* Ignore this memory object: it is made black, i.e. neither scanned
+ * for pointers nor reported as a leak. */
+void memleak_ignore(const void *ptr)
+{
+	unsigned long flags;
+	unsigned int cpu_id;
+
+	if (!ptr)
+		return;
+
+	/* bail out if called from within kmemleak itself */
+	if (recursive_enter(cpu_id, flags))
+		goto out;
+
+	pr_debug("%s(0x%p)\n", __FUNCTION__, ptr);
+
+	if (atomic_read(&memleak_initialized)) {
+		make_black_object((unsigned long)ptr);
+	} else {
+		/* too early - queue the request for memleak_init();
+		 * single-CPU at this point, so no locking needed */
+		struct memleak_preinit_object *object;
+
+		BUG_ON(cpu_id != 0);
+
+		if (preinit_pos < PREINIT_OBJECTS) {
+			object = &preinit_objects[preinit_pos];
+			object->type = MEMLEAK_IGNORE;
+			object->pointer = ptr;
+		}
+		preinit_pos++;
+	}
+
+	recursive_clear(cpu_id);
+ out:
+	recursive_exit(flags);
+}
+EXPORT_SYMBOL(memleak_ignore);
+
+/* Add a scanning area to an object so that only the given
+ * [offset, offset+length) sub-range is scanned for pointers. */
+void memleak_scan_area(const void *ptr, unsigned long offset, size_t length)
+{
+	unsigned long flags;
+	unsigned int cpu_id;
+
+	if (!ptr)
+		return;
+
+	/* bail out if called from within kmemleak itself */
+	if (recursive_enter(cpu_id, flags))
+		goto out;
+
+	pr_debug("%s(0x%p)\n", __FUNCTION__, ptr);
+
+	if (atomic_read(&memleak_initialized)) {
+		add_scan_area((unsigned long)ptr, offset, length);
+	} else {
+		/* too early - queue the request for memleak_init();
+		 * single-CPU at this point, so no locking needed */
+		struct memleak_preinit_object *object;
+
+		BUG_ON(cpu_id != 0);
+
+		if (preinit_pos < PREINIT_OBJECTS) {
+			object = &preinit_objects[preinit_pos];
+			object->type = MEMLEAK_SCAN_AREA;
+			object->pointer = ptr;
+			object->offset = offset;
+			object->size = length;
+		}
+		preinit_pos++;
+	}
+
+	recursive_clear(cpu_id);
+ out:
+	recursive_exit(flags);
+}
+EXPORT_SYMBOL(memleak_scan_area);
+
+/* Change the type id of an allocated memory object.
+ *
+ * @ptr:     tracked block address (NULL is ignored)
+ * @type_id: new type id; 0 means "no change requested"
+ *
+ * Note: type_id is unsigned long, so it is printed with %lu - the
+ * original %ld was the wrong format specifier. */
+void memleak_typeid_raw(const void *ptr, unsigned long type_id)
+{
+	unsigned long flags;
+	unsigned int cpu_id;
+
+	if (!ptr)
+		return;
+	if (!type_id)
+		return;
+
+	if (recursive_enter(cpu_id, flags))
+		goto out;
+
+	pr_debug("%s(0x%p, %lu)\n", __FUNCTION__, ptr, type_id);
+
+	if (!atomic_read(&memleak_initialized)) {
+		/* buffered for replay by memleak_init() */
+		struct memleak_preinit_object *object;
+
+		BUG_ON(cpu_id != 0);
+
+		if (preinit_pos < PREINIT_OBJECTS) {
+			object = &preinit_objects[preinit_pos];
+
+			object->type = MEMLEAK_TYPEID;
+			object->pointer = ptr;
+			object->type_id = type_id;
+		}
+		preinit_pos++;
+
+		goto clear;
+	}
+
+	change_type_id((unsigned long)ptr, type_id);
+
+ clear:
+	recursive_clear(cpu_id);
+ out:
+	recursive_exit(flags);
+}
+EXPORT_SYMBOL(memleak_typeid_raw);
+
+/* Scan a block of memory (exclusive range) for pointers and move
+ * those found to the gray list. This function is called with
+ * memleak_lock held and interrupts disabled.
+ *
+ * Every word-aligned value in [start, end) is treated as a potential
+ * pointer and looked up in the hash table; a hit on a white object
+ * increments its count, and once the object turns gray it is queued
+ * for its own scan. */
+static void __scan_block(void *_start, void *_end)
+{
+	unsigned long *ptr;
+	unsigned long *start = (unsigned long *)ALIGN((unsigned long)_start,
+						      BYTES_PER_WORD);
+	unsigned long *end = _end;
+
+	BUG_ON_LOCKING(!irqs_disabled());
+	BUG_ON_LOCKING(!spin_is_locked(&memleak_lock));
+
+	for (ptr = start; ptr < end; ptr++) {
+		/* mask off the low bits so interior word-unaligned
+		 * values still match a tracked pointer or alias */
+		struct memleak_object *object =
+			hash_lookup((*ptr) & ~(BYTES_PER_WORD - 1));
+		if (!object)
+			continue;
+		if (!color_white(object))
+			continue;
+
+		object->count++;
+		/* this can also happen during the gray_list traversal */
+		if (color_gray(object)) {
+			/* found in the hash, get_object() returns 1 */
+			get_object(object);
+			object->report_thld++;
+			list_add_tail(&object->gray_list, &gray_list);
+		}
+	}
+}
+
+/* Scan [start, end) in SCAN_BLOCK_SIZE chunks, taking and dropping
+ * memleak_lock around each chunk so interrupts are not kept disabled
+ * for the whole range. */
+static void scan_block(void *start, void *end)
+{
+	unsigned long irq_flags;
+	void *chunk = start;
+
+	while (chunk < end) {
+		void *chunk_end = chunk + SCAN_BLOCK_SIZE;
+		void *limit = chunk_end < end ? chunk_end : end;
+
+		spin_lock_irqsave(&memleak_lock, irq_flags);
+		__scan_block(chunk, limit);
+		spin_unlock_irqrestore(&memleak_lock, irq_flags);
+
+		chunk = chunk_end;
+	}
+}
+
+/* Scan a memory block represented by a memleak_object.
+ *
+ * If the object has explicit scan areas, only those sub-ranges are
+ * scanned; otherwise the whole [offset, offset+size) data window is.
+ * Objects freed since they were queued are skipped. */
+static inline void scan_object(struct memleak_object *object)
+{
+	struct memleak_scan_area *area;
+	struct hlist_node *elem;
+	unsigned long flags;
+
+	spin_lock_irqsave(&memleak_lock, flags);
+
+	/* freed object */
+	if (!(object->flags & OBJECT_ALLOCATED))
+		goto out;
+
+	if (hlist_empty(&object->area_list))
+		__scan_block((void *)(object->pointer + object->offset),
+			     (void *)(object->pointer + object->offset
+				      + object->size));
+	else
+		hlist_for_each_entry(area, elem, &object->area_list, node)
+			__scan_block((void *)(object->pointer + area->offset),
+				     (void *)(object->pointer + area->offset
+					      + area->length));
+
+ out:
+	spin_unlock_irqrestore(&memleak_lock, flags);
+}
+
+/* Scan the memory and print the orphan objects.
+ *
+ * Tri-color tracing pass: all objects are whitened first, then the
+ * root areas (data/bss, per-cpu, mem_map and optionally task stacks)
+ * are scanned, and finally the gray list is drained until no new
+ * references turn up. Whatever remains white is a leak candidate. */
+static void memleak_scan(void)
+{
+	unsigned long flags;
+	struct memleak_object *object, *tmp;
+#ifdef CONFIG_DEBUG_MEMLEAK_TASK_STACKS
+	struct task_struct *task;
+#endif
+	int i;
+
+	rcu_read_lock();
+	list_for_each_entry_rcu(object, &object_list, object_list) {
+		spin_lock_irqsave(&memleak_lock, flags);
+
+		/* there should be a maximum of 1 reference to any
+		 * object at this point */
+		BUG_ON(object->use_count > 1);
+
+		/* reset the reference count (whiten the object) */
+		object->count = 0;
+		if (color_gray(object) && get_object(object))
+			list_add_tail(&object->gray_list, &gray_list);
+		else
+			/* count down towards reporting: only objects
+			 * seen as leaks repeatedly get printed */
+			object->report_thld--;
+
+		spin_unlock_irqrestore(&memleak_lock, flags);
+	}
+	rcu_read_unlock();
+
+	/* data/bss scanning */
+	scan_block(_sdata, _edata);
+	scan_block(__bss_start, __bss_stop);
+
+#ifdef CONFIG_SMP
+	/* per-cpu scanning */
+	for_each_possible_cpu(i)
+		scan_block(__per_cpu_start + per_cpu_offset(i),
+			   __per_cpu_end + per_cpu_offset(i));
+#endif
+
+	/* mem_map scanning */
+	for_each_online_node(i) {
+		struct page *page, *end;
+
+		page = NODE_MEM_MAP(i);
+		end  = page + NODE_DATA(i)->node_spanned_pages;
+
+		scan_block(page, end);
+	}
+
+#ifdef CONFIG_DEBUG_MEMLEAK_TASK_STACKS
+	/* scanning task stacks reduces false positives but may hide
+	 * real leaks whose pointers linger in dead stack frames */
+	read_lock(&tasklist_lock);
+	for_each_process(task)
+		scan_block(task_stack_page(task),
+			   task_stack_page(task) + THREAD_SIZE);
+	read_unlock(&tasklist_lock);
+#endif
+
+	/* scan the objects already referenced. More objects will be
+	 * referenced and, if there are no memory leaks, all the
+	 * objects will be scanned. The list traversal is safe for
+	 * both tail additions and removals from inside the loop. The
+	 * memleak objects cannot be freed from outside the loop
+	 * because their use_count was increased */
+	object = list_entry(gray_list.next, typeof(*object), gray_list);
+	while (&object->gray_list != &gray_list) {
+		/* may add new objects to the list */
+		scan_object(object);
+
+		tmp = list_entry(object->gray_list.next, typeof(*object),
+				 gray_list);
+
+		/* remove the object from the list and release it */
+		list_del(&object->gray_list);
+		put_object(object);
+
+		object = tmp;
+	}
+	BUG_ON(!list_empty(&gray_list));
+}
+
+/* seq_file start callback for /sys/kernel/debug/memleak.
+ *
+ * A full memory scan is triggered on the first read (*pos == 0).
+ * Returns the *pos-th object from object_list with an extra
+ * reference held (dropped in next/stop), or NULL when the list is
+ * exhausted or the per-read report limit was reached. */
+static void *memleak_seq_start(struct seq_file *seq, loff_t *pos)
+{
+	struct memleak_object *object;
+	loff_t n = *pos;
+	unsigned long flags;
+
+	if (!n) {
+		memleak_scan();
+		reported_leaks = 0;
+	}
+	if (reported_leaks >= CONFIG_DEBUG_MEMLEAK_REPORTS_NR)
+		return NULL;
+
+	rcu_read_lock();
+	list_for_each_entry_rcu(object, &object_list, object_list) {
+		if (n-- > 0)
+			continue;
+
+		/* pin the object so it survives after rcu_read_unlock */
+		spin_lock_irqsave(&memleak_lock, flags);
+		if (get_object(object)) {
+			spin_unlock_irqrestore(&memleak_lock, flags);
+			goto out;
+		}
+		spin_unlock_irqrestore(&memleak_lock, flags);
+	}
+	object = NULL;
+ out:
+	rcu_read_unlock();
+	return object;
+}
+
+/* seq_file next callback: release the current object and return the
+ * following one from object_list with a reference taken, or NULL at
+ * the end of the list / report limit. */
+static void *memleak_seq_next(struct seq_file *seq, void *v, loff_t *pos)
+{
+	struct list_head *n;
+	struct memleak_object *next = NULL;
+	unsigned long flags;
+
+	++(*pos);
+	if (reported_leaks >= CONFIG_DEBUG_MEMLEAK_REPORTS_NR)
+		return NULL;
+
+	spin_lock_irqsave(&memleak_lock, flags);
+
+	n = ((struct memleak_object *)v)->object_list.next;
+	if (n != &object_list) {
+		next = list_entry(n, struct memleak_object, object_list);
+		/* still in the object_list, get_object() returns 1 */
+		get_object(next);
+	}
+
+	spin_unlock_irqrestore(&memleak_lock, flags);
+
+	/* drop the reference taken by start/previous next */
+	put_object(v);
+	return next;
+}
+
+/* seq_file stop callback: drop the reference still held on the
+ * current object, if any */
+static void memleak_seq_stop(struct seq_file *seq, void *v)
+{
+	if (v)
+		put_object(v);
+}
+
+/* seq_file show callback: print one unreferenced object and its
+ * allocation backtrace.
+ *
+ * Only objects that are still white, still allocated and past the
+ * report threshold (report_thld < 0, i.e. seen as leaks in several
+ * consecutive scans) are printed - this filters transient false
+ * positives. */
+static int memleak_seq_show(struct seq_file *seq, void *v)
+{
+	const struct memleak_object *object = v;
+	unsigned long flags;
+	char namebuf[KSYM_NAME_LEN + 1] = "";
+	char *modname;
+	unsigned long symsize;
+	unsigned long offset = 0;
+	int i;
+
+	spin_lock_irqsave(&memleak_lock, flags);
+
+	if (!color_white(object))
+		goto out;
+	/* freed in the meantime (false positive) or just allocated */
+	if (!(object->flags & OBJECT_ALLOCATED))
+		goto out;
+	if (object->report_thld >= 0)
+		goto out;
+
+	reported_leaks++;
+	seq_printf(seq, "unreferenced object 0x%08lx (size %d):\n",
+		   object->pointer, object->size);
+
+	/* resolve and print the saved allocation stack trace */
+	for (i = 0; i < object->trace_len; i++) {
+		unsigned long trace = object->trace[i];
+
+		kallsyms_lookup(trace, &symsize, &offset, &modname, namebuf);
+		seq_printf(seq, "  [<%08lx>] %s\n", trace, namebuf);
+	}
+
+ out:
+	spin_unlock_irqrestore(&memleak_lock, flags);
+	return 0;
+}
+
+/* seq_file iterator operations for /sys/kernel/debug/memleak */
+static struct seq_operations memleak_seq_ops = {
+	.start = memleak_seq_start,
+	.next  = memleak_seq_next,
+	.stop  = memleak_seq_stop,
+	.show  = memleak_seq_show,
+};
+
+/* debugfs open callback: attach the seq_file iterator to the file */
+static int memleak_seq_open(struct inode *inode, struct file *file)
+{
+	return seq_open(file, &memleak_seq_ops);
+}
+
+/* file operations for the read-only /sys/kernel/debug/memleak entry */
+static struct file_operations memleak_fops = {
+	.owner	 = THIS_MODULE,
+	.open    = memleak_seq_open,
+	.read    = seq_read,
+	.llseek  = seq_lseek,
+	.release = seq_release,
+};
+
+/* KMemLeak initialization. Set up the radix tree for the pointer aliases,
+ * create the object cache, mark the detector as initialized and replay
+ * the allocation/annotation actions buffered before this point. */
+void __init memleak_init(void)
+{
+	int i;
+	unsigned long flags;
+
+	hash_init();
+
+	object_cache = kmem_cache_create("memleak_object_cache", sizeof(struct memleak_object),
+					 0, SLAB_PANIC, NULL, NULL);
+	if (!object_cache)
+		panic("kmemleak: cannot create the object cache\n");
+
+	/* register the compile-time collected container_of offsets */
+	memleak_insert_aliases(__memleak_offsets_start, __memleak_offsets_end);
+
+	/* no need to hold the spinlock as SMP is not initialized
+	 * yet. Holding it here would lead to a deadlock */
+	local_irq_save(flags);
+
+	/* from here on the hooks act directly instead of buffering */
+	atomic_set(&memleak_initialized, 1);
+
+	/* preinit_pos kept counting past the array end, so an
+	 * overflow of the buffer is detectable here */
+	if (preinit_pos >= PREINIT_OBJECTS)
+		panic("kmemleak: preinit objects buffer overflow: %d\n",
+		      preinit_pos);
+
+	/* execute the buffered memleak actions */
+	pr_debug("kmemleak: %d preinit actions\n", preinit_pos);
+	for (i = 0; i < preinit_pos; i++) {
+		struct memleak_preinit_object *object = &preinit_objects[i];
+
+		switch (object->type) {
+		case MEMLEAK_ALLOC:
+			memleak_alloc(object->pointer, object->size,
+				      object->ref_count);
+			break;
+		case MEMLEAK_FREE:
+			memleak_free(object->pointer);
+			break;
+		case MEMLEAK_PADDING:
+			memleak_padding(object->pointer, object->offset,
+					object->size);
+			break;
+		case MEMLEAK_NOT_LEAK:
+			memleak_not_leak(object->pointer);
+			break;
+		case MEMLEAK_IGNORE:
+			memleak_ignore(object->pointer);
+			break;
+		case MEMLEAK_SCAN_AREA:
+			memleak_scan_area(object->pointer,
+					  object->offset, object->size);
+			break;
+		case MEMLEAK_TYPEID:
+			memleak_typeid_raw(object->pointer, object->type_id);
+			break;
+		default:
+			BUG();
+		}
+	}
+
+	local_irq_restore(flags);
+
+	printk(KERN_INFO "Kernel memory leak detector initialized\n");
+}
+
+/* Late initialization function: create the debugfs entry once the
+ * debugfs filesystem is available. Returns 0 on success or -ENOMEM
+ * if the file could not be created. */
+int __init memleak_late_init(void)
+{
+	struct dentry *dentry;
+
+	dentry = debugfs_create_file("memleak", S_IRUGO, NULL, NULL,
+				     &memleak_fops);
+	if (!dentry)
+		return -ENOMEM;
+
+	pr_debug("kmemleak: late initialization completed\n");
+
+	return 0;
+}
+late_initcall(memleak_late_init);

^ permalink raw reply related	[flat|nested] 11+ messages in thread

* [PATCH 2.6.19 02/10] Kmemleak documentation
  2006-11-30 22:59 [PATCH 2.6.19 00/10] Kernel memory leak detector 0.12 Catalin Marinas
  2006-11-30 22:59 ` [PATCH 2.6.19 01/10] Base support for kmemleak Catalin Marinas
@ 2006-11-30 23:00 ` Catalin Marinas
  2006-11-30 23:00 ` [PATCH 2.6.19 03/10] Add the memory allocation/freeing hooks for kmemleak Catalin Marinas
                   ` (7 subsequent siblings)
  9 siblings, 0 replies; 11+ messages in thread
From: Catalin Marinas @ 2006-11-30 23:00 UTC (permalink / raw)
  To: linux-kernel

Signed-off-by: Catalin Marinas <catalin.marinas@gmail.com>
---

 Documentation/kmemleak.txt |  161 ++++++++++++++++++++++++++++++++++++++++++++
 1 files changed, 161 insertions(+), 0 deletions(-)

diff --git a/Documentation/kmemleak.txt b/Documentation/kmemleak.txt
new file mode 100644
index 0000000..4689f85
--- /dev/null
+++ b/Documentation/kmemleak.txt
@@ -0,0 +1,161 @@
+Kernel Memory Leak Detector
+===========================
+
+
+Introduction
+------------
+
+Kmemleak provides a way of detecting possible kernel memory leaks in a
+way similar to a tracing garbage collector
+(http://en.wikipedia.org/wiki/Garbage_collection_%28computer_science%29#Tracing_garbage_collectors),
+with the difference that the orphan objects are not freed but only
+reported via /sys/kernel/debug/memleak. A similar method is used by
+the Valgrind tool (memcheck --leak-check) to detect the memory leaks
+in user-space applications.
+
+
+Usage
+-----
+
+CONFIG_DEBUG_MEMLEAK has to be enabled. For additional config options,
+look in:
+
+  -> Kernel hacking
+    -> Kernel debugging
+      -> Debug slab memory allocations
+        -> Kernel memory leak detector
+
+To display the possible memory leaks:
+
+  # mount -t debugfs nodev /sys/kernel/debug/
+  # cat /sys/kernel/debug/memleak
+
+In order to reduce the run-time overhead, memory scanning is only
+performed when reading the /sys/kernel/debug/memleak file. Note that
+the orphan objects are listed in the order they were allocated, and
+that an object reported at the beginning of the list may be the cause
+of subsequent objects being reported as orphans.
+
+
+Basic Algorithm
+---------------
+
+The memory allocations via kmalloc, vmalloc, kmem_cache_alloc and
+friends are tracked and the pointers, together with additional
+information like size and stack trace, are stored in a hash table. The
+corresponding freeing function calls are tracked and the pointers
+removed from the hash table.
+
+An allocated block of memory is considered orphan if no pointer to its
+start address or to an alias (pointer aliases are explained later) can
+be found by scanning the memory (including saved registers). This
+means that there might be no way for the kernel to pass the address of
+the allocated block to a freeing function and therefore the block is
+considered a leak.
+
+The scanning algorithm steps:
+
+  1. mark all objects as white (remaining white objects will later be
+     considered orphan)
+  2. scan the memory starting with the data section and stacks,
+     checking the values against the addresses stored in the hash
+     table. If a pointer to a white object is found, the object is
+     added to the grey list
+  3. scan the grey objects for matching addresses (some white objects
+     can become grey and added at the end of the grey list) until the
+     grey set is finished
+  4. the remaining white objects are considered orphan and reported
+     via /sys/kernel/debug/memleak
+
+
+Improvements
+------------
+
+Because the Linux kernel calculates many pointers at run-time via the
+container_of macro (see the lists implementation), a lot of false
+positives would be reported. This tool re-writes the container_of
+macro so that the offset and type information is stored in the
+.init.memleak_offsets section. The memleak_init() function creates a
+radix tree with corresponding offsets for every encountered block
+type. The memory allocations hook stores the pointer address together
+with its aliases based on the type of the allocated block.
+
+While one level of offsets should be enough for most cases, a second
+level, i.e. container_of(container_of(...)), can be enabled via the
+configuration options (one false positive is the "struct socket_alloc"
+allocation in the sock_alloc_inode() function).
+
+Some allocated memory blocks have pointers stored in the kernel's
+internal data structures and therefore cannot be detected as orphans.
+To avoid this, kmemleak can also record the number of values equal to
+the pointer (or to one of its aliases) that must be found during
+scanning for the block not to be considered a leak. One example is
+__vmalloc().
+
+
+Limitations and Drawbacks
+-------------------------
+
+The biggest drawback is the reduced performance of memory allocation
+and freeing. To avoid other penalties, the memory scanning is only
+performed when the /sys/kernel/debug/memleak file is read. Anyway,
+this tool is intended for debugging purposes where the performance
+might not be the most important requirement.
+
+Kmemleak currently approximates the type id using the sizeof()
+compiler built-in function. This is not accurate and can lead to false
+negatives. The aim is to gradually change the kernel and kmemleak to
+do more precise type identification.
+
+Another source of false negatives is the data stored in non-pointer
+values. Together with the more precise type identification, kmemleak
+could only scan the pointer members in the allocated structures.
+
+The tool can report false positives. These are cases where an
+allocated block doesn't need to be freed (some cases in the init_call
+functions), the pointer is calculated by other methods than the
+container_of macro or the pointer is stored in a location not scanned
+by kmemleak. If the "member" argument in the offsetof(type, member)
+call is not constant, kmemleak considers the offset as zero since it
+cannot be determined at compilation time.
+
+Page allocations and ioremap are not tracked. Only the ARM and i386
+architectures are currently supported.
+
+
+Kmemleak API
+------------
+
+See the include/linux/memleak.h header for the function prototypes.
+
+memleak_init		- initialize kmemleak
+memleak_alloc		- notify of a memory block allocation
+memleak_free		- notify of a memory block freeing
+memleak_padding		- mark the boundaries of the data inside the block
+memleak_not_leak	- mark an object as not a leak
+memleak_ignore		- do not scan or report an object as leak
+memleak_scan_area	- add scan areas inside a memory block
+memleak_insert_aliases	- add aliases for a given type
+memleak_erase		- erase an old value in a pointer variable
+memleak_typeid_raw	- set the typeid for an allocated block
+memleak_container	- statically declare a pointer alias
+memleak_typeid		- set the typeid for an allocated block (takes
+			  a type rather than typeid as argument)
+
+
+Dealing with false positives/negatives
+--------------------------------------
+
+To reduce the false negatives, kmemleak provides the memleak_ignore,
+memleak_scan_area and memleak_erase functions. The task stacks also
+increase the amount of false negatives and their scanning is not
+enabled by default.
+
+To eliminate the false positives caused by code allocating a different
+size from the object one (either for alignment or for extra memory
+after the end of the structure), kmemleak provides the memleak_padding
+and memleak_typeid functions.
+
+For objects known not to be leaks, kmemleak provides the
+memleak_not_leak function. The memleak_ignore could also be used if
+the memory block is known not to contain other pointers and it will no
+longer be scanned.

^ permalink raw reply related	[flat|nested] 11+ messages in thread

* [PATCH 2.6.19 03/10] Add the memory allocation/freeing hooks for kmemleak
  2006-11-30 22:59 [PATCH 2.6.19 00/10] Kernel memory leak detector 0.12 Catalin Marinas
  2006-11-30 22:59 ` [PATCH 2.6.19 01/10] Base support for kmemleak Catalin Marinas
  2006-11-30 23:00 ` [PATCH 2.6.19 02/10] Kmemleak documentation Catalin Marinas
@ 2006-11-30 23:00 ` Catalin Marinas
  2006-11-30 23:00 ` [PATCH 2.6.19 04/10] Modules support " Catalin Marinas
                   ` (6 subsequent siblings)
  9 siblings, 0 replies; 11+ messages in thread
From: Catalin Marinas @ 2006-11-30 23:00 UTC (permalink / raw)
  To: linux-kernel

This patch adds the callbacks to memleak_(alloc|free) functions from
kmalloc/kfree, kmem_cache_(alloc|free), vmalloc/vfree etc.

Signed-off-by: Catalin Marinas <catalin.marinas@gmail.com>
---

 include/linux/slab.h |    6 ++++++
 mm/page_alloc.c      |    2 ++
 mm/slab.c            |   19 +++++++++++++++++--
 mm/vmalloc.c         |   22 ++++++++++++++++++++--
 4 files changed, 45 insertions(+), 4 deletions(-)

diff --git a/include/linux/slab.h b/include/linux/slab.h
index c4947b8..cbb8e47 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -125,6 +125,8 @@ extern void *__kmalloc(size_t, gfp_t);
  */
 static inline void *kmalloc(size_t size, gfp_t flags)
 {
+#ifndef CONFIG_DEBUG_MEMLEAK
+	/* this block removes the size information needed by kmemleak */
 	if (__builtin_constant_p(size)) {
 		int i = 0;
 #define CACHE(x) \
@@ -143,6 +145,7 @@ found:
 			malloc_sizes[i].cs_dmacachep :
 			malloc_sizes[i].cs_cachep, flags);
 	}
+#endif
 	return __kmalloc(size, flags);
 }
 
@@ -172,6 +175,8 @@ extern void *__kzalloc(size_t, gfp_t);
  */
 static inline void *kzalloc(size_t size, gfp_t flags)
 {
+#ifndef CONFIG_DEBUG_MEMLEAK
+	/* this block removes the size information needed by kmemleak */
 	if (__builtin_constant_p(size)) {
 		int i = 0;
 #define CACHE(x) \
@@ -190,6 +195,7 @@ found:
 			malloc_sizes[i].cs_dmacachep :
 			malloc_sizes[i].cs_cachep, flags);
 	}
+#endif
 	return __kzalloc(size, flags);
 }
 
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index aa6fcc7..2176d03 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -3107,6 +3107,8 @@ void *__init alloc_large_system_hash(con
 	if (_hash_mask)
 		*_hash_mask = (1 << log2qty) - 1;
 
+	memleak_alloc(table, size, 1);
+
 	return table;
 }
 
diff --git a/mm/slab.c b/mm/slab.c
index 3c4a7e3..93aead5 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -2549,6 +2549,9 @@ static struct slab *alloc_slabmgmt(struc
 		/* Slab management obj is off-slab. */
 		slabp = kmem_cache_alloc_node(cachep->slabp_cache,
 					      local_flags, nodeid);
+		/* only scan the list member to avoid false negatives */
+		memleak_scan_area(slabp, offsetof(struct slab, list),
+				  sizeof(struct list_head));
 		if (!slabp)
 			return NULL;
 	} else {
@@ -3084,6 +3087,8 @@ static inline void *____cache_alloc(stru
 		STATS_INC_ALLOCMISS(cachep);
 		objp = cache_alloc_refill(cachep, flags);
 	}
+	/* avoid false negatives */
+	memleak_erase(&ac->entry[ac->avail]);
 	return objp;
 }
 
@@ -3112,6 +3117,7 @@ static __always_inline void *__cache_all
 	local_irq_restore(save_flags);
 	objp = cache_alloc_debugcheck_after(cachep, flags, objp,
 					    caller);
+	memleak_alloc(objp, obj_size(cachep), 1);
 	prefetchw(objp);
 	return objp;
 }
@@ -3338,6 +3344,7 @@ static inline void __cache_free(struct k
 	struct array_cache *ac = cpu_cache_get(cachep);
 
 	check_irq_off();
+	memleak_free(objp);
 	objp = cache_free_debugcheck(cachep, objp, __builtin_return_address(0));
 
 	if (cache_free_alien(cachep, objp))
@@ -3457,6 +3464,7 @@ void *kmem_cache_alloc_node(struct kmem_
 
 	ptr = cache_alloc_debugcheck_after(cachep, flags, ptr,
 					   __builtin_return_address(0));
+	memleak_alloc(ptr, obj_size(cachep), 1);
 
 	return ptr;
 }
@@ -3465,11 +3473,14 @@ EXPORT_SYMBOL(kmem_cache_alloc_node);
 void *__kmalloc_node(size_t size, gfp_t flags, int node)
 {
 	struct kmem_cache *cachep;
+	void *ptr;
 
 	cachep = kmem_find_general_cachep(size, flags);
 	if (unlikely(cachep == NULL))
 		return NULL;
-	return kmem_cache_alloc_node(cachep, flags, node);
+	ptr = kmem_cache_alloc_node(cachep, flags, node);
+	memleak_padding(ptr, 0, size);
+	return ptr;
 }
 EXPORT_SYMBOL(__kmalloc_node);
 #endif
@@ -3484,6 +3495,7 @@ static __always_inline void *__do_kmallo
 					  void *caller)
 {
 	struct kmem_cache *cachep;
+	void *ptr;
 
 	/* If you want to save a few bytes .text space: replace
 	 * __ with kmem_.
@@ -3493,7 +3505,10 @@ static __always_inline void *__do_kmallo
 	cachep = __find_general_cachep(size, flags);
 	if (unlikely(cachep == NULL))
 		return NULL;
-	return __cache_alloc(cachep, flags, caller);
+	ptr = __cache_alloc(cachep, flags, caller);
+	memleak_padding(ptr, 0, size);
+
+	return ptr;
 }
 
 
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 86897ee..603aee9 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -365,6 +365,9 @@ void __vunmap(void *addr, int deallocate
 void vfree(void *addr)
 {
 	BUG_ON(in_interrupt());
+
+	memleak_free(addr);
+
 	__vunmap(addr, 1);
 }
 EXPORT_SYMBOL(vfree);
@@ -465,7 +468,14 @@ fail:
 
 void *__vmalloc_area(struct vm_struct *area, gfp_t gfp_mask, pgprot_t prot)
 {
-	return __vmalloc_area_node(area, gfp_mask, prot, -1);
+	void *addr = __vmalloc_area_node(area, gfp_mask, prot, -1);
+
+	/* this needs ref_count = 2 since vm_struct also contains a
+	 * pointer to this address. The guard page is also subtracted
+	 * from the size */
+	memleak_alloc(addr, area->size - PAGE_SIZE, 2);
+
+	return addr;
 }
 
 /**
@@ -483,6 +493,8 @@ static void *__vmalloc_node(unsigned lon
 			    int node)
 {
 	struct vm_struct *area;
+	void *addr;
+	unsigned long real_size = size;
 
 	size = PAGE_ALIGN(size);
 	if (!size || (size >> PAGE_SHIFT) > num_physpages)
@@ -492,7 +504,13 @@ static void *__vmalloc_node(unsigned lon
 	if (!area)
 		return NULL;
 
-	return __vmalloc_area_node(area, gfp_mask, prot, node);
+	addr = __vmalloc_area_node(area, gfp_mask, prot, node);
+
+	/* this needs ref_count = 2 since the vm_struct also contains
+	   a pointer to this address */
+	memleak_alloc(addr, real_size, 2);
+
+	return addr;
 }
 
 void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot)

^ permalink raw reply related	[flat|nested] 11+ messages in thread

* [PATCH 2.6.19 04/10] Modules support for kmemleak
  2006-11-30 22:59 [PATCH 2.6.19 00/10] Kernel memory leak detector 0.12 Catalin Marinas
                   ` (2 preceding siblings ...)
  2006-11-30 23:00 ` [PATCH 2.6.19 03/10] Add the memory allocation/freeing hooks for kmemleak Catalin Marinas
@ 2006-11-30 23:00 ` Catalin Marinas
  2006-11-30 23:00 ` [PATCH 2.6.19 05/10] Add kmemleak support for i386 Catalin Marinas
                   ` (5 subsequent siblings)
  9 siblings, 0 replies; 11+ messages in thread
From: Catalin Marinas @ 2006-11-30 23:00 UTC (permalink / raw)
  To: linux-kernel

This patch handles the kmemleak operations needed for module loading so
that memory allocations from inside a module are properly tracked.

Signed-off-by: Catalin Marinas <catalin.marinas@gmail.com>
---

 kernel/module.c |   56 +++++++++++++++++++++++++++++++++++++++++++++++++++++++
 1 files changed, 56 insertions(+), 0 deletions(-)

diff --git a/kernel/module.c b/kernel/module.c
index f016656..b2b9526 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -345,6 +345,7 @@ static void *percpu_modalloc(unsigned lo
 	unsigned long extra;
 	unsigned int i;
 	void *ptr;
+	int cpu;
 
 	if (align > SMP_CACHE_BYTES) {
 		printk(KERN_WARNING "%s: per-cpu alignment %li > %i\n",
@@ -374,6 +375,10 @@ static void *percpu_modalloc(unsigned lo
 			if (!split_block(i, size))
 				return NULL;
 
+		/* add the per-cpu scanning areas */
+		for_each_possible_cpu(cpu)
+			memleak_alloc(ptr + per_cpu_offset(cpu), size, 0);
+
 		/* Mark allocated */
 		pcpu_size[i] = -pcpu_size[i];
 		return ptr;
@@ -388,6 +393,7 @@ static void percpu_modfree(void *freeme)
 {
 	unsigned int i;
 	void *ptr = __per_cpu_start + block_size(pcpu_size[0]);
+	int cpu;
 
 	/* First entry is core kernel percpu data. */
 	for (i = 1; i < pcpu_num_used; ptr += block_size(pcpu_size[i]), i++) {
@@ -399,6 +405,10 @@ static void percpu_modfree(void *freeme)
 	BUG();
 
  free:
+	/* remove the per-cpu scanning areas */
+	for_each_possible_cpu(cpu)
+		memleak_free(freeme + per_cpu_offset(cpu));
+
 	/* Merge with previous? */
 	if (pcpu_size[i-1] >= 0) {
 		pcpu_size[i-1] += pcpu_size[i];
@@ -1477,6 +1487,42 @@ static inline void add_kallsyms(struct m
 }
 #endif /* CONFIG_KALLSYMS */
 
+#ifdef CONFIG_DEBUG_MEMLEAK
+static void memleak_load_module(struct module *mod, Elf_Ehdr *hdr,
+				Elf_Shdr *sechdrs, char *secstrings)
+{
+	unsigned int mloffindex, i;
+
+	/* insert any new pointer aliases */
+	mloffindex = find_sec(hdr, sechdrs, secstrings, ".init.memleak_offsets");
+	if (mloffindex)
+		memleak_insert_aliases((void *)sechdrs[mloffindex].sh_addr,
+				       (void *)sechdrs[mloffindex].sh_addr
+				       + sechdrs[mloffindex].sh_size);
+
+	/* only scan the sections containing data */
+	memleak_scan_area(mod->module_core,
+			  (unsigned long)mod - (unsigned long)mod->module_core,
+			  sizeof(struct module));
+
+	for (i = 1; i < hdr->e_shnum; i++) {
+		if (!(sechdrs[i].sh_flags & SHF_ALLOC))
+			continue;
+		if (strncmp(secstrings + sechdrs[i].sh_name, ".data", 5) != 0
+		    && strncmp(secstrings + sechdrs[i].sh_name, ".bss", 4) != 0)
+			continue;
+
+		memleak_scan_area(mod->module_core,
+				  sechdrs[i].sh_addr - (unsigned long)mod->module_core,
+				  sechdrs[i].sh_size);
+	}
+}
+#else
+static inline void memleak_load_module(struct module *mod, Elf_Ehdr *hdr,
+				       Elf_Shdr *sechdrs, char *secstrings)
+{ }
+#endif
+
 /* Allocate and load the module: note that size of section 0 is always
    zero, and we rely on this for optional sections. */
 static struct module *load_module(void __user *umod,
@@ -1672,6 +1718,10 @@ static struct module *load_module(void _
 
 	/* Do the allocs. */
 	ptr = module_alloc(mod->core_size);
+	/* the pointer to this block is stored in the module structure
+	 * which is inside the block. Just mark it as not being a
+	 * leak */
+	memleak_not_leak(ptr);
 	if (!ptr) {
 		err = -ENOMEM;
 		goto free_percpu;
@@ -1680,6 +1730,11 @@ static struct module *load_module(void _
 	mod->module_core = ptr;
 
 	ptr = module_alloc(mod->init_size);
+	/* the pointer to this block is stored in the module structure
+	 * which is inside the block. This block doesn't need to be
+	 * scanned as it contains data and code that will be freed
+	 * after the module is initialized */
+	memleak_ignore(ptr);
 	if (!ptr && mod->init_size) {
 		err = -ENOMEM;
 		goto free_core;
@@ -1710,6 +1765,7 @@ static struct module *load_module(void _
 	}
 	/* Module has been moved. */
 	mod = (void *)sechdrs[modindex].sh_addr;
+	memleak_load_module(mod, hdr, sechdrs, secstrings);
 
 	/* Now we've moved module, initialize linked lists, etc. */
 	module_unload_init(mod);

^ permalink raw reply related	[flat|nested] 11+ messages in thread

* [PATCH 2.6.19 05/10] Add kmemleak support for i386
  2006-11-30 22:59 [PATCH 2.6.19 00/10] Kernel memory leak detector 0.12 Catalin Marinas
                   ` (3 preceding siblings ...)
  2006-11-30 23:00 ` [PATCH 2.6.19 04/10] Modules support " Catalin Marinas
@ 2006-11-30 23:00 ` Catalin Marinas
  2006-11-30 23:01 ` [PATCH 2.6.19 06/10] Add kmemleak support for ARM Catalin Marinas
                   ` (4 subsequent siblings)
  9 siblings, 0 replies; 11+ messages in thread
From: Catalin Marinas @ 2006-11-30 23:00 UTC (permalink / raw)
  To: linux-kernel

This patch modifies the vmlinux.lds.S script and adds the backtrace support
for i386 to be used with kmemleak.

Signed-off-by: Catalin Marinas <catalin.marinas@gmail.com>
---

 arch/i386/kernel/vmlinux.lds.S |    4 ++++
 include/asm-i386/thread_info.h |   10 +++++++++-
 2 files changed, 13 insertions(+), 1 deletions(-)

diff --git a/arch/i386/kernel/vmlinux.lds.S b/arch/i386/kernel/vmlinux.lds.S
index c6f84a0..499c32b 100644
--- a/arch/i386/kernel/vmlinux.lds.S
+++ b/arch/i386/kernel/vmlinux.lds.S
@@ -52,6 +52,7 @@ SECTIONS
 
   /* writeable */
   . = ALIGN(4096);
+  _sdata = .;			/* Start of data section */
   .data : AT(ADDR(.data) - LOAD_OFFSET) {	/* Data */
 	*(.data)
 	CONSTRUCTORS
@@ -157,6 +158,9 @@ SECTIONS
   __per_cpu_start = .;
   .data.percpu  : AT(ADDR(.data.percpu) - LOAD_OFFSET) { *(.data.percpu) }
   __per_cpu_end = .;
+  __memleak_offsets_start = .;
+  .init.memleak_offsets : AT(ADDR(.init.memleak_offsets) - LOAD_OFFSET) { *(.init.memleak_offsets) }
+  __memleak_offsets_end = .;
   . = ALIGN(4096);
   __init_end = .;
   /* freed after init ends here */
diff --git a/include/asm-i386/thread_info.h b/include/asm-i386/thread_info.h
index 54d6d7a..054553f 100644
--- a/include/asm-i386/thread_info.h
+++ b/include/asm-i386/thread_info.h
@@ -100,12 +100,20 @@ static inline struct thread_info *curren
 		struct thread_info *ret;			\
 								\
 		ret = kmalloc(THREAD_SIZE, GFP_KERNEL);		\
+		memleak_ignore(ret);				\
 		if (ret)					\
 			memset(ret, 0, THREAD_SIZE);		\
 		ret;						\
 	})
 #else
-#define alloc_thread_info(tsk) kmalloc(THREAD_SIZE, GFP_KERNEL)
+#define alloc_thread_info(tsk)					\
+	({							\
+		struct thread_info *ret;			\
+								\
+		ret = kmalloc(THREAD_SIZE, GFP_KERNEL);		\
+		memleak_ignore(ret);				\
+		ret;						\
+	})
 #endif
 
 #define free_thread_info(info)	kfree(info)

^ permalink raw reply related	[flat|nested] 11+ messages in thread

* [PATCH 2.6.19 06/10] Add kmemleak support for ARM
  2006-11-30 22:59 [PATCH 2.6.19 00/10] Kernel memory leak detector 0.12 Catalin Marinas
                   ` (4 preceding siblings ...)
  2006-11-30 23:00 ` [PATCH 2.6.19 05/10] Add kmemleak support for i386 Catalin Marinas
@ 2006-11-30 23:01 ` Catalin Marinas
  2006-11-30 23:01 ` [PATCH 2.6.19 07/10] Remove some of the kmemleak false positives Catalin Marinas
                   ` (3 subsequent siblings)
  9 siblings, 0 replies; 11+ messages in thread
From: Catalin Marinas @ 2006-11-30 23:01 UTC (permalink / raw)
  To: linux-kernel

This patch modifies the vmlinux.lds.S script and adds the backtrace support
for ARM to be used with kmemleak.

Signed-off-by: Catalin Marinas <catalin.marinas@gmail.com>
---

 arch/arm/kernel/vmlinux.lds.S |    7 +++++++
 1 files changed, 7 insertions(+), 0 deletions(-)

diff --git a/arch/arm/kernel/vmlinux.lds.S b/arch/arm/kernel/vmlinux.lds.S
index a8fa75e..7ec22ad 100644
--- a/arch/arm/kernel/vmlinux.lds.S
+++ b/arch/arm/kernel/vmlinux.lds.S
@@ -61,6 +61,11 @@ SECTIONS
 		__per_cpu_start = .;
 			*(.data.percpu)
 		__per_cpu_end = .;
+#ifdef CONFIG_DEBUG_MEMLEAK
+		__memleak_offsets_start = .;
+			*(.init.memleak_offsets)
+		__memleak_offsets_end = .;
+#endif
 #ifndef CONFIG_XIP_KERNEL
 		__init_begin = _stext;
 		*(.init.data)
@@ -109,6 +114,7 @@ SECTIONS
 
 	.data : AT(__data_loc) {
 		__data_start = .;	/* address in memory */
+		_sdata = .;
 
 		/*
 		 * first, the init task union, aligned
@@ -159,6 +165,7 @@ SECTIONS
 		__bss_start = .;	/* BSS				*/
 		*(.bss)
 		*(COMMON)
+		__bss_stop = .;
 		_end = .;
 	}
 					/* Stabs debugging sections.	*/

^ permalink raw reply related	[flat|nested] 11+ messages in thread

* [PATCH 2.6.19 07/10] Remove some of the kmemleak false positives
  2006-11-30 22:59 [PATCH 2.6.19 00/10] Kernel memory leak detector 0.12 Catalin Marinas
                   ` (5 preceding siblings ...)
  2006-11-30 23:01 ` [PATCH 2.6.19 06/10] Add kmemleak support for ARM Catalin Marinas
@ 2006-11-30 23:01 ` Catalin Marinas
  2006-11-30 23:01 ` [PATCH 2.6.19 08/10] Keep the __init functions after initialization Catalin Marinas
                   ` (2 subsequent siblings)
  9 siblings, 0 replies; 11+ messages in thread
From: Catalin Marinas @ 2006-11-30 23:01 UTC (permalink / raw)
  To: linux-kernel

There are allocations for which the main pointer cannot be found but they
are not memory leaks. This patch fixes some of them.

Signed-off-by: Catalin Marinas <catalin.marinas@gmail.com>
---

 arch/i386/kernel/setup.c               |    2 ++
 drivers/base/platform.c                |    3 +++
 drivers/char/vt.c                      |    4 ++++
 drivers/hwmon/w83627hf.c               |    4 ++++
 drivers/scsi/hosts.c                   |    3 +++
 drivers/video/console/fbcon.c          |    3 +++
 fs/ext3/dir.c                          |    3 +++
 include/linux/percpu.h                 |    5 +++++
 ipc/util.c                             |    6 ++++++
 kernel/params.c                        |    8 +++++++-
 net/core/dev.c                         |    6 ++++++
 net/core/skbuff.c                      |    3 +++
 net/ipv4/netfilter/ip_conntrack_core.c |    5 +++++
 net/sched/sch_generic.c                |    5 +++++
 14 files changed, 59 insertions(+), 1 deletions(-)

diff --git a/arch/i386/kernel/setup.c b/arch/i386/kernel/setup.c
index 141041d..610a725 100644
--- a/arch/i386/kernel/setup.c
+++ b/arch/i386/kernel/setup.c
@@ -1487,6 +1487,8 @@ static __init int add_pcspkr(void)
 	int ret;
 
 	pd = platform_device_alloc("pcspkr", -1);
+	/* mark it as not a leak since this device doesn't need to be freed */
+	memleak_not_leak(pd);
 	if (!pd)
 		return -ENOMEM;
 
diff --git a/drivers/base/platform.c b/drivers/base/platform.c
index 940ce41..d331b54 100644
--- a/drivers/base/platform.c
+++ b/drivers/base/platform.c
@@ -166,6 +166,9 @@ struct platform_device *platform_device_
 	struct platform_object *pa;
 
 	pa = kzalloc(sizeof(struct platform_object) + strlen(name), GFP_KERNEL);
+	/* kmemleak cannot guess the object type because the block
+	 * size is different from the object size */
+	memleak_typeid(pa, struct platform_object);
 	if (pa) {
 		strcpy(pa->name, name);
 		pa->pdev.name = pa->name;
diff --git a/drivers/char/vt.c b/drivers/char/vt.c
index 8e4413f..614d7e9 100644
--- a/drivers/char/vt.c
+++ b/drivers/char/vt.c
@@ -2640,6 +2640,10 @@ static int __init con_init(void)
 	 */
 	for (currcons = 0; currcons < MIN_NR_CONSOLES; currcons++) {
 		vc_cons[currcons].d = vc = alloc_bootmem(sizeof(struct vc_data));
+		/* kmemleak does not track the memory allocated via
+		 * alloc_bootmem() but this block contains pointers to
+		 * other blocks allocated via kmalloc */
+		memleak_alloc(vc, sizeof(struct vc_data), 1);
 		visual_init(vc, currcons, 1);
 		vc->vc_screenbuf = (unsigned short *)alloc_bootmem(vc->vc_screenbuf_size);
 		vc->vc_kmalloced = 0;
diff --git a/drivers/hwmon/w83627hf.c b/drivers/hwmon/w83627hf.c
index dfdc29c..6f5c70f 100644
--- a/drivers/hwmon/w83627hf.c
+++ b/drivers/hwmon/w83627hf.c
@@ -1097,6 +1097,10 @@ static int w83627hf_detect(struct i2c_ad
 		err = -ENOMEM;
 		goto ERROR1;
 	}
+	/* the pointer to member is stored but the code doesn't use
+	 * container_of for access and the alias need to be
+	 * explicitely declared here */
+	memleak_container(struct w83627hf_data, client);
 
 	new_client = &data->client;
 	i2c_set_clientdata(new_client, data);
diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c
index 68ef163..34b9d41 100644
--- a/drivers/scsi/hosts.c
+++ b/drivers/scsi/hosts.c
@@ -300,6 +300,9 @@ struct Scsi_Host *scsi_host_alloc(struct
 	shost = kzalloc(sizeof(struct Scsi_Host) + privsize, gfp_mask);
 	if (!shost)
 		return NULL;
+	/* kmemleak cannot guess the object type because the block
+	 * size is different from the object size */
+	memleak_typeid(shost, struct Scsi_Host);
 
 	spin_lock_init(&shost->default_lock);
 	scsi_assign_lock(shost, &shost->default_lock);
diff --git a/drivers/video/console/fbcon.c b/drivers/video/console/fbcon.c
index 302174b..b482cac 100644
--- a/drivers/video/console/fbcon.c
+++ b/drivers/video/console/fbcon.c
@@ -2485,6 +2485,9 @@ static int fbcon_set_font(struct vc_data
 	size = h * pitch * charcount;
 
 	new_data = kmalloc(FONT_EXTRA_WORDS * sizeof(int) + size, GFP_USER);
+	/* the stored pointer is different from the address of the
+	 * allocated block because of padding */
+	memleak_padding(new_data, FONT_EXTRA_WORDS * sizeof(int), size);
 
 	if (!new_data)
 		return -ENOMEM;
diff --git a/fs/ext3/dir.c b/fs/ext3/dir.c
index d0b54f3..5a102ce 100644
--- a/fs/ext3/dir.c
+++ b/fs/ext3/dir.c
@@ -349,6 +349,9 @@ int ext3_htree_store_dirent(struct file
 	new_fn = kzalloc(len, GFP_KERNEL);
 	if (!new_fn)
 		return -ENOMEM;
+	/* kmemleak cannot guess the object type because the block
+	 * size is different from the object size */
+	memleak_typeid(new_fn, struct fname);
 	new_fn->hash = hash;
 	new_fn->minor_hash = minor_hash;
 	new_fn->inode = le32_to_cpu(dirent->inode);
diff --git a/include/linux/percpu.h b/include/linux/percpu.h
index 600e3d3..899776c 100644
--- a/include/linux/percpu.h
+++ b/include/linux/percpu.h
@@ -30,7 +30,12 @@ struct percpu_data {
 	void *ptrs[NR_CPUS];
 };
 
+/* pointer disguising messes up the kmemleak objects tracking */
+#ifndef CONFIG_DEBUG_MEMLEAK
 #define __percpu_disguise(pdata) (struct percpu_data *)~(unsigned long)(pdata)
+#else
+#define __percpu_disguise(pdata) (struct percpu_data *)(pdata)
+#endif
 /* 
  * Use this to get to a cpu's version of the per-cpu object dynamically
  * allocated. Non-atomic access to the current CPU's version should
diff --git a/ipc/util.c b/ipc/util.c
index cd8bb14..6ac2b7e 100644
--- a/ipc/util.c
+++ b/ipc/util.c
@@ -492,6 +492,9 @@ void* ipc_rcu_alloc(int size)
 	 */
 	if (rcu_use_vmalloc(size)) {
 		out = vmalloc(HDRLEN_VMALLOC + size);
+		/* the stored pointer is different from the address of
+		 * the allocated block because of padding */
+		memleak_padding(out, HDRLEN_VMALLOC, size);
 		if (out) {
 			out += HDRLEN_VMALLOC;
 			container_of(out, struct ipc_rcu_hdr, data)->is_vmalloc = 1;
@@ -499,6 +502,9 @@ void* ipc_rcu_alloc(int size)
 		}
 	} else {
 		out = kmalloc(HDRLEN_KMALLOC + size, GFP_KERNEL);
+		/* the stored pointer is different from the address of
+		 * the allocated block because of padding */
+		memleak_padding(out, HDRLEN_KMALLOC, size);
 		if (out) {
 			out += HDRLEN_KMALLOC;
 			container_of(out, struct ipc_rcu_hdr, data)->is_vmalloc = 0;
diff --git a/kernel/params.c b/kernel/params.c
index f406655..1510d89 100644
--- a/kernel/params.c
+++ b/kernel/params.c
@@ -548,6 +548,7 @@ static void __init kernel_param_sysfs_se
 {
 	struct module_kobject *mk;
 	int ret;
+	struct module_param_attrs *mp;
 
 	mk = kzalloc(sizeof(struct module_kobject), GFP_KERNEL);
 	BUG_ON(!mk);
@@ -558,8 +559,13 @@ static void __init kernel_param_sysfs_se
 	ret = kobject_register(&mk->kobj);
 	BUG_ON(ret < 0);
 
+	mp = param_sysfs_setup(mk, kparam, num_params, name_skip);
+	/* this structure is not freed but the pointer is
+	 * lost. However, there are other pointers to its members and
+	 * the object has to be kept */
+	memleak_not_leak(mp);
 	/* no need to keep the kobject if no parameter is exported */
-	if (!param_sysfs_setup(mk, kparam, num_params, name_skip)) {
+	if (!mp) {
 		kobject_unregister(&mk->kobj);
 		kfree(mk);
 	}
diff --git a/net/core/dev.c b/net/core/dev.c
index 81c426a..8ee5c01 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -3210,6 +3210,12 @@ struct net_device *alloc_netdev(int size
 	dev = (struct net_device *)
 		(((long)p + NETDEV_ALIGN_CONST) & ~NETDEV_ALIGN_CONST);
 	dev->padded = (char *)dev - (char *)p;
+	/* kmemleak cannot guess the object type because the block
+	 * size is different from the object size. The stored pointer
+	 * is also different from the address of the allocated block
+	 * because of padding */
+	memleak_padding(p, dev->padded, alloc_size - dev->padded);
+	memleak_typeid(p, struct net_device);
 
 	if (sizeof_priv)
 		dev->priv = netdev_priv(dev);
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index b8b1063..247bbe6 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -151,6 +151,9 @@ struct sk_buff *__alloc_skb(unsigned int
 
 	/* Get the HEAD */
 	skb = kmem_cache_alloc(cache, gfp_mask & ~__GFP_DMA);
+	/* the skbuff_fclone_cache contains objects larger than
+	 * "struct sk_buff" and kmemleak cannot guess the type */
+	memleak_typeid(skb, struct sk_buff);
 	if (!skb)
 		goto out;
 
diff --git a/net/ipv4/netfilter/ip_conntrack_core.c b/net/ipv4/netfilter/ip_conntrack_core.c
index 8b848aa..6013147 100644
--- a/net/ipv4/netfilter/ip_conntrack_core.c
+++ b/net/ipv4/netfilter/ip_conntrack_core.c
@@ -645,6 +645,11 @@ struct ip_conntrack *ip_conntrack_alloc(
 	}
 
 	conntrack = kmem_cache_alloc(ip_conntrack_cachep, GFP_ATOMIC);
+	/* tuplehash_to_ctrack doesn't pass a constant argument to
+	 * container_of and therefore the conntrack->tuplehash[].list
+	 * aliases are ignored */
+	memleak_container(struct ip_conntrack, tuplehash[IP_CT_DIR_ORIGINAL]);
+	memleak_container(struct ip_conntrack, tuplehash[IP_CT_DIR_REPLY]);
 	if (!conntrack) {
 		DEBUGP("Can't allocate conntrack.\n");
 		atomic_dec(&ip_conntrack_count);
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index 88c6a99..307c3fb 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -434,6 +434,11 @@ struct Qdisc *qdisc_alloc(struct net_dev
 		goto errout;
 	sch = (struct Qdisc *) QDISC_ALIGN((unsigned long) p);
 	sch->padded = (char *) sch - (char *) p;
+	/* kmemleak cannot guess the object type because the block
+	 * size is different from the object size. The stored pointer
+	 * is also different from the address of the allocated block
+	 * because of padding */
+	memleak_padding(p, sch->padded, sizeof(struct Qdisc));
 
 	INIT_LIST_HEAD(&sch->list);
 	skb_queue_head_init(&sch->q);

^ permalink raw reply related	[flat|nested] 11+ messages in thread

* [PATCH 2.6.19 08/10] Keep the __init functions after initialization
  2006-11-30 22:59 [PATCH 2.6.19 00/10] Kernel memory leak detector 0.12 Catalin Marinas
                   ` (6 preceding siblings ...)
  2006-11-30 23:01 ` [PATCH 2.6.19 07/10] Remove some of the kmemleak false positives Catalin Marinas
@ 2006-11-30 23:01 ` Catalin Marinas
  2006-11-30 23:01 ` [PATCH 2.6.19 09/10] Simple testing for kmemleak Catalin Marinas
  2006-11-30 23:01 ` [PATCH 2.6.19 10/10] Update the MAINTAINERS file " Catalin Marinas
  9 siblings, 0 replies; 11+ messages in thread
From: Catalin Marinas @ 2006-11-30 23:01 UTC (permalink / raw)
  To: linux-kernel

This patch adds the CONFIG_DEBUG_KEEP_INIT option which preserves the
.init.text section after initialization. Memory leaks happening during this
phase can be more easily tracked.

Signed-off-by: Catalin Marinas <catalin.marinas@gmail.com>
---

 include/linux/init.h |    5 +++++
 lib/Kconfig.debug    |   12 ++++++++++++
 2 files changed, 17 insertions(+), 0 deletions(-)

diff --git a/include/linux/init.h b/include/linux/init.h
index 5eb5d24..d9e92aa 100644
--- a/include/linux/init.h
+++ b/include/linux/init.h
@@ -40,8 +40,13 @@
 
 /* These are for everybody (although not all archs will actually
    discard it in modules) */
+#ifdef CONFIG_DEBUG_KEEP_INIT
+#define __init
+#define __initdata
+#else
 #define __init		__attribute__ ((__section__ (".init.text")))
 #define __initdata	__attribute__ ((__section__ (".init.data")))
+#endif
 #define __exitdata	__attribute__ ((__section__(".exit.data")))
 #define __exit_call	__attribute_used__ __attribute__ ((__section__ (".exitcall.exit")))
 
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 2dda93b..e747478 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -226,6 +226,18 @@ config DEBUG_MEMLEAK_REPORTS_NR
 	  reading the /sys/kernel/debug/memleak file could lead to
 	  some soft-locks.
 
+config DEBUG_KEEP_INIT
+	bool "Do not free the __init code/data"
+	default n
+	depends on DEBUG_MEMLEAK
+	help
+	  This option moves the __init code/data out of the
+	  .init.text/.init.data sections. It is useful for identifying
+	  memory leaks happening during the kernel or modules
+	  initialization.
+
+	  If unsure, say N.
+
 config DEBUG_PREEMPT
 	bool "Debug preemptible kernel"
 	depends on DEBUG_KERNEL && PREEMPT && TRACE_IRQFLAGS_SUPPORT

^ permalink raw reply related	[flat|nested] 11+ messages in thread

* [PATCH 2.6.19 09/10] Simple testing for kmemleak
  2006-11-30 22:59 [PATCH 2.6.19 00/10] Kernel memory leak detector 0.12 Catalin Marinas
                   ` (7 preceding siblings ...)
  2006-11-30 23:01 ` [PATCH 2.6.19 08/10] Keep the __init functions after initialization Catalin Marinas
@ 2006-11-30 23:01 ` Catalin Marinas
  2006-11-30 23:01 ` [PATCH 2.6.19 10/10] Update the MAINTAINERS file " Catalin Marinas
  9 siblings, 0 replies; 11+ messages in thread
From: Catalin Marinas @ 2006-11-30 23:01 UTC (permalink / raw)
  To: linux-kernel

This patch only contains some very simple testing at the moment. Proper
testing will be needed.

Signed-off-by: Catalin Marinas <catalin.marinas@gmail.com>
---

 lib/Kconfig.debug |   11 ++++++
 mm/Makefile       |    1 +
 mm/memleak-test.c |   94 +++++++++++++++++++++++++++++++++++++++++++++++++++++
 3 files changed, 106 insertions(+), 0 deletions(-)

diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index e747478..cd2fd14 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -238,6 +238,17 @@ config DEBUG_KEEP_INIT
 
 	  If unsure, say N.
 
+config DEBUG_MEMLEAK_TEST
+	tristate "Test the kernel memory leak detector"
+	default n
+	depends on DEBUG_MEMLEAK
+	help
+	  Say Y or M here to build the test harness for the kernel
+	  memory leak detector. At the moment, this option enables a
+	  module that explicitly leaks memory.
+
+	  If unsure, say N.
+
 config DEBUG_PREEMPT
 	bool "Debug preemptible kernel"
 	depends on DEBUG_KERNEL && PREEMPT && TRACE_IRQFLAGS_SUPPORT
diff --git a/mm/Makefile b/mm/Makefile
index aafe03f..9f5d450 100644
--- a/mm/Makefile
+++ b/mm/Makefile
@@ -30,3 +30,4 @@ obj-$(CONFIG_FS_XIP) += filemap_xip.o
 obj-$(CONFIG_MIGRATION) += migrate.o
 obj-$(CONFIG_SMP) += allocpercpu.o
 obj-$(CONFIG_DEBUG_MEMLEAK) += memleak.o
+obj-$(CONFIG_DEBUG_MEMLEAK_TEST) += memleak-test.o
diff --git a/mm/memleak-test.c b/mm/memleak-test.c
new file mode 100644
index 0000000..51320d8
--- /dev/null
+++ b/mm/memleak-test.c
@@ -0,0 +1,94 @@
+/*
+ * mm/memleak-test.c
+ *
+ * Copyright (C) 2006 ARM Limited
+ * Written by Catalin Marinas <catalin.marinas@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include <linux/list.h>
+#include <linux/percpu.h>
+
+#include <linux/memleak.h>
+
+struct test_node {
+	long header[25];
+	struct list_head list;
+	long footer[25];
+};
+
+static LIST_HEAD(test_list);
+static DEFINE_PER_CPU(void *, test_pointer);
+
+/* Some very simple testing. This function needs to be extended for
+ * proper testing */
+static int __init memleak_test_init(void)
+{
+	struct test_node *elem;
+	int i;
+
+	printk(KERN_INFO "KMemLeak testing\n");
+
+	/* make some orphan objects */
+	kmalloc(32, GFP_KERNEL);
+	kmalloc(32, GFP_KERNEL);
+	kmalloc(1024, GFP_KERNEL);
+	kmalloc(1024, GFP_KERNEL);
+	kmalloc(2048, GFP_KERNEL);
+	kmalloc(2048, GFP_KERNEL);
+	kmalloc(4096, GFP_KERNEL);
+	kmalloc(4096, GFP_KERNEL);
+#ifndef CONFIG_MODULES
+	kmem_cache_alloc(files_cachep, GFP_KERNEL);
+	kmem_cache_alloc(files_cachep, GFP_KERNEL);
+#endif
+	vmalloc(64);
+	vmalloc(64);
+
+	/* add elements to a list. They should only appear as orphan
+	 * after the module is removed */
+	for (i = 0; i < 10; i++) {
+		elem = kmalloc(sizeof(*elem), GFP_KERNEL);
+		if (!elem)
+			return -ENOMEM;
+		memset(elem, 0, sizeof(*elem));
+		INIT_LIST_HEAD(&elem->list);
+
+		list_add_tail(&elem->list, &test_list);
+	}
+
+	for_each_possible_cpu(i)
+		per_cpu(test_pointer, i) = kmalloc(129, GFP_KERNEL);
+
+	return 0;
+}
+module_init(memleak_test_init);
+
+static void __exit memleak_test_exit(void)
+{
+	struct test_node *elem, *tmp;
+
+	/* remove the list elements without actually freeing the memory */
+	list_for_each_entry_safe(elem, tmp, &test_list, list)
+		list_del(&elem->list);
+}
+module_exit(memleak_test_exit);
+
+MODULE_LICENSE("GPL");

^ permalink raw reply related	[flat|nested] 11+ messages in thread

* [PATCH 2.6.19 10/10] Update the MAINTAINERS file for kmemleak
  2006-11-30 22:59 [PATCH 2.6.19 00/10] Kernel memory leak detector 0.12 Catalin Marinas
                   ` (8 preceding siblings ...)
  2006-11-30 23:01 ` [PATCH 2.6.19 09/10] Simple testing for kmemleak Catalin Marinas
@ 2006-11-30 23:01 ` Catalin Marinas
  9 siblings, 0 replies; 11+ messages in thread
From: Catalin Marinas @ 2006-11-30 23:01 UTC (permalink / raw)
  To: linux-kernel

Signed-off-by: Catalin Marinas <catalin.marinas@gmail.com>
---

 MAINTAINERS |    6 ++++++
 1 files changed, 6 insertions(+), 0 deletions(-)

diff --git a/MAINTAINERS b/MAINTAINERS
index e182992..995486d 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -1722,6 +1722,12 @@ L:	kernel-janitors@lists.osdl.org
 W:	http://www.kerneljanitors.org/
 S:	Maintained
 
+KERNEL MEMORY LEAK DETECTOR
+P:	Catalin Marinas
+M:	catalin.marinas@gmail.com
+W:	http://www.procode.org/kmemleak/
+S:	Maintained
+
 KERNEL NFSD
 P:	Neil Brown
 M:	neilb@cse.unsw.edu.au

^ permalink raw reply related	[flat|nested] 11+ messages in thread

end of thread, other threads:[~2006-11-30 23:02 UTC | newest]

Thread overview: 11+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2006-11-30 22:59 [PATCH 2.6.19 00/10] Kernel memory leak detector 0.12 Catalin Marinas
2006-11-30 22:59 ` [PATCH 2.6.19 01/10] Base support for kmemleak Catalin Marinas
2006-11-30 23:00 ` [PATCH 2.6.19 02/10] Kmemleak documentation Catalin Marinas
2006-11-30 23:00 ` [PATCH 2.6.19 03/10] Add the memory allocation/freeing hooks for kmemleak Catalin Marinas
2006-11-30 23:00 ` [PATCH 2.6.19 04/10] Modules support " Catalin Marinas
2006-11-30 23:00 ` [PATCH 2.6.19 05/10] Add kmemleak support for i386 Catalin Marinas
2006-11-30 23:01 ` [PATCH 2.6.19 06/10] Add kmemleak support for ARM Catalin Marinas
2006-11-30 23:01 ` [PATCH 2.6.19 07/10] Remove some of the kmemleak false positives Catalin Marinas
2006-11-30 23:01 ` [PATCH 2.6.19 08/10] Keep the __init functions after initialization Catalin Marinas
2006-11-30 23:01 ` [PATCH 2.6.19 09/10] Simple testing for kmemleak Catalin Marinas
2006-11-30 23:01 ` [PATCH 2.6.19 10/10] Update the MAINTAINERS file " Catalin Marinas

This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.