All of lore.kernel.org
 help / color / mirror / Atom feed
* [PATCH v2 0/3] mm: cma: debugfs access to CMA
@ 2015-01-26 14:26 Sasha Levin
  2015-01-26 14:26 ` [PATCH v2 1/3] mm: cma: debugfs interface Sasha Levin
                   ` (2 more replies)
  0 siblings, 3 replies; 11+ messages in thread
From: Sasha Levin @ 2015-01-26 14:26 UTC (permalink / raw)
  To: linux-kernel; +Cc: iamjoonsoo.kim, m.szyprowski, akpm, lauraa, Sasha Levin

I've noticed that there are no interfaces exposed by CMA which would let me
fuzz what's going on in there.

This small patch set exposes some information out to userspace, plus adds
the ability to trigger allocation and freeing from userspace.

Changes from v1:
 - Make allocation and free hooks per-cma.
 - Remove additional debug prints.

Sasha Levin (3):
  mm: cma: debugfs interface
  mm: cma: allocation trigger
  mm: cma: release trigger

 mm/Kconfig     |    6 ++
 mm/Makefile    |    1 +
 mm/cma.c       |   19 ++----
 mm/cma.h       |   20 +++++++
 mm/cma_debug.c |  175 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++
 5 files changed, 206 insertions(+), 15 deletions(-)
 create mode 100644 mm/cma.h
 create mode 100644 mm/cma_debug.c

-- 
1.7.10.4


^ permalink raw reply	[flat|nested] 11+ messages in thread

* [PATCH v2 1/3] mm: cma: debugfs interface
  2015-01-26 14:26 [PATCH v2 0/3] mm: cma: debugfs access to CMA Sasha Levin
@ 2015-01-26 14:26 ` Sasha Levin
  2015-01-26 14:26 ` [PATCH v2 2/3] mm: cma: allocation trigger Sasha Levin
  2015-01-26 14:26 ` [PATCH v2 3/3] mm: cma: release trigger Sasha Levin
  2 siblings, 0 replies; 11+ messages in thread
From: Sasha Levin @ 2015-01-26 14:26 UTC (permalink / raw)
  To: linux-kernel; +Cc: iamjoonsoo.kim, m.szyprowski, akpm, lauraa, Sasha Levin

Implement a simple debugfs interface to expose information about CMA areas
in the system.

Useful for testing/sanity checks for CMA since it was previously impossible
to retrieve this information in userspace.

Signed-off-by: Sasha Levin <sasha.levin@oracle.com>
---
 mm/Kconfig     |    6 ++++++
 mm/Makefile    |    1 +
 mm/cma.c       |   19 ++++--------------
 mm/cma.h       |   20 +++++++++++++++++++
 mm/cma_debug.c |   61 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++
 5 files changed, 92 insertions(+), 15 deletions(-)
 create mode 100644 mm/cma.h
 create mode 100644 mm/cma_debug.c

diff --git a/mm/Kconfig b/mm/Kconfig
index a03131b..390214d 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -517,6 +517,12 @@ config CMA_DEBUG
 	  processing calls such as dma_alloc_from_contiguous().
 	  This option does not affect warning and error messages.
 
+config CMA_DEBUGFS
+	bool "CMA debugfs interface"
+	depends on CMA && DEBUG_FS
+	help
+	  Turns on the DebugFS interface for CMA.
+
 config CMA_AREAS
 	int "Maximum count of the CMA areas"
 	depends on CMA
diff --git a/mm/Makefile b/mm/Makefile
index 3c1caa2..51052ba 100644
--- a/mm/Makefile
+++ b/mm/Makefile
@@ -76,3 +76,4 @@ obj-$(CONFIG_GENERIC_EARLY_IOREMAP) += early_ioremap.o
 obj-$(CONFIG_CMA)	+= cma.o
 obj-$(CONFIG_MEMORY_BALLOON) += balloon_compaction.o
 obj-$(CONFIG_PAGE_EXTENSION) += page_ext.o
+obj-$(CONFIG_CMA_DEBUGFS) += cma_debug.o
diff --git a/mm/cma.c b/mm/cma.c
index 75016fd..e093b53 100644
--- a/mm/cma.c
+++ b/mm/cma.c
@@ -35,16 +35,10 @@
 #include <linux/highmem.h>
 #include <linux/io.h>
 
-struct cma {
-	unsigned long	base_pfn;
-	unsigned long	count;
-	unsigned long	*bitmap;
-	unsigned int order_per_bit; /* Order of pages represented by one bit */
-	struct mutex	lock;
-};
-
-static struct cma cma_areas[MAX_CMA_AREAS];
-static unsigned cma_area_count;
+#include "cma.h"
+
+struct cma cma_areas[MAX_CMA_AREAS];
+unsigned cma_area_count;
 static DEFINE_MUTEX(cma_mutex);
 
 phys_addr_t cma_get_base(struct cma *cma)
@@ -75,11 +69,6 @@ static unsigned long cma_bitmap_aligned_offset(struct cma *cma, int align_order)
 		(cma->base_pfn >> cma->order_per_bit);
 }
 
-static unsigned long cma_bitmap_maxno(struct cma *cma)
-{
-	return cma->count >> cma->order_per_bit;
-}
-
 static unsigned long cma_bitmap_pages_to_bits(struct cma *cma,
 						unsigned long pages)
 {
diff --git a/mm/cma.h b/mm/cma.h
new file mode 100644
index 0000000..4141887
--- /dev/null
+++ b/mm/cma.h
@@ -0,0 +1,20 @@
+#ifndef __MM_CMA_H__
+#define __MM_CMA_H__
+
+struct cma {
+	unsigned long   base_pfn;
+	unsigned long   count;
+	unsigned long   *bitmap;
+	unsigned int order_per_bit; /* Order of pages represented by one bit */
+	struct mutex    lock;
+};
+
+extern struct cma cma_areas[MAX_CMA_AREAS];
+extern unsigned cma_area_count;
+
+static unsigned long cma_bitmap_maxno(struct cma *cma)
+{
+	return cma->count >> cma->order_per_bit;
+}
+
+#endif
diff --git a/mm/cma_debug.c b/mm/cma_debug.c
new file mode 100644
index 0000000..3a25413
--- /dev/null
+++ b/mm/cma_debug.c
@@ -0,0 +1,61 @@
+/*
+ * CMA DebugFS Interface
+ *
+ * Copyright (c) 2015 Sasha Levin <sasha.levin@oracle.com>
+ */
+
+
+#include <linux/debugfs.h>
+#include <linux/cma.h>
+
+#include "cma.h"
+
+static struct dentry *cma_debugfs_root;
+
+static int cma_debugfs_get(void *data, u64 *val)
+{
+	unsigned long *p = data;
+
+	*val = *p;
+
+	return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(cma_debugfs_fops, cma_debugfs_get, NULL, "%llu\n");
+
+static void cma_debugfs_add_one(struct cma *cma, int idx)
+{
+	struct dentry *tmp;
+	char name[16];
+	int u32s;
+
+	sprintf(name, "cma-%d", idx);
+
+	tmp = debugfs_create_dir(name, cma_debugfs_root);
+
+	debugfs_create_file("base_pfn", S_IRUGO, tmp,
+				&cma->base_pfn, &cma_debugfs_fops);
+	debugfs_create_file("count", S_IRUGO, tmp,
+				&cma->count, &cma_debugfs_fops);
+	debugfs_create_file("order_per_bit", S_IRUGO, tmp,
+			&cma->order_per_bit, &cma_debugfs_fops);
+
+	u32s = DIV_ROUND_UP(cma_bitmap_maxno(cma), BITS_PER_BYTE * sizeof(u32));
+	debugfs_create_u32_array("bitmap", S_IRUGO, tmp, (u32 *)cma->bitmap, u32s);
+}
+
+static int __init cma_debugfs_init(void)
+{
+	int i;
+
+	cma_debugfs_root = debugfs_create_dir("cma", NULL);
+	if (!cma_debugfs_root)
+		return -ENOMEM;
+
+	for (i = 0; i < cma_area_count; i++)
+		cma_debugfs_add_one(&cma_areas[i], i);
+
+	return 0;
+}
+late_initcall(cma_debugfs_init);
+
-- 
1.7.10.4


^ permalink raw reply related	[flat|nested] 11+ messages in thread

* [PATCH v2 2/3] mm: cma: allocation trigger
  2015-01-26 14:26 [PATCH v2 0/3] mm: cma: debugfs access to CMA Sasha Levin
  2015-01-26 14:26 ` [PATCH v2 1/3] mm: cma: debugfs interface Sasha Levin
@ 2015-01-26 14:26 ` Sasha Levin
  2015-01-27  8:06   ` Joonsoo Kim
  2015-01-26 14:26 ` [PATCH v2 3/3] mm: cma: release trigger Sasha Levin
  2 siblings, 1 reply; 11+ messages in thread
From: Sasha Levin @ 2015-01-26 14:26 UTC (permalink / raw)
  To: linux-kernel; +Cc: iamjoonsoo.kim, m.szyprowski, akpm, lauraa, Sasha Levin

Provides a userspace interface to trigger a CMA allocation.

Usage:

	echo [pages] > alloc

This would provide testing/fuzzing access to the CMA allocation paths.

Signed-off-by: Sasha Levin <sasha.levin@oracle.com>
---
 mm/cma_debug.c |   60 ++++++++++++++++++++++++++++++++++++++++++++++++++++++--
 1 file changed, 58 insertions(+), 2 deletions(-)

diff --git a/mm/cma_debug.c b/mm/cma_debug.c
index 3a25413..39c7116 100644
--- a/mm/cma_debug.c
+++ b/mm/cma_debug.c
@@ -7,9 +7,22 @@
 
 #include <linux/debugfs.h>
 #include <linux/cma.h>
+#include <linux/list.h>
+#include <linux/kernel.h>
+#include <linux/dma-contiguous.h>
+#include <linux/slab.h>
 
 #include "cma.h"
 
+struct cma_mem {
+	struct hlist_node node;
+	struct page *p;
+	unsigned long n;
+};
+
+static HLIST_HEAD(cma_mem_head);
+static DEFINE_SPINLOCK(cma_mem_head_lock);
+
 static struct dentry *cma_debugfs_root;
 
 static int cma_debugfs_get(void *data, u64 *val)
@@ -23,8 +36,48 @@ static int cma_debugfs_get(void *data, u64 *val)
 
 DEFINE_SIMPLE_ATTRIBUTE(cma_debugfs_fops, cma_debugfs_get, NULL, "%llu\n");
 
-static void cma_debugfs_add_one(struct cma *cma, int idx)
+static void cma_add_to_cma_mem_list(struct cma_mem *mem)
+{
+	spin_lock(&cma_mem_head_lock);
+	hlist_add_head(&mem->node, &cma_mem_head);
+	spin_unlock(&cma_mem_head_lock);
+}
+
+static int cma_alloc_mem(struct cma *cma, int count)
 {
+	struct cma_mem *mem;
+	struct page *p;
+
+	mem = kzalloc(sizeof(*mem), GFP_KERNEL);
+	if (!mem)
+		return -ENOMEM;
+
+	p = cma_alloc(cma, count, CONFIG_CMA_ALIGNMENT);
+	if (!p) {
+		kfree(mem);
+		return -ENOMEM;
+	}
+
+	mem->p = p;
+	mem->n = count;
+
+	cma_add_to_cma_mem_list(mem);
+
+	return 0;
+}
+
+static int cma_alloc_write(void *data, u64 val)
+{
+	int pages = val;
+	struct cma *cma = data;
+
+	return cma_alloc_mem(cma, pages);
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(cma_alloc_fops, NULL, cma_alloc_write, "%llu\n");
+
+static void cma_debugfs_add_one(struct cma *cma, int idx)
+{
 	struct dentry *tmp;
 	char name[16];
 	int u32s;
@@ -33,12 +86,15 @@ static void cma_debugfs_add_one(struct cma *cma, int idx)
 
 	tmp = debugfs_create_dir(name, cma_debugfs_root);
 
+	debugfs_create_file("alloc", S_IWUSR, cma_debugfs_root, cma,
+				&cma_alloc_fops);
+
 	debugfs_create_file("base_pfn", S_IRUGO, tmp,
 				&cma->base_pfn, &cma_debugfs_fops);
 	debugfs_create_file("count", S_IRUGO, tmp,
 				&cma->count, &cma_debugfs_fops);
 	debugfs_create_file("order_per_bit", S_IRUGO, tmp,
-			&cma->order_per_bit, &cma_debugfs_fops);
+				&cma->order_per_bit, &cma_debugfs_fops);
 
 	u32s = DIV_ROUND_UP(cma_bitmap_maxno(cma), BITS_PER_BYTE * sizeof(u32));
 	debugfs_create_u32_array("bitmap", S_IRUGO, tmp, (u32*)cma->bitmap, u32s);
-- 
1.7.10.4


^ permalink raw reply related	[flat|nested] 11+ messages in thread

* [PATCH v2 3/3] mm: cma: release trigger
  2015-01-26 14:26 [PATCH v2 0/3] mm: cma: debugfs access to CMA Sasha Levin
  2015-01-26 14:26 ` [PATCH v2 1/3] mm: cma: debugfs interface Sasha Levin
  2015-01-26 14:26 ` [PATCH v2 2/3] mm: cma: allocation trigger Sasha Levin
@ 2015-01-26 14:26 ` Sasha Levin
  2015-01-27  8:10   ` Joonsoo Kim
  2 siblings, 1 reply; 11+ messages in thread
From: Sasha Levin @ 2015-01-26 14:26 UTC (permalink / raw)
  To: linux-kernel; +Cc: iamjoonsoo.kim, m.szyprowski, akpm, lauraa, Sasha Levin

Provides a userspace interface to trigger a CMA release.

Usage:

	echo [pages] > free

This would provide testing/fuzzing access to the CMA release paths.

Signed-off-by: Sasha Levin <sasha.levin@oracle.com>
---
 mm/cma_debug.c |   54 ++++++++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 54 insertions(+)

diff --git a/mm/cma_debug.c b/mm/cma_debug.c
index 39c7116..0a63945 100644
--- a/mm/cma_debug.c
+++ b/mm/cma_debug.c
@@ -11,6 +11,7 @@
 #include <linux/kernel.h>
 #include <linux/dma-contiguous.h>
 #include <linux/slab.h>
+#include <linux/mm_types.h>
 
 #include "cma.h"
 
@@ -43,6 +44,56 @@ static void cma_add_to_cma_mem_list(struct cma_mem *mem)
 	spin_unlock(&cma_mem_head_lock);
 }
 
+static struct cma_mem *cma_get_entry_from_list(void)
+{
+	struct cma_mem *mem = NULL;
+
+	spin_lock(&cma_mem_head_lock);
+	if (!hlist_empty(&cma_mem_head)) {
+		mem = hlist_entry(cma_mem_head.first, struct cma_mem, node);
+		hlist_del_init(&mem->node);
+	}
+	spin_unlock(&cma_mem_head_lock);
+
+	return mem;
+}
+
+static int cma_free_mem(struct cma *cma, int count)
+{
+	struct cma_mem *mem = NULL;
+
+	while (count) {
+		mem = cma_get_entry_from_list();
+		if (mem == NULL)
+			return 0;
+
+		if (mem->n <= count) {
+			cma_release(cma, mem->p, mem->n);
+			count -= mem->n;
+			kfree(mem);
+		} else {
+			cma_release(cma, mem->p, count);
+			mem->p += count;
+			mem->n -= count;
+			count = 0;
+			cma_add_to_cma_mem_list(mem);
+		}
+	}
+
+	return 0;
+
+}
+
+static int cma_free_write(void *data, u64 val)
+{
+	int pages = val;
+	struct cma *cma = data;
+
+	return cma_free_mem(cma, pages);
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(cma_free_fops, NULL, cma_free_write, "%llu\n");
+
 static int cma_alloc_mem(struct cma *cma, int count)
 {
 	struct cma_mem *mem;
@@ -89,6 +140,9 @@ static void cma_debugfs_add_one(struct cma *cma, int idx)
 	debugfs_create_file("alloc", S_IWUSR, cma_debugfs_root, cma,
 				&cma_alloc_fops);
 
+	debugfs_create_file("free", S_IWUSR, cma_debugfs_root, cma,
+				&cma_free_fops);
+
 	debugfs_create_file("base_pfn", S_IRUGO, tmp,
 				&cma->base_pfn, &cma_debugfs_fops);
 	debugfs_create_file("count", S_IRUGO, tmp,
-- 
1.7.10.4


^ permalink raw reply related	[flat|nested] 11+ messages in thread

* Re: [PATCH v2 2/3] mm: cma: allocation trigger
  2015-01-26 14:26 ` [PATCH v2 2/3] mm: cma: allocation trigger Sasha Levin
@ 2015-01-27  8:06   ` Joonsoo Kim
  2015-01-27 15:08     ` Sasha Levin
  0 siblings, 1 reply; 11+ messages in thread
From: Joonsoo Kim @ 2015-01-27  8:06 UTC (permalink / raw)
  To: Sasha Levin; +Cc: linux-kernel, m.szyprowski, akpm, lauraa

On Mon, Jan 26, 2015 at 09:26:04AM -0500, Sasha Levin wrote:
> Provides a userspace interface to trigger a CMA allocation.
> 
> Usage:
> 
> 	echo [pages] > alloc
> 
> This would provide testing/fuzzing access to the CMA allocation paths.
> 
> Signed-off-by: Sasha Levin <sasha.levin@oracle.com>
> ---
>  mm/cma_debug.c |   60 ++++++++++++++++++++++++++++++++++++++++++++++++++++++--
>  1 file changed, 58 insertions(+), 2 deletions(-)
> 
> diff --git a/mm/cma_debug.c b/mm/cma_debug.c
> index 3a25413..39c7116 100644
> --- a/mm/cma_debug.c
> +++ b/mm/cma_debug.c
> @@ -7,9 +7,22 @@
>  
>  #include <linux/debugfs.h>
>  #include <linux/cma.h>
> +#include <linux/list.h>
> +#include <linux/kernel.h>
> +#include <linux/dma-contiguous.h>

dma-contiguous.h isn't needed now.

> +#include <linux/slab.h>
>  
>  #include "cma.h"
>  
> +struct cma_mem {
> +	struct hlist_node node;
> +	struct page *p;
> +	unsigned long n;
> +};
> +
> +static HLIST_HEAD(cma_mem_head);
> +static DEFINE_SPINLOCK(cma_mem_head_lock);
> +
>  static struct dentry *cma_debugfs_root;

How about keeping cma_mem_head on each cma area separately?

>  static int cma_debugfs_get(void *data, u64 *val)
> @@ -23,8 +36,48 @@ static int cma_debugfs_get(void *data, u64 *val)
>  
>  DEFINE_SIMPLE_ATTRIBUTE(cma_debugfs_fops, cma_debugfs_get, NULL, "%llu\n");
>  
> -static void cma_debugfs_add_one(struct cma *cma, int idx)
> +static void cma_add_to_cma_mem_list(struct cma_mem *mem)
> +{
> +	spin_lock(&cma_mem_head_lock);
> +	hlist_add_head(&mem->node, &cma_mem_head);
> +	spin_unlock(&cma_mem_head_lock);
> +}
> +
> +static int cma_alloc_mem(struct cma *cma, int count)
>  {
> +	struct cma_mem *mem;
> +	struct page *p;
> +
> +	mem = kzalloc(sizeof(*mem), GFP_KERNEL);
> +	if (!mem)
> +		return -ENOMEM;
> +
> +	p = cma_alloc(cma, count, CONFIG_CMA_ALIGNMENT);
> +	if (!p) {
> +		kfree(mem);
> +		return -ENOMEM;
> +	}

CONFIG_CMA_ALIGNMENT doesn't look good. It is just the maximum alignment,
so it is odd to use this value in testing. Is there a special reason
to use it here?

Could we also get alignment parameter from user? Something like below.

echo "4 1" > alloc
4 for number of pages
1 for alignment.

If it is impossible, just 0 looks better than CONFIG_CMA_ALIGNMENT.

Thanks.

^ permalink raw reply	[flat|nested] 11+ messages in thread

* Re: [PATCH v2 3/3] mm: cma: release trigger
  2015-01-26 14:26 ` [PATCH v2 3/3] mm: cma: release trigger Sasha Levin
@ 2015-01-27  8:10   ` Joonsoo Kim
  2015-01-27 18:25     ` Sasha Levin
  0 siblings, 1 reply; 11+ messages in thread
From: Joonsoo Kim @ 2015-01-27  8:10 UTC (permalink / raw)
  To: Sasha Levin; +Cc: linux-kernel, m.szyprowski, akpm, lauraa

On Mon, Jan 26, 2015 at 09:26:05AM -0500, Sasha Levin wrote:
> Provides a userspace interface to trigger a CMA release.
> 
> Usage:
> 
> 	echo [pages] > free
> 
> This would provide testing/fuzzing access to the CMA release paths.
> 
> Signed-off-by: Sasha Levin <sasha.levin@oracle.com>
> ---
>  mm/cma_debug.c |   54 ++++++++++++++++++++++++++++++++++++++++++++++++++++++
>  1 file changed, 54 insertions(+)
> 
> diff --git a/mm/cma_debug.c b/mm/cma_debug.c
> index 39c7116..0a63945 100644
> --- a/mm/cma_debug.c
> +++ b/mm/cma_debug.c
> @@ -11,6 +11,7 @@
>  #include <linux/kernel.h>
>  #include <linux/dma-contiguous.h>
>  #include <linux/slab.h>
> +#include <linux/mm_types.h>

Is mm_types.h needed?

>  
>  #include "cma.h"
>  
> @@ -43,6 +44,56 @@ static void cma_add_to_cma_mem_list(struct cma_mem *mem)
>  	spin_unlock(&cma_mem_head_lock);
>  }
>  
> +static struct cma_mem *cma_get_entry_from_list(void)
> +{
> +	struct cma_mem *mem = NULL;
> +
> +	spin_lock(&cma_mem_head_lock);
> +	if (!hlist_empty(&cma_mem_head)) {
> +		mem = hlist_entry(cma_mem_head.first, struct cma_mem, node);
> +		hlist_del_init(&mem->node);
> +	}
> +	spin_unlock(&cma_mem_head_lock);
> +
> +	return mem;
> +}
> +
> +static int cma_free_mem(struct cma *cma, int count)
> +{
> +	struct cma_mem *mem = NULL;
> +
> +	while (count) {
> +		mem = cma_get_entry_from_list();
> +		if (mem == NULL)
> +			return 0;
> +
> +		if (mem->n <= count) {
> +			cma_release(cma, mem->p, mem->n);
> +			count -= mem->n;
> +			kfree(mem);
> +		} else {
> +			cma_release(cma, mem->p, count);
> +			mem->p += count;
> +			mem->n -= count;
> +			count = 0;
> +			cma_add_to_cma_mem_list(mem);
> +		}
> +	}

If order_per_bit is not 0 and the count used in cma_release() differs
from the count used in cma_alloc(), a problem could occur since the
bitmap management code can't handle that situation.

Could we just disable this case in this testing module?

Thanks.

^ permalink raw reply	[flat|nested] 11+ messages in thread

* Re: [PATCH v2 2/3] mm: cma: allocation trigger
  2015-01-27  8:06   ` Joonsoo Kim
@ 2015-01-27 15:08     ` Sasha Levin
  2015-01-28  1:34       ` Joonsoo Kim
  0 siblings, 1 reply; 11+ messages in thread
From: Sasha Levin @ 2015-01-27 15:08 UTC (permalink / raw)
  To: Joonsoo Kim; +Cc: linux-kernel, m.szyprowski, akpm, lauraa

On 01/27/2015 03:06 AM, Joonsoo Kim wrote:
> On Mon, Jan 26, 2015 at 09:26:04AM -0500, Sasha Levin wrote:
>> Provides a userspace interface to trigger a CMA allocation.
>>
>> Usage:
>>
>> 	echo [pages] > alloc
>>
>> This would provide testing/fuzzing access to the CMA allocation paths.
>>
>> Signed-off-by: Sasha Levin <sasha.levin@oracle.com>
>> ---
>>  mm/cma_debug.c |   60 ++++++++++++++++++++++++++++++++++++++++++++++++++++++--
>>  1 file changed, 58 insertions(+), 2 deletions(-)
>>
>> diff --git a/mm/cma_debug.c b/mm/cma_debug.c
>> index 3a25413..39c7116 100644
>> --- a/mm/cma_debug.c
>> +++ b/mm/cma_debug.c
>> @@ -7,9 +7,22 @@
>>  
>>  #include <linux/debugfs.h>
>>  #include <linux/cma.h>
>> +#include <linux/list.h>
>> +#include <linux/kernel.h>
>> +#include <linux/dma-contiguous.h>
> 
> dma-contiguous.h doesn't needed now.

Right.

>> +#include <linux/slab.h>
>>  
>>  #include "cma.h"
>>  
>> +struct cma_mem {
>> +	struct hlist_node node;
>> +	struct page *p;
>> +	unsigned long n;
>> +};
>> +
>> +static HLIST_HEAD(cma_mem_head);
>> +static DEFINE_SPINLOCK(cma_mem_head_lock);
>> +
>>  static struct dentry *cma_debugfs_root;
> 
> How about keeping cma_mem_head on each cma area separately?

Good point, we're mixing allocations here.

>>  static int cma_debugfs_get(void *data, u64 *val)
>> @@ -23,8 +36,48 @@ static int cma_debugfs_get(void *data, u64 *val)
>>  
>>  DEFINE_SIMPLE_ATTRIBUTE(cma_debugfs_fops, cma_debugfs_get, NULL, "%llu\n");
>>  
>> -static void cma_debugfs_add_one(struct cma *cma, int idx)
>> +static void cma_add_to_cma_mem_list(struct cma_mem *mem)
>> +{
>> +	spin_lock(&cma_mem_head_lock);
>> +	hlist_add_head(&mem->node, &cma_mem_head);
>> +	spin_unlock(&cma_mem_head_lock);
>> +}
>> +
>> +static int cma_alloc_mem(struct cma *cma, int count)
>>  {
>> +	struct cma_mem *mem;
>> +	struct page *p;
>> +
>> +	mem = kzalloc(sizeof(*mem), GFP_KERNEL);
>> +	if (!mem)
>> +		return -ENOMEM;
>> +
>> +	p = cma_alloc(cma, count, CONFIG_CMA_ALIGNMENT);
>> +	if (!p) {
>> +		kfree(mem);
>> +		return -ENOMEM;
>> +	}
> 
> CONFIG_CMA_ALIGNMENT looks not good. It means just maximum aligment
> so it is odd to use this value in testing. Is there special meaning
> to use it here?

No good reason, I stole that from a different piece of code.

> Could we also get alignment parameter from user? Something like below.
> 
> echo "4 1" > alloc
> 4 for number of pages
> 1 for alignment.
> 
> If it is impossible, just 0 looks better than CONFIG_CMA_ALIGNMENT.

I'd rather keep it simple and use a single parameter for now.


Thanks,
Sasha

^ permalink raw reply	[flat|nested] 11+ messages in thread

* Re: [PATCH v2 3/3] mm: cma: release trigger
  2015-01-27  8:10   ` Joonsoo Kim
@ 2015-01-27 18:25     ` Sasha Levin
  2015-01-27 20:13       ` Sasha Levin
  0 siblings, 1 reply; 11+ messages in thread
From: Sasha Levin @ 2015-01-27 18:25 UTC (permalink / raw)
  To: Joonsoo Kim; +Cc: linux-kernel, m.szyprowski, akpm, lauraa

On 01/27/2015 03:10 AM, Joonsoo Kim wrote:
>> +		if (mem->n <= count) {
>> > +			cma_release(cma, mem->p, mem->n);
>> > +			count -= mem->n;
>> > +			kfree(mem);
>> > +		} else {
>> > +			cma_release(cma, mem->p, count);
>> > +			mem->p += count;
>> > +			mem->n -= count;
>> > +			count = 0;
>> > +			cma_add_to_cma_mem_list(mem);
>> > +		}
>> > +	}
> If order_per_bit is not 0 and count used in cma_release() is
> different with the count used in cma_alloc(), problem could
> occur since bitmap management code can't handle that situation.
> 
> Could we just disable this case in this testing module?

How should it behave then? Just free a max of 'count' pages and
stop beforehand if we're going to go over it?


Thanks,
Sasha

^ permalink raw reply	[flat|nested] 11+ messages in thread

* Re: [PATCH v2 3/3] mm: cma: release trigger
  2015-01-27 18:25     ` Sasha Levin
@ 2015-01-27 20:13       ` Sasha Levin
  2015-01-28  1:35         ` Joonsoo Kim
  0 siblings, 1 reply; 11+ messages in thread
From: Sasha Levin @ 2015-01-27 20:13 UTC (permalink / raw)
  To: Joonsoo Kim; +Cc: linux-kernel, m.szyprowski, akpm, lauraa

On 01/27/2015 01:25 PM, Sasha Levin wrote:
> On 01/27/2015 03:10 AM, Joonsoo Kim wrote:
>>> >> +		if (mem->n <= count) {
>>>> >> > +			cma_release(cma, mem->p, mem->n);
>>>> >> > +			count -= mem->n;
>>>> >> > +			kfree(mem);
>>>> >> > +		} else {
>>>> >> > +			cma_release(cma, mem->p, count);
>>>> >> > +			mem->p += count;
>>>> >> > +			mem->n -= count;
>>>> >> > +			count = 0;
>>>> >> > +			cma_add_to_cma_mem_list(mem);
>>>> >> > +		}
>>>> >> > +	}
>> > If order_per_bit is not 0 and count used in cma_release() is
>> > different with the count used in cma_alloc(), problem could
>> > occur since bitmap management code can't handle that situation.
>> > 
>> > Could we just disable this case in this testing module?
> How should it behave then? Just free a max of 'count' pages and
> stop beforehand if we're going to go over it?

Actually, Can I just check for order_per_bit == 0 and execute it
then? I don't want to avoid testing these paths.


Thanks,
Sasha

^ permalink raw reply	[flat|nested] 11+ messages in thread

* Re: [PATCH v2 2/3] mm: cma: allocation trigger
  2015-01-27 15:08     ` Sasha Levin
@ 2015-01-28  1:34       ` Joonsoo Kim
  0 siblings, 0 replies; 11+ messages in thread
From: Joonsoo Kim @ 2015-01-28  1:34 UTC (permalink / raw)
  To: Sasha Levin
  Cc: Joonsoo Kim, LKML, Marek Szyprowski, Andrew Morton, Laura Abbott

2015-01-28 0:08 GMT+09:00 Sasha Levin <sasha.levin@oracle.com>:
> On 01/27/2015 03:06 AM, Joonsoo Kim wrote:
>> On Mon, Jan 26, 2015 at 09:26:04AM -0500, Sasha Levin wrote:
>>> Provides a userspace interface to trigger a CMA allocation.
>>>
>>> Usage:
>>>
>>>      echo [pages] > alloc
>>>
>>> This would provide testing/fuzzing access to the CMA allocation paths.
>>>
>>> Signed-off-by: Sasha Levin <sasha.levin@oracle.com>
>>> ---
>>>  mm/cma_debug.c |   60 ++++++++++++++++++++++++++++++++++++++++++++++++++++++--
>>>  1 file changed, 58 insertions(+), 2 deletions(-)
>>>
>>> diff --git a/mm/cma_debug.c b/mm/cma_debug.c
>>> index 3a25413..39c7116 100644
>>> --- a/mm/cma_debug.c
>>> +++ b/mm/cma_debug.c
>>> @@ -7,9 +7,22 @@
>>>
>>>  #include <linux/debugfs.h>
>>>  #include <linux/cma.h>
>>> +#include <linux/list.h>
>>> +#include <linux/kernel.h>
>>> +#include <linux/dma-contiguous.h>
>>
>> dma-contiguous.h doesn't needed now.
>
> Right.
>
>>> +#include <linux/slab.h>
>>>
>>>  #include "cma.h"
>>>
>>> +struct cma_mem {
>>> +    struct hlist_node node;
>>> +    struct page *p;
>>> +    unsigned long n;
>>> +};
>>> +
>>> +static HLIST_HEAD(cma_mem_head);
>>> +static DEFINE_SPINLOCK(cma_mem_head_lock);
>>> +
>>>  static struct dentry *cma_debugfs_root;
>>
>> How about keeping cma_mem_head on each cma area separately?
>
> Good point, we're mixing allocations here.
>
>>>  static int cma_debugfs_get(void *data, u64 *val)
>>> @@ -23,8 +36,48 @@ static int cma_debugfs_get(void *data, u64 *val)
>>>
>>>  DEFINE_SIMPLE_ATTRIBUTE(cma_debugfs_fops, cma_debugfs_get, NULL, "%llu\n");
>>>
>>> -static void cma_debugfs_add_one(struct cma *cma, int idx)
>>> +static void cma_add_to_cma_mem_list(struct cma_mem *mem)
>>> +{
>>> +    spin_lock(&cma_mem_head_lock);
>>> +    hlist_add_head(&mem->node, &cma_mem_head);
>>> +    spin_unlock(&cma_mem_head_lock);
>>> +}
>>> +
>>> +static int cma_alloc_mem(struct cma *cma, int count)
>>>  {
>>> +    struct cma_mem *mem;
>>> +    struct page *p;
>>> +
>>> +    mem = kzalloc(sizeof(*mem), GFP_KERNEL);
>>> +    if (!mem)
>>> +            return -ENOMEM;
>>> +
>>> +    p = cma_alloc(cma, count, CONFIG_CMA_ALIGNMENT);
>>> +    if (!p) {
>>> +            kfree(mem);
>>> +            return -ENOMEM;
>>> +    }
>>
>> CONFIG_CMA_ALIGNMENT looks not good. It means just maximum aligment
>> so it is odd to use this value in testing. Is there special meaning
>> to use it here?
>
> No good reason, I stole that from a different piece of code.
>
>> Could we also get alignment parameter from user? Something like below.
>>
>> echo "4 1" > alloc
>> 4 for number of pages
>> 1 for alignment.
>>
>> If it is impossible, just 0 looks better than CONFIG_CMA_ALIGNMENT.
>
> I'd rather keep it simple and use a single parameter for now.

Okay, then use 0 rather than CONFIG_CMA_ALIGNMENT.

Thanks.

^ permalink raw reply	[flat|nested] 11+ messages in thread

* Re: [PATCH v2 3/3] mm: cma: release trigger
  2015-01-27 20:13       ` Sasha Levin
@ 2015-01-28  1:35         ` Joonsoo Kim
  0 siblings, 0 replies; 11+ messages in thread
From: Joonsoo Kim @ 2015-01-28  1:35 UTC (permalink / raw)
  To: Sasha Levin
  Cc: Joonsoo Kim, LKML, Marek Szyprowski, Andrew Morton, Laura Abbott

2015-01-28 5:13 GMT+09:00 Sasha Levin <sasha.levin@oracle.com>:
> On 01/27/2015 01:25 PM, Sasha Levin wrote:
>> On 01/27/2015 03:10 AM, Joonsoo Kim wrote:
>>>> >> +                if (mem->n <= count) {
>>>>> >> > +                     cma_release(cma, mem->p, mem->n);
>>>>> >> > +                     count -= mem->n;
>>>>> >> > +                     kfree(mem);
>>>>> >> > +             } else {
>>>>> >> > +                     cma_release(cma, mem->p, count);
>>>>> >> > +                     mem->p += count;
>>>>> >> > +                     mem->n -= count;
>>>>> >> > +                     count = 0;
>>>>> >> > +                     cma_add_to_cma_mem_list(mem);
>>>>> >> > +             }
>>>>> >> > +     }
>>> > If order_per_bit is not 0 and count used in cma_release() is
>>> > different with the count used in cma_alloc(), problem could
>>> > occur since bitmap management code can't handle that situation.
>>> >
>>> > Could we just disable this case in this testing module?
>> How should it behave then? Just free a max of 'count' pages and
>> stop beforehand if we're going to go over it?
>
> Actually, Can I just check for order_per_bit == 0 and execute it
> then? I don't want to avoid testing these paths.

Okay. I'm okay if you check order_per_bit == 0 in such case.

Thanks.

^ permalink raw reply	[flat|nested] 11+ messages in thread

end of thread, other threads:[~2015-01-28  1:35 UTC | newest]

Thread overview: 11+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2015-01-26 14:26 [PATCH v2 0/3] mm: cma: debugfs access to CMA Sasha Levin
2015-01-26 14:26 ` [PATCH v2 1/3] mm: cma: debugfs interface Sasha Levin
2015-01-26 14:26 ` [PATCH v2 2/3] mm: cma: allocation trigger Sasha Levin
2015-01-27  8:06   ` Joonsoo Kim
2015-01-27 15:08     ` Sasha Levin
2015-01-28  1:34       ` Joonsoo Kim
2015-01-26 14:26 ` [PATCH v2 3/3] mm: cma: release trigger Sasha Levin
2015-01-27  8:10   ` Joonsoo Kim
2015-01-27 18:25     ` Sasha Levin
2015-01-27 20:13       ` Sasha Levin
2015-01-28  1:35         ` Joonsoo Kim

This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.