All of lore.kernel.org
 help / color / mirror / Atom feed
From: Marek Szyprowski <m.szyprowski@samsung.com>
To: linux-kernel@vger.kernel.org,
	linux-arm-kernel@lists.infradead.org,
	linux-media@vger.kernel.org, linux-mm@kvack.org,
	linaro-mm-sig@lists.linaro.org
Cc: Michal Nazarewicz <mina86@mina86.com>,
	Marek Szyprowski <m.szyprowski@samsung.com>,
	Kyungmin Park <kyungmin.park@samsung.com>,
	Andrew Morton <akpm@linux-foundation.org>,
	KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>,
	Ankita Garg <ankita@in.ibm.com>,
	Daniel Walker <dwalker@codeaurora.org>,
	Johan MOSSBERG <johan.xx.mossberg@stericsson.com>,
	Mel Gorman <mel@csn.ul.ie>, Arnd Bergmann <arnd@arndb.de>,
	Jesse Barker <jesse.barker@linaro.org>
Subject: [PATCH 09/10] ARM: integrate CMA with dma-mapping subsystem
Date: Fri, 10 Jun 2011 11:54:57 +0200	[thread overview]
Message-ID: <1307699698-29369-10-git-send-email-m.szyprowski@samsung.com> (raw)
In-Reply-To: <1307699698-29369-1-git-send-email-m.szyprowski@samsung.com>

This patch adds support for CMA to dma-mapping subsystem for ARM
architecture. CMA area can be defined individually for each device in
the system. It is up to the board startup code to create a CMA area and
assign it to the devices.

Buffer alignment is derived from the buffer size, but only for
buffers up to 1MiB. Larger buffers are always aligned to 1MiB.

Signed-off-by: Marek Szyprowski <m.szyprowski@samsung.com>
Signed-off-by: Kyungmin Park <kyungmin.park@samsung.com>
---
 arch/arm/include/asm/device.h      |    3 ++
 arch/arm/include/asm/dma-mapping.h |   19 +++++++++++
 arch/arm/mm/dma-mapping.c          |   60 +++++++++++++++++++++++++++---------
 3 files changed, 67 insertions(+), 15 deletions(-)

diff --git a/arch/arm/include/asm/device.h b/arch/arm/include/asm/device.h
index 9f390ce..942913e 100644
--- a/arch/arm/include/asm/device.h
+++ b/arch/arm/include/asm/device.h
@@ -10,6 +10,9 @@ struct dev_archdata {
 #ifdef CONFIG_DMABOUNCE
 	struct dmabounce_device_info *dmabounce;
 #endif
+#ifdef CONFIG_CMA
+	struct cma *cma_area;
+#endif
 };
 
 struct pdev_archdata {
diff --git a/arch/arm/include/asm/dma-mapping.h b/arch/arm/include/asm/dma-mapping.h
index 4fff837..e387ea7 100644
--- a/arch/arm/include/asm/dma-mapping.h
+++ b/arch/arm/include/asm/dma-mapping.h
@@ -14,6 +14,25 @@
 #error Please update to __arch_pfn_to_dma
 #endif
 
+struct cma;
+
+#ifdef CONFIG_CMA
+static inline struct cma *get_dev_cma_area(struct device *dev)
+{
+	return dev->archdata.cma_area;
+}
+
+static inline void set_dev_cma_area(struct device *dev, struct cma *cma)
+{
+	dev->archdata.cma_area = cma;
+}
+#else
+static inline struct cma *get_dev_cma_area(struct device *dev)
+{
+	return NULL;
+}
+#endif
+
 /*
  * dma_to_pfn/pfn_to_dma/dma_to_virt/virt_to_dma are architecture private
  * functions used internally by the DMA-mapping API to provide DMA
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index 82a093c..233e34a 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -18,6 +18,7 @@
 #include <linux/device.h>
 #include <linux/dma-mapping.h>
 #include <linux/highmem.h>
+#include <linux/cma.h>
 
 #include <asm/memory.h>
 #include <asm/highmem.h>
@@ -52,16 +53,36 @@ static u64 get_coherent_dma_mask(struct device *dev)
 	return mask;
 }
 
+
+static struct page *__alloc_system_pages(size_t count, unsigned int order, gfp_t gfp)
+{
+	struct page *page, *p, *e;
+
+	page = alloc_pages(gfp, order);
+	if (!page)
+		return NULL;
+
+	/*
+	 * Now split the huge page and free the excess pages
+	 */
+	split_page(page, order);
+	for (p = page + count, e = page + (1 << order); p < e; p++)
+		__free_page(p);
+	return page;
+}
+
 /*
  * Allocate a DMA buffer for 'dev' of size 'size' using the
  * specified gfp mask.  Note that 'size' must be page aligned.
  */
 static struct page *__dma_alloc_buffer(struct device *dev, size_t size, gfp_t gfp)
 {
-	unsigned long order = get_order(size);
-	struct page *page, *p, *e;
+	struct cma *cma = get_dev_cma_area(dev);
+	struct page *page;
+	size_t count = size >> PAGE_SHIFT;
 	void *ptr;
 	u64 mask = get_coherent_dma_mask(dev);
+	unsigned long order = get_order(count << PAGE_SHIFT);
 
 #ifdef CONFIG_DMA_API_DEBUG
 	u64 limit = (mask + 1) & ~mask;
@@ -78,16 +99,19 @@ static struct page *__dma_alloc_buffer(struct device *dev, size_t size, gfp_t gf
 	if (mask < 0xffffffffULL)
 		gfp |= GFP_DMA;
 
-	page = alloc_pages(gfp, order);
-	if (!page)
-		return NULL;
+	/*
+	 * First, try to allocate memory from contiguous area aligned up to 1MiB
+	 */
+	page = cm_alloc(cma, count, order < 8 ? 8 : order);
 
 	/*
-	 * Now split the huge page and free the excess pages
+	 * Fallback if contiguous alloc fails or is not available
 	 */
-	split_page(page, order);
-	for (p = page + (size >> PAGE_SHIFT), e = page + (1 << order); p < e; p++)
-		__free_page(p);
+	if (!page)
+		page = __alloc_system_pages(count, order, gfp);
+
+	if (!page)
+		return NULL;
 
 	/*
 	 * Ensure that the allocated pages are zeroed, and that any data
@@ -104,13 +128,19 @@ static struct page *__dma_alloc_buffer(struct device *dev, size_t size, gfp_t gf
 /*
  * Free a DMA buffer.  'size' must be page aligned.
  */
-static void __dma_free_buffer(struct page *page, size_t size)
+static void __dma_free_buffer(struct device *dev, struct page *page, size_t size)
 {
-	struct page *e = page + (size >> PAGE_SHIFT);
+	struct cma *cma = get_dev_cma_area(dev);
+	size_t count = size >> PAGE_SHIFT;
+	struct page *e = page + count;
 
-	while (page < e) {
-		__free_page(page);
-		page++;
+	if (cma) {
+		cm_free(cma, page, count);
+	} else {
+		while (page < e) {
+			__free_page(page);
+			page++;
+		}
 	}
 }
 
@@ -416,7 +446,7 @@ void dma_free_coherent(struct device *dev, size_t size, void *cpu_addr, dma_addr
 	if (!arch_is_coherent())
 		__dma_free_remap(cpu_addr, size);
 
-	__dma_free_buffer(pfn_to_page(dma_to_pfn(dev, handle)), size);
+	__dma_free_buffer(dev, pfn_to_page(dma_to_pfn(dev, handle)), size);
 }
 EXPORT_SYMBOL(dma_free_coherent);
 
-- 
1.7.1.569.g6f426


WARNING: multiple messages have this Message-ID (diff)
From: Marek Szyprowski <m.szyprowski@samsung.com>
To: linux-kernel@vger.kernel.org,
	linux-arm-kernel@lists.infradead.org,
	linux-media@vger.kernel.org, linux-mm@kvack.org,
	linaro-mm-sig@lists.linaro.org
Cc: Michal Nazarewicz <mina86@mina86.com>,
	Marek Szyprowski <m.szyprowski@samsung.com>,
	Kyungmin Park <kyungmin.park@samsung.com>,
	Andrew Morton <akpm@linux-foundation.org>,
	KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>,
	Ankita Garg <ankita@in.ibm.com>,
	Daniel Walker <dwalker@codeaurora.org>,
	Johan MOSSBERG <johan.xx.mossberg@stericsson.com>,
	Mel Gorman <mel@csn.ul.ie>, Arnd Bergmann <arnd@arndb.de>,
	Jesse Barker <jesse.barker@linaro.org>
Subject: [PATCH 09/10] ARM: integrate CMA with dma-mapping subsystem
Date: Fri, 10 Jun 2011 11:54:57 +0200	[thread overview]
Message-ID: <1307699698-29369-10-git-send-email-m.szyprowski@samsung.com> (raw)
In-Reply-To: <1307699698-29369-1-git-send-email-m.szyprowski@samsung.com>

This patch adds support for CMA to dma-mapping subsystem for ARM
architecture. CMA area can be defined individually for each device in
the system. It is up to the board startup code to create a CMA area and
assign it to the devices.

Buffer alignment is derived from the buffer size, but only for
buffers up to 1MiB. Larger buffers are always aligned to 1MiB.

Signed-off-by: Marek Szyprowski <m.szyprowski@samsung.com>
Signed-off-by: Kyungmin Park <kyungmin.park@samsung.com>
---
 arch/arm/include/asm/device.h      |    3 ++
 arch/arm/include/asm/dma-mapping.h |   19 +++++++++++
 arch/arm/mm/dma-mapping.c          |   60 +++++++++++++++++++++++++++---------
 3 files changed, 67 insertions(+), 15 deletions(-)

diff --git a/arch/arm/include/asm/device.h b/arch/arm/include/asm/device.h
index 9f390ce..942913e 100644
--- a/arch/arm/include/asm/device.h
+++ b/arch/arm/include/asm/device.h
@@ -10,6 +10,9 @@ struct dev_archdata {
 #ifdef CONFIG_DMABOUNCE
 	struct dmabounce_device_info *dmabounce;
 #endif
+#ifdef CONFIG_CMA
+	struct cma *cma_area;
+#endif
 };
 
 struct pdev_archdata {
diff --git a/arch/arm/include/asm/dma-mapping.h b/arch/arm/include/asm/dma-mapping.h
index 4fff837..e387ea7 100644
--- a/arch/arm/include/asm/dma-mapping.h
+++ b/arch/arm/include/asm/dma-mapping.h
@@ -14,6 +14,25 @@
 #error Please update to __arch_pfn_to_dma
 #endif
 
+struct cma;
+
+#ifdef CONFIG_CMA
+static inline struct cma *get_dev_cma_area(struct device *dev)
+{
+	return dev->archdata.cma_area;
+}
+
+static inline void set_dev_cma_area(struct device *dev, struct cma *cma)
+{
+	dev->archdata.cma_area = cma;
+}
+#else
+static inline struct cma *get_dev_cma_area(struct device *dev)
+{
+	return NULL;
+}
+#endif
+
 /*
  * dma_to_pfn/pfn_to_dma/dma_to_virt/virt_to_dma are architecture private
  * functions used internally by the DMA-mapping API to provide DMA
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index 82a093c..233e34a 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -18,6 +18,7 @@
 #include <linux/device.h>
 #include <linux/dma-mapping.h>
 #include <linux/highmem.h>
+#include <linux/cma.h>
 
 #include <asm/memory.h>
 #include <asm/highmem.h>
@@ -52,16 +53,36 @@ static u64 get_coherent_dma_mask(struct device *dev)
 	return mask;
 }
 
+
+static struct page *__alloc_system_pages(size_t count, unsigned int order, gfp_t gfp)
+{
+	struct page *page, *p, *e;
+
+	page = alloc_pages(gfp, order);
+	if (!page)
+		return NULL;
+
+	/*
+	 * Now split the huge page and free the excess pages
+	 */
+	split_page(page, order);
+	for (p = page + count, e = page + (1 << order); p < e; p++)
+		__free_page(p);
+	return page;
+}
+
 /*
  * Allocate a DMA buffer for 'dev' of size 'size' using the
  * specified gfp mask.  Note that 'size' must be page aligned.
  */
 static struct page *__dma_alloc_buffer(struct device *dev, size_t size, gfp_t gfp)
 {
-	unsigned long order = get_order(size);
-	struct page *page, *p, *e;
+	struct cma *cma = get_dev_cma_area(dev);
+	struct page *page;
+	size_t count = size >> PAGE_SHIFT;
 	void *ptr;
 	u64 mask = get_coherent_dma_mask(dev);
+	unsigned long order = get_order(count << PAGE_SHIFT);
 
 #ifdef CONFIG_DMA_API_DEBUG
 	u64 limit = (mask + 1) & ~mask;
@@ -78,16 +99,19 @@ static struct page *__dma_alloc_buffer(struct device *dev, size_t size, gfp_t gf
 	if (mask < 0xffffffffULL)
 		gfp |= GFP_DMA;
 
-	page = alloc_pages(gfp, order);
-	if (!page)
-		return NULL;
+	/*
+	 * First, try to allocate memory from contiguous area aligned up to 1MiB
+	 */
+	page = cm_alloc(cma, count, order < 8 ? 8 : order);
 
 	/*
-	 * Now split the huge page and free the excess pages
+	 * Fallback if contiguous alloc fails or is not available
 	 */
-	split_page(page, order);
-	for (p = page + (size >> PAGE_SHIFT), e = page + (1 << order); p < e; p++)
-		__free_page(p);
+	if (!page)
+		page = __alloc_system_pages(count, order, gfp);
+
+	if (!page)
+		return NULL;
 
 	/*
 	 * Ensure that the allocated pages are zeroed, and that any data
@@ -104,13 +128,19 @@ static struct page *__dma_alloc_buffer(struct device *dev, size_t size, gfp_t gf
 /*
  * Free a DMA buffer.  'size' must be page aligned.
  */
-static void __dma_free_buffer(struct page *page, size_t size)
+static void __dma_free_buffer(struct device *dev, struct page *page, size_t size)
 {
-	struct page *e = page + (size >> PAGE_SHIFT);
+	struct cma *cma = get_dev_cma_area(dev);
+	size_t count = size >> PAGE_SHIFT;
+	struct page *e = page + count;
 
-	while (page < e) {
-		__free_page(page);
-		page++;
+	if (cma) {
+		cm_free(cma, page, count);
+	} else {
+		while (page < e) {
+			__free_page(page);
+			page++;
+		}
 	}
 }
 
@@ -416,7 +446,7 @@ void dma_free_coherent(struct device *dev, size_t size, void *cpu_addr, dma_addr
 	if (!arch_is_coherent())
 		__dma_free_remap(cpu_addr, size);
 
-	__dma_free_buffer(pfn_to_page(dma_to_pfn(dev, handle)), size);
+	__dma_free_buffer(dev, pfn_to_page(dma_to_pfn(dev, handle)), size);
 }
 EXPORT_SYMBOL(dma_free_coherent);
 
-- 
1.7.1.569.g6f426

--
To unsubscribe, send a message with 'unsubscribe linux-mm' in
the body to majordomo@kvack.org.  For more info on Linux MM,
see: http://www.linux-mm.org/ .
Fight unfair telecom internet charges in Canada: sign http://stopthemeter.ca/
Don't email: <a href=mailto:"dont@kvack.org"> email@kvack.org </a>

WARNING: multiple messages have this Message-ID (diff)
From: m.szyprowski@samsung.com (Marek Szyprowski)
To: linux-arm-kernel@lists.infradead.org
Subject: [PATCH 09/10] ARM: integrate CMA with dma-mapping subsystem
Date: Fri, 10 Jun 2011 11:54:57 +0200	[thread overview]
Message-ID: <1307699698-29369-10-git-send-email-m.szyprowski@samsung.com> (raw)
In-Reply-To: <1307699698-29369-1-git-send-email-m.szyprowski@samsung.com>

This patch adds support for CMA to dma-mapping subsystem for ARM
architecture. CMA area can be defined individually for each device in
the system. It is up to the board startup code to create a CMA area and
assign it to the devices.

Buffer alignment is derived from the buffer size, but only for
buffers up to 1MiB. Larger buffers are always aligned to 1MiB.

Signed-off-by: Marek Szyprowski <m.szyprowski@samsung.com>
Signed-off-by: Kyungmin Park <kyungmin.park@samsung.com>
---
 arch/arm/include/asm/device.h      |    3 ++
 arch/arm/include/asm/dma-mapping.h |   19 +++++++++++
 arch/arm/mm/dma-mapping.c          |   60 +++++++++++++++++++++++++++---------
 3 files changed, 67 insertions(+), 15 deletions(-)

diff --git a/arch/arm/include/asm/device.h b/arch/arm/include/asm/device.h
index 9f390ce..942913e 100644
--- a/arch/arm/include/asm/device.h
+++ b/arch/arm/include/asm/device.h
@@ -10,6 +10,9 @@ struct dev_archdata {
 #ifdef CONFIG_DMABOUNCE
 	struct dmabounce_device_info *dmabounce;
 #endif
+#ifdef CONFIG_CMA
+	struct cma *cma_area;
+#endif
 };
 
 struct pdev_archdata {
diff --git a/arch/arm/include/asm/dma-mapping.h b/arch/arm/include/asm/dma-mapping.h
index 4fff837..e387ea7 100644
--- a/arch/arm/include/asm/dma-mapping.h
+++ b/arch/arm/include/asm/dma-mapping.h
@@ -14,6 +14,25 @@
 #error Please update to __arch_pfn_to_dma
 #endif
 
+struct cma;
+
+#ifdef CONFIG_CMA
+static inline struct cma *get_dev_cma_area(struct device *dev)
+{
+	return dev->archdata.cma_area;
+}
+
+static inline void set_dev_cma_area(struct device *dev, struct cma *cma)
+{
+	dev->archdata.cma_area = cma;
+}
+#else
+static inline struct cma *get_dev_cma_area(struct device *dev)
+{
+	return NULL;
+}
+#endif
+
 /*
  * dma_to_pfn/pfn_to_dma/dma_to_virt/virt_to_dma are architecture private
  * functions used internally by the DMA-mapping API to provide DMA
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index 82a093c..233e34a 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -18,6 +18,7 @@
 #include <linux/device.h>
 #include <linux/dma-mapping.h>
 #include <linux/highmem.h>
+#include <linux/cma.h>
 
 #include <asm/memory.h>
 #include <asm/highmem.h>
@@ -52,16 +53,36 @@ static u64 get_coherent_dma_mask(struct device *dev)
 	return mask;
 }
 
+
+static struct page *__alloc_system_pages(size_t count, unsigned int order, gfp_t gfp)
+{
+	struct page *page, *p, *e;
+
+	page = alloc_pages(gfp, order);
+	if (!page)
+		return NULL;
+
+	/*
+	 * Now split the huge page and free the excess pages
+	 */
+	split_page(page, order);
+	for (p = page + count, e = page + (1 << order); p < e; p++)
+		__free_page(p);
+	return page;
+}
+
 /*
  * Allocate a DMA buffer for 'dev' of size 'size' using the
  * specified gfp mask.  Note that 'size' must be page aligned.
  */
 static struct page *__dma_alloc_buffer(struct device *dev, size_t size, gfp_t gfp)
 {
-	unsigned long order = get_order(size);
-	struct page *page, *p, *e;
+	struct cma *cma = get_dev_cma_area(dev);
+	struct page *page;
+	size_t count = size >> PAGE_SHIFT;
 	void *ptr;
 	u64 mask = get_coherent_dma_mask(dev);
+	unsigned long order = get_order(count << PAGE_SHIFT);
 
 #ifdef CONFIG_DMA_API_DEBUG
 	u64 limit = (mask + 1) & ~mask;
@@ -78,16 +99,19 @@ static struct page *__dma_alloc_buffer(struct device *dev, size_t size, gfp_t gf
 	if (mask < 0xffffffffULL)
 		gfp |= GFP_DMA;
 
-	page = alloc_pages(gfp, order);
-	if (!page)
-		return NULL;
+	/*
+	 * First, try to allocate memory from contiguous area aligned up to 1MiB
+	 */
+	page = cm_alloc(cma, count, order < 8 ? 8 : order);
 
 	/*
-	 * Now split the huge page and free the excess pages
+	 * Fallback if contiguous alloc fails or is not available
 	 */
-	split_page(page, order);
-	for (p = page + (size >> PAGE_SHIFT), e = page + (1 << order); p < e; p++)
-		__free_page(p);
+	if (!page)
+		page = __alloc_system_pages(count, order, gfp);
+
+	if (!page)
+		return NULL;
 
 	/*
 	 * Ensure that the allocated pages are zeroed, and that any data
@@ -104,13 +128,19 @@ static struct page *__dma_alloc_buffer(struct device *dev, size_t size, gfp_t gf
 /*
  * Free a DMA buffer.  'size' must be page aligned.
  */
-static void __dma_free_buffer(struct page *page, size_t size)
+static void __dma_free_buffer(struct device *dev, struct page *page, size_t size)
 {
-	struct page *e = page + (size >> PAGE_SHIFT);
+	struct cma *cma = get_dev_cma_area(dev);
+	size_t count = size >> PAGE_SHIFT;
+	struct page *e = page + count;
 
-	while (page < e) {
-		__free_page(page);
-		page++;
+	if (cma) {
+		cm_free(cma, page, count);
+	} else {
+		while (page < e) {
+			__free_page(page);
+			page++;
+		}
 	}
 }
 
@@ -416,7 +446,7 @@ void dma_free_coherent(struct device *dev, size_t size, void *cpu_addr, dma_addr
 	if (!arch_is_coherent())
 		__dma_free_remap(cpu_addr, size);
 
-	__dma_free_buffer(pfn_to_page(dma_to_pfn(dev, handle)), size);
+	__dma_free_buffer(dev, pfn_to_page(dma_to_pfn(dev, handle)), size);
 }
 EXPORT_SYMBOL(dma_free_coherent);
 
-- 
1.7.1.569.g6f426

  parent reply	other threads:[~2011-06-10  9:56 UTC|newest]

Thread overview: 178+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2011-06-10  9:54 [PATCHv10 0/10] Contiguous Memory Allocator Marek Szyprowski
2011-06-10  9:54 ` Marek Szyprowski
2011-06-10  9:54 ` Marek Szyprowski
2011-06-10  9:54 ` [PATCH 01/10] lib: bitmap: Added alignment offset for bitmap_find_next_zero_area() Marek Szyprowski
2011-06-10  9:54   ` Marek Szyprowski
2011-06-10  9:54   ` Marek Szyprowski
2011-06-10  9:54 ` [PATCH 02/10] lib: genalloc: Generic allocator improvements Marek Szyprowski
2011-06-10  9:54   ` Marek Szyprowski
2011-06-10  9:54   ` Marek Szyprowski
2011-06-10 11:24   ` Alan Cox
2011-06-10 11:24     ` Alan Cox
2011-06-10 11:24     ` Alan Cox
2011-06-10 12:22     ` Marek Szyprowski
2011-06-10 12:22       ` Marek Szyprowski
2011-06-10 12:22       ` Marek Szyprowski
2011-06-10 12:52       ` Alan Cox
2011-06-10 12:52         ` Alan Cox
2011-06-10 12:52         ` Alan Cox
2011-06-10 17:16         ` Michal Nazarewicz
2011-06-10 17:16           ` Michal Nazarewicz
2011-06-10 17:16           ` Michal Nazarewicz
2011-06-14 15:49         ` [Linaro-mm-sig] " Jordan Crouse
2011-06-14 15:49           ` Jordan Crouse
2011-06-14 15:49           ` Jordan Crouse
2011-06-10  9:54 ` [PATCH 03/10] mm: move some functions from memory_hotplug.c to page_isolation.c Marek Szyprowski
2011-06-10  9:54   ` Marek Szyprowski
2011-06-10  9:54   ` Marek Szyprowski
2011-06-10  9:54 ` [PATCH 04/10] mm: alloc_contig_freed_pages() added Marek Szyprowski
2011-06-10  9:54   ` Marek Szyprowski
2011-06-10  9:54   ` Marek Szyprowski
2011-06-10  9:54 ` [PATCH 05/10] mm: alloc_contig_range() added Marek Szyprowski
2011-06-10  9:54   ` Marek Szyprowski
2011-06-10  9:54   ` Marek Szyprowski
2011-06-10  9:54 ` [PATCH 06/10] mm: MIGRATE_CMA migration type added Marek Szyprowski
2011-06-10  9:54   ` Marek Szyprowski
2011-06-10  9:54   ` Marek Szyprowski
2011-06-10  9:54 ` [PATCH 07/10] mm: MIGRATE_CMA isolation functions added Marek Szyprowski
2011-06-10  9:54   ` Marek Szyprowski
2011-06-10  9:54   ` Marek Szyprowski
2011-06-10  9:54 ` [PATCH 08/10] mm: cma: Contiguous Memory Allocator added Marek Szyprowski
2011-06-10  9:54   ` Marek Szyprowski
2011-06-10  9:54   ` Marek Szyprowski
2011-06-10 16:21   ` Arnd Bergmann
2011-06-10 16:21     ` Arnd Bergmann
2011-06-10 16:21     ` Arnd Bergmann
2011-06-13  9:05     ` Marek Szyprowski
2011-06-13  9:05       ` Marek Szyprowski
2011-06-13  9:05       ` Marek Szyprowski
2011-06-14 13:49       ` Arnd Bergmann
2011-06-14 13:49         ` Arnd Bergmann
2011-06-14 13:49         ` Arnd Bergmann
2011-06-14 13:55         ` Michal Nazarewicz
2011-06-14 13:55           ` Michal Nazarewicz
2011-06-14 13:55           ` Michal Nazarewicz
2011-06-14 16:03           ` Arnd Bergmann
2011-06-14 16:03             ` Arnd Bergmann
2011-06-14 16:03             ` Arnd Bergmann
2011-06-14 16:58             ` Michal Nazarewicz
2011-06-14 16:58               ` Michal Nazarewicz
2011-06-14 16:58               ` Michal Nazarewicz
2011-06-14 18:30               ` Arnd Bergmann
2011-06-14 18:30                 ` Arnd Bergmann
2011-06-14 18:30                 ` Arnd Bergmann
2011-06-14 18:40                 ` Michal Nazarewicz
2011-06-14 18:40                   ` Michal Nazarewicz
2011-06-14 18:40                   ` Michal Nazarewicz
2011-06-15  7:11                 ` Marek Szyprowski
2011-06-15  7:11                   ` Marek Szyprowski
2011-06-15  7:11                   ` Marek Szyprowski
2011-06-15  7:37                   ` Arnd Bergmann
2011-06-15  7:37                     ` Arnd Bergmann
2011-06-15  7:37                     ` Arnd Bergmann
2011-06-15  8:14                     ` Marek Szyprowski
2011-06-15  8:14                       ` Marek Szyprowski
2011-06-15  8:14                       ` Marek Szyprowski
2011-06-16  0:48                     ` [Linaro-mm-sig] " Philip Balister
2011-06-16  0:48                       ` Philip Balister
2011-06-16  0:48                       ` Philip Balister
2011-06-16  7:03                       ` Arnd Bergmann
2011-06-16  7:03                         ` Arnd Bergmann
2011-06-16  7:03                         ` Arnd Bergmann
2011-06-16  7:03                         ` Arnd Bergmann
2011-06-22  7:03                     ` Hans Verkuil
2011-06-22  7:03                       ` Hans Verkuil
2011-06-22  7:03                       ` Hans Verkuil
2011-06-22  7:32                       ` Michal Nazarewicz
2011-06-22  7:32                         ` Michal Nazarewicz
2011-06-22  7:32                         ` Michal Nazarewicz
2011-06-22 12:42                       ` Arnd Bergmann
2011-06-22 12:42                         ` Arnd Bergmann
2011-06-22 12:42                         ` Arnd Bergmann
2011-06-22 13:15                         ` Marek Szyprowski
2011-06-22 13:15                           ` Marek Szyprowski
2011-06-22 13:15                           ` Marek Szyprowski
2011-06-22 13:39                           ` Arnd Bergmann
2011-06-22 13:39                             ` Arnd Bergmann
2011-06-22 13:39                             ` Arnd Bergmann
2011-06-22 13:39                             ` Arnd Bergmann
2011-06-22 16:04                             ` Michal Nazarewicz
2011-06-22 16:04                               ` Michal Nazarewicz
2011-06-22 16:04                               ` Michal Nazarewicz
2011-06-22 15:54                           ` Michal Nazarewicz
2011-06-22 15:54                             ` Michal Nazarewicz
2011-06-22 15:54                             ` Michal Nazarewicz
2011-06-15 11:53                 ` Daniel Vetter
2011-06-15 11:53                   ` Daniel Vetter
2011-06-15 11:53                   ` Daniel Vetter
2011-06-15 13:12                   ` Thomas Hellstrom
2011-06-15 13:12                     ` Thomas Hellstrom
2011-06-15 13:12                     ` Thomas Hellstrom
2011-06-17 16:08                   ` Arnd Bergmann
2011-06-17 16:08                     ` Arnd Bergmann
2011-06-17 16:08                     ` Arnd Bergmann
2011-06-14 17:01             ` Daniel Stone
2011-06-14 17:01               ` Daniel Stone
2011-06-14 17:01               ` Daniel Stone
2011-06-14 18:58               ` Zach Pfeffer
2011-06-14 18:58                 ` Zach Pfeffer
2011-06-14 18:58                 ` Zach Pfeffer
2011-06-14 20:42                 ` Arnd Bergmann
2011-06-14 20:42                   ` Arnd Bergmann
2011-06-14 20:42                   ` Arnd Bergmann
2011-06-14 21:01                   ` Jordan Crouse
2011-06-14 21:01                     ` Jordan Crouse
2011-06-14 21:01                     ` Jordan Crouse
2011-06-15 11:27                     ` Arnd Bergmann
2011-06-15 11:27                       ` Arnd Bergmann
2011-06-15 11:27                       ` Arnd Bergmann
2011-06-15 11:27                       ` Arnd Bergmann
2011-06-15  6:29                   ` Subash Patel
2011-06-15  8:36                   ` Marek Szyprowski
2011-06-15  8:36                     ` Marek Szyprowski
2011-06-15  8:36                     ` Marek Szyprowski
2011-06-15 21:39                     ` Larry Bassel
2011-06-15 21:39                       ` Larry Bassel
2011-06-15 21:39                       ` Larry Bassel
2011-06-15 22:06                       ` Arnd Bergmann
2011-06-15 22:06                         ` Arnd Bergmann
2011-06-15 22:06                         ` Arnd Bergmann
2011-06-16 17:01                         ` Larry Bassel
2011-06-16 17:01                           ` Larry Bassel
2011-06-16 17:01                           ` Larry Bassel
2011-06-17 12:45                           ` Arnd Bergmann
2011-06-17 12:45                             ` Arnd Bergmann
2011-06-17 12:45                             ` Arnd Bergmann
2011-07-04  5:25                         ` Ankita Garg
2011-07-04  5:25                           ` Ankita Garg
2011-07-04  5:25                           ` Ankita Garg
2011-07-04 14:45                           ` Arnd Bergmann
2011-07-04 14:45                             ` Arnd Bergmann
2011-07-04 14:45                             ` Arnd Bergmann
2011-06-16  3:20                       ` Zach Pfeffer
2011-06-16  3:20                         ` Zach Pfeffer
2011-06-16  3:20                         ` Zach Pfeffer
2011-06-15  9:26                   ` Michal Nazarewicz
2011-06-15  9:26                     ` Michal Nazarewicz
2011-06-15  9:26                     ` Michal Nazarewicz
2011-06-15 11:20                     ` Arnd Bergmann
2011-06-15 11:20                       ` Arnd Bergmann
2011-06-15 11:20                       ` Arnd Bergmann
2011-06-15 11:30                       ` Michal Nazarewicz
2011-06-15 11:30                         ` Michal Nazarewicz
2011-06-15 11:30                         ` Michal Nazarewicz
2011-06-15  6:01             ` Subash Patel
2011-06-15  6:01               ` Subash Patel
2011-06-15  6:01               ` Subash Patel
2011-06-15  8:02         ` Marek Szyprowski
2011-06-15  8:02           ` Marek Szyprowski
2011-06-15  8:02           ` Marek Szyprowski
2011-06-15 11:14           ` Arnd Bergmann
2011-06-15 11:14             ` Arnd Bergmann
2011-06-15 11:14             ` Arnd Bergmann
2011-06-10  9:54 ` Marek Szyprowski [this message]
2011-06-10  9:54   ` [PATCH 09/10] ARM: integrate CMA with dma-mapping subsystem Marek Szyprowski
2011-06-10  9:54   ` Marek Szyprowski
2011-06-10  9:54 ` [PATCH 10/10] ARM: S5PV210: add CMA support for FIMC devices on Aquila board Marek Szyprowski
2011-06-10  9:54   ` Marek Szyprowski
2011-06-10  9:54   ` Marek Szyprowski

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=1307699698-29369-10-git-send-email-m.szyprowski@samsung.com \
    --to=m.szyprowski@samsung.com \
    --cc=akpm@linux-foundation.org \
    --cc=ankita@in.ibm.com \
    --cc=arnd@arndb.de \
    --cc=dwalker@codeaurora.org \
    --cc=jesse.barker@linaro.org \
    --cc=johan.xx.mossberg@stericsson.com \
    --cc=kamezawa.hiroyu@jp.fujitsu.com \
    --cc=kyungmin.park@samsung.com \
    --cc=linaro-mm-sig@lists.linaro.org \
    --cc=linux-arm-kernel@lists.infradead.org \
    --cc=linux-kernel@vger.kernel.org \
    --cc=linux-media@vger.kernel.org \
    --cc=linux-mm@kvack.org \
    --cc=mel@csn.ul.ie \
    --cc=mina86@mina86.com \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.