[PATCH v13 1/6] genalloc: support memory allocation with bytes alignment
From: Zhao Qiang @ 2015-11-30  2:48 UTC
  To: lauraa
  Cc: linux-kernel, linuxppc-dev, akpm, olof, catalin.marinas,
	scottwood, X.xie, Zhao Qiang

Bytes alignment is required to manage some special RAM, so add
gen_pool_first_fit_align() to genalloc. Also add gen_pool_alloc_algo(),
which lets a caller pass the algorithm and its data explicitly for the
case where one user layer needs more than one algorithm, and turn
gen_pool_alloc() into a wrapper around it.
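
For example (illustrative only; "pool" and "size" here are assumed to
come from the caller, they are not part of this patch), a user needing
64-byte-aligned blocks can select the new algorithm per-allocation:

	struct genpool_data_align align_data = { .align = 64 };
	unsigned long addr;

	/*
	 * Ask for a 64-byte-aligned region. Other users of the same
	 * pool can keep calling gen_pool_alloc(), which still uses
	 * the pool's default algorithm.
	 */
	addr = gen_pool_alloc_algo(pool, size,
				   gen_pool_first_fit_align, &align_data);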

Signed-off-by: Zhao Qiang <qiang.zhao@freescale.com>
---
Changes for v6:
	- the v6 patch set includes a new patch (0001) because genalloc
	  is now used to manage QE MURAM; the new patch adds bytes
	  alignment support for allocations.
Changes for v7:
	- CPM muram also needs to be managed with genalloc; it has a
	  function to reserve a specific region of muram, so add an
	  offset to genpool_data for the start address to be allocated.
Changes for v8:
	- remove support for reserving a specific region from this
	  patch; a new patch adds it.
Changes for v9:
	- Nil 
Changes for v10:
	- Nil
Changes for v11:
	- Nil
Changes for v12:
	- Nil
Changes for v13:
	- add gen_pool_alloc_algo instead of gen_pool_alloc_data to pass algo and data.
	- rebase
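
A note on the align_mask computation in gen_pool_first_fit_align():
the requested alignment is given in bytes and converted into
allocation units of (1 << pool->min_alloc_order). As a worked example
(the values are illustrative, not from this patch): with
min_alloc_order = 2 (4-byte units) and align = 64,

	align_mask = ((64 + (1 << 2) - 1) >> 2) - 1 = 15

so bitmap_find_next_zero_area() only returns start offsets that are
multiples of 16 units, i.e. 64-byte-aligned addresses.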

 include/linux/genalloc.h | 27 +++++++++++++++++----
 lib/genalloc.c           | 61 +++++++++++++++++++++++++++++++++++++++++++-----
 2 files changed, 78 insertions(+), 10 deletions(-)

diff --git a/include/linux/genalloc.h b/include/linux/genalloc.h
index 7ff168d..3c676ce 100644
--- a/include/linux/genalloc.h
+++ b/include/linux/genalloc.h
@@ -30,10 +30,12 @@
 #ifndef __GENALLOC_H__
 #define __GENALLOC_H__
 
+#include <linux/types.h>
 #include <linux/spinlock_types.h>
 
 struct device;
 struct device_node;
+struct gen_pool;
 
 /**
  * Allocation callback function type definition
@@ -47,7 +49,7 @@ typedef unsigned long (*genpool_algo_t)(unsigned long *map,
 			unsigned long size,
 			unsigned long start,
 			unsigned int nr,
-			void *data);
+			void *data, struct gen_pool *pool);
 
 /*
  *  General purpose special memory pool descriptor.
@@ -75,6 +77,13 @@ struct gen_pool_chunk {
 	unsigned long bits[0];		/* bitmap for allocating memory chunk */
 };
 
+/*
+ *  gen_pool data descriptor for gen_pool_first_fit_align.
+ */
+struct genpool_data_align {
+	int align;		/* alignment by bytes for starting address */
+};
+
 extern struct gen_pool *gen_pool_create(int, int);
 extern phys_addr_t gen_pool_virt_to_phys(struct gen_pool *pool, unsigned long);
 extern int gen_pool_add_virt(struct gen_pool *, unsigned long, phys_addr_t,
@@ -98,6 +107,8 @@ static inline int gen_pool_add(struct gen_pool *pool, unsigned long addr,
 }
 extern void gen_pool_destroy(struct gen_pool *);
 extern unsigned long gen_pool_alloc(struct gen_pool *, size_t);
+extern unsigned long gen_pool_alloc_algo(struct gen_pool *, size_t,
+		genpool_algo_t algo, void *data);
 extern void *gen_pool_dma_alloc(struct gen_pool *pool, size_t size,
 		dma_addr_t *dma);
 extern void gen_pool_free(struct gen_pool *, unsigned long, size_t);
@@ -110,14 +121,22 @@ extern void gen_pool_set_algo(struct gen_pool *pool, genpool_algo_t algo,
 		void *data);
 
 extern unsigned long gen_pool_first_fit(unsigned long *map, unsigned long size,
-		unsigned long start, unsigned int nr, void *data);
+		unsigned long start, unsigned int nr, void *data,
+		struct gen_pool *pool);
+
+extern unsigned long gen_pool_first_fit_align(unsigned long *map,
+		unsigned long size, unsigned long start, unsigned int nr,
+		void *data, struct gen_pool *pool);
+
 
 extern unsigned long gen_pool_first_fit_order_align(unsigned long *map,
 		unsigned long size, unsigned long start, unsigned int nr,
-		void *data);
+		void *data, struct gen_pool *pool);
 
 extern unsigned long gen_pool_best_fit(unsigned long *map, unsigned long size,
-		unsigned long start, unsigned int nr, void *data);
+		unsigned long start, unsigned int nr, void *data,
+		struct gen_pool *pool);
+
 
 extern struct gen_pool *devm_gen_pool_create(struct device *dev,
 		int min_alloc_order, int nid, const char *name);
diff --git a/lib/genalloc.c b/lib/genalloc.c
index 116a166..b8cf89d 100644
--- a/lib/genalloc.c
+++ b/lib/genalloc.c
@@ -270,6 +270,25 @@ EXPORT_SYMBOL(gen_pool_destroy);
  */
 unsigned long gen_pool_alloc(struct gen_pool *pool, size_t size)
 {
+	return gen_pool_alloc_algo(pool, size, pool->algo, pool->data);
+}
+EXPORT_SYMBOL(gen_pool_alloc);
+
+/**
+ * gen_pool_alloc_algo - allocate special memory from the pool
+ * @pool: pool to allocate from
+ * @size: number of bytes to allocate from the pool
+ * @algo: algorithm passed from caller
+ * @data: data passed to algorithm
+ *
+ * Allocate the requested number of bytes from the specified pool.
+ * Uses the given pool allocation function.
+ * Can not be used in NMI handler on architectures without
+ * NMI-safe cmpxchg implementation.
+ */
+unsigned long gen_pool_alloc_algo(struct gen_pool *pool, size_t size,
+		genpool_algo_t algo, void *data)
+{
 	struct gen_pool_chunk *chunk;
 	unsigned long addr = 0;
 	int order = pool->min_alloc_order;
@@ -290,8 +309,8 @@ unsigned long gen_pool_alloc(struct gen_pool *pool, size_t size)
 
 		end_bit = chunk_size(chunk) >> order;
 retry:
-		start_bit = pool->algo(chunk->bits, end_bit, start_bit, nbits,
-				pool->data);
+		start_bit = algo(chunk->bits, end_bit, start_bit,
+				 nbits, data, pool);
 		if (start_bit >= end_bit)
 			continue;
 		remain = bitmap_set_ll(chunk->bits, start_bit, nbits);
@@ -310,7 +329,7 @@ retry:
 	rcu_read_unlock();
 	return addr;
 }
-EXPORT_SYMBOL(gen_pool_alloc);
+EXPORT_SYMBOL(gen_pool_alloc_algo);
 
 /**
  * gen_pool_dma_alloc - allocate special memory from the pool for DMA usage
@@ -501,15 +520,42 @@ EXPORT_SYMBOL(gen_pool_set_algo);
  * @start: The bitnumber to start searching at
  * @nr: The number of zeroed bits we're looking for
  * @data: additional data - unused
+ * @pool: pool to find the fit region memory from
  */
 unsigned long gen_pool_first_fit(unsigned long *map, unsigned long size,
-		unsigned long start, unsigned int nr, void *data)
+		unsigned long start, unsigned int nr, void *data,
+		struct gen_pool *pool)
 {
 	return bitmap_find_next_zero_area(map, size, start, nr, 0);
 }
 EXPORT_SYMBOL(gen_pool_first_fit);
 
 /**
+ * gen_pool_first_fit_align - find the first available region
+ * of memory matching the size requirement (alignment constraint)
+ * @map: The address to base the search on
+ * @size: The bitmap size in bits
+ * @start: The bitnumber to start searching at
+ * @nr: The number of zeroed bits we're looking for
+ * @data: data for alignment
+ * @pool: pool to get order from
+ */
+unsigned long gen_pool_first_fit_align(unsigned long *map, unsigned long size,
+		unsigned long start, unsigned int nr, void *data,
+		struct gen_pool *pool)
+{
+	struct genpool_data_align *alignment;
+	unsigned long align_mask;
+	int order;
+
+	alignment = data;
+	order = pool->min_alloc_order;
+	align_mask = ((alignment->align + (1UL << order) - 1) >> order) - 1;
+	return bitmap_find_next_zero_area(map, size, start, nr, align_mask);
+}
+EXPORT_SYMBOL(gen_pool_first_fit_align);
+
+/**
  * gen_pool_first_fit_order_align - find the first available region
  * of memory matching the size requirement. The region will be aligned
  * to the order of the size specified.
@@ -518,10 +564,11 @@ EXPORT_SYMBOL(gen_pool_first_fit);
  * @start: The bitnumber to start searching at
  * @nr: The number of zeroed bits we're looking for
  * @data: additional data - unused
+ * @pool: pool to find the fit region memory from
  */
 unsigned long gen_pool_first_fit_order_align(unsigned long *map,
 		unsigned long size, unsigned long start,
-		unsigned int nr, void *data)
+		unsigned int nr, void *data, struct gen_pool *pool)
 {
 	unsigned long align_mask = roundup_pow_of_two(nr) - 1;
 
@@ -537,12 +584,14 @@ EXPORT_SYMBOL(gen_pool_first_fit_order_align);
  * @start: The bitnumber to start searching at
  * @nr: The number of zeroed bits we're looking for
  * @data: additional data - unused
+ * @pool: pool to find the fit region memory from
  *
  * Iterate over the bitmap to find the smallest free region
  * which we can allocate the memory.
  */
 unsigned long gen_pool_best_fit(unsigned long *map, unsigned long size,
-		unsigned long start, unsigned int nr, void *data)
+		unsigned long start, unsigned int nr, void *data,
+		struct gen_pool *pool)
 {
 	unsigned long start_bit = size;
 	unsigned long len = size + 1;
-- 
2.1.0.27.g96db324

