From: Bruce Richardson <bruce.richardson@intel.com>
To: olivier.matz@6wind.com
Cc: thomas.monjalon@6wind.com, dev@dpdk.org,
	Bruce Richardson <bruce.richardson@intel.com>
Subject: [PATCH v4 12/14] ring: separate out head index manipulation for enq/deq
Date: Tue, 28 Mar 2017 21:36:04 +0100
Message-ID: <20170328203606.27457-13-bruce.richardson@intel.com>
In-Reply-To: <20170328203606.27457-1-bruce.richardson@intel.com>

We can write a single common function for head index manipulation for
enqueue and another for dequeue, giving us one worker function each for
enqueue and dequeue rather than two of each. Update all other inline
functions to use the new functions.

Signed-off-by: Bruce Richardson <bruce.richardson@intel.com>
Acked-by: Olivier Matz <olivier.matz@6wind.com>
---
V3: renamed parameter "is_mp" in __rte_ring_do_dequeue to the correct "is_sc"
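
An aside on the design: the single worker functions cost nothing on the
single-producer/consumer paths because is_sp/is_sc is passed as a
compile-time constant (__IS_SP etc.) from always-inline wrappers, so the
compiler drops the untaken branch, including the cmpset retry loop. Below
is a minimal standalone sketch of that idea, using hypothetical names
(move_head, sp_move_head) and the GCC __sync builtin in place of
rte_atomic32_cmpset; it is an illustration, not the code in this patch:

#include <stdint.h>

/* Sketch only: a constant flag folds away a branch once the helper is
 * force-inlined; the capacity/entry checks of the real code are elided. */
static inline __attribute__((always_inline)) uint32_t
move_head(uint32_t *head, int is_single, uint32_t n)
{
	uint32_t old = *head;

	if (is_single) {
		*head = old + n;	/* SP/SC: plain store, no CAS */
	} else {
		/* MP/MC: retry the compare-and-swap until we win */
		while (!__sync_bool_compare_and_swap(head, old, old + n))
			old = *head;
	}
	return old;	/* old head, i.e. where this caller's slots start */
}

static inline __attribute__((always_inline)) uint32_t
sp_move_head(uint32_t *head, uint32_t n)
{
	/* constant 1: after inlining, only the plain store remains */
	return move_head(head, 1, n);
}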
---
 lib/librte_ring/rte_ring.c |   4 +-
 lib/librte_ring/rte_ring.h | 328 ++++++++++++++++++++-------------------------
 2 files changed, 149 insertions(+), 183 deletions(-)
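
For context, the refactoring leaves the public enqueue/dequeue API
untouched. A minimal caller sketch against this version of the API
(burst_io is a hypothetical helper; the ring is assumed to have been
created elsewhere with rte_ring_create() and objs filled by the caller;
error handling omitted):

#include <rte_ring.h>

static void
burst_io(struct rte_ring *r, void **objs)
{
	unsigned int free_space, avail;

	/* enqueue now dispatches on r->prod.single inside one worker fn */
	unsigned int sent = rte_ring_enqueue_burst(r, objs, 32, &free_space);

	/* dequeue likewise dispatches on r->cons.single */
	unsigned int got = rte_ring_dequeue_burst(r, objs, 32, &avail);

	(void)sent;
	(void)got;
}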

diff --git a/lib/librte_ring/rte_ring.c b/lib/librte_ring/rte_ring.c
index 25f64f0..5f98c33 100644
--- a/lib/librte_ring/rte_ring.c
+++ b/lib/librte_ring/rte_ring.c
@@ -138,8 +138,8 @@ rte_ring_init(struct rte_ring *r, const char *name, unsigned count,
 	if (ret < 0 || ret >= (int)sizeof(r->name))
 		return -ENAMETOOLONG;
 	r->flags = flags;
-	r->prod.single = !!(flags & RING_F_SP_ENQ);
-	r->cons.single = !!(flags & RING_F_SC_DEQ);
+	r->prod.single = (flags & RING_F_SP_ENQ) ? __IS_SP : __IS_MP;
+	r->cons.single = (flags & RING_F_SC_DEQ) ? __IS_SC : __IS_MC;
 	r->size = count;
 	r->mask = count - 1;
 	r->prod.head = r->cons.head = 0;
diff --git a/lib/librte_ring/rte_ring.h b/lib/librte_ring/rte_ring.h
index e801510..3d8f738 100644
--- a/lib/librte_ring/rte_ring.h
+++ b/lib/librte_ring/rte_ring.h
@@ -169,6 +169,12 @@ struct rte_ring {
 #define RING_F_SC_DEQ 0x0002 /**< The default dequeue is "single-consumer". */
 #define RTE_RING_SZ_MASK  (unsigned)(0x0fffffff) /**< Ring size mask */
 
+/* @internal defines for passing to the enqueue/dequeue worker functions */
+#define __IS_SP 1
+#define __IS_MP 0
+#define __IS_SC 1
+#define __IS_MC 0
+
 /**
  * Calculate the memory size needed for a ring
  *
@@ -287,7 +293,7 @@ void rte_ring_dump(FILE *f, const struct rte_ring *r);
 #define ENQUEUE_PTRS() do { \
 	unsigned int i; \
 	const uint32_t size = r->size; \
-	uint32_t idx = prod_head & mask; \
+	uint32_t idx = prod_head & r->mask; \
 	if (likely(idx + n < size)) { \
 		for (i = 0; i < (n & ((~(unsigned)0x3))); i+=4, idx+=4) { \
 			r->ring[idx] = obj_table[i]; \
@@ -313,7 +319,7 @@ void rte_ring_dump(FILE *f, const struct rte_ring *r);
  * single and multi consumer dequeue functions */
 #define DEQUEUE_PTRS() do { \
 	unsigned int i; \
-	uint32_t idx = cons_head & mask; \
+	uint32_t idx = cons_head & r->mask; \
 	const uint32_t size = r->size; \
 	if (likely(idx + n < size)) { \
 		for (i = 0; i < (n & (~(unsigned)0x3)); i+=4, idx+=4) {\
@@ -336,83 +342,72 @@ void rte_ring_dump(FILE *f, const struct rte_ring *r);
 } while (0)
 
 /**
- * @internal Enqueue several objects on the ring (multi-producers safe).
- *
- * This function uses a "compare and set" instruction to move the
- * producer index atomically.
+ * @internal This function updates the producer head for enqueue
  *
  * @param r
- *   A pointer to the ring structure.
- * @param obj_table
- *   A pointer to a table of void * pointers (objects).
+ *   A pointer to the ring structure
+ * @param is_sp
+ *   Indicates whether to use the single-producer (1) or multi-producer (0) path
  * @param n
- *   The number of objects to add in the ring from the obj_table.
+ *   The number of elements we will want to enqueue, i.e. how far the
+ *   head should be moved
  * @param behavior
  *   RTE_RING_QUEUE_FIXED:    Enqueue a fixed number of items from a ring
- *   RTE_RING_QUEUE_VARIABLE: Enqueue as many items a possible from ring
+ *   RTE_RING_QUEUE_VARIABLE: Enqueue as many items as possible to the ring
+ * @param old_head
+ *   Returns the head value as it was before the move, i.e. where enqueue starts
+ * @param new_head
+ *   Returns the current/new head value, i.e. where enqueue finishes
+ * @param free_entries
+ *   Returns the amount of free space in the ring BEFORE head was moved
  * @return
  *   Actual number of objects enqueued.
  *   If behavior == RTE_RING_QUEUE_FIXED, this will be 0 or n only.
  */
-static inline unsigned int __attribute__((always_inline))
-__rte_ring_mp_do_enqueue(struct rte_ring *r, void * const *obj_table,
-			 unsigned int n, enum rte_ring_queue_behavior behavior,
-			 unsigned int *free_space)
+static inline __attribute__((always_inline)) unsigned int
+__rte_ring_move_prod_head(struct rte_ring *r, int is_sp,
+		unsigned int n, enum rte_ring_queue_behavior behavior,
+		uint32_t *old_head, uint32_t *new_head,
+		uint32_t *free_entries)
 {
-	uint32_t prod_head, prod_next;
-	uint32_t cons_tail, free_entries;
-	const unsigned int max = n;
+	const uint32_t mask = r->mask;
+	unsigned int max = n;
 	int success;
-	uint32_t mask = r->mask;
 
-	/* move prod.head atomically */
 	do {
 		/* Reset n to the initial burst count */
 		n = max;
 
-		prod_head = r->prod.head;
-		cons_tail = r->cons.tail;
+		*old_head = r->prod.head;
+		const uint32_t cons_tail = r->cons.tail;
 		/* The subtraction is done between two unsigned 32bits value
 		 * (the result is always modulo 32 bits even if we have
-		 * prod_head > cons_tail). So 'free_entries' is always between 0
+		 * *old_head > cons_tail). So 'free_entries' is always between 0
 		 * and size(ring)-1. */
-		free_entries = (mask + cons_tail - prod_head);
+		*free_entries = (mask + cons_tail - *old_head);
 
 		/* check that we have enough room in ring */
-		if (unlikely(n > free_entries))
+		if (unlikely(n > *free_entries))
 			n = (behavior == RTE_RING_QUEUE_FIXED) ?
-					0 : free_entries;
+					0 : *free_entries;
 
 		if (n == 0)
-			goto end;
-
-		prod_next = prod_head + n;
-		success = rte_atomic32_cmpset(&r->prod.head, prod_head,
-					      prod_next);
+			return 0;
+
+		*new_head = *old_head + n;
+		if (is_sp)
+			r->prod.head = *new_head, success = 1;
+		else
+			success = rte_atomic32_cmpset(&r->prod.head,
+					*old_head, *new_head);
 	} while (unlikely(success == 0));
-
-	/* write entries in ring */
-	ENQUEUE_PTRS();
-	rte_smp_wmb();
-
-	/*
-	 * If there are other enqueues in progress that preceded us,
-	 * we need to wait for them to complete
-	 */
-	while (unlikely(r->prod.tail != prod_head))
-		rte_pause();
-
-	r->prod.tail = prod_next;
-end:
-	if (free_space != NULL)
-		*free_space = free_entries - n;
 	return n;
 }
 
 /**
- * @internal Enqueue several objects on a ring (NOT multi-producers safe).
+ * @internal Enqueue several objects on the ring
  *
- * @param r
+ * @param r
  *   A pointer to the ring structure.
  * @param obj_table
  *   A pointer to a table of void * pointers (objects).
@@ -420,44 +415,40 @@ __rte_ring_mp_do_enqueue(struct rte_ring *r, void * const *obj_table,
  *   The number of objects to add in the ring from the obj_table.
  * @param behavior
  *   RTE_RING_QUEUE_FIXED:    Enqueue a fixed number of items from a ring
- *   RTE_RING_QUEUE_VARIABLE: Enqueue as many items a possible from ring
+ *   RTE_RING_QUEUE_VARIABLE: Enqueue as many items as possible to the ring
+ * @param is_sp
+ *   Indicates whether to use single producer or multi-producer head update
+ * @param free_space
+ *   Returns the amount of free space after the enqueue operation has finished
  * @return
  *   Actual number of objects enqueued.
  *   If behavior == RTE_RING_QUEUE_FIXED, this will be 0 or n only.
  */
-static inline unsigned int __attribute__((always_inline))
-__rte_ring_sp_do_enqueue(struct rte_ring *r, void * const *obj_table,
-			 unsigned int n, enum rte_ring_queue_behavior behavior,
-			 unsigned int *free_space)
+static inline __attribute__((always_inline)) unsigned int
+__rte_ring_do_enqueue(struct rte_ring *r, void * const *obj_table,
+		 unsigned int n, enum rte_ring_queue_behavior behavior,
+		 int is_sp, unsigned int *free_space)
 {
-	uint32_t prod_head, cons_tail;
-	uint32_t prod_next, free_entries;
-	uint32_t mask = r->mask;
-
-	prod_head = r->prod.head;
-	cons_tail = r->cons.tail;
-	/* The subtraction is done between two unsigned 32bits value
-	 * (the result is always modulo 32 bits even if we have
-	 * prod_head > cons_tail). So 'free_entries' is always between 0
-	 * and size(ring)-1. */
-	free_entries = mask + cons_tail - prod_head;
-
-	/* check that we have enough room in ring */
-	if (unlikely(n > free_entries))
-		n = (behavior == RTE_RING_QUEUE_FIXED) ? 0 : free_entries;
+	uint32_t prod_head, prod_next;
+	uint32_t free_entries;
 
+	n = __rte_ring_move_prod_head(r, is_sp, n, behavior,
+			&prod_head, &prod_next, &free_entries);
 	if (n == 0)
 		goto end;
 
-
-	prod_next = prod_head + n;
-	r->prod.head = prod_next;
-
-	/* write entries in ring */
 	ENQUEUE_PTRS();
 	rte_smp_wmb();
 
+	/*
+	 * If there are other enqueues in progress that preceded us,
+	 * we need to wait for them to complete
+	 */
+	while (unlikely(r->prod.tail != prod_head))
+		rte_pause();
+
 	r->prod.tail = prod_next;
+
 end:
 	if (free_space != NULL)
 		*free_space = free_entries - n;
@@ -465,130 +456,112 @@ __rte_ring_sp_do_enqueue(struct rte_ring *r, void * const *obj_table,
 }
 
 /**
- * @internal Dequeue several objects from a ring (multi-consumers safe). When
- * the request objects are more than the available objects, only dequeue the
- * actual number of objects
- *
- * This function uses a "compare and set" instruction to move the
- * consumer index atomically.
+ * @internal This function updates the consumer head for dequeue
  *
  * @param r
- *   A pointer to the ring structure.
- * @param obj_table
- *   A pointer to a table of void * pointers (objects) that will be filled.
+ *   A pointer to the ring structure
+ * @param is_sc
+ *   Indicates whether to use the single-consumer (1) or multi-consumer (0) path
  * @param n
- *   The number of objects to dequeue from the ring to the obj_table.
+ *   The number of elements we will want to dequeue, i.e. how far the
+ *   head should be moved
  * @param behavior
  *   RTE_RING_QUEUE_FIXED:    Dequeue a fixed number of items from a ring
- *   RTE_RING_QUEUE_VARIABLE: Dequeue as many items a possible from ring
+ *   RTE_RING_QUEUE_VARIABLE: Dequeue as many items as possible from ring
+ * @param old_head
+ *   Returns the head value as it was before the move, i.e. where dequeue starts
+ * @param new_head
+ *   Returns the current/new head value, i.e. where dequeue finishes
+ * @param entries
+ *   Returns the number of entries in the ring BEFORE head was moved
  * @return
  *   - Actual number of objects dequeued.
  *     If behavior == RTE_RING_QUEUE_FIXED, this will be 0 or n only.
  */
-
-static inline unsigned int __attribute__((always_inline))
-__rte_ring_mc_do_dequeue(struct rte_ring *r, void **obj_table,
-		 unsigned int n, enum rte_ring_queue_behavior behavior,
-		 unsigned int *available)
+static inline __attribute__((always_inline)) unsigned int
+__rte_ring_move_cons_head(struct rte_ring *r, int is_sc,
+		unsigned int n, enum rte_ring_queue_behavior behavior,
+		uint32_t *old_head, uint32_t *new_head,
+		uint32_t *entries)
 {
-	uint32_t cons_head, prod_tail;
-	uint32_t cons_next, entries;
-	const unsigned max = n;
+	unsigned int max = n;
 	int success;
-	uint32_t mask = r->mask;
 
 	/* move cons.head atomically */
 	do {
 		/* Restore n as it may change every loop */
 		n = max;
 
-		cons_head = r->cons.head;
-		prod_tail = r->prod.tail;
+		*old_head = r->cons.head;
+		const uint32_t prod_tail = r->prod.tail;
 		/* The subtraction is done between two unsigned 32bits value
 		 * (the result is always modulo 32 bits even if we have
 		 * cons_head > prod_tail). So 'entries' is always between 0
 		 * and size(ring)-1. */
-		entries = (prod_tail - cons_head);
+		*entries = (prod_tail - *old_head);
 
 		/* Set the actual entries for dequeue */
-		if (n > entries)
-			n = (behavior == RTE_RING_QUEUE_FIXED) ? 0 : entries;
+		if (n > *entries)
+			n = (behavior == RTE_RING_QUEUE_FIXED) ? 0 : *entries;
 
 		if (unlikely(n == 0))
-			goto end;
-
-		cons_next = cons_head + n;
-		success = rte_atomic32_cmpset(&r->cons.head, cons_head,
-					      cons_next);
+			return 0;
+
+		*new_head = *old_head + n;
+		if (is_sc)
+			r->cons.head = *new_head, success = 1;
+		else
+			success = rte_atomic32_cmpset(&r->cons.head, *old_head,
+					*new_head);
 	} while (unlikely(success == 0));
-
-	/* copy in table */
-	DEQUEUE_PTRS();
-	rte_smp_rmb();
-
-	/*
-	 * If there are other dequeues in progress that preceded us,
-	 * we need to wait for them to complete
-	 */
-	while (unlikely(r->cons.tail != cons_head))
-		rte_pause();
-
-	r->cons.tail = cons_next;
-end:
-	if (available != NULL)
-		*available = entries - n;
 	return n;
 }
 
 /**
- * @internal Dequeue several objects from a ring (NOT multi-consumers safe).
- * When the request objects are more than the available objects, only dequeue
- * the actual number of objects
+ * @internal Dequeue several objects from the ring
  *
  * @param r
  *   A pointer to the ring structure.
  * @param obj_table
- *   A pointer to a table of void * pointers (objects) that will be filled.
+ *   A pointer to a table of void * pointers (objects).
  * @param n
- *   The number of objects to dequeue from the ring to the obj_table.
+ *   The number of objects to pull from the ring.
  * @param behavior
  *   RTE_RING_QUEUE_FIXED:    Dequeue a fixed number of items from a ring
- *   RTE_RING_QUEUE_VARIABLE: Dequeue as many items a possible from ring
+ *   RTE_RING_QUEUE_VARIABLE: Dequeue as many items as possible from ring
+ * @param is_sc
+ *   Indicates whether to use single consumer or multi-consumer head update
+ * @param available
+ *   Returns the number of remaining ring entries after the dequeue has finished
  * @return
  *   - Actual number of objects dequeued.
  *     If behavior == RTE_RING_QUEUE_FIXED, this will be 0 or n only.
  */
-static inline unsigned int __attribute__((always_inline))
-__rte_ring_sc_do_dequeue(struct rte_ring *r, void **obj_table,
+static inline __attribute__((always_inline)) unsigned int
+__rte_ring_do_dequeue(struct rte_ring *r, void **obj_table,
 		 unsigned int n, enum rte_ring_queue_behavior behavior,
-		 unsigned int *available)
+		 int is_sc, unsigned int *available)
 {
-	uint32_t cons_head, prod_tail;
-	uint32_t cons_next, entries;
-	uint32_t mask = r->mask;
-
-	cons_head = r->cons.head;
-	prod_tail = r->prod.tail;
-	/* The subtraction is done between two unsigned 32bits value
-	 * (the result is always modulo 32 bits even if we have
-	 * cons_head > prod_tail). So 'entries' is always between 0
-	 * and size(ring)-1. */
-	entries = prod_tail - cons_head;
-
-	if (n > entries)
-		n = (behavior == RTE_RING_QUEUE_FIXED) ? 0 : entries;
-
-	if (unlikely(entries == 0))
-		goto end;
+	uint32_t cons_head, cons_next;
+	uint32_t entries;
 
-	cons_next = cons_head + n;
-	r->cons.head = cons_next;
+	n = __rte_ring_move_cons_head(r, is_sc, n, behavior,
+			&cons_head, &cons_next, &entries);
+	if (n == 0)
+		goto end;
 
-	/* copy in table */
 	DEQUEUE_PTRS();
 	rte_smp_rmb();
 
+	/*
+	 * If there are other dequeues in progress that preceded us,
+	 * we need to wait for them to complete
+	 */
+	while (unlikely(r->cons.tail != cons_head))
+		rte_pause();
+
 	r->cons.tail = cons_next;
+
 end:
 	if (available != NULL)
 		*available = entries - n;
@@ -614,8 +587,8 @@ static inline unsigned int __attribute__((always_inline))
 rte_ring_mp_enqueue_bulk(struct rte_ring *r, void * const *obj_table,
 			 unsigned int n, unsigned int *free_space)
 {
-	return __rte_ring_mp_do_enqueue(r, obj_table, n, RTE_RING_QUEUE_FIXED,
-			free_space);
+	return __rte_ring_do_enqueue(r, obj_table, n, RTE_RING_QUEUE_FIXED,
+			__IS_MP, free_space);
 }
 
 /**
@@ -634,8 +607,8 @@ static inline unsigned int __attribute__((always_inline))
 rte_ring_sp_enqueue_bulk(struct rte_ring *r, void * const *obj_table,
 			 unsigned int n, unsigned int *free_space)
 {
-	return __rte_ring_sp_do_enqueue(r, obj_table, n, RTE_RING_QUEUE_FIXED,
-			free_space);
+	return __rte_ring_do_enqueue(r, obj_table, n, RTE_RING_QUEUE_FIXED,
+			__IS_SP, free_space);
 }
 
 /**
@@ -658,10 +631,8 @@ static inline unsigned int __attribute__((always_inline))
 rte_ring_enqueue_bulk(struct rte_ring *r, void * const *obj_table,
 		      unsigned int n, unsigned int *free_space)
 {
-	if (r->prod.single)
-		return rte_ring_sp_enqueue_bulk(r, obj_table, n, free_space);
-	else
-		return rte_ring_mp_enqueue_bulk(r, obj_table, n, free_space);
+	return __rte_ring_do_enqueue(r, obj_table, n, RTE_RING_QUEUE_FIXED,
+			r->prod.single, free_space);
 }
 
 /**
@@ -741,8 +712,8 @@ static inline unsigned int __attribute__((always_inline))
 rte_ring_mc_dequeue_bulk(struct rte_ring *r, void **obj_table,
 		unsigned int n, unsigned int *available)
 {
-	return __rte_ring_mc_do_dequeue(r, obj_table, n, RTE_RING_QUEUE_FIXED,
-			available);
+	return __rte_ring_do_dequeue(r, obj_table, n, RTE_RING_QUEUE_FIXED,
+			__IS_MC, available);
 }
 
 /**
@@ -762,8 +733,8 @@ static inline unsigned int __attribute__((always_inline))
 rte_ring_sc_dequeue_bulk(struct rte_ring *r, void **obj_table,
 		unsigned int n, unsigned int *available)
 {
-	return __rte_ring_sc_do_dequeue(r, obj_table, n, RTE_RING_QUEUE_FIXED,
-			available);
+	return __rte_ring_do_dequeue(r, obj_table, n, RTE_RING_QUEUE_FIXED,
+			__IS_SC, available);
 }
 
 /**
@@ -786,10 +757,8 @@ static inline unsigned int __attribute__((always_inline))
 rte_ring_dequeue_bulk(struct rte_ring *r, void **obj_table, unsigned int n,
 		unsigned int *available)
 {
-	if (r->cons.single)
-		return rte_ring_sc_dequeue_bulk(r, obj_table, n, available);
-	else
-		return rte_ring_mc_dequeue_bulk(r, obj_table, n, available);
+	return __rte_ring_do_dequeue(r, obj_table, n, RTE_RING_QUEUE_FIXED,
+				r->cons.single, available);
 }
 
 /**
@@ -972,8 +941,8 @@ static inline unsigned __attribute__((always_inline))
 rte_ring_mp_enqueue_burst(struct rte_ring *r, void * const *obj_table,
 			 unsigned int n, unsigned int *free_space)
 {
-	return __rte_ring_mp_do_enqueue(r, obj_table, n,
-			RTE_RING_QUEUE_VARIABLE, free_space);
+	return __rte_ring_do_enqueue(r, obj_table, n,
+			RTE_RING_QUEUE_VARIABLE, __IS_MP, free_space);
 }
 
 /**
@@ -992,8 +961,8 @@ static inline unsigned __attribute__((always_inline))
 rte_ring_sp_enqueue_burst(struct rte_ring *r, void * const *obj_table,
 			 unsigned int n, unsigned int *free_space)
 {
-	return __rte_ring_sp_do_enqueue(r, obj_table, n,
-			RTE_RING_QUEUE_VARIABLE, free_space);
+	return __rte_ring_do_enqueue(r, obj_table, n,
+			RTE_RING_QUEUE_VARIABLE, __IS_SP, free_space);
 }
 
 /**
@@ -1016,10 +985,8 @@ static inline unsigned __attribute__((always_inline))
 rte_ring_enqueue_burst(struct rte_ring *r, void * const *obj_table,
 		      unsigned int n, unsigned int *free_space)
 {
-	if (r->prod.single)
-		return rte_ring_sp_enqueue_burst(r, obj_table, n, free_space);
-	else
-		return rte_ring_mp_enqueue_burst(r, obj_table, n, free_space);
+	return __rte_ring_do_enqueue(r, obj_table, n, RTE_RING_QUEUE_VARIABLE,
+			r->prod.single, free_space);
 }
 
 /**
@@ -1043,8 +1010,8 @@ static inline unsigned __attribute__((always_inline))
 rte_ring_mc_dequeue_burst(struct rte_ring *r, void **obj_table,
 		unsigned int n, unsigned int *available)
 {
-	return __rte_ring_mc_do_dequeue(r, obj_table, n,
-			RTE_RING_QUEUE_VARIABLE, available);
+	return __rte_ring_do_dequeue(r, obj_table, n,
+			RTE_RING_QUEUE_VARIABLE, __IS_MC, available);
 }
 
 /**
@@ -1065,8 +1032,8 @@ static inline unsigned __attribute__((always_inline))
 rte_ring_sc_dequeue_burst(struct rte_ring *r, void **obj_table,
 		unsigned int n, unsigned int *available)
 {
-	return __rte_ring_sc_do_dequeue(r, obj_table, n,
-			RTE_RING_QUEUE_VARIABLE, available);
+	return __rte_ring_do_dequeue(r, obj_table, n,
+			RTE_RING_QUEUE_VARIABLE, __IS_SC, available);
 }
 
 /**
@@ -1089,10 +1056,9 @@ static inline unsigned __attribute__((always_inline))
 rte_ring_dequeue_burst(struct rte_ring *r, void **obj_table,
 		unsigned int n, unsigned int *available)
 {
-	if (r->cons.single)
-		return rte_ring_sc_dequeue_burst(r, obj_table, n, available);
-	else
-		return rte_ring_mc_dequeue_burst(r, obj_table, n, available);
+	return __rte_ring_do_dequeue(r, obj_table, n,
+				RTE_RING_QUEUE_VARIABLE,
+				r->cons.single, available);
 }
 
 #ifdef __cplusplus
-- 
2.9.3
