* [PATCH net-next] Revert "xen-netback: improve ring effeciency for guest RX"
@ 2013-10-08  9:54 Wei Liu
From: Wei Liu @ 2013-10-08  9:54 UTC (permalink / raw)
  To: netdev
  Cc: xen-devel, Wei Liu, Ian Campbell, Annie Li, Matt Wilson,
	Xi Xiong, David Vrabel, Paul Durrant

This reverts commit 4f0581d25827d5e864bcf07b05d73d0d12a20a5c.

The named changeset is causing problems. Let's aim to make this part
less fragile before trying to improve things.

Signed-off-by: Wei Liu <wei.liu2@citrix.com>
Cc: Ian Campbell <ian.campbell@citrix.com>
Cc: Annie Li <annie.li@oracle.com>
Cc: Matt Wilson <msw@amazon.com>
Cc: Xi Xiong <xixiong@amazon.com>
Cc: David Vrabel <david.vrabel@citrix.com>
Cc: Paul Durrant <paul.durrant@citrix.com>
---
 drivers/net/xen-netback/netback.c |  144 +++++++++++++++++++++----------------
 1 file changed, 83 insertions(+), 61 deletions(-)

diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index d0b0feb..f3e591c 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -47,14 +47,6 @@
 #include <asm/xen/hypercall.h>
 #include <asm/xen/page.h>
 
-/* SKB control block overlay is used to store useful information when
- * doing guest RX.
- */
-struct skb_cb_overlay {
-	int meta_slots_used;
-	int peek_slots_count;
-};
-
 /* Provide an option to disable split event channels at load time as
  * event channels are limited resource. Split event channels are
  * enabled by default.
@@ -220,6 +212,49 @@ static bool start_new_rx_buffer(int offset, unsigned long size, int head)
 	return false;
 }
 
+struct xenvif_count_slot_state {
+	unsigned long copy_off;
+	bool head;
+};
+
+unsigned int xenvif_count_frag_slots(struct xenvif *vif,
+				     unsigned long offset, unsigned long size,
+				     struct xenvif_count_slot_state *state)
+{
+	unsigned count = 0;
+
+	offset &= ~PAGE_MASK;
+
+	while (size > 0) {
+		unsigned long bytes;
+
+		bytes = PAGE_SIZE - offset;
+
+		if (bytes > size)
+			bytes = size;
+
+		if (start_new_rx_buffer(state->copy_off, bytes, state->head)) {
+			count++;
+			state->copy_off = 0;
+		}
+
+		if (state->copy_off + bytes > MAX_BUFFER_OFFSET)
+			bytes = MAX_BUFFER_OFFSET - state->copy_off;
+
+		state->copy_off += bytes;
+
+		offset += bytes;
+		size -= bytes;
+
+		if (offset == PAGE_SIZE)
+			offset = 0;
+
+		state->head = false;
+	}
+
+	return count;
+}
+
 /*
  * Figure out how many ring slots we're going to need to send @skb to
  * the guest. This function is essentially a dry run of
@@ -227,53 +262,40 @@ static bool start_new_rx_buffer(int offset, unsigned long size, int head)
  */
 unsigned int xenvif_count_skb_slots(struct xenvif *vif, struct sk_buff *skb)
 {
+	struct xenvif_count_slot_state state;
 	unsigned int count;
-	int i, copy_off;
-	struct skb_cb_overlay *sco;
+	unsigned char *data;
+	unsigned i;
 
-	count = DIV_ROUND_UP(skb_headlen(skb), PAGE_SIZE);
+	state.head = true;
+	state.copy_off = 0;
 
-	copy_off = skb_headlen(skb) % PAGE_SIZE;
+	/* Slot for the first (partial) page of data. */
+	count = 1;
 
+	/* Need a slot for the GSO prefix for GSO extra data? */
 	if (skb_shinfo(skb)->gso_size)
 		count++;
 
-	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
-		unsigned long size = skb_frag_size(&skb_shinfo(skb)->frags[i]);
-		unsigned long offset = skb_shinfo(skb)->frags[i].page_offset;
-		unsigned long bytes;
-
-		offset &= ~PAGE_MASK;
-
-		while (size > 0) {
-			BUG_ON(offset >= PAGE_SIZE);
-			BUG_ON(copy_off > MAX_BUFFER_OFFSET);
-
-			bytes = PAGE_SIZE - offset;
-
-			if (bytes > size)
-				bytes = size;
+	data = skb->data;
+	while (data < skb_tail_pointer(skb)) {
+		unsigned long offset = offset_in_page(data);
+		unsigned long size = PAGE_SIZE - offset;
 
-			if (start_new_rx_buffer(copy_off, bytes, 0)) {
-				count++;
-				copy_off = 0;
-			}
+		if (data + size > skb_tail_pointer(skb))
+			size = skb_tail_pointer(skb) - data;
 
-			if (copy_off + bytes > MAX_BUFFER_OFFSET)
-				bytes = MAX_BUFFER_OFFSET - copy_off;
+		count += xenvif_count_frag_slots(vif, offset, size, &state);
 
-			copy_off += bytes;
+		data += size;
+	}
 
-			offset += bytes;
-			size -= bytes;
+	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+		unsigned long size = skb_frag_size(&skb_shinfo(skb)->frags[i]);
+		unsigned long offset = skb_shinfo(skb)->frags[i].page_offset;
 
-			if (offset == PAGE_SIZE)
-				offset = 0;
-		}
+		count += xenvif_count_frag_slots(vif, offset, size, &state);
 	}
-
-	sco = (struct skb_cb_overlay *)skb->cb;
-	sco->peek_slots_count = count;
 	return count;
 }
 
@@ -305,11 +327,14 @@ static struct xenvif_rx_meta *get_next_rx_buffer(struct xenvif *vif,
 	return meta;
 }
 
-/* Set up the grant operations for this fragment. */
+/*
+ * Set up the grant operations for this fragment. If it's a flipping
+ * interface, we also set up the unmap request from here.
+ */
 static void xenvif_gop_frag_copy(struct xenvif *vif, struct sk_buff *skb,
 				 struct netrx_pending_operations *npo,
 				 struct page *page, unsigned long size,
-				 unsigned long offset, int head, int *first)
+				 unsigned long offset, int *head)
 {
 	struct gnttab_copy *copy_gop;
 	struct xenvif_rx_meta *meta;
@@ -333,12 +358,12 @@ static void xenvif_gop_frag_copy(struct xenvif *vif, struct sk_buff *skb,
 		if (bytes > size)
 			bytes = size;
 
-		if (start_new_rx_buffer(npo->copy_off, bytes, head)) {
+		if (start_new_rx_buffer(npo->copy_off, bytes, *head)) {
 			/*
 			 * Netfront requires there to be some data in the head
 			 * buffer.
 			 */
-			BUG_ON(*first);
+			BUG_ON(*head);
 
 			meta = get_next_rx_buffer(vif, npo);
 		}
@@ -372,10 +397,10 @@ static void xenvif_gop_frag_copy(struct xenvif *vif, struct sk_buff *skb,
 		}
 
 		/* Leave a gap for the GSO descriptor. */
-		if (*first && skb_shinfo(skb)->gso_size && !vif->gso_prefix)
+		if (*head && skb_shinfo(skb)->gso_size && !vif->gso_prefix)
 			vif->rx.req_cons++;
 
-		*first = 0; /* There must be something in this buffer now. */
+		*head = 0; /* There must be something in this buffer now. */
 
 	}
 }
@@ -401,7 +426,7 @@ static int xenvif_gop_skb(struct sk_buff *skb,
 	struct xen_netif_rx_request *req;
 	struct xenvif_rx_meta *meta;
 	unsigned char *data;
-	int first = 1;
+	int head = 1;
 	int old_meta_prod;
 
 	old_meta_prod = npo->meta_prod;
@@ -437,7 +462,7 @@ static int xenvif_gop_skb(struct sk_buff *skb,
 			len = skb_tail_pointer(skb) - data;
 
 		xenvif_gop_frag_copy(vif, skb, npo,
-				     virt_to_page(data), len, offset, 1, &first);
+				     virt_to_page(data), len, offset, &head);
 		data += len;
 	}
 
@@ -446,7 +471,7 @@ static int xenvif_gop_skb(struct sk_buff *skb,
 				     skb_frag_page(&skb_shinfo(skb)->frags[i]),
 				     skb_frag_size(&skb_shinfo(skb)->frags[i]),
 				     skb_shinfo(skb)->frags[i].page_offset,
-				     0, &first);
+				     &head);
 	}
 
 	return npo->meta_prod - old_meta_prod;
@@ -504,6 +529,10 @@ static void xenvif_add_frag_responses(struct xenvif *vif, int status,
 	}
 }
 
+struct skb_cb_overlay {
+	int meta_slots_used;
+};
+
 static void xenvif_kick_thread(struct xenvif *vif)
 {
 	wake_up(&vif->wq);
@@ -534,26 +563,19 @@ void xenvif_rx_action(struct xenvif *vif)
 	count = 0;
 
 	while ((skb = skb_dequeue(&vif->rx_queue)) != NULL) {
-		RING_IDX old_rx_req_cons;
-
 		vif = netdev_priv(skb->dev);
 		nr_frags = skb_shinfo(skb)->nr_frags;
 
-		old_rx_req_cons = vif->rx.req_cons;
 		sco = (struct skb_cb_overlay *)skb->cb;
 		sco->meta_slots_used = xenvif_gop_skb(skb, &npo);
 
-		count += vif->rx.req_cons - old_rx_req_cons;
+		count += nr_frags + 1;
 
 		__skb_queue_tail(&rxq, skb);
 
-		skb = skb_peek(&vif->rx_queue);
-		if (skb == NULL)
-			break;
-		sco = (struct skb_cb_overlay *)skb->cb;
-
 		/* Filled the batch queue? */
-		if (count + sco->peek_slots_count >= XEN_NETIF_RX_RING_SIZE)
+		/* XXX FIXME: RX path dependent on MAX_SKB_FRAGS */
+		if (count + MAX_SKB_FRAGS >= XEN_NETIF_RX_RING_SIZE)
 			break;
 	}
 
-- 
1.7.10.4
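
For readers following the slot accounting that this revert reinstates,
here is a minimal user-space sketch of the restored counting loop,
modelled on xenvif_count_frag_slots() above. It is not the kernel code:
SKETCH_PAGE_SIZE and SKETCH_MAX_BUFFER_OFFSET are hard-coded assumptions,
and start_new_buffer() is a simplified stand-in for the driver's
start_new_rx_buffer(), which checks additional conditions.

/*
 * Stand-alone sketch (assumptions: 4 KiB pages, one-page buffer limit).
 * It walks a fragment in page-sized chunks and counts how many extra
 * guest RX ring slots the fragment consumes.
 */
#include <stdbool.h>
#include <stdio.h>

#define SKETCH_PAGE_SIZE		4096UL
#define SKETCH_MAX_BUFFER_OFFSET	SKETCH_PAGE_SIZE

struct count_slot_state {
	unsigned long copy_off;	/* bytes already placed in the current slot */
	bool head;		/* still filling the head buffer? */
};

/* Simplified stand-in: start a new slot if the chunk would overflow it. */
static bool start_new_buffer(unsigned long copy_off, unsigned long size,
			     bool head)
{
	(void)head;
	return copy_off + size > SKETCH_MAX_BUFFER_OFFSET;
}

/* Count the extra slots needed for a fragment of @size bytes at @offset. */
static unsigned int count_frag_slots(unsigned long offset, unsigned long size,
				     struct count_slot_state *state)
{
	unsigned int count = 0;

	offset &= SKETCH_PAGE_SIZE - 1;	/* offset within its page */

	while (size > 0) {
		unsigned long bytes = SKETCH_PAGE_SIZE - offset;

		if (bytes > size)
			bytes = size;

		/* Chunk does not fit in the current slot: open a new one. */
		if (start_new_buffer(state->copy_off, bytes, state->head)) {
			count++;
			state->copy_off = 0;
		}

		if (state->copy_off + bytes > SKETCH_MAX_BUFFER_OFFSET)
			bytes = SKETCH_MAX_BUFFER_OFFSET - state->copy_off;

		state->copy_off += bytes;
		offset += bytes;
		size -= bytes;

		if (offset == SKETCH_PAGE_SIZE)
			offset = 0;

		state->head = false;
	}

	return count;
}

int main(void)
{
	/* Example: a 10000-byte fragment starting 100 bytes into a page. */
	struct count_slot_state state = { .copy_off = 0, .head = true };

	printf("extra slots needed: %u\n",
	       count_frag_slots(100, 10000, &state));
	return 0;
}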


* Re: [PATCH net-next] Revert "xen-netback: improve ring effeciency for guest RX"
@ 2013-10-08 10:21 ` Ian Campbell
From: Ian Campbell @ 2013-10-08 10:21 UTC (permalink / raw)
  To: Wei Liu
  Cc: netdev, xen-devel, Annie Li, Matt Wilson, Xi Xiong, David Vrabel,
	Paul Durrant

On Tue, 2013-10-08 at 10:54 +0100, Wei Liu wrote:
> This reverts commit 4f0581d25827d5e864bcf07b05d73d0d12a20a5c.
> 
> The named changeset is causing problems. Let's aim to make this part
> less fragile before trying to improve things.
> 
> Signed-off-by: Wei Liu <wei.liu2@citrix.com>
Acked-by: Ian Campbell <ian.campbell@citrix.com>

I thought davem would just run git revert himself, though, so I don't
know if it is needed.


* Re: [PATCH net-next] Revert "xen-netback: improve ring effeciency for guest RX"
@ 2013-10-08 19:11   ` David Miller
From: David Miller @ 2013-10-08 19:11 UTC (permalink / raw)
  To: ian.campbell
  Cc: wei.liu2, netdev, xen-devel, annie.li, msw, xixiong,
	david.vrabel, paul.durrant

From: Ian Campbell <ian.campbell@citrix.com>
Date: Tue, 8 Oct 2013 11:21:47 +0100

> On Tue, 2013-10-08 at 10:54 +0100, Wei Liu wrote:
>> This reverts commit 4f0581d25827d5e864bcf07b05d73d0d12a20a5c.
>> 
>> The named changeset is causing problems. Let's aim to make this part
>> less fragile before trying to improve things.
>> 
>> Signed-off-by: Wei Liu <wei.liu2@citrix.com>
> Acked-by: Ian Campbell <ian.campbell@citrix.com>
> 
> I thought davem would just run git revert himself, though, so I don't
> know if it is needed.

This works too, because it gives you guys an opportunity to add
some explanation to the commit message.

Applied, thanks.

