* [PATCH net v2] xen-netback: Fix handling of skbs requiring too many slots
@ 2014-06-04 18:58 Zoltan Kiss
2014-06-05 8:12 ` Paul Durrant
` (5 more replies)
0 siblings, 6 replies; 10+ messages in thread
From: Zoltan Kiss @ 2014-06-04 18:58 UTC (permalink / raw)
To: xen-devel, ian.campbell, wei.liu2, paul.durrant, linux
Cc: netdev, david.vrabel, davem, Zoltan Kiss
A recent commit (a02eb4 "xen-netback: worse-case estimate in xenvif_rx_action is
underestimating") capped the slot estimation to MAX_SKB_FRAGS, but that triggers
the next BUG_ON a few lines down, as the packet consumes more slots than
estimated.
This patch introduces full_coalesce on the skb callback buffer, which is used in
start_new_rx_buffer() to decide whether netback needs coalescing more
aggressively. By doing that, no packet should need more than
(XEN_NETIF_MAX_TX_SIZE + 1) / PAGE_SIZE data slots (excluding the optional GSO
slot, which doesn't carry data and is therefore irrelevant in this case), as the provided
buffers are fully utilized.
Signed-off-by: Zoltan Kiss <zoltan.kiss@citrix.com>
Cc: Paul Durrant <paul.durrant@citrix.com>
Cc: Wei Liu <wei.liu2@citrix.com>
Cc: Ian Campbell <ian.campbell@citrix.com>
Cc: David Vrabel <david.vrabel@citrix.com>
---
v2:
- clarify that GSO slot doesn't count here
- fix style
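For orientation before the diff: the following is a minimal, self-contained sketch of the
decision start_new_rx_buffer() makes once full_coalesce is plumbed through. It is an
illustration only, not the driver code; the xenvif/ring context is stubbed out and
MAX_BUFFER_OFFSET is assumed to be one 4 KiB page here.

    #include <stdbool.h>

    #define MAX_BUFFER_OFFSET 4096	/* assumption: one PAGE_SIZE-sized ring buffer */

    /* Per-skb flag; the driver keeps this in skb->cb via XENVIF_RX_CB(skb). */
    struct rx_cb {
    	bool full_coalesce;
    };

    /*
     * Should the next 'size' bytes start a new ring buffer instead of being
     * appended to the current one, which already holds 'offset' bytes?
     * With full_coalesce set, the "don't split a frag across buffers"
     * shortcut is skipped, so every buffer is packed to capacity and the
     * skb fits within the capped slot estimate.
     */
    static bool start_new_rx_buffer(int offset, unsigned long size, int head,
    				bool full_coalesce)
    {
    	if (offset == MAX_BUFFER_OFFSET)
    		return true;		/* current buffer is completely full */

    	if ((offset + size > MAX_BUFFER_OFFSET) && offset && !head &&
    	    !full_coalesce)
    		return true;		/* frag would fit wholly in the next buffer */

    	return false;			/* otherwise keep filling this buffer */
    }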
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index 64ab1d1..6a21588 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -163,7 +163,8 @@ bool xenvif_rx_ring_slots_available(struct xenvif *vif, int needed)
* adding 'size' bytes to a buffer which currently contains 'offset'
* bytes.
*/
-static bool start_new_rx_buffer(int offset, unsigned long size, int head)
+static bool start_new_rx_buffer(int offset, unsigned long size, int head,
+ bool full_coalesce)
{
/* simple case: we have completely filled the current buffer. */
if (offset == MAX_BUFFER_OFFSET)
@@ -175,6 +176,7 @@ static bool start_new_rx_buffer(int offset, unsigned long size, int head)
* (i) this frag would fit completely in the next buffer
* and (ii) there is already some data in the current buffer
* and (iii) this is not the head buffer.
+ * and (iv) there is no need to fully utilize the buffers
*
* Where:
* - (i) stops us splitting a frag into two copies
@@ -185,6 +187,8 @@ static bool start_new_rx_buffer(int offset, unsigned long size, int head)
* by (ii) but is explicitly checked because
* netfront relies on the first buffer being
* non-empty and can crash otherwise.
+ * - (iv) is needed for skbs which can use up more than MAX_SKB_FRAGS
+ * slot
*
* This means we will effectively linearise small
* frags but do not needlessly split large buffers
@@ -192,7 +196,8 @@ static bool start_new_rx_buffer(int offset, unsigned long size, int head)
* own buffers as before.
*/
BUG_ON(size > MAX_BUFFER_OFFSET);
- if ((offset + size > MAX_BUFFER_OFFSET) && offset && !head)
+ if ((offset + size > MAX_BUFFER_OFFSET) && offset && !head &&
+ !full_coalesce)
return true;
return false;
@@ -227,6 +232,13 @@ static struct xenvif_rx_meta *get_next_rx_buffer(struct xenvif *vif,
return meta;
}
+struct xenvif_rx_cb {
+ int meta_slots_used;
+ bool full_coalesce;
+};
+
+#define XENVIF_RX_CB(skb) ((struct xenvif_rx_cb *)(skb)->cb)
+
/*
* Set up the grant operations for this fragment. If it's a flipping
* interface, we also set up the unmap request from here.
@@ -261,7 +273,10 @@ static void xenvif_gop_frag_copy(struct xenvif *vif, struct sk_buff *skb,
if (bytes > size)
bytes = size;
- if (start_new_rx_buffer(npo->copy_off, bytes, *head)) {
+ if (start_new_rx_buffer(npo->copy_off,
+ bytes,
+ *head,
+ XENVIF_RX_CB(skb)->full_coalesce)) {
/*
* Netfront requires there to be some data in the head
* buffer.
@@ -541,12 +556,6 @@ static void xenvif_add_frag_responses(struct xenvif *vif, int status,
}
}
-struct xenvif_rx_cb {
- int meta_slots_used;
-};
-
-#define XENVIF_RX_CB(skb) ((struct xenvif_rx_cb *)(skb)->cb)
-
void xenvif_kick_thread(struct xenvif *vif)
{
wake_up(&vif->wq);
@@ -602,10 +611,14 @@ static void xenvif_rx_action(struct xenvif *vif)
/* To avoid the estimate becoming too pessimal for some
* frontends that limit posted rx requests, cap the estimate
- * at MAX_SKB_FRAGS.
+ * at MAX_SKB_FRAGS. In this case netback will fully coalesce
+ * the skb into the provided slots.
*/
- if (max_slots_needed > MAX_SKB_FRAGS)
+ if (max_slots_needed > MAX_SKB_FRAGS) {
max_slots_needed = MAX_SKB_FRAGS;
+ XENVIF_RX_CB(skb)->full_coalesce = true;
+ } else
+ XENVIF_RX_CB(skb)->full_coalesce = false;
/* We may need one more slot for GSO metadata */
if (skb_is_gso(skb) &&
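As a quick sanity check on the bound from the commit message: assuming the usual x86
values of PAGE_SIZE = 4096 and XEN_NETIF_MAX_TX_SIZE = 0xFFFF (assumed for this example,
not quoted from the thread), a fully coalesced skb needs at most 16 data slots, which
stays within the MAX_SKB_FRAGS cap of 17 applied in the last hunk:

    #include <assert.h>

    #define PAGE_SIZE		4096UL		/* assumed 4 KiB pages */
    #define XEN_NETIF_MAX_TX_SIZE	0xFFFFUL	/* assumed ring packet size limit */
    #define MAX_SKB_FRAGS		(65536UL / PAGE_SIZE + 1)	/* 17 with 4 KiB pages */

    int main(void)
    {
    	unsigned long max_data_slots = (XEN_NETIF_MAX_TX_SIZE + 1) / PAGE_SIZE;

    	assert(max_data_slots == 16);			/* 65536 / 4096 */
    	assert(max_data_slots <= MAX_SKB_FRAGS);	/* fits the capped estimate */
    	return 0;
    }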
^ permalink raw reply related [flat|nested] 10+ messages in thread
* RE: [PATCH net v2] xen-netback: Fix handling of skbs requiring too many slots
2014-06-04 18:58 [PATCH net v2] xen-netback: Fix handling of skbs requiring too many slots Zoltan Kiss
2014-06-05 8:12 ` Paul Durrant
@ 2014-06-05 8:12 ` Paul Durrant
2014-06-05 10:07 ` Wei Liu
` (3 subsequent siblings)
5 siblings, 0 replies; 10+ messages in thread
From: Paul Durrant @ 2014-06-05 8:12 UTC (permalink / raw)
To: Zoltan Kiss, xen-devel, Ian Campbell, Wei Liu, linux
Cc: netdev, David Vrabel, davem
> -----Original Message-----
> From: Zoltan Kiss
> Sent: 04 June 2014 19:59
> To: xen-devel@lists.xenproject.org; Ian Campbell; Wei Liu; Paul Durrant;
> linux@eikelenboom.it
> Cc: netdev@vger.kernel.org; David Vrabel; davem@davemloft.net; Zoltan
> Kiss
> Subject: [PATCH net v2] xen-netback: Fix handling of skbs requiring too many
> slots
>
> A recent commit (a02eb4 "xen-netback: worse-case estimate in
> xenvif_rx_action is
> underestimating") capped the slot estimation to MAX_SKB_FRAGS, but that
> triggers
> the next BUG_ON a few lines down, as the packet consumes more slots than
> estimated.
> This patch introduces full_coalesce on the skb callback buffer, which is used
> in
> start_new_rx_buffer() to decide whether netback needs coalescing more
> aggressively. By doing that, no packet should need more than
> (XEN_NETIF_MAX_TX_SIZE + 1) / PAGE_SIZE data slots (excluding the
> optional GSO
> slot, which doesn't carry data and is therefore irrelevant in this case), as the provided
> buffers are fully utilized.
>
> Signed-off-by: Zoltan Kiss <zoltan.kiss@citrix.com>
> Cc: Paul Durrant <paul.durrant@citrix.com>
Reviewed-by: Paul Durrant <paul.durrant@gmail.com>
> Cc: Wei Liu <wei.liu2@citrix.com>
> Cc: Ian Campbell <ian.campbell@citrix.com>
> Cc: David Vrabel <david.vrabel@citrix.com>
> ---
> v2:
> - clarify that GSO slot doesn't count here
> - fix style
>
> diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-
> netback/netback.c
> index 64ab1d1..6a21588 100644
> --- a/drivers/net/xen-netback/netback.c
> +++ b/drivers/net/xen-netback/netback.c
> @@ -163,7 +163,8 @@ bool xenvif_rx_ring_slots_available(struct xenvif *vif,
> int needed)
> * adding 'size' bytes to a buffer which currently contains 'offset'
> * bytes.
> */
> -static bool start_new_rx_buffer(int offset, unsigned long size, int head)
> +static bool start_new_rx_buffer(int offset, unsigned long size, int head,
> + bool full_coalesce)
> {
> /* simple case: we have completely filled the current buffer. */
> if (offset == MAX_BUFFER_OFFSET)
> @@ -175,6 +176,7 @@ static bool start_new_rx_buffer(int offset, unsigned
> long size, int head)
> * (i) this frag would fit completely in the next buffer
> * and (ii) there is already some data in the current buffer
> * and (iii) this is not the head buffer.
> + * and (iv) there is no need to fully utilize the buffers
> *
> * Where:
> * - (i) stops us splitting a frag into two copies
> @@ -185,6 +187,8 @@ static bool start_new_rx_buffer(int offset, unsigned
> long size, int head)
> * by (ii) but is explicitly checked because
> * netfront relies on the first buffer being
> * non-empty and can crash otherwise.
> + * - (iv) is needed for skbs which can use up more than
> MAX_SKB_FRAGS
> + * slot
> *
> * This means we will effectively linearise small
> * frags but do not needlessly split large buffers
> @@ -192,7 +196,8 @@ static bool start_new_rx_buffer(int offset, unsigned
> long size, int head)
> * own buffers as before.
> */
> BUG_ON(size > MAX_BUFFER_OFFSET);
> - if ((offset + size > MAX_BUFFER_OFFSET) && offset && !head)
> + if ((offset + size > MAX_BUFFER_OFFSET) && offset && !head &&
> + !full_coalesce)
> return true;
>
> return false;
> @@ -227,6 +232,13 @@ static struct xenvif_rx_meta
> *get_next_rx_buffer(struct xenvif *vif,
> return meta;
> }
>
> +struct xenvif_rx_cb {
> + int meta_slots_used;
> + bool full_coalesce;
> +};
> +
> +#define XENVIF_RX_CB(skb) ((struct xenvif_rx_cb *)(skb)->cb)
> +
> /*
> * Set up the grant operations for this fragment. If it's a flipping
> * interface, we also set up the unmap request from here.
> @@ -261,7 +273,10 @@ static void xenvif_gop_frag_copy(struct xenvif *vif,
> struct sk_buff *skb,
> if (bytes > size)
> bytes = size;
>
> - if (start_new_rx_buffer(npo->copy_off, bytes, *head)) {
> + if (start_new_rx_buffer(npo->copy_off,
> + bytes,
> + *head,
> + XENVIF_RX_CB(skb)->full_coalesce))
> {
> /*
> * Netfront requires there to be some data in the
> head
> * buffer.
> @@ -541,12 +556,6 @@ static void xenvif_add_frag_responses(struct xenvif
> *vif, int status,
> }
> }
>
> -struct xenvif_rx_cb {
> - int meta_slots_used;
> -};
> -
> -#define XENVIF_RX_CB(skb) ((struct xenvif_rx_cb *)(skb)->cb)
> -
> void xenvif_kick_thread(struct xenvif *vif)
> {
> wake_up(&vif->wq);
> @@ -602,10 +611,14 @@ static void xenvif_rx_action(struct xenvif *vif)
>
> /* To avoid the estimate becoming too pessimal for some
> * frontends that limit posted rx requests, cap the estimate
> - * at MAX_SKB_FRAGS.
> + * at MAX_SKB_FRAGS. In this case netback will fully coalesce
> + * the skb into the provided slots.
> */
> - if (max_slots_needed > MAX_SKB_FRAGS)
> + if (max_slots_needed > MAX_SKB_FRAGS) {
> max_slots_needed = MAX_SKB_FRAGS;
> + XENVIF_RX_CB(skb)->full_coalesce = true;
> + } else
> + XENVIF_RX_CB(skb)->full_coalesce = false;
>
> /* We may need one more slot for GSO metadata */
> if (skb_is_gso(skb) &&
^ permalink raw reply [flat|nested] 10+ messages in thread
* Re: [PATCH net v2] xen-netback: Fix handling of skbs requiring too many slots
2014-06-04 18:58 [PATCH net v2] xen-netback: Fix handling of skbs requiring too many slots Zoltan Kiss
2014-06-05 8:12 ` Paul Durrant
2014-06-05 8:12 ` Paul Durrant
@ 2014-06-05 10:07 ` Wei Liu
2014-06-05 10:56 ` Sander Eikelenboom
2014-06-05 10:56 ` Sander Eikelenboom
2014-06-05 10:07 ` Wei Liu
` (2 subsequent siblings)
5 siblings, 2 replies; 10+ messages in thread
From: Wei Liu @ 2014-06-05 10:07 UTC (permalink / raw)
To: Zoltan Kiss
Cc: xen-devel, ian.campbell, wei.liu2, paul.durrant, linux, netdev,
david.vrabel, davem
On Wed, Jun 04, 2014 at 07:58:51PM +0100, Zoltan Kiss wrote:
> A recent commit (a02eb4 "xen-netback: worse-case estimate in xenvif_rx_action is
> underestimating") capped the slot estimation to MAX_SKB_FRAGS, but that triggers
> the next BUG_ON a few lines down, as the packet consumes more slots than
> estimated.
> This patch introduces full_coalesce on the skb callback buffer, which is used in
> start_new_rx_buffer() to decide whether netback needs coalescing more
> aggressively. By doing that, no packet should need more than
> (XEN_NETIF_MAX_TX_SIZE + 1) / PAGE_SIZE data slots (excluding the optional GSO
> slot, which doesn't carry data and is therefore irrelevant in this case), as the provided
> buffers are fully utilized.
>
Acked-by: Wei Liu <wei.liu2@citrix.com>
Minor nit below.
[...]
> @@ -602,10 +611,14 @@ static void xenvif_rx_action(struct xenvif *vif)
>
> /* To avoid the estimate becoming too pessimal for some
> * frontends that limit posted rx requests, cap the estimate
> - * at MAX_SKB_FRAGS.
> + * at MAX_SKB_FRAGS. In this case netback will fully coalesce
> + * the skb into the provided slots.
> */
> - if (max_slots_needed > MAX_SKB_FRAGS)
> + if (max_slots_needed > MAX_SKB_FRAGS) {
> max_slots_needed = MAX_SKB_FRAGS;
> + XENVIF_RX_CB(skb)->full_coalesce = true;
> + } else
> + XENVIF_RX_CB(skb)->full_coalesce = false;
>
Please enclose this line in {} as well, as required by kernel coding
style.
Wei.
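The rule Wei points at is the one in the kernel's Documentation/CodingStyle: when only
one branch of a conditional is a single statement, braces are still used on both
branches. Roughly the document's own illustration, with placeholder function names:

    if (condition) {
    	do_this();
    	do_that();
    } else {
    	otherwise();	/* braces kept here as well */
    }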
^ permalink raw reply [flat|nested] 10+ messages in thread
* Re: [PATCH net v2] xen-netback: Fix handling of skbs requiring too many slots
2014-06-05 10:07 ` Wei Liu
@ 2014-06-05 10:56 ` Sander Eikelenboom
2014-06-05 10:56 ` Sander Eikelenboom
1 sibling, 0 replies; 10+ messages in thread
From: Sander Eikelenboom @ 2014-06-05 10:56 UTC (permalink / raw)
To: Zoltan Kiss
Cc: Wei Liu, xen-devel, ian.campbell, paul.durrant, netdev,
david.vrabel, davem
Thursday, June 5, 2014, 12:07:34 PM, you wrote:
> On Wed, Jun 04, 2014 at 07:58:51PM +0100, Zoltan Kiss wrote:
>> A recent commit (a02eb4 "xen-netback: worse-case estimate in xenvif_rx_action is
>> underestimating") capped the slot estimation to MAX_SKB_FRAGS, but that triggers
>> the next BUG_ON a few lines down, as the packet consumes more slots than
>> estimated.
>> This patch introduces full_coalesce on the skb callback buffer, which is used in
>> start_new_rx_buffer() to decide whether netback needs coalescing more
>> aggressively. By doing that, no packet should need more than
>> (XEN_NETIF_MAX_TX_SIZE + 1) / PAGE_SIZE data slots (excluding the optional GSO
>> slot, which doesn't carry data and is therefore irrelevant in this case), as the provided
>> buffers are fully utilized.
>>
Hi Zoltan,
JFYI: I tested this patch (v2) and it seems to work fine for my test cases, thanks!
--
Sander
^ permalink raw reply [flat|nested] 10+ messages in thread
* Re: [PATCH net v2] xen-netback: Fix handling of skbs requiring too many slots
2014-06-04 18:58 [PATCH net v2] xen-netback: Fix handling of skbs requiring too many slots Zoltan Kiss
` (3 preceding siblings ...)
2014-06-05 10:07 ` Wei Liu
@ 2014-06-05 22:09 ` David Miller
2014-06-05 22:09 ` David Miller
5 siblings, 0 replies; 10+ messages in thread
From: David Miller @ 2014-06-05 22:09 UTC (permalink / raw)
To: zoltan.kiss
Cc: xen-devel, ian.campbell, wei.liu2, paul.durrant, linux, netdev,
david.vrabel
From: Zoltan Kiss <zoltan.kiss@citrix.com>
Date: Wed, 4 Jun 2014 19:58:51 +0100
> A recent commit (a02eb4 "xen-netback: worse-case estimate in xenvif_rx_action is
> underestimating") capped the slot estimation to MAX_SKB_FRAGS, but that triggers
> the next BUG_ON a few lines down, as the packet consumes more slots than
> estimated.
> This patch introduces full_coalesce on the skb callback buffer, which is used in
> start_new_rx_buffer() to decide whether netback needs coalescing more
> aggressively. By doing that, no packet should need more than
> (XEN_NETIF_MAX_TX_SIZE + 1) / PAGE_SIZE data slots (excluding the optional GSO
> slot, which doesn't carry data and is therefore irrelevant in this case), as the provided
> buffers are fully utilized.
>
> Signed-off-by: Zoltan Kiss <zoltan.kiss@citrix.com>
Applied, and:
> - if (max_slots_needed > MAX_SKB_FRAGS)
> + if (max_slots_needed > MAX_SKB_FRAGS) {
> max_slots_needed = MAX_SKB_FRAGS;
> + XENVIF_RX_CB(skb)->full_coalesce = true;
> + } else
> + XENVIF_RX_CB(skb)->full_coalesce = false;
I took care of adding the {} to the else block, as suggested by Wei Liu.
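Reconstructed from the v2 patch plus that fix-up (not copied from the net tree), the
applied hunk presumably ends up as:

    	if (max_slots_needed > MAX_SKB_FRAGS) {
    		max_slots_needed = MAX_SKB_FRAGS;
    		XENVIF_RX_CB(skb)->full_coalesce = true;
    	} else {
    		XENVIF_RX_CB(skb)->full_coalesce = false;
    	}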
^ permalink raw reply [flat|nested] 10+ messages in thread