netdev.vger.kernel.org archive mirror
 help / color / mirror / Atom feed
From: Daniel Borkmann <daniel@iogearbox.net>
To: Yuya Kusakabe <yuya.kusakabe@gmail.com>,
	Jason Wang <jasowang@redhat.com>
Cc: ast@kernel.org, davem@davemloft.net, hawk@kernel.org,
	jakub.kicinski@netronome.com, john.fastabend@gmail.com,
	kafai@fb.com, mst@redhat.com, netdev@vger.kernel.org,
	songliubraving@fb.com, yhs@fb.com
Subject: Re: [PATCH bpf-next v3] virtio_net: add XDP meta data support
Date: Tue, 9 Jul 2019 00:38:49 +0200	[thread overview]
Message-ID: <a5f4601a-db0e-e65b-5b32-cc7e04ba90be@iogearbox.net> (raw)
In-Reply-To: <52e3fc0d-bdd7-83ee-58e6-488e2b91cc83@gmail.com>

On 07/02/2019 04:11 PM, Yuya Kusakabe wrote:
> On 7/2/19 5:33 PM, Jason Wang wrote:
>> On 2019/7/2 下午4:16, Yuya Kusakabe wrote:
>>> This adds XDP meta data support to both receive_small() and
>>> receive_mergeable().
>>>
>>> Fixes: de8f3a83b0a0 ("bpf: add meta pointer for direct access")
>>> Signed-off-by: Yuya Kusakabe <yuya.kusakabe@gmail.com>
>>> ---
>>> v3:
>>>   - fix preserving the vnet header in receive_small().
>>> v2:
>>>   - keep copy untouched in page_to_skb().
>>>   - preserve the vnet header in receive_small().
>>>   - fix indentation.
>>> ---
>>>   drivers/net/virtio_net.c | 45 +++++++++++++++++++++++++++-------------
>>>   1 file changed, 31 insertions(+), 14 deletions(-)
>>>
>>> diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
>>> index 4f3de0ac8b0b..03a1ae6fe267 100644
>>> --- a/drivers/net/virtio_net.c
>>> +++ b/drivers/net/virtio_net.c
>>> @@ -371,7 +371,7 @@ static struct sk_buff *page_to_skb(struct virtnet_info *vi,
>>>                      struct receive_queue *rq,
>>>                      struct page *page, unsigned int offset,
>>>                      unsigned int len, unsigned int truesize,
>>> -                   bool hdr_valid)
>>> +                   bool hdr_valid, unsigned int metasize)
>>>   {
>>>       struct sk_buff *skb;
>>>       struct virtio_net_hdr_mrg_rxbuf *hdr;
>>> @@ -393,7 +393,7 @@ static struct sk_buff *page_to_skb(struct virtnet_info *vi,
>>>       else
>>>           hdr_padded_len = sizeof(struct padded_vnet_hdr);
>>>   -    if (hdr_valid)
>>> +    if (hdr_valid && !metasize)
>>>           memcpy(hdr, p, hdr_len);
>>>         len -= hdr_len;
>>> @@ -405,6 +405,11 @@ static struct sk_buff *page_to_skb(struct virtnet_info *vi,
>>>           copy = skb_tailroom(skb);
>>>       skb_put_data(skb, p, copy);
>>>   +    if (metasize) {
>>> +        __skb_pull(skb, metasize);
>>> +        skb_metadata_set(skb, metasize);
>>> +    }
>>> +
>>>       len -= copy;
>>>       offset += copy;
>>>   @@ -644,6 +649,7 @@ static struct sk_buff *receive_small(struct net_device *dev,
>>>       unsigned int delta = 0;
>>>       struct page *xdp_page;
>>>       int err;
>>> +    unsigned int metasize = 0;
>>>         len -= vi->hdr_len;
>>>       stats->bytes += len;
>>> @@ -683,10 +689,13 @@ static struct sk_buff *receive_small(struct net_device *dev,
>>>             xdp.data_hard_start = buf + VIRTNET_RX_PAD + vi->hdr_len;
>>>           xdp.data = xdp.data_hard_start + xdp_headroom;
>>> -        xdp_set_data_meta_invalid(&xdp);
>>>           xdp.data_end = xdp.data + len;
>>> +        xdp.data_meta = xdp.data;
>>>           xdp.rxq = &rq->xdp_rxq;
>>>           orig_data = xdp.data;
>>> +        /* Copy the vnet header to the front of data_hard_start to avoid
>>> +         * overwriting by XDP meta data */
>>> +        memcpy(xdp.data_hard_start - vi->hdr_len, xdp.data - vi->hdr_len, vi->hdr_len);

I'm not fully sure if I'm following this one correctly, probably just missing
something. Isn't the vnet header based on how we set up xdp.data_hard_start
earlier already in front of it? Wouldn't we copy invalid data from xdp.data -
vi->hdr_len into the vnet header at that point (given there can be up to 256
bytes of headroom between the two)? If it's relative to xdp.data and headroom
is >0, then BPF prog could otherwise mangle this; something doesn't add up to
me here. Could you clarify? Thx

>> What happens if we have a large metadata that occupies all headroom here?
>>
>> Thanks
> 
> Do you mean a large "XDP" metadata? If a large metadata is a large "XDP" metadata, I think we cannot use a metadata that occupies all of the headroom. The size of the metadata is limited by bpf_xdp_adjust_meta() as below.
> bpf_xdp_adjust_meta() in net/core/filter.c:
> 	if (unlikely((metalen & (sizeof(__u32) - 1)) ||
> 		     (metalen > 32)))
> 		return -EACCES;
> 
> Thanks.
> 
>>
>>
>>>           act = bpf_prog_run_xdp(xdp_prog, &xdp);
>>>           stats->xdp_packets++;
>>>   @@ -695,9 +704,11 @@ static struct sk_buff *receive_small(struct net_device *dev,
>>>               /* Recalculate length in case bpf program changed it */
>>>               delta = orig_data - xdp.data;
>>>               len = xdp.data_end - xdp.data;
>>> +            metasize = xdp.data - xdp.data_meta;
>>>               break;
>>>           case XDP_TX:
>>>               stats->xdp_tx++;
>>> +            xdp.data_meta = xdp.data;
>>>               xdpf = convert_to_xdp_frame(&xdp);
>>>               if (unlikely(!xdpf))
>>>                   goto err_xdp;
>>> @@ -736,10 +747,12 @@ static struct sk_buff *receive_small(struct net_device *dev,
>>>       skb_reserve(skb, headroom - delta);
>>>       skb_put(skb, len);
>>>       if (!delta) {
>>> -        buf += header_offset;
>>> -        memcpy(skb_vnet_hdr(skb), buf, vi->hdr_len);
>>> +        memcpy(skb_vnet_hdr(skb), buf + VIRTNET_RX_PAD, vi->hdr_len);
>>>       } /* keep zeroed vnet hdr since packet was changed by bpf */
>>>   +    if (metasize)
>>> +        skb_metadata_set(skb, metasize);
>>> +
>>>   err:
>>>       return skb;
>>>   @@ -760,8 +773,8 @@ static struct sk_buff *receive_big(struct net_device *dev,
>>>                      struct virtnet_rq_stats *stats)
>>>   {
>>>       struct page *page = buf;
>>> -    struct sk_buff *skb = page_to_skb(vi, rq, page, 0, len,
>>> -                      PAGE_SIZE, true);
>>> +    struct sk_buff *skb =
>>> +        page_to_skb(vi, rq, page, 0, len, PAGE_SIZE, true, 0);
>>>         stats->bytes += len - vi->hdr_len;
>>>       if (unlikely(!skb))
>>> @@ -793,6 +806,7 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
>>>       unsigned int truesize;
>>>       unsigned int headroom = mergeable_ctx_to_headroom(ctx);
>>>       int err;
>>> +    unsigned int metasize = 0;
>>>         head_skb = NULL;
>>>       stats->bytes += len - vi->hdr_len;
>>> @@ -839,8 +853,8 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
>>>           data = page_address(xdp_page) + offset;
>>>           xdp.data_hard_start = data - VIRTIO_XDP_HEADROOM + vi->hdr_len;
>>>           xdp.data = data + vi->hdr_len;
>>> -        xdp_set_data_meta_invalid(&xdp);
>>>           xdp.data_end = xdp.data + (len - vi->hdr_len);
>>> +        xdp.data_meta = xdp.data;
>>>           xdp.rxq = &rq->xdp_rxq;
>>>             act = bpf_prog_run_xdp(xdp_prog, &xdp);
>>> @@ -852,8 +866,9 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
>>>                * adjustments. Note other cases do not build an
>>>                * skb and avoid using offset
>>>                */
>>> -            offset = xdp.data -
>>> -                    page_address(xdp_page) - vi->hdr_len;
>>> +            metasize = xdp.data - xdp.data_meta;
>>> +            offset = xdp.data - page_address(xdp_page) -
>>> +                 vi->hdr_len - metasize;
>>>                 /* recalculate len if xdp.data or xdp.data_end were
>>>                * adjusted
>>> @@ -863,14 +878,15 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
>>>               if (unlikely(xdp_page != page)) {
>>>                   rcu_read_unlock();
>>>                   put_page(page);
>>> -                head_skb = page_to_skb(vi, rq, xdp_page,
>>> -                               offset, len,
>>> -                               PAGE_SIZE, false);
>>> +                head_skb = page_to_skb(vi, rq, xdp_page, offset,
>>> +                               len, PAGE_SIZE, false,
>>> +                               metasize);
>>>                   return head_skb;
>>>               }
>>>               break;
>>>           case XDP_TX:
>>>               stats->xdp_tx++;
>>> +            xdp.data_meta = xdp.data;
>>>               xdpf = convert_to_xdp_frame(&xdp);
>>>               if (unlikely(!xdpf))
>>>                   goto err_xdp;
>>> @@ -921,7 +937,8 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
>>>           goto err_skb;
>>>       }
>>>   -    head_skb = page_to_skb(vi, rq, page, offset, len, truesize, !xdp_prog);
>>> +    head_skb = page_to_skb(vi, rq, page, offset, len, truesize, !xdp_prog,
>>> +                   metasize);
>>>       curr_skb = head_skb;
>>>         if (unlikely(!curr_skb))


  reply	other threads:[~2019-07-08 22:39 UTC|newest]

Thread overview: 28+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2019-06-27  8:06 [PATCH bpf-next] virtio_net: add XDP meta data support Yuya Kusakabe
2019-07-01  9:30 ` Jason Wang
2019-07-02  1:00   ` Yuya Kusakabe
2019-07-02  3:15     ` [PATCH bpf-next v2] " Yuya Kusakabe
2019-07-02  3:59       ` Jason Wang
2019-07-02  5:15         ` Yuya Kusakabe
2019-07-02  8:16           ` [PATCH bpf-next v3] " Yuya Kusakabe
2019-07-02  8:33             ` Jason Wang
2019-07-02 14:11               ` Yuya Kusakabe
2019-07-08 22:38                 ` Daniel Borkmann [this message]
2019-07-09  3:04                   ` Jason Wang
2019-07-09 20:03                     ` Daniel Borkmann
2019-07-10  2:30                       ` Jason Wang
2020-02-03 13:52                         ` Yuya Kusakabe
2020-02-04  3:31                           ` Jason Wang
2020-02-04  7:16                             ` [PATCH bpf-next v4] " Yuya Kusakabe
2020-02-05  4:10                               ` Jason Wang
2020-02-05  9:18                                 ` Yuya Kusakabe
2020-02-06  3:20                                   ` Jason Wang
2020-02-20  8:55                                     ` [PATCH bpf-next v5] " Yuya Kusakabe
2020-02-21  4:23                                       ` Jason Wang
2020-02-21  8:36                                         ` Yuya Kusakabe
2020-02-21 11:01                                           ` Michael S. Tsirkin
2020-02-23  8:14                                           ` Michael S. Tsirkin
2020-02-24  4:05                                             ` Jason Wang
2020-02-25  0:52                                               ` Yuya Kusakabe
2020-02-05  5:33                               ` [PATCH bpf-next v4] " Michael S. Tsirkin
2020-02-05  9:19                                 ` Yuya Kusakabe

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=a5f4601a-db0e-e65b-5b32-cc7e04ba90be@iogearbox.net \
    --to=daniel@iogearbox.net \
    --cc=ast@kernel.org \
    --cc=davem@davemloft.net \
    --cc=hawk@kernel.org \
    --cc=jakub.kicinski@netronome.com \
    --cc=jasowang@redhat.com \
    --cc=john.fastabend@gmail.com \
    --cc=kafai@fb.com \
    --cc=mst@redhat.com \
    --cc=netdev@vger.kernel.org \
    --cc=songliubraving@fb.com \
    --cc=yhs@fb.com \
    --cc=yuya.kusakabe@gmail.com \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).