From: Martin KaFai Lau <kafai@fb.com>
To: Hangbin Liu <liuhangbin@gmail.com>
Cc: bpf@vger.kernel.org, netdev@vger.kernel.org,
	"Toke Høiland-Jørgensen" <toke@redhat.com>,
	"Jiri Benc" <jbenc@redhat.com>,
	"Jesper Dangaard Brouer" <brouer@redhat.com>,
	"Eelco Chaudron" <echaudro@redhat.com>,
	ast@kernel.org, "Daniel Borkmann" <daniel@iogearbox.net>,
	"Lorenzo Bianconi" <lorenzo.bianconi@redhat.com>,
	"David Ahern" <dsahern@gmail.com>,
	"Andrii Nakryiko" <andrii.nakryiko@gmail.com>,
	"Alexei Starovoitov" <alexei.starovoitov@gmail.com>,
	"John Fastabend" <john.fastabend@gmail.com>,
	"Maciej Fijalkowski" <maciej.fijalkowski@intel.com>,
	"Björn Töpel" <bjorn.topel@gmail.com>
Subject: Re: [PATCHv7 bpf-next 1/4] bpf: run devmap xdp_prog on flush instead of bulk enqueue
Date: Wed, 14 Apr 2021 17:17:11 -0700
Message-ID: <20210415001711.dpbt2lej75ry6v7a@kafai-mbp.dhcp.thefacebook.com>
In-Reply-To: <20210414122610.4037085-2-liuhangbin@gmail.com>

On Wed, Apr 14, 2021 at 08:26:07PM +0800, Hangbin Liu wrote:
[ ... ]

> diff --git a/kernel/bpf/devmap.c b/kernel/bpf/devmap.c
> index aa516472ce46..3980fb3bfb09 100644
> --- a/kernel/bpf/devmap.c
> +++ b/kernel/bpf/devmap.c
> @@ -57,6 +57,7 @@ struct xdp_dev_bulk_queue {
>  	struct list_head flush_node;
>  	struct net_device *dev;
>  	struct net_device *dev_rx;
> +	struct bpf_prog *xdp_prog;
>  	unsigned int count;
>  };
>  
> @@ -326,22 +327,71 @@ bool dev_map_can_have_prog(struct bpf_map *map)
>  	return false;
>  }
>  
> +static int dev_map_bpf_prog_run(struct bpf_prog *xdp_prog,
> +				struct xdp_frame **frames, int n,
> +				struct net_device *dev)
> +{
> +	struct xdp_txq_info txq = { .dev = dev };
> +	struct xdp_buff xdp;
> +	int i, nframes = 0;
> +
> +	for (i = 0; i < n; i++) {
> +		struct xdp_frame *xdpf = frames[i];
> +		u32 act;
> +		int err;
> +
> +		xdp_convert_frame_to_buff(xdpf, &xdp);
> +		xdp.txq = &txq;
> +
> +		act = bpf_prog_run_xdp(xdp_prog, &xdp);
> +		switch (act) {
> +		case XDP_PASS:
> +			err = xdp_update_frame_from_buff(&xdp, xdpf);
> +			if (unlikely(err < 0))
> +				xdp_return_frame_rx_napi(xdpf);
> +			else
> +				frames[nframes++] = xdpf;
> +			break;
> +		default:
> +			bpf_warn_invalid_xdp_action(act);
> +			fallthrough;
> +		case XDP_ABORTED:
> +			trace_xdp_exception(dev, xdp_prog, act);
> +			fallthrough;
> +		case XDP_DROP:
> +			xdp_return_frame_rx_napi(xdpf);
> +			break;
> +		}
> +	}
> +	return nframes; /* sent frames count */
> +}
> +
>  static void bq_xmit_all(struct xdp_dev_bulk_queue *bq, u32 flags)
>  {
>  	struct net_device *dev = bq->dev;
> -	int sent = 0, err = 0;
> +	int sent = 0, drops = 0, err = 0;
> +	unsigned int cnt = bq->count;
> +	int to_send = cnt;
>  	int i;
>  
> -	if (unlikely(!bq->count))
> +	if (unlikely(!cnt))
>  		return;
>  
> -	for (i = 0; i < bq->count; i++) {
> +	for (i = 0; i < cnt; i++) {
>  		struct xdp_frame *xdpf = bq->q[i];
>  
>  		prefetch(xdpf);
>  	}
>  
> -	sent = dev->netdev_ops->ndo_xdp_xmit(dev, bq->count, bq->q, flags);
> +	if (bq->xdp_prog) {
bq->xdp_prog is used here

> +		to_send = dev_map_bpf_prog_run(bq->xdp_prog, bq->q, cnt, dev);
> +		if (!to_send)
> +			goto out;
> +
> +		drops = cnt - to_send;
> +	}
> +

[ ... ]
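
As an aside, the frames[] handling above is a plain in-place compaction:
frames the prog lets through are packed to the front and the new count is
returned, so bq_xmit_all() only hands q[0..to_send-1] on for transmit.
A standalone sketch of the same pattern (plain C, not kernel code; keep()
stands in for the XDP_PASS verdict):

#include <stdio.h>

static int keep(int v)
{
	return v % 2 == 0;	/* stand-in for "prog returned XDP_PASS" */
}

static int compact(int *frames, int n)
{
	int i, nframes = 0;

	for (i = 0; i < n; i++) {
		if (keep(frames[i]))
			frames[nframes++] = frames[i];
		/* else: the frame would be returned/dropped here */
	}
	return nframes;		/* count of frames kept for transmit */
}

int main(void)
{
	int q[] = { 1, 2, 3, 4, 5, 6 };
	int i, to_send = compact(q, 6);

	for (i = 0; i < to_send; i++)
		printf("%d ", q[i]);	/* prints: 2 4 6 */
	printf("\n");
	return 0;
}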

>  static void bq_enqueue(struct net_device *dev, struct xdp_frame *xdpf,
> -		       struct net_device *dev_rx)
> +		       struct net_device *dev_rx, struct bpf_prog *xdp_prog)
>  {
>  	struct list_head *flush_list = this_cpu_ptr(&dev_flush_list);
>  	struct xdp_dev_bulk_queue *bq = this_cpu_ptr(dev->xdp_bulkq);
> @@ -412,18 +466,22 @@ static void bq_enqueue(struct net_device *dev, struct xdp_frame *xdpf,
>  	/* Ingress dev_rx will be the same for all xdp_frame's in
>  	 * bulk_queue, because bq stored per-CPU and must be flushed
>  	 * from net_device drivers NAPI func end.
> +	 *
> +	 * Do the same with xdp_prog and flush_list since these fields
> +	 * are only ever modified together.
>  	 */
> -	if (!bq->dev_rx)
> +	if (!bq->dev_rx) {
>  		bq->dev_rx = dev_rx;
> +		bq->xdp_prog = xdp_prog;
bq->xdp_prog is assigned here and could be used later in bq_xmit_all().
How is bq->xdp_prog protected? Are they all under one rcu_read_lock()?
It is not very obvious after taking a quick look at xdp_do_flush[_map].

e.g. what if the devmap elem gets deleted.
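
To make the question concrete, roughly the window I have in mind, assuming
the usual driver NAPI pattern (hypothetical example; everything except
xdp_do_redirect() and xdp_do_flush_map() is made up for illustration):

/* Hypothetical driver poll, heavily simplified. */
static int example_napi_poll(struct example_ring *ring, int budget)
{
	int done = 0;

	/* NAPI/softirq context -- is this the RCU read-side section
	 * being relied on here?
	 */
	while (done < budget && example_rx_pending(ring)) {
		struct xdp_buff xdp;

		example_fill_xdp_buff(ring, &xdp);

		/* XDP_REDIRECT into a devmap ends up in bq_enqueue(),
		 * which now also stashes dst->xdp_prog in bq->xdp_prog.
		 */
		xdp_do_redirect(ring->netdev, &xdp, ring->xdp_prog);
		done++;
	}

	/* If the devmap elem is deleted between the redirect above and
	 * the flush below, what keeps the prog stashed in bq->xdp_prog
	 * valid until bq_xmit_all() runs it?
	 */
	xdp_do_flush_map();	/* -> bq_xmit_all() -> dev_map_bpf_prog_run() */

	return done;
}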

[ ... ]

>  static inline int __xdp_enqueue(struct net_device *dev, struct xdp_buff *xdp,
> -			       struct net_device *dev_rx)
> +				struct net_device *dev_rx,
> +				struct bpf_prog *xdp_prog)
>  {
>  	struct xdp_frame *xdpf;
>  	int err;
> @@ -439,42 +497,14 @@ static inline int __xdp_enqueue(struct net_device *dev, struct xdp_buff *xdp,
>  	if (unlikely(!xdpf))
>  		return -EOVERFLOW;
>  
> -	bq_enqueue(dev, xdpf, dev_rx);
> +	bq_enqueue(dev, xdpf, dev_rx, xdp_prog);
>  	return 0;
>  }
>  
[ ... ]

> @@ -482,12 +512,7 @@ int dev_map_enqueue(struct bpf_dtab_netdev *dst, struct xdp_buff *xdp,
>  {
>  	struct net_device *dev = dst->dev;
>  
> -	if (dst->xdp_prog) {
> -		xdp = dev_map_run_prog(dev, xdp, dst->xdp_prog);
> -		if (!xdp)
> -			return 0;
> -	}
> -	return __xdp_enqueue(dev, xdp, dev_rx);
> +	return __xdp_enqueue(dev, xdp, dev_rx, dst->xdp_prog);
>  }
