From: Daniel Borkmann <daniel@iogearbox.net>
To: Dmitry Yakunin <zeil@yandex-team.ru>,
	alexei.starovoitov@gmail.com, netdev@vger.kernel.org,
	bpf@vger.kernel.org
Cc: sdf@google.com
Subject: Re: [PATCH bpf-next v3 3/4] bpf: export some cgroup storages allocation helpers for reusing
Date: Thu, 16 Jul 2020 21:46:00 +0200
Message-ID: <ab6460c2-1c01-3471-4368-1ddb19fa3695@iogearbox.net>
In-Reply-To: <20200715195132.4286-4-zeil@yandex-team.ru>

On 7/15/20 9:51 PM, Dmitry Yakunin wrote:
> This patch exports bpf_cgroup_storages_alloc and bpf_cgroup_storages_free
> helpers to the header file and reuses them in bpf_test_run.
> 
> v2:
>    - fix build without CONFIG_CGROUP_BPF (kernel test robot <lkp@intel.com>)
> 
> Signed-off-by: Dmitry Yakunin <zeil@yandex-team.ru>
> ---
>   include/linux/bpf-cgroup.h | 36 ++++++++++++++++++++++++++++++++++++
>   kernel/bpf/cgroup.c        | 25 -------------------------
>   net/bpf/test_run.c         | 16 ++++------------
>   3 files changed, 40 insertions(+), 37 deletions(-)
> 
> diff --git a/include/linux/bpf-cgroup.h b/include/linux/bpf-cgroup.h
> index 2c6f266..5c10fe6 100644
> --- a/include/linux/bpf-cgroup.h
> +++ b/include/linux/bpf-cgroup.h
> @@ -175,6 +175,33 @@ int bpf_percpu_cgroup_storage_copy(struct bpf_map *map, void *key, void *value);
>   int bpf_percpu_cgroup_storage_update(struct bpf_map *map, void *key,
>   				     void *value, u64 flags);
>   
> +static inline void bpf_cgroup_storages_free(struct bpf_cgroup_storage
> +					    *storage[MAX_BPF_CGROUP_STORAGE_TYPE])
> +{
> +	enum bpf_cgroup_storage_type stype;
> +
> +	for_each_cgroup_storage_type(stype)
> +		bpf_cgroup_storage_free(storage[stype]);
> +}
> +
> +static inline int bpf_cgroup_storages_alloc(struct bpf_cgroup_storage
> +					    *storage[MAX_BPF_CGROUP_STORAGE_TYPE],
> +					    struct bpf_prog *prog)
> +{
> +	enum bpf_cgroup_storage_type stype;
> +
> +	for_each_cgroup_storage_type(stype) {
> +		storage[stype] = bpf_cgroup_storage_alloc(prog, stype);
> +		if (IS_ERR(storage[stype])) {
> +			storage[stype] = NULL;
> +			bpf_cgroup_storages_free(storage);
> +			return -ENOMEM;
> +		}
> +	}
> +
> +	return 0;
> +}
> +
>   /* Wrappers for __cgroup_bpf_run_filter_skb() guarded by cgroup_bpf_enabled. */
>   #define BPF_CGROUP_RUN_PROG_INET_INGRESS(sk, skb)			      \
>   ({									      \
> @@ -398,6 +425,15 @@ static inline int bpf_percpu_cgroup_storage_update(struct bpf_map *map,
>   	return 0;
>   }
>   
> +static inline void bpf_cgroup_storages_free(
> +	struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE]) {}
> +
> +static inline int bpf_cgroup_storages_alloc(
> +	struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE],
> +	struct bpf_prog *prog) {
> +	return 0;
> +}
> +
>   #define cgroup_bpf_enabled (0)
>   #define BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, type, t_ctx) ({ 0; })
>   #define BPF_CGROUP_PRE_CONNECT_ENABLED(sk) (0)
> diff --git a/kernel/bpf/cgroup.c b/kernel/bpf/cgroup.c
> index ac53102..e4c2792 100644
> --- a/kernel/bpf/cgroup.c
> +++ b/kernel/bpf/cgroup.c
> @@ -28,31 +28,6 @@ void cgroup_bpf_offline(struct cgroup *cgrp)
>   	percpu_ref_kill(&cgrp->bpf.refcnt);
>   }
>   
> -static void bpf_cgroup_storages_free(struct bpf_cgroup_storage *storages[])
> -{
> -	enum bpf_cgroup_storage_type stype;
> -
> -	for_each_cgroup_storage_type(stype)
> -		bpf_cgroup_storage_free(storages[stype]);
> -}
> -
> -static int bpf_cgroup_storages_alloc(struct bpf_cgroup_storage *storages[],
> -				     struct bpf_prog *prog)
> -{
> -	enum bpf_cgroup_storage_type stype;
> -
> -	for_each_cgroup_storage_type(stype) {
> -		storages[stype] = bpf_cgroup_storage_alloc(prog, stype);
> -		if (IS_ERR(storages[stype])) {
> -			storages[stype] = NULL;
> -			bpf_cgroup_storages_free(storages);
> -			return -ENOMEM;
> -		}
> -	}
> -
> -	return 0;
> -}
> -

nit: Can't we just export them from here instead of inlining? Given this is for
test_run.c anyway, I don't think it's worth the extra churn.
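
(For concreteness, a minimal sketch of the alternative being suggested --
keep the two definitions in kernel/bpf/cgroup.c, drop their static
qualifier, and only declare them in the header. The exact declarations
below are illustrative, not taken from any posted patch:

	/* include/linux/bpf-cgroup.h, CONFIG_CGROUP_BPF branch */
	void bpf_cgroup_storages_free(struct bpf_cgroup_storage
				      *storages[MAX_BPF_CGROUP_STORAGE_TYPE]);
	int bpf_cgroup_storages_alloc(struct bpf_cgroup_storage
				      *storages[MAX_BPF_CGROUP_STORAGE_TYPE],
				      struct bpf_prog *prog);

	/* kernel/bpf/cgroup.c -- same body as before, minus "static".
	 * Both files are built into the kernel image, so plain external
	 * linkage should suffice; no EXPORT_SYMBOL would be needed.
	 */
	int bpf_cgroup_storages_alloc(struct bpf_cgroup_storage *storages[],
				      struct bpf_prog *prog)
	{
		enum bpf_cgroup_storage_type stype;

		for_each_cgroup_storage_type(stype) {
			storages[stype] = bpf_cgroup_storage_alloc(prog, stype);
			if (IS_ERR(storages[stype])) {
				/* Failed slot holds an ERR_PTR; reset it to
				 * NULL before freeing the rest.
				 */
				storages[stype] = NULL;
				bpf_cgroup_storages_free(storages);
				return -ENOMEM;
			}
		}

		return 0;
	}

The !CONFIG_CGROUP_BPF stubs from this patch would presumably still be
needed, since net/bpf/test_run.c can be built without CONFIG_CGROUP_BPF,
as the v2 changelog above notes.)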

>   static void bpf_cgroup_storages_assign(struct bpf_cgroup_storage *dst[],
>   				       struct bpf_cgroup_storage *src[])
>   {
> diff --git a/net/bpf/test_run.c b/net/bpf/test_run.c
> index 0e92973..050390d 100644
> --- a/net/bpf/test_run.c
> +++ b/net/bpf/test_run.c
> @@ -19,20 +19,13 @@ static int bpf_test_run(struct bpf_prog *prog, void *ctx, u32 repeat,
>   			u32 *retval, u32 *time, bool xdp)
>   {
>   	struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE] = { NULL };
> -	enum bpf_cgroup_storage_type stype;
>   	u64 time_start, time_spent = 0;
>   	int ret = 0;
>   	u32 i;
>   
> -	for_each_cgroup_storage_type(stype) {
> -		storage[stype] = bpf_cgroup_storage_alloc(prog, stype);
> -		if (IS_ERR(storage[stype])) {
> -			storage[stype] = NULL;
> -			for_each_cgroup_storage_type(stype)
> -				bpf_cgroup_storage_free(storage[stype]);
> -			return -ENOMEM;
> -		}
> -	}
> +	ret = bpf_cgroup_storages_alloc(storage, prog);
> +	if (ret)
> +		return ret;
>   
>   	if (!repeat)
>   		repeat = 1;
> @@ -72,8 +65,7 @@ static int bpf_test_run(struct bpf_prog *prog, void *ctx, u32 repeat,
>   	do_div(time_spent, repeat);
>   	*time = time_spent > U32_MAX ? U32_MAX : (u32)time_spent;
>   
> -	for_each_cgroup_storage_type(stype)
> -		bpf_cgroup_storage_free(storage[stype]);
> +	bpf_cgroup_storages_free(storage);
>   
>   	return ret;
>   }
> 


Thread overview: 9+ messages
2020-07-15 19:51 [PATCH bpf-next v3 0/4] bpf: cgroup skb improvements for bpf_prog_test_run Dmitry Yakunin
2020-07-15 19:51 ` [PATCH bpf-next v3 1/4] bpf: setup socket family and addresses in bpf_prog_test_run_skb Dmitry Yakunin
2020-07-15 19:51 ` [PATCH bpf-next v3 2/4] bpf: allow to specify ifindex for skb " Dmitry Yakunin
2020-07-16 19:42   ` Daniel Borkmann
2020-07-15 19:51 ` [PATCH bpf-next v3 3/4] bpf: export some cgroup storages allocation helpers for reusing Dmitry Yakunin
2020-07-16 19:46   ` Daniel Borkmann [this message]
2020-07-15 19:51 ` [PATCH bpf-next v3 4/4] bpf: try to use existing cgroup storage in bpf_prog_test_run_skb Dmitry Yakunin
2020-07-16 20:18   ` Daniel Borkmann
2020-07-21 11:06     ` Dmitry Yakunin
