From: Alexei Starovoitov <ast@fb.com>
To: Dave Marchevsky <davemarchevsky@fb.com>, bpf@vger.kernel.org
Cc: Alexei Starovoitov <ast@kernel.org>,
	Daniel Borkmann <daniel@iogearbox.net>,
	Andrii Nakryiko <andrii@kernel.org>,
	Kernel Team <kernel-team@fb.com>, Tejun Heo <tj@kernel.org>
Subject: Re: [RFC PATCH bpf-next 06/11] bpf: Add bpf_rbtree_{lock,unlock} helpers
Date: Mon, 1 Aug 2022 14:58:55 -0700	[thread overview]
Message-ID: <93985c8f-1bcc-363e-ecf6-513b84d785ae@fb.com> (raw)
In-Reply-To: <20220722183438.3319790-7-davemarchevsky@fb.com>

On 7/22/22 11:34 AM, Dave Marchevsky wrote:
> These helpers are equivalent to bpf_spin_{lock,unlock}, but the verifier
> doesn't try to enforce that no helper calls occur when there's an active
> spin lock.
> 
> [ TODO: Currently the verifier doesn't do _anything_ spinlock related
> when it sees one of these, including setting active_spin_lock. This is
> probably too lenient. Also, EXPORT_SYMBOL for internal lock helpers
> might not be the best code structure. ]
> 
> Future patches will add enforcement of "rbtree helpers must always be
> called when lock is held" constraint.
> 
> Signed-off-by: Dave Marchevsky <davemarchevsky@fb.com>
> ---
>   include/uapi/linux/bpf.h       | 20 ++++++++++++++++++++
>   kernel/bpf/helpers.c           | 12 ++++++++++--
>   kernel/bpf/rbtree.c            | 29 +++++++++++++++++++++++++++++
>   kernel/bpf/verifier.c          |  2 ++
>   tools/include/uapi/linux/bpf.h | 20 ++++++++++++++++++++
>   5 files changed, 81 insertions(+), 2 deletions(-)
> 
> diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h
> index c677d92de3bc..d21e2c99ea14 100644
> --- a/include/uapi/linux/bpf.h
> +++ b/include/uapi/linux/bpf.h
> @@ -5391,6 +5391,24 @@ union bpf_attr {
>    *
>    *	Return
>    *		Ptr to lock
> + *
> + * void *bpf_rbtree_lock(struct bpf_spin_lock *lock)
> + *	Description
> + *		Like bpf_spin_lock helper, but use separate helper for now
> + *		as we don't want this helper to have special meaning to the verifier
> + *		so that we can do rbtree helper calls between rbtree_lock/unlock
> + *
> + *	Return
> + *		0
> + *
> + * void *bpf_rbtree_unlock(struct bpf_spin_lock *lock)
> + *	Description
> + *		Like bpf_spin_unlock helper, but use separate helper for now
> + *		as we don't want this helper to have special meaning to the verifier
> + *		so that we can do rbtree helper calls between rbtree_lock/unlock
> + *
> + *	Return
> + *		0
>    */
>   #define __BPF_FUNC_MAPPER(FN)		\
>   	FN(unspec),			\
> @@ -5607,6 +5625,8 @@ union bpf_attr {
>   	FN(rbtree_remove),		\
>   	FN(rbtree_free_node),		\
>   	FN(rbtree_get_lock),		\
> +	FN(rbtree_lock),		\
> +	FN(rbtree_unlock),		\
>   	/* */
>   
>   /* integer value in 'imm' field of BPF_CALL instruction selects which helper
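
For reference, a minimal sketch of the usage pattern these helpers are meant
to enable from a BPF program (only rbtree_get_lock/rbtree_lock/rbtree_unlock
come from this patch; the map 'rbtree', the node layout, and the alloc/add
helper signatures are assumptions based on the rest of the series):

	struct node_data *n;	/* assumed node layout with a 'key' field */
	void *lock;

	n = bpf_rbtree_alloc_node(&rbtree, sizeof(*n));	/* assumed signature */
	if (!n)
		return 0;
	n->key = 42;

	lock = bpf_rbtree_get_lock(&rbtree);
	bpf_rbtree_lock(lock);
	/* rbtree helper calls are legal here because rbtree_lock, unlike
	 * bpf_spin_lock, has no special meaning to the verifier
	 */
	bpf_rbtree_add(&rbtree, n, node_less);	/* node_less: assumed callback */
	bpf_rbtree_unlock(lock);
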
> diff --git a/kernel/bpf/helpers.c b/kernel/bpf/helpers.c
> index 257a808bb767..fa2dba1dcec8 100644
> --- a/kernel/bpf/helpers.c
> +++ b/kernel/bpf/helpers.c
> @@ -303,7 +303,7 @@ static inline void __bpf_spin_unlock(struct bpf_spin_lock *lock)
>   
>   static DEFINE_PER_CPU(unsigned long, irqsave_flags);
>   
> -static inline void __bpf_spin_lock_irqsave(struct bpf_spin_lock *lock)
> +inline void __bpf_spin_lock_irqsave(struct bpf_spin_lock *lock)
>   {
>   	unsigned long flags;
>   
> @@ -311,6 +311,7 @@ static inline void __bpf_spin_lock_irqsave(struct bpf_spin_lock *lock)
>   	__bpf_spin_lock(lock);
>   	__this_cpu_write(irqsave_flags, flags);
>   }
> +EXPORT_SYMBOL(__bpf_spin_lock_irqsave);

What is it for?
It's not used from modules.
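
Since kernel/bpf/rbtree.c is built into the kernel and not a module, one
possible alternative (a sketch only, not what this patch does) is to drop
the EXPORT_SYMBOLs and the local extern declarations in rbtree.c and
declare the functions in a shared header instead:

	/* e.g. in include/linux/bpf.h: visible to other built-in BPF code,
	 * no EXPORT_SYMBOL needed since nothing modular calls these
	 */
	void __bpf_spin_lock_irqsave(struct bpf_spin_lock *lock);
	void __bpf_spin_unlock_irqrestore(struct bpf_spin_lock *lock);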

>   
>   notrace BPF_CALL_1(bpf_spin_lock, struct bpf_spin_lock *, lock)
>   {
> @@ -325,7 +326,7 @@ const struct bpf_func_proto bpf_spin_lock_proto = {
>   	.arg1_type	= ARG_PTR_TO_SPIN_LOCK,
>   };
>   
> -static inline void __bpf_spin_unlock_irqrestore(struct bpf_spin_lock *lock)
> +inline void __bpf_spin_unlock_irqrestore(struct bpf_spin_lock *lock)
>   {
>   	unsigned long flags;
>   
> @@ -333,6 +334,7 @@ static inline void __bpf_spin_unlock_irqrestore(struct bpf_spin_lock *lock)
>   	__bpf_spin_unlock(lock);
>   	local_irq_restore(flags);
>   }
> +EXPORT_SYMBOL(__bpf_spin_unlock_irqrestore);
>   
>   notrace BPF_CALL_1(bpf_spin_unlock, struct bpf_spin_lock *, lock)
>   {
> @@ -1588,6 +1590,8 @@ const struct bpf_func_proto bpf_rbtree_find_proto __weak;
>   const struct bpf_func_proto bpf_rbtree_remove_proto __weak;
>   const struct bpf_func_proto bpf_rbtree_free_node_proto __weak;
>   const struct bpf_func_proto bpf_rbtree_get_lock_proto __weak;
> +const struct bpf_func_proto bpf_rbtree_lock_proto __weak;
> +const struct bpf_func_proto bpf_rbtree_unlock_proto __weak;
>   
>   const struct bpf_func_proto *
>   bpf_base_func_proto(enum bpf_func_id func_id)
> @@ -1689,6 +1693,10 @@ bpf_base_func_proto(enum bpf_func_id func_id)
>   		return &bpf_rbtree_free_node_proto;
>   	case BPF_FUNC_rbtree_get_lock:
>   		return &bpf_rbtree_get_lock_proto;
> +	case BPF_FUNC_rbtree_lock:
> +		return &bpf_rbtree_lock_proto;
> +	case BPF_FUNC_rbtree_unlock:
> +		return &bpf_rbtree_unlock_proto;
>   	default:
>   		break;
>   	}
> diff --git a/kernel/bpf/rbtree.c b/kernel/bpf/rbtree.c
> index c6f0a2a083f6..bf2e30af82ec 100644
> --- a/kernel/bpf/rbtree.c
> +++ b/kernel/bpf/rbtree.c
> @@ -262,6 +262,35 @@ const struct bpf_func_proto bpf_rbtree_get_lock_proto = {
>   	.arg1_type = ARG_CONST_MAP_PTR,
>   };
>   
> +extern void __bpf_spin_unlock_irqrestore(struct bpf_spin_lock *lock);
> +extern void __bpf_spin_lock_irqsave(struct bpf_spin_lock *lock);
> +
> +BPF_CALL_1(bpf_rbtree_lock, void *, lock)
> +{
> +	__bpf_spin_lock_irqsave((struct bpf_spin_lock *)lock);
> +	return 0;
> +}

It doesn't have to be bpf_spin_lock.
A normal spinlock will do.
bpf_spin_lock has a specific size requirement so that, when it's used
inside a map value, the value size doesn't change from kernel to kernel.
Since this lock is hidden from the program it can be any lock type.

Also, it needs to remember the current task or something equivalent.
Just checking spin_is_locked() from bpf_rbtree_add() is not enough to
prove that this program is the one holding the lock.
Instead of remembering 'current' we could pass a hidden 'prog' pointer
and remember it in bpf_rbtree_lock, then pass that hidden prog ptr to
add/remove/find and compare.
But that's probably overkill. Remembering the current task should be fine.
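
A rough sketch of that approach, for illustration only (the struct layout,
the helper taking a map pointer instead of an exposed lock, and the
ownership check are all assumptions, and irqsave handling is omitted):

	/* Sketch: lock hidden inside the rbtree map, so it doesn't need
	 * bpf_spin_lock's size guarantees; field names are illustrative.
	 */
	struct bpf_rbtree {
		struct bpf_map map;
		struct rb_root_cached root;
		raw_spinlock_t lock;		/* any kernel lock type works */
		struct task_struct *lock_owner;	/* task that took the lock */
	};

	BPF_CALL_1(bpf_rbtree_lock, struct bpf_map *, map)
	{
		struct bpf_rbtree *tree = container_of(map, struct bpf_rbtree, map);

		raw_spin_lock(&tree->lock);	/* irqsave variant omitted for brevity */
		tree->lock_owner = current;
		return 0;
	}

	BPF_CALL_1(bpf_rbtree_unlock, struct bpf_map *, map)
	{
		struct bpf_rbtree *tree = container_of(map, struct bpf_rbtree, map);

		tree->lock_owner = NULL;
		raw_spin_unlock(&tree->lock);
		return 0;
	}

	/* add/remove/find would then check ownership instead of just
	 * spin_is_locked():
	 */
	static bool rbtree_lock_held(struct bpf_rbtree *tree)
	{
		return tree->lock_owner == current;
	}

bpf_rbtree_add() and friends could then bail out when rbtree_lock_held()
is false, instead of only checking that the lock happens to be locked by
someone.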


Thread overview: 37+ messages
2022-07-22 18:34 [RFC PATCH bpf-next 00/11] bpf: Introduce rbtree map Dave Marchevsky
2022-07-22 18:34 ` [RFC PATCH bpf-next 01/11] bpf: Pull repeated reg access bounds check into helper fn Dave Marchevsky
2022-07-22 18:34 ` [RFC PATCH bpf-next 02/11] bpf: Add verifier support for custom callback return range Dave Marchevsky
2022-07-22 18:34 ` [RFC PATCH bpf-next 03/11] bpf: Add rb_node_off to bpf_map Dave Marchevsky
2022-08-01 22:19   ` Alexei Starovoitov
2022-07-22 18:34 ` [RFC PATCH bpf-next 04/11] bpf: Add rbtree map Dave Marchevsky
2022-08-01 21:49   ` Alexei Starovoitov
2022-07-22 18:34 ` [RFC PATCH bpf-next 05/11] bpf: Add bpf_spin_lock member to rbtree Dave Marchevsky
2022-08-01 22:17   ` Alexei Starovoitov
2022-08-02 13:59     ` Kumar Kartikeya Dwivedi
2022-08-02 15:30       ` Alexei Starovoitov
2022-08-10 21:46     ` Kumar Kartikeya Dwivedi
2022-08-10 22:06       ` Alexei Starovoitov
2022-08-10 23:16         ` Kumar Kartikeya Dwivedi
2022-08-15  5:33       ` Yonghong Song
2022-08-15  5:37         ` Kumar Kartikeya Dwivedi
2022-07-22 18:34 ` [RFC PATCH bpf-next 06/11] bpf: Add bpf_rbtree_{lock,unlock} helpers Dave Marchevsky
2022-08-01 21:58   ` Alexei Starovoitov [this message]
2022-07-22 18:34 ` [RFC PATCH bpf-next 07/11] bpf: Enforce spinlock hold for bpf_rbtree_{add,remove,find} Dave Marchevsky
2022-07-22 18:34 ` [RFC PATCH bpf-next 08/11] bpf: Add OBJ_NON_OWNING_REF type flag Dave Marchevsky
2022-08-01 22:41   ` Alexei Starovoitov
2022-07-22 18:34 ` [RFC PATCH bpf-next 09/11] bpf: Add CONDITIONAL_RELEASE " Dave Marchevsky
2022-08-01 22:23   ` Alexei Starovoitov
2022-07-22 18:34 ` [RFC PATCH bpf-next 10/11] bpf: Introduce PTR_ITER and PTR_ITER_END type flags Dave Marchevsky
2022-07-29 16:31   ` Tejun Heo
2022-08-01 22:44   ` Alexei Starovoitov
2022-08-02 13:05     ` Kumar Kartikeya Dwivedi
2022-08-02 15:10       ` Alexei Starovoitov
2022-08-10 17:56     ` Dave Marchevsky
2022-07-22 18:34 ` [RFC PATCH bpf-next 11/11] selftests/bpf: Add rbtree map tests Dave Marchevsky
2022-07-28  7:18   ` Yonghong Song
2022-08-10 17:48     ` Dave Marchevsky
2022-07-28  7:04 ` [RFC PATCH bpf-next 00/11] bpf: Introduce rbtree map Yonghong Song
2022-08-10 17:54   ` Dave Marchevsky
2022-08-01 21:27 ` Alexei Starovoitov
2022-08-10 18:11   ` Dave Marchevsky
2022-08-02 22:02 ` Andrii Nakryiko
