All of lore.kernel.org
 help / color / mirror / Atom feed
From: Maxim Mikityanskiy <maxtram95@gmail.com>
To: Eduard Zingerman <eddyz87@gmail.com>,
	Alexei Starovoitov <ast@kernel.org>,
	Daniel Borkmann <daniel@iogearbox.net>,
	Andrii Nakryiko <andrii@kernel.org>
Cc: John Fastabend <john.fastabend@gmail.com>,
	Martin KaFai Lau <martin.lau@linux.dev>,
	Song Liu <song@kernel.org>,
	Yonghong Song <yonghong.song@linux.dev>,
	KP Singh <kpsingh@kernel.org>,
	Stanislav Fomichev <sdf@google.com>, Hao Luo <haoluo@google.com>,
	Jiri Olsa <jolsa@kernel.org>, Mykola Lysenko <mykolal@fb.com>,
	Shuah Khan <shuah@kernel.org>,
	"David S. Miller" <davem@davemloft.net>,
	Jakub Kicinski <kuba@kernel.org>,
	Jesper Dangaard Brouer <hawk@kernel.org>,
	bpf@vger.kernel.org, linux-kselftest@vger.kernel.org,
	netdev@vger.kernel.org
Subject: [PATCH bpf-next 14/15] bpf: Optimize state pruning for spilled scalars
Date: Wed, 20 Dec 2023 23:40:12 +0200	[thread overview]
Message-ID: <20231220214013.3327288-15-maxtram95@gmail.com> (raw)
In-Reply-To: <20231220214013.3327288-1-maxtram95@gmail.com>

From: Eduard Zingerman <eddyz87@gmail.com>

Changes for scalar ID tracking of spilled unbound scalars lead to a
certain verification performance regression. This commit mitigates the
regression by exploiting the following properties maintained by
check_stack_read_fixed_off():
- a mix of STACK_MISC, STACK_ZERO and STACK_INVALID marks is read as
  an unbounded scalar register;
- spi with all slots marked STACK_ZERO is read as a scalar register
  with value zero.

This commit modifies stacksafe() to consider the situations above
equivalent.

Veristat results after this patch show significant gains:

$ ./veristat -e file,prog,states -f '!states_pct<10' -f '!states_b<10' -C not-opt after
File              Program   States (A)  States (B)  States    (DIFF)
----------------  --------  ----------  ----------  ----------------
pyperf180.bpf.o   on_event       10456        8422   -2034 (-19.45%)
pyperf600.bpf.o   on_event       37319       22519  -14800 (-39.66%)
strobemeta.bpf.o  on_event       13435        4703   -8732 (-64.99%)

Signed-off-by: Eduard Zingerman <eddyz87@gmail.com>
---
 kernel/bpf/verifier.c | 83 +++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 83 insertions(+)

diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index b6e252539e52..a020d4d83524 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -1168,6 +1168,12 @@ static void mark_stack_slot_misc(struct bpf_verifier_env *env, u8 *stype)
 	*stype = STACK_MISC;
 }
 
+static bool is_spilled_scalar_reg64(const struct bpf_stack_state *stack)
+{
+	return stack->slot_type[0] == STACK_SPILL &&
+	       stack->spilled_ptr.type == SCALAR_VALUE;
+}
+
 static void scrub_spilled_slot(u8 *stype)
 {
 	if (*stype != STACK_INVALID)
@@ -16449,11 +16455,45 @@ static bool regsafe(struct bpf_verifier_env *env, struct bpf_reg_state *rold,
 	}
 }
 
+static bool is_stack_zero64(struct bpf_stack_state *stack)
+{
+	u32 i;
+
+	for (i = 0; i < ARRAY_SIZE(stack->slot_type); ++i)
+		if (stack->slot_type[i] != STACK_ZERO)
+			return false;
+	return true;
+}
+
+static bool is_stack_unbound_slot64(struct bpf_verifier_env *env,
+				    struct bpf_stack_state *stack)
+{
+	u32 i;
+
+	for (i = 0; i < ARRAY_SIZE(stack->slot_type); ++i)
+		if (stack->slot_type[i] != STACK_ZERO &&
+		    stack->slot_type[i] != STACK_MISC &&
+		    (!env->allow_uninit_stack || stack->slot_type[i] != STACK_INVALID))
+			return false;
+	return true;
+}
+
+static bool is_spilled_unbound_scalar_reg64(struct bpf_stack_state *stack)
+{
+	return is_spilled_scalar_reg64(stack) && __is_scalar_unbounded(&stack->spilled_ptr);
+}
+
 static bool stacksafe(struct bpf_verifier_env *env, struct bpf_func_state *old,
 		      struct bpf_func_state *cur, struct bpf_idmap *idmap, bool exact)
 {
+	struct bpf_reg_state unbound_reg = {};
+	struct bpf_reg_state zero_reg = {};
 	int i, spi;
 
+	__mark_reg_unknown(env, &unbound_reg);
+	__mark_reg_const_zero(env, &zero_reg);
+	zero_reg.precise = true;
+
 	/* walk slots of the explored stack and ignore any additional
 	 * slots in the current stack, since explored(safe) state
 	 * didn't use them
@@ -16474,6 +16514,49 @@ static bool stacksafe(struct bpf_verifier_env *env, struct bpf_func_state *old,
 			continue;
 		}
 
+		/* load of stack value with all MISC and ZERO slots produces unbounded
+		 * scalar value, call regsafe to ensure scalar ids are compared.
+		 */
+		if (is_spilled_unbound_scalar_reg64(&old->stack[spi]) &&
+		    is_stack_unbound_slot64(env, &cur->stack[spi])) {
+			i += BPF_REG_SIZE - 1;
+			if (!regsafe(env, &old->stack[spi].spilled_ptr, &unbound_reg,
+				     idmap, exact))
+				return false;
+			continue;
+		}
+
+		if (is_stack_unbound_slot64(env, &old->stack[spi]) &&
+		    is_spilled_unbound_scalar_reg64(&cur->stack[spi])) {
+			i += BPF_REG_SIZE - 1;
+			if (!regsafe(env,  &unbound_reg, &cur->stack[spi].spilled_ptr,
+				     idmap, exact))
+				return false;
+			continue;
+		}
+
+		/* load of stack value with all ZERO slots produces scalar value 0,
+		 * call regsafe to ensure scalar ids are compared and precision
+		 * flags are taken into account.
+		 */
+		if (is_spilled_scalar_reg64(&old->stack[spi]) &&
+		    is_stack_zero64(&cur->stack[spi])) {
+			if (!regsafe(env, &old->stack[spi].spilled_ptr, &zero_reg,
+				     idmap, exact))
+				return false;
+			i += BPF_REG_SIZE - 1;
+			continue;
+		}
+
+		if (is_stack_zero64(&old->stack[spi]) &&
+		    is_spilled_scalar_reg64(&cur->stack[spi])) {
+			if (!regsafe(env, &zero_reg, &cur->stack[spi].spilled_ptr,
+				     idmap, exact))
+				return false;
+			i += BPF_REG_SIZE - 1;
+			continue;
+		}
+
 		if (old->stack[spi].slot_type[i % BPF_REG_SIZE] == STACK_INVALID)
 			continue;
 
-- 
2.42.1


  parent reply	other threads:[~2023-12-20 21:40 UTC|newest]

Thread overview: 34+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2023-12-20 21:39 [PATCH bpf-next 00/15] Improvements for tracking scalars in the BPF verifier Maxim Mikityanskiy
2023-12-20 21:39 ` [PATCH bpf-next 01/15] selftests/bpf: Fix the u64_offset_to_skb_data test Maxim Mikityanskiy
2023-12-26  9:52   ` Shung-Hsi Yu
2023-12-26 10:38     ` Maxim Mikityanskiy
2023-12-26 13:22       ` Shung-Hsi Yu
2024-01-04  2:22   ` Eduard Zingerman
2023-12-20 21:40 ` [PATCH bpf-next 02/15] bpf: make infinite loop detection in is_state_visited() exact Maxim Mikityanskiy
2023-12-20 21:40 ` [PATCH bpf-next 03/15] selftests/bpf: check if imprecise stack spills confuse infinite loop detection Maxim Mikityanskiy
2023-12-20 21:40 ` [PATCH bpf-next 04/15] bpf: Make bpf_for_each_spilled_reg consider narrow spills Maxim Mikityanskiy
2024-01-04  2:25   ` Eduard Zingerman
2023-12-20 21:40 ` [PATCH bpf-next 05/15] selftests/bpf: Add a test case for 32-bit spill tracking Maxim Mikityanskiy
2024-01-04  2:25   ` Eduard Zingerman
2023-12-20 21:40 ` [PATCH bpf-next 06/15] bpf: Add the assign_scalar_id_before_mov function Maxim Mikityanskiy
2024-01-04  2:26   ` Eduard Zingerman
2023-12-20 21:40 ` [PATCH bpf-next 07/15] bpf: Add the get_reg_width function Maxim Mikityanskiy
2023-12-20 21:40 ` [PATCH bpf-next 08/15] bpf: Assign ID to scalars on spill Maxim Mikityanskiy
2023-12-25  3:15   ` Alexei Starovoitov
2023-12-25 21:11     ` Maxim Mikityanskiy
2023-12-25 21:26       ` Alexei Starovoitov
2024-01-04  2:26   ` Eduard Zingerman
2023-12-20 21:40 ` [PATCH bpf-next 09/15] selftests/bpf: Test assigning " Maxim Mikityanskiy
2024-01-04  2:26   ` Eduard Zingerman
2023-12-20 21:40 ` [PATCH bpf-next 10/15] bpf: Track spilled unbounded scalars Maxim Mikityanskiy
2024-01-04  2:26   ` Eduard Zingerman
2023-12-20 21:40 ` [PATCH bpf-next 11/15] selftests/bpf: Test tracking " Maxim Mikityanskiy
2024-01-04  2:26   ` Eduard Zingerman
2023-12-20 21:40 ` [PATCH bpf-next 12/15] bpf: Preserve boundaries and track scalars on narrowing fill Maxim Mikityanskiy
2023-12-26  5:29   ` Shung-Hsi Yu
2024-01-04  2:27   ` Eduard Zingerman
2024-01-05 17:48     ` Maxim Mikityanskiy
2023-12-20 21:40 ` [PATCH bpf-next 13/15] selftests/bpf: Add test cases for " Maxim Mikityanskiy
2024-01-04  2:26   ` Eduard Zingerman
2023-12-20 21:40 ` Maxim Mikityanskiy [this message]
2023-12-20 21:40 ` [PATCH bpf-next 15/15] selftests/bpf: states pruning checks for scalar vs STACK_{MISC,ZERO} Maxim Mikityanskiy

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=20231220214013.3327288-15-maxtram95@gmail.com \
    --to=maxtram95@gmail.com \
    --cc=andrii@kernel.org \
    --cc=ast@kernel.org \
    --cc=bpf@vger.kernel.org \
    --cc=daniel@iogearbox.net \
    --cc=davem@davemloft.net \
    --cc=eddyz87@gmail.com \
    --cc=haoluo@google.com \
    --cc=hawk@kernel.org \
    --cc=john.fastabend@gmail.com \
    --cc=jolsa@kernel.org \
    --cc=kpsingh@kernel.org \
    --cc=kuba@kernel.org \
    --cc=linux-kselftest@vger.kernel.org \
    --cc=martin.lau@linux.dev \
    --cc=mykolal@fb.com \
    --cc=netdev@vger.kernel.org \
    --cc=sdf@google.com \
    --cc=shuah@kernel.org \
    --cc=song@kernel.org \
    --cc=yonghong.song@linux.dev \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.