All of lore.kernel.org
 help / color / mirror / Atom feed
From: Joanne Koong <joannekoong@fb.com>
To: <bpf@vger.kernel.org>
Cc: Joanne Koong <joannekoong@fb.com>
Subject: [PATCH bpf-next 4/5] bpf/benchs: Add benchmark test for bloom filter maps
Date: Tue, 31 Aug 2021 15:50:04 -0700	[thread overview]
Message-ID: <20210831225005.2762202-5-joannekoong@fb.com> (raw)
In-Reply-To: <20210831225005.2762202-1-joannekoong@fb.com>

This patch adds benchmark tests for the throughput and false positive
rate of bloom filter map lookups for a given number of entries and a
given number of hash functions.

These benchmarks show that as the number of hash functions increases,
the throughput and the false positive rate of the bloom filter map
decrease. From the benchmark data, the approximate average
false positive rates are roughly as follows:

1 hash function = ~30%
2 hash functions = ~15%
3 hash functions = ~5%
4 hash functions = ~2.5%
5 hash functions = ~1%
6 hash functions = ~0.5%
7 hash functions = ~0.35%
8 hash functions = ~0.15%
9 hash functions = ~0.1%
10 hash functions = ~0%

Signed-off-by: Joanne Koong <joannekoong@fb.com>
---
 tools/testing/selftests/bpf/Makefile          |   4 +-
 tools/testing/selftests/bpf/bench.c           |  35 ++
 tools/testing/selftests/bpf/bench.h           |   3 +
 .../bpf/benchs/bench_bloom_filter_map.c       | 344 ++++++++++++++++++
 .../bpf/benchs/run_bench_bloom_filter_map.sh  |  28 ++
 .../bpf/benchs/run_bench_ringbufs.sh          |  30 +-
 .../selftests/bpf/benchs/run_common.sh        |  48 +++
 .../selftests/bpf/progs/bloom_filter_map.c    |  74 ++++
 8 files changed, 537 insertions(+), 29 deletions(-)
 create mode 100644 tools/testing/selftests/bpf/benchs/bench_bloom_filter_map.c
 create mode 100755 tools/testing/selftests/bpf/benchs/run_bench_bloom_filter_map.sh
 create mode 100644 tools/testing/selftests/bpf/benchs/run_common.sh

diff --git a/tools/testing/selftests/bpf/Makefile b/tools/testing/selftests/bpf/Makefile
index 866531c08e4f..3576fdff117c 100644
--- a/tools/testing/selftests/bpf/Makefile
+++ b/tools/testing/selftests/bpf/Makefile
@@ -519,13 +519,15 @@ $(OUTPUT)/bench_rename.o: $(OUTPUT)/test_overhead.skel.h
 $(OUTPUT)/bench_trigger.o: $(OUTPUT)/trigger_bench.skel.h
 $(OUTPUT)/bench_ringbufs.o: $(OUTPUT)/ringbuf_bench.skel.h \
 			    $(OUTPUT)/perfbuf_bench.skel.h
+$(OUTPUT)/bench_bloom_filter_map.o: $(OUTPUT)/bloom_filter_map.skel.h
 $(OUTPUT)/bench.o: bench.h testing_helpers.h
 $(OUTPUT)/bench: LDLIBS += -lm
 $(OUTPUT)/bench: $(OUTPUT)/bench.o $(OUTPUT)/testing_helpers.o \
 		 $(OUTPUT)/bench_count.o \
 		 $(OUTPUT)/bench_rename.o \
 		 $(OUTPUT)/bench_trigger.o \
-		 $(OUTPUT)/bench_ringbufs.o
+		 $(OUTPUT)/bench_ringbufs.o \
+		 $(OUTPUT)/bench_bloom_filter_map.o
 	$(call msg,BINARY,,$@)
 	$(Q)$(CC) $(LDFLAGS) -o $@ $(filter %.a %.o,$^) $(LDLIBS)
 
diff --git a/tools/testing/selftests/bpf/bench.c b/tools/testing/selftests/bpf/bench.c
index 6ea15b93a2f8..0bcbdb4405a3 100644
--- a/tools/testing/selftests/bpf/bench.c
+++ b/tools/testing/selftests/bpf/bench.c
@@ -51,6 +51,35 @@ void setup_libbpf()
 		fprintf(stderr, "failed to increase RLIMIT_MEMLOCK: %d", err);
 }
 
+void false_hits_report_progress(int iter, struct bench_res *res, long delta_ns)
+{
+	long total = res->false_hits  + res->hits + res->drops;
+
+	printf("Iter %3d (%7.3lfus): ",
+	       iter, (delta_ns - 1000000000) / 1000.0);
+
+	printf("%ld false hits of %ld total operations. Percentage = %2.2f %%\n",
+	       res->false_hits, total, ((float)res->false_hits / total) * 100);
+}
+
+void false_hits_report_final(struct bench_res res[], int res_cnt)
+{
+	long total_hits = 0, total_drops = 0, total_false_hits = 0, total_ops = 0;
+	int i;
+
+	for (i = 0; i < res_cnt; i++) {
+		total_hits += res[i].hits;
+		total_false_hits += res[i].false_hits;
+		total_drops += res[i].drops;
+	}
+	total_ops = total_hits + total_false_hits + total_drops;
+
+	printf("Summary: %ld false hits of %ld total operations. ",
+	       total_false_hits, total_ops);
+	printf("Percentage =  %2.2f %%\n",
+	       ((float)total_false_hits / total_ops) * 100);
+}
+
 void hits_drops_report_progress(int iter, struct bench_res *res, long delta_ns)
 {
 	double hits_per_sec, drops_per_sec;
@@ -132,9 +161,11 @@ static const struct argp_option opts[] = {
 };
 
 extern struct argp bench_ringbufs_argp;
+extern struct argp bench_bloom_filter_map_argp;
 
 static const struct argp_child bench_parsers[] = {
 	{ &bench_ringbufs_argp, 0, "Ring buffers benchmark", 0 },
+	{ &bench_bloom_filter_map_argp, 0, "Bloom filter map benchmark", 0 },
 	{},
 };
 
@@ -323,6 +354,8 @@ extern const struct bench bench_rb_libbpf;
 extern const struct bench bench_rb_custom;
 extern const struct bench bench_pb_libbpf;
 extern const struct bench bench_pb_custom;
+extern const struct bench bench_bloom_filter_map;
+extern const struct bench bench_bloom_filter_false_positive;
 
 static const struct bench *benchs[] = {
 	&bench_count_global,
@@ -344,6 +377,8 @@ static const struct bench *benchs[] = {
 	&bench_rb_custom,
 	&bench_pb_libbpf,
 	&bench_pb_custom,
+	&bench_bloom_filter_map,
+	&bench_bloom_filter_false_positive,
 };
 
 static void setup_benchmark()
diff --git a/tools/testing/selftests/bpf/bench.h b/tools/testing/selftests/bpf/bench.h
index c1f48a473b02..624c6b11501f 100644
--- a/tools/testing/selftests/bpf/bench.h
+++ b/tools/testing/selftests/bpf/bench.h
@@ -33,6 +33,7 @@ struct env {
 struct bench_res {
 	long hits;
 	long drops;
+	long false_hits;
 };
 
 struct bench {
@@ -56,6 +57,8 @@ extern const struct bench *bench;
 void setup_libbpf();
 void hits_drops_report_progress(int iter, struct bench_res *res, long delta_ns);
 void hits_drops_report_final(struct bench_res res[], int res_cnt);
+void false_hits_report_progress(int iter, struct bench_res *res, long delta_ns);
+void false_hits_report_final(struct bench_res res[], int res_cnt);
 
 static inline __u64 get_time_ns() {
 	struct timespec t;
diff --git a/tools/testing/selftests/bpf/benchs/bench_bloom_filter_map.c b/tools/testing/selftests/bpf/benchs/bench_bloom_filter_map.c
new file mode 100644
index 000000000000..dadca2a6e900
--- /dev/null
+++ b/tools/testing/selftests/bpf/benchs/bench_bloom_filter_map.c
@@ -0,0 +1,344 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2021 Facebook */
+
+#include <argp.h>
+#include <pthread.h>
+#include "bench.h"
+#include "bloom_filter_map.skel.h"
+#include "bpf_util.h"
+
+static struct ctx {
+	struct bloom_filter_map *skel;
+	pthread_mutex_t map_done_mtx;
+	pthread_cond_t map_done;
+	bool map_prepare_err;
+	__u32 next_map_idx;
+} ctx = {
+	.map_done_mtx = PTHREAD_MUTEX_INITIALIZER,
+	.map_done = PTHREAD_COND_INITIALIZER,
+};
+
+static struct {
+	__u64 nr_entries;
+	__u64 nr_hashes;
+} args = {
+	.nr_entries = 1000,
+	.nr_hashes = 1,
+};
+
+enum {
+	ARG_NR_ENTRIES = 3000,
+	ARG_NR_HASHES = 3001,
+};
+
+static const struct argp_option opts[] = {
+	{ "nr_entries", ARG_NR_ENTRIES, "NR_ENTRIES", 0,
+		"Set number of entries in the bloom filter map"},
+	{ "nr_hashes", ARG_NR_HASHES, "NR_HASHES", 0,
+		"Set number of hashes in the bloom filter map"},
+	{},
+};
+
+static error_t parse_arg(int key, char *arg, struct argp_state *state)
+{
+	switch (key) {
+	case ARG_NR_ENTRIES:
+		args.nr_entries = strtol(arg, NULL, 10);
+		if (args.nr_entries == 0) {
+			fprintf(stderr, "Invalid nr_entries count.");
+			argp_usage(state);
+		}
+		break;
+	case ARG_NR_HASHES:
+		args.nr_hashes = strtol(arg, NULL, 10);
+		if (args.nr_hashes == 0) {
+			fprintf(stderr, "Invalid nr_hashes count.");
+			argp_usage(state);
+		}
+		break;
+	default:
+		return ARGP_ERR_UNKNOWN;
+	}
+
+	return 0;
+}
+
+/* exported into benchmark runner */
+const struct argp bench_bloom_filter_map_argp = {
+	.options = opts,
+	.parser = parse_arg,
+};
+
+static void validate(void)
+{
+	if (env.consumer_cnt != 1) {
+		fprintf(stderr, "bloom filter map benchmark doesn't support multi-consumer!\n");
+		exit(1);
+	}
+}
+
+static inline void trigger_bpf_program(void)
+{
+	syscall(__NR_getpgid);
+}
+
+static void *producer(void *input)
+{
+	while (true)
+		trigger_bpf_program();
+
+	return NULL;
+}
+
+static void *map_prepare_thread(void *arg)
+{
+	int err, random_data_fd, bloom_filter_fd, hashmap_fd;
+	__u64 i, val;
+
+	bloom_filter_fd = bpf_map__fd(ctx.skel->maps.map_bloom_filter);
+	random_data_fd = bpf_map__fd(ctx.skel->maps.map_random_data);
+	hashmap_fd = bpf_map__fd(ctx.skel->maps.hashmap);
+
+	while (true) {
+		i = __atomic_add_fetch(&ctx.next_map_idx, 1, __ATOMIC_RELAXED);
+		if (i > args.nr_entries)
+			break;
+again:
+		err = syscall(__NR_getrandom, &val, sizeof(val), 0);
+		if (err != sizeof(val)) {
+			ctx.map_prepare_err = true;
+			fprintf(stderr, "failed to get random value\n");
+			break;
+		}
+		err = bpf_map_update_elem(hashmap_fd, &val, &val, BPF_NOEXIST);
+		if (err) {
+			if (err != -EEXIST) {
+				ctx.map_prepare_err = true;
+				fprintf(stderr, "failed to add elem to hashmap: %d\n", -errno);
+				break;
+			}
+			goto again;
+		}
+
+		i--;
+		err = bpf_map_update_elem(random_data_fd, &i, &val, 0);
+		if (err) {
+			ctx.map_prepare_err = true;
+			fprintf(stderr, "failed to add elem to array: %d\n", -errno);
+			break;
+		}
+
+		err = bpf_map_update_elem(bloom_filter_fd, NULL, &val, 0);
+		if (err) {
+			ctx.map_prepare_err = true;
+			fprintf(stderr, "failed to add elem to bloom_filter: %d\n", -errno);
+			break;
+		}
+	}
+
+	pthread_mutex_lock(&ctx.map_done_mtx);
+	pthread_cond_signal(&ctx.map_done);
+	pthread_mutex_unlock(&ctx.map_done_mtx);
+
+	return NULL;
+}
+
+static void populate_maps(void)
+{
+	unsigned int nr_cpus = bpf_num_possible_cpus();
+	pthread_t map_thread;
+	int i, err;
+
+	for (i = 0; i < nr_cpus; i++) {
+		err = pthread_create(&map_thread, NULL, map_prepare_thread,
+				     NULL);
+		if (err) {
+			fprintf(stderr, "failed to create pthread: %d\n", -errno);
+			exit(1);
+		}
+	}
+
+	pthread_mutex_lock(&ctx.map_done_mtx);
+	pthread_cond_wait(&ctx.map_done, &ctx.map_done_mtx);
+	pthread_mutex_unlock(&ctx.map_done_mtx);
+
+	if (ctx.map_prepare_err)
+		exit(1);
+}
+
+static struct bloom_filter_map *setup_skeleton(void)
+{
+	struct bloom_filter_map *skel;
+	int err;
+
+	setup_libbpf();
+
+	skel = bloom_filter_map__open();
+	if (!skel) {
+		fprintf(stderr, "failed to open skeleton\n");
+		exit(1);
+	}
+
+	err = bpf_map__resize(skel->maps.map_random_data, args.nr_entries);
+	if (err) {
+		fprintf(stderr, "failed to resize map_random_data\n");
+		exit(1);
+	}
+
+	err = bpf_map__resize(skel->maps.hashmap, args.nr_entries);
+	if (err) {
+		fprintf(stderr, "failed to resize hashmap\n");
+		exit(1);
+	}
+
+	err = bpf_map__resize(skel->maps.map_bloom_filter, args.nr_entries);
+	if (err) {
+		fprintf(stderr, "failed to resize bloom filter\n");
+		exit(1);
+	}
+
+	err = bpf_map__set_nr_hashes(skel->maps.map_bloom_filter, args.nr_hashes);
+	if (err) {
+		fprintf(stderr, "failed to set n hashes\n");
+		exit(1);
+	}
+
+	if (bloom_filter_map__load(skel)) {
+		fprintf(stderr, "failed to load skeleton\n");
+		exit(1);
+	}
+
+	return skel;
+}
+
+static void bloom_filter_map_setup(void)
+{
+	struct bpf_link *link;
+
+	ctx.skel = setup_skeleton();
+
+	populate_maps();
+
+	link = bpf_program__attach(ctx.skel->progs.prog_bloom_filter);
+	if (!link) {
+		fprintf(stderr, "failed to attach program!\n");
+		exit(1);
+	}
+}
+
+static void hashmap_lookup_setup(void)
+{
+	struct bpf_link *link;
+
+	ctx.skel = setup_skeleton();
+
+	populate_maps();
+
+	link = bpf_program__attach(ctx.skel->progs.prog_bloom_filter_hashmap_lookup);
+	if (!link) {
+		fprintf(stderr, "failed to attach program!\n");
+		exit(1);
+	}
+}
+
+static void measure(struct bench_res *res)
+{
+	long total_hits = 0, total_drops = 0, total_false_hits = 0;
+	unsigned int nr_cpus = bpf_num_possible_cpus();
+	BPF_DECLARE_PERCPU(__u64, zeroed_values);
+	BPF_DECLARE_PERCPU(__u64, false_hits);
+	BPF_DECLARE_PERCPU(__u64, drops);
+	BPF_DECLARE_PERCPU(__u64, hits);
+	int err, i, percpu_array_fd;
+	__u32 key;
+
+	if (ctx.skel->bss->error != 0) {
+		fprintf(stderr, "error (%d) when searching the bloom filter\n",
+			ctx.skel->bss->error);
+		exit(1);
+	}
+
+	key = ctx.skel->rodata->hit_key;
+	percpu_array_fd = bpf_map__fd(ctx.skel->maps.percpu_array);
+	err = bpf_map_lookup_elem(percpu_array_fd, &key, hits);
+	if (err) {
+		fprintf(stderr, "lookup in the percpu array  for 'hits' failed: %d\n",
+			-errno);
+		exit(1);
+	}
+
+	key = ctx.skel->rodata->drop_key;
+	err = bpf_map_lookup_elem(percpu_array_fd, &key, drops);
+	if (err) {
+		fprintf(stderr, "lookup in the percpu array for 'drops' failed: %d\n",
+			-errno);
+		exit(1);
+	}
+
+	key = ctx.skel->rodata->false_hit_key;
+	err = bpf_map_lookup_elem(percpu_array_fd, &key, false_hits);
+	if (err) {
+		fprintf(stderr, "lookup in the percpu array for 'false hits' failed: %d\n",
+			-errno);
+		exit(1);
+	}
+
+	for (i = 0; i < nr_cpus; i++) {
+		total_hits += bpf_percpu(hits, i);
+		total_drops += bpf_percpu(drops, i);
+		total_false_hits += bpf_percpu(false_hits, i);
+	}
+
+	res->hits = total_hits;
+	res->drops = total_drops;
+	res->false_hits = total_false_hits;
+
+	memset(zeroed_values, 0, sizeof(zeroed_values));
+
+	/* zero out the percpu array */
+	key = ctx.skel->rodata->hit_key;
+	err = bpf_map_update_elem(percpu_array_fd, &key, zeroed_values, BPF_ANY);
+	if (err) {
+		fprintf(stderr, "zeroing the percpu array failed: %d\n", -errno);
+		exit(1);
+	}
+	key = ctx.skel->rodata->drop_key;
+	err = bpf_map_update_elem(percpu_array_fd, &key, zeroed_values, BPF_ANY);
+	if (err) {
+		fprintf(stderr, "zeroing the percpu array failed: %d\n", -errno);
+		exit(1);
+	}
+	key = ctx.skel->rodata->false_hit_key;
+	err = bpf_map_update_elem(percpu_array_fd, &key, zeroed_values, BPF_ANY);
+	if (err) {
+		fprintf(stderr, "zeroing the percpu array failed: %d\n", -errno);
+		exit(1);
+	}
+}
+
+static void *consumer(void *input)
+{
+	return NULL;
+}
+
+const struct bench bench_bloom_filter_map = {
+	.name = "bloom-filter-map",
+	.validate = validate,
+	.setup = bloom_filter_map_setup,
+	.producer_thread = producer,
+	.consumer_thread = consumer,
+	.measure = measure,
+	.report_progress = hits_drops_report_progress,
+	.report_final = hits_drops_report_final,
+};
+
+const struct bench bench_bloom_filter_false_positive = {
+	.name = "bloom-filter-false-positive",
+	.validate = validate,
+	.setup = hashmap_lookup_setup,
+	.producer_thread = producer,
+	.consumer_thread = consumer,
+	.measure = measure,
+	.report_progress = false_hits_report_progress,
+	.report_final = false_hits_report_final,
+};
diff --git a/tools/testing/selftests/bpf/benchs/run_bench_bloom_filter_map.sh b/tools/testing/selftests/bpf/benchs/run_bench_bloom_filter_map.sh
new file mode 100755
index 000000000000..8f2de6e39313
--- /dev/null
+++ b/tools/testing/selftests/bpf/benchs/run_bench_bloom_filter_map.sh
@@ -0,0 +1,28 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+
+source ./benchs/run_common.sh
+
+set -eufo pipefail
+
+header "Bloom filter map"
+for t in 1 4 8; do
+for h in {1..10}; do
+subtitle "# threads: $t, # hashes: $h"
+	for e in 10000 50000 75000 100000 250000 500000 750000 1000000 2500000 5000000; do
+		printf "%'d entries -\n" $e
+		printf "\t"
+		summarize "Total operations: " \
+			"$($RUN_BENCH -p $t --nr_hashes $h --nr_entries $e bloom-filter-map)"
+		printf "\t"
+		summarize_percentage "False positive rate: " \
+			"$($RUN_BENCH -p $t --nr_hashes $h --nr_entries $e bloom-filter-false-positive)"
+	done
+	printf "\n"
+done
+done
+
+header "Bloom filter map, multi-producer contention"
+for t in 1 2 3 4 8 12 16 20 24 28 32 36 40 44 48 52; do
+	summarize "$t threads - " "$($RUN_BENCH -p $t bloom-filter-map)"
+done
diff --git a/tools/testing/selftests/bpf/benchs/run_bench_ringbufs.sh b/tools/testing/selftests/bpf/benchs/run_bench_ringbufs.sh
index af4aa04caba6..ada028aa9007 100755
--- a/tools/testing/selftests/bpf/benchs/run_bench_ringbufs.sh
+++ b/tools/testing/selftests/bpf/benchs/run_bench_ringbufs.sh
@@ -1,34 +1,8 @@
 #!/bin/bash
 
-set -eufo pipefail
-
-RUN_BENCH="sudo ./bench -w3 -d10 -a"
-
-function hits()
-{
-	echo "$*" | sed -E "s/.*hits\s+([0-9]+\.[0-9]+ ± [0-9]+\.[0-9]+M\/s).*/\1/"
-}
-
-function drops()
-{
-	echo "$*" | sed -E "s/.*drops\s+([0-9]+\.[0-9]+ ± [0-9]+\.[0-9]+M\/s).*/\1/"
-}
+source ./benchs/run_common.sh
 
-function header()
-{
-	local len=${#1}
-
-	printf "\n%s\n" "$1"
-	for i in $(seq 1 $len); do printf '='; done
-	printf '\n'
-}
-
-function summarize()
-{
-	bench="$1"
-	summary=$(echo $2 | tail -n1)
-	printf "%-20s %s (drops %s)\n" "$bench" "$(hits $summary)" "$(drops $summary)"
-}
+set -eufo pipefail
 
 header "Single-producer, parallel producer"
 for b in rb-libbpf rb-custom pb-libbpf pb-custom; do
diff --git a/tools/testing/selftests/bpf/benchs/run_common.sh b/tools/testing/selftests/bpf/benchs/run_common.sh
new file mode 100644
index 000000000000..670f23b037c4
--- /dev/null
+++ b/tools/testing/selftests/bpf/benchs/run_common.sh
@@ -0,0 +1,48 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+
+RUN_BENCH="sudo ./bench -w3 -d10 -a"
+
+function header()
+{
+	local len=${#1}
+
+	printf "\n%s\n" "$1"
+	for i in $(seq 1 $len); do printf '='; done
+	printf '\n'
+}
+
+function subtitle()
+{
+	local len=${#1}
+	printf "\t%s\n" "$1"
+}
+
+function hits()
+{
+	echo "$*" | sed -E "s/.*hits\s+([0-9]+\.[0-9]+ ± [0-9]+\.[0-9]+M\/s).*/\1/"
+}
+
+function drops()
+{
+	echo "$*" | sed -E "s/.*drops\s+([0-9]+\.[0-9]+ ± [0-9]+\.[0-9]+M\/s).*/\1/"
+}
+
+function percentage()
+{
+	echo "$*" | sed -E "s/.*Percentage\s=\s+([0-9]+\.[0-9]+).*/\1/"
+}
+
+function summarize()
+{
+	bench="$1"
+	summary=$(echo $2 | tail -n1)
+	printf "%-20s %s (drops %s)\n" "$bench" "$(hits $summary)" "$(drops $summary)"
+}
+
+function summarize_percentage()
+{
+	bench="$1"
+	summary=$(echo $2 | tail -n1)
+	printf "%-20s %s%%\n" "$bench" "$(percentage $summary)"
+}
diff --git a/tools/testing/selftests/bpf/progs/bloom_filter_map.c b/tools/testing/selftests/bpf/progs/bloom_filter_map.c
index 2d9c43a30246..1b139689219e 100644
--- a/tools/testing/selftests/bpf/progs/bloom_filter_map.c
+++ b/tools/testing/selftests/bpf/progs/bloom_filter_map.c
@@ -1,7 +1,9 @@
 // SPDX-License-Identifier: GPL-3.0
 /* Copyright (c) 2021 Facebook */
 
+#include <errno.h>
 #include <linux/bpf.h>
+#include <stdbool.h>
 #include <bpf/bpf_helpers.h>
 
 char _license[] SEC("license") = "GPL";
@@ -23,8 +25,38 @@ struct {
 	__uint(nr_hashes, 2);
 } map_bloom_filter SEC(".maps");
 
+/* Tracks the number of hits, drops, and false hits */
+struct {
+	__uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
+	__uint(max_entries, 3);
+	__type(key, __u32);
+	__type(value, __u64);
+} percpu_array SEC(".maps");
+
+struct {
+	__uint(type, BPF_MAP_TYPE_HASH);
+	__uint(max_entries, 1000);
+	__type(key, __u64);
+	__type(value, __u64);
+} hashmap SEC(".maps");
+
+const __u32 hit_key  = 0;
+const __u32 drop_key  = 1;
+const __u32 false_hit_key = 2;
+
+bool hashmap_use_bloom_filter = true;
+
 int error = 0;
 
+static __always_inline void log_result(__u32 key)
+{
+	__u64 *count;
+
+	count = bpf_map_lookup_elem(&percpu_array, &key);
+	if (count)
+		*count += 1;
+}
+
 static __u64
 check_elem(struct bpf_map *map, __u32 *key, __u64 *val,
 	   void *data)
@@ -37,6 +69,8 @@ check_elem(struct bpf_map *map, __u32 *key, __u64 *val,
 		return 1; /* stop the iteration */
 	}
 
+	log_result(hit_key);
+
 	return 0;
 }
 
@@ -47,3 +81,43 @@ int prog_bloom_filter(void *ctx)
 
 	return 0;
 }
+
+SEC("fentry/__x64_sys_getpgid")
+int prog_bloom_filter_hashmap_lookup(void *ctx)
+{
+	__u64 *result;
+	int i, err;
+
+	union {
+		__u64 data64;
+		__u32 data32[2];
+	} val;
+
+	for (i = 0; i < 512; i++) {
+		val.data32[0] = bpf_get_prandom_u32();
+		val.data32[1] = bpf_get_prandom_u32();
+
+		if (hashmap_use_bloom_filter) {
+			err = bpf_map_peek_elem(&map_bloom_filter, &val);
+			if (err) {
+				if (err != -ENOENT) {
+					error |= 2;
+					return 0;
+				}
+				log_result(drop_key);
+				continue;
+			}
+		}
+
+		result = bpf_map_lookup_elem(&hashmap, &val);
+		if (result) {
+			log_result(hit_key);
+		} else {
+			if (hashmap_use_bloom_filter)
+				log_result(false_hit_key);
+			log_result(drop_key);
+		}
+	}
+
+	return 0;
+}
-- 
2.30.2


  parent reply	other threads:[~2021-08-31 22:50 UTC|newest]

Thread overview: 23+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2021-08-31 22:50 [PATCH bpf-next 0/5] Implement bloom filter map Joanne Koong
2021-08-31 22:50 ` [PATCH bpf-next 1/5] bpf: Add bloom filter map implementation Joanne Koong
2021-09-01  2:55   ` Alexei Starovoitov
2021-09-02 19:11     ` Joanne Koong
2021-09-02  1:44   ` Andrii Nakryiko
2021-09-02  5:11     ` John Fastabend
2021-09-02  6:16       ` Martin KaFai Lau
2021-09-02 22:07       ` Joanne Koong
2021-09-03  0:56         ` Martin KaFai Lau
2021-09-03  7:13           ` Joanne Koong
2021-09-03 17:19             ` Andrii Nakryiko
2021-09-03 17:22         ` John Fastabend
2021-09-08 19:10           ` Joanne Koong
2021-09-02  3:16   ` John Fastabend
2021-09-02  3:28     ` Andrii Nakryiko
2021-09-02  4:40       ` John Fastabend
2021-08-31 22:50 ` [PATCH bpf-next 2/5] libbpf: Allow the number of hashes in bloom filter maps to be configurable Joanne Koong
2021-09-02  3:30   ` Andrii Nakryiko
2021-08-31 22:50 ` [PATCH bpf-next 3/5] selftests/bpf: Add bloom filter map test cases Joanne Koong
2021-09-01  2:55   ` Alexei Starovoitov
2021-08-31 22:50 ` Joanne Koong [this message]
2021-09-02  3:35   ` [PATCH bpf-next 4/5] bpf/benchs: Add benchmark test for bloom filter maps Andrii Nakryiko
2021-08-31 22:50 ` [PATCH bpf-next 5/5] bpf/benchs: Add benchmarks for comparing hashmap lookups with vs. without bloom filter Joanne Koong

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=20210831225005.2762202-5-joannekoong@fb.com \
    --to=joannekoong@fb.com \
    --cc=bpf@vger.kernel.org \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.