All of lore.kernel.org
 help / color / mirror / Atom feed
* [PATCH nft] cache: do not populate cache if it is going to be flushed
@ 2021-11-08 18:21 Pablo Neira Ayuso
  0 siblings, 0 replies; only message in thread
From: Pablo Neira Ayuso @ 2021-11-08 18:21 UTC (permalink / raw)
  To: netfilter-devel

Skip the set element netlink dump if the set is going to be flushed; this
speeds up a set flush followed by add element operations in a batch file
for an existing set.

Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
---
 include/cache.h | 13 +++++++++++--
 src/cache.c     | 40 ++++++++++++++++++++++++++++++++++++++--
 2 files changed, 49 insertions(+), 4 deletions(-)

diff --git a/include/cache.h b/include/cache.h
index 0523358889de..f3572319c451 100644
--- a/include/cache.h
+++ b/include/cache.h
@@ -38,9 +38,18 @@ enum cache_level_flags {
 	NFT_CACHE_FLUSHED	= (1 << 31),
 };
 
+#define NFT_CACHE_MAX_FILTER		12
+
 struct nft_cache_filter {
-	const char		*table;
-	const char		*set;
+	const char			*table;
+	const char			*set;
+
+	struct {
+		const char		*table;
+		const char		*set;
+	} obj[NFT_CACHE_MAX_FILTER];
+
+	unsigned int			num_objs;
 };
 
 struct nft_cache;
diff --git a/src/cache.c b/src/cache.c
index 0cddd1e1cb48..08f6ca700263 100644
--- a/src/cache.c
+++ b/src/cache.c
@@ -96,13 +96,43 @@ static unsigned int evaluate_cache_get(struct cmd *cmd, unsigned int flags)
 	return flags;
 }
 
-static unsigned int evaluate_cache_flush(struct cmd *cmd, unsigned int flags)
+static void cache_filter_add(struct nft_cache_filter *filter,
+			     const struct cmd *cmd)
+{
+	int i;
+
+	if (filter->num_objs >= NFT_CACHE_MAX_FILTER)
+		return;
+
+	i = filter->num_objs;
+	filter->obj[i].table = cmd->handle.table.name;
+	filter->obj[i].set = cmd->handle.set.name;
+	filter->num_objs++;
+}
+
+static bool cache_filter_find(const struct nft_cache_filter *filter,
+			      const struct handle *handle)
+{
+	unsigned int i;
+
+	for (i = 0; i < filter->num_objs; i++) {
+		if (!strcmp(filter->obj[i].table, handle->table.name) &&
+		    !strcmp(filter->obj[i].set, handle->set.name))
+			return true;
+	}
+
+	return false;
+}
+
+static unsigned int evaluate_cache_flush(struct cmd *cmd, unsigned int flags,
+					 struct nft_cache_filter *filter)
 {
 	switch (cmd->obj) {
 	case CMD_OBJ_SET:
 	case CMD_OBJ_MAP:
 	case CMD_OBJ_METER:
 		flags |= NFT_CACHE_SET;
+		cache_filter_add(filter, cmd);
 		break;
 	case CMD_OBJ_RULESET:
 		flags |= NFT_CACHE_FLUSHED;
@@ -219,7 +249,7 @@ unsigned int nft_cache_evaluate(struct nft_ctx *nft, struct list_head *cmds,
 			flags |= NFT_CACHE_FULL;
 			break;
 		case CMD_FLUSH:
-			flags = evaluate_cache_flush(cmd, flags);
+			flags = evaluate_cache_flush(cmd, flags, filter);
 			break;
 		case CMD_RENAME:
 			flags = evaluate_cache_rename(cmd, flags);
@@ -685,6 +715,9 @@ static int cache_init_objects(struct netlink_ctx *ctx, unsigned int flags,
 		}
 		if (flags & NFT_CACHE_SETELEM_BIT) {
 			list_for_each_entry(set, &table->set_cache.list, cache.list) {
+				if (cache_filter_find(filter, &set->handle))
+					continue;
+
 				ret = netlink_list_setelems(ctx, &set->handle,
 							    set);
 				if (ret < 0) {
@@ -694,6 +727,9 @@ static int cache_init_objects(struct netlink_ctx *ctx, unsigned int flags,
 			}
 		} else if (flags & NFT_CACHE_SETELEM_MAYBE) {
 			list_for_each_entry(set, &table->set_cache.list, cache.list) {
+				if (cache_filter_find(filter, &set->handle))
+					continue;
+
 				if (!set_is_non_concat_range(set))
 					continue;
 
-- 
2.30.2


^ permalink raw reply related	[flat|nested] only message in thread

only message in thread, other threads:[~2021-11-08 18:21 UTC | newest]

Thread overview: (only message) (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2021-11-08 18:21 [PATCH nft] cache: do not populate cache if it is going to be flushed Pablo Neira Ayuso

This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.