All of lore.kernel.org
 help / color / mirror / Atom feed
* [PATCH net-next v2 0/2] net: openvswitch: masks cache enhancements
@ 2020-07-23 12:58 Eelco Chaudron
  2020-07-23 12:58 ` [PATCH net-next v2 1/2] net: openvswitch: add masks cache hit counter Eelco Chaudron
  2020-07-23 12:58 ` [PATCH net-next v2 2/2] net: openvswitch: make masks cache size configurable Eelco Chaudron
  0 siblings, 2 replies; 6+ messages in thread
From: Eelco Chaudron @ 2020-07-23 12:58 UTC (permalink / raw)
  To: netdev; +Cc: davem, dev, kuba, pabeni, pshelar, fw

This patchset adds two enhancements to the Open vSwitch masks cache.

Signed-off-by: Eelco Chaudron <echaudro@redhat.com>

Changes in v2:
 - Fix sparse warnings
 - Fix netlink policy items reported by Florian Westphal

Eelco Chaudron (2):
      net: openvswitch: add masks cache hit counter
      net: openvswitch: make masks cache size configurable


 include/uapi/linux/openvswitch.h |    3 +
 net/openvswitch/datapath.c       |   19 ++++++-
 net/openvswitch/datapath.h       |    3 +
 net/openvswitch/flow_table.c     |  109 +++++++++++++++++++++++++++++++++-----
 net/openvswitch/flow_table.h     |   13 ++++-
 5 files changed, 128 insertions(+), 19 deletions(-)


^ permalink raw reply	[flat|nested] 6+ messages in thread

* [PATCH net-next v2 1/2] net: openvswitch: add masks cache hit counter
  2020-07-23 12:58 [PATCH net-next v2 0/2] net: openvswitch: masks cache enhancements Eelco Chaudron
@ 2020-07-23 12:58 ` Eelco Chaudron
  2020-07-24  2:37   ` [ovs-dev] " Tonghao Zhang
  2020-07-23 12:58 ` [PATCH net-next v2 2/2] net: openvswitch: make masks cache size configurable Eelco Chaudron
  1 sibling, 1 reply; 6+ messages in thread
From: Eelco Chaudron @ 2020-07-23 12:58 UTC (permalink / raw)
  To: netdev; +Cc: davem, dev, kuba, pabeni, pshelar, fw

Add a counter that counts the number of masks cache hits, and
export it through the megaflow netlink statistics.

Reviewed-by: Paolo Abeni <pabeni@redhat.com>
Signed-off-by: Eelco Chaudron <echaudro@redhat.com>
---
 include/uapi/linux/openvswitch.h |    2 +-
 net/openvswitch/datapath.c       |    5 ++++-
 net/openvswitch/datapath.h       |    3 +++
 net/openvswitch/flow_table.c     |   19 ++++++++++++++-----
 net/openvswitch/flow_table.h     |    3 ++-
 5 files changed, 24 insertions(+), 8 deletions(-)

diff --git a/include/uapi/linux/openvswitch.h b/include/uapi/linux/openvswitch.h
index 9b14519e74d9..7cb76e5ca7cf 100644
--- a/include/uapi/linux/openvswitch.h
+++ b/include/uapi/linux/openvswitch.h
@@ -102,8 +102,8 @@ struct ovs_dp_megaflow_stats {
 	__u64 n_mask_hit;	 /* Number of masks used for flow lookups. */
 	__u32 n_masks;		 /* Number of masks for the datapath. */
 	__u32 pad0;		 /* Pad for future expension. */
+	__u64 n_cache_hit;       /* Number of cache matches for flow lookups. */
 	__u64 pad1;		 /* Pad for future expension. */
-	__u64 pad2;		 /* Pad for future expension. */
 };
 
 struct ovs_vport_stats {
diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c
index 95805f0e27bd..a54df1fe3ec4 100644
--- a/net/openvswitch/datapath.c
+++ b/net/openvswitch/datapath.c
@@ -225,13 +225,14 @@ void ovs_dp_process_packet(struct sk_buff *skb, struct sw_flow_key *key)
 	struct dp_stats_percpu *stats;
 	u64 *stats_counter;
 	u32 n_mask_hit;
+	u32 n_cache_hit;
 	int error;
 
 	stats = this_cpu_ptr(dp->stats_percpu);
 
 	/* Look up flow. */
 	flow = ovs_flow_tbl_lookup_stats(&dp->table, key, skb_get_hash(skb),
-					 &n_mask_hit);
+					 &n_mask_hit, &n_cache_hit);
 	if (unlikely(!flow)) {
 		struct dp_upcall_info upcall;
 
@@ -262,6 +263,7 @@ void ovs_dp_process_packet(struct sk_buff *skb, struct sw_flow_key *key)
 	u64_stats_update_begin(&stats->syncp);
 	(*stats_counter)++;
 	stats->n_mask_hit += n_mask_hit;
+	stats->n_cache_hit += n_cache_hit;
 	u64_stats_update_end(&stats->syncp);
 }
 
@@ -699,6 +701,7 @@ static void get_dp_stats(const struct datapath *dp, struct ovs_dp_stats *stats,
 		stats->n_missed += local_stats.n_missed;
 		stats->n_lost += local_stats.n_lost;
 		mega_stats->n_mask_hit += local_stats.n_mask_hit;
+		mega_stats->n_cache_hit += local_stats.n_cache_hit;
 	}
 }
 
diff --git a/net/openvswitch/datapath.h b/net/openvswitch/datapath.h
index 697a2354194b..86d78613edb4 100644
--- a/net/openvswitch/datapath.h
+++ b/net/openvswitch/datapath.h
@@ -38,12 +38,15 @@
  * @n_mask_hit: Number of masks looked up for flow match.
  *   @n_mask_hit / (@n_hit + @n_missed)  will be the average masks looked
  *   up per packet.
+ * @n_cache_hit: The number of received packets that had their mask found using
+ * the mask cache.
  */
 struct dp_stats_percpu {
 	u64 n_hit;
 	u64 n_missed;
 	u64 n_lost;
 	u64 n_mask_hit;
+	u64 n_cache_hit;
 	struct u64_stats_sync syncp;
 };
 
diff --git a/net/openvswitch/flow_table.c b/net/openvswitch/flow_table.c
index af22c9ee28dd..a5912ea05352 100644
--- a/net/openvswitch/flow_table.c
+++ b/net/openvswitch/flow_table.c
@@ -667,6 +667,7 @@ static struct sw_flow *flow_lookup(struct flow_table *tbl,
 				   struct mask_array *ma,
 				   const struct sw_flow_key *key,
 				   u32 *n_mask_hit,
+				   u32 *n_cache_hit,
 				   u32 *index)
 {
 	u64 *usage_counters = this_cpu_ptr(ma->masks_usage_cntr);
@@ -682,6 +683,7 @@ static struct sw_flow *flow_lookup(struct flow_table *tbl,
 				u64_stats_update_begin(&ma->syncp);
 				usage_counters[*index]++;
 				u64_stats_update_end(&ma->syncp);
+				(*n_cache_hit)++;
 				return flow;
 			}
 		}
@@ -719,7 +721,8 @@ static struct sw_flow *flow_lookup(struct flow_table *tbl,
 struct sw_flow *ovs_flow_tbl_lookup_stats(struct flow_table *tbl,
 					  const struct sw_flow_key *key,
 					  u32 skb_hash,
-					  u32 *n_mask_hit)
+					  u32 *n_mask_hit,
+					  u32 *n_cache_hit)
 {
 	struct mask_array *ma = rcu_dereference(tbl->mask_array);
 	struct table_instance *ti = rcu_dereference(tbl->ti);
@@ -729,10 +732,13 @@ struct sw_flow *ovs_flow_tbl_lookup_stats(struct flow_table *tbl,
 	int seg;
 
 	*n_mask_hit = 0;
+	*n_cache_hit = 0;
 	if (unlikely(!skb_hash)) {
 		u32 mask_index = 0;
+		u32 cache = 0;
 
-		return flow_lookup(tbl, ti, ma, key, n_mask_hit, &mask_index);
+		return flow_lookup(tbl, ti, ma, key, n_mask_hit, &cache,
+				   &mask_index);
 	}
 
 	/* Pre and post recirulation flows usually have the same skb_hash
@@ -753,7 +759,7 @@ struct sw_flow *ovs_flow_tbl_lookup_stats(struct flow_table *tbl,
 		e = &entries[index];
 		if (e->skb_hash == skb_hash) {
 			flow = flow_lookup(tbl, ti, ma, key, n_mask_hit,
-					   &e->mask_index);
+					   n_cache_hit, &e->mask_index);
 			if (!flow)
 				e->skb_hash = 0;
 			return flow;
@@ -766,10 +772,12 @@ struct sw_flow *ovs_flow_tbl_lookup_stats(struct flow_table *tbl,
 	}
 
 	/* Cache miss, do full lookup. */
-	flow = flow_lookup(tbl, ti, ma, key, n_mask_hit, &ce->mask_index);
+	flow = flow_lookup(tbl, ti, ma, key, n_mask_hit, n_cache_hit,
+			   &ce->mask_index);
 	if (flow)
 		ce->skb_hash = skb_hash;
 
+	*n_cache_hit = 0;
 	return flow;
 }
 
@@ -779,9 +787,10 @@ struct sw_flow *ovs_flow_tbl_lookup(struct flow_table *tbl,
 	struct table_instance *ti = rcu_dereference_ovsl(tbl->ti);
 	struct mask_array *ma = rcu_dereference_ovsl(tbl->mask_array);
 	u32 __always_unused n_mask_hit;
+	u32 __always_unused n_cache_hit;
 	u32 index = 0;
 
-	return flow_lookup(tbl, ti, ma, key, &n_mask_hit, &index);
+	return flow_lookup(tbl, ti, ma, key, &n_mask_hit, &n_cache_hit, &index);
 }
 
 struct sw_flow *ovs_flow_tbl_lookup_exact(struct flow_table *tbl,
diff --git a/net/openvswitch/flow_table.h b/net/openvswitch/flow_table.h
index 1f664b050e3b..325e939371d8 100644
--- a/net/openvswitch/flow_table.h
+++ b/net/openvswitch/flow_table.h
@@ -82,7 +82,8 @@ struct sw_flow *ovs_flow_tbl_dump_next(struct table_instance *table,
 struct sw_flow *ovs_flow_tbl_lookup_stats(struct flow_table *,
 					  const struct sw_flow_key *,
 					  u32 skb_hash,
-					  u32 *n_mask_hit);
+					  u32 *n_mask_hit,
+					  u32 *n_cache_hit);
 struct sw_flow *ovs_flow_tbl_lookup(struct flow_table *,
 				    const struct sw_flow_key *);
 struct sw_flow *ovs_flow_tbl_lookup_exact(struct flow_table *tbl,


^ permalink raw reply related	[flat|nested] 6+ messages in thread

* [PATCH net-next v2 2/2] net: openvswitch: make masks cache size configurable
  2020-07-23 12:58 [PATCH net-next v2 0/2] net: openvswitch: masks cache enhancements Eelco Chaudron
  2020-07-23 12:58 ` [PATCH net-next v2 1/2] net: openvswitch: add masks cache hit counter Eelco Chaudron
@ 2020-07-23 12:58 ` Eelco Chaudron
  2020-07-23 16:42   ` Jakub Kicinski
  1 sibling, 1 reply; 6+ messages in thread
From: Eelco Chaudron @ 2020-07-23 12:58 UTC (permalink / raw)
  To: netdev; +Cc: davem, dev, kuba, pabeni, pshelar, fw

This patch makes the masks cache size configurable or, with
a size of 0, disables it.

Reviewed-by: Paolo Abeni <pabeni@redhat.com>
Signed-off-by: Eelco Chaudron <echaudro@redhat.com>
---
 include/uapi/linux/openvswitch.h |    1 
 net/openvswitch/datapath.c       |   14 ++++++
 net/openvswitch/flow_table.c     |   90 ++++++++++++++++++++++++++++++++++----
 net/openvswitch/flow_table.h     |   10 ++++
 4 files changed, 104 insertions(+), 11 deletions(-)

diff --git a/include/uapi/linux/openvswitch.h b/include/uapi/linux/openvswitch.h
index 7cb76e5ca7cf..8300cc29dec8 100644
--- a/include/uapi/linux/openvswitch.h
+++ b/include/uapi/linux/openvswitch.h
@@ -86,6 +86,7 @@ enum ovs_datapath_attr {
 	OVS_DP_ATTR_MEGAFLOW_STATS,	/* struct ovs_dp_megaflow_stats */
 	OVS_DP_ATTR_USER_FEATURES,	/* OVS_DP_F_*  */
 	OVS_DP_ATTR_PAD,
+	OVS_DP_ATTR_MASKS_CACHE_SIZE,
 	__OVS_DP_ATTR_MAX
 };
 
diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c
index a54df1fe3ec4..114b2ddb8037 100644
--- a/net/openvswitch/datapath.c
+++ b/net/openvswitch/datapath.c
@@ -1498,6 +1498,7 @@ static size_t ovs_dp_cmd_msg_size(void)
 	msgsize += nla_total_size_64bit(sizeof(struct ovs_dp_stats));
 	msgsize += nla_total_size_64bit(sizeof(struct ovs_dp_megaflow_stats));
 	msgsize += nla_total_size(sizeof(u32)); /* OVS_DP_ATTR_USER_FEATURES */
+	msgsize += nla_total_size(sizeof(u32)); /* OVS_DP_ATTR_MASKS_CACHE_SIZE */
 
 	return msgsize;
 }
@@ -1535,6 +1536,10 @@ static int ovs_dp_cmd_fill_info(struct datapath *dp, struct sk_buff *skb,
 	if (nla_put_u32(skb, OVS_DP_ATTR_USER_FEATURES, dp->user_features))
 		goto nla_put_failure;
 
+	if (nla_put_u32(skb, OVS_DP_ATTR_MASKS_CACHE_SIZE,
+			ovs_flow_tbl_masks_cache_size(&dp->table)))
+		goto nla_put_failure;
+
 	genlmsg_end(skb, ovs_header);
 	return 0;
 
@@ -1599,6 +1604,13 @@ static int ovs_dp_change(struct datapath *dp, struct nlattr *a[])
 #endif
 	}
 
+	if (a[OVS_DP_ATTR_MASKS_CACHE_SIZE]) {
+		u32 cache_size;
+
+		cache_size = nla_get_u32(a[OVS_DP_ATTR_MASKS_CACHE_SIZE]);
+		ovs_flow_tbl_masks_cache_resize(&dp->table, cache_size);
+	}
+
 	dp->user_features = user_features;
 
 	if (dp->user_features & OVS_DP_F_TC_RECIRC_SHARING)
@@ -1894,6 +1906,8 @@ static const struct nla_policy datapath_policy[OVS_DP_ATTR_MAX + 1] = {
 	[OVS_DP_ATTR_NAME] = { .type = NLA_NUL_STRING, .len = IFNAMSIZ - 1 },
 	[OVS_DP_ATTR_UPCALL_PID] = { .type = NLA_U32 },
 	[OVS_DP_ATTR_USER_FEATURES] = { .type = NLA_U32 },
+	[OVS_DP_ATTR_MASKS_CACHE_SIZE] =  NLA_POLICY_RANGE(NLA_U32, 0,
+		PCPU_MIN_UNIT_SIZE / sizeof(struct mask_cache_entry)),
 };
 
 static const struct genl_ops dp_datapath_genl_ops[] = {
diff --git a/net/openvswitch/flow_table.c b/net/openvswitch/flow_table.c
index a5912ea05352..386b7e7a0454 100644
--- a/net/openvswitch/flow_table.c
+++ b/net/openvswitch/flow_table.c
@@ -38,8 +38,8 @@
 #define MASK_ARRAY_SIZE_MIN	16
 #define REHASH_INTERVAL		(10 * 60 * HZ)
 
+#define MC_DEFAULT_HASH_ENTRIES	256
 #define MC_HASH_SHIFT		8
-#define MC_HASH_ENTRIES		(1u << MC_HASH_SHIFT)
 #define MC_HASH_SEGS		((sizeof(uint32_t) * 8) / MC_HASH_SHIFT)
 
 static struct kmem_cache *flow_cache;
@@ -341,15 +341,76 @@ static void flow_mask_remove(struct flow_table *tbl, struct sw_flow_mask *mask)
 	}
 }
 
+static void __mask_cache_destroy(struct mask_cache *mc)
+{
+	if (mc->mask_cache)
+		free_percpu(mc->mask_cache);
+	kfree(mc);
+}
+
+static void mask_cache_rcu_cb(struct rcu_head *rcu)
+{
+	struct mask_cache *mc = container_of(rcu, struct mask_cache, rcu);
+
+	__mask_cache_destroy(mc);
+}
+
+static struct mask_cache *tbl_mask_cache_alloc(u32 size)
+{
+	struct mask_cache_entry __percpu *cache = NULL;
+	struct mask_cache *new;
+
+	/* Only allow size to be 0, or a power of 2, and does not exceed
+	 * percpu allocation size.
+	 */
+	if ((size & (size - 1)) != 0 ||
+	    (size * sizeof(struct mask_cache_entry)) > PCPU_MIN_UNIT_SIZE)
+		return NULL;
+
+	new = kzalloc(sizeof(*new), GFP_KERNEL);
+	if (!new)
+		return NULL;
+
+	new->cache_size = size;
+	if (new->cache_size > 0) {
+		cache = __alloc_percpu(sizeof(struct mask_cache_entry) *
+				       new->cache_size,
+				       __alignof__(struct mask_cache_entry));
+
+		if (!cache) {
+			kfree(new);
+			return NULL;
+		}
+	}
+
+	new->mask_cache = cache;
+	return new;
+}
+
+void ovs_flow_tbl_masks_cache_resize(struct flow_table *table, u32 size)
+{
+	struct mask_cache *mc = rcu_dereference(table->mask_cache);
+	struct mask_cache *new;
+
+	if (size == mc->cache_size || (size & (size - 1)) != 0)
+		return;
+
+	new = tbl_mask_cache_alloc(size);
+	if (!new)
+		return;
+
+	rcu_assign_pointer(table->mask_cache, new);
+	call_rcu(&mc->rcu, mask_cache_rcu_cb);
+}
+
 int ovs_flow_tbl_init(struct flow_table *table)
 {
 	struct table_instance *ti, *ufid_ti;
+	struct mask_cache *mc;
 	struct mask_array *ma;
 
-	table->mask_cache = __alloc_percpu(sizeof(struct mask_cache_entry) *
-					   MC_HASH_ENTRIES,
-					   __alignof__(struct mask_cache_entry));
-	if (!table->mask_cache)
+	mc = tbl_mask_cache_alloc(MC_DEFAULT_HASH_ENTRIES);
+	if (!mc)
 		return -ENOMEM;
 
 	ma = tbl_mask_array_alloc(MASK_ARRAY_SIZE_MIN);
@@ -367,6 +428,7 @@ int ovs_flow_tbl_init(struct flow_table *table)
 	rcu_assign_pointer(table->ti, ti);
 	rcu_assign_pointer(table->ufid_ti, ufid_ti);
 	rcu_assign_pointer(table->mask_array, ma);
+	rcu_assign_pointer(table->mask_cache, mc);
 	table->last_rehash = jiffies;
 	table->count = 0;
 	table->ufid_count = 0;
@@ -377,7 +439,7 @@ int ovs_flow_tbl_init(struct flow_table *table)
 free_mask_array:
 	__mask_array_destroy(ma);
 free_mask_cache:
-	free_percpu(table->mask_cache);
+	__mask_cache_destroy(mc);
 	return -ENOMEM;
 }
 
@@ -454,7 +516,7 @@ void ovs_flow_tbl_destroy(struct flow_table *table)
 	struct table_instance *ti = rcu_dereference_raw(table->ti);
 	struct table_instance *ufid_ti = rcu_dereference_raw(table->ufid_ti);
 
-	free_percpu(table->mask_cache);
+	call_rcu(&table->mask_cache->rcu, mask_cache_rcu_cb);
 	call_rcu(&table->mask_array->rcu, mask_array_rcu_cb);
 	table_instance_destroy(table, ti, ufid_ti, false);
 }
@@ -724,6 +786,7 @@ struct sw_flow *ovs_flow_tbl_lookup_stats(struct flow_table *tbl,
 					  u32 *n_mask_hit,
 					  u32 *n_cache_hit)
 {
+	struct mask_cache *mc = rcu_dereference(tbl->mask_cache);
 	struct mask_array *ma = rcu_dereference(tbl->mask_array);
 	struct table_instance *ti = rcu_dereference(tbl->ti);
 	struct mask_cache_entry *entries, *ce;
@@ -733,7 +796,7 @@ struct sw_flow *ovs_flow_tbl_lookup_stats(struct flow_table *tbl,
 
 	*n_mask_hit = 0;
 	*n_cache_hit = 0;
-	if (unlikely(!skb_hash)) {
+	if (unlikely(!skb_hash || mc->cache_size == 0)) {
 		u32 mask_index = 0;
 		u32 cache = 0;
 
@@ -749,11 +812,11 @@ struct sw_flow *ovs_flow_tbl_lookup_stats(struct flow_table *tbl,
 
 	ce = NULL;
 	hash = skb_hash;
-	entries = this_cpu_ptr(tbl->mask_cache);
+	entries = this_cpu_ptr(mc->mask_cache);
 
 	/* Find the cache entry 'ce' to operate on. */
 	for (seg = 0; seg < MC_HASH_SEGS; seg++) {
-		int index = hash & (MC_HASH_ENTRIES - 1);
+		int index = hash & (mc->cache_size - 1);
 		struct mask_cache_entry *e;
 
 		e = &entries[index];
@@ -867,6 +930,13 @@ int ovs_flow_tbl_num_masks(const struct flow_table *table)
 	return READ_ONCE(ma->count);
 }
 
+u32 ovs_flow_tbl_masks_cache_size(const struct flow_table *table)
+{
+	struct mask_cache *mc = rcu_dereference(table->mask_cache);
+
+	return READ_ONCE(mc->cache_size);
+}
+
 static struct table_instance *table_instance_expand(struct table_instance *ti,
 						    bool ufid)
 {
diff --git a/net/openvswitch/flow_table.h b/net/openvswitch/flow_table.h
index 325e939371d8..f2dba952db2f 100644
--- a/net/openvswitch/flow_table.h
+++ b/net/openvswitch/flow_table.h
@@ -27,6 +27,12 @@ struct mask_cache_entry {
 	u32 mask_index;
 };
 
+struct mask_cache {
+	struct rcu_head rcu;
+	u32 cache_size;  /* Must be ^2 value. */
+	struct mask_cache_entry __percpu *mask_cache;
+};
+
 struct mask_count {
 	int index;
 	u64 counter;
@@ -53,7 +59,7 @@ struct table_instance {
 struct flow_table {
 	struct table_instance __rcu *ti;
 	struct table_instance __rcu *ufid_ti;
-	struct mask_cache_entry __percpu *mask_cache;
+	struct mask_cache __rcu *mask_cache;
 	struct mask_array __rcu *mask_array;
 	unsigned long last_rehash;
 	unsigned int count;
@@ -77,6 +83,8 @@ int ovs_flow_tbl_insert(struct flow_table *table, struct sw_flow *flow,
 			const struct sw_flow_mask *mask);
 void ovs_flow_tbl_remove(struct flow_table *table, struct sw_flow *flow);
 int  ovs_flow_tbl_num_masks(const struct flow_table *table);
+u32  ovs_flow_tbl_masks_cache_size(const struct flow_table *table);
+void ovs_flow_tbl_masks_cache_resize(struct flow_table *table, u32 size);
 struct sw_flow *ovs_flow_tbl_dump_next(struct table_instance *table,
 				       u32 *bucket, u32 *idx);
 struct sw_flow *ovs_flow_tbl_lookup_stats(struct flow_table *,


^ permalink raw reply related	[flat|nested] 6+ messages in thread

* Re: [PATCH net-next v2 2/2] net: openvswitch: make masks cache size configurable
  2020-07-23 12:58 ` [PATCH net-next v2 2/2] net: openvswitch: make masks cache size configurable Eelco Chaudron
@ 2020-07-23 16:42   ` Jakub Kicinski
  2020-07-24  8:00     ` Eelco Chaudron
  0 siblings, 1 reply; 6+ messages in thread
From: Jakub Kicinski @ 2020-07-23 16:42 UTC (permalink / raw)
  To: Eelco Chaudron; +Cc: netdev, davem, dev, pabeni, pshelar, fw

On Thu, 23 Jul 2020 14:58:31 +0200 Eelco Chaudron wrote:
> +	if ((size & (size - 1)) != 0 ||

is_power_of_2() ?

> +	    (size * sizeof(struct mask_cache_entry)) > PCPU_MIN_UNIT_SIZE)
> +		return NULL;

> +	new->cache_size = size;
> +	if (new->cache_size > 0) {
> +		cache = __alloc_percpu(sizeof(struct mask_cache_entry) *
> +				       new->cache_size,

array_size() ?

> +				       __alignof__(struct mask_cache_entry));
> +

No need for the new line here

> +		if (!cache) {
> +			kfree(new);
> +			return NULL;
> +		}
> +	}

> +	if (size == mc->cache_size || (size & (size - 1)) != 0)

why check "is power of 2" twice?

> @@ -454,7 +516,7 @@ void ovs_flow_tbl_destroy(struct flow_table *table)
>  	struct table_instance *ti = rcu_dereference_raw(table->ti);
>  	struct table_instance *ufid_ti = rcu_dereference_raw(table->ufid_ti);
>  
> -	free_percpu(table->mask_cache);
> +	call_rcu(&table->mask_cache->rcu, mask_cache_rcu_cb);
>  	call_rcu(&table->mask_array->rcu, mask_array_rcu_cb);
>  	table_instance_destroy(table, ti, ufid_ti, false);
>  }

This adds a new warning :(

net/openvswitch/flow_table.c:519:24: warning: incorrect type in argument 1 (different address spaces)
net/openvswitch/flow_table.c:519:24:    expected struct callback_head *head
net/openvswitch/flow_table.c:519:24:    got struct callback_head [noderef] __rcu *

^ permalink raw reply	[flat|nested] 6+ messages in thread

* Re: [ovs-dev] [PATCH net-next v2 1/2] net: openvswitch: add masks cache hit counter
  2020-07-23 12:58 ` [PATCH net-next v2 1/2] net: openvswitch: add masks cache hit counter Eelco Chaudron
@ 2020-07-24  2:37   ` Tonghao Zhang
  0 siblings, 0 replies; 6+ messages in thread
From: Tonghao Zhang @ 2020-07-24  2:37 UTC (permalink / raw)
  To: Eelco Chaudron
  Cc: Linux Kernel Network Developers, ovs dev, Florian Westphal,
	Jakub Kicinski, Paolo Abeni, David Miller

On Thu, Jul 23, 2020 at 8:58 PM Eelco Chaudron <echaudro@redhat.com> wrote:
>
> Add a counter that counts the number of masks cache hits, and
> export it through the megaflow netlink statistics.
>
> Reviewed-by: Paolo Abeni <pabeni@redhat.com>
> Signed-off-by: Eelco Chaudron <echaudro@redhat.com>
LGTM
Reviewed-by: Tonghao Zhang <xiangxia.m.yue@gmail.com>

> ---
>  include/uapi/linux/openvswitch.h |    2 +-
>  net/openvswitch/datapath.c       |    5 ++++-
>  net/openvswitch/datapath.h       |    3 +++
>  net/openvswitch/flow_table.c     |   19 ++++++++++++++-----
>  net/openvswitch/flow_table.h     |    3 ++-
>  5 files changed, 24 insertions(+), 8 deletions(-)
>
> diff --git a/include/uapi/linux/openvswitch.h b/include/uapi/linux/openvswitch.h
> index 9b14519e74d9..7cb76e5ca7cf 100644
> --- a/include/uapi/linux/openvswitch.h
> +++ b/include/uapi/linux/openvswitch.h
> @@ -102,8 +102,8 @@ struct ovs_dp_megaflow_stats {
>         __u64 n_mask_hit;        /* Number of masks used for flow lookups. */
>         __u32 n_masks;           /* Number of masks for the datapath. */
>         __u32 pad0;              /* Pad for future expension. */
> +       __u64 n_cache_hit;       /* Number of cache matches for flow lookups. */
>         __u64 pad1;              /* Pad for future expension. */
> -       __u64 pad2;              /* Pad for future expension. */
>  };
>
>  struct ovs_vport_stats {
> diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c
> index 95805f0e27bd..a54df1fe3ec4 100644
> --- a/net/openvswitch/datapath.c
> +++ b/net/openvswitch/datapath.c
> @@ -225,13 +225,14 @@ void ovs_dp_process_packet(struct sk_buff *skb, struct sw_flow_key *key)
>         struct dp_stats_percpu *stats;
>         u64 *stats_counter;
>         u32 n_mask_hit;
> +       u32 n_cache_hit;
>         int error;
>
>         stats = this_cpu_ptr(dp->stats_percpu);
>
>         /* Look up flow. */
>         flow = ovs_flow_tbl_lookup_stats(&dp->table, key, skb_get_hash(skb),
> -                                        &n_mask_hit);
> +                                        &n_mask_hit, &n_cache_hit);
>         if (unlikely(!flow)) {
>                 struct dp_upcall_info upcall;
>
> @@ -262,6 +263,7 @@ void ovs_dp_process_packet(struct sk_buff *skb, struct sw_flow_key *key)
>         u64_stats_update_begin(&stats->syncp);
>         (*stats_counter)++;
>         stats->n_mask_hit += n_mask_hit;
> +       stats->n_cache_hit += n_cache_hit;
>         u64_stats_update_end(&stats->syncp);
>  }
>
> @@ -699,6 +701,7 @@ static void get_dp_stats(const struct datapath *dp, struct ovs_dp_stats *stats,
>                 stats->n_missed += local_stats.n_missed;
>                 stats->n_lost += local_stats.n_lost;
>                 mega_stats->n_mask_hit += local_stats.n_mask_hit;
> +               mega_stats->n_cache_hit += local_stats.n_cache_hit;
>         }
>  }
>
> diff --git a/net/openvswitch/datapath.h b/net/openvswitch/datapath.h
> index 697a2354194b..86d78613edb4 100644
> --- a/net/openvswitch/datapath.h
> +++ b/net/openvswitch/datapath.h
> @@ -38,12 +38,15 @@
>   * @n_mask_hit: Number of masks looked up for flow match.
>   *   @n_mask_hit / (@n_hit + @n_missed)  will be the average masks looked
>   *   up per packet.
> + * @n_cache_hit: The number of received packets that had their mask found using
> + * the mask cache.
>   */
>  struct dp_stats_percpu {
>         u64 n_hit;
>         u64 n_missed;
>         u64 n_lost;
>         u64 n_mask_hit;
> +       u64 n_cache_hit;
>         struct u64_stats_sync syncp;
>  };
>
> diff --git a/net/openvswitch/flow_table.c b/net/openvswitch/flow_table.c
> index af22c9ee28dd..a5912ea05352 100644
> --- a/net/openvswitch/flow_table.c
> +++ b/net/openvswitch/flow_table.c
> @@ -667,6 +667,7 @@ static struct sw_flow *flow_lookup(struct flow_table *tbl,
>                                    struct mask_array *ma,
>                                    const struct sw_flow_key *key,
>                                    u32 *n_mask_hit,
> +                                  u32 *n_cache_hit,
>                                    u32 *index)
>  {
>         u64 *usage_counters = this_cpu_ptr(ma->masks_usage_cntr);
> @@ -682,6 +683,7 @@ static struct sw_flow *flow_lookup(struct flow_table *tbl,
>                                 u64_stats_update_begin(&ma->syncp);
>                                 usage_counters[*index]++;
>                                 u64_stats_update_end(&ma->syncp);
> +                               (*n_cache_hit)++;
>                                 return flow;
>                         }
>                 }
> @@ -719,7 +721,8 @@ static struct sw_flow *flow_lookup(struct flow_table *tbl,
>  struct sw_flow *ovs_flow_tbl_lookup_stats(struct flow_table *tbl,
>                                           const struct sw_flow_key *key,
>                                           u32 skb_hash,
> -                                         u32 *n_mask_hit)
> +                                         u32 *n_mask_hit,
> +                                         u32 *n_cache_hit)
>  {
>         struct mask_array *ma = rcu_dereference(tbl->mask_array);
>         struct table_instance *ti = rcu_dereference(tbl->ti);
> @@ -729,10 +732,13 @@ struct sw_flow *ovs_flow_tbl_lookup_stats(struct flow_table *tbl,
>         int seg;
>
>         *n_mask_hit = 0;
> +       *n_cache_hit = 0;
>         if (unlikely(!skb_hash)) {
>                 u32 mask_index = 0;
> +               u32 cache = 0;
>
> -               return flow_lookup(tbl, ti, ma, key, n_mask_hit, &mask_index);
> +               return flow_lookup(tbl, ti, ma, key, n_mask_hit, &cache,
> +                                  &mask_index);
>         }
>
>         /* Pre and post recirulation flows usually have the same skb_hash
> @@ -753,7 +759,7 @@ struct sw_flow *ovs_flow_tbl_lookup_stats(struct flow_table *tbl,
>                 e = &entries[index];
>                 if (e->skb_hash == skb_hash) {
>                         flow = flow_lookup(tbl, ti, ma, key, n_mask_hit,
> -                                          &e->mask_index);
> +                                          n_cache_hit, &e->mask_index);
>                         if (!flow)
>                                 e->skb_hash = 0;
>                         return flow;
> @@ -766,10 +772,12 @@ struct sw_flow *ovs_flow_tbl_lookup_stats(struct flow_table *tbl,
>         }
>
>         /* Cache miss, do full lookup. */
> -       flow = flow_lookup(tbl, ti, ma, key, n_mask_hit, &ce->mask_index);
> +       flow = flow_lookup(tbl, ti, ma, key, n_mask_hit, n_cache_hit,
> +                          &ce->mask_index);
>         if (flow)
>                 ce->skb_hash = skb_hash;
>
> +       *n_cache_hit = 0;
>         return flow;
>  }
>
> @@ -779,9 +787,10 @@ struct sw_flow *ovs_flow_tbl_lookup(struct flow_table *tbl,
>         struct table_instance *ti = rcu_dereference_ovsl(tbl->ti);
>         struct mask_array *ma = rcu_dereference_ovsl(tbl->mask_array);
>         u32 __always_unused n_mask_hit;
> +       u32 __always_unused n_cache_hit;
>         u32 index = 0;
>
> -       return flow_lookup(tbl, ti, ma, key, &n_mask_hit, &index);
> +       return flow_lookup(tbl, ti, ma, key, &n_mask_hit, &n_cache_hit, &index);
>  }
>
>  struct sw_flow *ovs_flow_tbl_lookup_exact(struct flow_table *tbl,
> diff --git a/net/openvswitch/flow_table.h b/net/openvswitch/flow_table.h
> index 1f664b050e3b..325e939371d8 100644
> --- a/net/openvswitch/flow_table.h
> +++ b/net/openvswitch/flow_table.h
> @@ -82,7 +82,8 @@ struct sw_flow *ovs_flow_tbl_dump_next(struct table_instance *table,
>  struct sw_flow *ovs_flow_tbl_lookup_stats(struct flow_table *,
>                                           const struct sw_flow_key *,
>                                           u32 skb_hash,
> -                                         u32 *n_mask_hit);
> +                                         u32 *n_mask_hit,
> +                                         u32 *n_cache_hit);
>  struct sw_flow *ovs_flow_tbl_lookup(struct flow_table *,
>                                     const struct sw_flow_key *);
>  struct sw_flow *ovs_flow_tbl_lookup_exact(struct flow_table *tbl,
>
> _______________________________________________
> dev mailing list
> dev@openvswitch.org
> https://mail.openvswitch.org/mailman/listinfo/ovs-dev



--
Best regards, Tonghao

^ permalink raw reply	[flat|nested] 6+ messages in thread

* Re: [PATCH net-next v2 2/2] net: openvswitch: make masks cache size configurable
  2020-07-23 16:42   ` Jakub Kicinski
@ 2020-07-24  8:00     ` Eelco Chaudron
  0 siblings, 0 replies; 6+ messages in thread
From: Eelco Chaudron @ 2020-07-24  8:00 UTC (permalink / raw)
  To: Jakub Kicinski; +Cc: netdev, davem, dev, pabeni, pshelar, fw



On 23 Jul 2020, at 18:42, Jakub Kicinski wrote:

> On Thu, 23 Jul 2020 14:58:31 +0200 Eelco Chaudron wrote:
>> +	if ((size & (size - 1)) != 0 ||
>
> is_power_of_2() ?

Was not aware of this macro, will replace…
>
>> +	    (size * sizeof(struct mask_cache_entry)) > PCPU_MIN_UNIT_SIZE)
>> +		return NULL;
>
>> +	new->cache_size = size;
>> +	if (new->cache_size > 0) {
>> +		cache = __alloc_percpu(sizeof(struct mask_cache_entry) *
>> +				       new->cache_size,
>
> array_size() ?

Will change to:

		cache = __alloc_percpu(array_size(
					       sizeof(struct mask_cache_entry),
					       new->cache_size),
				       __alignof__(struct mask_cache_entry));
		if (!cache) {
>
>> +				       __alignof__(struct mask_cache_entry));
>> +
>
> No need for the new line here

Will remove

>> +		if (!cache) {
>> +			kfree(new);
>> +			return NULL;
>> +		}
>> +	}
>
>> +	if (size == mc->cache_size || (size & (size - 1)) != 0)
>
> why check "is power of 2" twice?

Will remove as it will always call tbl_mask_cache_alloc() which has the 
check above.

>
>> @@ -454,7 +516,7 @@ void ovs_flow_tbl_destroy(struct flow_table 
>> *table)
>>  	struct table_instance *ti = rcu_dereference_raw(table->ti);
>>  	struct table_instance *ufid_ti = 
>> rcu_dereference_raw(table->ufid_ti);
>>
>> -	free_percpu(table->mask_cache);
>> +	call_rcu(&table->mask_cache->rcu, mask_cache_rcu_cb);
>>  	call_rcu(&table->mask_array->rcu, mask_array_rcu_cb);
>>  	table_instance_destroy(table, ti, ufid_ti, false);
>>  }
>
> This adds a new warning :(
>
> net/openvswitch/flow_table.c:519:24: warning: incorrect type in 
> argument 1 (different address spaces)
> net/openvswitch/flow_table.c:519:24:    expected struct callback_head 
> *head
> net/openvswitch/flow_table.c:519:24:    got struct callback_head 
> [noderef] __rcu *

:( Yes my diff with older did not work… Sure I’ve fixed them all now 
:)


Will send out a V3 after some testing…


^ permalink raw reply	[flat|nested] 6+ messages in thread

end of thread, other threads:[~2020-07-24  8:00 UTC | newest]

Thread overview: 6+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2020-07-23 12:58 [PATCH net-next v2 0/2] net: openvswitch: masks cache enhancements Eelco Chaudron
2020-07-23 12:58 ` [PATCH net-next v2 1/2] net: openvswitch: add masks cache hit counter Eelco Chaudron
2020-07-24  2:37   ` [ovs-dev] " Tonghao Zhang
2020-07-23 12:58 ` [PATCH net-next v2 2/2] net: openvswitch: make masks cache size configurable Eelco Chaudron
2020-07-23 16:42   ` Jakub Kicinski
2020-07-24  8:00     ` Eelco Chaudron

This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.