* [PATCH net-next 0/2] net: openvswitch: masks cache enhancements
@ 2020-07-22 8:27 Eelco Chaudron
2020-07-22 8:27 ` [PATCH net-next 1/2] net: openvswitch: add masks cache hit counter Eelco Chaudron
` (2 more replies)
0 siblings, 3 replies; 11+ messages in thread
From: Eelco Chaudron @ 2020-07-22 8:27 UTC (permalink / raw)
To: netdev; +Cc: davem, dev, kuba, pabeni, pshelar
This patchset adds two enhancements to the Open vSwitch masks cache.
Signed-off-by: Eelco Chaudron <echaudro@redhat.com>
Eelco Chaudron (2):
net: openvswitch: add masks cache hit counter
net: openvswitch: make masks cache size configurable
include/uapi/linux/openvswitch.h | 3 +
net/openvswitch/datapath.c | 16 +++++-
net/openvswitch/datapath.h | 3 +
net/openvswitch/flow_table.c | 105 +++++++++++++++++++++++++++++++++-----
net/openvswitch/flow_table.h | 13 ++++-
5 files changed, 122 insertions(+), 18 deletions(-)
^ permalink raw reply [flat|nested] 11+ messages in thread
* [PATCH net-next 1/2] net: openvswitch: add masks cache hit counter
2020-07-22 8:27 [PATCH net-next 0/2] net: openvswitch: masks cache enhancements Eelco Chaudron
@ 2020-07-22 8:27 ` Eelco Chaudron
2020-07-22 8:27 ` [PATCH net-next 2/2] net: openvswitch: make masks cache size configurable Eelco Chaudron
2020-07-22 8:37 ` [PATCH net-next 0/2] net: openvswitch: masks cache enhancements Eelco Chaudron
2 siblings, 0 replies; 11+ messages in thread
From: Eelco Chaudron @ 2020-07-22 8:27 UTC (permalink / raw)
To: netdev; +Cc: davem, dev, kuba, pabeni, pshelar
Add a counter that counts the number of masks cache hits, and
export it through the megaflow netlink statistics.
Reviewed-by: Paolo Abeni <pabeni@redhat.com>
Signed-off-by: Eelco Chaudron <echaudro@redhat.com>
---
include/uapi/linux/openvswitch.h | 2 +-
net/openvswitch/datapath.c | 5 ++++-
net/openvswitch/datapath.h | 3 +++
net/openvswitch/flow_table.c | 19 ++++++++++++++-----
net/openvswitch/flow_table.h | 3 ++-
5 files changed, 24 insertions(+), 8 deletions(-)
diff --git a/include/uapi/linux/openvswitch.h b/include/uapi/linux/openvswitch.h
index 9b14519e74d9..7cb76e5ca7cf 100644
--- a/include/uapi/linux/openvswitch.h
+++ b/include/uapi/linux/openvswitch.h
@@ -102,8 +102,8 @@ struct ovs_dp_megaflow_stats {
__u64 n_mask_hit; /* Number of masks used for flow lookups. */
__u32 n_masks; /* Number of masks for the datapath. */
__u32 pad0; /* Pad for future expension. */
+ __u64 n_cache_hit; /* Number of cache matches for flow lookups. */
__u64 pad1; /* Pad for future expension. */
- __u64 pad2; /* Pad for future expension. */
};
struct ovs_vport_stats {
diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c
index 95805f0e27bd..a54df1fe3ec4 100644
--- a/net/openvswitch/datapath.c
+++ b/net/openvswitch/datapath.c
@@ -225,13 +225,14 @@ void ovs_dp_process_packet(struct sk_buff *skb, struct sw_flow_key *key)
struct dp_stats_percpu *stats;
u64 *stats_counter;
u32 n_mask_hit;
+ u32 n_cache_hit;
int error;
stats = this_cpu_ptr(dp->stats_percpu);
/* Look up flow. */
flow = ovs_flow_tbl_lookup_stats(&dp->table, key, skb_get_hash(skb),
- &n_mask_hit);
+ &n_mask_hit, &n_cache_hit);
if (unlikely(!flow)) {
struct dp_upcall_info upcall;
@@ -262,6 +263,7 @@ void ovs_dp_process_packet(struct sk_buff *skb, struct sw_flow_key *key)
u64_stats_update_begin(&stats->syncp);
(*stats_counter)++;
stats->n_mask_hit += n_mask_hit;
+ stats->n_cache_hit += n_cache_hit;
u64_stats_update_end(&stats->syncp);
}
@@ -699,6 +701,7 @@ static void get_dp_stats(const struct datapath *dp, struct ovs_dp_stats *stats,
stats->n_missed += local_stats.n_missed;
stats->n_lost += local_stats.n_lost;
mega_stats->n_mask_hit += local_stats.n_mask_hit;
+ mega_stats->n_cache_hit += local_stats.n_cache_hit;
}
}
diff --git a/net/openvswitch/datapath.h b/net/openvswitch/datapath.h
index 697a2354194b..86d78613edb4 100644
--- a/net/openvswitch/datapath.h
+++ b/net/openvswitch/datapath.h
@@ -38,12 +38,15 @@
* @n_mask_hit: Number of masks looked up for flow match.
* @n_mask_hit / (@n_hit + @n_missed) will be the average masks looked
* up per packet.
+ * @n_cache_hit: The number of received packets that had their mask found using
+ * the mask cache.
*/
struct dp_stats_percpu {
u64 n_hit;
u64 n_missed;
u64 n_lost;
u64 n_mask_hit;
+ u64 n_cache_hit;
struct u64_stats_sync syncp;
};
diff --git a/net/openvswitch/flow_table.c b/net/openvswitch/flow_table.c
index af22c9ee28dd..a5912ea05352 100644
--- a/net/openvswitch/flow_table.c
+++ b/net/openvswitch/flow_table.c
@@ -667,6 +667,7 @@ static struct sw_flow *flow_lookup(struct flow_table *tbl,
struct mask_array *ma,
const struct sw_flow_key *key,
u32 *n_mask_hit,
+ u32 *n_cache_hit,
u32 *index)
{
u64 *usage_counters = this_cpu_ptr(ma->masks_usage_cntr);
@@ -682,6 +683,7 @@ static struct sw_flow *flow_lookup(struct flow_table *tbl,
u64_stats_update_begin(&ma->syncp);
usage_counters[*index]++;
u64_stats_update_end(&ma->syncp);
+ (*n_cache_hit)++;
return flow;
}
}
@@ -719,7 +721,8 @@ static struct sw_flow *flow_lookup(struct flow_table *tbl,
struct sw_flow *ovs_flow_tbl_lookup_stats(struct flow_table *tbl,
const struct sw_flow_key *key,
u32 skb_hash,
- u32 *n_mask_hit)
+ u32 *n_mask_hit,
+ u32 *n_cache_hit)
{
struct mask_array *ma = rcu_dereference(tbl->mask_array);
struct table_instance *ti = rcu_dereference(tbl->ti);
@@ -729,10 +732,13 @@ struct sw_flow *ovs_flow_tbl_lookup_stats(struct flow_table *tbl,
int seg;
*n_mask_hit = 0;
+ *n_cache_hit = 0;
if (unlikely(!skb_hash)) {
u32 mask_index = 0;
+ u32 cache = 0;
- return flow_lookup(tbl, ti, ma, key, n_mask_hit, &mask_index);
+ return flow_lookup(tbl, ti, ma, key, n_mask_hit, &cache,
+ &mask_index);
}
/* Pre and post recirulation flows usually have the same skb_hash
@@ -753,7 +759,7 @@ struct sw_flow *ovs_flow_tbl_lookup_stats(struct flow_table *tbl,
e = &entries[index];
if (e->skb_hash == skb_hash) {
flow = flow_lookup(tbl, ti, ma, key, n_mask_hit,
- &e->mask_index);
+ n_cache_hit, &e->mask_index);
if (!flow)
e->skb_hash = 0;
return flow;
@@ -766,10 +772,12 @@ struct sw_flow *ovs_flow_tbl_lookup_stats(struct flow_table *tbl,
}
/* Cache miss, do full lookup. */
- flow = flow_lookup(tbl, ti, ma, key, n_mask_hit, &ce->mask_index);
+ flow = flow_lookup(tbl, ti, ma, key, n_mask_hit, n_cache_hit,
+ &ce->mask_index);
if (flow)
ce->skb_hash = skb_hash;
+ *n_cache_hit = 0;
return flow;
}
@@ -779,9 +787,10 @@ struct sw_flow *ovs_flow_tbl_lookup(struct flow_table *tbl,
struct table_instance *ti = rcu_dereference_ovsl(tbl->ti);
struct mask_array *ma = rcu_dereference_ovsl(tbl->mask_array);
u32 __always_unused n_mask_hit;
+ u32 __always_unused n_cache_hit;
u32 index = 0;
- return flow_lookup(tbl, ti, ma, key, &n_mask_hit, &index);
+ return flow_lookup(tbl, ti, ma, key, &n_mask_hit, &n_cache_hit, &index);
}
struct sw_flow *ovs_flow_tbl_lookup_exact(struct flow_table *tbl,
diff --git a/net/openvswitch/flow_table.h b/net/openvswitch/flow_table.h
index 1f664b050e3b..325e939371d8 100644
--- a/net/openvswitch/flow_table.h
+++ b/net/openvswitch/flow_table.h
@@ -82,7 +82,8 @@ struct sw_flow *ovs_flow_tbl_dump_next(struct table_instance *table,
struct sw_flow *ovs_flow_tbl_lookup_stats(struct flow_table *,
const struct sw_flow_key *,
u32 skb_hash,
- u32 *n_mask_hit);
+ u32 *n_mask_hit,
+ u32 *n_cache_hit);
struct sw_flow *ovs_flow_tbl_lookup(struct flow_table *,
const struct sw_flow_key *);
struct sw_flow *ovs_flow_tbl_lookup_exact(struct flow_table *tbl,
^ permalink raw reply related [flat|nested] 11+ messages in thread
* [PATCH net-next 2/2] net: openvswitch: make masks cache size configurable
2020-07-22 8:27 [PATCH net-next 0/2] net: openvswitch: masks cache enhancements Eelco Chaudron
2020-07-22 8:27 ` [PATCH net-next 1/2] net: openvswitch: add masks cache hit counter Eelco Chaudron
@ 2020-07-22 8:27 ` Eelco Chaudron
2020-07-22 15:21 ` Jakub Kicinski
` (2 more replies)
2020-07-22 8:37 ` [PATCH net-next 0/2] net: openvswitch: masks cache enhancements Eelco Chaudron
2 siblings, 3 replies; 11+ messages in thread
From: Eelco Chaudron @ 2020-07-22 8:27 UTC (permalink / raw)
To: netdev; +Cc: davem, dev, kuba, pabeni, pshelar
This patch makes the masks cache size configurable, or with
a size of 0, disables it.
Reviewed-by: Paolo Abeni <pabeni@redhat.com>
Signed-off-by: Eelco Chaudron <echaudro@redhat.com>
---
include/uapi/linux/openvswitch.h | 1
net/openvswitch/datapath.c | 11 +++++
net/openvswitch/flow_table.c | 86 ++++++++++++++++++++++++++++++++++----
net/openvswitch/flow_table.h | 10 ++++
4 files changed, 98 insertions(+), 10 deletions(-)
diff --git a/include/uapi/linux/openvswitch.h b/include/uapi/linux/openvswitch.h
index 7cb76e5ca7cf..8300cc29dec8 100644
--- a/include/uapi/linux/openvswitch.h
+++ b/include/uapi/linux/openvswitch.h
@@ -86,6 +86,7 @@ enum ovs_datapath_attr {
OVS_DP_ATTR_MEGAFLOW_STATS, /* struct ovs_dp_megaflow_stats */
OVS_DP_ATTR_USER_FEATURES, /* OVS_DP_F_* */
OVS_DP_ATTR_PAD,
+ OVS_DP_ATTR_MASKS_CACHE_SIZE,
__OVS_DP_ATTR_MAX
};
diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c
index a54df1fe3ec4..f08aa1fb9f4b 100644
--- a/net/openvswitch/datapath.c
+++ b/net/openvswitch/datapath.c
@@ -1535,6 +1535,10 @@ static int ovs_dp_cmd_fill_info(struct datapath *dp, struct sk_buff *skb,
if (nla_put_u32(skb, OVS_DP_ATTR_USER_FEATURES, dp->user_features))
goto nla_put_failure;
+ if (nla_put_u32(skb, OVS_DP_ATTR_MASKS_CACHE_SIZE,
+ ovs_flow_tbl_masks_cache_size(&dp->table)))
+ goto nla_put_failure;
+
genlmsg_end(skb, ovs_header);
return 0;
@@ -1599,6 +1603,13 @@ static int ovs_dp_change(struct datapath *dp, struct nlattr *a[])
#endif
}
+ if (a[OVS_DP_ATTR_MASKS_CACHE_SIZE]) {
+ u32 cache_size;
+
+ cache_size = nla_get_u32(a[OVS_DP_ATTR_MASKS_CACHE_SIZE]);
+ ovs_flow_tbl_masks_cache_resize(&dp->table, cache_size);
+ }
+
dp->user_features = user_features;
if (dp->user_features & OVS_DP_F_TC_RECIRC_SHARING)
diff --git a/net/openvswitch/flow_table.c b/net/openvswitch/flow_table.c
index a5912ea05352..1c3590bcf28b 100644
--- a/net/openvswitch/flow_table.c
+++ b/net/openvswitch/flow_table.c
@@ -38,8 +38,8 @@
#define MASK_ARRAY_SIZE_MIN 16
#define REHASH_INTERVAL (10 * 60 * HZ)
+#define MC_DEFAULT_HASH_ENTRIES 256
#define MC_HASH_SHIFT 8
-#define MC_HASH_ENTRIES (1u << MC_HASH_SHIFT)
#define MC_HASH_SEGS ((sizeof(uint32_t) * 8) / MC_HASH_SHIFT)
static struct kmem_cache *flow_cache;
@@ -341,14 +341,74 @@ static void flow_mask_remove(struct flow_table *tbl, struct sw_flow_mask *mask)
}
}
+static void __mask_cache_destroy(struct mask_cache *mc)
+{
+ if (mc->mask_cache)
+ free_percpu(mc->mask_cache);
+ kfree(mc);
+}
+
+static void mask_cache_rcu_cb(struct rcu_head *rcu)
+{
+ struct mask_cache *mc = container_of(rcu, struct mask_cache, rcu);
+
+ __mask_cache_destroy(mc);
+}
+
+static struct mask_cache *tbl_mask_cache_alloc(u32 size)
+{
+ struct mask_cache_entry *cache = NULL;
+ struct mask_cache *new;
+
+ /* Only allow size to be 0, or a power of 2, and does not exceed
+ * percpu allocation size.
+ */
+ if ((size & (size - 1)) != 0 ||
+ (size * sizeof(struct mask_cache_entry)) > PCPU_MIN_UNIT_SIZE)
+ return NULL;
+
+ new = kzalloc(sizeof(*new), GFP_KERNEL);
+ if (!new)
+ return NULL;
+
+ new->cache_size = size;
+ if (new->cache_size > 0) {
+ cache = __alloc_percpu(sizeof(struct mask_cache_entry) *
+ new->cache_size,
+ __alignof__(struct mask_cache_entry));
+
+ if (!cache) {
+ kfree(new);
+ return NULL;
+ }
+ }
+
+ new->mask_cache = cache;
+ return new;
+}
+
+void ovs_flow_tbl_masks_cache_resize(struct flow_table *table, u32 size)
+{
+ struct mask_cache *mc = rcu_dereference(table->mask_cache);
+ struct mask_cache *new;
+
+ if (size == mc->cache_size || (size & (size - 1)) != 0)
+ return;
+
+ new = tbl_mask_cache_alloc(size);
+ if (!new)
+ return;
+
+ rcu_assign_pointer(table->mask_cache, new);
+ call_rcu(&mc->rcu, mask_cache_rcu_cb);
+}
+
int ovs_flow_tbl_init(struct flow_table *table)
{
struct table_instance *ti, *ufid_ti;
struct mask_array *ma;
- table->mask_cache = __alloc_percpu(sizeof(struct mask_cache_entry) *
- MC_HASH_ENTRIES,
- __alignof__(struct mask_cache_entry));
+ table->mask_cache = tbl_mask_cache_alloc(MC_DEFAULT_HASH_ENTRIES);
if (!table->mask_cache)
return -ENOMEM;
@@ -377,7 +437,7 @@ int ovs_flow_tbl_init(struct flow_table *table)
free_mask_array:
__mask_array_destroy(ma);
free_mask_cache:
- free_percpu(table->mask_cache);
+ __mask_cache_destroy(table->mask_cache);
return -ENOMEM;
}
@@ -454,7 +514,7 @@ void ovs_flow_tbl_destroy(struct flow_table *table)
struct table_instance *ti = rcu_dereference_raw(table->ti);
struct table_instance *ufid_ti = rcu_dereference_raw(table->ufid_ti);
- free_percpu(table->mask_cache);
+ call_rcu(&table->mask_cache->rcu, mask_cache_rcu_cb);
call_rcu(&table->mask_array->rcu, mask_array_rcu_cb);
table_instance_destroy(table, ti, ufid_ti, false);
}
@@ -724,6 +784,7 @@ struct sw_flow *ovs_flow_tbl_lookup_stats(struct flow_table *tbl,
u32 *n_mask_hit,
u32 *n_cache_hit)
{
+ struct mask_cache *mc = rcu_dereference(tbl->mask_cache);
struct mask_array *ma = rcu_dereference(tbl->mask_array);
struct table_instance *ti = rcu_dereference(tbl->ti);
struct mask_cache_entry *entries, *ce;
@@ -733,7 +794,7 @@ struct sw_flow *ovs_flow_tbl_lookup_stats(struct flow_table *tbl,
*n_mask_hit = 0;
*n_cache_hit = 0;
- if (unlikely(!skb_hash)) {
+ if (unlikely(!skb_hash || mc->cache_size == 0)) {
u32 mask_index = 0;
u32 cache = 0;
@@ -749,11 +810,11 @@ struct sw_flow *ovs_flow_tbl_lookup_stats(struct flow_table *tbl,
ce = NULL;
hash = skb_hash;
- entries = this_cpu_ptr(tbl->mask_cache);
+ entries = this_cpu_ptr(mc->mask_cache);
/* Find the cache entry 'ce' to operate on. */
for (seg = 0; seg < MC_HASH_SEGS; seg++) {
- int index = hash & (MC_HASH_ENTRIES - 1);
+ int index = hash & (mc->cache_size - 1);
struct mask_cache_entry *e;
e = &entries[index];
@@ -867,6 +928,13 @@ int ovs_flow_tbl_num_masks(const struct flow_table *table)
return READ_ONCE(ma->count);
}
+u32 ovs_flow_tbl_masks_cache_size(const struct flow_table *table)
+{
+ struct mask_cache *mc = rcu_dereference(table->mask_cache);
+
+ return READ_ONCE(mc->cache_size);
+}
+
static struct table_instance *table_instance_expand(struct table_instance *ti,
bool ufid)
{
diff --git a/net/openvswitch/flow_table.h b/net/openvswitch/flow_table.h
index 325e939371d8..f2dba952db2f 100644
--- a/net/openvswitch/flow_table.h
+++ b/net/openvswitch/flow_table.h
@@ -27,6 +27,12 @@ struct mask_cache_entry {
u32 mask_index;
};
+struct mask_cache {
+ struct rcu_head rcu;
+ u32 cache_size; /* Must be ^2 value. */
+ struct mask_cache_entry __percpu *mask_cache;
+};
+
struct mask_count {
int index;
u64 counter;
@@ -53,7 +59,7 @@ struct table_instance {
struct flow_table {
struct table_instance __rcu *ti;
struct table_instance __rcu *ufid_ti;
- struct mask_cache_entry __percpu *mask_cache;
+ struct mask_cache __rcu *mask_cache;
struct mask_array __rcu *mask_array;
unsigned long last_rehash;
unsigned int count;
@@ -77,6 +83,8 @@ int ovs_flow_tbl_insert(struct flow_table *table, struct sw_flow *flow,
const struct sw_flow_mask *mask);
void ovs_flow_tbl_remove(struct flow_table *table, struct sw_flow *flow);
int ovs_flow_tbl_num_masks(const struct flow_table *table);
+u32 ovs_flow_tbl_masks_cache_size(const struct flow_table *table);
+void ovs_flow_tbl_masks_cache_resize(struct flow_table *table, u32 size);
struct sw_flow *ovs_flow_tbl_dump_next(struct table_instance *table,
u32 *bucket, u32 *idx);
struct sw_flow *ovs_flow_tbl_lookup_stats(struct flow_table *,
^ permalink raw reply related [flat|nested] 11+ messages in thread
* Re: [PATCH net-next 0/2] net: openvswitch: masks cache enhancements
2020-07-22 8:27 [PATCH net-next 0/2] net: openvswitch: masks cache enhancements Eelco Chaudron
2020-07-22 8:27 ` [PATCH net-next 1/2] net: openvswitch: add masks cache hit counter Eelco Chaudron
2020-07-22 8:27 ` [PATCH net-next 2/2] net: openvswitch: make masks cache size configurable Eelco Chaudron
@ 2020-07-22 8:37 ` Eelco Chaudron
2 siblings, 0 replies; 11+ messages in thread
From: Eelco Chaudron @ 2020-07-22 8:37 UTC (permalink / raw)
To: netdev; +Cc: davem, dev, kuba, pabeni, pshelar
On 22 Jul 2020, at 10:27, Eelco Chaudron wrote:
> This patchset adds two enhancements to the Open vSwitch masks cache.
>
> Signed-off-by: Eelco Chaudron <echaudro@redhat.com>
>
> Eelco Chaudron (2):
> net: openvswitch: add masks cache hit counter
> net: openvswitch: make masks cache size configurable
>
>
> include/uapi/linux/openvswitch.h | 3 +
> net/openvswitch/datapath.c | 16 +++++-
> net/openvswitch/datapath.h | 3 +
> net/openvswitch/flow_table.c | 105
> +++++++++++++++++++++++++++++++++-----
> net/openvswitch/flow_table.h | 13 ++++-
> 5 files changed, 122 insertions(+), 18 deletions(-)
FYI the userspace patch can be found here:
https://mail.openvswitch.org/pipermail/ovs-dev/2020-July/373159.html
^ permalink raw reply [flat|nested] 11+ messages in thread
* Re: [PATCH net-next 2/2] net: openvswitch: make masks cache size configurable
2020-07-22 8:27 ` [PATCH net-next 2/2] net: openvswitch: make masks cache size configurable Eelco Chaudron
@ 2020-07-22 15:21 ` Jakub Kicinski
2020-07-22 15:31 ` Eelco Chaudron
2020-07-22 17:40 ` kernel test robot
2020-07-22 19:22 ` Florian Westphal
2 siblings, 1 reply; 11+ messages in thread
From: Jakub Kicinski @ 2020-07-22 15:21 UTC (permalink / raw)
To: Eelco Chaudron; +Cc: netdev, davem, dev, pabeni, pshelar
On Wed, 22 Jul 2020 10:27:52 +0200 Eelco Chaudron wrote:
> This patch makes the masks cache size configurable, or with
> a size of 0, disable it.
>
> Reviewed-by: Paolo Abeni <pabeni@redhat.com>
> Signed-off-by: Eelco Chaudron <echaudro@redhat.com>
Hi Elco!
This patch adds a bunch of new sparse warnings:
net/openvswitch/flow_table.c:376:23: warning: incorrect type in assignment (different address spaces)
net/openvswitch/flow_table.c:376:23: expected struct mask_cache_entry *cache
net/openvswitch/flow_table.c:376:23: got void [noderef] __percpu *
net/openvswitch/flow_table.c:386:25: warning: incorrect type in assignment (different address spaces)
net/openvswitch/flow_table.c:386:25: expected struct mask_cache_entry [noderef] __percpu *mask_cache
net/openvswitch/flow_table.c:386:25: got struct mask_cache_entry *cache
net/openvswitch/flow_table.c:411:27: warning: incorrect type in assignment (different address spaces)
net/openvswitch/flow_table.c:411:27: expected struct mask_cache [noderef] __rcu *mask_cache
net/openvswitch/flow_table.c:411:27: got struct mask_cache *
net/openvswitch/flow_table.c:440:35: warning: incorrect type in argument 1 (different address spaces)
net/openvswitch/flow_table.c:440:35: expected struct mask_cache *mc
net/openvswitch/flow_table.c:440:35: got struct mask_cache [noderef] __rcu *mask_cache
^ permalink raw reply [flat|nested] 11+ messages in thread
* Re: [PATCH net-next 2/2] net: openvswitch: make masks cache size configurable
2020-07-22 15:21 ` Jakub Kicinski
@ 2020-07-22 15:31 ` Eelco Chaudron
0 siblings, 0 replies; 11+ messages in thread
From: Eelco Chaudron @ 2020-07-22 15:31 UTC (permalink / raw)
To: Jakub Kicinski; +Cc: netdev, davem, dev, pabeni, pshelar
On 22 Jul 2020, at 17:21, Jakub Kicinski wrote:
> On Wed, 22 Jul 2020 10:27:52 +0200 Eelco Chaudron wrote:
>> This patch makes the masks cache size configurable, or with
>> a size of 0, disable it.
>>
>> Reviewed-by: Paolo Abeni <pabeni@redhat.com>
>> Signed-off-by: Eelco Chaudron <echaudro@redhat.com>
>
> Hi Eelco!
>
> This patch adds a bunch of new sparse warnings:
>
> net/openvswitch/flow_table.c:376:23: warning: incorrect type in
> assignment (different address spaces)
> net/openvswitch/flow_table.c:376:23: expected struct
> mask_cache_entry *cache
> net/openvswitch/flow_table.c:376:23: got void [noderef] __percpu *
> net/openvswitch/flow_table.c:386:25: warning: incorrect type in
> assignment (different address spaces)
> net/openvswitch/flow_table.c:386:25: expected struct
> mask_cache_entry [noderef] __percpu *mask_cache
> net/openvswitch/flow_table.c:386:25: got struct mask_cache_entry
> *cache
> net/openvswitch/flow_table.c:411:27: warning: incorrect type in
> assignment (different address spaces)
> net/openvswitch/flow_table.c:411:27: expected struct mask_cache
> [noderef] __rcu *mask_cache
> net/openvswitch/flow_table.c:411:27: got struct mask_cache *
> net/openvswitch/flow_table.c:440:35: warning: incorrect type in
> argument 1 (different address spaces)
> net/openvswitch/flow_table.c:440:35: expected struct mask_cache *mc
> net/openvswitch/flow_table.c:440:35: got struct mask_cache
> [noderef] __rcu *mask_cache
Odd, as I’m sure I ran checkpatch :( Will send an update fixing those!
^ permalink raw reply [flat|nested] 11+ messages in thread
* Re: [PATCH net-next 2/2] net: openvswitch: make masks cache size configurable
2020-07-22 8:27 ` [PATCH net-next 2/2] net: openvswitch: make masks cache size configurable Eelco Chaudron
@ 2020-07-22 17:40 ` kernel test robot
2020-07-22 17:40 ` kernel test robot
2020-07-22 19:22 ` Florian Westphal
2 siblings, 0 replies; 11+ messages in thread
From: kernel test robot @ 2020-07-22 17:40 UTC (permalink / raw)
To: Eelco Chaudron, netdev; +Cc: kbuild-all, davem, dev, kuba, pabeni, pshelar
[-- Attachment #1: Type: text/plain, Size: 6275 bytes --]
Hi Eelco,
Thank you for the patch! Perhaps something to improve:
[auto build test WARNING on net-next/master]
url: https://github.com/0day-ci/linux/commits/Eelco-Chaudron/net-openvswitch-masks-cache-enhancements/20200722-163017
base: https://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next.git fa56a987449bcf4c1cb68369a187af3515b85c78
config: x86_64-randconfig-s022-20200719 (attached as .config)
compiler: gcc-9 (Debian 9.3.0-14) 9.3.0
reproduce:
# apt-get install sparse
# sparse version: v0.6.2-49-g707c5017-dirty
# save the attached .config to linux build tree
make W=1 C=1 CF='-fdiagnostic-prefix -D__CHECK_ENDIAN__' ARCH=x86_64
If you fix the issue, kindly add following tag as appropriate
Reported-by: kernel test robot <lkp@intel.com>
sparse warnings: (new ones prefixed by >>)
>> net/openvswitch/flow_table.c:376:23: sparse: sparse: incorrect type in assignment (different address spaces) @@ expected struct mask_cache_entry *cache @@ got void [noderef] __percpu * @@
>> net/openvswitch/flow_table.c:376:23: sparse: expected struct mask_cache_entry *cache
>> net/openvswitch/flow_table.c:376:23: sparse: got void [noderef] __percpu *
>> net/openvswitch/flow_table.c:386:25: sparse: sparse: incorrect type in assignment (different address spaces) @@ expected struct mask_cache_entry [noderef] __percpu *mask_cache @@ got struct mask_cache_entry *cache @@
>> net/openvswitch/flow_table.c:386:25: sparse: expected struct mask_cache_entry [noderef] __percpu *mask_cache
>> net/openvswitch/flow_table.c:386:25: sparse: got struct mask_cache_entry *cache
>> net/openvswitch/flow_table.c:411:27: sparse: sparse: incorrect type in assignment (different address spaces) @@ expected struct mask_cache [noderef] __rcu *mask_cache @@ got struct mask_cache * @@
>> net/openvswitch/flow_table.c:411:27: sparse: expected struct mask_cache [noderef] __rcu *mask_cache
>> net/openvswitch/flow_table.c:411:27: sparse: got struct mask_cache *
>> net/openvswitch/flow_table.c:440:35: sparse: sparse: incorrect type in argument 1 (different address spaces) @@ expected struct mask_cache *mc @@ got struct mask_cache [noderef] __rcu *mask_cache @@
>> net/openvswitch/flow_table.c:440:35: sparse: expected struct mask_cache *mc
>> net/openvswitch/flow_table.c:440:35: sparse: got struct mask_cache [noderef] __rcu *mask_cache
net/openvswitch/flow_table.c:517:24: sparse: sparse: incorrect type in argument 1 (different address spaces) @@ expected struct callback_head *head @@ got struct callback_head [noderef] __rcu * @@
net/openvswitch/flow_table.c:517:24: sparse: expected struct callback_head *head
net/openvswitch/flow_table.c:517:24: sparse: got struct callback_head [noderef] __rcu *
net/openvswitch/flow_table.c:518:24: sparse: sparse: incorrect type in argument 1 (different address spaces) @@ expected struct callback_head *head @@ got struct callback_head [noderef] __rcu * @@
net/openvswitch/flow_table.c:518:24: sparse: expected struct callback_head *head
net/openvswitch/flow_table.c:518:24: sparse: got struct callback_head [noderef] __rcu *
net/openvswitch/flow_table.c:1166:42: sparse: sparse: incorrect type in assignment (different address spaces) @@ expected struct sw_flow_mask [noderef] __rcu * @@ got struct sw_flow_mask * @@
net/openvswitch/flow_table.c:1166:42: sparse: expected struct sw_flow_mask [noderef] __rcu *
net/openvswitch/flow_table.c:1166:42: sparse: got struct sw_flow_mask *
vim +376 net/openvswitch/flow_table.c
357
358 static struct mask_cache *tbl_mask_cache_alloc(u32 size)
359 {
360 struct mask_cache_entry *cache = NULL;
361 struct mask_cache *new;
362
363 /* Only allow size to be 0, or a power of 2, and does not exceed
364 * percpu allocation size.
365 */
366 if ((size & (size - 1)) != 0 ||
367 (size * sizeof(struct mask_cache_entry)) > PCPU_MIN_UNIT_SIZE)
368 return NULL;
369
370 new = kzalloc(sizeof(*new), GFP_KERNEL);
371 if (!new)
372 return NULL;
373
374 new->cache_size = size;
375 if (new->cache_size > 0) {
> 376 cache = __alloc_percpu(sizeof(struct mask_cache_entry) *
377 new->cache_size,
378 __alignof__(struct mask_cache_entry));
379
380 if (!cache) {
381 kfree(new);
382 return NULL;
383 }
384 }
385
> 386 new->mask_cache = cache;
387 return new;
388 }
389
390 void ovs_flow_tbl_masks_cache_resize(struct flow_table *table, u32 size)
391 {
392 struct mask_cache *mc = rcu_dereference(table->mask_cache);
393 struct mask_cache *new;
394
395 if (size == mc->cache_size || (size & (size - 1)) != 0)
396 return;
397
398 new = tbl_mask_cache_alloc(size);
399 if (!new)
400 return;
401
402 rcu_assign_pointer(table->mask_cache, new);
403 call_rcu(&mc->rcu, mask_cache_rcu_cb);
404 }
405
406 int ovs_flow_tbl_init(struct flow_table *table)
407 {
408 struct table_instance *ti, *ufid_ti;
409 struct mask_array *ma;
410
> 411 table->mask_cache = tbl_mask_cache_alloc(MC_DEFAULT_HASH_ENTRIES);
412 if (!table->mask_cache)
413 return -ENOMEM;
414
415 ma = tbl_mask_array_alloc(MASK_ARRAY_SIZE_MIN);
416 if (!ma)
417 goto free_mask_cache;
418
419 ti = table_instance_alloc(TBL_MIN_BUCKETS);
420 if (!ti)
421 goto free_mask_array;
422
423 ufid_ti = table_instance_alloc(TBL_MIN_BUCKETS);
424 if (!ufid_ti)
425 goto free_ti;
426
427 rcu_assign_pointer(table->ti, ti);
428 rcu_assign_pointer(table->ufid_ti, ufid_ti);
429 rcu_assign_pointer(table->mask_array, ma);
430 table->last_rehash = jiffies;
431 table->count = 0;
432 table->ufid_count = 0;
433 return 0;
434
435 free_ti:
436 __table_instance_destroy(ti);
437 free_mask_array:
438 __mask_array_destroy(ma);
439 free_mask_cache:
> 440 __mask_cache_destroy(table->mask_cache);
441 return -ENOMEM;
442 }
443
---
0-DAY CI Kernel Test Service, Intel Corporation
https://lists.01.org/hyperkitty/list/kbuild-all@lists.01.org
[-- Attachment #2: .config.gz --]
[-- Type: application/gzip, Size: 33367 bytes --]
^ permalink raw reply [flat|nested] 11+ messages in thread
* Re: [PATCH net-next 2/2] net: openvswitch: make masks cache size configurable
@ 2020-07-22 17:40 ` kernel test robot
0 siblings, 0 replies; 11+ messages in thread
From: kernel test robot @ 2020-07-22 17:40 UTC (permalink / raw)
To: kbuild-all
[-- Attachment #1: Type: text/plain, Size: 6414 bytes --]
Hi Eelco,
Thank you for the patch! Perhaps something to improve:
[auto build test WARNING on net-next/master]
url: https://github.com/0day-ci/linux/commits/Eelco-Chaudron/net-openvswitch-masks-cache-enhancements/20200722-163017
base: https://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next.git fa56a987449bcf4c1cb68369a187af3515b85c78
config: x86_64-randconfig-s022-20200719 (attached as .config)
compiler: gcc-9 (Debian 9.3.0-14) 9.3.0
reproduce:
# apt-get install sparse
# sparse version: v0.6.2-49-g707c5017-dirty
# save the attached .config to linux build tree
make W=1 C=1 CF='-fdiagnostic-prefix -D__CHECK_ENDIAN__' ARCH=x86_64
If you fix the issue, kindly add following tag as appropriate
Reported-by: kernel test robot <lkp@intel.com>
sparse warnings: (new ones prefixed by >>)
>> net/openvswitch/flow_table.c:376:23: sparse: sparse: incorrect type in assignment (different address spaces) @@ expected struct mask_cache_entry *cache @@ got void [noderef] __percpu * @@
>> net/openvswitch/flow_table.c:376:23: sparse: expected struct mask_cache_entry *cache
>> net/openvswitch/flow_table.c:376:23: sparse: got void [noderef] __percpu *
>> net/openvswitch/flow_table.c:386:25: sparse: sparse: incorrect type in assignment (different address spaces) @@ expected struct mask_cache_entry [noderef] __percpu *mask_cache @@ got struct mask_cache_entry *cache @@
>> net/openvswitch/flow_table.c:386:25: sparse: expected struct mask_cache_entry [noderef] __percpu *mask_cache
>> net/openvswitch/flow_table.c:386:25: sparse: got struct mask_cache_entry *cache
>> net/openvswitch/flow_table.c:411:27: sparse: sparse: incorrect type in assignment (different address spaces) @@ expected struct mask_cache [noderef] __rcu *mask_cache @@ got struct mask_cache * @@
>> net/openvswitch/flow_table.c:411:27: sparse: expected struct mask_cache [noderef] __rcu *mask_cache
>> net/openvswitch/flow_table.c:411:27: sparse: got struct mask_cache *
>> net/openvswitch/flow_table.c:440:35: sparse: sparse: incorrect type in argument 1 (different address spaces) @@ expected struct mask_cache *mc @@ got struct mask_cache [noderef] __rcu *mask_cache @@
>> net/openvswitch/flow_table.c:440:35: sparse: expected struct mask_cache *mc
>> net/openvswitch/flow_table.c:440:35: sparse: got struct mask_cache [noderef] __rcu *mask_cache
net/openvswitch/flow_table.c:517:24: sparse: sparse: incorrect type in argument 1 (different address spaces) @@ expected struct callback_head *head @@ got struct callback_head [noderef] __rcu * @@
net/openvswitch/flow_table.c:517:24: sparse: expected struct callback_head *head
net/openvswitch/flow_table.c:517:24: sparse: got struct callback_head [noderef] __rcu *
net/openvswitch/flow_table.c:518:24: sparse: sparse: incorrect type in argument 1 (different address spaces) @@ expected struct callback_head *head @@ got struct callback_head [noderef] __rcu * @@
net/openvswitch/flow_table.c:518:24: sparse: expected struct callback_head *head
net/openvswitch/flow_table.c:518:24: sparse: got struct callback_head [noderef] __rcu *
net/openvswitch/flow_table.c:1166:42: sparse: sparse: incorrect type in assignment (different address spaces) @@ expected struct sw_flow_mask [noderef] __rcu * @@ got struct sw_flow_mask * @@
net/openvswitch/flow_table.c:1166:42: sparse: expected struct sw_flow_mask [noderef] __rcu *
net/openvswitch/flow_table.c:1166:42: sparse: got struct sw_flow_mask *
vim +376 net/openvswitch/flow_table.c
357
358 static struct mask_cache *tbl_mask_cache_alloc(u32 size)
359 {
360 struct mask_cache_entry *cache = NULL;
361 struct mask_cache *new;
362
363 /* Only allow size to be 0, or a power of 2, and does not exceed
364 * percpu allocation size.
365 */
366 if ((size & (size - 1)) != 0 ||
367 (size * sizeof(struct mask_cache_entry)) > PCPU_MIN_UNIT_SIZE)
368 return NULL;
369
370 new = kzalloc(sizeof(*new), GFP_KERNEL);
371 if (!new)
372 return NULL;
373
374 new->cache_size = size;
375 if (new->cache_size > 0) {
> 376 cache = __alloc_percpu(sizeof(struct mask_cache_entry) *
377 new->cache_size,
378 __alignof__(struct mask_cache_entry));
379
380 if (!cache) {
381 kfree(new);
382 return NULL;
383 }
384 }
385
> 386 new->mask_cache = cache;
387 return new;
388 }
389
390 void ovs_flow_tbl_masks_cache_resize(struct flow_table *table, u32 size)
391 {
392 struct mask_cache *mc = rcu_dereference(table->mask_cache);
393 struct mask_cache *new;
394
395 if (size == mc->cache_size || (size & (size - 1)) != 0)
396 return;
397
398 new = tbl_mask_cache_alloc(size);
399 if (!new)
400 return;
401
402 rcu_assign_pointer(table->mask_cache, new);
403 call_rcu(&mc->rcu, mask_cache_rcu_cb);
404 }
405
406 int ovs_flow_tbl_init(struct flow_table *table)
407 {
408 struct table_instance *ti, *ufid_ti;
409 struct mask_array *ma;
410
> 411 table->mask_cache = tbl_mask_cache_alloc(MC_DEFAULT_HASH_ENTRIES);
412 if (!table->mask_cache)
413 return -ENOMEM;
414
415 ma = tbl_mask_array_alloc(MASK_ARRAY_SIZE_MIN);
416 if (!ma)
417 goto free_mask_cache;
418
419 ti = table_instance_alloc(TBL_MIN_BUCKETS);
420 if (!ti)
421 goto free_mask_array;
422
423 ufid_ti = table_instance_alloc(TBL_MIN_BUCKETS);
424 if (!ufid_ti)
425 goto free_ti;
426
427 rcu_assign_pointer(table->ti, ti);
428 rcu_assign_pointer(table->ufid_ti, ufid_ti);
429 rcu_assign_pointer(table->mask_array, ma);
430 table->last_rehash = jiffies;
431 table->count = 0;
432 table->ufid_count = 0;
433 return 0;
434
435 free_ti:
436 __table_instance_destroy(ti);
437 free_mask_array:
438 __mask_array_destroy(ma);
439 free_mask_cache:
> 440 __mask_cache_destroy(table->mask_cache);
441 return -ENOMEM;
442 }
443
---
0-DAY CI Kernel Test Service, Intel Corporation
https://lists.01.org/hyperkitty/list/kbuild-all(a)lists.01.org
[-- Attachment #2: config.gz --]
[-- Type: application/gzip, Size: 33367 bytes --]
^ permalink raw reply [flat|nested] 11+ messages in thread
* Re: [PATCH net-next 2/2] net: openvswitch: make masks cache size configurable
2020-07-22 8:27 ` [PATCH net-next 2/2] net: openvswitch: make masks cache size configurable Eelco Chaudron
2020-07-22 15:21 ` Jakub Kicinski
2020-07-22 17:40 ` kernel test robot
@ 2020-07-22 19:22 ` Florian Westphal
2020-07-23 9:59 ` Eelco Chaudron
2 siblings, 1 reply; 11+ messages in thread
From: Florian Westphal @ 2020-07-22 19:22 UTC (permalink / raw)
To: Eelco Chaudron; +Cc: netdev, davem, dev, kuba, pabeni, pshelar
Eelco Chaudron <echaudro@redhat.com> wrote:
> This patch makes the masks cache size configurable, or, with
> a size of 0, disables it.
>
> Reviewed-by: Paolo Abeni <pabeni@redhat.com>
> Signed-off-by: Eelco Chaudron <echaudro@redhat.com>
> ---
> include/uapi/linux/openvswitch.h | 1
> net/openvswitch/datapath.c | 11 +++++
> net/openvswitch/flow_table.c | 86 ++++++++++++++++++++++++++++++++++----
> net/openvswitch/flow_table.h | 10 ++++
> 4 files changed, 98 insertions(+), 10 deletions(-)
>
> diff --git a/include/uapi/linux/openvswitch.h b/include/uapi/linux/openvswitch.h
> index 7cb76e5ca7cf..8300cc29dec8 100644
> --- a/include/uapi/linux/openvswitch.h
> +++ b/include/uapi/linux/openvswitch.h
> @@ -86,6 +86,7 @@ enum ovs_datapath_attr {
> OVS_DP_ATTR_MEGAFLOW_STATS, /* struct ovs_dp_megaflow_stats */
> OVS_DP_ATTR_USER_FEATURES, /* OVS_DP_F_* */
> OVS_DP_ATTR_PAD,
> + OVS_DP_ATTR_MASKS_CACHE_SIZE,
This new attr should probably get an entry in
datapath.c datapath_policy[].
> --- a/net/openvswitch/datapath.c
> +++ b/net/openvswitch/datapath.c
> @@ -1535,6 +1535,10 @@ static int ovs_dp_cmd_fill_info(struct datapath *dp, struct sk_buff *skb,
> if (nla_put_u32(skb, OVS_DP_ATTR_USER_FEATURES, dp->user_features))
> goto nla_put_failure;
>
> + if (nla_put_u32(skb, OVS_DP_ATTR_MASKS_CACHE_SIZE,
> + ovs_flow_tbl_masks_cache_size(&dp->table)))
> + goto nla_put_failure;
> +
> genlmsg_end(skb, ovs_header);
> return 0;
ovs_dp_cmd_msg_size() should add another nla_total_size(sizeof(u32))
to make sure there is enough space.
> + if (a[OVS_DP_ATTR_MASKS_CACHE_SIZE]) {
> + u32 cache_size;
> +
> + cache_size = nla_get_u32(a[OVS_DP_ATTR_MASKS_CACHE_SIZE]);
> + ovs_flow_tbl_masks_cache_resize(&dp->table, cache_size);
> + }
I see a 0 cache size is legal (turns it off) and that the allocation
path has a few sanity checks as well.
Would it make sense to add min/max policy to datapath_policy[] for this
as well?
^ permalink raw reply [flat|nested] 11+ messages in thread
* Re: [PATCH net-next 2/2] net: openvswitch: make masks cache size configurable
2020-07-22 19:22 ` Florian Westphal
@ 2020-07-23 9:59 ` Eelco Chaudron
2020-07-23 10:15 ` Florian Westphal
0 siblings, 1 reply; 11+ messages in thread
From: Eelco Chaudron @ 2020-07-23 9:59 UTC (permalink / raw)
To: Florian Westphal; +Cc: netdev, davem, dev, kuba, pabeni, pshelar
On 22 Jul 2020, at 21:22, Florian Westphal wrote:
> Eelco Chaudron <echaudro@redhat.com> wrote:
>> This patch makes the masks cache size configurable, or, with
>> a size of 0, disables it.
>>
>> Reviewed-by: Paolo Abeni <pabeni@redhat.com>
>> Signed-off-by: Eelco Chaudron <echaudro@redhat.com>
>> ---
>> include/uapi/linux/openvswitch.h | 1
>> net/openvswitch/datapath.c | 11 +++++
>> net/openvswitch/flow_table.c | 86
>> ++++++++++++++++++++++++++++++++++----
>> net/openvswitch/flow_table.h | 10 ++++
>> 4 files changed, 98 insertions(+), 10 deletions(-)
>>
>> diff --git a/include/uapi/linux/openvswitch.h
>> b/include/uapi/linux/openvswitch.h
>> index 7cb76e5ca7cf..8300cc29dec8 100644
>> --- a/include/uapi/linux/openvswitch.h
>> +++ b/include/uapi/linux/openvswitch.h
>> @@ -86,6 +86,7 @@ enum ovs_datapath_attr {
>> OVS_DP_ATTR_MEGAFLOW_STATS, /* struct ovs_dp_megaflow_stats */
>> OVS_DP_ATTR_USER_FEATURES, /* OVS_DP_F_* */
>> OVS_DP_ATTR_PAD,
>> + OVS_DP_ATTR_MASKS_CACHE_SIZE,
>
> This new attr should probably get an entry in
> datapath.c datapath_policy[].
Yes, I should have, will fix in v2.
>> --- a/net/openvswitch/datapath.c
>> +++ b/net/openvswitch/datapath.c
>> @@ -1535,6 +1535,10 @@ static int ovs_dp_cmd_fill_info(struct
>> datapath *dp, struct sk_buff *skb,
>> if (nla_put_u32(skb, OVS_DP_ATTR_USER_FEATURES, dp->user_features))
>> goto nla_put_failure;
>>
>> + if (nla_put_u32(skb, OVS_DP_ATTR_MASKS_CACHE_SIZE,
>> + ovs_flow_tbl_masks_cache_size(&dp->table)))
>> + goto nla_put_failure;
>> +
>> genlmsg_end(skb, ovs_header);
>> return 0;
>
>
> ovs_dp_cmd_msg_size() should add another nla_total_size(sizeof(u32))
> to make sure there is enough space.
Same as above
>> + if (a[OVS_DP_ATTR_MASKS_CACHE_SIZE]) {
>> + u32 cache_size;
>> +
>> + cache_size = nla_get_u32(a[OVS_DP_ATTR_MASKS_CACHE_SIZE]);
>> + ovs_flow_tbl_masks_cache_resize(&dp->table, cache_size);
>> + }
>
> I see a 0 cache size is legal (turns it off) and that the allocation
> path has a few sanity checks as well.
>
> Would it make sense to add min/max policy to datapath_policy[] for
> this
> as well?
Yes I could add the following:
@@ -1906,7 +1906,8 @@ static const struct nla_policy
datapath_policy[OVS_DP_ATTR_MAX + 1] = {
[OVS_DP_ATTR_NAME] = { .type = NLA_NUL_STRING, .len = IFNAMSIZ
- 1 },
[OVS_DP_ATTR_UPCALL_PID] = { .type = NLA_U32 },
[OVS_DP_ATTR_USER_FEATURES] = { .type = NLA_U32 },
+ [OVS_DP_ATTR_MASKS_CACHE_SIZE] = NLA_POLICY_RANGE(NLA_U32, 0,
+ PCPU_MIN_UNIT_SIZE / sizeof(struct mask_cache_entry)),
};
Let me know your thoughts
^ permalink raw reply [flat|nested] 11+ messages in thread
* Re: [PATCH net-next 2/2] net: openvswitch: make masks cache size configurable
2020-07-23 9:59 ` Eelco Chaudron
@ 2020-07-23 10:15 ` Florian Westphal
0 siblings, 0 replies; 11+ messages in thread
From: Florian Westphal @ 2020-07-23 10:15 UTC (permalink / raw)
To: Eelco Chaudron
Cc: Florian Westphal, netdev, davem, dev, kuba, pabeni, pshelar
Eelco Chaudron <echaudro@redhat.com> wrote:
> On 22 Jul 2020, at 21:22, Florian Westphal wrote:
> > I see a 0 cache size is legal (turns it off) and that the allocation
> > path has a few sanity checks as well.
> >
> > Would it make sense to add min/max policy to datapath_policy[] for this
> > as well?
>
> Yes I could add the following:
>
> @@ -1906,7 +1906,8 @@ static const struct nla_policy
> datapath_policy[OVS_DP_ATTR_MAX + 1] = {
> [OVS_DP_ATTR_NAME] = { .type = NLA_NUL_STRING, .len = IFNAMSIZ - 1
> },
> [OVS_DP_ATTR_UPCALL_PID] = { .type = NLA_U32 },
> [OVS_DP_ATTR_USER_FEATURES] = { .type = NLA_U32 },
> + [OVS_DP_ATTR_MASKS_CACHE_SIZE] = NLA_POLICY_RANGE(NLA_U32, 0,
> + PCPU_MIN_UNIT_SIZE / sizeof(struct mask_cache_entry)),
> };
> Let me know your thoughts
I think it's a good idea. When 'max' becomes too restricted, one could
rework internal kernel logic to support larger size and userspace
can detect it by probing with a larger size first.
^ permalink raw reply [flat|nested] 11+ messages in thread
end of thread, other threads:[~2020-07-23 10:16 UTC | newest]
Thread overview: 11+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2020-07-22 8:27 [PATCH net-next 0/2] net: openvswitch: masks cache enhancements Eelco Chaudron
2020-07-22 8:27 ` [PATCH net-next 1/2] net: openvswitch: add masks cache hit counter Eelco Chaudron
2020-07-22 8:27 ` [PATCH net-next 2/2] net: openvswitch: make masks cache size configurable Eelco Chaudron
2020-07-22 15:21 ` Jakub Kicinski
2020-07-22 15:31 ` Eelco Chaudron
2020-07-22 17:40 ` kernel test robot
2020-07-22 17:40 ` kernel test robot
2020-07-22 19:22 ` Florian Westphal
2020-07-23 9:59 ` Eelco Chaudron
2020-07-23 10:15 ` Florian Westphal
2020-07-22 8:37 ` [PATCH net-next 0/2] net: openvswitch: masks cache enhancements Eelco Chaudron
This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.