* [RFC PATCH net-next 3/5] bpf: Add BPF_MAP_TYPE_LRU_HASH
From: Martin KaFai Lau @ 2016-10-02 3:58 UTC
To: netdev; +Cc: FB Kernel Team
Provide an LRU version of the existing BPF_MAP_TYPE_HASH.
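As an illustration (not part of this patch), a BPF program uses the new
map type exactly like the existing hash map. A minimal sketch in the
samples/bpf style; the map size and the kprobe hook are made up:

#include <uapi/linux/bpf.h>
#include <linux/version.h>
#include <linux/ptrace.h>
#include "bpf_helpers.h"

struct bpf_map_def SEC("maps") lru_map = {
        .type = BPF_MAP_TYPE_LRU_HASH,
        .key_size = sizeof(u32),
        .value_size = sizeof(long),
        .max_entries = 1024, /* illustrative size */
};

SEC("kprobe/sys_getpid")
int touch_lru(struct pt_regs *ctx)
{
        u32 key = bpf_get_prandom_u32();
        long init_val = 1;
        long *val;

        /* A successful lookup sets the element's ref bit,
         * which protects it from the next LRU shrink.
         */
        val = bpf_map_lookup_elem(&lru_map, &key);
        if (val) {
                *val += 1;
                return 0;
        }

        /* On a full map, the update evicts an older element
         * whose ref bit is not set instead of failing.
         */
        bpf_map_update_elem(&lru_map, &key, &init_val, BPF_ANY);
        return 0;
}

char _license[] SEC("license") = "GPL";
u32 _version SEC("version") = LINUX_VERSION_CODE;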
Signed-off-by: Martin KaFai Lau <kafai@fb.com>
---
include/uapi/linux/bpf.h | 1 +
kernel/bpf/hashtab.c | 234 ++++++++++++++++++++++++++++++++++++++++++++---
2 files changed, 222 insertions(+), 13 deletions(-)
diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h
index f896dfa..c153ff8 100644
--- a/include/uapi/linux/bpf.h
+++ b/include/uapi/linux/bpf.h
@@ -85,6 +85,7 @@ enum bpf_map_type {
BPF_MAP_TYPE_PERCPU_ARRAY,
BPF_MAP_TYPE_STACK_TRACE,
BPF_MAP_TYPE_CGROUP_ARRAY,
+ BPF_MAP_TYPE_LRU_HASH,
};
enum bpf_prog_type {
diff --git a/kernel/bpf/hashtab.c b/kernel/bpf/hashtab.c
index a5e3915..1493d98 100644
--- a/kernel/bpf/hashtab.c
+++ b/kernel/bpf/hashtab.c
@@ -15,6 +15,7 @@
#include <linux/filter.h>
#include <linux/vmalloc.h>
#include "percpu_freelist.h"
+#include "bpf_lru_list.h"
struct bucket {
struct hlist_head head;
@@ -25,7 +26,10 @@ struct bpf_htab {
struct bpf_map map;
struct bucket *buckets;
void *elems;
- struct pcpu_freelist freelist;
+ union {
+ struct pcpu_freelist freelist;
+ struct bpf_lru lru;
+ };
void __percpu *extra_elems;
atomic_t count; /* number of elements in this hashtable */
u32 n_buckets; /* number of hash buckets */
@@ -48,11 +52,19 @@ struct htab_elem {
union {
struct rcu_head rcu;
enum extra_elem_state state;
+ struct bpf_lru_node lru_node;
};
u32 hash;
char key[0] __aligned(8);
};
+static bool htab_lru_map_delete_node(void *arg, struct bpf_lru_node *node);
+
+static bool htab_is_lru(const struct bpf_htab *htab)
+{
+ return htab->map.map_type == BPF_MAP_TYPE_LRU_HASH;
+}
+
static inline void htab_elem_set_ptr(struct htab_elem *l, u32 key_size,
void __percpu *pptr)
{
@@ -87,7 +99,22 @@ free_elems:
vfree(htab->elems);
}
-static int prealloc_elems_and_freelist(struct bpf_htab *htab)
+static struct htab_elem *prealloc_lru_pop(struct bpf_htab *htab, void *key,
+ u32 hash)
+{
+ struct bpf_lru_node *node = bpf_lru_pop_free(&htab->lru, hash);
+ struct htab_elem *l;
+
+ if (node) {
+ l = container_of(node, struct htab_elem, lru_node);
+ memcpy(l->key, key, htab->map.key_size);
+ return l;
+ }
+
+ return NULL;
+}
+
+static int prealloc_init(struct bpf_htab *htab)
{
int err = -ENOMEM, i;
@@ -110,12 +137,26 @@ static int prealloc_elems_and_freelist(struct bpf_htab *htab)
}
skip_percpu_elems:
- err = pcpu_freelist_init(&htab->freelist);
+ if (htab_is_lru(htab))
+ err = bpf_lru_init(&htab->lru,
+ offsetof(struct htab_elem, hash) -
+ offsetof(struct htab_elem, lru_node),
+ htab_lru_map_delete_node,
+ htab);
+ else
+ err = pcpu_freelist_init(&htab->freelist);
+
if (err)
goto free_elems;
- pcpu_freelist_populate(&htab->freelist, htab->elems, htab->elem_size,
- htab->map.max_entries);
+ if (htab_is_lru(htab))
+ bpf_lru_populate(&htab->lru, htab->elems,
+ offsetof(struct htab_elem, lru_node),
+ htab->elem_size, htab->map.max_entries);
+ else
+ pcpu_freelist_populate(&htab->freelist, htab->elems,
+ htab->elem_size, htab->map.max_entries);
+
return 0;
free_elems:
@@ -123,6 +164,16 @@ free_elems:
return err;
}
+static void prealloc_destroy(struct bpf_htab *htab)
+{
+ htab_free_elems(htab);
+
+ if (htab_is_lru(htab))
+ bpf_lru_destroy(&htab->lru);
+ else
+ pcpu_freelist_destroy(&htab->freelist);
+}
+
static int alloc_extra_elems(struct bpf_htab *htab)
{
void __percpu *pptr;
@@ -144,6 +195,8 @@ static int alloc_extra_elems(struct bpf_htab *htab)
static struct bpf_map *htab_map_alloc(union bpf_attr *attr)
{
bool percpu = attr->map_type == BPF_MAP_TYPE_PERCPU_HASH;
+ bool lru = attr->map_type == BPF_MAP_TYPE_LRU_HASH;
+ bool prealloc = !(attr->map_flags & BPF_F_NO_PREALLOC);
struct bpf_htab *htab;
int err, i;
u64 cost;
@@ -152,6 +205,9 @@ static struct bpf_map *htab_map_alloc(union bpf_attr *attr)
/* reserved bits should not be used */
return ERR_PTR(-EINVAL);
+ if (lru && !prealloc)
+ return ERR_PTR(-ENOTSUPP);
+
htab = kzalloc(sizeof(*htab), GFP_USER);
if (!htab)
return ERR_PTR(-ENOMEM);
@@ -241,14 +297,14 @@ static struct bpf_map *htab_map_alloc(union bpf_attr *attr)
raw_spin_lock_init(&htab->buckets[i].lock);
}
- if (!percpu) {
+ if (!percpu && !lru) {
err = alloc_extra_elems(htab);
if (err)
goto free_buckets;
}
- if (!(attr->map_flags & BPF_F_NO_PREALLOC)) {
- err = prealloc_elems_and_freelist(htab);
+ if (prealloc) {
+ err = prealloc_init(htab);
if (err)
goto free_extra_elems;
}
@@ -323,6 +379,47 @@ static void *htab_map_lookup_elem(struct bpf_map *map, void *key)
return NULL;
}
+static void *htab_lru_map_lookup_elem(struct bpf_map *map, void *key)
+{
+ struct htab_elem *l = __htab_map_lookup_elem(map, key);
+
+ if (l) {
+ bpf_lru_node_set_ref(&l->lru_node);
+ return l->key + round_up(map->key_size, 8);
+ }
+
+ return NULL;
+}
+
+/* It is called from the bpf_lru_list when the LRU needs to delete
+ * older elements from the htab.
+ */
+static bool htab_lru_map_delete_node(void *arg, struct bpf_lru_node *node)
+{
+ struct bpf_htab *htab = (struct bpf_htab *)arg;
+ struct htab_elem *l, *tgt_l;
+ struct hlist_head *head;
+ unsigned long flags;
+ struct bucket *b;
+
+ tgt_l = container_of(node, struct htab_elem, lru_node);
+ b = __select_bucket(htab, tgt_l->hash);
+ head = &b->head;
+
+ raw_spin_lock_irqsave(&b->lock, flags);
+
+ hlist_for_each_entry_rcu(l, head, hash_node)
+ if (l == tgt_l) {
+ hlist_del_rcu(&l->hash_node);
+ break;
+ }
+
+ raw_spin_unlock_irqrestore(&b->lock, flags);
+
+ return l == tgt_l;
+}
+
/* Called from syscall */
static int htab_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
{
@@ -579,6 +676,70 @@ err:
return ret;
}
+static int htab_lru_map_update_elem(struct bpf_map *map, void *key, void *value,
+ u64 map_flags)
+{
+ struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
+ struct htab_elem *l_new, *l_old = NULL;
+ struct hlist_head *head;
+ unsigned long flags;
+ struct bucket *b;
+ u32 key_size, hash;
+ int ret;
+
+ if (unlikely(map_flags > BPF_EXIST))
+ /* unknown flags */
+ return -EINVAL;
+
+ WARN_ON_ONCE(!rcu_read_lock_held());
+
+ key_size = map->key_size;
+
+ hash = htab_map_hash(key, key_size);
+
+ b = __select_bucket(htab, hash);
+ head = &b->head;
+
+ /* For LRU, we need to alloc before taking bucket's
+ * spinlock because getting free nodes from LRU may need
+ * to remove older elements from htab and this removal
+ * operation will need a bucket lock.
+ */
+ l_new = prealloc_lru_pop(htab, key, hash);
+ if (!l_new)
+ return -ENOMEM;
+ memcpy(l_new->key + round_up(map->key_size, 8), value, map->value_size);
+
+ /* bpf_map_update_elem() can be called in_irq() */
+ raw_spin_lock_irqsave(&b->lock, flags);
+
+ l_old = lookup_elem_raw(head, hash, key, key_size);
+
+ ret = check_flags(htab, l_old, map_flags);
+ if (ret)
+ goto err;
+
+ /* add new element to the head of the list, so that
+ * concurrent search will find it before old elem
+ */
+ hlist_add_head_rcu(&l_new->hash_node, head);
+ if (l_old) {
+ bpf_lru_node_set_ref(&l_new->lru_node);
+ hlist_del_rcu(&l_old->hash_node);
+ }
+ ret = 0;
+
+err:
+ raw_spin_unlock_irqrestore(&b->lock, flags);
+
+ if (ret)
+ bpf_lru_push_free(&htab->lru, &l_new->lru_node);
+ else if (l_old)
+ bpf_lru_push_free(&htab->lru, &l_old->lru_node);
+
+ return ret;
+}
+
static int __htab_percpu_map_update_elem(struct bpf_map *map, void *key,
void *value, u64 map_flags,
bool onallcpus)
@@ -671,6 +832,39 @@ static int htab_map_delete_elem(struct bpf_map *map, void *key)
return ret;
}
+static int htab_lru_map_delete_elem(struct bpf_map *map, void *key)
+{
+ struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
+ struct hlist_head *head;
+ struct bucket *b;
+ struct htab_elem *l;
+ unsigned long flags;
+ u32 hash, key_size;
+ int ret = -ENOENT;
+
+ WARN_ON_ONCE(!rcu_read_lock_held());
+
+ key_size = map->key_size;
+
+ hash = htab_map_hash(key, key_size);
+ b = __select_bucket(htab, hash);
+ head = &b->head;
+
+ raw_spin_lock_irqsave(&b->lock, flags);
+
+ l = lookup_elem_raw(head, hash, key, key_size);
+
+ if (l) {
+ hlist_del_rcu(&l->hash_node);
+ ret = 0;
+ }
+
+ raw_spin_unlock_irqrestore(&b->lock, flags);
+ if (l)
+ bpf_lru_push_free(&htab->lru, &l->lru_node);
+ return ret;
+}
+
static void delete_all_elements(struct bpf_htab *htab)
{
int i;
@@ -702,12 +896,11 @@ static void htab_map_free(struct bpf_map *map)
* not have executed. Wait for them.
*/
rcu_barrier();
- if (htab->map.map_flags & BPF_F_NO_PREALLOC) {
+ if (htab->map.map_flags & BPF_F_NO_PREALLOC)
delete_all_elements(htab);
- } else {
- htab_free_elems(htab);
- pcpu_freelist_destroy(&htab->freelist);
- }
+ else
+ prealloc_destroy(htab);
+
free_percpu(htab->extra_elems);
kvfree(htab->buckets);
kfree(htab);
@@ -727,6 +920,20 @@ static struct bpf_map_type_list htab_type __read_mostly = {
.type = BPF_MAP_TYPE_HASH,
};
+static const struct bpf_map_ops htab_lru_ops = {
+ .map_alloc = htab_map_alloc,
+ .map_free = htab_map_free,
+ .map_get_next_key = htab_map_get_next_key,
+ .map_lookup_elem = htab_lru_map_lookup_elem,
+ .map_update_elem = htab_lru_map_update_elem,
+ .map_delete_elem = htab_lru_map_delete_elem,
+};
+
+static struct bpf_map_type_list htab_lru_type __read_mostly = {
+ .ops = &htab_lru_ops,
+ .type = BPF_MAP_TYPE_LRU_HASH,
+};
+
/* Called from eBPF program */
static void *htab_percpu_map_lookup_elem(struct bpf_map *map, void *key)
{
@@ -797,6 +1004,7 @@ static int __init register_htab_map(void)
{
bpf_register_map_type(&htab_type);
bpf_register_map_type(&htab_percpu_type);
+ bpf_register_map_type(&htab_lru_type);
return 0;
}
late_initcall(register_htab_map);
--
2.5.1
* [RFC PATCH net-next 4/5] bpf: Add BPF_MAP_TYPE_LRU_PERCPU_HASH
From: Martin KaFai Lau @ 2016-10-02 3:58 UTC
To: netdev; +Cc: FB Kernel Team
Provide an LRU version of the existing BPF_MAP_TYPE_PERCPU_HASH.
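As an illustration (not part of this patch), user space reads a per-cpu
LRU map the same way as BPF_MAP_TYPE_PERCPU_HASH: the value buffer must
hold one 8-byte-aligned slot per possible CPU, which is how
map_lookup_elem() in the syscall.c hunk below sizes it. A sketch using
the samples/bpf libbpf.c wrappers; map_fd is assumed to be an
already-created BPF_MAP_TYPE_LRU_PERCPU_HASH fd:

#include <unistd.h>
#include "libbpf.h" /* samples/bpf wrapper providing bpf_lookup_elem() */

/* Sum one key's value across all CPUs. Returns -1 if the key is
 * not in the map (errno is set by the lookup syscall).
 */
static long long sum_percpu(int map_fd, unsigned long long key)
{
        long nr_cpus = sysconf(_SC_NPROCESSORS_CONF);
        unsigned long long values[nr_cpus]; /* one slot per possible CPU */
        long long sum = 0;
        long cpu;

        /* The syscall-side lookup also sets the LRU ref bit
         * (see bpf_percpu_hash_copy() in this patch).
         */
        if (bpf_lookup_elem(map_fd, &key, values))
                return -1;

        for (cpu = 0; cpu < nr_cpus; cpu++)
                sum += values[cpu];
        return sum;
}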
Signed-off-by: Martin KaFai Lau <kafai@fb.com>
---
include/uapi/linux/bpf.h | 1 +
kernel/bpf/hashtab.c | 129 ++++++++++++++++++++++++++++++++++++++++++++---
kernel/bpf/syscall.c | 8 ++-
3 files changed, 130 insertions(+), 8 deletions(-)
diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h
index c153ff8..b443675 100644
--- a/include/uapi/linux/bpf.h
+++ b/include/uapi/linux/bpf.h
@@ -86,6 +86,7 @@ enum bpf_map_type {
BPF_MAP_TYPE_STACK_TRACE,
BPF_MAP_TYPE_CGROUP_ARRAY,
BPF_MAP_TYPE_LRU_HASH,
+ BPF_MAP_TYPE_LRU_PERCPU_HASH,
};
enum bpf_prog_type {
diff --git a/kernel/bpf/hashtab.c b/kernel/bpf/hashtab.c
index 1493d98..92c546e 100644
--- a/kernel/bpf/hashtab.c
+++ b/kernel/bpf/hashtab.c
@@ -62,7 +62,14 @@ static bool htab_lru_map_delete_node(void *arg, struct bpf_lru_node *node);
static bool htab_is_lru(const struct bpf_htab *htab)
{
- return htab->map.map_type == BPF_MAP_TYPE_LRU_HASH;
+ return htab->map.map_type == BPF_MAP_TYPE_LRU_HASH ||
+ htab->map.map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH;
+}
+
+static bool htab_is_percpu(const struct bpf_htab *htab)
+{
+ return htab->map.map_type == BPF_MAP_TYPE_PERCPU_HASH ||
+ htab->map.map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH;
}
static inline void htab_elem_set_ptr(struct htab_elem *l, u32 key_size,
@@ -85,7 +92,7 @@ static void htab_free_elems(struct bpf_htab *htab)
{
int i;
- if (htab->map.map_type != BPF_MAP_TYPE_PERCPU_HASH)
+ if (!htab_is_percpu(htab))
goto free_elems;
for (i = 0; i < htab->map.max_entries; i++) {
@@ -122,7 +129,7 @@ static int prealloc_init(struct bpf_htab *htab)
if (!htab->elems)
return -ENOMEM;
- if (htab->map.map_type != BPF_MAP_TYPE_PERCPU_HASH)
+ if (!htab_is_percpu(htab))
goto skip_percpu_elems;
for (i = 0; i < htab->map.max_entries; i++) {
@@ -194,8 +201,10 @@ static int alloc_extra_elems(struct bpf_htab *htab)
/* Called from syscall */
static struct bpf_map *htab_map_alloc(union bpf_attr *attr)
{
- bool percpu = attr->map_type == BPF_MAP_TYPE_PERCPU_HASH;
- bool lru = attr->map_type == BPF_MAP_TYPE_LRU_HASH;
+ bool percpu = (attr->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
+ attr->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH);
+ bool lru = (attr->map_type == BPF_MAP_TYPE_LRU_HASH ||
+ attr->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH);
bool prealloc = !(attr->map_flags & BPF_F_NO_PREALLOC);
struct bpf_htab *htab;
int err, i;
@@ -793,12 +802,84 @@ err:
return ret;
}
+static int __htab_lru_percpu_map_update_elem(struct bpf_map *map, void *key,
+ void *value, u64 map_flags,
+ bool onallcpus)
+{
+ struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
+ struct htab_elem *l_new = NULL, *l_old;
+ struct hlist_head *head;
+ unsigned long flags;
+ struct bucket *b;
+ u32 key_size, hash;
+ int ret;
+
+ if (unlikely(map_flags > BPF_EXIST))
+ /* unknown flags */
+ return -EINVAL;
+
+ WARN_ON_ONCE(!rcu_read_lock_held());
+
+ key_size = map->key_size;
+
+ hash = htab_map_hash(key, key_size);
+
+ b = __select_bucket(htab, hash);
+ head = &b->head;
+
+ /* For LRU, we need to alloc before taking bucket's
+ * spinlock because LRU's elem alloc may need
+ * to remove older elem from htab and this removal
+ * operation will need a bucket lock.
+ */
+ if (map_flags != BPF_EXIST) {
+ l_new = prealloc_lru_pop(htab, key, hash);
+ if (!l_new)
+ return -ENOMEM;
+ }
+
+ /* bpf_map_update_elem() can be called in_irq() */
+ raw_spin_lock_irqsave(&b->lock, flags);
+
+ l_old = lookup_elem_raw(head, hash, key, key_size);
+
+ ret = check_flags(htab, l_old, map_flags);
+ if (ret)
+ goto err;
+
+ if (l_old) {
+ bpf_lru_node_set_ref(&l_old->lru_node);
+
+ /* per-cpu hash map can update value in-place */
+ pcpu_copy_value(htab, htab_elem_get_ptr(l_old, key_size),
+ value, onallcpus);
+ } else {
+ pcpu_copy_value(htab, htab_elem_get_ptr(l_new, key_size),
+ value, onallcpus);
+ hlist_add_head_rcu(&l_new->hash_node, head);
+ l_new = NULL;
+ }
+ ret = 0;
+err:
+ raw_spin_unlock_irqrestore(&b->lock, flags);
+ if (l_new)
+ bpf_lru_push_free(&htab->lru, &l_new->lru_node);
+ return ret;
+}
+
static int htab_percpu_map_update_elem(struct bpf_map *map, void *key,
void *value, u64 map_flags)
{
return __htab_percpu_map_update_elem(map, key, value, map_flags, false);
}
+static int htab_lru_percpu_map_update_elem(struct bpf_map *map, void *key,
+ void *value, u64 map_flags)
+{
+ return __htab_lru_percpu_map_update_elem(map, key, value, map_flags,
+ false);
+}
+
/* Called from syscall or from eBPF program */
static int htab_map_delete_elem(struct bpf_map *map, void *key)
{
@@ -945,8 +1026,21 @@ static void *htab_percpu_map_lookup_elem(struct bpf_map *map, void *key)
return NULL;
}
+static void *htab_lru_percpu_map_lookup_elem(struct bpf_map *map, void *key)
+{
+ struct htab_elem *l = __htab_map_lookup_elem(map, key);
+
+ if (l) {
+ bpf_lru_node_set_ref(&l->lru_node);
+ return this_cpu_ptr(htab_elem_get_ptr(l, map->key_size));
+ }
+
+ return NULL;
+}
+
int bpf_percpu_hash_copy(struct bpf_map *map, void *key, void *value)
{
+ struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
struct htab_elem *l;
void __percpu *pptr;
int ret = -ENOENT;
@@ -962,6 +1056,8 @@ int bpf_percpu_hash_copy(struct bpf_map *map, void *key, void *value)
l = __htab_map_lookup_elem(map, key);
if (!l)
goto out;
+ if (htab_is_lru(htab))
+ bpf_lru_node_set_ref(&l->lru_node);
pptr = htab_elem_get_ptr(l, map->key_size);
for_each_possible_cpu(cpu) {
bpf_long_memcpy(value + off,
@@ -977,10 +1073,16 @@ out:
int bpf_percpu_hash_update(struct bpf_map *map, void *key, void *value,
u64 map_flags)
{
+ struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
int ret;
rcu_read_lock();
- ret = __htab_percpu_map_update_elem(map, key, value, map_flags, true);
+ if (htab_is_lru(htab))
+ ret = __htab_lru_percpu_map_update_elem(map, key, value,
+ map_flags, true);
+ else
+ ret = __htab_percpu_map_update_elem(map, key, value, map_flags,
+ true);
rcu_read_unlock();
return ret;
@@ -1000,11 +1102,26 @@ static struct bpf_map_type_list htab_percpu_type __read_mostly = {
.type = BPF_MAP_TYPE_PERCPU_HASH,
};
+static const struct bpf_map_ops htab_lru_percpu_ops = {
+ .map_alloc = htab_map_alloc,
+ .map_free = htab_map_free,
+ .map_get_next_key = htab_map_get_next_key,
+ .map_lookup_elem = htab_lru_percpu_map_lookup_elem,
+ .map_update_elem = htab_lru_percpu_map_update_elem,
+ .map_delete_elem = htab_lru_map_delete_elem,
+};
+
+static struct bpf_map_type_list htab_lru_percpu_type __read_mostly = {
+ .ops = &htab_lru_percpu_ops,
+ .type = BPF_MAP_TYPE_LRU_PERCPU_HASH,
+};
+
static int __init register_htab_map(void)
{
bpf_register_map_type(&htab_type);
bpf_register_map_type(&htab_percpu_type);
bpf_register_map_type(&htab_lru_type);
+ bpf_register_map_type(&htab_lru_percpu_type);
return 0;
}
late_initcall(register_htab_map);
diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
index 228f962..bc69eb4 100644
--- a/kernel/bpf/syscall.c
+++ b/kernel/bpf/syscall.c
@@ -295,6 +295,7 @@ static int map_lookup_elem(union bpf_attr *attr)
goto free_key;
if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
+ map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH ||
map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY)
value_size = round_up(map->value_size, 8) * num_possible_cpus();
else
@@ -305,7 +306,8 @@ static int map_lookup_elem(union bpf_attr *attr)
if (!value)
goto free_key;
- if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH) {
+ if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
+ map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) {
err = bpf_percpu_hash_copy(map, key, value);
} else if (map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
err = bpf_percpu_array_copy(map, key, value);
@@ -369,6 +371,7 @@ static int map_update_elem(union bpf_attr *attr)
goto free_key;
if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
+ map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH ||
map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY)
value_size = round_up(map->value_size, 8) * num_possible_cpus();
else
@@ -388,7 +391,8 @@ static int map_update_elem(union bpf_attr *attr)
*/
preempt_disable();
__this_cpu_inc(bpf_prog_active);
- if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH) {
+ if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
+ map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) {
err = bpf_percpu_hash_update(map, key, value, attr->flags);
} else if (map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
err = bpf_percpu_array_update(map, key, value, attr->flags);
--
2.5.1
* [RFC PATCH net-next 5/5] bpf: Add tests for the LRU bpf_htab
From: Martin KaFai Lau @ 2016-10-02 3:58 UTC
To: netdev; +Cc: FB Kernel Team
This patch adds some unit tests and a test_lru_dist test.
test_lru_dist reads numeric keys from a file.
The files used here are generated by a modified fio-genzipf tool
originating from the fio test suite. The sample data files can be
found here: https://github.com/iamkafai/bpf-lru
The zipf.* data files have 100k numeric keys, ranging from 1 to 100k.
test_lru_dist outputs the number of unique keys (nr_unique).
For example, the output below shows that 61239 of the 100k keys
are unique. nr_misses counts the keys that cannot be found in the
LRU map, so nr_misses must be >= nr_unique.
[root@arch-vm1 ~]# ./test_lru_dist zipf.100k.a0_01.out 40000
task:0 do_test_lru_dist:......
task:0 BPF LRU: nr_unique:61239(/100000) nr_misses:67054(/100000)
task:0 Perfect LRU: nr_unique:61239(/100000) nr_misses:66993(/100000)
[root@arch-vm1 ~]# ./test_lru_dist zipf.100k.a1_01.out 4000
task:0 BPF LRU: nr_unique:23093(/100000) nr_misses:31603(/100000)
task:0 Perfect LRU: nr_unique:23093(/100000) nr_misses:34328(/100000)
test_lru_dist also simulates a perfect LRU map for comparison.
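For reference, the input file read by test_lru_dist is plain text with
one decimal key per line (read_keys() splits on newlines and parses
each line with strtoull()); a made-up sample:

57
1
57
99321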
Signed-off-by: Martin KaFai Lau <kafai@fb.com>
---
samples/bpf/Makefile | 4 +-
samples/bpf/map_perf_test_kern.c | 20 ++
samples/bpf/map_perf_test_user.c | 17 +
samples/bpf/test_lru_dist.c | 322 ++++++++++++++++++
samples/bpf/test_lru_map.c | 690 +++++++++++++++++++++++++++++++++++++++
5 files changed, 1052 insertions(+), 1 deletion(-)
create mode 100644 samples/bpf/test_lru_dist.c
create mode 100644 samples/bpf/test_lru_map.c
diff --git a/samples/bpf/Makefile b/samples/bpf/Makefile
index 12b7304..55052af 100644
--- a/samples/bpf/Makefile
+++ b/samples/bpf/Makefile
@@ -2,7 +2,7 @@
obj- := dummy.o
# List of programs to build
-hostprogs-y := test_verifier test_maps
+hostprogs-y := test_verifier test_maps test_lru_map test_lru_dist
hostprogs-y += sock_example
hostprogs-y += fds_example
hostprogs-y += sockex1
@@ -30,6 +30,8 @@ hostprogs-y += sampleip
test_verifier-objs := test_verifier.o libbpf.o
test_maps-objs := test_maps.o libbpf.o
+test_lru_map-objs := test_lru_map.o libbpf.o
+test_lru_dist-objs := test_lru_dist.o libbpf.o
sock_example-objs := sock_example.o libbpf.o
fds_example-objs := bpf_load.o libbpf.o fds_example.o
sockex1-objs := bpf_load.o libbpf.o sockex1_user.o
diff --git a/samples/bpf/map_perf_test_kern.c b/samples/bpf/map_perf_test_kern.c
index 311538e..abb11af 100644
--- a/samples/bpf/map_perf_test_kern.c
+++ b/samples/bpf/map_perf_test_kern.c
@@ -19,6 +19,13 @@ struct bpf_map_def SEC("maps") hash_map = {
.max_entries = MAX_ENTRIES,
};
+struct bpf_map_def SEC("maps") lru_hash_map = {
+ .type = BPF_MAP_TYPE_LRU_HASH,
+ .key_size = sizeof(u32),
+ .value_size = sizeof(long),
+ .max_entries = 10000,
+};
+
struct bpf_map_def SEC("maps") percpu_hash_map = {
.type = BPF_MAP_TYPE_PERCPU_HASH,
.key_size = sizeof(u32),
@@ -53,6 +60,7 @@ int stress_hmap(struct pt_regs *ctx)
value = bpf_map_lookup_elem(&hash_map, &key);
if (value)
bpf_map_delete_elem(&hash_map, &key);
+
return 0;
}
@@ -96,5 +104,17 @@ int stress_percpu_hmap_alloc(struct pt_regs *ctx)
bpf_map_delete_elem(&percpu_hash_map_alloc, &key);
return 0;
}
+
+SEC("kprobe/sys_getpid")
+int stress_lru_hmap_alloc(struct pt_regs *ctx)
+{
+ u32 key = bpf_get_prandom_u32();
+ long val = 1;
+
+ bpf_map_update_elem(&lru_hash_map, &key, &val, BPF_ANY);
+
+ return 0;
+}
+
char _license[] SEC("license") = "GPL";
u32 _version SEC("version") = LINUX_VERSION_CODE;
diff --git a/samples/bpf/map_perf_test_user.c b/samples/bpf/map_perf_test_user.c
index 3147377..db7fa74 100644
--- a/samples/bpf/map_perf_test_user.c
+++ b/samples/bpf/map_perf_test_user.c
@@ -35,6 +35,7 @@ static __u64 time_get_ns(void)
#define PERCPU_HASH_PREALLOC (1 << 1)
#define HASH_KMALLOC (1 << 2)
#define PERCPU_HASH_KMALLOC (1 << 3)
+#define LRU_HASH_PREALLOC (1 << 4)
static int test_flags = ~0;
@@ -50,6 +51,19 @@ static void test_hash_prealloc(int cpu)
cpu, MAX_CNT * 1000000000ll / (time_get_ns() - start_time));
}
+static void test_lru_hash_prealloc(int cpu)
+{
+ __u64 start_time;
+ int i;
+
+ start_time = time_get_ns();
+ for (i = 0; i < MAX_CNT; i++)
+ syscall(__NR_getpid);
+ printf("%d:lru_hash_map_perf pre-alloc %lld events per sec\n",
+ cpu, MAX_CNT * 1000000000ll / (time_get_ns() - start_time));
+}
+
static void test_percpu_hash_prealloc(int cpu)
{
__u64 start_time;
@@ -105,6 +119,9 @@ static void loop(int cpu)
if (test_flags & PERCPU_HASH_KMALLOC)
test_percpu_hash_kmalloc(cpu);
+
+ if (test_flags & LRU_HASH_PREALLOC)
+ test_lru_hash_prealloc(cpu);
}
static void run_perf_test(int tasks)
diff --git a/samples/bpf/test_lru_dist.c b/samples/bpf/test_lru_dist.c
new file mode 100644
index 0000000..376e14c
--- /dev/null
+++ b/samples/bpf/test_lru_dist.c
@@ -0,0 +1,322 @@
+/*
+ * Copyright (c) 2016 Facebook
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ */
+#define _GNU_SOURCE
+#include <linux/types.h>
+#include <stdio.h>
+#include <unistd.h>
+#include <linux/bpf.h>
+#include <errno.h>
+#include <string.h>
+#include <assert.h>
+#include <sched.h>
+#include <sys/wait.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+#include <stdlib.h>
+#include <time.h>
+#include "libbpf.h"
+
+#define offsetof(TYPE, MEMBER) ((size_t)&((TYPE *)0)->MEMBER)
+
+#define container_of(ptr, type, member) ({ \
+ const typeof( ((type *)0)->member ) *__mptr = (ptr); \
+ (type *)( (char *)__mptr - offsetof(type,member) );})
+
+struct list_head {
+ struct list_head *next, *prev;
+};
+
+static inline void INIT_LIST_HEAD(struct list_head *list)
+{
+ list->next = list;
+ list->prev = list;
+}
+
+static inline int list_empty(const struct list_head *head)
+{
+ return head->next == head;
+}
+
+static inline void __list_add(struct list_head *new,
+ struct list_head *prev,
+ struct list_head *next)
+{
+ next->prev = new;
+ new->next = next;
+ new->prev = prev;
+ prev->next = new;
+}
+
+static inline void list_add(struct list_head *new, struct list_head *head)
+{
+ __list_add(new, head, head->next);
+}
+
+static inline void __list_del(struct list_head *prev, struct list_head *next)
+{
+ next->prev = prev;
+ prev->next = next;
+}
+
+static inline void __list_del_entry(struct list_head *entry)
+{
+ __list_del(entry->prev, entry->next);
+}
+
+static inline void list_move(struct list_head *list, struct list_head *head)
+{
+ __list_del_entry(list);
+ list_add(list, head);
+}
+
+#define list_entry(ptr, type, member) \
+ container_of(ptr, type, member)
+
+#define list_last_entry(ptr, type, member) \
+ list_entry((ptr)->prev, type, member)
+
+struct pfect_lru_node {
+ struct list_head list;
+ unsigned long long key;
+};
+
+struct pfect_lru {
+ struct list_head list;
+ struct pfect_lru_node *free_nodes;
+ unsigned int cur_size;
+ unsigned int lru_size;
+ unsigned int nr_unique;
+ unsigned int nr_misses;
+ unsigned int total;
+ int map_fd;
+};
+
+static void pfect_lru_init(struct pfect_lru *lru, unsigned int lru_size,
+ unsigned int nr_possible_elems)
+{
+ lru->map_fd = bpf_create_map(BPF_MAP_TYPE_HASH,
+ sizeof(unsigned long long),
+ sizeof(struct pfect_lru_node *),
+ nr_possible_elems, 0);
+ assert(lru->map_fd != -1);
+
+ lru->free_nodes = malloc(lru_size * sizeof(struct pfect_lru_node));
+ assert(lru->free_nodes);
+
+ INIT_LIST_HEAD(&lru->list);
+ lru->cur_size = 0;
+ lru->lru_size = lru_size;
+ lru->nr_unique = lru->nr_misses = lru->total = 0;
+}
+
+static void pfect_lru_destroy(struct pfect_lru *lru)
+{
+ close(lru->map_fd);
+ free(lru->free_nodes);
+}
+
+static int pfect_lru_lookup_or_insert(struct pfect_lru *lru,
+ unsigned long long key)
+{
+ struct pfect_lru_node *node = NULL;
+ int seen = 0;
+
+ lru->total++;
+ if (!bpf_lookup_elem(lru->map_fd, &key, &node)) {
+ if (node) {
+ list_move(&node->list, &lru->list);
+ return 1;
+ }
+ seen = 1;
+ }
+
+ if (lru->cur_size < lru->lru_size) {
+ node = &lru->free_nodes[lru->cur_size++];
+ INIT_LIST_HEAD(&node->list);
+ } else {
+ struct pfect_lru_node *null_node = NULL;
+
+ node = list_last_entry(&lru->list,
+ struct pfect_lru_node,
+ list);
+ bpf_update_elem(lru->map_fd, &node->key, &null_node, BPF_EXIST);
+ }
+
+ node->key = key;
+ list_move(&node->list, &lru->list);
+
+ lru->nr_misses++;
+ if (seen) {
+ assert(!bpf_update_elem(lru->map_fd, &key, &node, BPF_EXIST));
+ } else {
+ lru->nr_unique++;
+ assert(!bpf_update_elem(lru->map_fd, &key, &node, BPF_NOEXIST));
+ }
+
+ return seen;
+}
+
+static unsigned int read_keys(const char *dist_file,
+ unsigned long long **keys)
+{
+ struct stat fst;
+ unsigned long long *retkeys;
+ unsigned int counts = 0;
+ int dist_fd;
+ char *b, *l;
+ int i;
+
+ dist_fd = open(dist_file, 0);
+ assert(dist_fd != -1);
+
+ assert(fstat(dist_fd, &fst) == 0);
+ b = malloc(fst.st_size);
+ assert(b);
+
+ assert(read(dist_fd, b, fst.st_size) == fst.st_size);
+ close(dist_fd);
+ for (i = 0; i < fst.st_size; i++) {
+ if (b[i] == '\n')
+ counts++;
+ }
+ counts++; /* in case the last line has no \n */
+
+ retkeys = malloc(counts * sizeof(unsigned long long));
+ assert(retkeys);
+
+ counts = 0;
+ for (l = strtok(b, "\n"); l; l = strtok(NULL, "\n"))
+ retkeys[counts++] = strtoull(l, NULL, 10);
+ free(b);
+
+ *keys = retkeys;
+
+ return counts;
+}
+
+static void do_test_lru_dist(int lru_map_fd, int task,
+ const unsigned long long *keys,
+ unsigned int key_counts, unsigned int lru_size)
+{
+ unsigned int nr_misses = 0;
+ struct pfect_lru pfect_lru;
+ unsigned long long key_offset = task * key_counts;
+ unsigned long long key, value = 1234;
+ unsigned int i;
+
+ printf("task:%d %s:......\n", task, __func__);
+
+ pfect_lru_init(&pfect_lru, lru_size, key_counts);
+
+ for (i = 0; i < key_counts; i++) {
+ key = keys[i] + key_offset;
+
+ pfect_lru_lookup_or_insert(&pfect_lru, key);
+
+ if (!bpf_lookup_elem(lru_map_fd, &key, &value))
+ continue;
+
+ if (bpf_update_elem(lru_map_fd, &key, &value, BPF_NOEXIST)) {
+ printf("bpf_update_elem(lru_map_fd, %llu): errno:%d\n",
+ key, errno);
+ assert(0);
+ }
+
+ nr_misses++;
+ }
+
+ printf(" task:%d BPF LRU: nr_unique:%u(/%u) nr_misses:%u(/%u)\n",
+ task, pfect_lru.nr_unique, key_counts, nr_misses, key_counts);
+ printf(" task:%d Perfect LRU: nr_unique:%u(/%u) nr_misses:%u(/%u)\n",
+ task, pfect_lru.nr_unique, pfect_lru.total,
+ pfect_lru.nr_misses, pfect_lru.total);
+
+ pfect_lru_destroy(&pfect_lru);
+}
+
+static void test_lru_dist(int map_type, const unsigned long long *keys,
+ unsigned int key_counts, unsigned int lru_size)
+{
+ cpu_set_t cpuset;
+ int lru_map_fd;
+
+ CPU_ZERO(&cpuset);
+ CPU_SET(0, &cpuset);
+ assert(!sched_setaffinity(0, sizeof(cpuset), &cpuset));
+
+ lru_map_fd = bpf_create_map(map_type, sizeof(unsigned long long),
+ sizeof(unsigned long long),
+ lru_size, 0);
+ assert(lru_map_fd != -1);
+ do_test_lru_dist(lru_map_fd, 0, keys, key_counts, lru_size);
+ close(lru_map_fd);
+}
+
+static void test_parallel_lru_dist(int map_type, int nr_tasks,
+ const unsigned long long *keys,
+ unsigned int key_counts,
+ unsigned int lru_size)
+{
+ cpu_set_t cpuset;
+ pid_t pid[nr_tasks];
+ int lru_map_fd;
+ int i;
+
+ lru_map_fd = bpf_create_map(map_type, sizeof(unsigned long long),
+ sizeof(unsigned long long),
+ nr_tasks * lru_size, 0);
+ assert(lru_map_fd != -1);
+
+ for (i = 0; i < nr_tasks; i++) {
+ pid[i] = fork();
+ if (pid[i] == 0) {
+ CPU_ZERO(&cpuset);
+ CPU_SET(i, &cpuset);
+ assert(!sched_setaffinity(0, sizeof(cpuset), &cpuset));
+ do_test_lru_dist(lru_map_fd, i, keys, key_counts,
+ lru_size);
+ exit(0);
+ } else if (pid[i] == -1) {
+ printf("couldn't spawn #%d process\n", i);
+ exit(1);
+ }
+ }
+ for (i = 0; i < nr_tasks; i++) {
+ int status;
+
+ assert(waitpid(pid[i], &status, 0) == pid[i]);
+ assert(status == 0);
+ }
+
+ close(lru_map_fd);
+}
+
+int main(int argc, char **argv)
+{
+ struct rlimit r = {RLIM_INFINITY, RLIM_INFINITY};
+ const char *dist_file = argv[1];
+ int lru_size = atoi(argv[2]);
+ unsigned long long *keys = NULL;
+ unsigned int counts;
+
+ setbuf(stdout, NULL);
+
+ assert(!setrlimit(RLIMIT_MEMLOCK, &r));
+
+ counts = read_keys(dist_file, &keys);
+ test_lru_dist(BPF_MAP_TYPE_LRU_HASH, keys, counts, lru_size);
+ if (argc > 3)
+ test_parallel_lru_dist(BPF_MAP_TYPE_LRU_HASH, atoi(argv[3]),
+ keys, counts, lru_size);
+
+ free(keys);
+
+ return 0;
+}
diff --git a/samples/bpf/test_lru_map.c b/samples/bpf/test_lru_map.c
new file mode 100644
index 0000000..82cfffc
--- /dev/null
+++ b/samples/bpf/test_lru_map.c
@@ -0,0 +1,690 @@
+/*
+ * Copyright (c) 2016 Facebook
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ */
+#define _GNU_SOURCE
+#include <stdio.h>
+#include <unistd.h>
+#include <linux/bpf.h>
+#include <errno.h>
+#include <string.h>
+#include <assert.h>
+#include <sched.h>
+#include <sys/wait.h>
+#include <stdlib.h>
+#include <time.h>
+#include "libbpf.h"
+
+#define min(a, b) ((a) < (b) ? (a) : (b))
+#define LOCAL_FREE_TARGET (128)
+
+static long nr_cpus;
+
+static int create_map(int map_type, unsigned int size)
+{
+ int map_fd;
+
+ map_fd = bpf_create_map(map_type, sizeof(unsigned long long),
+ sizeof(unsigned long long), size, 0);
+
+ if (map_fd == -1)
+ perror("bpf_create_map");
+
+ return map_fd;
+}
+
+static int map_subset(int map0, int map1)
+{
+ unsigned long long next_key = 0;
+ unsigned long long value0[nr_cpus], value1[nr_cpus];
+ int ret;
+
+ while (!bpf_get_next_key(map1, &next_key, &next_key)) {
+ assert(!bpf_lookup_elem(map1, &next_key, value1));
+ ret = bpf_lookup_elem(map0, &next_key, value0);
+ if (ret) {
+ printf("key:%llu not found from map. %s(%d)\n",
+ next_key, strerror(errno), errno);
+ return 0;
+ }
+ if (value0[0] != value1[0]) {
+ printf("key:%llu value0:%llu != value1:%llu\n",
+ next_key, value0[0], value1[0]);
+ return 0;
+ }
+ }
+ return 1;
+}
+
+static int map_equal(int lru_map, int expected)
+{
+ return map_subset(lru_map, expected) && map_subset(expected, lru_map);
+}
+
+/* Size of the LRU map is 2
+ * Add key=1 (+1 key)
+ * Add key=2 (+1 key)
+ * Lookup Key=1
+ * Add Key=3
+ * => Key=2 will be removed by LRU
+ * Iterate map. Only found key=1 and key=3
+ */
+static void test_lru_sanity0(int map_type)
+{
+ unsigned long long key, value[nr_cpus];
+ cpu_set_t cpuset;
+ int map_fd, expected_map_fd;
+
+ printf("%s (map_type:%d): ", __func__, map_type);
+
+ CPU_ZERO(&cpuset);
+ CPU_SET(0, &cpuset);
+ assert(!sched_setaffinity(0, sizeof(cpuset), &cpuset));
+
+ map_fd = create_map(map_type, 2);
+ assert(map_fd != -1);
+ expected_map_fd = create_map(BPF_MAP_TYPE_HASH, 2);
+ assert(expected_map_fd != -1);
+
+ value[0] = 1234;
+
+ /* insert key=1 element */
+
+ key = 1;
+ assert(!bpf_update_elem(map_fd, &key, value, BPF_NOEXIST));
+ assert(!bpf_update_elem(expected_map_fd, &key, value, BPF_NOEXIST));
+
+ /* BPF_NOEXIST means: add new element if it doesn't exist */
+ assert(bpf_update_elem(map_fd, &key, value, BPF_NOEXIST) == -1 &&
+ /* key=1 already exists */
+ errno == EEXIST);
+
+ assert(bpf_update_elem(map_fd, &key, value, -1) == -1 &&
+ errno == EINVAL);
+
+ /* insert key=2 element */
+
+ /* check that key=2 is not found */
+ key = 2;
+ assert(bpf_lookup_elem(map_fd, &key, value) == -1 && errno == ENOENT);
+
+ /* BPF_EXIST means: update existing element */
+ assert(bpf_update_elem(map_fd, &key, value, BPF_EXIST) == -1 &&
+ /* key=2 is not there */
+ errno == ENOENT);
+
+ assert(!bpf_update_elem(map_fd, &key, value, BPF_NOEXIST));
+
+ /* insert key=3 element */
+
+ /* check that key=3 is not found */
+ key = 3;
+ assert(bpf_lookup_elem(map_fd, &key, value) == -1 && errno == ENOENT);
+
+ /* check that key=1 can be found and mark the ref bit to
+ * stop LRU from removing key=1
+ */
+ key = 1;
+ assert(!bpf_lookup_elem(map_fd, &key, value));
+ assert(value[0] == 1234);
+
+ key = 3;
+ assert(!bpf_update_elem(map_fd, &key, value, BPF_NOEXIST));
+ assert(!bpf_update_elem(expected_map_fd, &key, value, BPF_NOEXIST));
+
+ /* key=2 has been removed from the LRU */
+ key = 2;
+ assert(bpf_lookup_elem(map_fd, &key, value) == -1);
+
+ assert(map_equal(map_fd, expected_map_fd));
+
+ close(map_fd);
+
+ printf("Pass\n");
+}
+
+/* Size of the LRU map is 1.5*LOCAL_FREE_TARGET
+ * Insert 1 to LOCAL_FREE_TARGET (+LOCAL_FREE_TARGET keys)
+ * Lookup 1 to LOCAL_FREE_TARGET/2
+ * Insert 1+LOCAL_FREE_TARGET to 2*LOCAL_FREE_TARGET (+LOCAL_FREE_TARGET keys)
+ * => 1+LOCAL_FREE_TARGET/2 to LOCAL_FREE_TARGET will be removed by LRU
+ */
+static void test_lru_sanity1(int map_type)
+{
+ unsigned long long key, end_key, value[nr_cpus];
+ unsigned int map_size;
+ int lru_map_fd, expected_map_fd;
+ unsigned int batch_size;
+ cpu_set_t cpuset;
+
+ printf("%s (map_type:%d): ", __func__, map_type);
+
+ batch_size = LOCAL_FREE_TARGET / 2;
+ assert(batch_size * 2 == LOCAL_FREE_TARGET);
+
+ CPU_ZERO(&cpuset);
+ CPU_SET(0, &cpuset);
+ assert(sched_setaffinity(0, sizeof(cpuset), &cpuset) == 0);
+
+ map_size = LOCAL_FREE_TARGET + batch_size;
+ lru_map_fd = create_map(map_type, map_size);
+ assert(lru_map_fd != -1);
+ expected_map_fd = create_map(BPF_MAP_TYPE_HASH, map_size);
+ assert(expected_map_fd != -1);
+
+ value[0] = 1234;
+
+ /* Insert 1 to LOCAL_FREE_TARGET (+LOCAL_FREE_TARGET keys) */
+ end_key = 1 + LOCAL_FREE_TARGET;
+ for (key = 1; key < end_key; key++)
+ assert(!bpf_update_elem(lru_map_fd, &key, value, BPF_NOEXIST));
+
+ /* Lookup 1 to LOCAL_FREE_TARGET/2 */
+ end_key = 1 + batch_size;
+ for (key = 1; key < end_key; key++) {
+ assert(!bpf_lookup_elem(lru_map_fd, &key, value));
+ assert(!bpf_update_elem(expected_map_fd, &key, value,
+ BPF_NOEXIST));
+ }
+
+ /* Insert 1+LOCAL_FREE_TARGET to 2*LOCAL_FREE_TARGET
+ * => 1+LOCAL_FREE_TARGET/2 to LOCAL_FREE_TARGET will be
+ * removed by LRU
+ */
+ key = 1 + LOCAL_FREE_TARGET;
+ end_key = key + LOCAL_FREE_TARGET;
+ for (; key < end_key; key++) {
+ assert(!bpf_update_elem(lru_map_fd, &key, value, BPF_NOEXIST));
+ assert(!bpf_update_elem(expected_map_fd, &key, value,
+ BPF_NOEXIST));
+ }
+
+ assert(map_equal(lru_map_fd, expected_map_fd));
+
+ close(expected_map_fd);
+ close(lru_map_fd);
+
+ printf("Pass\n");
+}
+
+/* Size of the LRU map is 1.5 * LOCAL_FREE_TARGET
+ * Insert 1 to LOCAL_FREE_TARGET (+LOCAL_FREE_TARGET keys)
+ * Update 1 to LOCAL_FREE_TARGET/2
+ * => The original 1 to LOCAL_FREE_TARGET/2 will be removed due to
+ * the LRU shrink process
+ * Re-insert 1 to LOCAL_FREE_TARGET/2 again and do a lookup immediately
+ * Insert 1+LOCAL_FREE_TARGET to LOCAL_FREE_TARGET*3/2
+ * Insert 1+LOCAL_FREE_TARGET*3/2 to LOCAL_FREE_TARGET*5/2
+ * => Key 1+LOCAL_FREE_TARGET to LOCAL_FREE_TARGET*3/2
+ * will be removed from the LRU because they have never
+ * been looked up and their ref bit is not set
+ */
+static void test_lru_sanity2(int map_type)
+{
+ unsigned long long key, value[nr_cpus];
+ unsigned long long end_key;
+ int lru_map_fd, expected_map_fd;
+ unsigned int batch_size;
+ unsigned int map_size;
+ cpu_set_t cpuset;
+
+ printf("%s (map_type:%d): ", __func__, map_type);
+
+ batch_size = LOCAL_FREE_TARGET / 2;
+ assert(batch_size * 2 == LOCAL_FREE_TARGET);
+
+ CPU_ZERO(&cpuset);
+ CPU_SET(0, &cpuset);
+ assert(sched_setaffinity(0, sizeof(cpuset), &cpuset) == 0);
+
+ map_size = LOCAL_FREE_TARGET + batch_size;
+ lru_map_fd = create_map(map_type, map_size);
+ assert(lru_map_fd != -1);
+ expected_map_fd = create_map(BPF_MAP_TYPE_HASH, map_size);
+ assert(expected_map_fd != -1);
+
+ value[0] = 1234;
+
+ /* Insert 1 to LOCAL_FREE_TARGET (+LOCAL_FREE_TARGET keys) */
+ end_key = 1 + LOCAL_FREE_TARGET;
+ for (key = 1; key < end_key; key++)
+ assert(!bpf_update_elem(lru_map_fd, &key, value, BPF_NOEXIST));
+
+ /* Any bpf_update_elem will require to acquire a new node
+ * from LRU first.
+ *
+ * The local list is running out of free nodes.
+ * It gets from the global LRU list which tries to
+ * shrink the inactive list to get LOCAL_FREE_TARGET
+ * number of free nodes.
+ *
+ * Hence, the oldest key 1 to LOCAL_FREE_TARGET/2
+ * are removed from the LRU list.
+ */
+ key = 1;
+ if (map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) {
+ assert(!bpf_update_elem(lru_map_fd, &key, value, BPF_NOEXIST));
+ assert(!bpf_delete_elem(lru_map_fd, &key));
+ } else
+ assert(bpf_update_elem(lru_map_fd, &key, value, BPF_EXIST));
+
+ /* Re-insert 1 to LOCAL_FREE_TARGET/2 again and do a lookup
+ * immediately.
+ */
+ end_key = 1 + batch_size;
+ value[0] = 4321;
+ for (key = 1; key < end_key; key++) {
+ assert(bpf_lookup_elem(lru_map_fd, &key, value));
+ assert(!bpf_update_elem(lru_map_fd, &key, value, BPF_NOEXIST));
+ assert(!bpf_lookup_elem(lru_map_fd, &key, value));
+ assert(value[0] == 4321);
+ assert(!bpf_update_elem(expected_map_fd, &key, value,
+ BPF_NOEXIST));
+ }
+
+ value[0] = 1234;
+
+ /* Insert 1+LOCAL_FREE_TARGET to LOCAL_FREE_TARGET*3/2 */
+ end_key = 1 + LOCAL_FREE_TARGET + batch_size;
+ for (key = 1 + LOCAL_FREE_TARGET; key < end_key; key++)
+ /* These newly added but not referenced keys will be
+ * gone during the next LRU shrink.
+ */
+ assert(!bpf_update_elem(lru_map_fd, &key, value, BPF_NOEXIST));
+
+ /* Insert 1+LOCAL_FREE_TARGET*3/2 to LOCAL_FREE_TARGET*5/2 */
+ end_key = key + LOCAL_FREE_TARGET;
+ for (; key < end_key; key++) {
+ assert(!bpf_update_elem(lru_map_fd, &key, value, BPF_NOEXIST));
+ assert(!bpf_update_elem(expected_map_fd, &key, value,
+ BPF_NOEXIST));
+ }
+
+ assert(map_equal(lru_map_fd, expected_map_fd));
+
+ close(expected_map_fd);
+ close(lru_map_fd);
+
+ printf("Pass\n");
+}
+
+/* Size of the LRU map is 2*LOCAL_FREE_TARGET
+ * It is to test the active/inactive list rotation
+ * Insert 1 to 2*LOCAL_FREE_TARGET (+2*LOCAL_FREE_TARGET keys)
+ * Lookup key 1 to LOCAL_FREE_TARGET*3/2
+ * Add 1+2*LOCAL_FREE_TARGET to LOCAL_FREE_TARGET*5/2 (+LOCAL_FREE_TARGET/2 keys)
+ * => key 1+LOCAL_FREE_TARGET*3/2 to 2*LOCAL_FREE_TARGET are removed from LRU
+ */
+static void test_lru_sanity3(int map_type)
+{
+ unsigned long long key, end_key, value[nr_cpus];
+ int lru_map_fd, expected_map_fd;
+ unsigned int batch_size;
+ unsigned int map_size;
+ cpu_set_t cpuset;
+
+ printf("%s (map_type:%d): ", __func__, map_type);
+
+ batch_size = LOCAL_FREE_TARGET / 2;
+ assert(batch_size * 2 == LOCAL_FREE_TARGET);
+
+ CPU_ZERO(&cpuset);
+ CPU_SET(0, &cpuset);
+ assert(sched_setaffinity(0, sizeof(cpuset), &cpuset) == 0);
+
+ map_size = LOCAL_FREE_TARGET * 2;
+ lru_map_fd = create_map(map_type, map_size);
+ assert(lru_map_fd != -1);
+ expected_map_fd = create_map(BPF_MAP_TYPE_HASH, map_size);
+ assert(expected_map_fd != -1);
+
+ value[0] = 1234;
+
+ /* Insert 1 to 2*LOCAL_FREE_TARGET (+2*LOCAL_FREE_TARGET keys) */
+ end_key = 1 + (2 * LOCAL_FREE_TARGET);
+ for (key = 1; key < end_key; key++)
+ assert(!bpf_update_elem(lru_map_fd, &key, value, BPF_NOEXIST));
+
+ /* Lookup key 1 to LOCAL_FREE_TARGET*3/2 */
+ end_key = LOCAL_FREE_TARGET + batch_size;
+ for (key = 1; key < end_key; key++) {
+ assert(!bpf_lookup_elem(lru_map_fd, &key, value));
+ assert(!bpf_update_elem(expected_map_fd, &key, value,
+ BPF_NOEXIST));
+ }
+
+ /* Add 1+2*LOCAL_FREE_TARGET to LOCAL_FREE_TARGET*5/2
+ * (+LOCAL_FREE_TARGET/2 keys)
+ */
+ key = 2 * LOCAL_FREE_TARGET + 1;
+ end_key = key + batch_size;
+ for (; key < end_key; key++) {
+ assert(!bpf_update_elem(lru_map_fd, &key, value, BPF_NOEXIST));
+ assert(!bpf_update_elem(expected_map_fd, &key, value,
+ BPF_NOEXIST));
+ }
+
+ assert(map_equal(lru_map_fd, expected_map_fd));
+
+ close(expected_map_fd);
+ close(lru_map_fd);
+
+ printf("Pass\n");
+}
+
+/* Test deletion */
+static void test_lru_sanity4(int map_type)
+{
+ int lru_map_fd, expected_map_fd;
+ unsigned long long key, value[nr_cpus];
+ unsigned long long end_key;
+ cpu_set_t cpuset;
+
+ printf("%s (map_type:%d): ", __func__, map_type);
+
+ CPU_ZERO(&cpuset);
+ CPU_SET(0, &cpuset);
+ assert(sched_setaffinity(0, sizeof(cpuset), &cpuset) == 0);
+
+ lru_map_fd = create_map(map_type, 3 * LOCAL_FREE_TARGET);
+ assert(lru_map_fd != -1);
+ expected_map_fd = create_map(BPF_MAP_TYPE_HASH,
+ 3 * LOCAL_FREE_TARGET);
+
+ value[0] = 1234;
+
+ for (key = 1; key <= 2 * LOCAL_FREE_TARGET; key++)
+ assert(!bpf_update_elem(lru_map_fd, &key, value, BPF_NOEXIST));
+
+ key = 1;
+ assert(bpf_update_elem(lru_map_fd, &key, value, BPF_NOEXIST));
+
+ for (key = 1; key <= LOCAL_FREE_TARGET; key++) {
+ assert(!bpf_lookup_elem(lru_map_fd, &key, value));
+ assert(!bpf_update_elem(expected_map_fd, &key, value,
+ BPF_NOEXIST));
+ }
+
+ for (; key <= 2 * LOCAL_FREE_TARGET; key++) {
+ assert(!bpf_delete_elem(lru_map_fd, &key));
+ assert(bpf_delete_elem(lru_map_fd, &key));
+ }
+
+ end_key = key + 2 * LOCAL_FREE_TARGET;
+ for (; key < end_key; key++) {
+ assert(!bpf_update_elem(lru_map_fd, &key, value, BPF_NOEXIST));
+ assert(!bpf_update_elem(expected_map_fd, &key, value,
+ BPF_NOEXIST));
+ }
+
+ assert(map_equal(lru_map_fd, expected_map_fd));
+
+ close(expected_map_fd);
+ close(lru_map_fd);
+
+ printf("Pass\n");
+}
+
+static void do_test_lru_small0(int cpu, int map_fd)
+{
+ unsigned long long key, value[nr_cpus];
+
+ /* Ensure the last key inserted by previous CPU can be found */
+ key = cpu;
+ assert(!bpf_lookup_elem(map_fd, &key, value));
+
+ value[0] = 1234;
+
+ key = cpu + 1;
+ assert(!bpf_update_elem(map_fd, &key, value, BPF_NOEXIST));
+ assert(!bpf_lookup_elem(map_fd, &key, value));
+
+ /* Cannot find the last key because it was removed by LRU */
+ key = cpu;
+ assert(bpf_lookup_elem(map_fd, &key, value));
+}
+
+static void test_lru_small0(int map_type)
+{
+ unsigned long long key, value[nr_cpus];
+ int map_fd;
+ int i;
+
+ printf("%s (map_type:%d): ", __func__, map_type);
+
+ map_fd = create_map(map_type, 1);
+ assert(map_fd != -1);
+
+ value[0] = 1234;
+ key = 0;
+ assert(!bpf_update_elem(map_fd, &key, value, BPF_NOEXIST));
+
+ for (i = 0; i < nr_cpus; i++) {
+ cpu_set_t cpuset;
+ pid_t pid;
+
+ pid = fork();
+ if (pid == 0) {
+ CPU_ZERO(&cpuset);
+ CPU_SET(i, &cpuset);
+ assert(!sched_setaffinity(0, sizeof(cpuset), &cpuset));
+ do_test_lru_small0(i, map_fd);
+ exit(0);
+ } else if (pid == -1) {
+ printf("couldn't spawn #%d process\n", i);
+ exit(1);
+ } else {
+ int status;
+
+ assert(waitpid(pid, &status, 0) == pid);
+ assert(status == 0);
+ }
+ }
+
+ close(map_fd);
+
+ printf("Pass\n");
+}
+
+static void test_lru_loss0(int map_type)
+{
+ unsigned long long key, value[nr_cpus];
+ unsigned int old_unused_losses = 0;
+ unsigned int new_unused_losses = 0;
+ unsigned int used_losses = 0;
+ int map_fd;
+
+ printf("%s (map_type:%d): ", __func__, map_type);
+
+ map_fd = create_map(map_type, 900);
+ assert(map_fd != -1);
+
+ value[0] = 1234;
+
+ for (key = 1; key <= 1000; key++) {
+ int start_key, end_key;
+
+ assert(bpf_update_elem(map_fd, &key, value, BPF_NOEXIST) == 0);
+
+ start_key = 101;
+ end_key = min(key, 900);
+
+ while (start_key <= end_key) {
+ bpf_lookup_elem(map_fd, &start_key, value);
+ start_key++;
+ }
+ }
+
+ for (key = 1; key <= 1000; key++) {
+ if (bpf_lookup_elem(map_fd, &key, value)) {
+ if (key <= 100)
+ old_unused_losses++;
+ else if (key <= 900)
+ used_losses++;
+ else
+ new_unused_losses++;
+ }
+ }
+
+ close(map_fd);
+
+ printf("older-elem-losses:%d(/100) active-elem-losses:%d(/800) "
+ "newer-elem-losses:%d(/100)\n",
+ old_unused_losses, used_losses, new_unused_losses);
+}
+
+static void test_lru_loss1(int map_type)
+{
+ unsigned long long key, value[nr_cpus];
+ int map_fd;
+ unsigned int nr_losses = 0;
+
+ printf("%s (map_type:%d): ", __func__, map_type);
+
+ map_fd = create_map(map_type, 1000);
+ assert(map_fd != -1);
+
+ value[0] = 1234;
+
+ for (key = 1; key <= 1000; key++)
+ assert(!bpf_update_elem(map_fd, &key, value, BPF_NOEXIST));
+
+ for (key = 1; key <= 1000; key++) {
+ if (bpf_lookup_elem(map_fd, &key, value))
+ nr_losses++;
+ }
+
+ close(map_fd);
+
+ printf("nr_losses:%d(/1000)\n", nr_losses);
+}
+
+static void do_test_lru_parallel_loss(int task, void *data)
+{
+ const unsigned int nr_stable_elems = 1000;
+ const unsigned int nr_repeats = 100000;
+
+ int map_fd = *(int *)data;
+ unsigned long long stable_base;
+ unsigned long long key, value[nr_cpus];
+ unsigned long long next_ins_key;
+ unsigned int nr_losses = 0;
+ unsigned int i;
+
+ stable_base = task * nr_repeats * 2 + 1;
+ next_ins_key = stable_base;
+ value[0] = 1234;
+ for (i = 0; i < nr_stable_elems; i++) {
+ assert(bpf_update_elem(map_fd, &next_ins_key, value,
+ BPF_NOEXIST) == 0);
+ next_ins_key++;
+ }
+
+ for (i = 0; i < nr_repeats; i++) {
+ int rn;
+
+ rn = rand();
+
+ if (rn % 10) {
+ key = rn % nr_stable_elems + stable_base;
+ bpf_lookup_elem(map_fd, &key, value);
+ } else {
+ bpf_update_elem(map_fd, &next_ins_key, value,
+ BPF_NOEXIST);
+ next_ins_key++;
+ }
+ }
+
+ key = stable_base;
+ for (i = 0; i < nr_stable_elems; i++) {
+ if (bpf_lookup_elem(map_fd, &key, value))
+ nr_losses++;
+ key++;
+ }
+
+ printf(" task:%d nr_losses:%u\n", task, nr_losses);
+}
+
+static void run_parallel(int tasks, void (*fn)(int i, void *data), void *data)
+{
+ cpu_set_t cpuset;
+ pid_t pid[tasks];
+ int i;
+
+ for (i = 0; i < tasks; i++) {
+ pid[i] = fork();
+ if (pid[i] == 0) {
+ CPU_ZERO(&cpuset);
+ CPU_SET(i, &cpuset);
+ assert(!sched_setaffinity(0, sizeof(cpuset), &cpuset));
+ fn(i, data);
+ exit(0);
+ } else if (pid[i] == -1) {
+ printf("couldn't spawn #%d process\n", i);
+ exit(1);
+ }
+ }
+ for (i = 0; i < tasks; i++) {
+ int status;
+
+ assert(waitpid(pid[i], &status, 0) == pid[i]);
+ assert(status == 0);
+ }
+}
+
+static void test_lru_parallel_loss(int map_type, int nr_tasks)
+{
+ int map_fd;
+
+ printf("%s (map_type:%d):\n", __func__, map_type);
+
+ /* Give 20% more than the active working set */
+ map_fd = create_map(map_type, nr_tasks * (1000 + 200));
+
+ assert(map_fd != -1);
+
+ run_parallel(nr_tasks, do_test_lru_parallel_loss, &map_fd);
+
+ close(map_fd);
+}
+
+int main(int argc, char **argv)
+{
+ struct rlimit r = {RLIM_INFINITY, RLIM_INFINITY};
+ int map_types[] = {BPF_MAP_TYPE_LRU_HASH,
+ BPF_MAP_TYPE_LRU_PERCPU_HASH};
+ int i;
+
+ setbuf(stdout, NULL);
+
+ assert(!setrlimit(RLIMIT_MEMLOCK, &r));
+
+ srand(time(NULL));
+
+ nr_cpus = sysconf(_SC_NPROCESSORS_CONF);
+ assert(nr_cpus != -1);
+ printf("nr_cpus:%ld\n\n", nr_cpus);
+
+ for (i = 0; i < sizeof(map_types) / sizeof(*map_types); i++) {
+ test_lru_sanity0(map_types[i]);
+ test_lru_sanity1(map_types[i]);
+ test_lru_sanity2(map_types[i]);
+ test_lru_sanity3(map_types[i]);
+ test_lru_sanity4(map_types[i]);
+
+ test_lru_small0(map_types[i]);
+
+ test_lru_loss0(map_types[i]);
+ test_lru_loss1(map_types[i]);
+ test_lru_parallel_loss(map_types[i], nr_cpus);
+
+ printf("\n");
+ }
+
+ return 0;
+}
--
2.5.1