Subject: Re: [PATCH bpf-next 05/13] bpf: implement bpf iterator for hash maps
From: kernel test robot @ 2020-07-14 7:08 UTC
To: kbuild
CC: kbuild-all@lists.01.org
In-Reply-To: <20200713161744.3076960-1-yhs@fb.com>
References: <20200713161744.3076960-1-yhs@fb.com>
TO: Yonghong Song <yhs@fb.com>
TO: bpf@vger.kernel.org
TO: netdev@vger.kernel.org
CC: Alexei Starovoitov <ast@kernel.org>
CC: Daniel Borkmann <daniel@iogearbox.net>
CC: kernel-team@fb.com
CC: Martin KaFai Lau <kafai@fb.com>
Hi Yonghong,
I love your patch! Perhaps something to improve:
[auto build test WARNING on bpf-next/master]
url: https://github.com/0day-ci/linux/commits/Yonghong-Song/bpf-implement-bpf-iterator-for-map-elements/20200714-002048
base: https://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf-next.git master
:::::: branch date: 15 hours ago
:::::: commit date: 15 hours ago
config: microblaze-randconfig-s031-20200714 (attached as .config)
compiler: microblaze-linux-gcc (GCC) 9.3.0
reproduce:
wget https://raw.githubusercontent.com/intel/lkp-tests/master/sbin/make.cross -O ~/bin/make.cross
chmod +x ~/bin/make.cross
# apt-get install sparse
# sparse version: v0.6.2-41-g14e84ffc-dirty
# save the attached .config to linux build tree
COMPILER_INSTALL_PATH=$HOME/0day COMPILER=gcc-9.3.0 make.cross C=1 CF='-fdiagnostic-prefix -D__CHECK_ENDIAN__' ARCH=microblaze
If you fix the issue, kindly add the following tag as appropriate:
Reported-by: kernel test robot <lkp@intel.com>
sparse warnings: (new ones prefixed by >>)
kernel/bpf/hashtab.c:622:19: sparse: sparse: subtraction of functions? Share your drugs
kernel/bpf/hashtab.c:663:19: sparse: sparse: subtraction of functions? Share your drugs
kernel/bpf/hashtab.c:1345:24: sparse: sparse: incorrect type in initializer (different address spaces) @@ expected void *ubatch @@ got void [noderef] __user * @@
kernel/bpf/hashtab.c:1345:24: sparse: expected void *ubatch
kernel/bpf/hashtab.c:1345:24: sparse: got void [noderef] __user *
kernel/bpf/hashtab.c:1374:46: sparse: sparse: incorrect type in argument 2 (different address spaces) @@ expected void const [noderef] __user *from @@ got void *ubatch @@
kernel/bpf/hashtab.c:1374:46: sparse: expected void const [noderef] __user *from
kernel/bpf/hashtab.c:1374:46: sparse: got void *ubatch
kernel/bpf/hashtab.c:1535:16: sparse: sparse: incorrect type in assignment (different address spaces) @@ expected void *ubatch @@ got void [noderef] __user * @@
kernel/bpf/hashtab.c:1535:16: sparse: expected void *ubatch
kernel/bpf/hashtab.c:1535:16: sparse: got void [noderef] __user *
kernel/bpf/hashtab.c:1536:26: sparse: sparse: incorrect type in argument 1 (different address spaces) @@ expected void [noderef] __user *to @@ got void *ubatch @@
kernel/bpf/hashtab.c:1536:26: sparse: expected void [noderef] __user *to
kernel/bpf/hashtab.c:1536:26: sparse: got void *ubatch
kernel/bpf/hashtab.c:2075:19: sparse: sparse: subtraction of functions? Share your drugs
kernel/bpf/hashtab.c:1407:9: sparse: sparse: context imbalance in '__htab_map_lookup_and_delete_batch' - different lock contexts for basic block
>> kernel/bpf/hashtab.c:1649:17: sparse: sparse: context imbalance in 'bpf_hash_map_seq_find_next' - unexpected unlock
>> kernel/bpf/hashtab.c:1753:35: sparse: sparse: context imbalance in 'bpf_hash_map_seq_stop' - unexpected unlock
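As an aside, the address-space warnings on ubatch predate this patch:
the batch-op code stores a user pointer in a plain void *, which loses
sparse's __user tracking across the copy_{from,to}_user() calls. A
minimal sketch of the conventional fix, assuming ubatch can carry the
annotation end to end:

/* Sketch only: keeping the __user annotation on the local variable
 * lets sparse check the user/kernel address-space split at the
 * copy_from_user()/copy_to_user() boundaries.
 */
void __user *ubatch = u64_to_user_ptr(attr->batch.in_batch);

if (copy_from_user(&batch, ubatch, sizeof(batch)))
        return -EFAULT;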
# https://github.com/0day-ci/linux/commit/6d3930f639dd699e23551c0fb98a74a0a413d2a9
git remote add linux-review https://github.com/0day-ci/linux
git remote update linux-review
git checkout 6d3930f639dd699e23551c0fb98a74a0a413d2a9
vim +/bpf_hash_map_seq_find_next +1649 kernel/bpf/hashtab.c
6d3930f639dd69 Yonghong Song 2020-07-13 1623
6d3930f639dd69 Yonghong Song 2020-07-13 1624 static struct htab_elem *
6d3930f639dd69 Yonghong Song 2020-07-13 1625 bpf_hash_map_seq_find_next(struct bpf_iter_seq_hash_map_info *info,
6d3930f639dd69 Yonghong Song 2020-07-13 1626 struct htab_elem *prev_elem)
6d3930f639dd69 Yonghong Song 2020-07-13 1627 {
6d3930f639dd69 Yonghong Song 2020-07-13 1628 const struct bpf_htab *htab = info->htab;
6d3930f639dd69 Yonghong Song 2020-07-13 1629 unsigned long flags = info->flags;
6d3930f639dd69 Yonghong Song 2020-07-13 1630 u32 skip_elems = info->skip_elems;
6d3930f639dd69 Yonghong Song 2020-07-13 1631 u32 bucket_id = info->bucket_id;
6d3930f639dd69 Yonghong Song 2020-07-13 1632 struct hlist_nulls_head *head;
6d3930f639dd69 Yonghong Song 2020-07-13 1633 struct hlist_nulls_node *n;
6d3930f639dd69 Yonghong Song 2020-07-13 1634 struct htab_elem *elem;
6d3930f639dd69 Yonghong Song 2020-07-13 1635 struct bucket *b;
6d3930f639dd69 Yonghong Song 2020-07-13 1636 u32 i, count;
6d3930f639dd69 Yonghong Song 2020-07-13 1637
6d3930f639dd69 Yonghong Song 2020-07-13 1638 if (bucket_id >= htab->n_buckets)
6d3930f639dd69 Yonghong Song 2020-07-13 1639 return NULL;
6d3930f639dd69 Yonghong Song 2020-07-13 1640
6d3930f639dd69 Yonghong Song 2020-07-13 1641 /* try to find next elem in the same bucket */
6d3930f639dd69 Yonghong Song 2020-07-13 1642 if (prev_elem) {
6d3930f639dd69 Yonghong Song 2020-07-13 1643 n = rcu_dereference_raw(hlist_nulls_next_rcu(&prev_elem->hash_node));
6d3930f639dd69 Yonghong Song 2020-07-13 1644 elem = hlist_nulls_entry_safe(n, struct htab_elem, hash_node);
6d3930f639dd69 Yonghong Song 2020-07-13 1645 if (elem)
6d3930f639dd69 Yonghong Song 2020-07-13 1646 return elem;
6d3930f639dd69 Yonghong Song 2020-07-13 1647
6d3930f639dd69 Yonghong Song 2020-07-13 1648 /* not found, unlock and go to the next bucket */
6d3930f639dd69 Yonghong Song 2020-07-13 @1649 b = &htab->buckets[bucket_id++];
6d3930f639dd69 Yonghong Song 2020-07-13 1650 htab_unlock_bucket(htab, b, flags);
6d3930f639dd69 Yonghong Song 2020-07-13 1651 skip_elems = 0;
6d3930f639dd69 Yonghong Song 2020-07-13 1652 }
6d3930f639dd69 Yonghong Song 2020-07-13 1653
6d3930f639dd69 Yonghong Song 2020-07-13 1654 for (i = bucket_id; i < htab->n_buckets; i++) {
6d3930f639dd69 Yonghong Song 2020-07-13 1655 b = &htab->buckets[i];
6d3930f639dd69 Yonghong Song 2020-07-13 1656 flags = htab_lock_bucket(htab, b);
6d3930f639dd69 Yonghong Song 2020-07-13 1657
6d3930f639dd69 Yonghong Song 2020-07-13 1658 count = 0;
6d3930f639dd69 Yonghong Song 2020-07-13 1659 head = &b->head;
6d3930f639dd69 Yonghong Song 2020-07-13 1660 hlist_nulls_for_each_entry_rcu(elem, n, head, hash_node) {
6d3930f639dd69 Yonghong Song 2020-07-13 1661 if (count >= skip_elems) {
6d3930f639dd69 Yonghong Song 2020-07-13 1662 info->flags = flags;
6d3930f639dd69 Yonghong Song 2020-07-13 1663 info->bucket_id = i;
6d3930f639dd69 Yonghong Song 2020-07-13 1664 info->skip_elems = count;
6d3930f639dd69 Yonghong Song 2020-07-13 1665 return elem;
6d3930f639dd69 Yonghong Song 2020-07-13 1666 }
6d3930f639dd69 Yonghong Song 2020-07-13 1667 count++;
6d3930f639dd69 Yonghong Song 2020-07-13 1668 }
6d3930f639dd69 Yonghong Song 2020-07-13 1669
6d3930f639dd69 Yonghong Song 2020-07-13 1670 htab_unlock_bucket(htab, b, flags);
6d3930f639dd69 Yonghong Song 2020-07-13 1671 skip_elems = 0;
6d3930f639dd69 Yonghong Song 2020-07-13 1672 }
6d3930f639dd69 Yonghong Song 2020-07-13 1673
6d3930f639dd69 Yonghong Song 2020-07-13 1674 info->bucket_id = i;
6d3930f639dd69 Yonghong Song 2020-07-13 1675 info->skip_elems = 0;
6d3930f639dd69 Yonghong Song 2020-07-13 1676 return NULL;
6d3930f639dd69 Yonghong Song 2020-07-13 1677 }
6d3930f639dd69 Yonghong Song 2020-07-13 1678
6d3930f639dd69 Yonghong Song 2020-07-13 1679 static void *bpf_hash_map_seq_start(struct seq_file *seq, loff_t *pos)
6d3930f639dd69 Yonghong Song 2020-07-13 1680 {
6d3930f639dd69 Yonghong Song 2020-07-13 1681 struct bpf_iter_seq_hash_map_info *info = seq->private;
6d3930f639dd69 Yonghong Song 2020-07-13 1682 struct htab_elem *elem;
6d3930f639dd69 Yonghong Song 2020-07-13 1683
6d3930f639dd69 Yonghong Song 2020-07-13 1684 elem = bpf_hash_map_seq_find_next(info, NULL);
6d3930f639dd69 Yonghong Song 2020-07-13 1685 if (!elem)
6d3930f639dd69 Yonghong Song 2020-07-13 1686 return NULL;
6d3930f639dd69 Yonghong Song 2020-07-13 1687
6d3930f639dd69 Yonghong Song 2020-07-13 1688 if (*pos == 0)
6d3930f639dd69 Yonghong Song 2020-07-13 1689 ++*pos;
6d3930f639dd69 Yonghong Song 2020-07-13 1690 return elem;
6d3930f639dd69 Yonghong Song 2020-07-13 1691 }
6d3930f639dd69 Yonghong Song 2020-07-13 1692
6d3930f639dd69 Yonghong Song 2020-07-13 1693 static void *bpf_hash_map_seq_next(struct seq_file *seq, void *v, loff_t *pos)
6d3930f639dd69 Yonghong Song 2020-07-13 1694 {
6d3930f639dd69 Yonghong Song 2020-07-13 1695 struct bpf_iter_seq_hash_map_info *info = seq->private;
6d3930f639dd69 Yonghong Song 2020-07-13 1696
6d3930f639dd69 Yonghong Song 2020-07-13 1697 ++*pos;
6d3930f639dd69 Yonghong Song 2020-07-13 1698 ++info->skip_elems;
6d3930f639dd69 Yonghong Song 2020-07-13 1699 return bpf_hash_map_seq_find_next(info, v);
6d3930f639dd69 Yonghong Song 2020-07-13 1700 }
6d3930f639dd69 Yonghong Song 2020-07-13 1701
6d3930f639dd69 Yonghong Song 2020-07-13 1702 static int __bpf_hash_map_seq_show(struct seq_file *seq, struct htab_elem *elem)
6d3930f639dd69 Yonghong Song 2020-07-13 1703 {
6d3930f639dd69 Yonghong Song 2020-07-13 1704 struct bpf_iter_seq_hash_map_info *info = seq->private;
6d3930f639dd69 Yonghong Song 2020-07-13 1705 u32 roundup_key_size, roundup_value_size;
6d3930f639dd69 Yonghong Song 2020-07-13 1706 struct bpf_iter__bpf_map_elem ctx = {};
6d3930f639dd69 Yonghong Song 2020-07-13 1707 struct bpf_map *map = info->map;
6d3930f639dd69 Yonghong Song 2020-07-13 1708 struct bpf_iter_meta meta;
6d3930f639dd69 Yonghong Song 2020-07-13 1709 int ret = 0, off = 0, cpu;
6d3930f639dd69 Yonghong Song 2020-07-13 1710 struct bpf_prog *prog;
6d3930f639dd69 Yonghong Song 2020-07-13 1711 void __percpu *pptr;
6d3930f639dd69 Yonghong Song 2020-07-13 1712
6d3930f639dd69 Yonghong Song 2020-07-13 1713 meta.seq = seq;
6d3930f639dd69 Yonghong Song 2020-07-13 1714 prog = bpf_iter_get_info(&meta, elem == NULL);
6d3930f639dd69 Yonghong Song 2020-07-13 1715 if (prog) {
6d3930f639dd69 Yonghong Song 2020-07-13 1716 ctx.meta = &meta;
6d3930f639dd69 Yonghong Song 2020-07-13 1717 ctx.map = info->map;
6d3930f639dd69 Yonghong Song 2020-07-13 1718 if (elem) {
6d3930f639dd69 Yonghong Song 2020-07-13 1719 roundup_key_size = round_up(map->key_size, 8);
6d3930f639dd69 Yonghong Song 2020-07-13 1720 ctx.key = elem->key;
6d3930f639dd69 Yonghong Song 2020-07-13 1721 if (!info->percpu_value_buf) {
6d3930f639dd69 Yonghong Song 2020-07-13 1722 ctx.value = elem->key + roundup_key_size;
6d3930f639dd69 Yonghong Song 2020-07-13 1723 } else {
6d3930f639dd69 Yonghong Song 2020-07-13 1724 roundup_value_size = round_up(map->value_size, 8);
6d3930f639dd69 Yonghong Song 2020-07-13 1725 pptr = htab_elem_get_ptr(elem, map->key_size);
6d3930f639dd69 Yonghong Song 2020-07-13 1726 for_each_possible_cpu(cpu) {
6d3930f639dd69 Yonghong Song 2020-07-13 1727 bpf_long_memcpy(info->percpu_value_buf + off,
6d3930f639dd69 Yonghong Song 2020-07-13 1728 per_cpu_ptr(pptr, cpu),
6d3930f639dd69 Yonghong Song 2020-07-13 1729 roundup_value_size);
6d3930f639dd69 Yonghong Song 2020-07-13 1730 off += roundup_value_size;
6d3930f639dd69 Yonghong Song 2020-07-13 1731 }
6d3930f639dd69 Yonghong Song 2020-07-13 1732 ctx.value = info->percpu_value_buf;
6d3930f639dd69 Yonghong Song 2020-07-13 1733 }
6d3930f639dd69 Yonghong Song 2020-07-13 1734 }
6d3930f639dd69 Yonghong Song 2020-07-13 1735 ret = bpf_iter_run_prog(prog, &ctx);
6d3930f639dd69 Yonghong Song 2020-07-13 1736 }
6d3930f639dd69 Yonghong Song 2020-07-13 1737
6d3930f639dd69 Yonghong Song 2020-07-13 1738 return ret;
6d3930f639dd69 Yonghong Song 2020-07-13 1739 }
6d3930f639dd69 Yonghong Song 2020-07-13 1740
6d3930f639dd69 Yonghong Song 2020-07-13 1741 static int bpf_hash_map_seq_show(struct seq_file *seq, void *v)
6d3930f639dd69 Yonghong Song 2020-07-13 1742 {
6d3930f639dd69 Yonghong Song 2020-07-13 1743 return __bpf_hash_map_seq_show(seq, v);
6d3930f639dd69 Yonghong Song 2020-07-13 1744 }
6d3930f639dd69 Yonghong Song 2020-07-13 1745
6d3930f639dd69 Yonghong Song 2020-07-13 1746 static void bpf_hash_map_seq_stop(struct seq_file *seq, void *v)
6d3930f639dd69 Yonghong Song 2020-07-13 1747 {
6d3930f639dd69 Yonghong Song 2020-07-13 1748 struct bpf_iter_seq_hash_map_info *info = seq->private;
6d3930f639dd69 Yonghong Song 2020-07-13 1749
6d3930f639dd69 Yonghong Song 2020-07-13 1750 if (!v)
6d3930f639dd69 Yonghong Song 2020-07-13 1751 (void)__bpf_hash_map_seq_show(seq, NULL);
6d3930f639dd69 Yonghong Song 2020-07-13 1752 else
6d3930f639dd69 Yonghong Song 2020-07-13 @1753 htab_unlock_bucket(info->htab,
6d3930f639dd69 Yonghong Song 2020-07-13 1754 &info->htab->buckets[info->bucket_id],
6d3930f639dd69 Yonghong Song 2020-07-13 1755 info->flags);
6d3930f639dd69 Yonghong Song 2020-07-13 1756 }
6d3930f639dd69 Yonghong Song 2020-07-13 1757
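The two new warnings flagged above follow from the iterator's design
rather than a plain bug: bpf_hash_map_seq_find_next() deliberately
returns to the seq_file core with a bucket lock still held, and the
matching unlock happens only on the next find_next() call or in
bpf_hash_map_seq_stop(). Sparse cannot follow a lock held across
function boundaries unless the helpers are annotated. A sketch of the
usual annotation style, assuming simplified lock helpers (the real
ones choose between raw and non-raw spinlocks, and the conditional
hold across find_next() may additionally need explicit __acquire()/
__release() markers at the return paths):

static unsigned long htab_lock_bucket(const struct bpf_htab *htab,
                                      struct bucket *b)
        __acquires(&b->lock)
{
        unsigned long flags;

        raw_spin_lock_irqsave(&b->lock, flags);
        return flags;
}

static void htab_unlock_bucket(const struct bpf_htab *htab,
                               struct bucket *b, unsigned long flags)
        __releases(&b->lock)
{
        raw_spin_unlock_irqrestore(&b->lock, flags);
}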
---
0-DAY CI Kernel Test Service, Intel Corporation
https://lists.01.org/hyperkitty/list/kbuild-all@lists.01.org
Subject: [PATCH bpf-next 05/13] bpf: implement bpf iterator for hash maps
From: Yonghong Song @ 2020-07-13 16:17 UTC
To: bpf, netdev
Cc: Alexei Starovoitov, Daniel Borkmann, kernel-team, Martin KaFai Lau
The bpf iterators for hash, percpu hash, lru hash
and lru percpu hash maps are implemented. At link
time, bpf_iter_reg->check_target() checks the map
type and ensures that the key/value region accessed
by the program stays within the key/value sizes
defined by the map.
For percpu hash and lru percpu hash maps, the bpf
program receives the values for all cpus. The map
element bpf iterator infrastructure prepares the
value buffer properly before passing the value
pointer to the bpf program.
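For context, a minimal iterator program consuming this interface
could look like the sketch below. It is illustrative only: the
u32 key / u64 value layout and the output format are assumptions,
and the section name follows the bpf_map_elem target registered by
this series.

/* Hypothetical sketch: print each key/value pair of a
 * BPF_MAP_TYPE_HASH map with u32 keys and u64 values. For the
 * percpu variants, ctx->value instead points at one value per
 * possible cpu, each padded to round_up(value_size, 8) bytes.
 */
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>

SEC("iter/bpf_map_elem")
int dump_elem(struct bpf_iter__bpf_map_elem *ctx)
{
        struct seq_file *seq = ctx->meta->seq;
        __u32 *key = ctx->key;
        __u64 *val = ctx->value;
        __u64 args[2];
        char fmt[] = "%u: %llu\n";

        /* key and value are NULL on the final, post-iteration call */
        if (!key || !val)
                return 0;

        args[0] = *key;
        args[1] = *val;
        bpf_seq_printf(seq, fmt, sizeof(fmt), args, sizeof(args));
        return 0;
}

char _license[] SEC("license") = "GPL";

The link is created against a concrete map (the series wires the map
into bpf_iter_aux_info), and the resulting iterator fd is then read
like any other bpf_iter seq_file.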
Signed-off-by: Yonghong Song <yhs@fb.com>
---
kernel/bpf/hashtab.c | 191 ++++++++++++++++++++++++++++++++++++++++++
kernel/bpf/map_iter.c | 24 +++++-
2 files changed, 214 insertions(+), 1 deletion(-)
diff --git a/kernel/bpf/hashtab.c b/kernel/bpf/hashtab.c
index d4378d7d442b..56280b10cb99 100644
--- a/kernel/bpf/hashtab.c
+++ b/kernel/bpf/hashtab.c
@@ -1612,6 +1612,193 @@ htab_lru_map_lookup_and_delete_batch(struct bpf_map *map,
true, false);
}
+struct bpf_iter_seq_hash_map_info {
+ struct bpf_map *map;
+ struct bpf_htab *htab;
+ void *percpu_value_buf; /* non-NULL means percpu hash */
+ unsigned long flags;
+ u32 bucket_id;
+ u32 skip_elems;
+};
+
+static struct htab_elem *
+bpf_hash_map_seq_find_next(struct bpf_iter_seq_hash_map_info *info,
+ struct htab_elem *prev_elem)
+{
+ const struct bpf_htab *htab = info->htab;
+ unsigned long flags = info->flags;
+ u32 skip_elems = info->skip_elems;
+ u32 bucket_id = info->bucket_id;
+ struct hlist_nulls_head *head;
+ struct hlist_nulls_node *n;
+ struct htab_elem *elem;
+ struct bucket *b;
+ u32 i, count;
+
+ if (bucket_id >= htab->n_buckets)
+ return NULL;
+
+ /* try to find next elem in the same bucket */
+ if (prev_elem) {
+ n = rcu_dereference_raw(hlist_nulls_next_rcu(&prev_elem->hash_node));
+ elem = hlist_nulls_entry_safe(n, struct htab_elem, hash_node);
+ if (elem)
+ return elem;
+
+ /* not found, unlock and go to the next bucket */
+ b = &htab->buckets[bucket_id++];
+ htab_unlock_bucket(htab, b, flags);
+ skip_elems = 0;
+ }
+
+ for (i = bucket_id; i < htab->n_buckets; i++) {
+ b = &htab->buckets[i];
+ flags = htab_lock_bucket(htab, b);
+
+ count = 0;
+ head = &b->head;
+ hlist_nulls_for_each_entry_rcu(elem, n, head, hash_node) {
+ if (count >= skip_elems) {
+ info->flags = flags;
+ info->bucket_id = i;
+ info->skip_elems = count;
+ return elem;
+ }
+ count++;
+ }
+
+ htab_unlock_bucket(htab, b, flags);
+ skip_elems = 0;
+ }
+
+ info->bucket_id = i;
+ info->skip_elems = 0;
+ return NULL;
+}
+
+static void *bpf_hash_map_seq_start(struct seq_file *seq, loff_t *pos)
+{
+ struct bpf_iter_seq_hash_map_info *info = seq->private;
+ struct htab_elem *elem;
+
+ elem = bpf_hash_map_seq_find_next(info, NULL);
+ if (!elem)
+ return NULL;
+
+ if (*pos == 0)
+ ++*pos;
+ return elem;
+}
+
+static void *bpf_hash_map_seq_next(struct seq_file *seq, void *v, loff_t *pos)
+{
+ struct bpf_iter_seq_hash_map_info *info = seq->private;
+
+ ++*pos;
+ ++info->skip_elems;
+ return bpf_hash_map_seq_find_next(info, v);
+}
+
+static int __bpf_hash_map_seq_show(struct seq_file *seq, struct htab_elem *elem)
+{
+ struct bpf_iter_seq_hash_map_info *info = seq->private;
+ u32 roundup_key_size, roundup_value_size;
+ struct bpf_iter__bpf_map_elem ctx = {};
+ struct bpf_map *map = info->map;
+ struct bpf_iter_meta meta;
+ int ret = 0, off = 0, cpu;
+ struct bpf_prog *prog;
+ void __percpu *pptr;
+
+ meta.seq = seq;
+ prog = bpf_iter_get_info(&meta, elem == NULL);
+ if (prog) {
+ ctx.meta = &meta;
+ ctx.map = info->map;
+ if (elem) {
+ roundup_key_size = round_up(map->key_size, 8);
+ ctx.key = elem->key;
+ if (!info->percpu_value_buf) {
+ ctx.value = elem->key + roundup_key_size;
+ } else {
+ roundup_value_size = round_up(map->value_size, 8);
+ pptr = htab_elem_get_ptr(elem, map->key_size);
+ for_each_possible_cpu(cpu) {
+ bpf_long_memcpy(info->percpu_value_buf + off,
+ per_cpu_ptr(pptr, cpu),
+ roundup_value_size);
+ off += roundup_value_size;
+ }
+ ctx.value = info->percpu_value_buf;
+ }
+ }
+ ret = bpf_iter_run_prog(prog, &ctx);
+ }
+
+ return ret;
+}
+
+static int bpf_hash_map_seq_show(struct seq_file *seq, void *v)
+{
+ return __bpf_hash_map_seq_show(seq, v);
+}
+
+static void bpf_hash_map_seq_stop(struct seq_file *seq, void *v)
+{
+ struct bpf_iter_seq_hash_map_info *info = seq->private;
+
+ if (!v)
+ (void)__bpf_hash_map_seq_show(seq, NULL);
+ else
+ htab_unlock_bucket(info->htab,
+ &info->htab->buckets[info->bucket_id],
+ info->flags);
+}
+
+static int bpf_iter_init_hash_map(void *priv_data,
+ struct bpf_iter_aux_info *aux)
+{
+ struct bpf_iter_seq_hash_map_info *seq_info = priv_data;
+ struct bpf_map *map = aux->map;
+ void *value_buf;
+ u32 buf_size;
+
+ if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
+ map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) {
+ buf_size = round_up(map->value_size, 8) * num_possible_cpus();
+ value_buf = kmalloc(buf_size, GFP_USER | __GFP_NOWARN);
+ if (!value_buf)
+ return -ENOMEM;
+
+ seq_info->percpu_value_buf = value_buf;
+ }
+
+ seq_info->map = map;
+ seq_info->htab = container_of(map, struct bpf_htab, map);
+ return 0;
+}
+
+static void bpf_iter_fini_hash_map(void *priv_data)
+{
+ struct bpf_iter_seq_hash_map_info *seq_info = priv_data;
+
+ kfree(seq_info->percpu_value_buf);
+}
+
+static const struct seq_operations bpf_hash_map_seq_ops = {
+ .start = bpf_hash_map_seq_start,
+ .next = bpf_hash_map_seq_next,
+ .stop = bpf_hash_map_seq_stop,
+ .show = bpf_hash_map_seq_show,
+};
+
+static const struct bpf_iter_seq_info iter_seq_info = {
+ .seq_ops = &bpf_hash_map_seq_ops,
+ .init_seq_private = bpf_iter_init_hash_map,
+ .fini_seq_private = bpf_iter_fini_hash_map,
+ .seq_priv_size = sizeof(struct bpf_iter_seq_hash_map_info),
+};
+
static int htab_map_btf_id;
const struct bpf_map_ops htab_map_ops = {
.map_alloc_check = htab_map_alloc_check,
@@ -1626,6 +1813,7 @@ const struct bpf_map_ops htab_map_ops = {
BATCH_OPS(htab),
.map_btf_name = "bpf_htab",
.map_btf_id = &htab_map_btf_id,
+ .iter_seq_info = &iter_seq_info,
};
static int htab_lru_map_btf_id;
@@ -1643,6 +1831,7 @@ const struct bpf_map_ops htab_lru_map_ops = {
BATCH_OPS(htab_lru),
.map_btf_name = "bpf_htab",
.map_btf_id = &htab_lru_map_btf_id,
+ .iter_seq_info = &iter_seq_info,
};
/* Called from eBPF program */
@@ -1760,6 +1949,7 @@ const struct bpf_map_ops htab_percpu_map_ops = {
BATCH_OPS(htab_percpu),
.map_btf_name = "bpf_htab",
.map_btf_id = &htab_percpu_map_btf_id,
+ .iter_seq_info = &iter_seq_info,
};
static int htab_lru_percpu_map_btf_id;
@@ -1775,6 +1965,7 @@ const struct bpf_map_ops htab_lru_percpu_map_ops = {
BATCH_OPS(htab_lru_percpu),
.map_btf_name = "bpf_htab",
.map_btf_id = &htab_lru_percpu_map_btf_id,
+ .iter_seq_info = &iter_seq_info,
};
static int fd_htab_map_alloc_check(union bpf_attr *attr)
diff --git a/kernel/bpf/map_iter.c b/kernel/bpf/map_iter.c
index e740312a5456..2988244853d1 100644
--- a/kernel/bpf/map_iter.c
+++ b/kernel/bpf/map_iter.c
@@ -101,7 +101,29 @@ static const struct bpf_iter_reg bpf_map_reg_info = {
static int bpf_iter_check_map(struct bpf_prog *prog,
struct bpf_iter_aux_info *aux)
{
- return -EINVAL;
+ u32 key_acc_size, value_acc_size, key_size, value_size;
+ struct bpf_map *map = aux->map;
+ bool is_percpu = false;
+
+ if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
+ map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH)
+ is_percpu = true;
+ else if (map->map_type != BPF_MAP_TYPE_HASH &&
+ map->map_type != BPF_MAP_TYPE_LRU_HASH)
+ return -EINVAL;
+
+ key_acc_size = prog->aux->max_rdonly_access[0];
+ value_acc_size = prog->aux->max_rdonly_access[1];
+ key_size = map->key_size;
+ if (!is_percpu)
+ value_size = map->value_size;
+ else
+ value_size = round_up(map->value_size, 8) * num_possible_cpus();
+
+ if (key_acc_size > key_size || value_acc_size > value_size)
+ return -EACCES;
+
+ return 0;
}
DEFINE_BPF_ITER_FUNC(bpf_map_elem, struct bpf_iter_meta *meta,
--
2.24.1