* [PATCH bpf-next 0/2] flexible size for bpf_prog_pack
@ 2022-02-10 6:41 Song Liu
2022-02-10 6:41 ` [PATCH bpf-next 1/2] vmalloc: expose vmap_allow_huge via get_vmap_allow_huge() Song Liu
2022-02-10 6:41 ` [PATCH bpf-next 2/2] bpf: flexible size for bpf_prog_pack Song Liu
0 siblings, 2 replies; 8+ messages in thread
From: Song Liu @ 2022-02-10 6:41 UTC (permalink / raw)
To: linux-mm, bpf, netdev
Cc: ast, daniel, andrii, kernel-team, akpm, eric.dumazet, Song Liu
There are two issues with bpf_prog_pack:
(1) On NUMA systems, bpf_prog_pack needs to be bigger
(PMD_SIZE * num_online_nodes()) to use huge pages, because vmalloc only
attempts huge mappings when each node's share of the allocation reaches
PMD_SIZE (see the simplified check below).
(2) If the system doesn't support huge pages (nohugevmalloc on the kernel
command line), allocating a PMD_SIZE pack for bpf_prog_pack is a waste.
Address these issues with a flexible bpf_prog_pack_size().
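For context, issue (1) comes from the per-node size check in
__vmalloc_node_range(). The snippet below is a simplified paraphrase of that
check, not a verbatim copy of mm/vmalloc.c:

	if (vmap_allow_huge && !(vm_flags & VM_NO_HUGE_VMAP)) {
		unsigned long size_per_node = size;

		/* Without an explicit node, vmalloc assumes the pages may be
		 * spread across all online nodes.
		 */
		if (node == NUMA_NO_NODE)
			size_per_node /= num_online_nodes();

		/* Huge (PMD) mappings are only attempted when each node's
		 * share of the allocation is at least PMD_SIZE.
		 */
		if (size_per_node >= PMD_SIZE)
			shift = PMD_SHIFT;
	}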
Song Liu (2):
vmalloc: expose vmap_allow_huge via get_vmap_allow_huge()
bpf: flexible size for bpf_prog_pack
include/linux/vmalloc.h | 1 +
kernel/bpf/core.c | 47 +++++++++++++++++++++++------------------
mm/vmalloc.c | 5 +++++
3 files changed, 33 insertions(+), 20 deletions(-)
--
2.30.2
* [PATCH bpf-next 1/2] vmalloc: expose vmap_allow_huge via get_vmap_allow_huge()
2022-02-10 6:41 [PATCH bpf-next 0/2] flexible size for bpf_prog_pack Song Liu
@ 2022-02-10 6:41 ` Song Liu
2022-02-10 6:41 ` [PATCH bpf-next 2/2] bpf: flexible size for bpf_prog_pack Song Liu
1 sibling, 0 replies; 8+ messages in thread
From: Song Liu @ 2022-02-10 6:41 UTC (permalink / raw)
To: linux-mm, bpf, netdev
Cc: ast, daniel, andrii, kernel-team, akpm, eric.dumazet, Song Liu
Callers can use get_vmap_allow_huge() to predict the behavior of vmalloc()
(and its variants). Specifically, if get_vmap_allow_huge() returns false,
vmalloc() will never back an allocation with huge pages.
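A minimal usage sketch (this mirrors how the next patch in the series uses
the helper; pick_pack_size() is only an illustrative name, not part of this
series):

	#include <linux/nodemask.h>
	#include <linux/pgtable.h>
	#include <linux/vmalloc.h>

	/* Pick an allocation size that vmalloc can actually back with huge
	 * pages; fall back to a single page when huge pages are unavailable.
	 */
	static unsigned long pick_pack_size(void)
	{
		if (get_vmap_allow_huge())
			return PMD_SIZE * num_online_nodes();
		return PAGE_SIZE;
	}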
Signed-off-by: Song Liu <song@kernel.org>
---
include/linux/vmalloc.h | 1 +
mm/vmalloc.c | 5 +++++
2 files changed, 6 insertions(+)
diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h
index 880227b9f044..22acfcd2d0d1 100644
--- a/include/linux/vmalloc.h
+++ b/include/linux/vmalloc.h
@@ -256,6 +256,7 @@ extern long vread(char *buf, char *addr, unsigned long count);
extern struct list_head vmap_area_list;
extern __init void vm_area_add_early(struct vm_struct *vm);
extern __init void vm_area_register_early(struct vm_struct *vm, size_t align);
+extern bool get_vmap_allow_huge(void);
#ifdef CONFIG_SMP
# ifdef CONFIG_MMU
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 4165304d3547..895ac81b6bb4 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -72,6 +72,11 @@ early_param("nohugevmalloc", set_nohugevmalloc);
static const bool vmap_allow_huge = false;
#endif /* CONFIG_HAVE_ARCH_HUGE_VMALLOC */
+bool get_vmap_allow_huge(void)
+{
+ return vmap_allow_huge;
+}
+
bool is_vmalloc_addr(const void *x)
{
unsigned long addr = (unsigned long)x;
--
2.30.2
* [PATCH bpf-next 2/2] bpf: flexible size for bpf_prog_pack
2022-02-10 6:41 [PATCH bpf-next 0/2] flexible size for bpf_prog_pack Song Liu
2022-02-10 6:41 ` [PATCH bpf-next 1/2] vmalloc: expose vmap_allow_huge via get_vmap_allow_huge() Song Liu
@ 2022-02-10 6:41 ` Song Liu
2022-02-10 8:25 ` Daniel Borkmann
1 sibling, 1 reply; 8+ messages in thread
From: Song Liu @ 2022-02-10 6:41 UTC (permalink / raw)
To: linux-mm, bpf, netdev
Cc: ast, daniel, andrii, kernel-team, akpm, eric.dumazet, Song Liu
bpf_prog_pack uses huge pages to reduce pressure on the instruction TLB.
To guarantee allocating huge pages for bpf_prog_pack, it is necessary to
allocate memory of size PMD_SIZE * num_online_nodes() (for example, 4MB on a
two-node x86_64 system with 2MB PMDs).
On the other hand, if the system doesn't support huge pages, it is more
efficient to allocate PAGE_SIZE packs.
Address both scenarios with a more flexible bpf_prog_pack_size().
Signed-off-by: Song Liu <song@kernel.org>
---
kernel/bpf/core.c | 47 +++++++++++++++++++++++++++--------------------
1 file changed, 27 insertions(+), 20 deletions(-)
diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
index 42d96549a804..d961a1f07a13 100644
--- a/kernel/bpf/core.c
+++ b/kernel/bpf/core.c
@@ -814,46 +814,53 @@ int bpf_jit_add_poke_descriptor(struct bpf_prog *prog,
* allocator. The prog_pack allocator uses HPAGE_PMD_SIZE page (2MB on x86)
* to host BPF programs.
*/
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
-#define BPF_PROG_PACK_SIZE HPAGE_PMD_SIZE
-#else
-#define BPF_PROG_PACK_SIZE PAGE_SIZE
-#endif
#define BPF_PROG_CHUNK_SHIFT 6
#define BPF_PROG_CHUNK_SIZE (1 << BPF_PROG_CHUNK_SHIFT)
#define BPF_PROG_CHUNK_MASK (~(BPF_PROG_CHUNK_SIZE - 1))
-#define BPF_PROG_CHUNK_COUNT (BPF_PROG_PACK_SIZE / BPF_PROG_CHUNK_SIZE)
struct bpf_prog_pack {
struct list_head list;
void *ptr;
- unsigned long bitmap[BITS_TO_LONGS(BPF_PROG_CHUNK_COUNT)];
+ unsigned long bitmap[];
};
-#define BPF_PROG_MAX_PACK_PROG_SIZE BPF_PROG_PACK_SIZE
#define BPF_PROG_SIZE_TO_NBITS(size) (round_up(size, BPF_PROG_CHUNK_SIZE) / BPF_PROG_CHUNK_SIZE)
static DEFINE_MUTEX(pack_mutex);
static LIST_HEAD(pack_list);
+static inline int bpf_prog_pack_size(void)
+{
+ /* If vmap_allow_huge == true, use pack size of the smallest
+ * possible vmalloc huge page: PMD_SIZE * num_online_nodes().
+ * Otherwise, use pack size of PAGE_SIZE.
+ */
+ return get_vmap_allow_huge() ? PMD_SIZE * num_online_nodes() : PAGE_SIZE;
+}
+
+static inline int bpf_prog_chunk_count(void)
+{
+ return bpf_prog_pack_size() / BPF_PROG_CHUNK_SIZE;
+}
+
static struct bpf_prog_pack *alloc_new_pack(void)
{
struct bpf_prog_pack *pack;
- pack = kzalloc(sizeof(*pack), GFP_KERNEL);
+ pack = kzalloc(sizeof(*pack) + BITS_TO_BYTES(bpf_prog_chunk_count()), GFP_KERNEL);
if (!pack)
return NULL;
- pack->ptr = module_alloc(BPF_PROG_PACK_SIZE);
+ pack->ptr = module_alloc(bpf_prog_pack_size());
if (!pack->ptr) {
kfree(pack);
return NULL;
}
- bitmap_zero(pack->bitmap, BPF_PROG_PACK_SIZE / BPF_PROG_CHUNK_SIZE);
+ bitmap_zero(pack->bitmap, bpf_prog_pack_size() / BPF_PROG_CHUNK_SIZE);
list_add_tail(&pack->list, &pack_list);
set_vm_flush_reset_perms(pack->ptr);
- set_memory_ro((unsigned long)pack->ptr, BPF_PROG_PACK_SIZE / PAGE_SIZE);
- set_memory_x((unsigned long)pack->ptr, BPF_PROG_PACK_SIZE / PAGE_SIZE);
+ set_memory_ro((unsigned long)pack->ptr, bpf_prog_pack_size() / PAGE_SIZE);
+ set_memory_x((unsigned long)pack->ptr, bpf_prog_pack_size() / PAGE_SIZE);
return pack;
}
@@ -864,7 +871,7 @@ static void *bpf_prog_pack_alloc(u32 size)
unsigned long pos;
void *ptr = NULL;
- if (size > BPF_PROG_MAX_PACK_PROG_SIZE) {
+ if (size > bpf_prog_pack_size()) {
size = round_up(size, PAGE_SIZE);
ptr = module_alloc(size);
if (ptr) {
@@ -876,9 +883,9 @@ static void *bpf_prog_pack_alloc(u32 size)
}
mutex_lock(&pack_mutex);
list_for_each_entry(pack, &pack_list, list) {
- pos = bitmap_find_next_zero_area(pack->bitmap, BPF_PROG_CHUNK_COUNT, 0,
+ pos = bitmap_find_next_zero_area(pack->bitmap, bpf_prog_chunk_count(), 0,
nbits, 0);
- if (pos < BPF_PROG_CHUNK_COUNT)
+ if (pos < bpf_prog_chunk_count())
goto found_free_area;
}
@@ -904,12 +911,12 @@ static void bpf_prog_pack_free(struct bpf_binary_header *hdr)
unsigned long pos;
void *pack_ptr;
- if (hdr->size > BPF_PROG_MAX_PACK_PROG_SIZE) {
+ if (hdr->size > bpf_prog_pack_size()) {
module_memfree(hdr);
return;
}
- pack_ptr = (void *)((unsigned long)hdr & ~(BPF_PROG_PACK_SIZE - 1));
+ pack_ptr = (void *)((unsigned long)hdr & ~(bpf_prog_pack_size() - 1));
mutex_lock(&pack_mutex);
list_for_each_entry(tmp, &pack_list, list) {
@@ -926,8 +933,8 @@ static void bpf_prog_pack_free(struct bpf_binary_header *hdr)
pos = ((unsigned long)hdr - (unsigned long)pack_ptr) >> BPF_PROG_CHUNK_SHIFT;
bitmap_clear(pack->bitmap, pos, nbits);
- if (bitmap_find_next_zero_area(pack->bitmap, BPF_PROG_CHUNK_COUNT, 0,
- BPF_PROG_CHUNK_COUNT, 0) == 0) {
+ if (bitmap_find_next_zero_area(pack->bitmap, bpf_prog_chunk_count(), 0,
+ bpf_prog_chunk_count(), 0) == 0) {
list_del(&pack->list);
module_memfree(pack->ptr);
kfree(pack);
--
2.30.2
* Re: [PATCH bpf-next 2/2] bpf: flexible size for bpf_prog_pack
2022-02-10 6:41 ` [PATCH bpf-next 2/2] bpf: flexible size for bpf_prog_pack Song Liu
@ 2022-02-10 8:25 ` Daniel Borkmann
2022-02-10 16:51 ` Song Liu
0 siblings, 1 reply; 8+ messages in thread
From: Daniel Borkmann @ 2022-02-10 8:25 UTC (permalink / raw)
To: Song Liu, linux-mm, bpf, netdev
Cc: ast, andrii, kernel-team, akpm, eric.dumazet, mhocko
On 2/10/22 7:41 AM, Song Liu wrote:
> bpf_prog_pack uses huge pages to reduce pressure on instruction TLB.
> To guarantee allocating huge pages for bpf_prog_pack, it is necessary to
> allocate memory of size PMD_SIZE * num_online_nodes().
>
> On the other hand, if the system doesn't support huge pages, it is more
> efficient to allocate PAGE_SIZE bpf_prog_pack.
>
> Address different scenarios with more flexible bpf_prog_pack_size().
>
> Signed-off-by: Song Liu <song@kernel.org>
> ---
> kernel/bpf/core.c | 47 +++++++++++++++++++++++++++--------------------
> 1 file changed, 27 insertions(+), 20 deletions(-)
>
> diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
> index 42d96549a804..d961a1f07a13 100644
> --- a/kernel/bpf/core.c
> +++ b/kernel/bpf/core.c
> @@ -814,46 +814,53 @@ int bpf_jit_add_poke_descriptor(struct bpf_prog *prog,
> * allocator. The prog_pack allocator uses HPAGE_PMD_SIZE page (2MB on x86)
> * to host BPF programs.
> */
> -#ifdef CONFIG_TRANSPARENT_HUGEPAGE
> -#define BPF_PROG_PACK_SIZE HPAGE_PMD_SIZE
> -#else
> -#define BPF_PROG_PACK_SIZE PAGE_SIZE
> -#endif
> #define BPF_PROG_CHUNK_SHIFT 6
> #define BPF_PROG_CHUNK_SIZE (1 << BPF_PROG_CHUNK_SHIFT)
> #define BPF_PROG_CHUNK_MASK (~(BPF_PROG_CHUNK_SIZE - 1))
> -#define BPF_PROG_CHUNK_COUNT (BPF_PROG_PACK_SIZE / BPF_PROG_CHUNK_SIZE)
>
> struct bpf_prog_pack {
> struct list_head list;
> void *ptr;
> - unsigned long bitmap[BITS_TO_LONGS(BPF_PROG_CHUNK_COUNT)];
> + unsigned long bitmap[];
> };
>
> -#define BPF_PROG_MAX_PACK_PROG_SIZE BPF_PROG_PACK_SIZE
> #define BPF_PROG_SIZE_TO_NBITS(size) (round_up(size, BPF_PROG_CHUNK_SIZE) / BPF_PROG_CHUNK_SIZE)
>
> static DEFINE_MUTEX(pack_mutex);
> static LIST_HEAD(pack_list);
>
> +static inline int bpf_prog_pack_size(void)
> +{
> + /* If vmap_allow_huge == true, use pack size of the smallest
> + * possible vmalloc huge page: PMD_SIZE * num_online_nodes().
> + * Otherwise, use pack size of PAGE_SIZE.
> + */
> + return get_vmap_allow_huge() ? PMD_SIZE * num_online_nodes() : PAGE_SIZE;
> +}
Imho, this is making too many assumptions about implementation details. Can't we
just add a new module_alloc*() API instead which internally guarantees allocating
huge pages when enabled/supported (e.g. with a __weak function as fallback)?
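A rough sketch of the kind of API being suggested, assuming a __weak default
in the generic module code that an implementation with huge-page support
would override; module_alloc_huge() is a hypothetical name here, not an
existing kernel function:

	#include <linux/moduleloader.h>
	#include <linux/printk.h>
	#include <linux/vmalloc.h>

	/* Generic fallback: no huge-page guarantee, behaves like module_alloc(). */
	void * __weak module_alloc_huge(unsigned long size)
	{
		return module_alloc(size);
	}

	/* Caller side (e.g. bpf_prog_pack): request a huge-page-backed region
	 * and verify what was actually mapped.
	 */
	static void *alloc_pack_region(unsigned long size)
	{
		void *ptr = module_alloc_huge(size);

		if (ptr && !is_vm_area_hugepages(ptr))
			pr_debug("prog_pack: no huge pages, falling back to 4K mappings\n");
		return ptr;
	}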
> +static inline int bpf_prog_chunk_count(void)
> +{
> + return bpf_prog_pack_size() / BPF_PROG_CHUNK_SIZE;
> +}
> +
> static struct bpf_prog_pack *alloc_new_pack(void)
> {
> struct bpf_prog_pack *pack;
>
> - pack = kzalloc(sizeof(*pack), GFP_KERNEL);
> + pack = kzalloc(sizeof(*pack) + BITS_TO_BYTES(bpf_prog_chunk_count()), GFP_KERNEL);
> if (!pack)
> return NULL;
> - pack->ptr = module_alloc(BPF_PROG_PACK_SIZE);
> + pack->ptr = module_alloc(bpf_prog_pack_size());
> if (!pack->ptr) {
> kfree(pack);
> return NULL;
> }
> - bitmap_zero(pack->bitmap, BPF_PROG_PACK_SIZE / BPF_PROG_CHUNK_SIZE);
> + bitmap_zero(pack->bitmap, bpf_prog_pack_size() / BPF_PROG_CHUNK_SIZE);
> list_add_tail(&pack->list, &pack_list);
>
> set_vm_flush_reset_perms(pack->ptr);
> - set_memory_ro((unsigned long)pack->ptr, BPF_PROG_PACK_SIZE / PAGE_SIZE);
> - set_memory_x((unsigned long)pack->ptr, BPF_PROG_PACK_SIZE / PAGE_SIZE);
> + set_memory_ro((unsigned long)pack->ptr, bpf_prog_pack_size() / PAGE_SIZE);
> + set_memory_x((unsigned long)pack->ptr, bpf_prog_pack_size() / PAGE_SIZE);
> return pack;
> }
>
> @@ -864,7 +871,7 @@ static void *bpf_prog_pack_alloc(u32 size)
> unsigned long pos;
> void *ptr = NULL;
>
> - if (size > BPF_PROG_MAX_PACK_PROG_SIZE) {
> + if (size > bpf_prog_pack_size()) {
> size = round_up(size, PAGE_SIZE);
> ptr = module_alloc(size);
> if (ptr) {
> @@ -876,9 +883,9 @@ static void *bpf_prog_pack_alloc(u32 size)
> }
> mutex_lock(&pack_mutex);
> list_for_each_entry(pack, &pack_list, list) {
> - pos = bitmap_find_next_zero_area(pack->bitmap, BPF_PROG_CHUNK_COUNT, 0,
> + pos = bitmap_find_next_zero_area(pack->bitmap, bpf_prog_chunk_count(), 0,
> nbits, 0);
> - if (pos < BPF_PROG_CHUNK_COUNT)
> + if (pos < bpf_prog_chunk_count())
> goto found_free_area;
> }
>
> @@ -904,12 +911,12 @@ static void bpf_prog_pack_free(struct bpf_binary_header *hdr)
> unsigned long pos;
> void *pack_ptr;
>
> - if (hdr->size > BPF_PROG_MAX_PACK_PROG_SIZE) {
> + if (hdr->size > bpf_prog_pack_size()) {
> module_memfree(hdr);
> return;
> }
>
> - pack_ptr = (void *)((unsigned long)hdr & ~(BPF_PROG_PACK_SIZE - 1));
> + pack_ptr = (void *)((unsigned long)hdr & ~(bpf_prog_pack_size() - 1));
> mutex_lock(&pack_mutex);
>
> list_for_each_entry(tmp, &pack_list, list) {
> @@ -926,8 +933,8 @@ static void bpf_prog_pack_free(struct bpf_binary_header *hdr)
> pos = ((unsigned long)hdr - (unsigned long)pack_ptr) >> BPF_PROG_CHUNK_SHIFT;
>
> bitmap_clear(pack->bitmap, pos, nbits);
> - if (bitmap_find_next_zero_area(pack->bitmap, BPF_PROG_CHUNK_COUNT, 0,
> - BPF_PROG_CHUNK_COUNT, 0) == 0) {
> + if (bitmap_find_next_zero_area(pack->bitmap, bpf_prog_chunk_count(), 0,
> + bpf_prog_chunk_count(), 0) == 0) {
> list_del(&pack->list);
> module_memfree(pack->ptr);
> kfree(pack);
>
* Re: [PATCH bpf-next 2/2] bpf: flexible size for bpf_prog_pack
2022-02-10 8:25 ` Daniel Borkmann
@ 2022-02-10 16:51 ` Song Liu
2022-02-11 14:35 ` Daniel Borkmann
0 siblings, 1 reply; 8+ messages in thread
From: Song Liu @ 2022-02-10 16:51 UTC (permalink / raw)
To: Daniel Borkmann
Cc: Song Liu, Linux Memory Management List, bpf, Network Development,
Alexei Starovoitov, Andrii Nakryiko, Kernel Team, Andrew Morton,
Eric Dumazet, Michal Hocko
> On Feb 10, 2022, at 12:25 AM, Daniel Borkmann <daniel@iogearbox.net> wrote:
>
> On 2/10/22 7:41 AM, Song Liu wrote:
>> bpf_prog_pack uses huge pages to reduce pressure on instruction TLB.
>> To guarantee allocating huge pages for bpf_prog_pack, it is necessary to
>> allocate memory of size PMD_SIZE * num_online_nodes().
>> On the other hand, if the system doesn't support huge pages, it is more
>> efficient to allocate PAGE_SIZE bpf_prog_pack.
>> Address different scenarios with more flexible bpf_prog_pack_size().
>> Signed-off-by: Song Liu <song@kernel.org>
>> ---
>> kernel/bpf/core.c | 47 +++++++++++++++++++++++++++--------------------
>> 1 file changed, 27 insertions(+), 20 deletions(-)
>> diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
>> index 42d96549a804..d961a1f07a13 100644
>> --- a/kernel/bpf/core.c
>> +++ b/kernel/bpf/core.c
>> @@ -814,46 +814,53 @@ int bpf_jit_add_poke_descriptor(struct bpf_prog *prog,
>> * allocator. The prog_pack allocator uses HPAGE_PMD_SIZE page (2MB on x86)
>> * to host BPF programs.
>> */
>> -#ifdef CONFIG_TRANSPARENT_HUGEPAGE
>> -#define BPF_PROG_PACK_SIZE HPAGE_PMD_SIZE
>> -#else
>> -#define BPF_PROG_PACK_SIZE PAGE_SIZE
>> -#endif
>> #define BPF_PROG_CHUNK_SHIFT 6
>> #define BPF_PROG_CHUNK_SIZE (1 << BPF_PROG_CHUNK_SHIFT)
>> #define BPF_PROG_CHUNK_MASK (~(BPF_PROG_CHUNK_SIZE - 1))
>> -#define BPF_PROG_CHUNK_COUNT (BPF_PROG_PACK_SIZE / BPF_PROG_CHUNK_SIZE)
>> struct bpf_prog_pack {
>> struct list_head list;
>> void *ptr;
>> - unsigned long bitmap[BITS_TO_LONGS(BPF_PROG_CHUNK_COUNT)];
>> + unsigned long bitmap[];
>> };
>> -#define BPF_PROG_MAX_PACK_PROG_SIZE BPF_PROG_PACK_SIZE
>> #define BPF_PROG_SIZE_TO_NBITS(size) (round_up(size, BPF_PROG_CHUNK_SIZE) / BPF_PROG_CHUNK_SIZE)
>> static DEFINE_MUTEX(pack_mutex);
>> static LIST_HEAD(pack_list);
>> +static inline int bpf_prog_pack_size(void)
>> +{
>> + /* If vmap_allow_huge == true, use pack size of the smallest
>> + * possible vmalloc huge page: PMD_SIZE * num_online_nodes().
>> + * Otherwise, use pack size of PAGE_SIZE.
>> + */
>> + return get_vmap_allow_huge() ? PMD_SIZE * num_online_nodes() : PAGE_SIZE;
>> +}
>
> Imho, this is making too many assumptions about implementation details. Can't we
> just add a new module_alloc*() API instead which internally guarantees allocating
> huge pages when enabled/supported (e.g. with a __weak function as fallback)?
I agree that this is making too many assumptions. But a new module_alloc_huge()
may not work, because we need the caller to know the proper size to ask for.
(Or maybe I misunderstood your suggestion?)
How about we introduce something like:

/* Minimal size to get huge pages from vmalloc. If not possible,
 * return 0 (or -1?).
 */
int vmalloc_hpage_min_size(void)
{
	return vmap_allow_huge ? PMD_SIZE * num_online_nodes() : 0;
}

/* Minimal size to get huge pages from module_alloc(). */
int module_alloc_hpage_min_size(void)
{
	return vmalloc_hpage_min_size();
}

static inline int bpf_prog_pack_size(void)
{
	return module_alloc_hpage_min_size() ? : PAGE_SIZE;
}
Thanks,
Song
* Re: [PATCH bpf-next 2/2] bpf: flexible size for bpf_prog_pack
2022-02-10 16:51 ` Song Liu
@ 2022-02-11 14:35 ` Daniel Borkmann
2022-02-11 19:42 ` Song Liu
0 siblings, 1 reply; 8+ messages in thread
From: Daniel Borkmann @ 2022-02-11 14:35 UTC (permalink / raw)
To: Song Liu
Cc: Song Liu, Linux Memory Management List, bpf, Network Development,
Alexei Starovoitov, Andrii Nakryiko, Kernel Team, Andrew Morton,
Eric Dumazet, Michal Hocko
On 2/10/22 5:51 PM, Song Liu wrote:
>> On Feb 10, 2022, at 12:25 AM, Daniel Borkmann <daniel@iogearbox.net> wrote:
>> On 2/10/22 7:41 AM, Song Liu wrote:
>>> bpf_prog_pack uses huge pages to reduce pressure on instruction TLB.
>>> To guarantee allocating huge pages for bpf_prog_pack, it is necessary to
>>> allocate memory of size PMD_SIZE * num_online_nodes().
>>> On the other hand, if the system doesn't support huge pages, it is more
>>> efficient to allocate PAGE_SIZE bpf_prog_pack.
>>> Address different scenarios with more flexible bpf_prog_pack_size().
>>> Signed-off-by: Song Liu <song@kernel.org>
>>> ---
>>> kernel/bpf/core.c | 47 +++++++++++++++++++++++++++--------------------
>>> 1 file changed, 27 insertions(+), 20 deletions(-)
>>> diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
>>> index 42d96549a804..d961a1f07a13 100644
>>> --- a/kernel/bpf/core.c
>>> +++ b/kernel/bpf/core.c
>>> @@ -814,46 +814,53 @@ int bpf_jit_add_poke_descriptor(struct bpf_prog *prog,
>>> * allocator. The prog_pack allocator uses HPAGE_PMD_SIZE page (2MB on x86)
>>> * to host BPF programs.
>>> */
>>> -#ifdef CONFIG_TRANSPARENT_HUGEPAGE
>>> -#define BPF_PROG_PACK_SIZE HPAGE_PMD_SIZE
>>> -#else
>>> -#define BPF_PROG_PACK_SIZE PAGE_SIZE
>>> -#endif
>>> #define BPF_PROG_CHUNK_SHIFT 6
>>> #define BPF_PROG_CHUNK_SIZE (1 << BPF_PROG_CHUNK_SHIFT)
>>> #define BPF_PROG_CHUNK_MASK (~(BPF_PROG_CHUNK_SIZE - 1))
>>> -#define BPF_PROG_CHUNK_COUNT (BPF_PROG_PACK_SIZE / BPF_PROG_CHUNK_SIZE)
>>> struct bpf_prog_pack {
>>> struct list_head list;
>>> void *ptr;
>>> - unsigned long bitmap[BITS_TO_LONGS(BPF_PROG_CHUNK_COUNT)];
>>> + unsigned long bitmap[];
>>> };
>>> -#define BPF_PROG_MAX_PACK_PROG_SIZE BPF_PROG_PACK_SIZE
>>> #define BPF_PROG_SIZE_TO_NBITS(size) (round_up(size, BPF_PROG_CHUNK_SIZE) / BPF_PROG_CHUNK_SIZE)
>>> static DEFINE_MUTEX(pack_mutex);
>>> static LIST_HEAD(pack_list);
>>> +static inline int bpf_prog_pack_size(void)
>>> +{
>>> + /* If vmap_allow_huge == true, use pack size of the smallest
>>> + * possible vmalloc huge page: PMD_SIZE * num_online_nodes().
>>> + * Otherwise, use pack size of PAGE_SIZE.
>>> + */
>>> + return get_vmap_allow_huge() ? PMD_SIZE * num_online_nodes() : PAGE_SIZE;
>>> +}
>>
>> Imho, this is making too many assumptions about implementation details. Can't we
>> just add a new module_alloc*() API instead which internally guarantees allocating
>> huge pages when enabled/supported (e.g. with a __weak function as fallback)?
>
> I agree that this is making too many assumptions. But a new module_alloc_huge()
> may not work, because we need the caller to know the proper size to ask for.
> (Or maybe I misunderstood your suggestion?)
>
> How about we introduce something like
>
> /* minimal size to get huge pages from vmalloc. If not possible,
> * return 0 (or -1?)
> */
> int vmalloc_hpage_min_size(void)
> {
> return vmap_allow_huge ? PMD_SIZE * num_online_nodes() : 0;
> }
And that would live inside mm/vmalloc.c and be exported to users ...
> /* minimal size to get huge pages from module_alloc */
> int module_alloc_hpage_min_size(void)
> {
> return vmalloc_hpage_min_size();
> }
... and this one as wrapper in module alloc infra with __weak attr?
> static inline int bpf_prog_pack_size(void)
> {
> return module_alloc_hpage_min_size() ? : PAGE_SIZE;
> }
Could probably work. It's not nice, but at least it lives in the corresponding
places, so it's not exposed / hard-coded inside bpf and doesn't assume
implementation details which could potentially break later on.
Thanks,
Daniel
* Re: [PATCH bpf-next 2/2] bpf: flexible size for bpf_prog_pack
2022-02-11 14:35 ` Daniel Borkmann
@ 2022-02-11 19:42 ` Song Liu
2022-03-01 23:01 ` Song Liu
0 siblings, 1 reply; 8+ messages in thread
From: Song Liu @ 2022-02-11 19:42 UTC (permalink / raw)
To: Daniel Borkmann
Cc: Song Liu, Linux Memory Management List, bpf, Network Development,
Alexei Starovoitov, Andrii Nakryiko, Kernel Team, Andrew Morton,
Eric Dumazet, Michal Hocko
> On Feb 11, 2022, at 6:35 AM, Daniel Borkmann <daniel@iogearbox.net> wrote:
>
> On 2/10/22 5:51 PM, Song Liu wrote:
>>> On Feb 10, 2022, at 12:25 AM, Daniel Borkmann <daniel@iogearbox.net> wrote:
>>> On 2/10/22 7:41 AM, Song Liu wrote:
>>>> bpf_prog_pack uses huge pages to reduce pressure on instruction TLB.
>>>> To guarantee allocating huge pages for bpf_prog_pack, it is necessary to
>>>> allocate memory of size PMD_SIZE * num_online_nodes().
>>>> On the other hand, if the system doesn't support huge pages, it is more
>>>> efficient to allocate PAGE_SIZE bpf_prog_pack.
>>>> Address different scenarios with more flexible bpf_prog_pack_size().
>>>> Signed-off-by: Song Liu <song@kernel.org>
>>>> ---
>>>> kernel/bpf/core.c | 47 +++++++++++++++++++++++++++--------------------
>>>> 1 file changed, 27 insertions(+), 20 deletions(-)
>>>> diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
>>>> index 42d96549a804..d961a1f07a13 100644
>>>> --- a/kernel/bpf/core.c
>>>> +++ b/kernel/bpf/core.c
>>>> @@ -814,46 +814,53 @@ int bpf_jit_add_poke_descriptor(struct bpf_prog *prog,
>>>> * allocator. The prog_pack allocator uses HPAGE_PMD_SIZE page (2MB on x86)
>>>> * to host BPF programs.
>>>> */
>>>> -#ifdef CONFIG_TRANSPARENT_HUGEPAGE
>>>> -#define BPF_PROG_PACK_SIZE HPAGE_PMD_SIZE
>>>> -#else
>>>> -#define BPF_PROG_PACK_SIZE PAGE_SIZE
>>>> -#endif
>>>> #define BPF_PROG_CHUNK_SHIFT 6
>>>> #define BPF_PROG_CHUNK_SIZE (1 << BPF_PROG_CHUNK_SHIFT)
>>>> #define BPF_PROG_CHUNK_MASK (~(BPF_PROG_CHUNK_SIZE - 1))
>>>> -#define BPF_PROG_CHUNK_COUNT (BPF_PROG_PACK_SIZE / BPF_PROG_CHUNK_SIZE)
>>>> struct bpf_prog_pack {
>>>> struct list_head list;
>>>> void *ptr;
>>>> - unsigned long bitmap[BITS_TO_LONGS(BPF_PROG_CHUNK_COUNT)];
>>>> + unsigned long bitmap[];
>>>> };
>>>> -#define BPF_PROG_MAX_PACK_PROG_SIZE BPF_PROG_PACK_SIZE
>>>> #define BPF_PROG_SIZE_TO_NBITS(size) (round_up(size, BPF_PROG_CHUNK_SIZE) / BPF_PROG_CHUNK_SIZE)
>>>> static DEFINE_MUTEX(pack_mutex);
>>>> static LIST_HEAD(pack_list);
>>>> +static inline int bpf_prog_pack_size(void)
>>>> +{
>>>> + /* If vmap_allow_huge == true, use pack size of the smallest
>>>> + * possible vmalloc huge page: PMD_SIZE * num_online_nodes().
>>>> + * Otherwise, use pack size of PAGE_SIZE.
>>>> + */
>>>> + return get_vmap_allow_huge() ? PMD_SIZE * num_online_nodes() : PAGE_SIZE;
>>>> +}
>>>
>>> Imho, this is making too many assumptions about implementation details. Can't we
>>> just add a new module_alloc*() API instead which internally guarantees allocating
>>> huge pages when enabled/supported (e.g. with a __weak function as fallback)?
>> I agree that this is making too many assumptions. But a new module_alloc_huge()
>> may not work, because we need the caller to know the proper size to ask for.
>> (Or maybe I misunderstood your suggestion?)
>> How about we introduce something like
>> /* minimal size to get huge pages from vmalloc. If not possible,
>> * return 0 (or -1?)
>> */
>> int vmalloc_hpage_min_size(void)
>> {
>> return vmap_allow_huge ? PMD_SIZE * num_online_nodes() : 0;
>> }
>
> And that would live inside mm/vmalloc.c and is exported to users ...
Yeah, this will go to vmalloc.c.
>
>> /* minimal size to get huge pages from module_alloc */
>> int module_alloc_hpage_min_size(void)
>> {
>> return vmalloc_hpage_min_size();
>> }
>
> ... and this one as wrapper in module alloc infra with __weak attr?
And this goes to some module.c file(s). I am not quite sure whether we
need __weak attr or not.
>
>> static inline int bpf_prog_pack_size(void)
>> {
>> return module_alloc_hpage_min_size() ? : PAGE_SIZE;
>> }
>
> Could probably work. It's not nice, but at least in the corresponding places so it's
> not exposed / hard coded inside bpf and assuming implementation details which could
> potentially break later on.
I don't really like it either.
Another way to do this is to test the required size for bpf_prog_pack in the
BPF code, something like the following. The advantage of this version is that
we don't need any changes in the vmalloc or module code.
Thanks,
Song
diff --git i/kernel/bpf/core.c w/kernel/bpf/core.c
index 44623c9b5bb1..3cfd0f0c93d2 100644
--- i/kernel/bpf/core.c
+++ w/kernel/bpf/core.c
@@ -814,15 +814,9 @@ int bpf_jit_add_poke_descriptor(struct bpf_prog *prog,
* allocator. The prog_pack allocator uses HPAGE_PMD_SIZE page (2MB on x86)
* to host BPF programs.
*/
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
-#define BPF_PROG_PACK_SIZE HPAGE_PMD_SIZE
-#else
-#define BPF_PROG_PACK_SIZE PAGE_SIZE
-#endif
#define BPF_PROG_CHUNK_SHIFT 6
#define BPF_PROG_CHUNK_SIZE (1 << BPF_PROG_CHUNK_SHIFT)
#define BPF_PROG_CHUNK_MASK (~(BPF_PROG_CHUNK_SIZE - 1))
-#define BPF_PROG_CHUNK_COUNT (BPF_PROG_PACK_SIZE / BPF_PROG_CHUNK_SIZE)
struct bpf_prog_pack {
struct list_head list;
@@ -830,30 +824,56 @@ struct bpf_prog_pack {
unsigned long bitmap[];
};
-#define BPF_PROG_MAX_PACK_PROG_SIZE BPF_PROG_PACK_SIZE
#define BPF_PROG_SIZE_TO_NBITS(size) (round_up(size, BPF_PROG_CHUNK_SIZE) / BPF_PROG_CHUNK_SIZE)
+static int bpf_prog_pack_size = -1;
+
+static inline int bpf_prog_chunk_count(void)
+{
+ WARN_ON_ONCE(bpf_prog_pack_size == -1);
+ return bpf_prog_pack_size / BPF_PROG_CHUNK_SIZE;
+}
+
static DEFINE_MUTEX(pack_mutex);
static LIST_HEAD(pack_list);
static struct bpf_prog_pack *alloc_new_pack(void)
{
struct bpf_prog_pack *pack;
+ void *ptr;
+ int size;
- pack = kzalloc(sizeof(*pack) + BITS_TO_BYTES(BPF_PROG_CHUNK_COUNT), GFP_KERNEL);
- if (!pack)
+ /* Test whether we can get huge pages. If not just use PAGE_SIZE
+ * packs.
+ */
+ if (bpf_prog_pack_size == -1) {
+ size = PMD_SIZE * num_online_nodes();
+ ptr = module_alloc(size);
+ if (is_vm_area_hugepages(ptr)) {
+ bpf_prog_pack_size = size;
+ goto got_ptr;
+ } else {
+ bpf_prog_pack_size = PAGE_SIZE;
+ vfree(ptr);
+ }
+ }
+
+ ptr = module_alloc(bpf_prog_pack_size);
+ if (!ptr)
return NULL;
- pack->ptr = module_alloc(BPF_PROG_PACK_SIZE);
- if (!pack->ptr) {
- kfree(pack);
+got_ptr:
+ pack = kzalloc(sizeof(*pack) + BITS_TO_BYTES(bpf_prog_chunk_count()), GFP_KERNEL);
+ if (!pack) {
+ vfree(ptr);
return NULL;
}
- bitmap_zero(pack->bitmap, BPF_PROG_PACK_SIZE / BPF_PROG_CHUNK_SIZE);
+ pack->ptr = ptr;
+ bitmap_zero(pack->bitmap, bpf_prog_pack_size / BPF_PROG_CHUNK_SIZE);
list_add_tail(&pack->list, &pack_list);
set_vm_flush_reset_perms(pack->ptr);
- set_memory_ro((unsigned long)pack->ptr, BPF_PROG_PACK_SIZE / PAGE_SIZE);
- set_memory_x((unsigned long)pack->ptr, BPF_PROG_PACK_SIZE / PAGE_SIZE);
+ set_memory_ro((unsigned long)pack->ptr, bpf_prog_pack_size / PAGE_SIZE);
+ set_memory_x((unsigned long)pack->ptr, bpf_prog_pack_size / PAGE_SIZE);
return pack;
}
@@ -864,7 +884,7 @@ static void *bpf_prog_pack_alloc(u32 size)
unsigned long pos;
void *ptr = NULL;
- if (size > BPF_PROG_MAX_PACK_PROG_SIZE) {
+ if (size > bpf_prog_pack_size) {
size = round_up(size, PAGE_SIZE);
ptr = module_alloc(size);
if (ptr) {
@@ -876,9 +896,9 @@ static void *bpf_prog_pack_alloc(u32 size)
}
mutex_lock(&pack_mutex);
list_for_each_entry(pack, &pack_list, list) {
- pos = bitmap_find_next_zero_area(pack->bitmap, BPF_PROG_CHUNK_COUNT, 0,
+ pos = bitmap_find_next_zero_area(pack->bitmap, bpf_prog_chunk_count(), 0,
nbits, 0);
- if (pos < BPF_PROG_CHUNK_COUNT)
+ if (pos < bpf_prog_chunk_count())
goto found_free_area;
}
@@ -904,12 +924,12 @@ static void bpf_prog_pack_free(struct bpf_binary_header *hdr)
unsigned long pos;
void *pack_ptr;
- if (hdr->size > BPF_PROG_MAX_PACK_PROG_SIZE) {
+ if (hdr->size > bpf_prog_pack_size) {
module_memfree(hdr);
return;
}
- pack_ptr = (void *)((unsigned long)hdr & ~(BPF_PROG_PACK_SIZE - 1));
+ pack_ptr = (void *)((unsigned long)hdr & ~(bpf_prog_pack_size - 1));
mutex_lock(&pack_mutex);
list_for_each_entry(tmp, &pack_list, list) {
@@ -926,8 +946,8 @@ static void bpf_prog_pack_free(struct bpf_binary_header *hdr)
pos = ((unsigned long)hdr - (unsigned long)pack_ptr) >> BPF_PROG_CHUNK_SHIFT;
bitmap_clear(pack->bitmap, pos, nbits);
- if (bitmap_find_next_zero_area(pack->bitmap, BPF_PROG_CHUNK_COUNT, 0,
- BPF_PROG_CHUNK_COUNT, 0) == 0) {
+ if (bitmap_find_next_zero_area(pack->bitmap, bpf_prog_chunk_count(), 0,
+ bpf_prog_chunk_count(), 0) == 0) {
list_del(&pack->list);
module_memfree(pack->ptr);
kfree(pack);
* Re: [PATCH bpf-next 2/2] bpf: flexible size for bpf_prog_pack
2022-02-11 19:42 ` Song Liu
@ 2022-03-01 23:01 ` Song Liu
0 siblings, 0 replies; 8+ messages in thread
From: Song Liu @ 2022-03-01 23:01 UTC (permalink / raw)
To: Daniel Borkmann
Cc: Song Liu, Linux Memory Management List, bpf, Network Development,
Alexei Starovoitov, Andrii Nakryiko, Kernel Team, Andrew Morton,
Eric Dumazet, Michal Hocko
> On Feb 11, 2022, at 11:42 AM, Song Liu <songliubraving@fb.com> wrote:
>
>
>
>> On Feb 11, 2022, at 6:35 AM, Daniel Borkmann <daniel@iogearbox.net> wrote:
>>
>> On 2/10/22 5:51 PM, Song Liu wrote:
>>>> On Feb 10, 2022, at 12:25 AM, Daniel Borkmann <daniel@iogearbox.net> wrote:
>>>> On 2/10/22 7:41 AM, Song Liu wrote:
>>>>> bpf_prog_pack uses huge pages to reduce pressure on instruction TLB.
>>>>> To guarantee allocating huge pages for bpf_prog_pack, it is necessary to
>>>>> allocate memory of size PMD_SIZE * num_online_nodes().
>>>>> On the other hand, if the system doesn't support huge pages, it is more
>>>>> efficient to allocate PAGE_SIZE bpf_prog_pack.
>>>>> Address different scenarios with more flexible bpf_prog_pack_size().
>>>>> Signed-off-by: Song Liu <song@kernel.org>
>>>>> ---
>>>>> kernel/bpf/core.c | 47 +++++++++++++++++++++++++++--------------------
>>>>> 1 file changed, 27 insertions(+), 20 deletions(-)
>>>>> diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
>>>>> index 42d96549a804..d961a1f07a13 100644
>>>>> --- a/kernel/bpf/core.c
>>>>> +++ b/kernel/bpf/core.c
>>>>> @@ -814,46 +814,53 @@ int bpf_jit_add_poke_descriptor(struct bpf_prog *prog,
>>>>> * allocator. The prog_pack allocator uses HPAGE_PMD_SIZE page (2MB on x86)
>>>>> * to host BPF programs.
>>>>> */
>>>>> -#ifdef CONFIG_TRANSPARENT_HUGEPAGE
>>>>> -#define BPF_PROG_PACK_SIZE HPAGE_PMD_SIZE
>>>>> -#else
>>>>> -#define BPF_PROG_PACK_SIZE PAGE_SIZE
>>>>> -#endif
>>>>> #define BPF_PROG_CHUNK_SHIFT 6
>>>>> #define BPF_PROG_CHUNK_SIZE (1 << BPF_PROG_CHUNK_SHIFT)
>>>>> #define BPF_PROG_CHUNK_MASK (~(BPF_PROG_CHUNK_SIZE - 1))
>>>>> -#define BPF_PROG_CHUNK_COUNT (BPF_PROG_PACK_SIZE / BPF_PROG_CHUNK_SIZE)
>>>>> struct bpf_prog_pack {
>>>>> struct list_head list;
>>>>> void *ptr;
>>>>> - unsigned long bitmap[BITS_TO_LONGS(BPF_PROG_CHUNK_COUNT)];
>>>>> + unsigned long bitmap[];
>>>>> };
>>>>> -#define BPF_PROG_MAX_PACK_PROG_SIZE BPF_PROG_PACK_SIZE
>>>>> #define BPF_PROG_SIZE_TO_NBITS(size) (round_up(size, BPF_PROG_CHUNK_SIZE) / BPF_PROG_CHUNK_SIZE)
>>>>> static DEFINE_MUTEX(pack_mutex);
>>>>> static LIST_HEAD(pack_list);
>>>>> +static inline int bpf_prog_pack_size(void)
>>>>> +{
>>>>> + /* If vmap_allow_huge == true, use pack size of the smallest
>>>>> + * possible vmalloc huge page: PMD_SIZE * num_online_nodes().
>>>>> + * Otherwise, use pack size of PAGE_SIZE.
>>>>> + */
>>>>> + return get_vmap_allow_huge() ? PMD_SIZE * num_online_nodes() : PAGE_SIZE;
>>>>> +}
>>>>
>>>> Imho, this is making too many assumptions about implementation details. Can't we
>>>> just add a new module_alloc*() API instead which internally guarantees allocating
>>>> huge pages when enabled/supported (e.g. with a __weak function as fallback)?
>>> I agree that this is making too many assumptions. But a new module_alloc_huge()
>>> may not work, because we need the caller to know the proper size to ask for.
>>> (Or maybe I misunderstood your suggestion?)
>>> How about we introduce something like
>>> /* minimal size to get huge pages from vmalloc. If not possible,
>>> * return 0 (or -1?)
>>> */
>>> int vmalloc_hpage_min_size(void)
>>> {
>>> return vmap_allow_huge ? PMD_SIZE * num_online_nodes() : 0;
>>> }
>>
>> And that would live inside mm/vmalloc.c and is exported to users ...
>
> Yeah, this will go to vmalloc.c.
>
>>
>>> /* minimal size to get huge pages from module_alloc */
>>> int module_alloc_hpage_min_size(void)
>>> {
>>> return vmalloc_hpage_min_size();
>>> }
>>
>> ... and this one as wrapper in module alloc infra with __weak attr?
>
> And this goes to some module.c file(s). I am not quite sure whether we
> need __weak attr or not.
>
>>
>>> static inline int bpf_prog_pack_size(void)
>>> {
>>> return module_alloc_hpage_min_size() ? : PAGE_SIZE;
>>> }
>>
>> Could probably work. It's not nice, but at least in the corresponding places so it's
>> not exposed / hard coded inside bpf and assuming implementation details which could
>> potentially break later on.
>
> I don't really like it either.
>
> Another way to do this is to test the required size for bpf_prog_pack
> in BPF code, something like the following. The pro of this version is
> that we don't need changes in vmalloc and module code.
Hi Daniel,
Do you have further suggestions on this? I personally like the following
version best, as all the changes are limited to bpf/core.c.
Thanks,
Song
> diff --git i/kernel/bpf/core.c w/kernel/bpf/core.c
> index 44623c9b5bb1..3cfd0f0c93d2 100644
> --- i/kernel/bpf/core.c
> +++ w/kernel/bpf/core.c
> @@ -814,15 +814,9 @@ int bpf_jit_add_poke_descriptor(struct bpf_prog *prog,
> * allocator. The prog_pack allocator uses HPAGE_PMD_SIZE page (2MB on x86)
> * to host BPF programs.
> */
> -#ifdef CONFIG_TRANSPARENT_HUGEPAGE
> -#define BPF_PROG_PACK_SIZE HPAGE_PMD_SIZE
> -#else
> -#define BPF_PROG_PACK_SIZE PAGE_SIZE
> -#endif
> #define BPF_PROG_CHUNK_SHIFT 6
> #define BPF_PROG_CHUNK_SIZE (1 << BPF_PROG_CHUNK_SHIFT)
> #define BPF_PROG_CHUNK_MASK (~(BPF_PROG_CHUNK_SIZE - 1))
> -#define BPF_PROG_CHUNK_COUNT (BPF_PROG_PACK_SIZE / BPF_PROG_CHUNK_SIZE)
>
> struct bpf_prog_pack {
> struct list_head list;
> @@ -830,30 +824,56 @@ struct bpf_prog_pack {
> unsigned long bitmap[];
> };
>
> -#define BPF_PROG_MAX_PACK_PROG_SIZE BPF_PROG_PACK_SIZE
> #define BPF_PROG_SIZE_TO_NBITS(size) (round_up(size, BPF_PROG_CHUNK_SIZE) / BPF_PROG_CHUNK_SIZE)
>
> +static int bpf_prog_pack_size = -1;
> +
> +static inline int bpf_prog_chunk_count(void)
> +{
> + WARN_ON_ONCE(bpf_prog_pack_size == -1);
> + return bpf_prog_pack_size / BPF_PROG_CHUNK_SIZE;
> +}
> +
> static DEFINE_MUTEX(pack_mutex);
> static LIST_HEAD(pack_list);
>
> static struct bpf_prog_pack *alloc_new_pack(void)
> {
> struct bpf_prog_pack *pack;
> + void *ptr;
> + int size;
>
> - pack = kzalloc(sizeof(*pack) + BITS_TO_BYTES(BPF_PROG_CHUNK_COUNT), GFP_KERNEL);
> - if (!pack)
> + /* Test whether we can get huge pages. If not just use PAGE_SIZE
> + * packs.
> + */
> + if (bpf_prog_pack_size == -1) {
> + size = PMD_SIZE * num_online_nodes();
> + ptr = module_alloc(size);
> + if (is_vm_area_hugepages(ptr)) {
> + bpf_prog_pack_size = size;
> + goto got_ptr;
> + } else {
> + bpf_prog_pack_size = PAGE_SIZE;
> + vfree(ptr);
> + }
> + }
> +
> + ptr = module_alloc(bpf_prog_pack_size);
> + if (!ptr)
> return NULL;
> - pack->ptr = module_alloc(BPF_PROG_PACK_SIZE);
> - if (!pack->ptr) {
> - kfree(pack);
> +got_ptr:
> + pack = kzalloc(sizeof(*pack) + BITS_TO_BYTES(bpf_prog_chunk_count()), GFP_KERNEL);
> + if (!pack) {
> + vfree(ptr);
> return NULL;
> }
> - bitmap_zero(pack->bitmap, BPF_PROG_PACK_SIZE / BPF_PROG_CHUNK_SIZE);
> + pack->ptr = ptr;
> + bitmap_zero(pack->bitmap, bpf_prog_pack_size / BPF_PROG_CHUNK_SIZE);
> list_add_tail(&pack->list, &pack_list);
>
> set_vm_flush_reset_perms(pack->ptr);
> - set_memory_ro((unsigned long)pack->ptr, BPF_PROG_PACK_SIZE / PAGE_SIZE);
> - set_memory_x((unsigned long)pack->ptr, BPF_PROG_PACK_SIZE / PAGE_SIZE);
> + set_memory_ro((unsigned long)pack->ptr, bpf_prog_pack_size / PAGE_SIZE);
> + set_memory_x((unsigned long)pack->ptr, bpf_prog_pack_size / PAGE_SIZE);
> return pack;
> }
>
> @@ -864,7 +884,7 @@ static void *bpf_prog_pack_alloc(u32 size)
> unsigned long pos;
> void *ptr = NULL;
>
> - if (size > BPF_PROG_MAX_PACK_PROG_SIZE) {
> + if (size > bpf_prog_pack_size) {
> size = round_up(size, PAGE_SIZE);
> ptr = module_alloc(size);
> if (ptr) {
> @@ -876,9 +896,9 @@ static void *bpf_prog_pack_alloc(u32 size)
> }
> mutex_lock(&pack_mutex);
> list_for_each_entry(pack, &pack_list, list) {
> - pos = bitmap_find_next_zero_area(pack->bitmap, BPF_PROG_CHUNK_COUNT, 0,
> + pos = bitmap_find_next_zero_area(pack->bitmap, bpf_prog_chunk_count(), 0,
> nbits, 0);
> - if (pos < BPF_PROG_CHUNK_COUNT)
> + if (pos < bpf_prog_chunk_count())
> goto found_free_area;
> }
>
> @@ -904,12 +924,12 @@ static void bpf_prog_pack_free(struct bpf_binary_header *hdr)
> unsigned long pos;
> void *pack_ptr;
>
> - if (hdr->size > BPF_PROG_MAX_PACK_PROG_SIZE) {
> + if (hdr->size > bpf_prog_pack_size) {
> module_memfree(hdr);
> return;
> }
>
> - pack_ptr = (void *)((unsigned long)hdr & ~(BPF_PROG_PACK_SIZE - 1));
> + pack_ptr = (void *)((unsigned long)hdr & ~(bpf_prog_pack_size - 1));
> mutex_lock(&pack_mutex);
>
> list_for_each_entry(tmp, &pack_list, list) {
> @@ -926,8 +946,8 @@ static void bpf_prog_pack_free(struct bpf_binary_header *hdr)
> pos = ((unsigned long)hdr - (unsigned long)pack_ptr) >> BPF_PROG_CHUNK_SHIFT;
>
> bitmap_clear(pack->bitmap, pos, nbits);
> - if (bitmap_find_next_zero_area(pack->bitmap, BPF_PROG_CHUNK_COUNT, 0,
> - BPF_PROG_CHUNK_COUNT, 0) == 0) {
> + if (bitmap_find_next_zero_area(pack->bitmap, bpf_prog_chunk_count(), 0,
> + bpf_prog_chunk_count(), 0) == 0) {
> list_del(&pack->list);
> module_memfree(pack->ptr);
> kfree(pack);
>
>