Hi all,

Today's linux-next merge of the bpf-next tree got conflicts in:

  include/linux/bpf.h
  kernel/bpf/syscall.c

between commit:

  ff1c08e1f74b ("bpf: Change size to u64 for bpf_map_{area_alloc, charge_init}()")

from Linus' tree and commit:

  fc9702273e2e ("bpf: Add mmap() support for BPF_MAP_TYPE_ARRAY")

from the bpf-next tree.

I fixed it up (see below) and can carry the fix as necessary. This is now
fixed as far as linux-next is concerned, but any non-trivial conflicts
should be mentioned to your upstream maintainer when your tree is
submitted for merging.  You may also want to consider cooperating with
the maintainer of the conflicting tree to minimise any particularly
complex conflicts.

-- 
Cheers,
Stephen Rothwell

diff --cc include/linux/bpf.h
index 464f3f7e0b7a,e913dd5946ae..000000000000
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@@ -688,7 -798,8 +798,8 @@@ int bpf_map_charge_init(struct bpf_map_
  void bpf_map_charge_finish(struct bpf_map_memory *mem);
  void bpf_map_charge_move(struct bpf_map_memory *dst,
  			 struct bpf_map_memory *src);
 -void *bpf_map_area_alloc(size_t size, int numa_node);
 -void *bpf_map_area_mmapable_alloc(size_t size, int numa_node);
 +void *bpf_map_area_alloc(u64 size, int numa_node);
++void *bpf_map_area_mmapable_alloc(u64 size, int numa_node);
  void bpf_map_area_free(void *base);
  void bpf_map_init_from_attr(struct bpf_map *map, union bpf_attr *attr);
diff --cc kernel/bpf/syscall.c
index d447b5e343bf,bac3becf9f90..000000000000
--- a/kernel/bpf/syscall.c
+++ b/kernel/bpf/syscall.c
@@@ -127,7 -127,7 +127,7 @@@ static struct bpf_map *find_and_alloc_m
  	return map;
  }
  
- void *bpf_map_area_alloc(u64 size, int numa_node)
 -static void *__bpf_map_area_alloc(size_t size, int numa_node, bool mmapable)
++static void *__bpf_map_area_alloc(u64 size, int numa_node, bool mmapable)
  {
  	/* We really just want to fail instead of triggering OOM killer
  	 * under memory pressure, therefore we set __GFP_NORETRY to kmalloc,
@@@ -142,10 -142,8 +142,11 @@@
  	const gfp_t flags = __GFP_NOWARN | __GFP_ZERO;
  	void *area;
  
 +	if (size >= SIZE_MAX)
 +		return NULL;
 +
- 	if (size <= (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER)) {
+ 	/* kmalloc()'ed memory can't be mmap()'ed */
+ 	if (!mmapable && size <= (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER)) {
  		area = kmalloc_node(size, GFP_USER | __GFP_NORETRY | flags,
  				    numa_node);
  		if (area != NULL)
@@@ -157,6 -159,16 +162,16 @@@
  				      flags, __builtin_return_address(0));
  }
  
 -void *bpf_map_area_alloc(size_t size, int numa_node)
++void *bpf_map_area_alloc(u64 size, int numa_node)
+ {
+ 	return __bpf_map_area_alloc(size, numa_node, false);
+ }
+ 
 -void *bpf_map_area_mmapable_alloc(size_t size, int numa_node)
++void *bpf_map_area_mmapable_alloc(u64 size, int numa_node)
+ {
+ 	return __bpf_map_area_alloc(size, numa_node, true);
+ }
+ 
  void bpf_map_area_free(void *area)
  {
  	kvfree(area);
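
For context on why the resolution keeps both sides: the bpf-next commit
needs the mmapable flag, while the commit from Linus' tree widens size to
u64 and rejects sizes that do not fit in size_t, so that a huge size is
refused up front instead of being silently truncated when handed to
allocators that take size_t. That is what the "if (size >= SIZE_MAX)"
guard in the resolution above catches on 32-bit kernels. A minimal
user-space sketch of the truncation hazard (toy_alloc() and the chosen
size are illustrative, not kernel code):

	#include <stdint.h>
	#include <stdio.h>

	/* Stand-in for an allocator that, like kmalloc_node(), takes size_t. */
	static void *toy_alloc(size_t size)
	{
		printf("allocating %zu bytes\n", size);
		return NULL;	/* the allocation itself is elided */
	}

	int main(void)
	{
		uint64_t size = UINT64_C(1) << 32;	/* 4 GiB: fine as a u64 */

		/* On a 32-bit target, (size_t)size truncates to 0, so the
		 * allocator would be asked for the wrong size. Failing early
		 * when size >= SIZE_MAX, as __bpf_map_area_alloc() does in
		 * the resolution above, avoids that.
		 */
		if (size >= SIZE_MAX) {
			puts("rejected: size does not fit in size_t");
			return 1;
		}
		toy_alloc((size_t)size);
		return 0;
	}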