diff --git a/lib/maple_tree.c b/lib/maple_tree.c
index 967631055210..a621d17e872d 100644
--- a/lib/maple_tree.c
+++ b/lib/maple_tree.c
@@ -140,6 +140,7 @@ struct maple_subtree_state {
 	struct maple_big_node *bn;
 };
 
+extern int mas_debug;
 /* Functions */
 static inline struct maple_node *mt_alloc_one(gfp_t gfp)
 {
@@ -4550,6 +4551,9 @@ static inline void *mas_next_nentry(struct ma_state *mas,
 	while (mas->offset < count) {
 		pivot = pivots[mas->offset];
 		entry = mas_slot(mas, slots, mas->offset);
+		if (mas_debug)
+			pr_err("%s: entry = %px, slots=%px, mas->offset=%d\n",
+			       __func__, entry, slots, mas->offset);
 		if (ma_dead_node(node))
 			return NULL;
 
@@ -4570,6 +4574,10 @@
 
 	pivot = mas_safe_pivot(mas, pivots, mas->offset, type);
 	entry = mas_slot(mas, slots, mas->offset);
+	if (mas_debug)
+		pr_err("%s: entry = %px, slots=%px, mas->offset=%d count=%d\n",
+		       __func__, entry, slots, mas->offset, count);
+
 	if (ma_dead_node(node))
 		return NULL;
 
@@ -4580,6 +4588,8 @@
 		return NULL;
 
 found:
+	if (mas_debug)
+		pr_err("found pivot = %lx, entry = %px\n", pivot, entry);
 	mas->last = pivot;
 	return entry;
 }
@@ -4618,6 +4628,9 @@ static inline void *mas_next_entry(struct ma_state *mas, unsigned long limit)
 	unsigned long last;
 	enum maple_type mt;
 
+	if (mas_debug)
+		pr_err("%s: entry\n", __func__);
+
 	last = mas->last;
 retry:
 	offset = mas->offset;
@@ -4625,10 +4638,17 @@ static inline void *mas_next_entry(struct ma_state *mas, unsigned long limit)
 	node = mas_mn(mas);
 	mt = mte_node_type(mas->node);
 	mas->offset++;
-	if (unlikely(mas->offset >= mt_slots[mt]))
+	if (mas_debug)
+		pr_err("%s: offset=%d\n", __func__, offset);
+	if (unlikely(mas->offset >= mt_slots[mt])) {
+		if (mas_debug)
+			pr_err("%s: next node\n", __func__);
 		goto next_node;
+	}
 
 	while (!mas_is_none(mas)) {
+		if (mas_debug)
+			pr_err("%s: !none\n", __func__);
 		entry = mas_next_nentry(mas, node, limit, mt);
 		if (unlikely(ma_dead_node(node))) {
 			mas_rewalk(mas, last);
@@ -4656,6 +4676,8 @@ static inline void *mas_next_entry(struct ma_state *mas, unsigned long limit)
 	mas->index = mas->last = limit;
 	mas->offset = offset;
 	mas->node = prev_node;
+	if (mas_debug)
+		pr_err("%s: return NULL, mas->node = %px\n", __func__, prev_node);
 	return NULL;
 }
 
@@ -4914,6 +4936,8 @@ static inline bool mas_anode_descend(struct ma_state *mas, unsigned long size)
 void *mas_walk(struct ma_state *mas)
 {
 	void *entry;
+	if (mas_debug)
+		pr_err("%s\n", __func__);
 
 retry:
 	entry = mas_state_walk(mas);
@@ -5838,7 +5862,12 @@ EXPORT_SYMBOL_GPL(mas_pause);
  */
 void *mas_find(struct ma_state *mas, unsigned long max)
 {
+	if (mas_debug)
+		pr_err("%s: max=%lx\n", __func__, max);
+
 	if (unlikely(mas_is_paused(mas))) {
+		if (mas_debug)
+			pr_err("%s: paused\n", __func__);
 		if (unlikely(mas->last == ULONG_MAX)) {
 			mas->node = MAS_NONE;
 			return NULL;
@@ -5848,6 +5877,8 @@ void *mas_find(struct ma_state *mas, unsigned long max)
 	}
 
 	if (unlikely(mas_is_start(mas))) {
+		if (mas_debug)
+			pr_err("%s: start\n", __func__);
 		/* First run or continue */
 		void *entry;
 
@@ -5859,8 +5890,11 @@ void *mas_find(struct ma_state *mas, unsigned long max)
 		return entry;
 	}
 
-	if (unlikely(!mas_searchable(mas)))
+	if (unlikely(!mas_searchable(mas))) {
+		if (mas_debug)
+			pr_err("%s: not searchable\n", __func__);
 		return NULL;
+	}
 
 	/* Retries on dead nodes handled by mas_next_entry */
 	return mas_next_entry(mas, max);
diff --git a/mm/mmap.c b/mm/mmap.c
index 2049500931ae..16ee834c0a4c 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -57,6 +57,7 @@
"internal.h" +int mas_debug; #ifndef arch_mmap_check #define arch_mmap_check(addr, len, flags) (0) #endif @@ -2375,6 +2376,10 @@ do_mas_align_munmap(struct ma_state *mas, struct vm_area_struct *vma, int count = 0; int error = -ENOMEM; MA_STATE(mas_detach, &mt_detach, start, end - 1); + + if (mas_debug) + pr_err("%s:%d\n", __func__, __LINE__); + mt_init_flags(&mt_detach, MM_MT_FLAGS); mt_set_external_lock(&mt_detach, &mm->mmap_lock); @@ -2541,28 +2546,37 @@ do_mas_align_munmap(struct ma_state *mas, struct vm_area_struct *vma, * * Returns: -EINVAL on failure, 1 on success and unlock, 0 otherwise. */ + + + int do_mas_munmap(struct ma_state *mas, struct mm_struct *mm, unsigned long start, size_t len, struct list_head *uf, bool downgrade) { unsigned long end; struct vm_area_struct *vma; + if (mas_debug) + pr_err("%s: %lx %lx\n", __func__, start, len); if ((offset_in_page(start)) || start > TASK_SIZE || len > TASK_SIZE-start) return -EINVAL; - + if (mas_debug) + pr_err("%s:%d\n", __func__, __LINE__); end = start + PAGE_ALIGN(len); if (end == start) return -EINVAL; - + if (mas_debug) + pr_err("%s:%d\n", __func__, __LINE__); /* arch_unmap() might do unmaps itself. */ arch_unmap(mm, start, end); - + if (mas_debug) + pr_err("%s:%d\n", __func__, __LINE__); /* Find the first overlapping VMA */ vma = mas_find(mas, end - 1); if (!vma) return 0; - + if (mas_debug) + pr_err("vma=%px\n", vma); return do_mas_align_munmap(mas, vma, mm, start, end, uf, downgrade); } @@ -2594,6 +2608,7 @@ unsigned long mmap_region(struct file *file, unsigned long addr, pgoff_t vm_pgoff; int error; MA_STATE(mas, &mm->mm_mt, addr, end - 1); + mas_debug = (addr == (1UL << 54)); /* Check against address space limit. */ if (!may_expand_vm(mm, vm_flags, len >> PAGE_SHIFT)) { @@ -2609,23 +2624,30 @@ unsigned long mmap_region(struct file *file, unsigned long addr, (len >> PAGE_SHIFT) - nr_pages)) return -ENOMEM; } - /* Unmap any existing mapping in the area */ - if (do_mas_munmap(&mas, mm, addr, len, uf, false)) + + if (do_mas_munmap(&mas, mm, addr, len, uf, false)) { + mas_debug = 0; return -ENOMEM; + } /* * Private writable mapping: check memory availability */ if (accountable_mapping(file, vm_flags)) { charged = len >> PAGE_SHIFT; - if (security_vm_enough_memory_mm(mm, charged)) + if (security_vm_enough_memory_mm(mm, charged)) { + mas_debug = 0; return -ENOMEM; + } vm_flags |= VM_ACCOUNT; } next = mas_next(&mas, ULONG_MAX); prev = mas_prev(&mas, 0); + if (mas_debug) + pr_err("%s: next %px\n", __func__, next); + mas_debug = 0; if (vm_flags & VM_SPECIAL) goto cannot_expand;