From: Christoph Hellwig <hch@lst.de>
To: Andrew Morton <akpm@linux-foundation.org>,
	Uladzislau Rezki <urezki@gmail.com>
Cc: linux-mm@kvack.org
Subject: [PATCH 04/10] mm: move vmalloc_init and free_work down in vmalloc.c
Date: Thu, 19 Jan 2023 11:02:20 +0100
Message-ID: <20230119100226.789506-5-hch@lst.de>
In-Reply-To: <20230119100226.789506-1-hch@lst.de>

Move these two functions around a bit to avoid forward declarations.

Signed-off-by: Christoph Hellwig <hch@lst.de>
---
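[Note, not part of the commit message: the reordering works because C only
requires a function to be declared before it is used. As a minimal sketch
outside the kernel tree (helper(), caller() and main() are made-up names for
illustration only), defining the callee above its caller is what lets a
forward declaration be dropped:

	#include <stdio.h>

	/* Callee defined first, so no "static void helper(void);"
	 * prototype is needed anywhere else in the file. */
	static void helper(void)
	{
		printf("helper\n");
	}

	/* The caller comes after the definition it depends on. */
	static void caller(void)
	{
		helper();
	}

	int main(void)
	{
		caller();
		return 0;
	}

The patch follows the same pattern: with free_work moved below __vunmap (and
renamed delayed_vfree_work), the "static void __vunmap(const void *, int);"
prototype can go away, and vmalloc_init is moved to the end of the file so
that everything it references, including delayed_vfree_work, is already
defined above it.]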
 mm/vmalloc.c | 105 +++++++++++++++++++++++++--------------------------
 1 file changed, 52 insertions(+), 53 deletions(-)

diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index fafb6227f4428f..daeb28b54663d5 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -89,17 +89,6 @@ struct vfree_deferred {
 };
 static DEFINE_PER_CPU(struct vfree_deferred, vfree_deferred);
 
-static void __vunmap(const void *, int);
-
-static void free_work(struct work_struct *w)
-{
-	struct vfree_deferred *p = container_of(w, struct vfree_deferred, wq);
-	struct llist_node *t, *llnode;
-
-	llist_for_each_safe(llnode, t, llist_del_all(&p->list))
-		__vunmap((void *)llnode, 1);
-}
-
 /*** Page table manipulation functions ***/
 static int vmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
 			phys_addr_t phys_addr, pgprot_t prot,
@@ -2449,48 +2438,6 @@ static void vmap_init_free_space(void)
 	}
 }
 
-void __init vmalloc_init(void)
-{
-	struct vmap_area *va;
-	struct vm_struct *tmp;
-	int i;
-
-	/*
-	 * Create the cache for vmap_area objects.
-	 */
-	vmap_area_cachep = KMEM_CACHE(vmap_area, SLAB_PANIC);
-
-	for_each_possible_cpu(i) {
-		struct vmap_block_queue *vbq;
-		struct vfree_deferred *p;
-
-		vbq = &per_cpu(vmap_block_queue, i);
-		spin_lock_init(&vbq->lock);
-		INIT_LIST_HEAD(&vbq->free);
-		p = &per_cpu(vfree_deferred, i);
-		init_llist_head(&p->list);
-		INIT_WORK(&p->wq, free_work);
-	}
-
-	/* Import existing vmlist entries. */
-	for (tmp = vmlist; tmp; tmp = tmp->next) {
-		va = kmem_cache_zalloc(vmap_area_cachep, GFP_NOWAIT);
-		if (WARN_ON_ONCE(!va))
-			continue;
-
-		va->va_start = (unsigned long)tmp->addr;
-		va->va_end = va->va_start + tmp->size;
-		va->vm = tmp;
-		insert_vmap_area(va, &vmap_area_root, &vmap_area_list);
-	}
-
-	/*
-	 * Now we can initialize a free vmap space.
-	 */
-	vmap_init_free_space();
-	vmap_initialized = true;
-}
-
 static inline void setup_vmalloc_vm_locked(struct vm_struct *vm,
 	struct vmap_area *va, unsigned long flags, const void *caller)
 {
@@ -2769,6 +2716,15 @@ static void __vunmap(const void *addr, int deallocate_pages)
 	kfree(area);
 }
 
+static void delayed_vfree_work(struct work_struct *w)
+{
+	struct vfree_deferred *p = container_of(w, struct vfree_deferred, wq);
+	struct llist_node *t, *llnode;
+
+	llist_for_each_safe(llnode, t, llist_del_all(&p->list))
+		__vunmap((void *)llnode, 1);
+}
+
 /**
  * vfree_atomic - release memory allocated by vmalloc()
  * @addr:	  memory base address
@@ -4315,3 +4271,46 @@ static int __init proc_vmalloc_init(void)
 module_init(proc_vmalloc_init);
 
 #endif
+
+void __init vmalloc_init(void)
+{
+	struct vmap_area *va;
+	struct vm_struct *tmp;
+	int i;
+
+	/*
+	 * Create the cache for vmap_area objects.
+	 */
+	vmap_area_cachep = KMEM_CACHE(vmap_area, SLAB_PANIC);
+
+	for_each_possible_cpu(i) {
+		struct vmap_block_queue *vbq;
+		struct vfree_deferred *p;
+
+		vbq = &per_cpu(vmap_block_queue, i);
+		spin_lock_init(&vbq->lock);
+		INIT_LIST_HEAD(&vbq->free);
+		p = &per_cpu(vfree_deferred, i);
+		init_llist_head(&p->list);
+		INIT_WORK(&p->wq, delayed_vfree_work);
+	}
+
+	/* Import existing vmlist entries. */
+	for (tmp = vmlist; tmp; tmp = tmp->next) {
+		va = kmem_cache_zalloc(vmap_area_cachep, GFP_NOWAIT);
+		if (WARN_ON_ONCE(!va))
+			continue;
+
+		va->va_start = (unsigned long)tmp->addr;
+		va->va_end = va->va_start + tmp->size;
+		va->vm = tmp;
+		insert_vmap_area(va, &vmap_area_root, &vmap_area_list);
+	}
+
+	/*
+	 * Now we can initialize a free vmap space.
+	 */
+	vmap_init_free_space();
+	vmap_initialized = true;
+}
+
-- 
2.39.0