From: Mike Kravetz <mike.kravetz@oracle.com>
To: linux-mm@kvack.org, linux-kernel@vger.kernel.org
Cc: Roman Gushchin <guro@fb.com>, Michal Hocko <mhocko@suse.com>,
	Shakeel Butt <shakeelb@google.com>,
	Oscar Salvador <osalvador@suse.de>,
	David Hildenbrand <david@redhat.com>,
	Muchun Song <songmuchun@bytedance.com>,
	David Rientjes <rientjes@google.com>,
	Miaohe Lin <linmiaohe@huawei.com>,
	Peter Zijlstra <peterz@infradead.org>,
	Matthew Wilcox <willy@infradead.org>,
	HORIGUCHI NAOYA <naoya.horiguchi@nec.com>,
	"Aneesh Kumar K . V" <aneesh.kumar@linux.ibm.com>,
	Waiman Long <longman@redhat.com>, Peter Xu <peterx@redhat.com>,
	Mina Almasry <almasrymina@google.com>,
	Hillf Danton <hdanton@sina.com>,
	Joonsoo Kim <iamjoonsoo.kim@lge.com>,
	Barry Song <song.bao.hua@hisilicon.com>,
	Will Deacon <will@kernel.org>,
	Andrew Morton <akpm@linux-foundation.org>,
	Mike Kravetz <mike.kravetz@oracle.com>
Subject: [PATCH v4 5/8] hugetlb: call update_and_free_page without hugetlb_lock
Date: Mon,  5 Apr 2021 16:00:40 -0700
Message-ID: <20210405230043.182734-6-mike.kravetz@oracle.com>
In-Reply-To: <20210405230043.182734-1-mike.kravetz@oracle.com>

With the introduction of remove_hugetlb_page(), there is no need for
update_and_free_page() to be called while holding hugetlb_lock.
Change all callers to drop the lock before calling it.
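
Each caller now follows the same pattern: do the bookkeeping that
needs the lock, drop the lock, then perform the potentially slow
free.  A minimal user-space sketch of that pattern, with pthreads
standing in for hugetlb_lock (struct hpage, remove_page_locked() and
the other names here are illustrative, not the kernel API):

  #include <pthread.h>
  #include <stdio.h>
  #include <stdlib.h>

  static pthread_mutex_t pool_lock = PTHREAD_MUTEX_INITIALIZER;

  struct hpage {
      struct hpage *next;
      int id;
  };

  static struct hpage *free_list;  /* protected by pool_lock */
  static long nr_pages;            /* protected by pool_lock */

  /* Bookkeeping only; caller must hold pool_lock. */
  static struct hpage *remove_page_locked(void)
  {
      struct hpage *p = free_list;

      if (p) {
          free_list = p->next;
          nr_pages--;
      }
      return p;
  }

  /* Potentially slow teardown; called with pool_lock dropped. */
  static void free_page_slow(struct hpage *p)
  {
      printf("freeing page %d\n", p->id);
      free(p);
  }

  static void put_one_page(void)
  {
      struct hpage *p;

      pthread_mutex_lock(&pool_lock);
      p = remove_page_locked();      /* counters adjusted under lock */
      pthread_mutex_unlock(&pool_lock);
      if (p)
          free_page_slow(p);         /* slow work, lock dropped */
  }

  int main(void)
  {
      for (int i = 0; i < 3; i++) {
          struct hpage *p = malloc(sizeof(*p));

          p->id = i;
          p->next = free_list;
          free_list = p;
          nr_pages++;
      }
      while (nr_pages)
          put_one_page();
      return 0;
  }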

With additional code modifications, this will allow loops which
decrease the huge page pool to drop hugetlb_lock for each page,
reducing long lock hold times.
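
In the diff below, try_to_free_low() already moves in that direction:
pages are collected on a local list while holding the lock, then
freed after it is dropped, with a reschedule point between pages.  A
hedged sketch of that loop structure, reusing the helpers from the
sketch above (sched_yield() standing in for cond_resched();
shrink_pool_to is an illustrative name):

  #include <sched.h>

  /* Shrink the pool toward 'count', doing the slow frees with
   * pool_lock dropped. */
  static void shrink_pool_to(long count)
  {
      struct hpage *batch = NULL, *p;

      pthread_mutex_lock(&pool_lock);
      while (nr_pages > count) {
          p = remove_page_locked();
          if (!p)
              break;
          p->next = batch;           /* collect on a private list */
          batch = p;
      }
      pthread_mutex_unlock(&pool_lock);

      /* Free outside the lock so other threads can make progress. */
      while ((p = batch) != NULL) {
          batch = p->next;
          free_page_slow(p);
          sched_yield();             /* cond_resched() stand-in */
      }
  }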

The ugly unlock/lock cycle in free_pool_huge_page will be removed in
a subsequent patch which restructures that routine.
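
Until then, the sketch below shows that temporary shape, again
reusing the helpers from the first sketch (free_one_pool_page is an
illustrative name, not the kernel function):

  /* Free one pooled page, temporarily cycling the lock around the
   * slow free.  Returns 1 if a page was freed, 0 otherwise. */
  static int free_one_pool_page(void)
  {
      struct hpage *p;
      int ret = 0;

      pthread_mutex_lock(&pool_lock);
      p = remove_page_locked();
      if (p) {
          /* temporary unlock/lock cycle around the slow free */
          pthread_mutex_unlock(&pool_lock);
          free_page_slow(p);
          pthread_mutex_lock(&pool_lock);
          ret = 1;
      }
      pthread_mutex_unlock(&pool_lock);
      return ret;
  }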

Signed-off-by: Mike Kravetz <mike.kravetz@oracle.com>
---
 mm/hugetlb.c | 43 +++++++++++++++++++++++++++++++++----------
 1 file changed, 33 insertions(+), 10 deletions(-)

diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index df2a3d1f632b..be6031a8e2a9 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -1446,16 +1446,18 @@ static void __free_huge_page(struct page *page)
 
 	if (HPageTemporary(page)) {
 		remove_hugetlb_page(h, page, false);
+		spin_unlock(&hugetlb_lock);
 		update_and_free_page(h, page);
 	} else if (h->surplus_huge_pages_node[nid]) {
 		/* remove the page from active list */
 		remove_hugetlb_page(h, page, true);
+		spin_unlock(&hugetlb_lock);
 		update_and_free_page(h, page);
 	} else {
 		arch_clear_hugepage_flags(page);
 		enqueue_huge_page(h, page);
+		spin_unlock(&hugetlb_lock);
 	}
-	spin_unlock(&hugetlb_lock);
 }
 
 /*
@@ -1736,7 +1738,13 @@ static int free_pool_huge_page(struct hstate *h, nodemask_t *nodes_allowed,
 				list_entry(h->hugepage_freelists[node].next,
 					  struct page, lru);
 			remove_hugetlb_page(h, page, acct_surplus);
+			/*
+			 * unlock/lock around update_and_free_page is temporary
+			 * and will be removed with subsequent patch.
+			 */
+			spin_unlock(&hugetlb_lock);
 			update_and_free_page(h, page);
+			spin_lock(&hugetlb_lock);
 			ret = 1;
 			break;
 		}
@@ -1805,8 +1813,9 @@ int dissolve_free_huge_page(struct page *page)
 		}
 		remove_hugetlb_page(h, page, false);
 		h->max_huge_pages--;
+		spin_unlock(&hugetlb_lock);
 		update_and_free_page(h, head);
-		rc = 0;
+		return 0;
 	}
 out:
 	spin_unlock(&hugetlb_lock);
@@ -2291,6 +2300,7 @@ static int alloc_and_dissolve_huge_page(struct hstate *h, struct page *old_page,
 	gfp_t gfp_mask = htlb_alloc_mask(h) | __GFP_THISNODE;
 	int nid = page_to_nid(old_page);
 	struct page *new_page;
+	struct page *page_to_free;
 	int ret = 0;
 
 	/*
@@ -2313,16 +2323,16 @@ static int alloc_and_dissolve_huge_page(struct hstate *h, struct page *old_page,
 		 * Freed from under us. Drop new_page too.
 		 */
 		remove_hugetlb_page(h, new_page, false);
-		update_and_free_page(h, new_page);
-		goto unlock;
+		page_to_free = new_page;
+		goto unlock_free;
 	} else if (page_count(old_page)) {
 		/*
 		 * Someone has grabbed the page, try to isolate it here.
 		 * Fail with -EBUSY if not possible.
 		 */
 		remove_hugetlb_page(h, new_page, false);
-		update_and_free_page(h, new_page);
 		spin_unlock(&hugetlb_lock);
+		update_and_free_page(h, new_page);
 		if (!isolate_huge_page(old_page, list))
 			ret = -EBUSY;
 		return ret;
@@ -2344,11 +2354,12 @@ static int alloc_and_dissolve_huge_page(struct hstate *h, struct page *old_page,
 		 * enqueue_huge_page for new page.  Net result is no change.
 		 */
 		remove_hugetlb_page(h, old_page, false);
-		update_and_free_page(h, old_page);
 		enqueue_huge_page(h, new_page);
+		page_to_free = old_page;
 	}
-unlock:
+unlock_free:
 	spin_unlock(&hugetlb_lock);
+	update_and_free_page(h, page_to_free);
 
 	return ret;
 }
@@ -2671,22 +2682,34 @@ static void try_to_free_low(struct hstate *h, unsigned long count,
 						nodemask_t *nodes_allowed)
 {
 	int i;
+	struct page *page, *next;
+	LIST_HEAD(page_list);
 
 	if (hstate_is_gigantic(h))
 		return;
 
+	/*
+	 * Collect pages to be freed on a list, and free after dropping lock
+	 */
 	for_each_node_mask(i, *nodes_allowed) {
-		struct page *page, *next;
 		struct list_head *freel = &h->hugepage_freelists[i];
 		list_for_each_entry_safe(page, next, freel, lru) {
 			if (count >= h->nr_huge_pages)
-				return;
+				goto out;
 			if (PageHighMem(page))
 				continue;
 			remove_hugetlb_page(h, page, false);
-			update_and_free_page(h, page);
+			list_add(&page->lru, &page_list);
 		}
 	}
+
+out:
+	spin_unlock(&hugetlb_lock);
+	list_for_each_entry_safe(page, next, &page_list, lru) {
+		update_and_free_page(h, page);
+		cond_resched();
+	}
+	spin_lock(&hugetlb_lock);
 }
 #else
 static inline void try_to_free_low(struct hstate *h, unsigned long count,
-- 
2.30.2

