From: Mike Kravetz <mike.kravetz@oracle.com>
To: linux-mm@kvack.org, linux-kernel@vger.kernel.org
Cc: Roman Gushchin <guro@fb.com>, Michal Hocko <mhocko@suse.com>,
Shakeel Butt <shakeelb@google.com>,
Oscar Salvador <osalvador@suse.de>,
David Hildenbrand <david@redhat.com>,
Muchun Song <songmuchun@bytedance.com>,
David Rientjes <rientjes@google.com>,
Miaohe Lin <linmiaohe@huawei.com>,
Peter Zijlstra <peterz@infradead.org>,
Matthew Wilcox <willy@infradead.org>,
HORIGUCHI NAOYA <naoya.horiguchi@nec.com>,
"Aneesh Kumar K . V" <aneesh.kumar@linux.ibm.com>,
Waiman Long <longman@redhat.com>, Peter Xu <peterx@redhat.com>,
Mina Almasry <almasrymina@google.com>,
Hillf Danton <hdanton@sina.com>,
Joonsoo Kim <iamjoonsoo.kim@lge.com>,
Barry Song <song.bao.hua@hisilicon.com>,
Will Deacon <will@kernel.org>,
Andrew Morton <akpm@linux-foundation.org>,
Mike Kravetz <mike.kravetz@oracle.com>
Subject: [PATCH v2 1/8] mm/cma: change cma mutex to irq safe spinlock
Date: Mon, 29 Mar 2021 16:23:55 -0700
Message-ID: <20210329232402.575396-2-mike.kravetz@oracle.com>
In-Reply-To: <20210329232402.575396-1-mike.kravetz@oracle.com>

Ideally, cma_release could be called from any context. However, that is
not possible because a mutex is used to protect the per-area bitmap, and
a mutex cannot be taken in atomic context. Change the per-area mutex to
an irq safe spinlock so that the bitmap can be manipulated from any
context.
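
The conversion follows the standard pattern: a mutex may sleep and so
cannot be taken from interrupt or other atomic context, while
spin_lock_irqsave() saves and disables local interrupts, making the
lock safe to take anywhere. A minimal sketch of the pattern, for
illustration only (the names demo_area and demo_clear_bits are made up
and are not part of this patch):

	#include <linux/spinlock.h>
	#include <linux/bitmap.h>

	/* Illustrative sketch of the mutex -> irq safe spinlock pattern. */
	struct demo_area {
		unsigned long *bitmap;
		spinlock_t lock;	/* was: struct mutex lock */
	};

	/* Now callable from process, softirq, or hardirq context. */
	static void demo_clear_bits(struct demo_area *area,
				    unsigned long start, unsigned long nbits)
	{
		unsigned long flags;

		spin_lock_irqsave(&area->lock, flags);
		bitmap_clear(area->bitmap, start, nbits);
		spin_unlock_irqrestore(&area->lock, flags);
	}
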
Signed-off-by: Mike Kravetz <mike.kravetz@oracle.com>
---
 mm/cma.c       | 20 +++++++++++---------
 mm/cma.h       |  2 +-
 mm/cma_debug.c | 10 ++++++----
 3 files changed, 18 insertions(+), 14 deletions(-)

diff --git a/mm/cma.c b/mm/cma.c
index b2393b892d3b..80875fd4487b 100644
--- a/mm/cma.c
+++ b/mm/cma.c
@@ -24,7 +24,6 @@
 #include <linux/memblock.h>
 #include <linux/err.h>
 #include <linux/mm.h>
-#include <linux/mutex.h>
 #include <linux/sizes.h>
 #include <linux/slab.h>
 #include <linux/log2.h>
@@ -83,13 +82,14 @@ static void cma_clear_bitmap(struct cma *cma, unsigned long pfn,
 			     unsigned int count)
 {
 	unsigned long bitmap_no, bitmap_count;
+	unsigned long flags;
 
 	bitmap_no = (pfn - cma->base_pfn) >> cma->order_per_bit;
 	bitmap_count = cma_bitmap_pages_to_bits(cma, count);
 
-	mutex_lock(&cma->lock);
+	spin_lock_irqsave(&cma->lock, flags);
 	bitmap_clear(cma->bitmap, bitmap_no, bitmap_count);
-	mutex_unlock(&cma->lock);
+	spin_unlock_irqrestore(&cma->lock, flags);
 }
 
 static void __init cma_activate_area(struct cma *cma)
@@ -118,7 +118,7 @@ static void __init cma_activate_area(struct cma *cma)
 	     pfn += pageblock_nr_pages)
 		init_cma_reserved_pageblock(pfn_to_page(pfn));
 
-	mutex_init(&cma->lock);
+	spin_lock_init(&cma->lock);
 
 #ifdef CONFIG_CMA_DEBUGFS
 	INIT_HLIST_HEAD(&cma->mem_head);
@@ -391,8 +391,9 @@ static void cma_debug_show_areas(struct cma *cma)
 	unsigned long start = 0;
 	unsigned long nr_part, nr_total = 0;
 	unsigned long nbits = cma_bitmap_maxno(cma);
+	unsigned long flags;
 
-	mutex_lock(&cma->lock);
+	spin_lock_irqsave(&cma->lock, flags);
 	pr_info("number of available pages: ");
 	for (;;) {
 		next_zero_bit = find_next_zero_bit(cma->bitmap, nbits, start);
@@ -407,7 +408,7 @@ static void cma_debug_show_areas(struct cma *cma)
 		start = next_zero_bit + nr_zero;
 	}
 	pr_cont("=> %lu free of %lu total pages\n", nr_total, cma->count);
-	mutex_unlock(&cma->lock);
+	spin_unlock_irqrestore(&cma->lock, flags);
 }
 #else
 static inline void cma_debug_show_areas(struct cma *cma) { }
@@ -430,6 +431,7 @@ struct page *cma_alloc(struct cma *cma, size_t count, unsigned int align,
 	unsigned long pfn = -1;
 	unsigned long start = 0;
 	unsigned long bitmap_maxno, bitmap_no, bitmap_count;
+	unsigned long flags;
 	size_t i;
 	struct page *page = NULL;
 	int ret = -ENOMEM;
@@ -454,12 +456,12 @@ struct page *cma_alloc(struct cma *cma, size_t count, unsigned int align,
 		goto out;
 
 	for (;;) {
-		mutex_lock(&cma->lock);
+		spin_lock_irqsave(&cma->lock, flags);
 		bitmap_no = bitmap_find_next_zero_area_off(cma->bitmap,
 				bitmap_maxno, start, bitmap_count, mask,
 				offset);
 		if (bitmap_no >= bitmap_maxno) {
-			mutex_unlock(&cma->lock);
+			spin_unlock_irqrestore(&cma->lock, flags);
 			break;
 		}
 		bitmap_set(cma->bitmap, bitmap_no, bitmap_count);
@@ -468,7 +470,7 @@ struct page *cma_alloc(struct cma *cma, size_t count, unsigned int align,
 		 * our exclusive use. If the migration fails we will take the
 		 * lock again and unmark it.
 		 */
-		mutex_unlock(&cma->lock);
+		spin_unlock_irqrestore(&cma->lock, flags);
 
 		pfn = cma->base_pfn + (bitmap_no << cma->order_per_bit);
 		ret = alloc_contig_range(pfn, pfn + count, MIGRATE_CMA,
diff --git a/mm/cma.h b/mm/cma.h
index 68ffad4e430d..2c775877eae2 100644
--- a/mm/cma.h
+++ b/mm/cma.h
@@ -15,7 +15,7 @@ struct cma {
 	unsigned long   count;
 	unsigned long   *bitmap;
 	unsigned int order_per_bit; /* Order of pages represented by one bit */
-	struct mutex    lock;
+	spinlock_t	lock;
 #ifdef CONFIG_CMA_DEBUGFS
 	struct hlist_head mem_head;
 	spinlock_t mem_head_lock;
diff --git a/mm/cma_debug.c b/mm/cma_debug.c
index d5bf8aa34fdc..6379cfbfd568 100644
--- a/mm/cma_debug.c
+++ b/mm/cma_debug.c
@@ -35,11 +35,12 @@ static int cma_used_get(void *data, u64 *val)
 {
 	struct cma *cma = data;
 	unsigned long used;
+	unsigned long flags;
 
-	mutex_lock(&cma->lock);
+	spin_lock_irqsave(&cma->lock, flags);
 	/* pages counter is smaller than sizeof(int) */
 	used = bitmap_weight(cma->bitmap, (int)cma_bitmap_maxno(cma));
-	mutex_unlock(&cma->lock);
+	spin_unlock_irqrestore(&cma->lock, flags);
 	*val = (u64)used << cma->order_per_bit;
 
 	return 0;
@@ -52,8 +53,9 @@ static int cma_maxchunk_get(void *data, u64 *val)
 	unsigned long maxchunk = 0;
 	unsigned long start, end = 0;
 	unsigned long bitmap_maxno = cma_bitmap_maxno(cma);
+	unsigned long flags;
 
-	mutex_lock(&cma->lock);
+	spin_lock_irqsave(&cma->lock, flags);
 	for (;;) {
 		start = find_next_zero_bit(cma->bitmap, bitmap_maxno, end);
 		if (start >= bitmap_maxno)
@@ -61,7 +63,7 @@ static int cma_maxchunk_get(void *data, u64 *val)
 		end = find_next_bit(cma->bitmap, bitmap_maxno, start);
 		maxchunk = max(end - start, maxchunk);
 	}
-	mutex_unlock(&cma->lock);
+	spin_unlock_irqrestore(&cma->lock, flags);
 	*val = (u64)maxchunk << cma->order_per_bit;
 
 	return 0;
--
2.30.2
Thread overview: 26+ messages
2021-03-29 23:23 [PATCH v2 0/8] make hugetlb put_page safe for all calling contexts Mike Kravetz
2021-03-29 23:23 ` Mike Kravetz [this message]
2021-03-30 1:13 ` [PATCH v2 1/8] mm/cma: change cma mutex to irq safe spinlock Roman Gushchin
2021-03-30 1:20 ` Song Bao Hua (Barry Song)
2021-03-30 2:18 ` Mike Kravetz
2021-03-30 8:01 ` Michal Hocko
2021-03-30 8:08 ` [External] " Muchun Song
2021-03-30 8:17 ` Song Bao Hua (Barry Song)
2021-03-30 8:18 ` Michal Hocko
2021-03-30 8:21 ` Muchun Song
2021-03-31 2:37 ` Mike Kravetz
2021-03-29 23:23 ` [PATCH v2 2/8] hugetlb: no need to drop hugetlb_lock to call cma_release Mike Kravetz
2021-03-30 1:13 ` Roman Gushchin
2021-03-30 8:01 ` Michal Hocko
2021-03-29 23:23 ` [PATCH v2 3/8] hugetlb: add per-hstate mutex to synchronize user adjustments Mike Kravetz
2021-03-30 2:23 ` [External] " Muchun Song
2021-03-29 23:23 ` [PATCH v2 4/8] hugetlb: create remove_hugetlb_page() to separate functionality Mike Kravetz
2021-03-29 23:23 ` [PATCH v2 5/8] hugetlb: call update_and_free_page without hugetlb_lock Mike Kravetz
2021-03-30 2:10 ` Miaohe Lin
2021-03-30 2:21 ` [External] " Muchun Song
2021-03-31 2:39 ` Mike Kravetz
2021-03-29 23:24 ` [PATCH v2 6/8] hugetlb: change free_pool_huge_page to remove_pool_huge_page Mike Kravetz
2021-03-30 2:30 ` [External] " Muchun Song
2021-03-30 8:06 ` Michal Hocko
2021-03-29 23:24 ` [PATCH v2 7/8] hugetlb: make free_huge_page irq safe Mike Kravetz
2021-03-29 23:24 ` [PATCH v2 8/8] hugetlb: add lockdep_assert_held() calls for hugetlb_lock Mike Kravetz