From: Hui Zhu <zhuhui@xiaomi.com>
To: rjw@rjwysocki.net, len.brown@intel.com, pavel@ucw.cz,
m.szyprowski@samsung.com, akpm@linux-foundation.org,
mina86@mina86.com, aneesh.kumar@linux.vnet.ibm.com,
iamjoonsoo.kim@lge.com, hannes@cmpxchg.org, riel@redhat.com,
mgorman@suse.de, minchan@kernel.org, nasa4836@gmail.com,
ddstreet@ieee.org, hughd@google.com, mingo@kernel.org,
rientjes@google.com, peterz@infradead.org, keescook@chromium.org,
atomlin@redhat.com, raistlin@linux.it, axboe@fb.com,
paulmck@linux.vnet.ibm.com, kirill.shutemov@linux.intel.com,
n-horiguchi@ah.jp.nec.com, k.khlebnikov@samsung.com,
msalter@redhat.com, deller@gmx.de, tangchen@cn.fujitsu.com,
ben@decadent.org.uk, akinobu.mita@gmail.com,
lauraa@codeaurora.org, vbabka@suse.cz, sasha.levin@oracle.com,
vdavydov@parallels.com, suleiman@google.com
Cc: linux-kernel@vger.kernel.org, linux-pm@vger.kernel.org,
linux-mm@kvack.org, Hui Zhu <zhuhui@xiaomi.com>
Subject: [PATCH 3/4] (CMA_AGGRESSIVE) Update reserve custom contiguous area code
Date: Thu, 16 Oct 2014 11:35:50 +0800
Message-ID: <1413430551-22392-4-git-send-email-zhuhui@xiaomi.com>
In-Reply-To: <1413430551-22392-1-git-send-email-zhuhui@xiaomi.com>

Add cma_alloc_counter, cma_aggressive_switch, cma_aggressive_free_min
and cma_aggressive_shrink_switch.

cma_aggressive_switch is the master switch for all CMA_AGGRESSIVE
behavior.  It can be controlled through the sysctl
"vm.cma-aggressive-switch".

cma_aggressive_free_min can be controlled through the sysctl
"vm.cma-aggressive-free-min".  If the number of free CMA pages falls
below this value, CMA_AGGRESSIVE stops taking effect in the page
allocation code.

cma_aggressive_shrink_switch can be controlled through the sysctl
"vm.cma-aggressive-shrink-switch".  If it is set and the amount of free
normal memory is smaller than the requested allocation size, memory is
reclaimed with shrink_all_memory() before the driver allocates pages
from CMA.

While the kernel is reserving a custom contiguous area, cma_alloc_counter
is incremented and CMA_AGGRESSIVE does not take effect in the page
allocation code; the counter is decremented again when the reservation
function returns.
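
To make the intent concrete, here is a hypothetical sketch of how the
page allocation side (updated separately in patch 4/4, not shown here)
is expected to consult these knobs; the helper name is made up for
illustration:

static bool cma_aggressive_alloc_allowed(void)
{
	/* Master switch is off, or a CMA reservation is in flight. */
	if (!cma_aggressive_switch || atomic_read(&cma_alloc_counter) != 0)
		return false;

	/* Keep a minimum reserve of free CMA pages for CMA users. */
	if (global_page_state(NR_FREE_CMA_PAGES) <= cma_aggressive_free_min)
		return false;

	return true;
}
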
Signed-off-by: Hui Zhu <zhuhui@xiaomi.com>
---
include/linux/cma.h | 7 +++++++
kernel/sysctl.c | 27 +++++++++++++++++++++++++++
mm/cma.c | 54 +++++++++++++++++++++++++++++++++++++++++++++++++++++
3 files changed, 88 insertions(+)
diff --git a/include/linux/cma.h b/include/linux/cma.h
index 0430ed0..df96abf 100644
--- a/include/linux/cma.h
+++ b/include/linux/cma.h
@@ -15,6 +15,13 @@
struct cma;
+#ifdef CONFIG_CMA_AGGRESSIVE
+extern atomic_t cma_alloc_counter;
+extern int cma_aggressive_switch;
+extern unsigned long cma_aggressive_free_min;
+extern int cma_aggressive_shrink_switch;
+#endif
+
extern phys_addr_t cma_get_base(struct cma *cma);
extern unsigned long cma_get_size(struct cma *cma);
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index 4aada6d..646929e2 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -92,6 +92,10 @@
#include <linux/nmi.h>
#endif
+#ifdef CONFIG_CMA_AGGRESSIVE
+#include <linux/cma.h>
+#endif
+
#if defined(CONFIG_SYSCTL)
@@ -1485,6 +1489,29 @@ static struct ctl_table vm_table[] = {
.mode = 0644,
.proc_handler = proc_doulongvec_minmax,
},
+#ifdef CONFIG_CMA_AGGRESSIVE
+ {
+ .procname = "cma-aggressive-switch",
+ .data = &cma_aggressive_switch,
+ .maxlen = sizeof(int),
+ .mode = 0600,
+ .proc_handler = proc_dointvec,
+ },
+ {
+ .procname = "cma-aggressive-free-min",
+ .data = &cma_aggressive_free_min,
+ .maxlen = sizeof(unsigned long),
+ .mode = 0600,
+ .proc_handler = proc_doulongvec_minmax,
+ },
+ {
+ .procname = "cma-aggressive-shrink-switch",
+ .data = &cma_aggressive_shrink_switch,
+ .maxlen = sizeof(int),
+ .mode = 0600,
+ .proc_handler = proc_dointvec,
+ },
+#endif
{ }
};
diff --git a/mm/cma.c b/mm/cma.c
index 963bc4a..566ed5f 100644
--- a/mm/cma.c
+++ b/mm/cma.c
@@ -33,6 +33,7 @@
#include <linux/log2.h>
#include <linux/cma.h>
#include <linux/highmem.h>
+#include <linux/swap.h>
struct cma {
unsigned long base_pfn;
@@ -127,6 +128,27 @@ err:
return -EINVAL;
}
+#ifdef CONFIG_CMA_AGGRESSIVE
+/* Counter of in-flight dma_alloc_from_contiguous and
+   dma_release_from_contiguous calls. */
+atomic_t cma_alloc_counter = ATOMIC_INIT(0);
+
+/* Master switch for all CMA_AGGRESSIVE behavior. */
+int cma_aggressive_switch __read_mostly;
+
+/* If the number of free CMA pages is smaller than this value,
+   CMA_AGGRESSIVE will not take effect. */
+#ifdef CONFIG_CMA_AGGRESSIVE_FREE_MIN
+unsigned long cma_aggressive_free_min __read_mostly =
+ CONFIG_CMA_AGGRESSIVE_FREE_MIN;
+#else
+unsigned long cma_aggressive_free_min __read_mostly = 500;
+#endif
+
+/* Switch for the CMA_AGGRESSIVE shrink behavior. */
+int cma_aggressive_shrink_switch __read_mostly;
+#endif
+
static int __init cma_init_reserved_areas(void)
{
int i;
@@ -138,6 +160,22 @@ static int __init cma_init_reserved_areas(void)
return ret;
}
+#ifdef CONFIG_CMA_AGGRESSIVE
+ cma_aggressive_switch = 0;
+#ifdef CONFIG_CMA_AGGRESSIVE_PHY_MAX
+ if (memblock_phys_mem_size() <= CONFIG_CMA_AGGRESSIVE_PHY_MAX)
+#else
+ if (memblock_phys_mem_size() <= 0x40000000)
+#endif
+ cma_aggressive_switch = 1;
+
+ cma_aggressive_shrink_switch = 0;
+#ifdef CONFIG_CMA_AGGRESSIVE_SHRINK
+ if (cma_aggressive_switch)
+ cma_aggressive_shrink_switch = 1;
+#endif
+#endif
+
return 0;
}
core_initcall(cma_init_reserved_areas);
@@ -312,6 +350,11 @@ struct page *cma_alloc(struct cma *cma, int count, unsigned int align)
unsigned long bitmap_maxno, bitmap_no, bitmap_count;
struct page *page = NULL;
int ret;
+#ifdef CONFIG_CMA_AGGRESSIVE
+ int free = global_page_state(NR_FREE_PAGES)
+ - global_page_state(NR_FREE_CMA_PAGES)
+ - totalreserve_pages;
+#endif
if (!cma || !cma->count)
return NULL;
@@ -326,6 +369,13 @@ struct page *cma_alloc(struct cma *cma, int count, unsigned int align)
bitmap_maxno = cma_bitmap_maxno(cma);
bitmap_count = cma_bitmap_pages_to_bits(cma, count);
+#ifdef CONFIG_CMA_AGGRESSIVE
+ atomic_inc(&cma_alloc_counter);
+ if (cma_aggressive_switch && cma_aggressive_shrink_switch
+ && free < count)
+ shrink_all_memory(count - free, false);
+#endif
+
for (;;) {
mutex_lock(&cma->lock);
bitmap_no = bitmap_find_next_zero_area(cma->bitmap,
@@ -361,6 +411,10 @@ struct page *cma_alloc(struct cma *cma, int count, unsigned int align)
start = bitmap_no + mask + 1;
}
+#ifdef CONFIG_CMA_AGGRESSIVE
+ atomic_dec(&cma_alloc_counter);
+#endif
+
pr_debug("%s(): returned %p\n", __func__, page);
return page;
}
--
1.9.1
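
For reference, a minimal userspace sketch of flipping the new knobs
once the patch is applied.  The entries are registered in vm_table, so
they appear under /proc/sys/vm/; they are mode 0600, so root is
required.  The values written here are arbitrary examples:

#include <stdio.h>

/* Write a value to a sysctl file; returns 0 on success. */
static int write_sysctl(const char *path, const char *val)
{
	FILE *f = fopen(path, "w");

	if (!f)
		return -1;
	fprintf(f, "%s\n", val);
	return fclose(f);
}

int main(void)
{
	write_sysctl("/proc/sys/vm/cma-aggressive-switch", "1");
	write_sysctl("/proc/sys/vm/cma-aggressive-free-min", "1000");
	write_sysctl("/proc/sys/vm/cma-aggressive-shrink-switch", "1");
	return 0;
}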