From: Andrey Ryabinin <aryabinin@virtuozzo.com>
To: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
Cc: "Kirill A. Shutemov" <kirill@shutemov.name>,
Andy Lutomirski <luto@kernel.org>,
Dmitry Vyukov <dvyukov@google.com>,
Alexander Potapenko <glider@google.com>,
Linus Torvalds <torvalds@linux-foundation.org>,
Andrew Morton <akpm@linux-foundation.org>,
"x86@kernel.org" <x86@kernel.org>,
Thomas Gleixner <tglx@linutronix.de>,
Ingo Molnar <mingo@redhat.com>, "H. Peter Anvin" <hpa@zytor.com>,
Andi Kleen <ak@linux.intel.com>,
Dave Hansen <dave.hansen@intel.com>,
linux-arch <linux-arch@vger.kernel.org>,
"linux-mm@kvack.org" <linux-mm@kvack.org>,
LKML <linux-kernel@vger.kernel.org>,
kasan-dev <kasan-dev@googlegroups.com>
Subject: Re: KASAN vs. boot-time switching between 4- and 5-level paging
Date: Thu, 13 Jul 2017 15:58:29 +0300 [thread overview]
Message-ID: <20939b37-efd8-2d32-0040-3682fff927c2@virtuozzo.com> (raw)
In-Reply-To: <20170711190554.zxkpjeg2bt65wtir@black.fi.intel.com>
On 07/11/2017 10:05 PM, Kirill A. Shutemov wrote:
>>> Can I use your Signed-off-by for a [cleaned up version of your] patch?
>>
>> Sure.
>
> Another KASAN-related issue: dumping page tables for KASAN shadow memory
> region takes unreasonable time due to kasan_zero_p?? mapped there.
>
> The patch below helps. Any objections?
>
Well, page tables dump doesn't work at all on 5-level paging.
E.g. I've got this nonsense:
....
---[ Kernel Space ]---
0xffff800000000000-0xffff808000000000 512G pud
---[ Low Kernel Mapping ]---
0xffff808000000000-0xffff810000000000 512G pud
---[ vmalloc() Area ]---
0xffff810000000000-0xffff818000000000 512G pud
---[ Vmemmap ]---
0xffff818000000000-0xffffff0000000000 128512G pud
---[ ESPfix Area ]---
0xffffff0000000000-0x0000000000000000 1T pud
0x0000000000000000-0x0000000000000000 0E pgd
0x0000000000000000-0x0000000000001000 4K RW PCD GLB NX pte
0x0000000000001000-0x0000000000002000 4K pte
0x0000000000002000-0x0000000000003000 4K ro GLB NX pte
0x0000000000003000-0x0000000000004000 4K pte
0x0000000000004000-0x0000000000007000 12K RW GLB NX pte
0x0000000000007000-0x0000000000008000 4K pte
0x0000000000008000-0x0000000000108000 1M RW GLB NX pte
0x0000000000108000-0x0000000000109000 4K pte
0x0000000000109000-0x0000000000189000 512K RW GLB NX pte
0x0000000000189000-0x000000000018a000 4K pte
0x000000000018a000-0x000000000018e000 16K RW GLB NX pte
0x000000000018e000-0x000000000018f000 4K pte
0x000000000018f000-0x0000000000193000 16K RW GLB NX pte
0x0000000000193000-0x0000000000194000 4K pte
... 304 entries skipped ...
---[ EFI Runtime Services ]---
0xffffffef00000000-0xffffffff80000000 66G pud
---[ High Kernel Mapping ]---
0xffffffff80000000-0xffffffffc0000000 1G pud
...
As for KASAN, I think it would be better just to make it work faster, the patch below demonstrates the idea.
---
arch/x86/mm/dump_pagetables.c | 22 ++++++++++++++++------
1 file changed, 16 insertions(+), 6 deletions(-)
diff --git a/arch/x86/mm/dump_pagetables.c b/arch/x86/mm/dump_pagetables.c
index 0470826d2bdc..36515fba86b0 100644
--- a/arch/x86/mm/dump_pagetables.c
+++ b/arch/x86/mm/dump_pagetables.c
@@ -13,6 +13,7 @@
*/
#include <linux/debugfs.h>
+#include <linux/kasan.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/sched.h>
@@ -307,16 +308,19 @@ static void walk_pte_level(struct seq_file *m, struct pg_state *st, pmd_t addr,
static void walk_pmd_level(struct seq_file *m, struct pg_state *st, pud_t addr, unsigned long P)
{
int i;
- pmd_t *start;
+ pmd_t *start, *pmd_addr;
pgprotval_t prot;
- start = (pmd_t *)pud_page_vaddr(addr);
+ pmd_addr = start = (pmd_t *)pud_page_vaddr(addr);
for (i = 0; i < PTRS_PER_PMD; i++) {
st->current_address = normalize_addr(P + i * PMD_LEVEL_MULT);
if (!pmd_none(*start)) {
if (pmd_large(*start) || !pmd_present(*start)) {
prot = pmd_flags(*start);
note_page(m, st, __pgprot(prot), 3);
+ } else if (__pa(pmd_addr) == __pa(kasan_zero_pmd)) {
+ prot = pte_flags(kasan_zero_pte[0]);
+ note_page(m, st, __pgprot(prot), 4);
} else {
walk_pte_level(m, st, *start,
P + i * PMD_LEVEL_MULT);
@@ -349,11 +353,11 @@ static bool pud_already_checked(pud_t *prev_pud, pud_t *pud, bool checkwx)
static void walk_pud_level(struct seq_file *m, struct pg_state *st, p4d_t addr, unsigned long P)
{
int i;
- pud_t *start;
+ pud_t *start, *pud_addr;
pgprotval_t prot;
pud_t *prev_pud = NULL;
- start = (pud_t *)p4d_page_vaddr(addr);
+ pud_addr = start = (pud_t *)p4d_page_vaddr(addr);
for (i = 0; i < PTRS_PER_PUD; i++) {
st->current_address = normalize_addr(P + i * PUD_LEVEL_MULT);
@@ -362,6 +366,9 @@ static void walk_pud_level(struct seq_file *m, struct pg_state *st, p4d_t addr,
if (pud_large(*start) || !pud_present(*start)) {
prot = pud_flags(*start);
note_page(m, st, __pgprot(prot), 2);
+ } else if (__pa(pud_addr) == __pa(kasan_zero_pud)) {
+ prot = pte_flags(kasan_zero_pte[0]);
+ note_page(m, st, __pgprot(prot), 4);
} else {
walk_pmd_level(m, st, *start,
P + i * PUD_LEVEL_MULT);
@@ -385,10 +392,10 @@ static void walk_pud_level(struct seq_file *m, struct pg_state *st, p4d_t addr,
static void walk_p4d_level(struct seq_file *m, struct pg_state *st, pgd_t addr, unsigned long P)
{
int i;
- p4d_t *start;
+ p4d_t *start, *p4d_addr;
pgprotval_t prot;
- start = (p4d_t *)pgd_page_vaddr(addr);
+ p4d_addr = start = (p4d_t *)pgd_page_vaddr(addr);
for (i = 0; i < PTRS_PER_P4D; i++) {
st->current_address = normalize_addr(P + i * P4D_LEVEL_MULT);
@@ -396,6 +403,9 @@ static void walk_p4d_level(struct seq_file *m, struct pg_state *st, pgd_t addr,
if (p4d_large(*start) || !p4d_present(*start)) {
prot = p4d_flags(*start);
note_page(m, st, __pgprot(prot), 2);
+ } else if (__pa(p4d_addr) == __pa(kasan_zero_p4d)) {
+ prot = pte_flags(kasan_zero_pte[0]);
+ note_page(m, st, __pgprot(prot), 4);
} else {
walk_pud_level(m, st, *start,
P + i * P4D_LEVEL_MULT);
--
2.13.0
--
To unsubscribe, send a message with 'unsubscribe linux-mm' in
the body to majordomo@kvack.org. For more info on Linux MM,
see: http://www.linux-mm.org/ .
Don't email: <a href="mailto:dont@kvack.org">email@kvack.org</a>
next prev parent reply other threads:[~2017-07-13 12:56 UTC|newest]
Thread overview: 54+ messages / expand[flat|nested] mbox.gz Atom feed top
2017-05-25 20:33 [PATCHv1, RFC 0/8] Boot-time switching between 4- and 5-level paging Kirill A. Shutemov
2017-05-25 20:33 ` [PATCHv1, RFC 1/8] x86/boot/compressed/64: Detect and handle 5-level paging at boot-time Kirill A. Shutemov
2017-05-25 20:33 ` [PATCHv1, RFC 2/8] x86/mm: Make virtual memory layout movable for CONFIG_X86_5LEVEL Kirill A. Shutemov
2017-05-25 20:33 ` [PATCHv1, RFC 3/8] x86/mm: Make PGDIR_SHIFT and PTRS_PER_P4D variable Kirill A. Shutemov
2017-05-25 20:33 ` [PATCHv1, RFC 4/8] x86/mm: Handle boot-time paging mode switching at early boot Kirill A. Shutemov
2017-05-25 20:33 ` [PATCHv1, RFC 5/8] x86/mm: Fold p4d page table layer at runtime Kirill A. Shutemov
2017-05-27 15:09 ` Brian Gerst
2017-05-27 22:46 ` Kirill A. Shutemov
2017-05-27 22:56 ` Brian Gerst
2017-05-25 20:33 ` [PATCHv1, RFC 6/8] x86/mm: Replace compile-time checks for 5-level with runtime-time Kirill A. Shutemov
2017-05-25 20:33 ` [PATCHv1, RFC 7/8] x86/mm: Hacks for boot-time switching between 4- and 5-level paging Kirill A. Shutemov
2017-05-26 22:10 ` KASAN vs. " Kirill A. Shutemov
2017-05-29 10:02 ` Dmitry Vyukov
2017-05-29 11:18 ` Andrey Ryabinin
2017-05-29 11:19 ` Dmitry Vyukov
2017-05-29 11:45 ` Andrey Ryabinin
2017-05-29 12:46 ` Andrey Ryabinin
2017-06-01 14:56 ` Andrey Ryabinin
2017-07-10 12:33 ` Kirill A. Shutemov
2017-07-10 12:43 ` Dmitry Vyukov
2017-07-10 14:17 ` Kirill A. Shutemov
2017-07-10 15:56 ` Andy Lutomirski
2017-07-10 18:47 ` Kirill A. Shutemov
2017-07-10 20:07 ` Andy Lutomirski
2017-07-10 21:24 ` Kirill A. Shutemov
2017-07-11 0:30 ` Andy Lutomirski
2017-07-11 10:35 ` Kirill A. Shutemov
2017-07-11 15:06 ` Andy Lutomirski
2017-07-11 15:15 ` Andrey Ryabinin
2017-07-11 16:45 ` Andrey Ryabinin
2017-07-11 17:03 ` Kirill A. Shutemov
2017-07-11 17:29 ` Andrey Ryabinin
2017-07-11 19:05 ` Kirill A. Shutemov
2017-07-13 12:58 ` Andrey Ryabinin [this message]
2017-07-13 13:52 ` Kirill A. Shutemov
2017-07-13 14:15 ` Kirill A. Shutemov
2017-07-13 14:19 ` Andrey Ryabinin
2017-07-24 12:13 ` Kirill A. Shutemov
2017-07-24 14:07 ` Andrey Ryabinin
2017-07-10 16:57 ` Andrey Ryabinin
2017-05-25 20:33 ` [PATCHv1, RFC 8/8] x86/mm: Allow to boot without la57 if CONFIG_X86_5LEVEL=y Kirill A. Shutemov
2017-05-25 23:24 ` [PATCHv1, RFC 0/8] Boot-time switching between 4- and 5-level paging Linus Torvalds
2017-05-26 0:40 ` Andy Lutomirski
2017-05-26 4:18 ` Kevin Easton
2017-05-26 7:21 ` Andy Lutomirski
2017-05-26 13:00 ` Kirill A. Shutemov
2017-05-26 13:35 ` Andi Kleen
2017-05-26 15:51 ` Linus Torvalds
2017-05-26 15:58 ` Kirill A. Shutemov
2017-05-26 16:13 ` Linus Torvalds
2017-05-26 18:24 ` hpa
2017-05-26 19:23 ` Dave Hansen
2017-05-26 19:36 ` hpa
2017-05-26 19:40 ` hpa
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20939b37-efd8-2d32-0040-3682fff927c2@virtuozzo.com \
--to=aryabinin@virtuozzo.com \
--cc=ak@linux.intel.com \
--cc=akpm@linux-foundation.org \
--cc=dave.hansen@intel.com \
--cc=dvyukov@google.com \
--cc=glider@google.com \
--cc=hpa@zytor.com \
--cc=kasan-dev@googlegroups.com \
--cc=kirill.shutemov@linux.intel.com \
--cc=kirill@shutemov.name \
--cc=linux-arch@vger.kernel.org \
--cc=linux-kernel@vger.kernel.org \
--cc=linux-mm@kvack.org \
--cc=luto@kernel.org \
--cc=mingo@redhat.com \
--cc=tglx@linutronix.de \
--cc=torvalds@linux-foundation.org \
--cc=x86@kernel.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).