From: Andrew Morton <akpm@linux-foundation.org>
To: akpm@linux-foundation.org, arnd@arndb.de,
	dave.hansen@linux.intel.com, hch@lst.de, hpa@zytor.com,
	jroedel@suse.de, linux-mm@kvack.org, luto@kernel.org,
	mhocko@kernel.org, mingo@elte.hu, mm-commits@vger.kernel.org,
	peterz@infradead.org, rjw@rjwysocki.net, rostedt@goodmis.org,
	tglx@linutronix.de, torvalds@linux-foundation.org,
	vbabka@suse.cz, willy@infradead.org
Subject: [patch 124/128] x86/mm: remove vmalloc faulting
Date: Mon, 01 Jun 2020 21:52:40 -0700	[thread overview]
Message-ID: <20200602045240.lz_qc_cdp%akpm@linux-foundation.org> (raw)
In-Reply-To: <20200601214457.919c35648e96a2b46b573fe1@linux-foundation.org>

From: Joerg Roedel <jroedel@suse.de>
Subject: x86/mm: remove vmalloc faulting

Remove fault handling on vmalloc areas, as the vmalloc code now takes care
of synchronizing changes to all page-tables in the system.
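
To illustrate the new scheme: the mapping path in mm/vmalloc.c now
records which page-table levels it modified in a pgtbl_mod_mask and
syncs them to all page-tables eagerly, instead of fixing them up
lazily from the page-fault handler.  A condensed sketch (not the
verbatim kernel code; the per-level vmap helpers and sanity checks
are elided):

int map_kernel_range_noflush(unsigned long addr, unsigned long size,
			     pgprot_t prot, struct page **pages)
{
	unsigned long start = addr;
	unsigned long end = addr + size;
	unsigned long next;
	pgtbl_mod_mask mask = 0;	/* collects PGTBL_*_MODIFIED bits */
	pgd_t *pgd = pgd_offset_k(addr);
	int err, nr = 0;

	do {
		next = pgd_addr_end(addr, end);
		if (pgd_bad(*pgd))
			mask |= PGTBL_PGD_MODIFIED;
		/* lower levels OR their PGTBL_*_MODIFIED bits into mask */
		err = vmap_p4d_range(pgd, addr, next, prot, pages, &nr, &mask);
		if (err)
			return err;
	} while (pgd++, addr = next, addr != end);

	/* the eager sync that replaces vmalloc_fault() */
	if (mask & ARCH_PAGE_TABLE_SYNC_MASK)
		arch_sync_kernel_mappings(start, end);

	return 0;
}

An architecture opts in by defining ARCH_PAGE_TABLE_SYNC_MASK and
providing arch_sync_kernel_mappings(); the mask defaults to 0, so the
call compiles away on architectures that do not need it.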

Link: http://lkml.kernel.org/r/20200515140023.25469-8-joro@8bytes.org
Signed-off-by: Joerg Roedel <jroedel@suse.de>
Acked-by: Andy Lutomirski <luto@kernel.org>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Arnd Bergmann <arnd@arndb.de>
Cc: Christoph Hellwig <hch@lst.de>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: "H . Peter Anvin" <hpa@zytor.com>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: "Rafael J. Wysocki" <rjw@rjwysocki.net>
Cc: Steven Rostedt (VMware) <rostedt@goodmis.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Vlastimil Babka <vbabka@suse.cz>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
---
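
Note: the x86-64 side of the hook, added by patch 121/128 earlier in
this series, boils down to the following (condensed):

/* arch/x86/include/asm/pgtable_64_types.h */
#define ARCH_PAGE_TABLE_SYNC_MASK \
	(pgtable_l5_enabled() ? PGTBL_PGD_MODIFIED : PGTBL_P4D_MODIFIED)

/* arch/x86/mm/init_64.c */
void arch_sync_kernel_mappings(unsigned long start, unsigned long end)
{
	sync_global_pgds(start, end);
}

With 4-level paging the p4d level is folded into the pgd, so tracking
P4D modifications covers exactly the entries the removed 64-bit
vmalloc_fault() used to copy over on demand.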

 arch/x86/include/asm/switch_to.h |   23 ----
 arch/x86/kernel/setup_percpu.c   |    6 -
 arch/x86/mm/fault.c              |  134 -----------------------------
 arch/x86/mm/pti.c                |    8 -
 arch/x86/mm/tlb.c                |   37 --------
 5 files changed, 4 insertions(+), 204 deletions(-)

--- a/arch/x86/include/asm/switch_to.h~x86-mm-remove-vmalloc-faulting
+++ a/arch/x86/include/asm/switch_to.h
@@ -12,27 +12,6 @@ struct task_struct *__switch_to_asm(stru
 __visible struct task_struct *__switch_to(struct task_struct *prev,
 					  struct task_struct *next);
 
-/* This runs on the previous thread's stack. */
-static inline void prepare_switch_to(struct task_struct *next)
-{
-#ifdef CONFIG_VMAP_STACK
-	/*
-	 * If we switch to a stack that has a top-level paging entry
- * that is not present in the current mm, the resulting #PF will
- * be promoted to a double-fault and we'll panic.  Probe
-	 * the new stack now so that vmalloc_fault can fix up the page
-	 * tables if needed.  This can only happen if we use a stack
-	 * in vmap space.
-	 *
-	 * We assume that the stack is aligned so that it never spans
-	 * more than one top-level paging entry.
-	 *
-	 * To minimize cache pollution, just follow the stack pointer.
-	 */
-	READ_ONCE(*(unsigned char *)next->thread.sp);
-#endif
-}
-
 asmlinkage void ret_from_fork(void);
 
 /*
@@ -67,8 +46,6 @@ struct fork_frame {
 
 #define switch_to(prev, next, last)					\
 do {									\
-	prepare_switch_to(next);					\
-									\
 	((last) = __switch_to_asm((prev), (next)));			\
 } while (0)
 
--- a/arch/x86/kernel/setup_percpu.c~x86-mm-remove-vmalloc-faulting
+++ a/arch/x86/kernel/setup_percpu.c
@@ -287,9 +287,9 @@ void __init setup_per_cpu_areas(void)
 	/*
 	 * Sync back kernel address range again.  We already did this in
 	 * setup_arch(), but percpu data also needs to be available in
-	 * the smpboot asm.  We can't reliably pick up percpu mappings
-	 * using vmalloc_fault(), because exception dispatch needs
-	 * percpu data.
+	 * the smpboot asm and arch_sync_kernel_mappings() doesn't sync to
+	 * swapper_pg_dir on 32-bit. The per-cpu mappings need to be available
+	 * there too.
 	 *
 	 * FIXME: Can the later sync in setup_cpu_entry_areas() replace
 	 * this call?
--- a/arch/x86/mm/fault.c~x86-mm-remove-vmalloc-faulting
+++ a/arch/x86/mm/fault.c
@@ -215,44 +215,6 @@ void arch_sync_kernel_mappings(unsigned
 }
 
 /*
- * 32-bit:
- *
- *   Handle a fault on the vmalloc or module mapping area
- */
-static noinline int vmalloc_fault(unsigned long address)
-{
-	unsigned long pgd_paddr;
-	pmd_t *pmd_k;
-	pte_t *pte_k;
-
-	/* Make sure we are in vmalloc area: */
-	if (!(address >= VMALLOC_START && address < VMALLOC_END))
-		return -1;
-
-	/*
-	 * Synchronize this task's top level page-table
-	 * with the 'reference' page table.
-	 *
-	 * Do _not_ use "current" here. We might be inside
-	 * an interrupt in the middle of a task switch..
-	 */
-	pgd_paddr = read_cr3_pa();
-	pmd_k = vmalloc_sync_one(__va(pgd_paddr), address);
-	if (!pmd_k)
-		return -1;
-
-	if (pmd_large(*pmd_k))
-		return 0;
-
-	pte_k = pte_offset_kernel(pmd_k, address);
-	if (!pte_present(*pte_k))
-		return -1;
-
-	return 0;
-}
-NOKPROBE_SYMBOL(vmalloc_fault);
-
-/*
  * Did it hit the DOS screen memory VA from vm86 mode?
  */
 static inline void
@@ -316,79 +278,6 @@ out:
 
 #else /* CONFIG_X86_64: */
 
-/*
- * 64-bit:
- *
- *   Handle a fault on the vmalloc area
- */
-static noinline int vmalloc_fault(unsigned long address)
-{
-	pgd_t *pgd, *pgd_k;
-	p4d_t *p4d, *p4d_k;
-	pud_t *pud;
-	pmd_t *pmd;
-	pte_t *pte;
-
-	/* Make sure we are in vmalloc area: */
-	if (!(address >= VMALLOC_START && address < VMALLOC_END))
-		return -1;
-
-	/*
-	 * Copy kernel mappings over when needed. This can also
-	 * happen within a race in page table update. In the latter
-	 * case just flush:
-	 */
-	pgd = (pgd_t *)__va(read_cr3_pa()) + pgd_index(address);
-	pgd_k = pgd_offset_k(address);
-	if (pgd_none(*pgd_k))
-		return -1;
-
-	if (pgtable_l5_enabled()) {
-		if (pgd_none(*pgd)) {
-			set_pgd(pgd, *pgd_k);
-			arch_flush_lazy_mmu_mode();
-		} else {
-			BUG_ON(pgd_page_vaddr(*pgd) != pgd_page_vaddr(*pgd_k));
-		}
-	}
-
-	/* With 4-level paging, copying happens on the p4d level. */
-	p4d = p4d_offset(pgd, address);
-	p4d_k = p4d_offset(pgd_k, address);
-	if (p4d_none(*p4d_k))
-		return -1;
-
-	if (p4d_none(*p4d) && !pgtable_l5_enabled()) {
-		set_p4d(p4d, *p4d_k);
-		arch_flush_lazy_mmu_mode();
-	} else {
-		BUG_ON(p4d_pfn(*p4d) != p4d_pfn(*p4d_k));
-	}
-
-	BUILD_BUG_ON(CONFIG_PGTABLE_LEVELS < 4);
-
-	pud = pud_offset(p4d, address);
-	if (pud_none(*pud))
-		return -1;
-
-	if (pud_large(*pud))
-		return 0;
-
-	pmd = pmd_offset(pud, address);
-	if (pmd_none(*pmd))
-		return -1;
-
-	if (pmd_large(*pmd))
-		return 0;
-
-	pte = pte_offset_kernel(pmd, address);
-	if (!pte_present(*pte))
-		return -1;
-
-	return 0;
-}
-NOKPROBE_SYMBOL(vmalloc_fault);
-
 #ifdef CONFIG_CPU_SUP_AMD
 static const char errata93_warning[] =
 KERN_ERR 
@@ -1227,29 +1116,6 @@ do_kern_addr_fault(struct pt_regs *regs,
 	 */
 	WARN_ON_ONCE(hw_error_code & X86_PF_PK);
 
-	/*
-	 * We can fault-in kernel-space virtual memory on-demand. The
-	 * 'reference' page table is init_mm.pgd.
-	 *
-	 * NOTE! We MUST NOT take any locks for this case. We may
-	 * be in an interrupt or a critical region, and should
-	 * only copy the information from the master page table,
-	 * nothing more.
-	 *
-	 * Before doing this on-demand faulting, ensure that the
-	 * fault is not any of the following:
-	 * 1. A fault on a PTE with a reserved bit set.
-	 * 2. A fault caused by a user-mode access.  (Do not demand-
-	 *    fault kernel memory due to user-mode accesses).
-	 * 3. A fault caused by a page-level protection violation.
-	 *    (A demand fault would be on a non-present page which
-	 *     would have X86_PF_PROT==0).
-	 */
-	if (!(hw_error_code & (X86_PF_RSVD | X86_PF_USER | X86_PF_PROT))) {
-		if (vmalloc_fault(address) >= 0)
-			return;
-	}
-
 	/* Was the fault spurious, caused by lazy TLB invalidation? */
 	if (spurious_kernel_fault(hw_error_code, address))
 		return;
--- a/arch/x86/mm/pti.c~x86-mm-remove-vmalloc-faulting
+++ a/arch/x86/mm/pti.c
@@ -448,13 +448,7 @@ static void __init pti_clone_user_shared
 		 * the sp1 and sp2 slots.
 		 *
 		 * This is done for all possible CPUs during boot to ensure
-		 * that it's propagated to all mms.  If we were to add one of
-		 * these mappings during CPU hotplug, we would need to take
-		 * some measure to make sure that every mm that subsequently
-		 * ran on that CPU would have the relevant PGD entry in its
-		 * pagetables.  The usual vmalloc_fault() mechanism would not
-		 * work for page faults taken in entry_SYSCALL_64 before RSP
-		 * is set up.
+		 * that it's propagated to all mms.
 		 */
 
 		unsigned long va = (unsigned long)&per_cpu(cpu_tss_rw, cpu);
--- a/arch/x86/mm/tlb.c~x86-mm-remove-vmalloc-faulting
+++ a/arch/x86/mm/tlb.c
@@ -161,34 +161,6 @@ void switch_mm(struct mm_struct *prev, s
 	local_irq_restore(flags);
 }
 
-static void sync_current_stack_to_mm(struct mm_struct *mm)
-{
-	unsigned long sp = current_stack_pointer;
-	pgd_t *pgd = pgd_offset(mm, sp);
-
-	if (pgtable_l5_enabled()) {
-		if (unlikely(pgd_none(*pgd))) {
-			pgd_t *pgd_ref = pgd_offset_k(sp);
-
-			set_pgd(pgd, *pgd_ref);
-		}
-	} else {
-		/*
-		 * "pgd" is faked.  The top level entries are "p4d"s, so sync
-		 * the p4d.  This compiles to approximately the same code as
-		 * the 5-level case.
-		 */
-		p4d_t *p4d = p4d_offset(pgd, sp);
-
-		if (unlikely(p4d_none(*p4d))) {
-			pgd_t *pgd_ref = pgd_offset_k(sp);
-			p4d_t *p4d_ref = p4d_offset(pgd_ref, sp);
-
-			set_p4d(p4d, *p4d_ref);
-		}
-	}
-}
-
 static inline unsigned long mm_mangle_tif_spec_ib(struct task_struct *next)
 {
 	unsigned long next_tif = task_thread_info(next)->flags;
@@ -377,15 +349,6 @@ void switch_mm_irqs_off(struct mm_struct
 		 */
 		cond_ibpb(tsk);
 
-		if (IS_ENABLED(CONFIG_VMAP_STACK)) {
-			/*
-			 * If our current stack is in vmalloc space and isn't
-			 * mapped in the new pgd, we'll double-fault.  Forcibly
-			 * map it.
-			 */
-			sync_current_stack_to_mm(next);
-		}
-
 		/*
 		 * Stop remote flushes for the previous mm.
 		 * Skip kernel threads; we never send init_mm TLB flushing IPIs,
_
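
The comment added to setup_percpu.c above refers to the 32-bit
arch_sync_kernel_mappings() from patch 122/128 earlier in this
series.  It walks the pgd_list of process page-tables, so
swapper_pg_dir is not covered and the explicit sync in
setup_per_cpu_areas() remains necessary.  A condensed sketch:

/* arch/x86/mm/fault.c, CONFIG_X86_32 */
void arch_sync_kernel_mappings(unsigned long start, unsigned long end)
{
	unsigned long addr;

	/* sync one PMD-sized chunk at a time into every pgd on pgd_list */
	for (addr = start & PMD_MASK; addr <= (end & PMD_MASK);
	     addr += PMD_SIZE) {
		struct page *page;

		spin_lock(&pgd_lock);
		list_for_each_entry(page, &pgd_list, lru) {
			spinlock_t *pgt_lock;

			/* the pgt_lock is only needed by Xen */
			pgt_lock = &pgd_page_get_mm(page)->page_table_lock;

			spin_lock(pgt_lock);
			vmalloc_sync_one(page_address(page), addr);
			spin_unlock(pgt_lock);
		}
		spin_unlock(&pgd_lock);
	}
}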


Thread overview: 163+ messages
2020-06-02  4:44 incoming Andrew Morton
2020-06-02  4:45 ` [patch 001/128] squashfs: migrate from ll_rw_block usage to BIO Andrew Morton
2020-06-02  4:45 ` [patch 002/128] ocfs2: add missing annotation for dlm_empty_lockres() Andrew Morton
2020-06-02  4:45 ` [patch 003/128] ocfs2: mount shared volume without ha stack Andrew Morton
2020-06-02  4:45 ` [patch 004/128] arch/parisc/include/asm/pgtable.h: remove unused `old_pte' Andrew Morton
2020-06-02  4:45 ` [patch 005/128] vfs: track per-sb writeback errors and report them to syncfs Andrew Morton
2020-06-02  4:45 ` [patch 006/128] fs/buffer.c: record blockdev write errors in super_block that it backs Andrew Morton
2020-06-02  4:45 ` [patch 007/128] usercopy: mark dma-kmalloc caches as usercopy caches Andrew Morton
2020-06-02  4:45 ` [patch 008/128] mm/slub.c: fix corrupted freechain in deactivate_slab() Andrew Morton
2020-06-02  4:45 ` [patch 009/128] slub: Remove userspace notifier for cache add/remove Andrew Morton
2020-06-02  4:45 ` [patch 010/128] slub: remove kmalloc under list_lock from list_slab_objects() V2 Andrew Morton
2020-06-02  4:45 ` [patch 011/128] mm/slub: fix stack overruns with SLUB_STATS Andrew Morton
2020-06-02  4:46 ` [patch 012/128] Documentation/vm/slub.rst: s/Toggle/Enable/ Andrew Morton
2020-06-02 13:10   ` Rafael Aquini
2020-06-02  4:46 ` [patch 013/128] mm, dump_page(): do not crash with invalid mapping pointer Andrew Morton
2020-06-02  4:46 ` [patch 014/128] mm: move readahead prototypes from mm.h Andrew Morton
2020-06-02  4:46 ` [patch 015/128] mm: return void from various readahead functions Andrew Morton
2020-06-02  4:46 ` [patch 016/128] mm: ignore return value of ->readpages Andrew Morton
2020-06-02  4:46 ` [patch 017/128] mm: move readahead nr_pages check into read_pages Andrew Morton
2020-06-02  4:46 ` [patch 018/128] mm: add new readahead_control API Andrew Morton
2020-06-02  4:46 ` [patch 019/128] mm: use readahead_control to pass arguments Andrew Morton
2020-06-02  4:46 ` [patch 020/128] mm: rename various 'offset' parameters to 'index' Andrew Morton
2020-06-02  4:46 ` [patch 021/128] mm: rename readahead loop variable to 'i' Andrew Morton
2020-06-02  4:46 ` [patch 022/128] mm: remove 'page_offset' from readahead loop Andrew Morton
2020-06-02  4:46 ` [patch 023/128] mm: put readahead pages in cache earlier Andrew Morton
2020-06-02  4:46 ` [patch 024/128] mm: add readahead address space operation Andrew Morton
2020-06-02  4:46 ` [patch 025/128] mm: move end_index check out of readahead loop Andrew Morton
2020-06-02  4:46 ` [patch 026/128] mm: add page_cache_readahead_unbounded Andrew Morton
2020-06-02  4:46 ` [patch 027/128] mm: document why we don't set PageReadahead Andrew Morton
2020-06-02  4:46 ` [patch 028/128] mm: use memalloc_nofs_save in readahead path Andrew Morton
2020-06-02  4:47 ` [patch 029/128] fs: convert mpage_readpages to mpage_readahead Andrew Morton
2020-06-02  4:47 ` [patch 030/128] btrfs: convert from readpages to readahead Andrew Morton
2020-06-02  4:47 ` [patch 031/128] erofs: convert uncompressed files " Andrew Morton
2020-06-02  4:47 ` [patch 032/128] erofs: convert compressed " Andrew Morton
2020-06-02  4:47 ` [patch 033/128] ext4: convert " Andrew Morton
2020-06-02  4:47 ` [patch 034/128] ext4: pass the inode to ext4_mpage_readpages Andrew Morton
2020-06-02  4:47 ` [patch 035/128] f2fs: convert from readpages to readahead Andrew Morton
2020-06-02  4:47 ` [patch 036/128] f2fs: pass the inode to f2fs_mpage_readpages Andrew Morton
2020-06-02  4:47 ` [patch 037/128] fuse: convert from readpages to readahead Andrew Morton
2020-06-02  4:47 ` [patch 038/128] iomap: " Andrew Morton
2020-06-02  4:47 ` [patch 039/128] include/linux/pagemap.h: introduce attach/detach_page_private Andrew Morton
2020-06-02  4:47 ` [patch 040/128] md: remove __clear_page_buffers and use attach/detach_page_private Andrew Morton
2020-06-02  4:47 ` [patch 041/128] btrfs: " Andrew Morton
2020-06-02 14:19   ` David Sterba
2020-06-02  4:47 ` [patch 042/128] fs/buffer.c: " Andrew Morton
2020-06-02  4:47 ` [patch 043/128] f2fs: " Andrew Morton
2020-06-02  4:47 ` [patch 044/128] iomap: " Andrew Morton
2020-06-02 16:23   ` Darrick J. Wong
2020-06-02  4:47 ` [patch 045/128] ntfs: replace attach_page_buffers with attach_page_private Andrew Morton
2020-06-02  4:48 ` [patch 046/128] orangefs: use attach/detach_page_private Andrew Morton
2020-06-02  4:48 ` [patch 047/128] buffer_head.h: remove attach_page_buffers Andrew Morton
2020-06-02  4:48 ` [patch 048/128] mm/migrate.c: call detach_page_private to cleanup code Andrew Morton
2020-06-02  4:48 ` [patch 049/128] mm_types.h: change set_page_private to inline function Andrew Morton
2020-06-02  4:48 ` [patch 050/128] mm/filemap.c: remove misleading comment Andrew Morton
2020-06-02  4:48 ` [patch 051/128] mm/page-writeback.c: remove unused variable Andrew Morton
2020-06-02  4:48 ` [patch 052/128] mm/writeback: replace PF_LESS_THROTTLE with PF_LOCAL_THROTTLE Andrew Morton
2020-06-02  4:48 ` [patch 053/128] mm/writeback: discard NR_UNSTABLE_NFS, use NR_WRITEBACK instead Andrew Morton
2020-06-02  4:48 ` [patch 054/128] mm/gup.c: update the documentation Andrew Morton
2020-06-02  4:48 ` [patch 055/128] mm/gup: introduce pin_user_pages_unlocked Andrew Morton
2020-06-02  4:48 ` [patch 056/128] ivtv: convert get_user_pages() --> pin_user_pages() Andrew Morton
2020-06-02  4:48 ` [patch 057/128] mm/gup.c: further document vma_permits_fault() Andrew Morton
2020-06-02  4:48 ` [patch 058/128] mm/swapfile: use list_{prev,next}_entry() instead of open-coding Andrew Morton
2020-06-02  4:48 ` [patch 059/128] mm/swap_state: fix a data race in swapin_nr_pages Andrew Morton
2020-06-02  4:48 ` [patch 060/128] mm: swap: properly update readahead statistics in unuse_pte_range() Andrew Morton
2020-06-02  4:48 ` [patch 061/128] mm/swapfile.c: offset is only used when there is more slots Andrew Morton
2020-06-02  4:48 ` [patch 062/128] mm/swapfile.c: explicitly show ssd/non-ssd is handled mutually exclusive Andrew Morton
2020-06-02  4:48 ` [patch 063/128] mm/swapfile.c: remove the unnecessary goto for SSD case Andrew Morton
2020-06-02  4:48 ` [patch 064/128] mm/swapfile.c: simplify the calculation of n_goal Andrew Morton
2020-06-02  4:48 ` [patch 065/128] mm/swapfile.c: remove the extra check in scan_swap_map_slots() Andrew Morton
2020-06-02  4:49 ` [patch 066/128] mm/swapfile.c: found_free could be represented by (tmp < max) Andrew Morton
2020-06-02  4:49 ` [patch 067/128] mm/swapfile.c: tmp is always smaller than max Andrew Morton
2020-06-02  4:49 ` [patch 068/128] mm/swapfile.c: omit a duplicate code by compare tmp and max first Andrew Morton
2020-06-02  4:49 ` [patch 069/128] swap: try to scan more free slots even when fragmented Andrew Morton
2020-06-02  4:49 ` [patch 070/128] mm/swapfile.c: classify SWAP_MAP_XXX to make it more readable Andrew Morton
2020-06-02  4:49 ` [patch 071/128] mm/swapfile.c: __swap_entry_free() always free 1 entry Andrew Morton
2020-06-02  4:49 ` [patch 072/128] mm/swapfile.c: use prandom_u32_max() Andrew Morton
2020-06-02  4:49 ` [patch 073/128] swap: reduce lock contention on swap cache from swap slots allocation Andrew Morton
2020-06-02  4:49 ` [patch 074/128] mm: swapfile: fix /proc/swaps heading and Size/Used/Priority alignment Andrew Morton
2020-06-02  4:49 ` [patch 075/128] include/linux/swap.h: delete meaningless __add_to_swap_cache() declaration Andrew Morton
2020-06-02  4:49 ` [patch 076/128] mm, memcg: add workingset_restore in memory.stat Andrew Morton
2020-06-02  4:49 ` [patch 077/128] mm: memcontrol: simplify value comparison between count and limit Andrew Morton
2020-06-02  4:49 ` [patch 078/128] memcg: expose root cgroup's memory.stat Andrew Morton
2020-06-02  4:49 ` [patch 079/128] mm/memcg: prepare for swap over-high accounting and penalty calculation Andrew Morton
2020-06-02  4:49 ` [patch 080/128] mm/memcg: move penalty delay clamping out of calculate_high_delay() Andrew Morton
2020-06-02  4:49 ` [patch 081/128] mm/memcg: move cgroup high memory limit setting into struct page_counter Andrew Morton
2020-06-02  4:49 ` [patch 082/128] mm/memcg: automatically penalize tasks with high swap use Andrew Morton
2020-06-02  4:49 ` [patch 083/128] memcg: fix memcg_kmem_bypass() for remote memcg charging Andrew Morton
2020-06-02  4:49 ` [patch 084/128] x86: mm: ptdump: calculate effective permissions correctly Andrew Morton
2020-06-02  4:50 ` [patch 085/128] mm: ptdump: expand type of 'val' in note_page() Andrew Morton
2020-06-02  4:50 ` [patch 086/128] /proc/PID/smaps: Add PMD migration entry parsing Andrew Morton
2020-06-02  4:50 ` [patch 087/128] mm/memory: remove unnecessary pte_devmap case in copy_one_pte() Andrew Morton
2020-06-02  4:50 ` [patch 088/128] mm, memory_failure: don't send BUS_MCEERR_AO for action required error Andrew Morton
2020-06-02  4:50 ` [patch 089/128] x86/hyperv: use vmalloc_exec for the hypercall page Andrew Morton
2020-06-02  4:50 ` [patch 090/128] x86: fix vmap arguments in map_irq_stack Andrew Morton
2020-06-02  4:50 ` [patch 091/128] staging: android: ion: use vmap instead of vm_map_ram Andrew Morton
2020-06-02  4:50 ` [patch 092/128] staging: media: ipu3: use vmap instead of reimplementing it Andrew Morton
2020-06-02  4:50 ` [patch 093/128] dma-mapping: use vmap insted " Andrew Morton
2020-06-02  4:50 ` [patch 094/128] powerpc: add an ioremap_phb helper Andrew Morton
2020-06-02  4:50 ` [patch 095/128] powerpc: remove __ioremap_at and __iounmap_at Andrew Morton
2020-06-02  4:50 ` [patch 096/128] mm: remove __get_vm_area Andrew Morton
2020-06-02  4:50 ` [patch 097/128] mm: unexport unmap_kernel_range_noflush Andrew Morton
2020-06-02  4:50 ` [patch 098/128] mm: rename CONFIG_PGTABLE_MAPPING to CONFIG_ZSMALLOC_PGTABLE_MAPPING Andrew Morton
2020-06-02  4:50 ` [patch 099/128] mm: only allow page table mappings for built-in zsmalloc Andrew Morton
2020-06-02  4:51 ` [patch 100/128] mm: pass addr as unsigned long to vb_free Andrew Morton
2020-06-02  4:51 ` [patch 101/128] mm: remove vmap_page_range_noflush and vunmap_page_range Andrew Morton
2020-06-02  4:51 ` [patch 102/128] mm: rename vmap_page_range to map_kernel_range Andrew Morton
2020-06-02  4:51 ` [patch 103/128] mm: don't return the number of pages from map_kernel_range{,_noflush} Andrew Morton
2020-06-02  4:51 ` [patch 104/128] mm: remove map_vm_range Andrew Morton
2020-06-02  4:51 ` [patch 105/128] mm: remove unmap_vmap_area Andrew Morton
2020-06-02  4:51 ` [patch 106/128] mm: remove the prot argument from vm_map_ram Andrew Morton
2020-06-02  4:51 ` [patch 107/128] mm: enforce that vmap can't map pages executable Andrew Morton
2020-06-02  4:51 ` [patch 108/128] gpu/drm: remove the powerpc hack in drm_legacy_sg_alloc Andrew Morton
2020-06-02  4:51 ` [patch 109/128] mm: remove the pgprot argument to __vmalloc Andrew Morton
2020-06-02  4:51 ` [patch 110/128] mm: remove the prot argument to __vmalloc_node Andrew Morton
2020-06-02  4:51 ` [patch 111/128] mm: remove both instances of __vmalloc_node_flags Andrew Morton
2020-06-02  4:51 ` [patch 112/128] mm: remove __vmalloc_node_flags_caller Andrew Morton
2020-06-02  4:51 ` [patch 113/128] mm: switch the test_vmalloc module to use __vmalloc_node Andrew Morton
2020-06-02  4:52 ` [patch 114/128] mm: remove vmalloc_user_node_flags Andrew Morton
2020-06-02  4:52 ` [patch 115/128] arm64: use __vmalloc_node in arch_alloc_vmap_stack Andrew Morton
2020-06-02  4:52 ` [patch 116/128] powerpc: use __vmalloc_node in alloc_vm_stack Andrew Morton
2020-06-02  4:52 ` [patch 117/128] s390: use __vmalloc_node in stack_alloc Andrew Morton
2020-06-02  4:52 ` [patch 118/128] mm: add functions to track page directory modifications Andrew Morton
2020-06-02  4:52 ` [patch 119/128] mm/vmalloc: track which page-table levels were modified Andrew Morton
2020-06-02  4:52 ` [patch 120/128] mm/ioremap: " Andrew Morton
2020-06-02  4:52 ` [patch 121/128] x86/mm/64: implement arch_sync_kernel_mappings() Andrew Morton
2020-06-02  4:52 ` [patch 122/128] x86/mm/32: " Andrew Morton
2020-06-02  4:52 ` [patch 123/128] mm: remove vmalloc_sync_(un)mappings() Andrew Morton
2020-06-02  4:52 ` [patch 124/128] x86/mm: remove vmalloc faulting Andrew Morton [this message]
2020-06-02  4:52 ` [patch 125/128] kasan: fix clang compilation warning due to stack protector Andrew Morton
2020-06-02  4:52 ` [patch 126/128] ubsan: entirely disable alignment checks under UBSAN_TRAP Andrew Morton
2020-06-02  4:52 ` [patch 127/128] mm/mm_init.c: report kasan-tag information stored in page->flags Andrew Morton
2020-06-02  4:52 ` [patch 128/128] kasan: move kasan_report() into report.c Andrew Morton
2020-06-02 19:05 ` + mm-slub-fix-a-memory-leak-in-sysfs_slab_add.patch added to -mm tree Andrew Morton
2020-06-02 19:14 ` [obsolete] sh-drop-config_mtd_m25p80-in-sh7757lcr_defconfig.patch removed from " Andrew Morton
2020-06-02 20:08 ` incoming Andrew Morton
2020-06-02 20:45   ` incoming Linus Torvalds
2020-06-02 21:38     ` incoming Andrew Morton
2020-06-02 22:18       ` incoming Linus Torvalds
2020-06-02 20:09 incoming Andrew Morton
2020-06-02 20:17 ` [patch 124/128] x86/mm: remove vmalloc faulting Andrew Morton
