From: Matthew Wilcox <willy@infradead.org>
To: Jann Horn <jannh@google.com>
Cc: yu-cheng.yu@intel.com, Andy Lutomirski <luto@amacapital.net>,
	the arch/x86 maintainers <x86@kernel.org>,
	"H . Peter Anvin" <hpa@zytor.com>,
	Thomas Gleixner <tglx@linutronix.de>,
	Ingo Molnar <mingo@redhat.com>,
	kernel list <linux-kernel@vger.kernel.org>,
	linux-doc@vger.kernel.org, Linux-MM <linux-mm@kvack.org>,
	linux-arch <linux-arch@vger.kernel.org>,
	Linux API <linux-api@vger.kernel.org>,
	Arnd Bergmann <arnd@arndb.de>,
	Balbir Singh <bsingharora@gmail.com>,
	Cyrill Gorcunov <gorcunov@gmail.com>,
	Dave Hansen <dave.hansen@linux.intel.com>,
	Eugene Syromiatnikov <esyr@redhat.com>,
	Florian Weimer <fweimer@redhat.com>,
	hjl.tools@gmail.com, Jonathan Corbet <corbet@lwn.net>,
	Kees Cook <keescook@chromium.org>,
	Mike Kravetz <mike.kravetz@oracle.com>,
	Nadav Amit <nadav.amit@gmail.com>,
	Oleg Nesterov <oleg@redhat.com>, Pavel Machek <pavel@ucw.cz>,
	Peter Zijlstra <peterz@infradead.org>,
	rdunlap@infradead.org, ravi.v.shankar@intel.com,
	vedvyas.shanbhogue@intel.com,
	Daniel Micay <danielmicay@gmail.com>
Subject: Re: [PATCH v5 07/27] mm/mmap: Create a guard area between VMAs
Date: Fri, 12 Oct 2018 06:17:28 -0700
Message-ID: <20181012131728.GA28309@bombadil.infradead.org>
In-Reply-To: <CAG48ez3R7XL8MX_sjff1FFYuARX_58wA_=ACbv2im-XJKR8tvA@mail.gmail.com>

On Thu, Oct 11, 2018 at 10:39:24PM +0200, Jann Horn wrote:
> Sorry to bring this up so late, but Daniel Micay pointed out to me
> that, given that VMA guards will raise the number of VMAs by
> inhibiting vma_merge(), people are more likely to run into
> /proc/sys/vm/max_map_count (which limits the number of VMAs to ~65k by
> default, and can't easily be raised without risking an overflow of
> page->_mapcount on systems with over ~800GiB of RAM, see
> https://lore.kernel.org/lkml/20180208021112.GB14918@bombadil.infradead.org/
> and replies) with this change.
> 
[...]
> 
> Arguably the proper solution to this would be to raise the default
> max_map_count to be much higher; but then that requires fixing the
> mapcount overflow.
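
To make the limit concrete, here is a minimal userspace sketch (mine,
not part of any patch) that splits VMAs until the kernel refuses.
Every PROT_NONE hole defeats vma_merge() -- the same effect the
proposed guard areas have at scale -- and mprotect() starts failing
with ENOMEM once map_count reaches sysctl_max_map_count
(DEFAULT_MAX_MAP_COUNT is 65530):

	#include <stdio.h>
	#include <string.h>
	#include <errno.h>
	#include <unistd.h>
	#include <sys/mman.h>

	int main(void)
	{
		size_t page = (size_t)sysconf(_SC_PAGESIZE);
		size_t npages = 200000, i;
		char *p = mmap(NULL, npages * page, PROT_READ | PROT_WRITE,
			       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

		if (p == MAP_FAILED)
			return 1;

		/* each interior hole splits one VMA into three: net +2 */
		for (i = 1; i < npages; i += 2) {
			if (mprotect(p + i * page, page, PROT_NONE)) {
				printf("gave up after %zu holes: %s\n",
				       i / 2, strerror(errno));
				return 0;
			}
		}
		return 0;
	}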

I have a fix that nobody has had any particular reaction to:

diff --git a/mm/internal.h b/mm/internal.h
index 7059a8389194..977852b8329e 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -97,6 +97,11 @@ extern void putback_lru_page(struct page *page);
  */
 extern pmd_t *mm_find_pmd(struct mm_struct *mm, unsigned long address);
 
+#ifdef CONFIG_64BIT
+extern void mm_mapcount_overflow(struct page *page);
+#else
+static inline void mm_mapcount_overflow(struct page *page) { }
+#endif
 /*
  * in mm/page_alloc.c
  */
diff --git a/mm/mmap.c b/mm/mmap.c
index 9efdc021ad22..575766ec02f8 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -1315,6 +1315,122 @@ static inline int mlock_future_check(struct mm_struct *mm,
 	return 0;
 }
 
+#ifdef CONFIG_64BIT
+/*
+ * Machines with more than 2TB of memory can create enough VMAs to overflow
+ * page->_mapcount if they all point to the same page.  32-bit machines do
+ * not need to be concerned.
+ */
+/*
+ * Experimentally determined.  gnome-shell currently uses fewer than
+ * 3000 mappings, so should have zero effect on desktop users.
+ */
+#define mm_track_threshold	5000
+static DEFINE_SPINLOCK(heavy_users_lock);
+static DEFINE_IDR(heavy_users);
+
+static void mmap_track_user(struct mm_struct *mm, int max)
+{
+	struct mm_struct *entry;
+	unsigned int id;
+
+	idr_preload(GFP_KERNEL);
+	spin_lock(&heavy_users_lock);
+	idr_for_each_entry(&heavy_users, entry, id) {
+		if (entry == mm)
+			break;
+		if (entry->map_count < mm_track_threshold)
+			idr_remove(&heavy_users, id);
+	}
+	if (!entry)
+		idr_alloc(&heavy_users, mm, 0, 0, GFP_ATOMIC);
+	spin_unlock(&heavy_users_lock);
+	idr_preload_end();
+}
+
+static void mmap_untrack_user(struct mm_struct *mm)
+{
+	struct mm_struct *entry;
+	unsigned int id;
+
+	spin_lock(&heavy_users_lock);
+	idr_for_each_entry(&heavy_users, entry, id) {
+		if (entry == mm) {
+			idr_remove(&heavy_users, id);
+			break;
+		}
+	}
+	spin_unlock(&heavy_users_lock);
+}
+
+static void kill_mm(struct task_struct *tsk)
+{
+	/* Tear down the mappings first */
+	do_send_sig_info(SIGKILL, SEND_SIG_FORCED, tsk, true);
+}
+
+static void kill_abuser(struct mm_struct *mm)
+{
+	struct task_struct *tsk;
+
+	/* for_each_process() needs RCU (or tasklist_lock) held */
+	rcu_read_lock();
+	for_each_process(tsk)
+		if (tsk->mm == mm)
+			break;
+
+	if (down_write_trylock(&mm->mmap_sem)) {
+		kill_mm(tsk);
+		up_write(&mm->mmap_sem);
+	} else {
+		do_send_sig_info(SIGKILL, SEND_SIG_FORCED, tsk, true);
+	}
+	rcu_read_unlock();
+}
+
+void mm_mapcount_overflow(struct page *page)
+{
+	struct mm_struct *entry = current->mm;
+	unsigned int id;
+	struct vm_area_struct *vma;
+	struct address_space *mapping = page_mapping(page);
+	unsigned long pgoff = page_to_pgoff(page);
+	unsigned int count = 0;
+
+	vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff + 1) {
+		if (vma->vm_mm == entry)
+			count++;
+		if (count > 1000) {
+			kill_mm(current);
+			return;
+		}
+	}
+
+	rcu_read_lock();
+	idr_for_each_entry(&heavy_users, entry, id) {
+		count = 0;
+
+		vma_interval_tree_foreach(vma, &mapping->i_mmap,
+				pgoff, pgoff + 1) {
+			if (vma->vm_mm == entry)
+				count++;
+			if (count > 1000) {
+				kill_abuser(entry);
+				goto out;
+			}
+		}
+	}
+	if (!entry)
+		panic("No abusers found but mapcount exceeded\n");
+out:
+	rcu_read_unlock();
+}
+#else
+#define mm_track_threshold	INT_MAX	/* do_mmap() check compiles away */
+static void mmap_track_user(struct mm_struct *mm, int max) { }
+static void mmap_untrack_user(struct mm_struct *mm) { }
+#endif
+
 /*
  * The caller must hold down_write(&current->mm->mmap_sem).
  */
@@ -1357,6 +1466,8 @@ unsigned long do_mmap(struct file *file, unsigned long addr,
 	/* Too many mappings? */
 	if (mm->map_count > sysctl_max_map_count)
 		return -ENOMEM;
+	if (mm->map_count > mm_track_threshold)
+		mmap_track_user(mm, mm_track_threshold);
 
 	/* Obtain the address to map to. we verify (or select) it and ensure
 	 * that it represents a valid section of the address space.
@@ -2997,6 +3108,8 @@ void exit_mmap(struct mm_struct *mm)
 	/* mm's last user has gone, and its about to be pulled down */
 	mmu_notifier_release(mm);
 
+	mmap_untrack_user(mm);
+
 	if (mm->locked_vm) {
 		vma = mm->mmap;
 		while (vma) {
diff --git a/mm/rmap.c b/mm/rmap.c
index 47db27f8049e..d88acf5c98e9 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1190,6 +1190,7 @@ void page_add_file_rmap(struct page *page, bool compound)
 		VM_BUG_ON_PAGE(!PageSwapBacked(page), page);
 		__inc_node_page_state(page, NR_SHMEM_PMDMAPPED);
 	} else {
+		int v;
 		if (PageTransCompound(page) && page_mapping(page)) {
 			VM_WARN_ON_ONCE(!PageLocked(page));
 
@@ -1197,8 +1198,13 @@ void page_add_file_rmap(struct page *page, bool compound)
 			if (PageMlocked(page))
 				clear_page_mlock(compound_head(page));
 		}
-		if (!atomic_inc_and_test(&page->_mapcount))
+		v = atomic_inc_return(&page->_mapcount);
+		if (likely(v > 0))
 			goto out;
+		if (unlikely(v < 0)) {
+			mm_mapcount_overflow(page);
+			goto out;
+		}
 	}
 	__mod_lruvec_page_state(page, NR_FILE_MAPPED, nr);
 out:
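
A note on the "more than 2TB" comment in the patch, since the
threshold keeps coming up in this thread (rough arithmetic, mine, not
from the patch): _mapcount is a 32-bit atomic_t biased to -1, so
wrapping it takes about 2^31 mappings of a single page.  Each mapping
costs at least a vm_area_struct plus page-table and mm overhead, call
it a few hundred bytes, so roughly:

	2^31 mappings * ~400 bytes/mapping ~= 800GiB

which is where the ~800GiB figure in Jann's linked thread comes from;
a more conservative per-mapping estimate lands in the 2TB range.
Either way, a 32-bit machine exhausts its address space long before
the counter can wrap.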

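To make the page_add_file_rmap() change easier to review, a userspace
model (mine; the strings are made up) of the three states the
atomic_inc_return() value distinguishes, assuming the usual convention
that _mapcount starts at -1:

	#include <stdio.h>
	#include <limits.h>

	/* v == 0: first mapping of the page, account NR_FILE_MAPPED.
	 * v >  0: page was already mapped, nothing to account.
	 * v <  0: the counter wrapped past INT_MAX -- the overflow the
	 *         patch now reports instead of silently corrupting. */
	static const char *classify(int v)
	{
		if (v == 0)
			return "first mapping: bump NR_FILE_MAPPED";
		if (v > 0)
			return "already mapped: goto out";
		return "wrapped: mm_mapcount_overflow()";
	}

	int main(void)
	{
		int mapcount = -1;

		printf("%s\n", classify(++mapcount));	/* -1 -> 0 */
		printf("%s\n", classify(++mapcount));	/*  0 -> 1 */
		/* atomic_inc() wraps INT_MAX to INT_MIN in the kernel;
		 * set the wrapped value directly to avoid signed
		 * overflow, which is undefined in plain C */
		mapcount = INT_MIN;
		printf("%s\n", classify(mapcount));
		return 0;
	}
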
Thread overview: 80+ messages
2018-10-11 15:14 [PATCH v5 00/27] Control Flow Enforcement: Shadow Stack Yu-cheng Yu
2018-10-11 15:14 ` [PATCH v5 01/27] x86/cpufeatures: Add CPUIDs for Control Flow Enforcement Technology (CET) Yu-cheng Yu
2018-10-11 16:43   ` Borislav Petkov
2018-10-11 16:45     ` Yu-cheng Yu
2018-10-11 15:14 ` [PATCH v5 02/27] x86/fpu/xstate: Change names to separate XSAVES system and user states Yu-cheng Yu
2018-10-15 17:03   ` Borislav Petkov
2018-10-11 15:14 ` [PATCH v5 03/27] x86/fpu/xstate: Introduce XSAVES system states Yu-cheng Yu
2018-10-17 10:41   ` Borislav Petkov
2018-10-17 22:39     ` Randy Dunlap
2018-10-17 22:58       ` Borislav Petkov
2018-10-17 23:17         ` Randy Dunlap
2018-10-18  9:26           ` Borislav Petkov
2018-10-18  9:31             ` Pavel Machek
2018-10-18 12:10               ` Borislav Petkov
2018-10-18 18:33             ` Randy Dunlap
2018-10-18  9:24         ` Pavel Machek
2018-10-11 15:15 ` [PATCH v5 04/27] x86/fpu/xstate: Add XSAVES system states for shadow stack Yu-cheng Yu
2018-11-08 18:40   ` Borislav Petkov
2018-11-08 20:40     ` Yu-cheng Yu
2018-11-08 23:52       ` Borislav Petkov
2018-11-11 11:31       ` Pavel Machek
2018-11-11 11:31     ` Pavel Machek
2018-11-11 14:59       ` Andy Lutomirski
2018-11-11 19:02         ` Pavel Machek
2018-11-08 20:46   ` Andy Lutomirski
2018-11-08 21:01     ` Yu-cheng Yu
2018-11-08 21:22       ` Andy Lutomirski
2018-11-08 21:31         ` Cyrill Gorcunov
2018-11-08 22:01           ` Andy Lutomirski
2018-11-08 22:18             ` Cyrill Gorcunov
2018-11-08 21:48         ` Dave Hansen
2018-11-08 22:00           ` Matthew Wilcox
2018-11-08 23:35             ` Dave Hansen
2018-11-09  0:32               ` Matthew Wilcox
2018-11-09  0:45                 ` Andy Lutomirski
2018-11-09 17:13                 ` Dave Hansen
2018-11-09 17:17                   ` Matthew Wilcox
2018-11-09 17:20                     ` Dave Hansen
2018-11-09 17:28                       ` Dave Hansen
2018-11-11 11:31         ` Pavel Machek
2018-10-11 15:15 ` [PATCH v5 05/27] Documentation/x86: Add CET description Yu-cheng Yu
2018-11-13 18:43   ` Borislav Petkov
2018-11-13 21:02     ` Yu-cheng Yu
2018-10-11 15:15 ` [PATCH v5 06/27] x86/cet: Control protection exception handler Yu-cheng Yu
2018-11-14 18:44   ` Borislav Petkov
2018-11-14 20:19     ` Yu-cheng Yu
2018-11-14 20:28       ` Borislav Petkov
2018-10-11 15:15 ` [PATCH v5 07/27] mm/mmap: Create a guard area between VMAs Yu-cheng Yu
2018-10-11 20:39   ` Jann Horn
2018-10-11 20:49     ` Yu-cheng Yu
2018-10-11 20:55     ` Andy Lutomirski
2018-10-12 21:49       ` Yu-cheng Yu
2018-10-12 13:17     ` Matthew Wilcox [this message]
2018-10-11 20:49   ` Dave Hansen
2018-10-12 10:24     ` Florian Weimer
2018-10-11 15:15 ` [PATCH v5 08/27] x86/cet/shstk: Add Kconfig option for user-mode shadow stack Yu-cheng Yu
2018-10-11 15:15 ` [PATCH v5 09/27] mm: Introduce VM_SHSTK for shadow stack memory Yu-cheng Yu
2018-10-11 15:15 ` [PATCH v5 10/27] mm/mmap: Prevent Shadow Stack VMA merges Yu-cheng Yu
2018-10-11 15:15 ` [PATCH v5 11/27] x86/mm: Change _PAGE_DIRTY to _PAGE_DIRTY_HW Yu-cheng Yu
2018-10-11 15:15 ` [PATCH v5 12/27] x86/mm: Introduce _PAGE_DIRTY_SW Yu-cheng Yu
2018-10-11 15:15 ` [PATCH v5 13/27] drm/i915/gvt: Update _PAGE_DIRTY to _PAGE_DIRTY_BITS Yu-cheng Yu
2018-10-11 15:15 ` [PATCH v5 14/27] x86/mm: Modify ptep_set_wrprotect and pmdp_set_wrprotect for _PAGE_DIRTY_SW Yu-cheng Yu
2018-10-11 15:15 ` [PATCH v5 15/27] x86/mm: Shadow stack page fault error checking Yu-cheng Yu
2018-10-11 15:15 ` [PATCH v5 16/27] mm: Handle shadow stack page fault Yu-cheng Yu
2018-10-11 15:15 ` [PATCH v5 17/27] mm: Handle THP/HugeTLB " Yu-cheng Yu
2018-10-11 15:15 ` [PATCH v5 18/27] mm: Update can_follow_write_pte/pmd for shadow stack Yu-cheng Yu
2018-10-11 15:15 ` [PATCH v5 19/27] mm: Introduce do_mmap_locked() Yu-cheng Yu
2018-10-11 15:15 ` [PATCH v5 20/27] x86/cet/shstk: User-mode shadow stack support Yu-cheng Yu
2018-10-11 15:15 ` [PATCH v5 21/27] x86/cet/shstk: Introduce WRUSS instruction Yu-cheng Yu
2018-11-06 18:43   ` Dave Hansen
2018-11-06 18:55     ` Andy Lutomirski
2018-11-06 20:21     ` Yu-cheng Yu
2018-10-11 15:15 ` [PATCH v5 22/27] x86/cet/shstk: Signal handling for shadow stack Yu-cheng Yu
2018-10-11 15:15 ` [PATCH v5 23/27] x86/cet/shstk: ELF header parsing of Shadow Stack Yu-cheng Yu
2018-10-11 15:15 ` [PATCH v5 24/27] x86/cet/shstk: Handle thread shadow stack Yu-cheng Yu
2018-10-11 15:15 ` [PATCH v5 25/27] mm/mmap: Add Shadow stack pages to memory accounting Yu-cheng Yu
2018-10-11 15:15 ` [PATCH v5 26/27] x86/cet/shstk: Add arch_prctl functions for Shadow Stack Yu-cheng Yu
2018-10-11 15:15 ` [PATCH v5 27/27] x86/cet/shstk: Add Shadow Stack instructions to opcode map Yu-cheng Yu
2018-10-11 19:21 ` [PATCH v5 00/27] Control Flow Enforcement: Shadow Stack Dave Hansen
2018-10-11 19:29   ` Yu-cheng Yu
