From: jeffxu@chromium.org
To: akpm@linux-foundation.org, keescook@chromium.org,
jannh@google.com, sroettger@google.com, willy@infradead.org,
gregkh@linuxfoundation.org, torvalds@linux-foundation.org
Cc: jeffxu@google.com, jorgelo@chromium.org, groeck@chromium.org,
linux-kernel@vger.kernel.org, linux-kselftest@vger.kernel.org,
linux-mm@kvack.org, surenb@google.com, alex.sierra@amd.com,
apopple@nvidia.com, aneesh.kumar@linux.ibm.com,
axelrasmussen@google.com, ben@decadent.org.uk,
catalin.marinas@arm.com, david@redhat.com, dwmw@amazon.co.uk,
ying.huang@intel.com, hughd@google.com, joey.gouly@arm.com,
corbet@lwn.net, wangkefeng.wang@huawei.com,
Liam.Howlett@oracle.com, lstoakes@gmail.com,
mawupeng1@huawei.com, linmiaohe@huawei.com, namit@vmware.com,
peterx@redhat.com, peterz@infradead.org, ryan.roberts@arm.com,
shr@devkernel.io, vbabka@suse.cz, xiujianfeng@huawei.com,
yu.ma@intel.com, zhangpeng362@huawei.com, dave.hansen@intel.com,
luto@kernel.org, linux-hardening@vger.kernel.org
Subject: [RFC PATCH v2 5/8] mseal: Check seal flag for munmap(2)
Date: Tue, 17 Oct 2023 09:08:12 +0000
Message-ID: <20231017090815.1067790-6-jeffxu@chromium.org>
In-Reply-To: <20231017090815.1067790-1-jeffxu@chromium.org>
From: Jeff Xu <jeffxu@google.com>
munmap(2) unmaps the VMAs in the given address range.
Sealing prevents unintended munmap(2) calls.
What this patch does:
When munmap(2) is invoked, if any VMA in the target range has
MM_SEAL_MUNMAP set from a previous mseal(2) call, the munmap(2)
fails with EACCES and no VMA is modified.
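For illustration, a minimal userspace sketch of the intended
behavior, in the spirit of the selftests in patch 8/8. The
mseal(2) syscall number and the MM_SEAL_MUNMAP value below are
assumptions taken from earlier patches in this series and may
change in later revisions:

  #define _GNU_SOURCE
  #include <assert.h>
  #include <errno.h>
  #include <sys/mman.h>
  #include <sys/syscall.h>
  #include <unistd.h>

  /* Assumed values, taken from patches 1/8 and 2/8 of this
   * series; treat them as placeholders. */
  #ifndef __NR_mseal
  #define __NR_mseal 462
  #endif
  #ifndef MM_SEAL_MUNMAP
  #define MM_SEAL_MUNMAP 0x1
  #endif

  int main(void)
  {
          size_t len = (size_t)sysconf(_SC_PAGESIZE);
          void *p;

          p = mmap(NULL, len, PROT_READ | PROT_WRITE,
                   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
          assert(p != MAP_FAILED);

          /* Seal the mapping against munmap(2). */
          assert(syscall(__NR_mseal, p, len, MM_SEAL_MUNMAP, 0) == 0);

          /* With this patch, munmap(2) on a sealed VMA fails
           * with EACCES and the mapping is left intact. */
          assert(munmap(p, len) == -1 && errno == EACCES);
          return 0;
  }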
This patch is based on the following:
1. At the syscall entry point, SYSCALL_DEFINE2(munmap, ...),
pass checkSeals = MM_SEAL_MUNMAP into __vm_munmap() and,
in turn, into do_vmi_munmap().
Of all the call paths into do_vmi_munmap(), this is the only
one that passes checkSeals = MM_SEAL_MUNMAP; the rest pass
checkSeals = 0.
2. In do_vmi_munmap(), call can_modify_mm() before any update
is made to the VMAs (a sketch of this check follows below).
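For reference, a minimal sketch of the can_modify_mm() helper
introduced in patch 3/8 of this series. The body here is an
assumption of what that patch does, including the can_modify_vma()
per-VMA check; see 3/8 for the authoritative version:

  /*
   * Sketch: return true only if every VMA in [start, end)
   * allows the operations named in checkSeals; intended to be
   * called before any VMA in the range is modified.
   */
  bool can_modify_mm(struct mm_struct *mm, unsigned long start,
                     unsigned long end, unsigned long checkSeals)
  {
          struct vm_area_struct *vma;
          VMA_ITERATOR(vmi, mm, start);

          if (!checkSeals)
                  return true;

          for_each_vma_range(vmi, vma, end) {
                  if (!can_modify_vma(vma, checkSeals))
                          return false;
          }

          return true;
  }

Because the check runs before arch_unmap() and before any VMA is
split, a failed munmap(2) on a sealed range leaves the address
space untouched.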
Signed-off-by: Jeff Xu <jeffxu@google.com>
---
include/linux/mm.h | 2 +-
mm/mmap.c | 21 +++++++++++++--------
mm/mremap.c | 5 +++--
3 files changed, 17 insertions(+), 11 deletions(-)
diff --git a/include/linux/mm.h b/include/linux/mm.h
index b09df8501987..f2f316522f2a 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -3279,7 +3279,7 @@ extern unsigned long do_mmap(struct file *file, unsigned long addr,
unsigned long pgoff, unsigned long *populate, struct list_head *uf);
extern int do_vmi_munmap(struct vma_iterator *vmi, struct mm_struct *mm,
unsigned long start, size_t len, struct list_head *uf,
- bool unlock);
+ bool unlock, unsigned long checkSeals);
extern int do_munmap(struct mm_struct *, unsigned long, size_t,
struct list_head *uf);
extern int do_madvise(struct mm_struct *mm, unsigned long start, size_t len_in, int behavior);
diff --git a/mm/mmap.c b/mm/mmap.c
index 414ac31aa9fa..62d592f16f45 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -2601,6 +2601,7 @@ do_vmi_align_munmap(struct vma_iterator *vmi, struct vm_area_struct *vma,
* @len: The length of the range to munmap
* @uf: The userfaultfd list_head
* @unlock: set to true if the user wants to drop the mmap_lock on success
+ * @checkSeals: seal type to check.
*
* This function takes a @mas that is either pointing to the previous VMA or set
* to MA_START and sets it up to remove the mapping(s). The @len will be
@@ -2611,7 +2612,7 @@ do_vmi_align_munmap(struct vma_iterator *vmi, struct vm_area_struct *vma,
*/
int do_vmi_munmap(struct vma_iterator *vmi, struct mm_struct *mm,
unsigned long start, size_t len, struct list_head *uf,
- bool unlock)
+ bool unlock, unsigned long checkSeals)
{
unsigned long end;
struct vm_area_struct *vma;
@@ -2623,6 +2624,9 @@ int do_vmi_munmap(struct vma_iterator *vmi, struct mm_struct *mm,
if (end == start)
return -EINVAL;
+ if (!can_modify_mm(mm, start, end, checkSeals))
+ return -EACCES;
+
/* arch_unmap() might do unmaps itself. */
arch_unmap(mm, start, end);
@@ -2650,7 +2654,7 @@ int do_munmap(struct mm_struct *mm, unsigned long start, size_t len,
{
VMA_ITERATOR(vmi, mm, start);
- return do_vmi_munmap(&vmi, mm, start, len, uf, false);
+ return do_vmi_munmap(&vmi, mm, start, len, uf, false, 0);
}
unsigned long mmap_region(struct file *file, unsigned long addr,
@@ -2684,7 +2688,7 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
}
/* Unmap any existing mapping in the area */
- if (do_vmi_munmap(&vmi, mm, addr, len, uf, false))
+ if (do_vmi_munmap(&vmi, mm, addr, len, uf, false, 0))
return -ENOMEM;
/*
@@ -2909,7 +2913,8 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
return error;
}
-static int __vm_munmap(unsigned long start, size_t len, bool unlock)
+static int __vm_munmap(unsigned long start, size_t len, bool unlock,
+ unsigned long checkSeals)
{
int ret;
struct mm_struct *mm = current->mm;
@@ -2919,7 +2924,7 @@ static int __vm_munmap(unsigned long start, size_t len, bool unlock)
if (mmap_write_lock_killable(mm))
return -EINTR;
- ret = do_vmi_munmap(&vmi, mm, start, len, &uf, unlock);
+ ret = do_vmi_munmap(&vmi, mm, start, len, &uf, unlock, checkSeals);
if (ret || !unlock)
mmap_write_unlock(mm);
@@ -2929,14 +2934,14 @@ static int __vm_munmap(unsigned long start, size_t len, bool unlock)
int vm_munmap(unsigned long start, size_t len)
{
- return __vm_munmap(start, len, false);
+ return __vm_munmap(start, len, false, 0);
}
EXPORT_SYMBOL(vm_munmap);
SYSCALL_DEFINE2(munmap, unsigned long, addr, size_t, len)
{
addr = untagged_addr(addr);
- return __vm_munmap(addr, len, true);
+ return __vm_munmap(addr, len, true, MM_SEAL_MUNMAP);
}
@@ -3168,7 +3173,7 @@ int vm_brk_flags(unsigned long addr, unsigned long request, unsigned long flags)
if (ret)
goto limits_failed;
- ret = do_vmi_munmap(&vmi, mm, addr, len, &uf, 0);
+ ret = do_vmi_munmap(&vmi, mm, addr, len, &uf, 0, 0);
if (ret)
goto munmap_failed;
diff --git a/mm/mremap.c b/mm/mremap.c
index 056478c106ee..ac363937f8c4 100644
--- a/mm/mremap.c
+++ b/mm/mremap.c
@@ -715,7 +715,8 @@ static unsigned long move_vma(struct vm_area_struct *vma,
}
vma_iter_init(&vmi, mm, old_addr);
- if (!do_vmi_munmap(&vmi, mm, old_addr, old_len, uf_unmap, false)) {
+ if (!do_vmi_munmap(&vmi, mm, old_addr, old_len, uf_unmap, false,
+ 0)) {
/* OOM: unable to split vma, just get accounts right */
if (vm_flags & VM_ACCOUNT && !(flags & MREMAP_DONTUNMAP))
vm_acct_memory(old_len >> PAGE_SHIFT);
@@ -1009,7 +1010,7 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
}
ret = do_vmi_munmap(&vmi, mm, addr + new_len, old_len - new_len,
- &uf_unmap, true);
+ &uf_unmap, true, 0);
if (ret)
goto out;
--
2.42.0.655.g421f12c284-goog
Thread overview: 24+ messages
2023-10-17 9:08 [RFC PATCH v2 0/8] Introduce mseal() syscall jeffxu
2023-10-17 9:08 ` [RFC PATCH v2 1/8] mseal: Add mseal(2) syscall jeffxu
2023-10-17 15:45 ` Randy Dunlap
2023-10-17 9:08 ` [RFC PATCH v2 2/8] mseal: Wire up mseal syscall jeffxu
2023-10-17 9:08 ` [RFC PATCH v2 3/8] mseal: add can_modify_mm and can_modify_vma jeffxu
2023-10-17 9:08 ` [RFC PATCH v2 4/8] mseal: Check seal flag for mprotect(2) jeffxu
2023-10-17 9:08 ` jeffxu [this message]
2023-10-17 16:54 ` [RFC PATCH v2 5/8] mseal: Check seal flag for munmap(2) Linus Torvalds
2023-10-18 15:08 ` Jeff Xu
2023-10-18 17:14 ` Jeff Xu
2023-10-18 18:27 ` Linus Torvalds
2023-10-18 19:07 ` Jeff Xu
2023-10-17 9:08 ` [RFC PATCH v2 6/8] mseal: Check seal flag for mremap(2) jeffxu
2023-10-20 13:56 ` Muhammad Usama Anjum
2023-10-17 9:08 ` [RFC PATCH v2 7/8] mseal:Check seal flag for mmap(2) jeffxu
2023-10-17 17:04 ` Linus Torvalds
2023-10-17 17:43 ` Linus Torvalds
2023-10-18 7:01 ` Jeff Xu
2023-10-19 7:27 ` Stephen Röttger
2023-10-17 9:08 ` [RFC PATCH v2 8/8] selftest mm/mseal mprotect/munmap/mremap/mmap jeffxu
2023-10-20 14:24 ` Muhammad Usama Anjum
2023-10-20 15:23 ` Peter Zijlstra
2023-10-20 16:33 ` Muhammad Usama Anjum
2023-10-19 9:19 ` [RFC PATCH v2 0/8] Introduce mseal() syscall David Laight