From: Petr Tesarik <petrtesarik@huaweicloud.com>
To: Dave Hansen <dave.hansen@intel.com>
Cc: "Petr Tesařík" <petr@tesarici.cz>,
	"Petr Tesarik" <petrtesarik@huaweicloud.com>,
	"Jonathan Corbet" <corbet@lwn.net>,
	"Thomas Gleixner" <tglx@linutronix.de>,
	"Ingo Molnar" <mingo@redhat.com>,
	"Borislav Petkov" <bp@alien8.de>,
	"Dave Hansen" <dave.hansen@linux.intel.com>,
	"maintainer:X86 ARCHITECTURE (32-BIT AND 64-BIT)"
	<x86@kernel.org>, "H. Peter Anvin" <hpa@zytor.com>,
	"Andy Lutomirski" <luto@kernel.org>,
	"Oleg Nesterov" <oleg@redhat.com>,
	"Peter Zijlstra" <peterz@infradead.org>,
	"Xin Li" <xin3.li@intel.com>, "Arnd Bergmann" <arnd@arndb.de>,
	"Andrew Morton" <akpm@linux-foundation.org>,
	"Rick Edgecombe" <rick.p.edgecombe@intel.com>,
	"Kees Cook" <keescook@chromium.org>,
	"Masami Hiramatsu (Google)" <mhiramat@kernel.org>,
	"Pengfei Xu" <pengfei.xu@intel.com>,
	"Josh Poimboeuf" <jpoimboe@kernel.org>,
	"Ze Gao" <zegao2021@gmail.com>,
	"Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>,
	"Kai Huang" <kai.huang@intel.com>,
	"David Woodhouse" <dwmw@amazon.co.uk>,
	"Brian Gerst" <brgerst@gmail.com>,
	"Jason Gunthorpe" <jgg@ziepe.ca>,
	"Joerg Roedel" <jroedel@suse.de>,
	"Mike Rapoport (IBM)" <rppt@kernel.org>,
	"Tina Zhang" <tina.zhang@intel.com>,
	"Jacob Pan" <jacob.jun.pan@linux.intel.com>,
	"open list:DOCUMENTATION" <linux-doc@vger.kernel.org>,
	"open list" <linux-kernel@vger.kernel.org>,
	"Roberto Sassu" <roberto.sassu@huaweicloud.com>,
	"John Johansen" <john.johansen@canonical.com>,
	"Paul Moore" <paul@paul-moore.com>,
	"James Morris" <jmorris@namei.org>,
	"Serge E. Hallyn" <serge@hallyn.com>,
	apparmor@lists.ubuntu.com, linux-security-module@vger.kernel.org,
	"Petr Tesarik" <petr.tesarik1@huawei-partners.com>
Subject: [RFC 4/5] sbm: fix up calls to dynamic memory allocators
Date: Thu, 22 Feb 2024 14:12:29 +0100
Message-ID: <20240222131230.635-5-petrtesarik@huaweicloud.com>
In-Reply-To: <20240222131230.635-1-petrtesarik@huaweicloud.com>

From: Petr Tesarik <petr.tesarik1@huawei-partners.com>

Add fixup handlers that call kmalloc(), vmalloc() and friends on behalf
of sandbox code.  Each proxy performs the actual allocation in kernel
mode and maps the resulting region into the sandbox (PAGE_SHARED), so
the sandboxed caller can use the returned memory directly.  Freeing goes
through a proxy as well; unmapping the freed region from the sandbox is
left as a TODO.

Signed-off-by: Petr Tesarik <petr.tesarik1@huawei-partners.com>
---
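A minimal sketch of what these fixups enable, for illustration only: the
demo_sandbox_parse() callback below and its int (*)(void *) signature are
made up and are not part of this patch or of the SBM API.  A kmalloc() or
kfree() call issued from sandbox mode faults; the sandbox fault fixup
dispatches it to the matching proxy handler added here, which performs the
real operation in kernel mode and maps any newly allocated region into the
sandbox before returning.

#include <linux/slab.h>
#include <linux/string.h>

/* Hypothetical sandbox callback, for illustration only. */
static int demo_sandbox_parse(void *arg)
{
	char *buf;

	/* Routed through one of the kmalloc fixups in this patch. */
	buf = kmalloc(128, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	/* The region was mapped PAGE_SHARED, so it is writable here. */
	memset(buf, 0, 128);

	/* Routed through proxy_free(); unmapping is still a TODO. */
	kfree(buf);
	return 0;
}
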
 arch/x86/kernel/sbm/core.c | 81 ++++++++++++++++++++++++++++++++++++++
 mm/slab_common.c           |  3 +-
 mm/slub.c                  | 17 ++++----
 mm/vmalloc.c               | 11 +++---
 4 files changed, 98 insertions(+), 14 deletions(-)

diff --git a/arch/x86/kernel/sbm/core.c b/arch/x86/kernel/sbm/core.c
index c8ac7ecb08cc..3cf3842292b9 100644
--- a/arch/x86/kernel/sbm/core.c
+++ b/arch/x86/kernel/sbm/core.c
@@ -20,6 +20,12 @@
 #include <linux/sbm.h>
 #include <linux/sched/task_stack.h>
 
+/*
+ * FIXME: Remove these includes when there is proper API for defining
+ * which functions can be called from sandbox mode.
+ */
+#include <linux/vmalloc.h>
+
 #define GFP_SBM_PGTABLE	(GFP_KERNEL | __GFP_ZERO)
 #define PGD_ORDER	get_order(sizeof(pgd_t) * PTRS_PER_PGD)
 
@@ -52,8 +58,83 @@ struct sbm_fixup {
 	sbm_proxy_call_fn proxy;
 };
 
+static int map_range(struct x86_sbm_state *state, unsigned long start,
+		     unsigned long end, pgprot_t prot);
+
+/* Map the newly allocated dynamic memory region. */
+static unsigned long post_alloc(struct x86_sbm_state *state,
+				unsigned long objp, size_t size)
+{
+	int err;
+
+	if (!objp)
+		return objp;
+
+	err = map_range(state, objp, objp + size, PAGE_SHARED);
+	if (err) {
+		kvfree((void *)objp);
+		return 0UL;
+	}
+	return objp;
+}
+
+/* Allocation proxy handler if size is the 1st parameter. */
+static unsigned long proxy_alloc1(struct x86_sbm_state *state,
+				    unsigned long func, struct pt_regs *regs)
+{
+	unsigned long objp;
+
+	objp = x86_sbm_proxy_call(state, func, regs);
+	return post_alloc(state, objp, regs->di);
+}
+
+/* Allocation proxy handler if size is the 2nd parameter. */
+static unsigned long proxy_alloc2(struct x86_sbm_state *state,
+				    unsigned long func, struct pt_regs *regs)
+{
+	unsigned long objp;
+
+	objp = x86_sbm_proxy_call(state, func, regs);
+	return post_alloc(state, objp, regs->si);
+}
+
+/* Allocation proxy handler if size is the 3rd parameter. */
+static unsigned long proxy_alloc3(struct x86_sbm_state *state,
+				    unsigned long func, struct pt_regs *regs)
+{
+	unsigned long objp;
+
+	objp = x86_sbm_proxy_call(state, func, regs);
+	return post_alloc(state, objp, regs->dx);
+}
+
+/* Proxy handler to free previously allocated memory. */
+static unsigned long proxy_free(struct x86_sbm_state *state,
+				unsigned long func, struct pt_regs *regs)
+{
+	/* TODO: unmap allocated addresses from sandbox! */
+	return x86_sbm_proxy_call(state, func, regs);
+}
+
 static const struct sbm_fixup fixups[] =
 {
+	/* kmalloc() and friends */
+	{ kmalloc_trace, proxy_alloc3 },
+	{ __kmalloc, proxy_alloc1 },
+	{ __kmalloc_node, proxy_alloc1 },
+	{ __kmalloc_node_track_caller, proxy_alloc1 },
+	{ kmalloc_large, proxy_alloc1 },
+	{ kmalloc_large_node, proxy_alloc1 },
+	{ krealloc, proxy_alloc2 },
+	{ kfree, proxy_free },
+
+	/* vmalloc() and friends */
+	{ vmalloc, proxy_alloc1 },
+	{ __vmalloc, proxy_alloc1 },
+	{ __vmalloc_node, proxy_alloc1 },
+	{ vzalloc, proxy_alloc1 },
+	{ vfree, proxy_free },
+
 	{ }
 };
 
diff --git a/mm/slab_common.c b/mm/slab_common.c
index 238293b1dbe1..2b72118d9bfa 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -28,6 +28,7 @@
 #include <asm/page.h>
 #include <linux/memcontrol.h>
 #include <linux/stackdepot.h>
+#include <linux/sbm.h>
 
 #include "internal.h"
 #include "slab.h"
@@ -1208,7 +1209,7 @@ __do_krealloc(const void *p, size_t new_size, gfp_t flags)
  *
  * Return: pointer to the allocated memory or %NULL in case of error
  */
-void *krealloc(const void *p, size_t new_size, gfp_t flags)
+void * __nosbm krealloc(const void *p, size_t new_size, gfp_t flags)
 {
 	void *ret;
 
diff --git a/mm/slub.c b/mm/slub.c
index 2ef88bbf56a3..5f2290fe4df0 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -42,6 +42,7 @@
 #include <kunit/test.h>
 #include <kunit/test-bug.h>
 #include <linux/sort.h>
+#include <linux/sbm.h>
 
 #include <linux/debugfs.h>
 #include <trace/events/kmem.h>
@@ -3913,7 +3914,7 @@ EXPORT_SYMBOL(kmem_cache_alloc_node);
  * directly to the page allocator. We use __GFP_COMP, because we will need to
  * know the allocation order to free the pages properly in kfree.
  */
-static void *__kmalloc_large_node(size_t size, gfp_t flags, int node)
+static void * __nosbm __kmalloc_large_node(size_t size, gfp_t flags, int node)
 {
 	struct folio *folio;
 	void *ptr = NULL;
@@ -3938,7 +3939,7 @@ static void *__kmalloc_large_node(size_t size, gfp_t flags, int node)
 	return ptr;
 }
 
-void *kmalloc_large(size_t size, gfp_t flags)
+void * __nosbm kmalloc_large(size_t size, gfp_t flags)
 {
 	void *ret = __kmalloc_large_node(size, flags, NUMA_NO_NODE);
 
@@ -3983,26 +3984,26 @@ void *__do_kmalloc_node(size_t size, gfp_t flags, int node,
 	return ret;
 }
 
-void *__kmalloc_node(size_t size, gfp_t flags, int node)
+void * __nosbm __kmalloc_node(size_t size, gfp_t flags, int node)
 {
 	return __do_kmalloc_node(size, flags, node, _RET_IP_);
 }
 EXPORT_SYMBOL(__kmalloc_node);
 
-void *__kmalloc(size_t size, gfp_t flags)
+void * __nosbm __kmalloc(size_t size, gfp_t flags)
 {
 	return __do_kmalloc_node(size, flags, NUMA_NO_NODE, _RET_IP_);
 }
 EXPORT_SYMBOL(__kmalloc);
 
-void *__kmalloc_node_track_caller(size_t size, gfp_t flags,
-				  int node, unsigned long caller)
+void * __nosbm __kmalloc_node_track_caller(size_t size, gfp_t flags,
+					   int node, unsigned long caller)
 {
 	return __do_kmalloc_node(size, flags, node, caller);
 }
 EXPORT_SYMBOL(__kmalloc_node_track_caller);
 
-void *kmalloc_trace(struct kmem_cache *s, gfp_t gfpflags, size_t size)
+void * __nosbm kmalloc_trace(struct kmem_cache *s, gfp_t gfpflags, size_t size)
 {
 	void *ret = slab_alloc_node(s, NULL, gfpflags, NUMA_NO_NODE,
 					    _RET_IP_, size);
@@ -4386,7 +4387,7 @@ static void free_large_kmalloc(struct folio *folio, void *object)
  *
  * If @object is NULL, no operation is performed.
  */
-void kfree(const void *object)
+void __nosbm kfree(const void *object)
 {
 	struct folio *folio;
 	struct slab *slab;
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index d12a17fc0c17..d7a5b715ac03 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -40,6 +40,7 @@
 #include <linux/pgtable.h>
 #include <linux/hugetlb.h>
 #include <linux/sched/mm.h>
+#include <linux/sbm.h>
 #include <asm/tlbflush.h>
 #include <asm/shmparam.h>
 
@@ -2804,7 +2805,7 @@ void vfree_atomic(const void *addr)
  * if we have CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG, but making the calling
  * conventions for vfree() arch-dependent would be a really bad idea).
  */
-void vfree(const void *addr)
+void __nosbm vfree(const void *addr)
 {
 	struct vm_struct *vm;
 	int i;
@@ -3379,7 +3380,7 @@ void *__vmalloc_node_range(unsigned long size, unsigned long align,
  *
  * Return: pointer to the allocated memory or %NULL on error
  */
-void *__vmalloc_node(unsigned long size, unsigned long align,
+void * __nosbm __vmalloc_node(unsigned long size, unsigned long align,
 			    gfp_t gfp_mask, int node, const void *caller)
 {
 	return __vmalloc_node_range(size, align, VMALLOC_START, VMALLOC_END,
@@ -3394,7 +3395,7 @@ void *__vmalloc_node(unsigned long size, unsigned long align,
 EXPORT_SYMBOL_GPL(__vmalloc_node);
 #endif
 
-void *__vmalloc(unsigned long size, gfp_t gfp_mask)
+void * __nosbm __vmalloc(unsigned long size, gfp_t gfp_mask)
 {
 	return __vmalloc_node(size, 1, gfp_mask, NUMA_NO_NODE,
 				__builtin_return_address(0));
@@ -3413,7 +3414,7 @@ EXPORT_SYMBOL(__vmalloc);
  *
  * Return: pointer to the allocated memory or %NULL on error
  */
-void *vmalloc(unsigned long size)
+void * __nosbm vmalloc(unsigned long size)
 {
 	return __vmalloc_node(size, 1, GFP_KERNEL, NUMA_NO_NODE,
 				__builtin_return_address(0));
@@ -3453,7 +3454,7 @@ EXPORT_SYMBOL_GPL(vmalloc_huge);
  *
  * Return: pointer to the allocated memory or %NULL on error
  */
-void *vzalloc(unsigned long size)
+void * __nosbm vzalloc(unsigned long size)
 {
 	return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_ZERO, NUMA_NO_NODE,
 				__builtin_return_address(0));
-- 
2.34.1


Thread overview: 63+ messages
2024-02-14 11:35 [PATCH v1 0/8] x86_64 SandBox Mode arch hooks Petr Tesarik
2024-02-14 11:35 ` [PATCH v1 1/8] sbm: x86: page table " Petr Tesarik
2024-02-14 11:35 ` [PATCH v1 2/8] sbm: x86: execute target function on sandbox mode stack Petr Tesarik
2024-02-14 11:35 ` [PATCH v1 3/8] sbm: x86: map system data structures into the sandbox Petr Tesarik
2024-02-14 11:35 ` [PATCH v1 4/8] sbm: x86: allocate and map an exception stack Petr Tesarik
2024-02-14 11:35 ` [PATCH v1 5/8] sbm: x86: handle sandbox mode faults Petr Tesarik
2024-02-14 11:35 ` [PATCH v1 6/8] sbm: x86: switch to sandbox mode pages in arch_sbm_exec() Petr Tesarik
2024-02-14 11:35 ` [PATCH v1 7/8] sbm: documentation of the x86-64 SandBox Mode implementation Petr Tesarik
2024-02-14 18:37   ` Xin Li
2024-02-14 19:16     ` Petr Tesařík
2024-02-14 11:35 ` [PATCH v1 8/8] sbm: x86: lazy TLB flushing Petr Tesarik
2024-02-14 14:52 ` [PATCH v1 0/8] x86_64 SandBox Mode arch hooks Dave Hansen
2024-02-14 15:28   ` H. Peter Anvin
2024-02-14 16:41     ` Petr Tesařík
2024-02-14 17:29       ` H. Peter Anvin
2024-02-14 19:14         ` Petr Tesařík
2024-02-14 18:14       ` Edgecombe, Rick P
2024-02-14 18:32         ` Petr Tesařík
2024-02-14 19:19           ` Edgecombe, Rick P
2024-02-14 19:35             ` Petr Tesařík
2024-02-14 18:22   ` Petr Tesařík
2024-02-14 18:42     ` Dave Hansen
2024-02-14 19:33       ` Petr Tesařík
2024-02-14 20:16         ` Dave Hansen
2024-02-16 15:24           ` [RFC 0/8] PGP key parser using SandBox Mode Petr Tesarik
2024-02-16 15:24             ` [RFC 1/8] mpi: Introduce mpi_key_length() Petr Tesarik
2024-02-16 15:24             ` [RFC 2/8] rsa: add parser of raw format Petr Tesarik
2024-02-16 15:24             ` [RFC 3/8] PGPLIB: PGP definitions (RFC 4880) Petr Tesarik
2024-02-16 15:24             ` [RFC 4/8] PGPLIB: Basic packet parser Petr Tesarik
2024-02-16 15:24             ` [RFC 5/8] PGPLIB: Signature parser Petr Tesarik
2024-02-16 15:24             ` [RFC 6/8] KEYS: PGP data parser Petr Tesarik
2024-02-16 16:44               ` Matthew Wilcox
2024-02-16 16:53                 ` Roberto Sassu
2024-02-16 17:08                   ` H. Peter Anvin
2024-02-16 17:13                     ` Roberto Sassu
2024-02-20 10:55                     ` Petr Tesarik
2024-02-21 14:02                       ` H. Peter Anvin
2024-02-22  7:53                         ` Petr Tesařík
2024-02-16 18:44                   ` Matthew Wilcox
2024-02-16 19:54                     ` Roberto Sassu
2024-02-28 17:58                       ` Roberto Sassu
2024-02-16 15:24             ` [RFC 7/8] KEYS: Run PGP key parser in a sandbox Petr Tesarik
2024-02-18  6:07               ` kernel test robot
2024-02-18  8:02               ` kernel test robot
2024-02-16 15:24             ` [RFC 8/8] KEYS: Add intentional fault injection Petr Tesarik
2024-02-16 15:38             ` [RFC 0/8] PGP key parser using SandBox Mode Dave Hansen
2024-02-16 16:08               ` Petr Tesařík
2024-02-16 17:21                 ` Jonathan Corbet
2024-02-16 18:24                   ` Roberto Sassu
2024-02-22 13:12           ` [RFC 0/5] PoC: convert AppArmor parser to " Petr Tesarik
2024-02-22 13:12             ` [RFC 1/5] sbm: x86: fix SBM error entry path Petr Tesarik
2024-02-22 13:12             ` [RFC 2/5] sbm: enhance buffer mapping API Petr Tesarik
2024-02-22 13:12             ` [RFC 3/5] sbm: x86: infrastructure to fix up sandbox faults Petr Tesarik
2024-02-22 13:12             ` Petr Tesarik [this message]
2024-02-22 15:51               ` [RFC 4/5] sbm: fix up calls to dynamic memory allocators Dave Hansen
2024-02-22 17:57                 ` Petr Tesařík
2024-02-22 18:03                   ` Dave Hansen
2024-02-22 13:12             ` [RFC 5/5] apparmor: parse profiles in sandbox mode Petr Tesarik
2024-02-14 18:52     ` [PATCH v1 0/8] x86_64 SandBox Mode arch hooks Xin Li
2024-02-15  6:59       ` Petr Tesařík
2024-02-15  8:16         ` H. Peter Anvin
2024-02-15  9:30           ` Petr Tesařík
2024-02-15  9:37             ` Roberto Sassu
