From: Sean Christopherson <seanjc@google.com>
To: Paolo Bonzini <pbonzini@redhat.com>,
	Marc Zyngier <maz@kernel.org>,
	Oliver Upton <oliver.upton@linux.dev>,
	Huacai Chen <chenhuacai@kernel.org>,
	Michael Ellerman <mpe@ellerman.id.au>,
	Anup Patel <anup@brainfault.org>,
	Paul Walmsley <paul.walmsley@sifive.com>,
	Palmer Dabbelt <palmer@dabbelt.com>,
	Albert Ou <aou@eecs.berkeley.edu>,
	Sean Christopherson <seanjc@google.com>,
	Alexander Viro <viro@zeniv.linux.org.uk>,
	Christian Brauner <brauner@kernel.org>,
	"Matthew Wilcox (Oracle)" <willy@infradead.org>,
	Andrew Morton <akpm@linux-foundation.org>
Cc: kvm@vger.kernel.org, linux-arm-kernel@lists.infradead.org,
	kvmarm@lists.linux.dev, linux-mips@vger.kernel.org,
	linuxppc-dev@lists.ozlabs.org, kvm-riscv@lists.infradead.org,
	linux-riscv@lists.infradead.org, linux-fsdevel@vger.kernel.org,
	linux-mm@kvack.org, linux-kernel@vger.kernel.org,
	"Xiaoyao Li" <xiaoyao.li@intel.com>,
	"Xu Yilun" <yilun.xu@intel.com>,
	"Chao Peng" <chao.p.peng@linux.intel.com>,
	"Fuad Tabba" <tabba@google.com>,
	"Jarkko Sakkinen" <jarkko@kernel.org>,
	"Anish Moorthy" <amoorthy@google.com>,
	"David Matlack" <dmatlack@google.com>,
	"Yu Zhang" <yu.c.zhang@linux.intel.com>,
	"Isaku Yamahata" <isaku.yamahata@intel.com>,
	"Mickaël Salaün" <mic@digikod.net>,
	"Vlastimil Babka" <vbabka@suse.cz>,
	"Vishal Annapurve" <vannapurve@google.com>,
	"Ackerley Tng" <ackerleytng@google.com>,
	"Maciej Szmigiero" <mail@maciej.szmigiero.name>,
	"David Hildenbrand" <david@redhat.com>,
	"Quentin Perret" <qperret@google.com>,
	"Michael Roth" <michael.roth@amd.com>,
	Wang <wei.w.wang@intel.com>,
	"Liam Merwick" <liam.merwick@oracle.com>,
	"Isaku Yamahata" <isaku.yamahata@gmail.com>,
	"Kirill A . Shutemov" <kirill.shutemov@linux.intel.com>
Subject: [PATCH v13 34/35] KVM: selftests: Add basic selftest for guest_memfd()
Date: Fri, 27 Oct 2023 11:22:16 -0700
Message-ID: <20231027182217.3615211-35-seanjc@google.com>
In-Reply-To: <20231027182217.3615211-1-seanjc@google.com>

From: Chao Peng <chao.p.peng@linux.intel.com>

Add a selftest to verify the basic functionality of guest_memfd():

+ file descriptor created with the guest_memfd() ioctl does not allow
  read/write/mmap operations
+ file size and block size as returned from fstat are as expected
+ fallocate(FALLOC_FL_PUNCH_HOLE) on the fd rejects offsets and lengths
  that are not page aligned
+ invalid inputs (misaligned size, invalid flags) are rejected
+ file size and inode are unique (the innocuous-sounding
  anon_inode_getfile() backs all files with a single inode...)
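
For context, the __vm_create_guest_memfd() and vm_create_guest_memfd()
helpers used below are thin wrappers around the new vm ioctl.  A minimal,
illustrative sketch of the raw call (not part of this patch), assuming the
uapi layout (struct kvm_create_guest_memfd, KVM_CREATE_GUEST_MEMFD) added
earlier in this series:

  #include <stdint.h>
  #include <sys/ioctl.h>
  #include <linux/kvm.h>

  /* Returns a new guest_memfd on success, -1 with errno set on failure. */
  static int create_guest_memfd(int vm_fd, uint64_t size, uint64_t flags)
  {
          struct kvm_create_guest_memfd gmem = {
                  .size  = size,   /* must be page-aligned */
                  .flags = flags,  /* e.g. KVM_GUEST_MEMFD_ALLOW_HUGEPAGE */
          };

          return ioctl(vm_fd, KVM_CREATE_GUEST_MEMFD, &gmem);
  }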

Signed-off-by: Chao Peng <chao.p.peng@linux.intel.com>
Co-developed-by: Ackerley Tng <ackerleytng@google.com>
Signed-off-by: Ackerley Tng <ackerleytng@google.com>
Co-developed-by: Paolo Bonzini <pbonzini@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Co-developed-by: Sean Christopherson <seanjc@google.com>
Signed-off-by: Sean Christopherson <seanjc@google.com>
---
 tools/testing/selftests/kvm/Makefile          |   1 +
 .../testing/selftests/kvm/guest_memfd_test.c  | 221 ++++++++++++++++++
 2 files changed, 222 insertions(+)
 create mode 100644 tools/testing/selftests/kvm/guest_memfd_test.c

diff --git a/tools/testing/selftests/kvm/Makefile b/tools/testing/selftests/kvm/Makefile
index b709a52d5cdb..2b1ef809d73a 100644
--- a/tools/testing/selftests/kvm/Makefile
+++ b/tools/testing/selftests/kvm/Makefile
@@ -124,6 +124,7 @@ TEST_GEN_PROGS_x86_64 += access_tracking_perf_test
 TEST_GEN_PROGS_x86_64 += demand_paging_test
 TEST_GEN_PROGS_x86_64 += dirty_log_test
 TEST_GEN_PROGS_x86_64 += dirty_log_perf_test
+TEST_GEN_PROGS_x86_64 += guest_memfd_test
 TEST_GEN_PROGS_x86_64 += guest_print_test
 TEST_GEN_PROGS_x86_64 += hardware_disable_test
 TEST_GEN_PROGS_x86_64 += kvm_create_max_vcpus
diff --git a/tools/testing/selftests/kvm/guest_memfd_test.c b/tools/testing/selftests/kvm/guest_memfd_test.c
new file mode 100644
index 000000000000..c15de9852316
--- /dev/null
+++ b/tools/testing/selftests/kvm/guest_memfd_test.c
@@ -0,0 +1,221 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright Intel Corporation, 2023
+ *
+ * Author: Chao Peng <chao.p.peng@linux.intel.com>
+ */
+
+#define _GNU_SOURCE
+#include "test_util.h"
+#include "kvm_util_base.h"
+#include <linux/bitmap.h>
+#include <linux/falloc.h>
+#include <sys/mman.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#include <errno.h>
+#include <stdio.h>
+#include <fcntl.h>
+
+static void test_file_read_write(int fd)
+{
+	char buf[64];
+
+	TEST_ASSERT(read(fd, buf, sizeof(buf)) < 0,
+		    "read on a guest_mem fd should fail");
+	TEST_ASSERT(write(fd, buf, sizeof(buf)) < 0,
+		    "write on a guest_mem fd should fail");
+	TEST_ASSERT(pread(fd, buf, sizeof(buf), 0) < 0,
+		    "pread on a guest_mem fd should fail");
+	TEST_ASSERT(pwrite(fd, buf, sizeof(buf), 0) < 0,
+		    "pwrite on a guest_mem fd should fail");
+}
+
+static void test_mmap(int fd, size_t page_size)
+{
+	char *mem;
+
+	mem = mmap(NULL, page_size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
+	TEST_ASSERT_EQ(mem, MAP_FAILED);
+}
+
+static void test_file_size(int fd, size_t page_size, size_t total_size)
+{
+	struct stat sb;
+	int ret;
+
+	ret = fstat(fd, &sb);
+	TEST_ASSERT(!ret, "fstat should succeed");
+	TEST_ASSERT_EQ(sb.st_size, total_size);
+	TEST_ASSERT_EQ(sb.st_blksize, page_size);
+}
+
+static void test_fallocate(int fd, size_t page_size, size_t total_size)
+{
+	int ret;
+
+	ret = fallocate(fd, FALLOC_FL_KEEP_SIZE, 0, total_size);
+	TEST_ASSERT(!ret, "fallocate with aligned offset and size should succeed");
+
+	ret = fallocate(fd, FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE,
+			page_size - 1, page_size);
+	TEST_ASSERT(ret, "fallocate with unaligned offset should fail");
+
+	ret = fallocate(fd, FALLOC_FL_KEEP_SIZE, total_size, page_size);
+	TEST_ASSERT(ret, "fallocate beginning at total_size should fail");
+
+	ret = fallocate(fd, FALLOC_FL_KEEP_SIZE, total_size + page_size, page_size);
+	TEST_ASSERT(ret, "fallocate beginning after total_size should fail");
+
+	ret = fallocate(fd, FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE,
+			total_size, page_size);
+	TEST_ASSERT(!ret, "fallocate(PUNCH_HOLE) at total_size should succeed");
+
+	ret = fallocate(fd, FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE,
+			total_size + page_size, page_size);
+	TEST_ASSERT(!ret, "fallocate(PUNCH_HOLE) after total_size should succeed");
+
+	ret = fallocate(fd, FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE,
+			page_size, page_size - 1);
+	TEST_ASSERT(ret, "fallocate with unaligned size should fail");
+
+	ret = fallocate(fd, FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE,
+			page_size, page_size);
+	TEST_ASSERT(!ret, "fallocate(PUNCH_HOLE) with aligned offset and size should succeed");
+
+	ret = fallocate(fd, FALLOC_FL_KEEP_SIZE, page_size, page_size);
+	TEST_ASSERT(!ret, "fallocate to restore punched hole should succeed");
+}
+
+static void test_invalid_punch_hole(int fd, size_t page_size, size_t total_size)
+{
+	struct {
+		off_t offset;
+		off_t len;
+	} testcases[] = {
+		{0, 1},
+		{0, page_size - 1},
+		{0, page_size + 1},
+
+		{1, 1},
+		{1, page_size - 1},
+		{1, page_size},
+		{1, page_size + 1},
+
+		{page_size, 1},
+		{page_size, page_size - 1},
+		{page_size, page_size + 1},
+	};
+	int ret, i;
+
+	for (i = 0; i < ARRAY_SIZE(testcases); i++) {
+		ret = fallocate(fd, FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE,
+				testcases[i].offset, testcases[i].len);
+		TEST_ASSERT(ret == -1 && errno == EINVAL,
+			    "PUNCH_HOLE with !PAGE_SIZE offset (%lx) and/or length (%lx) should fail",
+			    testcases[i].offset, testcases[i].len);
+	}
+}
+
+static void test_create_guest_memfd_invalid(struct kvm_vm *vm)
+{
+	uint64_t valid_flags = 0;
+	size_t page_size = getpagesize();
+	uint64_t flag;
+	size_t size;
+	int fd;
+
+	for (size = 1; size < page_size; size++) {
+		fd = __vm_create_guest_memfd(vm, size, 0);
+		TEST_ASSERT(fd == -1 && errno == EINVAL,
+			    "guest_memfd() with non-page-aligned page size '0x%lx' should fail with EINVAL",
+			    size);
+	}
+
+	if (thp_configured()) {
+		for (size = page_size * 2; size < get_trans_hugepagesz(); size += page_size) {
+			fd = __vm_create_guest_memfd(vm, size, KVM_GUEST_MEMFD_ALLOW_HUGEPAGE);
+			TEST_ASSERT(fd == -1 && errno == EINVAL,
+				    "guest_memfd() with non-hugepage-aligned page size '0x%lx' should fail with EINVAL",
+				    size);
+		}
+
+		valid_flags = KVM_GUEST_MEMFD_ALLOW_HUGEPAGE;
+	}
+
+	for (flag = 1; flag; flag <<= 1) {
+		uint64_t bit;
+
+		if (flag & valid_flags)
+			continue;
+
+		fd = __vm_create_guest_memfd(vm, page_size, flag);
+		TEST_ASSERT(fd == -1 && errno == EINVAL,
+			    "guest_memfd() with flag '0x%lx' should fail with EINVAL",
+			    flag);
+
+		for_each_set_bit(bit, &valid_flags, 64) {
+			fd = __vm_create_guest_memfd(vm, page_size, flag | BIT_ULL(bit));
+			TEST_ASSERT(fd == -1 && errno == EINVAL,
+				    "guest_memfd() with flags '0x%llx' should fail with EINVAL",
+				    flag | BIT_ULL(bit));
+		}
+	}
+}
+
+static void test_create_guest_memfd_multiple(struct kvm_vm *vm)
+{
+	int fd1, fd2, ret;
+	struct stat st1, st2;
+
+	fd1 = __vm_create_guest_memfd(vm, 4096, 0);
+	TEST_ASSERT(fd1 != -1, "memfd creation should succeed");
+
+	ret = fstat(fd1, &st1);
+	TEST_ASSERT(ret != -1, "memfd fstat should succeed");
+	TEST_ASSERT(st1.st_size == 4096, "memfd st_size should match requested size");
+
+	fd2 = __vm_create_guest_memfd(vm, 8192, 0);
+	TEST_ASSERT(fd2 != -1, "memfd creation should succeed");
+
+	ret = fstat(fd2, &st2);
+	TEST_ASSERT(ret != -1, "memfd fstat should succeed");
+	TEST_ASSERT(st2.st_size == 8192, "second memfd st_size should match requested size");
+
+	ret = fstat(fd1, &st1);
+	TEST_ASSERT(ret != -1, "memfd fstat should succeed");
+	TEST_ASSERT(st1.st_size == 4096, "first memfd st_size should still match requested size");
+	TEST_ASSERT(st1.st_ino != st2.st_ino, "different memfd should have different inode numbers");
+}
+
+int main(int argc, char *argv[])
+{
+	size_t page_size;
+	size_t total_size;
+	int fd;
+	struct kvm_vm *vm;
+
+	TEST_REQUIRE(kvm_has_cap(KVM_CAP_GUEST_MEMFD));
+
+	page_size = getpagesize();
+	total_size = page_size * 4;
+
+	vm = vm_create_barebones();
+
+	test_create_guest_memfd_invalid(vm);
+	test_create_guest_memfd_multiple(vm);
+
+	fd = vm_create_guest_memfd(vm, total_size, 0);
+
+	test_file_read_write(fd);
+	test_mmap(fd, page_size);
+	test_file_size(fd, page_size, total_size);
+	test_fallocate(fd, page_size, total_size);
+	test_invalid_punch_hole(fd, page_size, total_size);
+
+	close(fd);
+}
-- 
2.42.0.820.g83a721a137-goog


2023-11-01 13:41         ` Sean Christopherson
2023-11-01 13:41           ` Sean Christopherson
2023-11-01 13:41           ` Sean Christopherson
2023-11-01 13:41           ` Sean Christopherson
2023-11-01 13:49           ` Paolo Bonzini
2023-11-01 13:49             ` Paolo Bonzini
2023-11-01 13:49             ` Paolo Bonzini
2023-11-01 13:49             ` Paolo Bonzini
2023-11-01 16:36             ` Sean Christopherson
2023-11-01 16:36               ` Sean Christopherson
2023-11-01 16:36               ` Sean Christopherson
2023-11-01 16:36               ` Sean Christopherson
2023-11-01 22:28               ` Paolo Bonzini
2023-11-01 22:28                 ` Paolo Bonzini
2023-11-01 22:28                 ` Paolo Bonzini
2023-11-01 22:28                 ` Paolo Bonzini
2023-11-01 22:34                 ` Sean Christopherson
2023-11-01 22:34                   ` Sean Christopherson
2023-11-01 22:34                   ` Sean Christopherson
2023-11-01 22:34                   ` Sean Christopherson
2023-11-01 23:17                   ` Paolo Bonzini
2023-11-01 23:17                     ` Paolo Bonzini
2023-11-01 23:17                     ` Paolo Bonzini
2023-11-01 23:17                     ` Paolo Bonzini
2023-11-02 15:38                     ` Sean Christopherson
2023-11-02 15:38                       ` Sean Christopherson
2023-11-02 15:38                       ` Sean Christopherson
2023-11-02 15:38                       ` Sean Christopherson
2023-11-02 15:46                       ` Paolo Bonzini
2023-11-02 15:46                         ` Paolo Bonzini
2023-11-02 15:46                         ` Paolo Bonzini
2023-11-02 15:46                         ` Paolo Bonzini
2023-11-27 11:13                         ` Vlastimil Babka
2023-11-27 11:13                           ` Vlastimil Babka
2023-11-27 11:13                           ` Vlastimil Babka
2023-11-29 22:40                           ` Sean Christopherson
2023-11-29 22:40                             ` Sean Christopherson
2023-11-29 22:40                             ` Sean Christopherson
2023-11-29 22:40                             ` Sean Christopherson
2023-10-27 18:22 ` [PATCH v13 18/35] KVM: x86: "Reset" vcpu->run->exit_reason early in KVM_RUN Sean Christopherson
2023-10-27 18:22   ` Sean Christopherson
2023-10-27 18:22   ` Sean Christopherson
2023-10-27 18:22   ` Sean Christopherson
2023-10-30 17:31   ` Paolo Bonzini
2023-10-30 17:31     ` Paolo Bonzini
2023-10-30 17:31     ` Paolo Bonzini
2023-10-30 17:31     ` Paolo Bonzini
2023-11-02 14:16   ` Fuad Tabba
2023-11-02 14:16     ` Fuad Tabba
2023-11-02 14:16     ` Fuad Tabba
2023-11-02 14:16     ` Fuad Tabba
2023-10-27 18:22 ` [PATCH v13 19/35] KVM: x86: Disallow hugepages when memory attributes are mixed Sean Christopherson
2023-10-27 18:22   ` Sean Christopherson
2023-10-27 18:22   ` Sean Christopherson
2023-10-27 18:22   ` Sean Christopherson
2023-10-27 18:22 ` [PATCH v13 20/35] KVM: x86/mmu: Handle page fault for private memory Sean Christopherson
2023-10-27 18:22   ` Sean Christopherson
2023-10-27 18:22   ` Sean Christopherson
2023-10-27 18:22   ` Sean Christopherson
2023-11-02 14:34   ` Fuad Tabba
2023-11-02 14:34     ` Fuad Tabba
2023-11-02 14:34     ` Fuad Tabba
2023-11-02 14:34     ` Fuad Tabba
2023-11-05 13:02   ` Xu Yilun
2023-11-05 13:02     ` Xu Yilun
2023-11-05 13:02     ` Xu Yilun
2023-11-05 13:02     ` Xu Yilun
2023-11-05 16:19     ` Paolo Bonzini
2023-11-05 16:19       ` Paolo Bonzini
2023-11-05 16:19       ` Paolo Bonzini
2023-11-05 16:19       ` Paolo Bonzini
2023-11-06 13:29       ` Xu Yilun
2023-11-06 13:29         ` Xu Yilun
2023-11-06 13:29         ` Xu Yilun
2023-11-06 13:29         ` Xu Yilun
2023-11-06 15:56         ` Sean Christopherson
2023-11-06 15:56           ` Sean Christopherson
2023-11-06 15:56           ` Sean Christopherson
2023-11-06 15:56           ` Sean Christopherson
2023-10-27 18:22 ` [PATCH v13 21/35] KVM: Drop superfluous __KVM_VCPU_MULTIPLE_ADDRESS_SPACE macro Sean Christopherson
2023-10-27 18:22   ` Sean Christopherson
2023-10-27 18:22   ` Sean Christopherson
2023-10-27 18:22   ` Sean Christopherson
2023-11-02 14:35   ` Fuad Tabba
2023-11-02 14:35     ` Fuad Tabba
2023-11-02 14:35     ` Fuad Tabba
2023-11-02 14:35     ` Fuad Tabba
2023-10-27 18:22 ` [PATCH v13 22/35] KVM: Allow arch code to track number of memslot address spaces per VM Sean Christopherson
2023-10-27 18:22   ` Sean Christopherson
2023-10-27 18:22   ` Sean Christopherson
2023-10-27 18:22   ` Sean Christopherson
2023-10-30 17:34   ` Paolo Bonzini
2023-10-30 17:34     ` Paolo Bonzini
2023-10-30 17:34     ` Paolo Bonzini
2023-10-30 17:34     ` Paolo Bonzini
2023-11-02 14:52   ` Fuad Tabba
2023-11-02 14:52     ` Fuad Tabba
2023-11-02 14:52     ` Fuad Tabba
2023-11-02 14:52     ` Fuad Tabba
2023-10-27 18:22 ` [PATCH v13 23/35] KVM: x86: Add support for "protected VMs" that can utilize private memory Sean Christopherson
2023-10-27 18:22   ` Sean Christopherson
2023-10-27 18:22   ` Sean Christopherson
2023-10-27 18:22   ` Sean Christopherson
2023-10-30 17:36   ` Paolo Bonzini
2023-10-30 17:36     ` Paolo Bonzini
2023-10-30 17:36     ` Paolo Bonzini
2023-10-30 17:36     ` Paolo Bonzini
2023-11-06 11:00   ` Fuad Tabba
2023-11-06 11:00     ` Fuad Tabba
2023-11-06 11:00     ` Fuad Tabba
2023-11-06 11:00     ` Fuad Tabba
2023-11-06 11:03     ` Paolo Bonzini
2023-11-06 11:03       ` Paolo Bonzini
2023-11-06 11:03       ` Paolo Bonzini
2023-11-06 11:03       ` Paolo Bonzini
2023-10-27 18:22 ` [PATCH v13 24/35] KVM: selftests: Drop unused kvm_userspace_memory_region_find() helper Sean Christopherson
2023-10-27 18:22   ` Sean Christopherson
2023-10-27 18:22   ` Sean Christopherson
2023-10-27 18:22   ` Sean Christopherson
2023-10-27 18:22 ` [PATCH v13 25/35] KVM: selftests: Convert lib's mem regions to KVM_SET_USER_MEMORY_REGION2 Sean Christopherson
2023-10-27 18:22   ` Sean Christopherson
2023-10-27 18:22   ` Sean Christopherson
2023-10-27 18:22   ` Sean Christopherson
2024-04-25 14:12   ` Dan Carpenter
2024-04-25 14:12     ` Dan Carpenter
2024-04-25 14:45     ` Shuah Khan
2024-04-25 14:45       ` Shuah Khan
2024-04-25 14:45       ` Shuah Khan
2024-04-25 15:09       ` Sean Christopherson
2024-04-25 15:09         ` Sean Christopherson
2024-04-25 16:22         ` Shuah Khan
2024-04-25 16:22           ` Shuah Khan
2024-04-26  7:33         ` Jarkko Sakkinen
2024-04-26  7:33           ` Jarkko Sakkinen
2024-04-26  7:33           ` Jarkko Sakkinen
2023-10-27 18:22 ` [PATCH v13 26/35] KVM: selftests: Add support for creating private memslots Sean Christopherson
2023-10-27 18:22   ` Sean Christopherson
2023-10-27 18:22   ` Sean Christopherson
2023-10-27 18:22   ` Sean Christopherson
2023-10-27 18:22 ` [PATCH v13 27/35] KVM: selftests: Add helpers to convert guest memory b/w private and shared Sean Christopherson
2023-10-27 18:22   ` Sean Christopherson
2023-10-27 18:22   ` Sean Christopherson
2023-10-27 18:22   ` Sean Christopherson
2023-11-06 11:26   ` Fuad Tabba
2023-11-06 11:26     ` Fuad Tabba
2023-11-06 11:26     ` Fuad Tabba
2023-11-06 11:26     ` Fuad Tabba
2023-10-27 18:22 ` [PATCH v13 28/35] KVM: selftests: Add helpers to do KVM_HC_MAP_GPA_RANGE hypercalls (x86) Sean Christopherson
2023-10-27 18:22   ` Sean Christopherson
2023-10-27 18:22   ` Sean Christopherson
2023-10-27 18:22   ` Sean Christopherson
2023-10-27 18:22 ` [PATCH v13 29/35] KVM: selftests: Introduce VM "shape" to allow tests to specify the VM type Sean Christopherson
2023-10-27 18:22   ` Sean Christopherson
2023-10-27 18:22   ` Sean Christopherson
2023-10-27 18:22   ` Sean Christopherson
2023-10-27 18:22 ` [PATCH v13 30/35] KVM: selftests: Add GUEST_SYNC[1-6] macros for synchronizing more data Sean Christopherson
2023-10-27 18:22   ` Sean Christopherson
2023-10-27 18:22   ` Sean Christopherson
2023-10-27 18:22   ` Sean Christopherson
2023-10-27 18:22 ` [PATCH v13 31/35] KVM: selftests: Add x86-only selftest for private memory conversions Sean Christopherson
2023-10-27 18:22   ` Sean Christopherson
2023-10-27 18:22   ` Sean Christopherson
2023-10-27 18:22   ` Sean Christopherson
2023-10-27 18:22 ` [PATCH v13 32/35] KVM: selftests: Add KVM_SET_USER_MEMORY_REGION2 helper Sean Christopherson
2023-10-27 18:22   ` Sean Christopherson
2023-10-27 18:22   ` Sean Christopherson
2023-10-27 18:22   ` Sean Christopherson
2023-10-27 18:22 ` [PATCH v13 33/35] KVM: selftests: Expand set_memory_region_test to validate guest_memfd() Sean Christopherson
2023-10-27 18:22   ` Sean Christopherson
2023-10-27 18:22   ` Sean Christopherson
2023-10-27 18:22   ` Sean Christopherson
2023-10-27 18:22 ` Sean Christopherson [this message]
2023-10-27 18:22   ` [PATCH v13 34/35] KVM: selftests: Add basic selftest for guest_memfd() Sean Christopherson
2023-10-27 18:22   ` Sean Christopherson
2023-10-27 18:22   ` Sean Christopherson
2023-10-27 18:22 ` [PATCH v13 35/35] KVM: selftests: Test KVM exit behavior for private memory/access Sean Christopherson
2023-10-27 18:22   ` Sean Christopherson
2023-10-27 18:22   ` Sean Christopherson
2023-10-27 18:22   ` Sean Christopherson
2023-10-30 17:39 ` [PATCH v13 00/35] KVM: guest_memfd() and per-page attributes Paolo Bonzini
2023-10-30 17:39   ` Paolo Bonzini
2023-10-30 17:39   ` Paolo Bonzini
2023-10-30 17:39   ` Paolo Bonzini

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox
  (a command-line sketch of fetching and importing the thread mbox
  follows these instructions)

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=20231027182217.3615211-35-seanjc@google.com \
    --to=seanjc@google.com \
    --cc=ackerleytng@google.com \
    --cc=akpm@linux-foundation.org \
    --cc=amoorthy@google.com \
    --cc=anup@brainfault.org \
    --cc=aou@eecs.berkeley.edu \
    --cc=brauner@kernel.org \
    --cc=chao.p.peng@linux.intel.com \
    --cc=chenhuacai@kernel.org \
    --cc=david@redhat.com \
    --cc=dmatlack@google.com \
    --cc=isaku.yamahata@gmail.com \
    --cc=isaku.yamahata@intel.com \
    --cc=jarkko@kernel.org \
    --cc=kirill.shutemov@linux.intel.com \
    --cc=kvm-riscv@lists.infradead.org \
    --cc=kvm@vger.kernel.org \
    --cc=kvmarm@lists.linux.dev \
    --cc=liam.merwick@oracle.com \
    --cc=linux-arm-kernel@lists.infradead.org \
    --cc=linux-fsdevel@vger.kernel.org \
    --cc=linux-kernel@vger.kernel.org \
    --cc=linux-mips@vger.kernel.org \
    --cc=linux-mm@kvack.org \
    --cc=linux-riscv@lists.infradead.org \
    --cc=linuxppc-dev@lists.ozlabs.org \
    --cc=mail@maciej.szmigiero.name \
    --cc=maz@kernel.org \
    --cc=mic@digikod.net \
    --cc=michael.roth@amd.com \
    --cc=mpe@ellerman.id.au \
    --cc=oliver.upton@linux.dev \
    --cc=palmer@dabbelt.com \
    --cc=paul.walmsley@sifive.com \
    --cc=pbonzini@redhat.com \
    --cc=qperret@google.com \
    --cc=tabba@google.com \
    --cc=vannapurve@google.com \
    --cc=vbabka@suse.cz \
    --cc=viro@zeniv.linux.org.uk \
    --cc=wei.w.wang@intel.com \
    --cc=willy@infradead.org \
    --cc=xiaoyao.li@intel.com \
    --cc=yilun.xu@intel.com \
    --cc=yu.c.zhang@linux.intel.com \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link

Be sure your reply has a Subject: header at the top and a blank line
before the message body.
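
For the mbox method above, a minimal shell sketch follows. It assumes the
usual lore.kernel.org convention of serving a whole thread as a gzipped
mbox at the message URL with "t.mbox.gz" appended, and that the b4 tool is
installed; treat the exact URL and commands as illustrative rather than
canonical.

  # Fetch the entire thread as a gzipped mbox (URL pattern assumed from
  # the common lore.kernel.org layout) and unpack it for your mail client:
  curl -fLO https://lore.kernel.org/all/20231027182217.3615211-35-seanjc@google.com/t.mbox.gz
  gunzip t.mbox.gz

  # Alternatively, if b4 is available, it can retrieve the same thread
  # by Message-ID:
  b4 mbox 20231027182217.3615211-35-seanjc@google.com

Whichever method you use, a hand-written reply would carry headers along
these lines; addresses not taken from the git-send-email example above
(such as "you@example.com") are placeholders:

  From: you@example.com
  To: seanjc@google.com
  Cc: kvm@vger.kernel.org, linux-kernel@vger.kernel.org
  In-Reply-To: <20231027182217.3615211-35-seanjc@google.com>
  Subject: Re: [PATCH v13 34/35] KVM: selftests: Add basic selftest for guest_memfd()

  <blank line, then the interleaved-quoted message body>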