From: Ackerley Tng <ackerleytng@google.com>
To: kvm@vger.kernel.org, linux-api@vger.kernel.org,
linux-arch@vger.kernel.org, linux-doc@vger.kernel.org,
linux-fsdevel@vger.kernel.org, linux-kernel@vger.kernel.org,
linux-mm@kvack.org, qemu-devel@nongnu.org
Cc: aarcange@redhat.com, ak@linux.intel.com,
akpm@linux-foundation.org, arnd@arndb.de, bfields@fieldses.org,
bp@alien8.de, chao.p.peng@linux.intel.com, corbet@lwn.net,
dave.hansen@intel.com, david@redhat.com, ddutile@redhat.com,
dhildenb@redhat.com, hpa@zytor.com, hughd@google.com,
jlayton@kernel.org, jmattson@google.com, joro@8bytes.org,
jun.nakajima@intel.com, kirill.shutemov@linux.intel.com,
linmiaohe@huawei.com, luto@kernel.org,
mail@maciej.szmigiero.name, mhocko@suse.com,
michael.roth@amd.com, mingo@redhat.com, naoya.horiguchi@nec.com,
pbonzini@redhat.com, qperret@google.com, rppt@kernel.org,
seanjc@google.com, shuah@kernel.org, steven.price@arm.com,
tabba@google.com, tglx@linutronix.de, vannapurve@google.com,
vbabka@suse.cz, vkuznets@redhat.com, wanpengli@tencent.com,
wei.w.wang@intel.com, x86@kernel.org, yu.c.zhang@linux.intel.com,
Ackerley Tng <ackerleytng@google.com>
Subject: [RFC PATCH 08/10] KVM: selftests: Default private_mem_conversions_test to use 1 restrictedmem file for test data
Date: Thu, 16 Mar 2023 00:31:01 +0000 [thread overview]
Message-ID: <287d6e84dc788d84599392ca5d65864201f9a6a4.1678926164.git.ackerleytng@google.com> (raw)
In-Reply-To: <cover.1678926164.git.ackerleytng@google.com>
By default, have the private/shared memory conversion tests back all
memslots with a single restrictedmem file (when multiple memslots are
requested), while executing on multiple vCPUs in parallel, to
stress-test the restrictedmem subsystem.
Also add a -f flag to optionally use a separate restrictedmem file for
each memslot.
Signed-off-by: Ackerley Tng <ackerleytng@google.com>
---
.../kvm/x86_64/private_mem_conversions_test.c | 52 ++++++++++++++-----
1 file changed, 38 insertions(+), 14 deletions(-)
diff --git a/tools/testing/selftests/kvm/x86_64/private_mem_conversions_test.c b/tools/testing/selftests/kvm/x86_64/private_mem_conversions_test.c
index afaf8d0e52e6..ca30f0f05c39 100644
--- a/tools/testing/selftests/kvm/x86_64/private_mem_conversions_test.c
+++ b/tools/testing/selftests/kvm/x86_64/private_mem_conversions_test.c
@@ -324,7 +324,8 @@ void *thread_function(void *input)
}
static void add_memslot_for_vcpu(
- struct kvm_vm *vm, enum vm_mem_backing_src_type src_type, uint8_t vcpu_id)
+ struct kvm_vm *vm, enum vm_mem_backing_src_type src_type, uint8_t vcpu_id,
+ int restrictedmem_fd, uint64_t restrictedmem_offset)
{
uint64_t gpa = data_gpa_base_for_vcpu_id(vcpu_id);
uint32_t slot = DATA_SLOT_BASE + vcpu_id;
@@ -336,7 +337,8 @@ static void add_memslot_for_vcpu(
static void test_mem_conversions(enum vm_mem_backing_src_type src_type,
uint8_t nr_vcpus, uint32_t iterations,
- bool use_multiple_memslots)
+ bool use_multiple_memslots,
+ bool use_different_restrictedmem_files)
{
struct kvm_vcpu *vcpus[KVM_MAX_VCPUS];
pthread_t threads[KVM_MAX_VCPUS];
@@ -356,21 +358,28 @@ static void test_mem_conversions(enum vm_mem_backing_src_type src_type,
vm_enable_cap(vm, KVM_CAP_EXIT_HYPERCALL, (1 << KVM_HC_MAP_GPA_RANGE));
npages_for_all_vcpus = DATA_SIZE / vm->page_size * nr_vcpus;
+ virt_map(vm, DATA_GPA_BASE, DATA_GPA_BASE, npages_for_all_vcpus);
if (use_multiple_memslots) {
- for (i = 0; i < nr_vcpus; i++)
- add_memslot_for_vcpu(vm, src_type, i);
+ int fd = memfd_restricted(0);
+ int offset = 0;
+
+ for (i = 0; i < nr_vcpus; i++) {
+ if (use_different_restrictedmem_files) {
+ if (i > 0)
+ fd = memfd_restricted(0);
+ } else {
+ offset = i * DATA_GPA_SPACING;
+ }
+
+ add_memslot_for_vcpu(vm, src_type, i, fd, offset);
+ }
} else {
vm_userspace_mem_region_add(
vm, src_type, DATA_GPA_BASE, DATA_SLOT_BASE,
npages_for_all_vcpus, KVM_MEM_PRIVATE);
}
- virt_map(vm, DATA_GPA_BASE, DATA_GPA_BASE, npages_for_all_vcpus);
-
- for (i = 0; i < nr_vcpus; i++)
- add_memslot_for_vcpu(vm, src_type, i);
-
for (i = 0; i < nr_vcpus; i++) {
args[i].vm = vm;
args[i].vcpu = vcpus[i];
@@ -382,7 +391,7 @@ static void test_mem_conversions(enum vm_mem_backing_src_type src_type,
for (i = 0; i < nr_vcpus; i++)
pthread_join(threads[i], NULL);
- if (!use_multiple_memslots)
+ if (!use_multiple_memslots || !use_different_restrictedmem_files)
test_invalidation_code_unbound(vm, 1, DATA_SIZE * nr_vcpus);
else
test_invalidation_code_unbound(vm, nr_vcpus, DATA_SIZE);
@@ -391,8 +400,9 @@ static void test_mem_conversions(enum vm_mem_backing_src_type src_type,
static void usage(const char *command)
{
puts("");
- printf("usage: %s [-h] [-m] [-s mem-type] [-n number-of-vcpus] [-i number-of-iterations]\n",
- command);
+ printf("usage: %s\n", command);
+ printf(" [-h] [-m] [-f] [-s mem-type]\n");
+ printf(" [-n number-of-vcpus] [-i number-of-iterations]\n");
puts("");
backing_src_help("-s");
puts("");
@@ -404,6 +414,9 @@ static void usage(const char *command)
puts("");
puts(" -m: use multiple memslots (default: use 1 memslot)");
puts("");
+ puts(" -f: use different restrictedmem files for each memslot");
+ puts(" (default: use 1 restrictedmem file for all memslots)");
+ puts("");
}
int main(int argc, char *argv[])
@@ -412,12 +425,13 @@ int main(int argc, char *argv[])
uint8_t nr_vcpus = 2;
uint32_t iterations = 10;
bool use_multiple_memslots = false;
+ bool use_different_restrictedmem_files = false;
int opt;
TEST_REQUIRE(kvm_has_cap(KVM_CAP_EXIT_HYPERCALL));
TEST_REQUIRE(kvm_check_cap(KVM_CAP_VM_TYPES) & BIT(KVM_X86_PROTECTED_VM));
- while ((opt = getopt(argc, argv, "mhs:n:i:")) != -1) {
+ while ((opt = getopt(argc, argv, "fmhs:n:i:")) != -1) {
switch (opt) {
case 'n':
nr_vcpus = atoi_positive("nr_vcpus", optarg);
@@ -431,6 +445,9 @@ int main(int argc, char *argv[])
case 'm':
use_multiple_memslots = true;
break;
+ case 'f':
+ use_different_restrictedmem_files = true;
+ break;
case 'h':
default:
usage(argv[0]);
@@ -438,6 +455,13 @@ int main(int argc, char *argv[])
}
}
- test_mem_conversions(src_type, nr_vcpus, iterations, use_multiple_memslots);
+ if (!use_multiple_memslots && use_different_restrictedmem_files) {
+ printf("Overriding -f flag: ");
+ puts("Using just 1 restrictedmem file since only 1 memslot is to be used.");
+ use_different_restrictedmem_files = false;
+ }
+
+ test_mem_conversions(src_type, nr_vcpus, iterations, use_multiple_memslots,
+ use_different_restrictedmem_files);
return 0;
}
--
2.40.0.rc2.332.ga46443480c-goog
next prev parent reply other threads:[~2023-03-16 0:32 UTC|newest]
Thread overview: 11+ messages / expand[flat|nested] mbox.gz Atom feed top
2023-03-16 0:30 [RFC PATCH 00/10] Additional selftests for restrictedmem Ackerley Tng
2023-03-16 0:30 ` [RFC PATCH 01/10] KVM: selftests: Test error message fixes for memfd_restricted selftests Ackerley Tng
2023-03-16 0:30 ` [RFC PATCH 02/10] KVM: selftests: Test that ftruncate to non-page-aligned size on a restrictedmem fd should fail Ackerley Tng
2023-03-16 0:30 ` [RFC PATCH 03/10] KVM: selftests: Test that VM private memory should not be readable from host Ackerley Tng
2023-03-16 0:30 ` [RFC PATCH 04/10] KVM: selftests: Exercise restrictedmem allocation and truncation code after KVM invalidation code has been unbound Ackerley Tng
2023-03-16 0:30 ` [RFC PATCH 05/10] KVM: selftests: Generalize private_mem_conversions_test for parallel execution Ackerley Tng
2023-03-16 0:30 ` [RFC PATCH 06/10] KVM: selftests: Default private_mem_conversions_test to use 1 memslot for test data Ackerley Tng
2023-03-16 0:31 ` [RFC PATCH 07/10] KVM: selftests: Add vm_userspace_mem_region_add_with_restrictedmem Ackerley Tng
2023-03-16 0:31 ` Ackerley Tng [this message]
2023-03-16 0:31 ` [RFC PATCH 09/10] KVM: selftests: Add tests around sharing a restrictedmem fd Ackerley Tng
2023-03-16 0:31 ` [RFC PATCH 10/10] KVM: selftests: Test KVM exit behavior for private memory/access Ackerley Tng
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=287d6e84dc788d84599392ca5d65864201f9a6a4.1678926164.git.ackerleytng@google.com \
--to=ackerleytng@google.com \
--cc=aarcange@redhat.com \
--cc=ak@linux.intel.com \
--cc=akpm@linux-foundation.org \
--cc=arnd@arndb.de \
--cc=bfields@fieldses.org \
--cc=bp@alien8.de \
--cc=chao.p.peng@linux.intel.com \
--cc=corbet@lwn.net \
--cc=dave.hansen@intel.com \
--cc=david@redhat.com \
--cc=ddutile@redhat.com \
--cc=dhildenb@redhat.com \
--cc=hpa@zytor.com \
--cc=hughd@google.com \
--cc=jlayton@kernel.org \
--cc=jmattson@google.com \
--cc=joro@8bytes.org \
--cc=jun.nakajima@intel.com \
--cc=kirill.shutemov@linux.intel.com \
--cc=kvm@vger.kernel.org \
--cc=linmiaohe@huawei.com \
--cc=linux-api@vger.kernel.org \
--cc=linux-arch@vger.kernel.org \
--cc=linux-doc@vger.kernel.org \
--cc=linux-fsdevel@vger.kernel.org \
--cc=linux-kernel@vger.kernel.org \
--cc=linux-mm@kvack.org \
--cc=luto@kernel.org \
--cc=mail@maciej.szmigiero.name \
--cc=mhocko@suse.com \
--cc=michael.roth@amd.com \
--cc=mingo@redhat.com \
--cc=naoya.horiguchi@nec.com \
--cc=pbonzini@redhat.com \
--cc=qemu-devel@nongnu.org \
--cc=qperret@google.com \
--cc=rppt@kernel.org \
--cc=seanjc@google.com \
--cc=shuah@kernel.org \
--cc=steven.price@arm.com \
--cc=tabba@google.com \
--cc=tglx@linutronix.de \
--cc=vannapurve@google.com \
--cc=vbabka@suse.cz \
--cc=vkuznets@redhat.com \
--cc=wanpengli@tencent.com \
--cc=wei.w.wang@intel.com \
--cc=x86@kernel.org \
--cc=yu.c.zhang@linux.intel.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).