From: Sean Christopherson <sean.j.christopherson@intel.com>
To: Christian Borntraeger <borntraeger@de.ibm.com>,
	Janosch Frank <frankja@linux.ibm.com>,
	Paolo Bonzini <pbonzini@redhat.com>
Cc: David Hildenbrand <david@redhat.com>,
	Cornelia Huck <cohuck@redhat.com>,
	kvm@vger.kernel.org, linux-kernel@vger.kernel.org,
	Qian Cai <cai@lca.pw>, Peter Xu <peterx@redhat.com>
Subject: [PATCH 4/7] KVM: selftests: Add helpers to consolidate open coded list operations
Date: Fri, 20 Mar 2020 13:55:43 -0700
Message-ID: <20200320205546.2396-5-sean.j.christopherson@intel.com>
In-Reply-To: <20200320205546.2396-1-sean.j.christopherson@intel.com>

Add helpers for the KVM selftests' variant of a linked list to replace a
variety of open coded adds, deletes and iterators.
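
For example, here is a minimal usage sketch (the calls mirror the converted
sites below; the vm, vcpu and vcpuid names are just stand-ins for the
selftest structures):

  struct vcpu *iter;

  /* Insert a new vcpu at the head of the VM's list. */
  kvm_list_add(vm->vcpu_head, vcpu);

  /* Walk the list, e.g. to find a vcpu by id. */
  kvm_list_for_each(vm->vcpu_head, iter) {
          if (iter->id == vcpuid)
                  break;
  }

  /* Unlink the vcpu; the caller still owns (and frees) it. */
  kvm_list_del(vm->vcpu_head, vcpu);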

Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
---
 tools/testing/selftests/kvm/lib/kvm_util.c | 68 ++++++++++++----------
 1 file changed, 37 insertions(+), 31 deletions(-)

diff --git a/tools/testing/selftests/kvm/lib/kvm_util.c b/tools/testing/selftests/kvm/lib/kvm_util.c
index 9a783c20dd26..d7b74f465570 100644
--- a/tools/testing/selftests/kvm/lib/kvm_util.c
+++ b/tools/testing/selftests/kvm/lib/kvm_util.c
@@ -19,6 +19,27 @@
 #define KVM_UTIL_PGS_PER_HUGEPG 512
 #define KVM_UTIL_MIN_PFN	2
 
+#define kvm_list_add(head, new)		\
+do {					\
+	if (head)			\
+		head->prev = new;	\
+	new->next = head;		\
+	head = new;			\
+} while (0)
+
+#define kvm_list_del(head, del)			\
+do {						\
+	if (del->next)				\
+		del->next->prev = del->prev;	\
+	if (del->prev)				\
+		del->prev->next = del->next;	\
+	else					\
+		head = del->next;		\
+} while (0)
+
+#define kvm_list_for_each(head, iter)		\
+	for (iter = head; iter; iter = iter->next)
+
 /* Aligns x up to the next multiple of size. Size must be a power of 2. */
 static void *align(void *x, size_t size)
 {
@@ -258,8 +279,7 @@ void kvm_vm_restart(struct kvm_vm *vmp, int perm)
 	if (vmp->has_irqchip)
 		vm_create_irqchip(vmp);
 
-	for (region = vmp->userspace_mem_region_head; region;
-		region = region->next) {
+	kvm_list_for_each(vmp->userspace_mem_region_head, region) {
 		int ret = ioctl(vmp->fd, KVM_SET_USER_MEMORY_REGION, &region->region);
 		TEST_ASSERT(ret == 0, "KVM_SET_USER_MEMORY_REGION IOCTL failed,\n"
 			    "  rc: %i errno: %i\n"
@@ -319,8 +339,7 @@ userspace_mem_region_find(struct kvm_vm *vm, uint64_t start, uint64_t end)
 {
 	struct userspace_mem_region *region;
 
-	for (region = vm->userspace_mem_region_head; region;
-		region = region->next) {
+	kvm_list_for_each(vm->userspace_mem_region_head, region) {
 		uint64_t existing_start = region->region.guest_phys_addr;
 		uint64_t existing_end = region->region.guest_phys_addr
 			+ region->region.memory_size - 1;
@@ -380,7 +399,7 @@ struct vcpu *vcpu_find(struct kvm_vm *vm, uint32_t vcpuid)
 {
 	struct vcpu *vcpup;
 
-	for (vcpup = vm->vcpu_head; vcpup; vcpup = vcpup->next) {
+	kvm_list_for_each(vm->vcpu_head, vcpup) {
 		if (vcpup->id == vcpuid)
 			return vcpup;
 	}
@@ -412,12 +431,7 @@ static void vm_vcpu_rm(struct kvm_vm *vm, struct vcpu *vcpu)
 	TEST_ASSERT(ret == 0, "Close of VCPU fd failed, rc: %i "
 		"errno: %i", ret, errno);
 
-	if (vcpu->next)
-		vcpu->next->prev = vcpu->prev;
-	if (vcpu->prev)
-		vcpu->prev->next = vcpu->next;
-	else
-		vm->vcpu_head = vcpu->next;
+	kvm_list_del(vm->vcpu_head, vcpu);
 	free(vcpu);
 }
 
@@ -452,13 +466,14 @@ void kvm_vm_free(struct kvm_vm *vmp)
 		struct userspace_mem_region *region
 			= vmp->userspace_mem_region_head;
 
+		kvm_list_del(vmp->userspace_mem_region_head, region);
+
 		region->region.memory_size = 0;
 		ret = ioctl(vmp->fd, KVM_SET_USER_MEMORY_REGION,
 			&region->region);
 		TEST_ASSERT(ret == 0, "KVM_SET_USER_MEMORY_REGION IOCTL failed, "
 			"rc: %i errno: %i", ret, errno);
 
-		vmp->userspace_mem_region_head = region->next;
 		sparsebit_free(&region->unused_phy_pages);
 		ret = munmap(region->mmap_start, region->mmap_size);
 		TEST_ASSERT(ret == 0, "munmap failed, rc: %i errno: %i",
@@ -611,8 +626,7 @@ void vm_userspace_mem_region_add(struct kvm_vm *vm,
 			(uint64_t) region->region.memory_size);
 
 	/* Confirm no region with the requested slot already exists. */
-	for (region = vm->userspace_mem_region_head; region;
-		region = region->next) {
+	kvm_list_for_each(vm->userspace_mem_region_head, region) {
 		if (region->region.slot == slot)
 			break;
 	}
@@ -685,10 +699,7 @@ void vm_userspace_mem_region_add(struct kvm_vm *vm,
 		guest_paddr, (uint64_t) region->region.memory_size);
 
 	/* Add to linked-list of memory regions. */
-	if (vm->userspace_mem_region_head)
-		vm->userspace_mem_region_head->prev = region;
-	region->next = vm->userspace_mem_region_head;
-	vm->userspace_mem_region_head = region;
+	kvm_list_add(vm->userspace_mem_region_head, region);
 }
 
 /*
@@ -711,8 +722,7 @@ memslot2region(struct kvm_vm *vm, uint32_t memslot)
 {
 	struct userspace_mem_region *region;
 
-	for (region = vm->userspace_mem_region_head; region;
-		region = region->next) {
+	kvm_list_for_each(vm->userspace_mem_region_head, region) {
 		if (region->region.slot == memslot)
 			break;
 	}
@@ -862,10 +872,7 @@ void vm_vcpu_add(struct kvm_vm *vm, uint32_t vcpuid)
 		"vcpu id: %u errno: %i", vcpuid, errno);
 
 	/* Add to linked-list of VCPUs. */
-	if (vm->vcpu_head)
-		vm->vcpu_head->prev = vcpu;
-	vcpu->next = vm->vcpu_head;
-	vm->vcpu_head = vcpu;
+	kvm_list_add(vm->vcpu_head, vcpu);
 }
 
 /*
@@ -1058,8 +1065,8 @@ void virt_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr,
 void *addr_gpa2hva(struct kvm_vm *vm, vm_paddr_t gpa)
 {
 	struct userspace_mem_region *region;
-	for (region = vm->userspace_mem_region_head; region;
-	     region = region->next) {
+
+	kvm_list_for_each(vm->userspace_mem_region_head, region) {
 		if ((gpa >= region->region.guest_phys_addr)
 			&& (gpa <= (region->region.guest_phys_addr
 				+ region->region.memory_size - 1)))
@@ -1091,8 +1098,8 @@ void *addr_gpa2hva(struct kvm_vm *vm, vm_paddr_t gpa)
 vm_paddr_t addr_hva2gpa(struct kvm_vm *vm, void *hva)
 {
 	struct userspace_mem_region *region;
-	for (region = vm->userspace_mem_region_head; region;
-	     region = region->next) {
+
+	kvm_list_for_each(vm->userspace_mem_region_head, region) {
 		if ((hva >= region->host_mem)
 			&& (hva <= (region->host_mem
 				+ region->region.memory_size - 1)))
@@ -1519,8 +1526,7 @@ void vm_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent)
 	fprintf(stream, "%*sfd: %i\n", indent, "", vm->fd);
 	fprintf(stream, "%*spage_size: 0x%x\n", indent, "", vm->page_size);
 	fprintf(stream, "%*sMem Regions:\n", indent, "");
-	for (region = vm->userspace_mem_region_head; region;
-		region = region->next) {
+	kvm_list_for_each(vm->userspace_mem_region_head, region) {
 		fprintf(stream, "%*sguest_phys: 0x%lx size: 0x%lx "
 			"host_virt: %p\n", indent + 2, "",
 			(uint64_t) region->region.guest_phys_addr,
@@ -1539,7 +1545,7 @@ void vm_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent)
 		virt_dump(stream, vm, indent + 4);
 	}
 	fprintf(stream, "%*sVCPUs:\n", indent, "");
-	for (vcpu = vm->vcpu_head; vcpu; vcpu = vcpu->next)
+	kvm_list_for_each(vm->vcpu_head, vcpu)
 		vcpu_dump(stream, vm, vcpu->id, indent + 2);
 }
 
-- 
2.24.1

