From: Andrew Jones <drjones@redhat.com>
To: Sean Christopherson <sean.j.christopherson@intel.com>
Cc: Paolo Bonzini <pbonzini@redhat.com>,
	Christian Borntraeger <borntraeger@de.ibm.com>,
	Janosch Frank <frankja@linux.ibm.com>,
	David Hildenbrand <david@redhat.com>,
	Cornelia Huck <cohuck@redhat.com>,
	kvm@vger.kernel.org, linux-kernel@vger.kernel.org,
	Peter Xu <peterx@redhat.com>,
	Wainer dos Santos Moschetta <wainersm@redhat.com>
Subject: Re: [PATCH 02/10] KVM: selftests: Use kernel's list instead of homebrewed replacement
Date: Tue, 14 Apr 2020 18:03:21 +0200	[thread overview]
Message-ID: <20200414160321.3sq33f24fhh7r5ju@kamzik.brq.redhat.com> (raw)
In-Reply-To: <20200410231707.7128-3-sean.j.christopherson@intel.com>

On Fri, Apr 10, 2020 at 04:16:59PM -0700, Sean Christopherson wrote:
> Replace the KVM selftests' homebrewed linked lists for vCPUs and memory
> regions with the kernel's 'struct list_head'.
> 
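Nice cleanup. For anyone less familiar with the kernel's list API:
'struct list_head' is an intrusive doubly-linked list, i.e. the node is
embedded in the element itself and the iteration macros recover the
containing structure via container_of(). A minimal sketch of the pattern
(illustration only, assuming the tools/include copy of linux/list.h, not
actual selftest code):

	struct item {
		int id;
		struct list_head list;	/* node embedded in the element */
	};

	static LIST_HEAD(items);	/* list head, statically initialized */

	static void demo(void)
	{
		struct item *it, *tmp;

		it = calloc(1, sizeof(*it));
		it->id = 42;
		list_add(&it->list, &items);		/* insert at the head */

		list_for_each_entry(it, &items, list)	/* walk the elements */
			printf("id: %d\n", it->id);

		list_for_each_entry_safe(it, tmp, &items, list) {
			list_del(&it->list);		/* unlink, then free */
			free(it);
		}
	}
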
> Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
> ---
>  .../testing/selftests/kvm/include/kvm_util.h  |  1 +
>  tools/testing/selftests/kvm/lib/kvm_util.c    | 94 ++++++++-----------
>  .../selftests/kvm/lib/kvm_util_internal.h     |  8 +-
>  .../selftests/kvm/lib/s390x/processor.c       |  5 +-
>  4 files changed, 48 insertions(+), 60 deletions(-)
> 
> diff --git a/tools/testing/selftests/kvm/include/kvm_util.h b/tools/testing/selftests/kvm/include/kvm_util.h
> index a99b875f50d2..2f329e785c58 100644
> --- a/tools/testing/selftests/kvm/include/kvm_util.h
> +++ b/tools/testing/selftests/kvm/include/kvm_util.h
> @@ -10,6 +10,7 @@
>  #include "test_util.h"
>  
>  #include "asm/kvm.h"
> +#include "linux/list.h"
>  #include "linux/kvm.h"
>  #include <sys/ioctl.h>
>  
> diff --git a/tools/testing/selftests/kvm/lib/kvm_util.c b/tools/testing/selftests/kvm/lib/kvm_util.c
> index 9a783c20dd26..105ee9bc09f0 100644
> --- a/tools/testing/selftests/kvm/lib/kvm_util.c
> +++ b/tools/testing/selftests/kvm/lib/kvm_util.c
> @@ -161,6 +161,9 @@ struct kvm_vm *_vm_create(enum vm_guest_mode mode, uint64_t phy_pages, int perm)
>  	vm = calloc(1, sizeof(*vm));
>  	TEST_ASSERT(vm != NULL, "Insufficient Memory");
>  
> +	INIT_LIST_HEAD(&vm->vcpus);
> +	INIT_LIST_HEAD(&vm->userspace_mem_regions);
> +
>  	vm->mode = mode;
>  	vm->type = 0;
>  
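Maybe worth a comment on why the explicit INIT_LIST_HEAD() calls are
needed even though calloc() already zeroes the struct: an empty list head
must point at itself, not at NULL, or the first list_add() would
dereference NULL. Roughly:

	/* after calloc(): vm->vcpus.next == vm->vcpus.prev == NULL (invalid) */
	INIT_LIST_HEAD(&vm->vcpus);
	/* now: vm->vcpus.next == vm->vcpus.prev == &vm->vcpus (valid, empty) */
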
> @@ -258,8 +261,7 @@ void kvm_vm_restart(struct kvm_vm *vmp, int perm)
>  	if (vmp->has_irqchip)
>  		vm_create_irqchip(vmp);
>  
> -	for (region = vmp->userspace_mem_region_head; region;
> -		region = region->next) {
> +	list_for_each_entry(region, &vmp->userspace_mem_regions, list) {
>  		int ret = ioctl(vmp->fd, KVM_SET_USER_MEMORY_REGION, &region->region);
>  		TEST_ASSERT(ret == 0, "KVM_SET_USER_MEMORY_REGION IOCTL failed,\n"
>  			    "  rc: %i errno: %i\n"
> @@ -319,8 +321,7 @@ userspace_mem_region_find(struct kvm_vm *vm, uint64_t start, uint64_t end)
>  {
>  	struct userspace_mem_region *region;
>  
> -	for (region = vm->userspace_mem_region_head; region;
> -		region = region->next) {
> +	list_for_each_entry(region, &vm->userspace_mem_regions, list) {
>  		uint64_t existing_start = region->region.guest_phys_addr;
>  		uint64_t existing_end = region->region.guest_phys_addr
>  			+ region->region.memory_size - 1;
> @@ -378,11 +379,11 @@ kvm_userspace_memory_region_find(struct kvm_vm *vm, uint64_t start,
>   */
>  struct vcpu *vcpu_find(struct kvm_vm *vm, uint32_t vcpuid)
>  {
> -	struct vcpu *vcpup;
> +	struct vcpu *vcpu;
>  
> -	for (vcpup = vm->vcpu_head; vcpup; vcpup = vcpup->next) {
> -		if (vcpup->id == vcpuid)
> -			return vcpup;
> +	list_for_each_entry(vcpu, &vm->vcpus, list) {
> +		if (vcpu->id == vcpuid)
> +			return vcpu;
>  	}
>  
>  	return NULL;
> @@ -392,16 +393,15 @@ struct vcpu *vcpu_find(struct kvm_vm *vm, uint32_t vcpuid)
>   * VM VCPU Remove
>   *
>   * Input Args:
> - *   vm - Virtual Machine
>   *   vcpu - VCPU to remove
>   *
>   * Output Args: None
>   *
>   * Return: None, TEST_ASSERT failures for all error conditions
>   *
> - * Within the VM specified by vm, removes the VCPU given by vcpuid.
> + * Removes a vCPU from a VM and frees its resources.
>   */
> -static void vm_vcpu_rm(struct kvm_vm *vm, struct vcpu *vcpu)
> +static void vm_vcpu_rm(struct vcpu *vcpu)
>  {
>  	int ret;
>  
> @@ -412,21 +412,17 @@ static void vm_vcpu_rm(struct kvm_vm *vm, struct vcpu *vcpu)
>  	TEST_ASSERT(ret == 0, "Close of VCPU fd failed, rc: %i "
>  		"errno: %i", ret, errno);
>  
> -	if (vcpu->next)
> -		vcpu->next->prev = vcpu->prev;
> -	if (vcpu->prev)
> -		vcpu->prev->next = vcpu->next;
> -	else
> -		vm->vcpu_head = vcpu->next;
> +	list_del(&vcpu->list);
>  	free(vcpu);
>  }
>  
>  void kvm_vm_release(struct kvm_vm *vmp)
>  {
> +	struct vcpu *vcpu, *tmp;
>  	int ret;
>  
> -	while (vmp->vcpu_head)
> -		vm_vcpu_rm(vmp, vmp->vcpu_head);
> +	list_for_each_entry_safe(vcpu, tmp, &vmp->vcpus, list)
> +		vm_vcpu_rm(vcpu);
>  
>  	ret = close(vmp->fd);
>  	TEST_ASSERT(ret == 0, "Close of vm fd failed,\n"
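The _safe variant is the right choice here since vm_vcpu_rm() frees the
entry; the plain iterator would advance via the just-freed node, i.e.
this naive version would be a use-after-free:

	/* broken: reads vcpu->list.next after vm_vcpu_rm() has freed vcpu */
	list_for_each_entry(vcpu, &vmp->vcpus, list)
		vm_vcpu_rm(vcpu);
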
> @@ -442,15 +438,15 @@ void kvm_vm_release(struct kvm_vm *vmp)
>   */
>  void kvm_vm_free(struct kvm_vm *vmp)
>  {
> +	struct userspace_mem_region *region, *tmp;
>  	int ret;
>  
>  	if (vmp == NULL)
>  		return;
>  
>  	/* Free userspace_mem_regions. */
> -	while (vmp->userspace_mem_region_head) {
> -		struct userspace_mem_region *region
> -			= vmp->userspace_mem_region_head;
> +	list_for_each_entry_safe(region, tmp, &vmp->userspace_mem_regions, list) {
> +		list_del(&region->list);
>  
>  		region->region.memory_size = 0;
>  		ret = ioctl(vmp->fd, KVM_SET_USER_MEMORY_REGION,
> @@ -458,7 +454,6 @@ void kvm_vm_free(struct kvm_vm *vmp)
>  		TEST_ASSERT(ret == 0, "KVM_SET_USER_MEMORY_REGION IOCTL failed, "
>  			"rc: %i errno: %i", ret, errno);
>  
> -		vmp->userspace_mem_region_head = region->next;
>  		sparsebit_free(&region->unused_phy_pages);
>  		ret = munmap(region->mmap_start, region->mmap_size);
>  		TEST_ASSERT(ret == 0, "munmap failed, rc: %i errno: %i",
> @@ -611,12 +606,10 @@ void vm_userspace_mem_region_add(struct kvm_vm *vm,
>  			(uint64_t) region->region.memory_size);
>  
>  	/* Confirm no region with the requested slot already exists. */
> -	for (region = vm->userspace_mem_region_head; region;
> -		region = region->next) {
> -		if (region->region.slot == slot)
> -			break;
> -	}
> -	if (region != NULL)
> +	list_for_each_entry(region, &vm->userspace_mem_regions, list) {
> +		if (region->region.slot != slot)
> +			continue;
> +
>  		TEST_FAIL("A mem region with the requested slot "
>  			"already exists.\n"
>  			"  requested slot: %u paddr: 0x%lx npages: 0x%lx\n"
> @@ -625,6 +618,7 @@ void vm_userspace_mem_region_add(struct kvm_vm *vm,
>  			region->region.slot,
>  			(uint64_t) region->region.guest_phys_addr,
>  			(uint64_t) region->region.memory_size);
> +	}
>  
>  	/* Allocate and initialize new mem region structure. */
>  	region = calloc(1, sizeof(*region));
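Folding the TEST_FAIL() into the loop body looks right. Unlike the old
open-coded walk, the list_for_each_entry() cursor is never NULL on loop
exit (it ends up pointing at a bogus entry computed from the list head),
so the old "check the cursor after the loop" style can't be carried over.
For illustration, this naive conversion would fire on every call:

	/* broken: 'region' is always non-NULL here, match or no match */
	list_for_each_entry(region, &vm->userspace_mem_regions, list)
		if (region->region.slot == slot)
			break;
	if (region != NULL)
		TEST_FAIL("A mem region with the requested slot already exists.");
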
> @@ -685,10 +679,7 @@ void vm_userspace_mem_region_add(struct kvm_vm *vm,
>  		guest_paddr, (uint64_t) region->region.memory_size);
>  
>  	/* Add to linked-list of memory regions. */
> -	if (vm->userspace_mem_region_head)
> -		vm->userspace_mem_region_head->prev = region;
> -	region->next = vm->userspace_mem_region_head;
> -	vm->userspace_mem_region_head = region;
> +	list_add(&region->list, &vm->userspace_mem_regions);
>  }
>  
>  /*
> @@ -711,20 +702,17 @@ memslot2region(struct kvm_vm *vm, uint32_t memslot)
>  {
>  	struct userspace_mem_region *region;
>  
> -	for (region = vm->userspace_mem_region_head; region;
> -		region = region->next) {
> +	list_for_each_entry(region, &vm->userspace_mem_regions, list) {
>  		if (region->region.slot == memslot)
> -			break;
> -	}
> -	if (region == NULL) {
> -		fprintf(stderr, "No mem region with the requested slot found,\n"
> -			"  requested slot: %u\n", memslot);
> -		fputs("---- vm dump ----\n", stderr);
> -		vm_dump(stderr, vm, 2);
> -		TEST_FAIL("Mem region not found");
> +			return region;
>  	}
>  
> -	return region;
> +	fprintf(stderr, "No mem region with the requested slot found,\n"
> +		"  requested slot: %u\n", memslot);
> +	fputs("---- vm dump ----\n", stderr);
> +	vm_dump(stderr, vm, 2);
> +	TEST_FAIL("Mem region not found");
> +	return NULL;
>  }
>  
>  /*
> @@ -862,10 +850,7 @@ void vm_vcpu_add(struct kvm_vm *vm, uint32_t vcpuid)
>  		"vcpu id: %u errno: %i", vcpuid, errno);
>  
>  	/* Add to linked-list of VCPUs. */
> -	if (vm->vcpu_head)
> -		vm->vcpu_head->prev = vcpu;
> -	vcpu->next = vm->vcpu_head;
> -	vm->vcpu_head = vcpu;
> +	list_add(&vcpu->list, &vm->vcpus);
>  }
>  
>  /*
> @@ -1058,8 +1043,8 @@ void virt_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr,
>  void *addr_gpa2hva(struct kvm_vm *vm, vm_paddr_t gpa)
>  {
>  	struct userspace_mem_region *region;
> -	for (region = vm->userspace_mem_region_head; region;
> -	     region = region->next) {
> +
> +	list_for_each_entry(region, &vm->userspace_mem_regions, list) {
>  		if ((gpa >= region->region.guest_phys_addr)
>  			&& (gpa <= (region->region.guest_phys_addr
>  				+ region->region.memory_size - 1)))
> @@ -1091,8 +1076,8 @@ void *addr_gpa2hva(struct kvm_vm *vm, vm_paddr_t gpa)
>  vm_paddr_t addr_hva2gpa(struct kvm_vm *vm, void *hva)
>  {
>  	struct userspace_mem_region *region;
> -	for (region = vm->userspace_mem_region_head; region;
> -	     region = region->next) {
> +
> +	list_for_each_entry(region, &vm->userspace_mem_regions, list) {
>  		if ((hva >= region->host_mem)
>  			&& (hva <= (region->host_mem
>  				+ region->region.memory_size - 1)))
> @@ -1519,8 +1504,7 @@ void vm_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent)
>  	fprintf(stream, "%*sfd: %i\n", indent, "", vm->fd);
>  	fprintf(stream, "%*spage_size: 0x%x\n", indent, "", vm->page_size);
>  	fprintf(stream, "%*sMem Regions:\n", indent, "");
> -	for (region = vm->userspace_mem_region_head; region;
> -		region = region->next) {
> +	list_for_each_entry(region, &vm->userspace_mem_regions, list) {
>  		fprintf(stream, "%*sguest_phys: 0x%lx size: 0x%lx "
>  			"host_virt: %p\n", indent + 2, "",
>  			(uint64_t) region->region.guest_phys_addr,
> @@ -1539,7 +1523,7 @@ void vm_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent)
>  		virt_dump(stream, vm, indent + 4);
>  	}
>  	fprintf(stream, "%*sVCPUs:\n", indent, "");
> -	for (vcpu = vm->vcpu_head; vcpu; vcpu = vcpu->next)
> +	list_for_each_entry(vcpu, &vm->vcpus, list)
>  		vcpu_dump(stream, vm, vcpu->id, indent + 2);
>  }
>  
> diff --git a/tools/testing/selftests/kvm/lib/kvm_util_internal.h b/tools/testing/selftests/kvm/lib/kvm_util_internal.h
> index ca56a0133127..2ef446520748 100644
> --- a/tools/testing/selftests/kvm/lib/kvm_util_internal.h
> +++ b/tools/testing/selftests/kvm/lib/kvm_util_internal.h
> @@ -13,7 +13,6 @@
>  #define KVM_DEV_PATH		"/dev/kvm"
>  
>  struct userspace_mem_region {
> -	struct userspace_mem_region *next, *prev;
>  	struct kvm_userspace_memory_region region;
>  	struct sparsebit *unused_phy_pages;
>  	int fd;
> @@ -21,10 +20,11 @@ struct userspace_mem_region {
>  	void *host_mem;
>  	void *mmap_start;
>  	size_t mmap_size;
> +	struct list_head list;
>  };
>  
>  struct vcpu {
> -	struct vcpu *next, *prev;
> +	struct list_head list;
>  	uint32_t id;
>  	int fd;
>  	struct kvm_run *state;
> @@ -41,8 +41,8 @@ struct kvm_vm {
>  	unsigned int pa_bits;
>  	unsigned int va_bits;
>  	uint64_t max_gfn;
> -	struct vcpu *vcpu_head;
> -	struct userspace_mem_region *userspace_mem_region_head;
> +	struct list_head vcpus;
> +	struct list_head userspace_mem_regions;
>  	struct sparsebit *vpages_valid;
>  	struct sparsebit *vpages_mapped;
>  	bool has_irqchip;
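With the nodes embedded like this, the iteration macros are just pointer
arithmetic around container_of(). From memory, list_for_each_entry()
expands to roughly:

	#define list_for_each_entry(pos, head, member)				\
		for (pos = list_first_entry(head, typeof(*pos), member);	\
		     &pos->member != (head);					\
		     pos = list_next_entry(pos, member))
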
> diff --git a/tools/testing/selftests/kvm/lib/s390x/processor.c b/tools/testing/selftests/kvm/lib/s390x/processor.c
> index 8d94961bd046..a88c5d665725 100644
> --- a/tools/testing/selftests/kvm/lib/s390x/processor.c
> +++ b/tools/testing/selftests/kvm/lib/s390x/processor.c
> @@ -233,7 +233,10 @@ void vcpu_args_set(struct kvm_vm *vm, uint32_t vcpuid, unsigned int num, ...)
>  
>  void vcpu_dump(FILE *stream, struct kvm_vm *vm, uint32_t vcpuid, uint8_t indent)
>  {
> -	struct vcpu *vcpu = vm->vcpu_head;
> +	struct vcpu *vcpu = vcpu_find(vm, vcpuid);
> +
> +	if (!vcpu)
> +		return;
>  
>  	fprintf(stream, "%*spstate: psw: 0x%.16llx:0x%.16llx\n",
>  		indent, "", vcpu->state->psw_mask, vcpu->state->psw_addr);
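This also fixes a latent bug, as vcpu_dump() previously ignored vcpuid
and always dumped the list head's vCPU. Though I'd consider asserting on
a failed lookup instead of silently returning, e.g. (untested):

	struct vcpu *vcpu = vcpu_find(vm, vcpuid);

	TEST_ASSERT(vcpu, "vcpu not found, vcpuid: %u", vcpuid);
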
> -- 
> 2.26.0
>

Reviewed-by: Andrew Jones <drjones@redhat.com>

