From: "Maciej S. Szmigiero" <mail@maciej.szmigiero.name>
To: Gavin Shan <gshan@redhat.com>
Cc: kvm@vger.kernel.org, maz@kernel.org,
	linux-kernel@vger.kernel.org, zhenyzha@redhat.com,
	shan.gavin@gmail.com, kvmarm@lists.linux.dev,
	pbonzini@redhat.com, shuah@kernel.org,
	kvmarm@lists.cs.columbia.edu, ajones@ventanamicro.com
Subject: Re: [PATCH 4/6] KVM: selftests: memslot_perf_test: Support variable guest page size
Date: Mon, 17 Oct 2022 23:31:57 +0200	[thread overview]
Message-ID: <3eecebca-a526-d10a-02d3-496ce919d577@maciej.szmigiero.name> (raw)
In-Reply-To: <20221014071914.227134-5-gshan@redhat.com>

On 14.10.2022 09:19, Gavin Shan wrote:
> The test case is obviously broken on aarch64 because it assumes a 4KB
> guest page size, while the guest page size on aarch64 can be 4KB, 16KB
> or 64KB.
> 
> This adds support for a variable guest page size, mostly for aarch64.
> 
>    - The host determines the guest page size when the virtual machine is
>      created. The value is also passed to the guest through the
>      synchronization area.
> 
>    - The number of guest pages is unknown until the virtual machine is
>      about to be created. So all the related macros are dropped. Instead,
>      their values are dynamically calculated based on the guest page
>      size.
> 
>    - The static checks on memory sizes and pages become dependent on the
>      guest page size, which is unknown until the virtual machine is
>      about to be created. So all the static checks are converted to
>      dynamic checks, done in check_memory_sizes().
> 
>    - As the address passed to madvise() should be aligned to the host
>      page size, the size of the page chunk is selected automatically
>      instead of being a single page.
> 
>    - All other changes included in this patch are mostly mechanical,
>      replacing '4096' with 'guest_page_size'.
> 
> Signed-off-by: Gavin Shan <gshan@redhat.com>
> ---
>   .../testing/selftests/kvm/memslot_perf_test.c | 191 +++++++++++-------
>   1 file changed, 115 insertions(+), 76 deletions(-)
> 
> diff --git a/tools/testing/selftests/kvm/memslot_perf_test.c b/tools/testing/selftests/kvm/memslot_perf_test.c
> index d5aa9148f96f..d587bd952ff9 100644
> --- a/tools/testing/selftests/kvm/memslot_perf_test.c
> +++ b/tools/testing/selftests/kvm/memslot_perf_test.c
> @@ -26,14 +26,11 @@
>   #include <processor.h>
>   
>   #define MEM_SIZE		((512U << 20) + 4096)
> -#define MEM_SIZE_PAGES		(MEM_SIZE / 4096)
>   #define MEM_GPA		0x10000000UL
>   #define MEM_AUX_GPA		MEM_GPA
>   #define MEM_SYNC_GPA		MEM_AUX_GPA
>   #define MEM_TEST_GPA		(MEM_AUX_GPA + 4096)
>   #define MEM_TEST_SIZE		(MEM_SIZE - 4096)
> -static_assert(MEM_SIZE % 4096 == 0, "invalid mem size");
> -static_assert(MEM_TEST_SIZE % 4096 == 0, "invalid mem test size");
>   
>   /*
>    * 32 MiB is max size that gets well over 100 iterations on 509 slots.
> @@ -42,29 +39,16 @@ static_assert(MEM_TEST_SIZE % 4096 == 0, "invalid mem test size");
>    * limited resolution).
>    */
>   #define MEM_SIZE_MAP		((32U << 20) + 4096)
> -#define MEM_SIZE_MAP_PAGES	(MEM_SIZE_MAP / 4096)
>   #define MEM_TEST_MAP_SIZE	(MEM_SIZE_MAP - 4096)
> -#define MEM_TEST_MAP_SIZE_PAGES (MEM_TEST_MAP_SIZE / 4096)
> -static_assert(MEM_SIZE_MAP % 4096 == 0, "invalid map test region size");
> -static_assert(MEM_TEST_MAP_SIZE % 4096 == 0, "invalid map test region size");
> -static_assert(MEM_TEST_MAP_SIZE_PAGES % 2 == 0, "invalid map test region size");
> -static_assert(MEM_TEST_MAP_SIZE_PAGES > 2, "invalid map test region size");
>   
>   /*
>    * 128 MiB is min size that fills 32k slots with at least one page in each
>    * while at the same time gets 100+ iterations in such test
> + *
> + * 2 MiB chunk size like a typical huge page
>    */
>   #define MEM_TEST_UNMAP_SIZE		(128U << 20)
> -#define MEM_TEST_UNMAP_SIZE_PAGES	(MEM_TEST_UNMAP_SIZE / 4096)
> -/* 2 MiB chunk size like a typical huge page */
> -#define MEM_TEST_UNMAP_CHUNK_PAGES	(2U << (20 - 12))
> -static_assert(MEM_TEST_UNMAP_SIZE <= MEM_TEST_SIZE,
> -	      "invalid unmap test region size");
> -static_assert(MEM_TEST_UNMAP_SIZE % 4096 == 0,
> -	      "invalid unmap test region size");
> -static_assert(MEM_TEST_UNMAP_SIZE_PAGES %
> -	      (2 * MEM_TEST_UNMAP_CHUNK_PAGES) == 0,
> -	      "invalid unmap test region size");
> +#define MEM_TEST_UNMAP_CHUNK_SIZE	(2U << 20)
>   
>   /*
>    * For the move active test the middle of the test area is placed on
> @@ -77,8 +61,7 @@ static_assert(MEM_TEST_UNMAP_SIZE_PAGES %
>    * for the total size of 25 pages.
>    * Hence, the maximum size here is 50 pages.
>    */
> -#define MEM_TEST_MOVE_SIZE_PAGES	(50)
> -#define MEM_TEST_MOVE_SIZE		(MEM_TEST_MOVE_SIZE_PAGES * 4096)
> +#define MEM_TEST_MOVE_SIZE		0x32000

The above number seems less readable than an explicit value of 50 pages.

In addition, it's 50 pages only with a 4k guest page size, so at the very
least the comment above needs to be updated to reflect this.
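
For illustration only (just a sketch of the direction, not a requirement),
something along these lines keeps the page count visible and makes the 4k
assumption explicit:

	/*
	 * 50 pages at the default 4 KiB guest page size; with larger guest
	 * page sizes the region covers correspondingly fewer pages.
	 */
	#define MEM_TEST_MOVE_SIZE		(50 * 4096)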

>   #define MEM_TEST_MOVE_GPA_DEST		(MEM_GPA + MEM_SIZE)
>   static_assert(MEM_TEST_MOVE_SIZE <= MEM_TEST_SIZE,
>   	      "invalid move test region size");
(...)
> @@ -242,33 +229,34 @@ static struct vm_data *alloc_vm(void)
>   }
>   
>   static bool prepare_vm(struct vm_data *data, int nslots, uint64_t *maxslots,
> -		       void *guest_code, uint64_t mempages,
> +		       void *guest_code, uint64_t mem_size,
>   		       struct timespec *slot_runtime)
>   {
> -	uint64_t rempages;
> +	uint64_t mempages, rempages;
>   	uint64_t guest_addr;
> -	uint32_t slot;
> +	uint32_t slot, guest_page_size;
>   	struct timespec tstart;
>   	struct sync_area *sync;
>   
> -	TEST_ASSERT(mempages > 1,
> -		    "Can't test without any memory");
> +	guest_page_size = vm_guest_mode_params[VM_MODE_DEFAULT].page_size;
> +	mempages = mem_size / guest_page_size;
> +
> +	data->vm = __vm_create_with_one_vcpu(&data->vcpu, mempages, guest_code);
> +	ucall_init(data->vm, NULL);
>

Adding
TEST_ASSERT(data->vm->page_size == guest_page_size, "Invalid VM page size");
here would catch the case where someone accidentally modifies
__vm_create_with_one_vcpu() to use a page size other than the one specified
for VM_MODE_DEFAULT.
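
Roughly like this (only a sketch of the placement I have in mind, on top of
this patch):

	data->vm = __vm_create_with_one_vcpu(&data->vcpu, mempages, guest_code);
	ucall_init(data->vm, NULL);
	TEST_ASSERT(data->vm->page_size == guest_page_size,
		    "Invalid VM page size");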

>   	data->npages = mempages;
> +	TEST_ASSERT(data->npages > 1, "Can't test without any memory");
>   	data->nslots = nslots;
> -	data->pages_per_slot = mempages / data->nslots;
> +	data->pages_per_slot = data->npages / data->nslots;
>   	if (!data->pages_per_slot) {
> -		*maxslots = mempages + 1;
> +		*maxslots = data->npages + 1;
>   		return false;
>   	}
>   
> -	rempages = mempages % data->nslots;
> +	rempages = data->npages % data->nslots;
>   	data->hva_slots = malloc(sizeof(*data->hva_slots) * data->nslots);
>   	TEST_ASSERT(data->hva_slots, "malloc() fail");
>   
> -	data->vm = __vm_create_with_one_vcpu(&data->vcpu, mempages, guest_code);
> -	ucall_init(data->vm, NULL);
> -
>   	pr_info_v("Adding slots 1..%i, each slot with %"PRIu64" pages + %"PRIu64" extra pages last\n",
>   		data->nslots, data->pages_per_slot, rempages);
>   
(...)
> @@ -856,6 +863,35 @@ static void help(char *name, struct test_args *targs)
>   		pr_info("%d: %s\n", ctr, tests[ctr].name);
>   }
>   
> +static bool check_memory_sizes(void)
> +{
> +	uint32_t guest_page_size = vm_guest_mode_params[VM_MODE_DEFAULT].page_size;
> +
> +	if (MEM_SIZE % guest_page_size ||
> +	    MEM_TEST_SIZE % guest_page_size) {
> +		pr_info("invalid MEM_SIZE or MEM_TEST_SIZE\n");
> +		return false;
> +	}
> +
> +	if (MEM_SIZE_MAP % guest_page_size		||
> +	    MEM_TEST_MAP_SIZE % guest_page_size		||
> +	    (MEM_TEST_MAP_SIZE / guest_page_size) <= 2	||
> +	    (MEM_TEST_MAP_SIZE / guest_page_size) % 2) {
> +		pr_info("invalid MEM_SIZE_MAP or MEM_TEST_MAP_SIZE\n");
> +		return false;
> +	}
> +
> +	if (MEM_TEST_UNMAP_SIZE > MEM_TEST_SIZE		||
> +	    MEM_TEST_UNMAP_SIZE % guest_page_size	||
> +	    (MEM_TEST_UNMAP_SIZE / guest_page_size) %
> +	    (MEM_TEST_UNMAP_CHUNK_SIZE / guest_page_size)) {

This should be (MEM_TEST_UNMAP_SIZE / guest_page_size) %
(2 * MEM_TEST_UNMAP_CHUNK_SIZE / guest_page_size) to match the old
static_assert().
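
I.e. something like this (sketch only; the rest of the condition stays as
in this patch):

	if (MEM_TEST_UNMAP_SIZE > MEM_TEST_SIZE		||
	    MEM_TEST_UNMAP_SIZE % guest_page_size	||
	    (MEM_TEST_UNMAP_SIZE / guest_page_size) %
	    (2 * MEM_TEST_UNMAP_CHUNK_SIZE / guest_page_size)) {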

Thanks,
Maciej
