From: Vitaly Kuznetsov <vkuznets@redhat.com>
To: Siddharth Chandrasekaran <sidcha@amazon.de>
Cc: Alexander Graf <graf@amazon.com>,
	Evgeny Iakovlev <eyakovl@amazon.de>,
	Liran Alon <liran@amazon.com>,
	Ioannis Aslanidis <iaslan@amazon.de>,
	linux-hyperv@vger.kernel.org, linux-kernel@vger.kernel.org,
	kvm@vger.kernel.org, "K. Y. Srinivasan" <kys@microsoft.com>,
	Haiyang Zhang <haiyangz@microsoft.com>,
	Stephen Hemminger <sthemmin@microsoft.com>,
	Wei Liu <wei.liu@kernel.org>,
	Thomas Gleixner <tglx@linutronix.de>,
	Ingo Molnar <mingo@redhat.com>, Borislav Petkov <bp@alien8.de>,
	x86@kernel.org, "H. Peter Anvin" <hpa@zytor.com>,
	Paolo Bonzini <pbonzini@redhat.com>,
	Sean Christopherson <seanjc@google.com>,
	Wanpeng Li <wanpengli@tencent.com>,
	Jim Mattson <jmattson@google.com>, Joerg Roedel <joro@8bytes.org>
Subject: Re: [PATCH v2 1/4] KVM: x86: Move FPU register accessors into fpu.h
Date: Tue, 13 Apr 2021 15:40:56 +0200	[thread overview]
Message-ID: <87y2dm5ml3.fsf@vitty.brq.redhat.com> (raw)
In-Reply-To: <5d2945df9dd807dca45ab256c88aeb4430ecf508.1618244920.git.sidcha@amazon.de>

Siddharth Chandrasekaran <sidcha@amazon.de> writes:

> Hyper-V XMM fast hypercalls use XMM registers to pass input/output
> parameters. To access these, hyperv.c can reuse some of the FPU register
> accessors defined in emulate.c. Move them to a common location so that
> both files can access them.
>
> While at it, reorder the parameters of these accessor methods to make
> them more readable.
>
> Cc: Alexander Graf <graf@amazon.com>
> Cc: Evgeny Iakovlev <eyakovl@amazon.de>
> Signed-off-by: Siddharth Chandrasekaran <sidcha@amazon.de>
> ---
>  arch/x86/kvm/emulate.c | 138 ++++++----------------------------------
>  arch/x86/kvm/fpu.h     | 140 +++++++++++++++++++++++++++++++++++++++++
>  2 files changed, 158 insertions(+), 120 deletions(-)
>  create mode 100644 arch/x86/kvm/fpu.h
>
> diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
> index f7970ba6219f..296f8f3ce988 100644
> --- a/arch/x86/kvm/emulate.c
> +++ b/arch/x86/kvm/emulate.c
> @@ -22,7 +22,6 @@
>  #include "kvm_cache_regs.h"
>  #include "kvm_emulate.h"
>  #include <linux/stringify.h>
> -#include <asm/fpu/api.h>
>  #include <asm/debugreg.h>
>  #include <asm/nospec-branch.h>
>  
> @@ -30,6 +29,7 @@
>  #include "tss.h"
>  #include "mmu.h"
>  #include "pmu.h"
> +#include "fpu.h"
>  
>  /*
>   * Operand types
> @@ -1081,116 +1081,14 @@ static void fetch_register_operand(struct operand *op)
>  	}
>  }
>  
> -static void emulator_get_fpu(void)
> -{
> -	fpregs_lock();
> -
> -	fpregs_assert_state_consistent();
> -	if (test_thread_flag(TIF_NEED_FPU_LOAD))
> -		switch_fpu_return();
> -}
> -
> -static void emulator_put_fpu(void)
> -{
> -	fpregs_unlock();
> -}
> -
> -static void read_sse_reg(sse128_t *data, int reg)
> -{
> -	emulator_get_fpu();
> -	switch (reg) {
> -	case 0: asm("movdqa %%xmm0, %0" : "=m"(*data)); break;
> -	case 1: asm("movdqa %%xmm1, %0" : "=m"(*data)); break;
> -	case 2: asm("movdqa %%xmm2, %0" : "=m"(*data)); break;
> -	case 3: asm("movdqa %%xmm3, %0" : "=m"(*data)); break;
> -	case 4: asm("movdqa %%xmm4, %0" : "=m"(*data)); break;
> -	case 5: asm("movdqa %%xmm5, %0" : "=m"(*data)); break;
> -	case 6: asm("movdqa %%xmm6, %0" : "=m"(*data)); break;
> -	case 7: asm("movdqa %%xmm7, %0" : "=m"(*data)); break;
> -#ifdef CONFIG_X86_64
> -	case 8: asm("movdqa %%xmm8, %0" : "=m"(*data)); break;
> -	case 9: asm("movdqa %%xmm9, %0" : "=m"(*data)); break;
> -	case 10: asm("movdqa %%xmm10, %0" : "=m"(*data)); break;
> -	case 11: asm("movdqa %%xmm11, %0" : "=m"(*data)); break;
> -	case 12: asm("movdqa %%xmm12, %0" : "=m"(*data)); break;
> -	case 13: asm("movdqa %%xmm13, %0" : "=m"(*data)); break;
> -	case 14: asm("movdqa %%xmm14, %0" : "=m"(*data)); break;
> -	case 15: asm("movdqa %%xmm15, %0" : "=m"(*data)); break;
> -#endif
> -	default: BUG();
> -	}
> -	emulator_put_fpu();
> -}
> -
> -static void write_sse_reg(sse128_t *data, int reg)
> -{
> -	emulator_get_fpu();
> -	switch (reg) {
> -	case 0: asm("movdqa %0, %%xmm0" : : "m"(*data)); break;
> -	case 1: asm("movdqa %0, %%xmm1" : : "m"(*data)); break;
> -	case 2: asm("movdqa %0, %%xmm2" : : "m"(*data)); break;
> -	case 3: asm("movdqa %0, %%xmm3" : : "m"(*data)); break;
> -	case 4: asm("movdqa %0, %%xmm4" : : "m"(*data)); break;
> -	case 5: asm("movdqa %0, %%xmm5" : : "m"(*data)); break;
> -	case 6: asm("movdqa %0, %%xmm6" : : "m"(*data)); break;
> -	case 7: asm("movdqa %0, %%xmm7" : : "m"(*data)); break;
> -#ifdef CONFIG_X86_64
> -	case 8: asm("movdqa %0, %%xmm8" : : "m"(*data)); break;
> -	case 9: asm("movdqa %0, %%xmm9" : : "m"(*data)); break;
> -	case 10: asm("movdqa %0, %%xmm10" : : "m"(*data)); break;
> -	case 11: asm("movdqa %0, %%xmm11" : : "m"(*data)); break;
> -	case 12: asm("movdqa %0, %%xmm12" : : "m"(*data)); break;
> -	case 13: asm("movdqa %0, %%xmm13" : : "m"(*data)); break;
> -	case 14: asm("movdqa %0, %%xmm14" : : "m"(*data)); break;
> -	case 15: asm("movdqa %0, %%xmm15" : : "m"(*data)); break;
> -#endif
> -	default: BUG();
> -	}
> -	emulator_put_fpu();
> -}
> -
> -static void read_mmx_reg(u64 *data, int reg)
> -{
> -	emulator_get_fpu();
> -	switch (reg) {
> -	case 0: asm("movq %%mm0, %0" : "=m"(*data)); break;
> -	case 1: asm("movq %%mm1, %0" : "=m"(*data)); break;
> -	case 2: asm("movq %%mm2, %0" : "=m"(*data)); break;
> -	case 3: asm("movq %%mm3, %0" : "=m"(*data)); break;
> -	case 4: asm("movq %%mm4, %0" : "=m"(*data)); break;
> -	case 5: asm("movq %%mm5, %0" : "=m"(*data)); break;
> -	case 6: asm("movq %%mm6, %0" : "=m"(*data)); break;
> -	case 7: asm("movq %%mm7, %0" : "=m"(*data)); break;
> -	default: BUG();
> -	}
> -	emulator_put_fpu();
> -}
> -
> -static void write_mmx_reg(u64 *data, int reg)
> -{
> -	emulator_get_fpu();
> -	switch (reg) {
> -	case 0: asm("movq %0, %%mm0" : : "m"(*data)); break;
> -	case 1: asm("movq %0, %%mm1" : : "m"(*data)); break;
> -	case 2: asm("movq %0, %%mm2" : : "m"(*data)); break;
> -	case 3: asm("movq %0, %%mm3" : : "m"(*data)); break;
> -	case 4: asm("movq %0, %%mm4" : : "m"(*data)); break;
> -	case 5: asm("movq %0, %%mm5" : : "m"(*data)); break;
> -	case 6: asm("movq %0, %%mm6" : : "m"(*data)); break;
> -	case 7: asm("movq %0, %%mm7" : : "m"(*data)); break;
> -	default: BUG();
> -	}
> -	emulator_put_fpu();
> -}
> -
>  static int em_fninit(struct x86_emulate_ctxt *ctxt)
>  {
>  	if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
>  		return emulate_nm(ctxt);
>  
> -	emulator_get_fpu();
> +	kvm_fpu_get();
>  	asm volatile("fninit");
> -	emulator_put_fpu();
> +	kvm_fpu_put();
>  	return X86EMUL_CONTINUE;
>  }
>  
> @@ -1201,9 +1099,9 @@ static int em_fnstcw(struct x86_emulate_ctxt *ctxt)
>  	if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
>  		return emulate_nm(ctxt);
>  
> -	emulator_get_fpu();
> +	kvm_fpu_get();
>  	asm volatile("fnstcw %0": "+m"(fcw));
> -	emulator_put_fpu();
> +	kvm_fpu_put();
>  
>  	ctxt->dst.val = fcw;
>  
> @@ -1217,9 +1115,9 @@ static int em_fnstsw(struct x86_emulate_ctxt *ctxt)
>  	if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
>  		return emulate_nm(ctxt);
>  
> -	emulator_get_fpu();
> +	kvm_fpu_get();
>  	asm volatile("fnstsw %0": "+m"(fsw));
> -	emulator_put_fpu();
> +	kvm_fpu_put();
>  
>  	ctxt->dst.val = fsw;
>  
> @@ -1238,7 +1136,7 @@ static void decode_register_operand(struct x86_emulate_ctxt *ctxt,
>  		op->type = OP_XMM;
>  		op->bytes = 16;
>  		op->addr.xmm = reg;
> -		read_sse_reg(&op->vec_val, reg);
> +		kvm_read_sse_reg(reg, &op->vec_val);
>  		return;
>  	}
>  	if (ctxt->d & Mmx) {
> @@ -1289,7 +1187,7 @@ static int decode_modrm(struct x86_emulate_ctxt *ctxt,
>  			op->type = OP_XMM;
>  			op->bytes = 16;
>  			op->addr.xmm = ctxt->modrm_rm;
> -			read_sse_reg(&op->vec_val, ctxt->modrm_rm);
> +			kvm_read_sse_reg(ctxt->modrm_rm, &op->vec_val);
>  			return rc;
>  		}
>  		if (ctxt->d & Mmx) {
> @@ -1866,10 +1764,10 @@ static int writeback(struct x86_emulate_ctxt *ctxt, struct operand *op)
>  				       op->bytes * op->count);
>  		break;
>  	case OP_XMM:
> -		write_sse_reg(&op->vec_val, op->addr.xmm);
> +		kvm_write_sse_reg(op->addr.xmm, &op->vec_val);
>  		break;
>  	case OP_MM:
> -		write_mmx_reg(&op->mm_val, op->addr.mm);
> +		kvm_write_mmx_reg(op->addr.mm, &op->mm_val);
>  		break;
>  	case OP_NONE:
>  		/* no writeback */
> @@ -4124,11 +4022,11 @@ static int em_fxsave(struct x86_emulate_ctxt *ctxt)
>  	if (rc != X86EMUL_CONTINUE)
>  		return rc;
>  
> -	emulator_get_fpu();
> +	kvm_fpu_get();
>  
>  	rc = asm_safe("fxsave %[fx]", , [fx] "+m"(fx_state));
>  
> -	emulator_put_fpu();
> +	kvm_fpu_put();
>  
>  	if (rc != X86EMUL_CONTINUE)
>  		return rc;
> @@ -4172,7 +4070,7 @@ static int em_fxrstor(struct x86_emulate_ctxt *ctxt)
>  	if (rc != X86EMUL_CONTINUE)
>  		return rc;
>  
> -	emulator_get_fpu();
> +	kvm_fpu_get();
>  
>  	if (size < __fxstate_size(16)) {
>  		rc = fxregs_fixup(&fx_state, size);
> @@ -4189,7 +4087,7 @@ static int em_fxrstor(struct x86_emulate_ctxt *ctxt)
>  		rc = asm_safe("fxrstor %[fx]", : [fx] "m"(fx_state));
>  
>  out:
> -	emulator_put_fpu();
> +	kvm_fpu_put();
>  
>  	return rc;
>  }
> @@ -5510,9 +5408,9 @@ static int flush_pending_x87_faults(struct x86_emulate_ctxt *ctxt)
>  {
>  	int rc;
>  
> -	emulator_get_fpu();
> +	kvm_fpu_get();
>  	rc = asm_safe("fwait");
> -	emulator_put_fpu();
> +	kvm_fpu_put();
>  
>  	if (unlikely(rc != X86EMUL_CONTINUE))
>  		return emulate_exception(ctxt, MF_VECTOR, 0, false);
> @@ -5523,7 +5421,7 @@ static int flush_pending_x87_faults(struct x86_emulate_ctxt *ctxt)
>  static void fetch_possible_mmx_operand(struct operand *op)
>  {
>  	if (op->type == OP_MM)
> -		read_mmx_reg(&op->mm_val, op->addr.mm);
> +		kvm_read_mmx_reg(op->addr.mm, &op->mm_val);
>  }
>  
>  static int fastop(struct x86_emulate_ctxt *ctxt, fastop_t fop)
> diff --git a/arch/x86/kvm/fpu.h b/arch/x86/kvm/fpu.h
> new file mode 100644
> index 000000000000..3ba12888bf66
> --- /dev/null
> +++ b/arch/x86/kvm/fpu.h
> @@ -0,0 +1,140 @@
> +/* SPDX-License-Identifier: GPL-2.0 */
> +
> +#ifndef __KVM_FPU_H_
> +#define __KVM_FPU_H_
> +
> +#include <asm/fpu/api.h>
> +
> +typedef u32		__attribute__((vector_size(16))) sse128_t;

Post-patch we seem to have two definitions of 'sse128_t':

$ git grep sse128_t HEAD~3
HEAD~3:arch/x86/kvm/fpu.h:typedef u32           __attribute__((vector_size(16))) sse128_t;
HEAD~3:arch/x86/kvm/fpu.h:#define __sse128_u    union { sse128_t vec; u64 as_u64[2]; u32 as_u32[4]; }
HEAD~3:arch/x86/kvm/fpu.h:static inline void _kvm_read_sse_reg(int reg, sse128_t *data)
HEAD~3:arch/x86/kvm/fpu.h:static inline void _kvm_write_sse_reg(int reg, const sse128_t *data)
HEAD~3:arch/x86/kvm/fpu.h:static inline void kvm_read_sse_reg(int reg, sse128_t *data)
HEAD~3:arch/x86/kvm/fpu.h:static inline void kvm_write_sse_reg(int reg, const sse128_t *data)
HEAD~3:arch/x86/kvm/kvm_emulate.h:typedef u32 __attribute__((vector_size(16))) sse128_t;
HEAD~3:arch/x86/kvm/kvm_emulate.h:              char valptr[sizeof(sse128_t)];
HEAD~3:arch/x86/kvm/kvm_emulate.h:              sse128_t vec_val;

Should the one from kvm_emulate.h go away?
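
(Untested sketch, just to illustrate: assuming nothing in kvm_emulate.h
needs sse128_t before fpu.h can be pulled in, and that including fpu.h
there doesn't create header ordering problems, the duplicate typedef
could simply be replaced with the new header:

    --- a/arch/x86/kvm/kvm_emulate.h
    +++ b/arch/x86/kvm/kvm_emulate.h
    @@ ... @@
    -typedef u32 __attribute__((vector_size(16))) sse128_t;
    +#include "fpu.h"

so that the one definition in fpu.h is the only one left.)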

> +#define __sse128_u	union { sse128_t vec; u64 as_u64[2]; u32 as_u32[4]; }
> +#define sse128_lo(x)	({ __sse128_u t; t.vec = x; t.as_u64[0]; })
> +#define sse128_hi(x)	({ __sse128_u t; t.vec = x; t.as_u64[1]; })
> +#define sse128_l0(x)	({ __sse128_u t; t.vec = x; t.as_u32[0]; })
> +#define sse128_l1(x)	({ __sse128_u t; t.vec = x; t.as_u32[1]; })
> +#define sse128_l2(x)	({ __sse128_u t; t.vec = x; t.as_u32[2]; })
> +#define sse128_l3(x)	({ __sse128_u t; t.vec = x; t.as_u32[3]; })
> +#define sse128(lo, hi)	({ __sse128_u t; t.as_u64[0] = lo; t.as_u64[1] = hi; t.vec; })
> +
> +static inline void _kvm_read_sse_reg(int reg, sse128_t *data)
> +{
> +	switch (reg) {
> +	case 0: asm("movdqa %%xmm0, %0" : "=m"(*data)); break;
> +	case 1: asm("movdqa %%xmm1, %0" : "=m"(*data)); break;
> +	case 2: asm("movdqa %%xmm2, %0" : "=m"(*data)); break;
> +	case 3: asm("movdqa %%xmm3, %0" : "=m"(*data)); break;
> +	case 4: asm("movdqa %%xmm4, %0" : "=m"(*data)); break;
> +	case 5: asm("movdqa %%xmm5, %0" : "=m"(*data)); break;
> +	case 6: asm("movdqa %%xmm6, %0" : "=m"(*data)); break;
> +	case 7: asm("movdqa %%xmm7, %0" : "=m"(*data)); break;
> +#ifdef CONFIG_X86_64
> +	case 8: asm("movdqa %%xmm8, %0" : "=m"(*data)); break;
> +	case 9: asm("movdqa %%xmm9, %0" : "=m"(*data)); break;
> +	case 10: asm("movdqa %%xmm10, %0" : "=m"(*data)); break;
> +	case 11: asm("movdqa %%xmm11, %0" : "=m"(*data)); break;
> +	case 12: asm("movdqa %%xmm12, %0" : "=m"(*data)); break;
> +	case 13: asm("movdqa %%xmm13, %0" : "=m"(*data)); break;
> +	case 14: asm("movdqa %%xmm14, %0" : "=m"(*data)); break;
> +	case 15: asm("movdqa %%xmm15, %0" : "=m"(*data)); break;
> +#endif
> +	default: BUG();
> +	}
> +}
> +
> +static inline void _kvm_write_sse_reg(int reg, const sse128_t *data)
> +{
> +	switch (reg) {
> +	case 0: asm("movdqa %0, %%xmm0" : : "m"(*data)); break;
> +	case 1: asm("movdqa %0, %%xmm1" : : "m"(*data)); break;
> +	case 2: asm("movdqa %0, %%xmm2" : : "m"(*data)); break;
> +	case 3: asm("movdqa %0, %%xmm3" : : "m"(*data)); break;
> +	case 4: asm("movdqa %0, %%xmm4" : : "m"(*data)); break;
> +	case 5: asm("movdqa %0, %%xmm5" : : "m"(*data)); break;
> +	case 6: asm("movdqa %0, %%xmm6" : : "m"(*data)); break;
> +	case 7: asm("movdqa %0, %%xmm7" : : "m"(*data)); break;
> +#ifdef CONFIG_X86_64
> +	case 8: asm("movdqa %0, %%xmm8" : : "m"(*data)); break;
> +	case 9: asm("movdqa %0, %%xmm9" : : "m"(*data)); break;
> +	case 10: asm("movdqa %0, %%xmm10" : : "m"(*data)); break;
> +	case 11: asm("movdqa %0, %%xmm11" : : "m"(*data)); break;
> +	case 12: asm("movdqa %0, %%xmm12" : : "m"(*data)); break;
> +	case 13: asm("movdqa %0, %%xmm13" : : "m"(*data)); break;
> +	case 14: asm("movdqa %0, %%xmm14" : : "m"(*data)); break;
> +	case 15: asm("movdqa %0, %%xmm15" : : "m"(*data)); break;
> +#endif
> +	default: BUG();
> +	}
> +}
> +
> +static inline void _kvm_read_mmx_reg(int reg, u64 *data)
> +{
> +	switch (reg) {
> +	case 0: asm("movq %%mm0, %0" : "=m"(*data)); break;
> +	case 1: asm("movq %%mm1, %0" : "=m"(*data)); break;
> +	case 2: asm("movq %%mm2, %0" : "=m"(*data)); break;
> +	case 3: asm("movq %%mm3, %0" : "=m"(*data)); break;
> +	case 4: asm("movq %%mm4, %0" : "=m"(*data)); break;
> +	case 5: asm("movq %%mm5, %0" : "=m"(*data)); break;
> +	case 6: asm("movq %%mm6, %0" : "=m"(*data)); break;
> +	case 7: asm("movq %%mm7, %0" : "=m"(*data)); break;
> +	default: BUG();
> +	}
> +}
> +
> +static inline void _kvm_write_mmx_reg(int reg, const u64 *data)
> +{
> +	switch (reg) {
> +	case 0: asm("movq %0, %%mm0" : : "m"(*data)); break;
> +	case 1: asm("movq %0, %%mm1" : : "m"(*data)); break;
> +	case 2: asm("movq %0, %%mm2" : : "m"(*data)); break;
> +	case 3: asm("movq %0, %%mm3" : : "m"(*data)); break;
> +	case 4: asm("movq %0, %%mm4" : : "m"(*data)); break;
> +	case 5: asm("movq %0, %%mm5" : : "m"(*data)); break;
> +	case 6: asm("movq %0, %%mm6" : : "m"(*data)); break;
> +	case 7: asm("movq %0, %%mm7" : : "m"(*data)); break;
> +	default: BUG();
> +	}
> +}
> +
> +static inline void kvm_fpu_get(void)
> +{
> +	fpregs_lock();
> +
> +	fpregs_assert_state_consistent();
> +	if (test_thread_flag(TIF_NEED_FPU_LOAD))
> +		switch_fpu_return();
> +}
> +
> +static inline void kvm_fpu_put(void)
> +{
> +	fpregs_unlock();
> +}
> +
> +static inline void kvm_read_sse_reg(int reg, sse128_t *data)
> +{
> +	kvm_fpu_get();
> +	_kvm_read_sse_reg(reg, data);
> +	kvm_fpu_put();
> +}
> +
> +static inline void kvm_write_sse_reg(int reg, const sse128_t *data)
> +{
> +	kvm_fpu_get();
> +	_kvm_write_sse_reg(reg, data);
> +	kvm_fpu_put();
> +}
> +
> +static inline void kvm_read_mmx_reg(int reg, u64 *data)
> +{
> +	kvm_fpu_get();
> +	_kvm_read_mmx_reg(reg, data);
> +	kvm_fpu_put();
> +}
> +
> +static inline void kvm_write_mmx_reg(int reg, const u64 *data)
> +{
> +	kvm_fpu_get();
> +	_kvm_write_mmx_reg(reg, data);
> +	kvm_fpu_put();
> +}
> +
> +#endif

-- 
Vitaly

