From: Paolo Bonzini <pbonzini@redhat.com>
To: Eduardo Habkost <ehabkost@redhat.com>, qemu-devel@nongnu.org
Cc: kvm@vger.kernel.org, Huaitong Han <huaitong.han@intel.com>
Subject: Re: [PATCH v2 0/3] target-i386: Use C struct for xsave area layout, offsets & sizes
Date: Tue, 1 Dec 2015 11:22:31 +0100	[thread overview]
Message-ID: <565D74E7.6070403@redhat.com> (raw)
In-Reply-To: <1448904887-4977-1-git-send-email-ehabkost@redhat.com>

On 30/11/2015 18:34, Eduardo Habkost wrote:
> target-i386/cpu.c:ext_save_area uses magic numbers for the xsave
> area offsets and sizes, and target-i386/kvm.c:kvm_{put,get}_xsave()
> uses offset macros and bit manipulation to access the xsave area.
> This series changes both to use C structs for those operations.
> 
> I still need to figure out a way to write unit tests for the new
> code. Maybe I will just copy and paste the new and old functions,
> and test them locally (checking if they give the same results
> when translating blobs of random bytes).
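> 
> Roughly, a minimal sketch of that comparison (load_xsave_old() and
> load_xsave_new() are just placeholder names for copies of the old and
> new kvm_get_xsave() translation code; assumes cpu.h plus <stdlib.h>,
> <string.h> and <assert.h>):
> 
>   /* Fill an xsave blob with random bytes, run both translators into
>    * zeroed CPU states, and check that they produce the same result. */
>   static void check_xsave_translation(void)
>   {
>       uint8_t blob[4096];
>       CPUX86State old_env, new_env;
>       size_t i;
> 
>       for (i = 0; i < sizeof(blob); i++) {
>           blob[i] = rand() & 0xff;
>       }
>       memset(&old_env, 0, sizeof(old_env));
>       memset(&new_env, 0, sizeof(new_env));
> 
>       load_xsave_old(&old_env, blob);   /* copy of the old code */
>       load_xsave_new(&new_env, blob);   /* struct-based code */
> 
>       assert(memcmp(&old_env, &new_env, sizeof(CPUX86State)) == 0);
>   }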
> 
> Changes v1 -> v2:
> * Use uint8_t[8*n] instead of uint64_t[n] for register data
> * Keep the QEMU_BUILD_BUG_ON lines
> 
> v1 -> v2 diff below:
> 
>   diff --git a/target-i386/cpu.h b/target-i386/cpu.h
>   index 3d1d01e..41f55ef 100644
>   --- a/target-i386/cpu.h
>   +++ b/target-i386/cpu.h
>   @@ -818,7 +818,7 @@ typedef union X86LegacyXSaveArea {
>            uint32_t mxcsr;
>            uint32_t mxcsr_mask;
>            FPReg fpregs[8];
>   -        uint64_t xmm_regs[16][2];
>   +        uint8_t xmm_regs[16][16];
>        };
>        uint8_t data[512];
>    } X86LegacyXSaveArea;
>   @@ -831,7 +831,7 @@ typedef struct X86XSaveHeader {
> 
>    /* Ext. save area 2: AVX State */
>    typedef struct XSaveAVX {
>   -    uint64_t ymmh[16][2];
>   +    uint8_t ymmh[16][16];
>    } XSaveAVX;
> 
>    /* Ext. save area 3: BNDREG */
>   @@ -852,12 +852,12 @@ typedef struct XSaveOpmask {
> 
>    /* Ext. save area 6: ZMM_Hi256 */
>    typedef struct XSaveZMM_Hi256 {
>   -    uint64_t zmm_hi256[16][4];
>   +    uint8_t zmm_hi256[16][32];
>    } XSaveZMM_Hi256;
> 
>    /* Ext. save area 7: Hi16_ZMM */
>    typedef struct XSaveHi16_ZMM {
>   -    XMMReg hi16_zmm[16];
>   +    uint8_t hi16_zmm[16][64];
>    } XSaveHi16_ZMM;
> 
>    typedef struct X86XSaveArea {
>   diff --git a/target-i386/kvm.c b/target-i386/kvm.c
>   index 5e7ec70..98249e4 100644
>   --- a/target-i386/kvm.c
>   +++ b/target-i386/kvm.c
>   @@ -1203,6 +1203,43 @@ static int kvm_put_fpu(X86CPU *cpu)
>        return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_FPU, &fpu);
>    }
> 
>   +#define XSAVE_FCW_FSW     0
>   +#define XSAVE_FTW_FOP     1
>   +#define XSAVE_CWD_RIP     2
>   +#define XSAVE_CWD_RDP     4
>   +#define XSAVE_MXCSR       6
>   +#define XSAVE_ST_SPACE    8
>   +#define XSAVE_XMM_SPACE   40
>   +#define XSAVE_XSTATE_BV   128
>   +#define XSAVE_YMMH_SPACE  144
>   +#define XSAVE_BNDREGS     240
>   +#define XSAVE_BNDCSR      256
>   +#define XSAVE_OPMASK      272
>   +#define XSAVE_ZMM_Hi256   288
>   +#define XSAVE_Hi16_ZMM    416
>   +
>   +#define XSAVE_BYTE_OFFSET(word_offset) \
>   +    ((word_offset)*sizeof(((struct kvm_xsave*)0)->region[0]))
>   +
>   +#define ASSERT_OFFSET(word_offset, field) \
>   +    QEMU_BUILD_BUG_ON(XSAVE_BYTE_OFFSET(word_offset) != \
>   +                      offsetof(X86XSaveArea, field))
>   +
>   +ASSERT_OFFSET(XSAVE_FCW_FSW, legacy.fcw);
>   +ASSERT_OFFSET(XSAVE_FTW_FOP, legacy.ftw);
>   +ASSERT_OFFSET(XSAVE_CWD_RIP, legacy.fpip);
>   +ASSERT_OFFSET(XSAVE_CWD_RDP, legacy.fpdp);
>   +ASSERT_OFFSET(XSAVE_MXCSR, legacy.mxcsr);
>   +ASSERT_OFFSET(XSAVE_ST_SPACE, legacy.fpregs);
>   +ASSERT_OFFSET(XSAVE_XMM_SPACE, legacy.xmm_regs);
>   +ASSERT_OFFSET(XSAVE_XSTATE_BV, header.xstate_bv);
>   +ASSERT_OFFSET(XSAVE_YMMH_SPACE, avx_state);
>   +ASSERT_OFFSET(XSAVE_BNDREGS, bndreg_state);
>   +ASSERT_OFFSET(XSAVE_BNDCSR, bndcsr_state);
>   +ASSERT_OFFSET(XSAVE_OPMASK, opmask_state);
>   +ASSERT_OFFSET(XSAVE_ZMM_Hi256, zmm_hi256_state);
>   +ASSERT_OFFSET(XSAVE_Hi16_ZMM, hi16_zmm_state);
>   +
>    static int kvm_put_xsave(X86CPU *cpu)
>    {
>        CPUX86State *env = &cpu->env;
>   @@ -1239,17 +1276,17 @@ static int kvm_put_xsave(X86CPU *cpu)
>                sizeof env->opmask_regs);
> 
>        for (i = 0; i < CPU_NB_REGS; i++) {
>   -        X86LegacyXSaveArea *legacy = &xsave->legacy;
>   -        XSaveAVX *avx = &xsave->avx_state;
>   -        XSaveZMM_Hi256 *zmm_hi256 = &xsave->zmm_hi256_state;
>   -        stq_p(&legacy->xmm_regs[i][0],     env->xmm_regs[i].XMM_Q(0));
>   -        stq_p(&legacy->xmm_regs[i][1],     env->xmm_regs[i].XMM_Q(1));
>   -        stq_p(&avx->ymmh[i][0],            env->xmm_regs[i].XMM_Q(2));
>   -        stq_p(&avx->ymmh[i][1],            env->xmm_regs[i].XMM_Q(3));
>   -        stq_p(&zmm_hi256->zmm_hi256[i][0], env->xmm_regs[i].XMM_Q(4));
>   -        stq_p(&zmm_hi256->zmm_hi256[i][1], env->xmm_regs[i].XMM_Q(5));
>   -        stq_p(&zmm_hi256->zmm_hi256[i][2], env->xmm_regs[i].XMM_Q(6));
>   -        stq_p(&zmm_hi256->zmm_hi256[i][3], env->xmm_regs[i].XMM_Q(7));
>   +        uint8_t *xmm = xsave->legacy.xmm_regs[i];
>   +        uint8_t *ymmh = xsave->avx_state.ymmh[i];
>   +        uint8_t *zmmh = xsave->zmm_hi256_state.zmm_hi256[i];
>   +        stq_p(xmm,     env->xmm_regs[i].XMM_Q(0));
>   +        stq_p(xmm+8,   env->xmm_regs[i].XMM_Q(1));
>   +        stq_p(ymmh,    env->xmm_regs[i].XMM_Q(2));
>   +        stq_p(ymmh+8,  env->xmm_regs[i].XMM_Q(3));
>   +        stq_p(zmmh,    env->xmm_regs[i].XMM_Q(4));
>   +        stq_p(zmmh+8,  env->xmm_regs[i].XMM_Q(5));
>   +        stq_p(zmmh+16, env->xmm_regs[i].XMM_Q(6));
>   +        stq_p(zmmh+24, env->xmm_regs[i].XMM_Q(7));
>        }
> 
>    #ifdef TARGET_X86_64
>   @@ -1625,17 +1662,17 @@ static int kvm_get_xsave(X86CPU *cpu)
>                sizeof env->opmask_regs);
> 
>        for (i = 0; i < CPU_NB_REGS; i++) {
>   -        X86LegacyXSaveArea *legacy = &xsave->legacy;
>   -        XSaveAVX *avx = &xsave->avx_state;
>   -        XSaveZMM_Hi256 *zmm_hi256 = &xsave->zmm_hi256_state;
>   -        env->xmm_regs[i].XMM_Q(0) = ldq_p(&legacy->xmm_regs[i][0]);
>   -        env->xmm_regs[i].XMM_Q(1) = ldq_p(&legacy->xmm_regs[i][1]);
>   -        env->xmm_regs[i].XMM_Q(2) = ldq_p(&avx->ymmh[i][0]);
>   -        env->xmm_regs[i].XMM_Q(3) = ldq_p(&avx->ymmh[i][1]);
>   -        env->xmm_regs[i].XMM_Q(4) = ldq_p(&zmm_hi256->zmm_hi256[i][0]);
>   -        env->xmm_regs[i].XMM_Q(5) = ldq_p(&zmm_hi256->zmm_hi256[i][1]);
>   -        env->xmm_regs[i].XMM_Q(6) = ldq_p(&zmm_hi256->zmm_hi256[i][2]);
>   -        env->xmm_regs[i].XMM_Q(7) = ldq_p(&zmm_hi256->zmm_hi256[i][3]);
>   +        uint8_t *xmm = xsave->legacy.xmm_regs[i];
>   +        uint8_t *ymmh = xsave->avx_state.ymmh[i];
>   +        uint8_t *zmmh = xsave->zmm_hi256_state.zmm_hi256[i];
>   +        env->xmm_regs[i].XMM_Q(0) = ldq_p(xmm);
>   +        env->xmm_regs[i].XMM_Q(1) = ldq_p(xmm+8);
>   +        env->xmm_regs[i].XMM_Q(2) = ldq_p(ymmh);
>   +        env->xmm_regs[i].XMM_Q(3) = ldq_p(ymmh+8);
>   +        env->xmm_regs[i].XMM_Q(4) = ldq_p(zmmh);
>   +        env->xmm_regs[i].XMM_Q(5) = ldq_p(zmmh+8);
>   +        env->xmm_regs[i].XMM_Q(6) = ldq_p(zmmh+16);
>   +        env->xmm_regs[i].XMM_Q(7) = ldq_p(zmmh+24);
>        }
> 
>    #ifdef TARGET_X86_64
> 
> Eduardo Habkost (3):
>   target-i386: Define structs for layout of xsave area
>   target-i386: Use xsave structs for ext_save_area
>   target-i386: kvm: Use X86XSaveArea struct for xsave save/load
> 
>  target-i386/cpu.c | 18 +++++++----
>  target-i386/cpu.h | 85 ++++++++++++++++++++++++++++++++++++++++++++++++
>  target-i386/kvm.c | 96 +++++++++++++++++++++++++++++++++----------------------
>  3 files changed, 155 insertions(+), 44 deletions(-)
> 

The patches are okay; are you going to rebase them on top of the PKRU
patches?

Paolo
