From: David Edmondson <david.edmondson@oracle.com>
To: Yang Zhong <yang.zhong@intel.com>
Cc: kevin.tian@intel.com, seanjc@google.com,
	jing2.liu@linux.intel.com, qemu-devel@nongnu.org,
	wei.w.wang@intel.com, pbonzini@redhat.com, guang.zeng@intel.com
Subject: Re: [PATCH v2 6/8] x86: add support for KVM_CAP_XSAVE2 and AMX state migration
Date: Mon, 21 Feb 2022 13:25:53 +0000
Message-ID: <cunv8x8nyfy.fsf@oracle.com>
In-Reply-To: <20220217060434.52460-7-yang.zhong@intel.com> (Yang Zhong's message of "Wed, 16 Feb 2022 22:04:32 -0800")

On Wednesday, 2022-02-16 at 22:04:32 -08, Yang Zhong wrote:

> From: Jing Liu <jing2.liu@intel.com>
>
> When dynamic xfeatures (e.g. AMX) are used by the guest, the xsave
> area would be larger than 4KB. KVM_GET_XSAVE2 and KVM_SET_XSAVE
> under KVM_CAP_XSAVE2 work with an xsave buffer larger than 4KB.
> Always use the new ioctls under KVM_CAP_XSAVE2 when KVM supports it.
>
> Signed-off-by: Jing Liu <jing2.liu@intel.com>
> Signed-off-by: Zeng Guang <guang.zeng@intel.com>
> Signed-off-by: Wei Wang <wei.w.wang@intel.com>
> Signed-off-by: Yang Zhong <yang.zhong@intel.com>
> ---
>  target/i386/cpu.h          |  4 ++++
>  target/i386/kvm/kvm.c      | 42 ++++++++++++++++++++++++--------------
>  target/i386/xsave_helper.c | 33 ++++++++++++++++++++++++++++++
>  3 files changed, 64 insertions(+), 15 deletions(-)
>
> diff --git a/target/i386/cpu.h b/target/i386/cpu.h
> index f7fc2e97a6..de9da38e42 100644
> --- a/target/i386/cpu.h
> +++ b/target/i386/cpu.h
> @@ -1528,6 +1528,10 @@ typedef struct CPUX86State {
>      uint64_t opmask_regs[NB_OPMASK_REGS];
>      YMMReg zmmh_regs[CPU_NB_REGS];
>      ZMMReg hi16_zmm_regs[CPU_NB_REGS];
> +#ifdef TARGET_X86_64
> +    uint8_t xtilecfg[64];
> +    uint8_t xtiledata[8192];
> +#endif

Can we have defined constants for these sizes? They also appear in patch
2.
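Something along these lines, perhaps (the names are only a suggestion,
nothing that exists today), shared with the definitions in patch 2:

    /* Hypothetical constants - reuse whatever names patch 2 settles on. */
    #define XTILE_CFG_SIZE   64
    #define XTILE_DATA_SIZE  8192

    uint8_t xtilecfg[XTILE_CFG_SIZE];
    uint8_t xtiledata[XTILE_DATA_SIZE];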

>
>      /* sysenter registers */
>      uint32_t sysenter_cs;
> diff --git a/target/i386/kvm/kvm.c b/target/i386/kvm/kvm.c
> index 8562d3d138..ff064e3d8f 100644
> --- a/target/i386/kvm/kvm.c
> +++ b/target/i386/kvm/kvm.c
> @@ -122,6 +122,7 @@ static uint32_t num_architectural_pmu_gp_counters;
>  static uint32_t num_architectural_pmu_fixed_counters;
>
>  static int has_xsave;
> +static int has_xsave2;
>  static int has_xcrs;
>  static int has_pit_state2;
>  static int has_sregs2;
> @@ -1585,6 +1586,26 @@ static Error *invtsc_mig_blocker;
>
>  #define KVM_MAX_CPUID_ENTRIES  100
>
> +static void kvm_init_xsave(CPUX86State *env)
> +{
> +    if (has_xsave2) {
> +        env->xsave_buf_len = QEMU_ALIGN_UP(has_xsave2, 4096);

Idle curiosity - why do we round this up?
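
My guess - and it is only a guess - is that KVM_CHECK_EXTENSION for
KVM_CAP_XSAVE2 reports the required buffer size in bytes, and the
QEMU_ALIGN_UP pads that out to a whole number of pages to suit the
page-aligned allocation below, e.g.:

    /* Illustration only, not from the patch: if KVM reported 11008
     * bytes of xsave state, QEMU_ALIGN_UP(11008, 4096) == 12288,
     * i.e. three whole 4KiB pages behind qemu_memalign(4096, ...). */

If so, a short comment in the code would save the next reader the same
question.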

> +    } else if (has_xsave) {
> +        env->xsave_buf_len = sizeof(struct kvm_xsave);
> +    } else {
> +        return;
> +    }
> +
> +    env->xsave_buf = qemu_memalign(4096, env->xsave_buf_len);
> +    memset(env->xsave_buf, 0, env->xsave_buf_len);
> +     /*
> +      * The allocated storage must be large enough for all of the
> +      * possible XSAVE state components.
> +      */
> +    assert(kvm_arch_get_supported_cpuid(kvm_state, 0xd, 0, R_ECX) <=
> +           env->xsave_buf_len);
> +}
> +
>  int kvm_arch_init_vcpu(CPUState *cs)
>  {
>      struct {
> @@ -1614,6 +1635,8 @@ int kvm_arch_init_vcpu(CPUState *cs)
>
>      cpuid_i = 0;
>
> +    has_xsave2 = kvm_check_extension(cs->kvm_state, KVM_CAP_XSAVE2);
> +
>      r = kvm_arch_set_tsc_khz(cs);
>      if (r < 0) {
>          return r;
> @@ -2003,19 +2026,7 @@ int kvm_arch_init_vcpu(CPUState *cs)
>      if (r) {
>          goto fail;
>      }
> -
> -    if (has_xsave) {
> -        env->xsave_buf_len = sizeof(struct kvm_xsave);
> -        env->xsave_buf = qemu_memalign(4096, env->xsave_buf_len);
> -        memset(env->xsave_buf, 0, env->xsave_buf_len);
> -
> -        /*
> -         * The allocated storage must be large enough for all of the
> -         * possible XSAVE state components.
> -         */
> -        assert(kvm_arch_get_supported_cpuid(kvm_state, 0xd, 0, R_ECX)
> -               <= env->xsave_buf_len);
> -    }
> +    kvm_init_xsave(env);
>
>      max_nested_state_len = kvm_max_nested_state_length();
>      if (max_nested_state_len > 0) {
> @@ -3319,13 +3330,14 @@ static int kvm_get_xsave(X86CPU *cpu)
>  {
>      CPUX86State *env = &cpu->env;
>      void *xsave = env->xsave_buf;
> -    int ret;
> +    int type, ret;
>
>      if (!has_xsave) {
>          return kvm_get_fpu(cpu);
>      }
>
> -    ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_XSAVE, xsave);
> +    type = has_xsave2 ? KVM_GET_XSAVE2 : KVM_GET_XSAVE;
> +    ret = kvm_vcpu_ioctl(CPU(cpu), type, xsave);
>      if (ret < 0) {
>          return ret;
>      }
> diff --git a/target/i386/xsave_helper.c b/target/i386/xsave_helper.c
> index ac61a96344..b6a004505f 100644
> --- a/target/i386/xsave_helper.c
> +++ b/target/i386/xsave_helper.c
> @@ -5,6 +5,7 @@
>  #include "qemu/osdep.h"
>
>  #include "cpu.h"
> +#include <asm/kvm.h>
>
>  void x86_cpu_xsave_all_areas(X86CPU *cpu, void *buf, uint32_t buflen)
>  {
> @@ -126,6 +127,22 @@ void x86_cpu_xsave_all_areas(X86CPU *cpu, void *buf, uint32_t buflen)
>
>          memcpy(pkru, &env->pkru, sizeof(env->pkru));
>      }
> +
> +    e = &x86_ext_save_areas[XSTATE_XTILE_CFG_BIT];
> +    if (e->size && e->offset) {
> +        XSaveXTILECFG *tilecfg = buf + e->offset;
> +
> +        memcpy(tilecfg, &env->xtilecfg, sizeof(env->xtilecfg));
> +    }
> +
> +    if (buflen > sizeof(struct kvm_xsave)) {
> +        e = &x86_ext_save_areas[XSTATE_XTILE_DATA_BIT];
> +        if (e->size && e->offset && buflen >= e->size + e->offset) {
> +            XSaveXTILEDATA *tiledata = buf + e->offset;
> +
> +            memcpy(tiledata, &env->xtiledata, sizeof(env->xtiledata));
> +        }
> +    }
>  #endif
>  }
>
> @@ -247,5 +264,21 @@ void x86_cpu_xrstor_all_areas(X86CPU *cpu, const void *buf, uint32_t buflen)
>          pkru = buf + e->offset;
>          memcpy(&env->pkru, pkru, sizeof(env->pkru));
>      }
> +
> +    e = &x86_ext_save_areas[XSTATE_XTILE_CFG_BIT];
> +    if (e->size && e->offset) {
> +        const XSaveXTILECFG *tilecfg = buf + e->offset;
> +
> +        memcpy(&env->xtilecfg, tilecfg, sizeof(env->xtilecfg));
> +    }
> +
> +    if (buflen > sizeof(struct kvm_xsave)) {
> +        e = &x86_ext_save_areas[XSTATE_XTILE_DATA_BIT];
> +        if (e->size && e->offset && buflen >= e->size + e->offset) {
> +            const XSaveXTILEDATA *tiledata = buf + e->offset;
> +
> +            memcpy(&env->xtiledata, tiledata, sizeof(env->xtiledata));
> +        }
> +    }
>  #endif
>  }

dme.
-- 
Why does it have to be like this? I can never tell.

