All of lore.kernel.org
 help / color / mirror / Atom feed
From: Liran Alon <liran.alon@oracle.com>
To: Vitaly Kuznetsov <vkuznets@redhat.com>
Cc: kvm@vger.kernel.org, "Paolo Bonzini" <pbonzini@redhat.com>,
	"Radim Krčmář" <rkrcmar@redhat.com>,
	"Jon Doron" <arilou@gmail.com>,
	"Sean Christopherson" <sean.j.christopherson@intel.com>,
	linux-kernel@vger.kernel.org
Subject: Re: [PATCH] KVM: x86: nVMX: allow RSM to restore VMXE CR4 flag
Date: Tue, 26 Mar 2019 15:11:56 +0200	[thread overview]
Message-ID: <DFD60850-CD9B-4FF3-9C46-EF4AF53C76CA@oracle.com> (raw)
In-Reply-To: <20190326130746.28748-1-vkuznets@redhat.com>



> On 26 Mar 2019, at 15:07, Vitaly Kuznetsov <vkuznets@redhat.com> wrote:
> 
> Commit 5bea5123cbf0 ("KVM: VMX: check nested state and CR4.VMXE against
> SMM") introduced a check to vmx_set_cr4() forbidding to set VMXE from SMM.
> The check is correct, however, there is a special case when RSM is called
> to leave SMM: rsm_enter_protected_mode() is called with HF_SMM_MASK still
> set and in case VMXE was set before entering SMM we're failing to return.
> 
> Resolve the issue by temporarily dropping HF_SMM_MASK around set_cr4() calls
> when ops->set_cr() is called from RSM.
> 
> Reported-by: Jon Doron <arilou@gmail.com>
> Suggested-by: Liran Alon <liran.alon@oracle.com>
> Fixes: 5bea5123cbf0 ("KVM: VMX: check nested state and CR4.VMXE against SMM")
> Signed-off-by: Vitaly Kuznetsov <vkuznets@redhat.com>

Patch looks good to me.
Reviewed-by: Liran Alon <liran.alon@oracle.com>

> ---
> - Instead of putting the temporary HF_SMM_MASK drop into
>  rsm_enter_protected_mode() (as was suggested by Liran), move it to
>  emulator_set_cr(), modifying its interface. emulate.c seems to be
>  vcpu-specifics-free at this moment, we may want to keep it this way.
> - It seems that Hyper-V+UEFI on KVM is still broken, I'm observing sporadic
>  hangs even with this patch. These hangs, however, seem to be unrelated to
>  rsm.

Feel free to share details on these hangs ;)

Great work,
-Liran

> ---
> arch/x86/include/asm/kvm_emulate.h |  3 ++-
> arch/x86/kvm/emulate.c             | 27 ++++++++++++++-------------
> arch/x86/kvm/x86.c                 | 12 +++++++++++-
> 3 files changed, 27 insertions(+), 15 deletions(-)
> 
> diff --git a/arch/x86/include/asm/kvm_emulate.h b/arch/x86/include/asm/kvm_emulate.h
> index 93c4bf598fb0..6c33caa82fa5 100644
> --- a/arch/x86/include/asm/kvm_emulate.h
> +++ b/arch/x86/include/asm/kvm_emulate.h
> @@ -203,7 +203,8 @@ struct x86_emulate_ops {
> 	void (*set_gdt)(struct x86_emulate_ctxt *ctxt, struct desc_ptr *dt);
> 	void (*set_idt)(struct x86_emulate_ctxt *ctxt, struct desc_ptr *dt);
> 	ulong (*get_cr)(struct x86_emulate_ctxt *ctxt, int cr);
> -	int (*set_cr)(struct x86_emulate_ctxt *ctxt, int cr, ulong val);
> +	int (*set_cr)(struct x86_emulate_ctxt *ctxt, int cr, ulong val,
> +		      bool from_rsm);
> 	int (*cpl)(struct x86_emulate_ctxt *ctxt);
> 	int (*get_dr)(struct x86_emulate_ctxt *ctxt, int dr, ulong *dest);
> 	int (*set_dr)(struct x86_emulate_ctxt *ctxt, int dr, ulong value);
> diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
> index c338984c850d..a6204105d4d7 100644
> --- a/arch/x86/kvm/emulate.c
> +++ b/arch/x86/kvm/emulate.c
> @@ -2413,7 +2413,7 @@ static int rsm_enter_protected_mode(struct x86_emulate_ctxt *ctxt,
> 		cr3 &= ~0xfff;
> 	}
> 
> -	bad = ctxt->ops->set_cr(ctxt, 3, cr3);
> +	bad = ctxt->ops->set_cr(ctxt, 3, cr3, true);
> 	if (bad)
> 		return X86EMUL_UNHANDLEABLE;
> 
> @@ -2422,20 +2422,20 @@ static int rsm_enter_protected_mode(struct x86_emulate_ctxt *ctxt,
> 	 * Then enable protected mode.	However, PCID cannot be enabled
> 	 * if EFER.LMA=0, so set it separately.
> 	 */
> -	bad = ctxt->ops->set_cr(ctxt, 4, cr4 & ~X86_CR4_PCIDE);
> +	bad = ctxt->ops->set_cr(ctxt, 4, cr4 & ~X86_CR4_PCIDE, true);
> 	if (bad)
> 		return X86EMUL_UNHANDLEABLE;
> 
> -	bad = ctxt->ops->set_cr(ctxt, 0, cr0);
> +	bad = ctxt->ops->set_cr(ctxt, 0, cr0, true);
> 	if (bad)
> 		return X86EMUL_UNHANDLEABLE;
> 
> 	if (cr4 & X86_CR4_PCIDE) {
> -		bad = ctxt->ops->set_cr(ctxt, 4, cr4);
> +		bad = ctxt->ops->set_cr(ctxt, 4, cr4, true);
> 		if (bad)
> 			return X86EMUL_UNHANDLEABLE;
> 		if (pcid) {
> -			bad = ctxt->ops->set_cr(ctxt, 3, cr3 | pcid);
> +			bad = ctxt->ops->set_cr(ctxt, 3, cr3 | pcid, true);
> 			if (bad)
> 				return X86EMUL_UNHANDLEABLE;
> 		}
> @@ -2581,7 +2581,7 @@ static int em_rsm(struct x86_emulate_ctxt *ctxt)
> 
> 		/* Zero CR4.PCIDE before CR0.PG.  */
> 		if (cr4 & X86_CR4_PCIDE) {
> -			ctxt->ops->set_cr(ctxt, 4, cr4 & ~X86_CR4_PCIDE);
> +			ctxt->ops->set_cr(ctxt, 4, cr4 & ~X86_CR4_PCIDE, true);
> 			cr4 &= ~X86_CR4_PCIDE;
> 		}
> 
> @@ -2595,11 +2595,12 @@ static int em_rsm(struct x86_emulate_ctxt *ctxt)
> 	/* For the 64-bit case, this will clear EFER.LMA.  */
> 	cr0 = ctxt->ops->get_cr(ctxt, 0);
> 	if (cr0 & X86_CR0_PE)
> -		ctxt->ops->set_cr(ctxt, 0, cr0 & ~(X86_CR0_PG | X86_CR0_PE));
> +		ctxt->ops->set_cr(ctxt, 0, cr0 & ~(X86_CR0_PG | X86_CR0_PE),
> +				  true);
> 
> 	/* Now clear CR4.PAE (which must be done before clearing EFER.LME).  */
> 	if (cr4 & X86_CR4_PAE)
> -		ctxt->ops->set_cr(ctxt, 4, cr4 & ~X86_CR4_PAE);
> +		ctxt->ops->set_cr(ctxt, 4, cr4 & ~X86_CR4_PAE, true);
> 
> 	/* And finally go back to 32-bit mode.  */
> 	efer = 0;
> @@ -3131,7 +3132,7 @@ static int load_state_from_tss32(struct x86_emulate_ctxt *ctxt,
> 	int ret;
> 	u8 cpl;
> 
> -	if (ctxt->ops->set_cr(ctxt, 3, tss->cr3))
> +	if (ctxt->ops->set_cr(ctxt, 3, tss->cr3, false))
> 		return emulate_gp(ctxt, 0);
> 	ctxt->_eip = tss->eip;
> 	ctxt->eflags = tss->eflags | 2;
> @@ -3331,7 +3332,7 @@ static int emulator_do_task_switch(struct x86_emulate_ctxt *ctxt,
> 		write_segment_descriptor(ctxt, tss_selector, &next_tss_desc);
> 	}
> 
> -	ops->set_cr(ctxt, 0,  ops->get_cr(ctxt, 0) | X86_CR0_TS);
> +	ops->set_cr(ctxt, 0,  ops->get_cr(ctxt, 0) | X86_CR0_TS, false);
> 	ops->set_segment(ctxt, tss_selector, &next_tss_desc, 0, VCPU_SREG_TR);
> 
> 	if (has_error_code) {
> @@ -3633,7 +3634,7 @@ static int em_movbe(struct x86_emulate_ctxt *ctxt)
> 
> static int em_cr_write(struct x86_emulate_ctxt *ctxt)
> {
> -	if (ctxt->ops->set_cr(ctxt, ctxt->modrm_reg, ctxt->src.val))
> +	if (ctxt->ops->set_cr(ctxt, ctxt->modrm_reg, ctxt->src.val, false))
> 		return emulate_gp(ctxt, 0);
> 
> 	/* Disable writeback. */
> @@ -3766,7 +3767,7 @@ static int em_clts(struct x86_emulate_ctxt *ctxt)
> 
> 	cr0 = ctxt->ops->get_cr(ctxt, 0);
> 	cr0 &= ~X86_CR0_TS;
> -	ctxt->ops->set_cr(ctxt, 0, cr0);
> +	ctxt->ops->set_cr(ctxt, 0, cr0, false);
> 	return X86EMUL_CONTINUE;
> }
> 
> @@ -3866,7 +3867,7 @@ static int em_smsw(struct x86_emulate_ctxt *ctxt)
> static int em_lmsw(struct x86_emulate_ctxt *ctxt)
> {
> 	ctxt->ops->set_cr(ctxt, 0, (ctxt->ops->get_cr(ctxt, 0) & ~0x0eul)
> -			  | (ctxt->src.val & 0x0f));
> +			  | (ctxt->src.val & 0x0f), false);
> 	ctxt->dst.type = OP_NONE;
> 	return X86EMUL_CONTINUE;
> }
> diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
> index a419656521b6..f2745e3170b6 100644
> --- a/arch/x86/kvm/x86.c
> +++ b/arch/x86/kvm/x86.c
> @@ -5739,7 +5739,8 @@ static unsigned long emulator_get_cr(struct x86_emulate_ctxt *ctxt, int cr)
> 	return value;
> }
> 
> -static int emulator_set_cr(struct x86_emulate_ctxt *ctxt, int cr, ulong val)
> +static int emulator_set_cr(struct x86_emulate_ctxt *ctxt, int cr, ulong val,
> +			   bool from_rsm)
> {
> 	struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
> 	int res = 0;
> @@ -5755,7 +5756,16 @@ static int emulator_set_cr(struct x86_emulate_ctxt *ctxt, int cr, ulong val)
> 		res = kvm_set_cr3(vcpu, val);
> 		break;
> 	case 4:
> +		/*
> +		 * set_cr4() may forbid to set certain flags (e.g. VMXE) from
> +		 * SMM but we're actually leaving it; temporary drop HF_SMM_MASK
> +		 * when setting CR4.
> +		 */
> +		if (from_rsm)
> +			vcpu->arch.hflags &= ~HF_SMM_MASK;
> 		res = kvm_set_cr4(vcpu, mk_cr_64(kvm_read_cr4(vcpu), val));
> +		if (from_rsm)
> +			vcpu->arch.hflags |= HF_SMM_MASK;
> 		break;
> 	case 8:
> 		res = kvm_set_cr8(vcpu, val);
> -- 
> 2.20.1
> 


  reply	other threads:[~2019-03-26 13:12 UTC|newest]

Thread overview: 10+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2019-03-26 13:07 [PATCH] KVM: x86: nVMX: allow RSM to restore VMXE CR4 flag Vitaly Kuznetsov
2019-03-26 13:11 ` Liran Alon [this message]
2019-03-26 13:48   ` Vitaly Kuznetsov
2019-03-26 15:02     ` Liran Alon
2019-03-27 10:08       ` Vitaly Kuznetsov
     [not found] ` <20190327192946.19128-1-sean.j.christopherson@intel.com>
     [not found]   ` <87va03ml7n.fsf@vitty.brq.redhat.com>
     [not found]     ` <CALMp9eQPwFy5GvjDbE9wQQYEDdYfHzEwm6n1XZgQ_hCuk9vp+Q@mail.gmail.com>
2019-04-09  8:21       ` Vitaly Kuznetsov
2019-04-09 16:31         ` Paolo Bonzini
2019-04-10  9:38           ` [RFC] selftests: kvm: add a selftest for SMM Vitaly Kuznetsov
2019-04-10 10:10             ` Paolo Bonzini
2019-04-10 10:32               ` Vitaly Kuznetsov

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=DFD60850-CD9B-4FF3-9C46-EF4AF53C76CA@oracle.com \
    --to=liran.alon@oracle.com \
    --cc=arilou@gmail.com \
    --cc=kvm@vger.kernel.org \
    --cc=linux-kernel@vger.kernel.org \
    --cc=pbonzini@redhat.com \
    --cc=rkrcmar@redhat.com \
    --cc=sean.j.christopherson@intel.com \
    --cc=vkuznets@redhat.com \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.