From: Fabiano Rosas <farosas@linux.ibm.com>
To: Ravi Bangoria <ravi.bangoria@linux.ibm.com>,
	mpe@ellerman.id.au, paulus@samba.org
Cc: ravi.bangoria@linux.ibm.com, mikey@neuling.org,
	npiggin@gmail.com, leobras.c@gmail.com, pbonzini@redhat.com,
	christophe.leroy@c-s.fr, jniethe5@gmail.com, kvm@vger.kernel.org,
	kvm-ppc@vger.kernel.org, linux-kernel@vger.kernel.org,
	linuxppc-dev@lists.ozlabs.org
Subject: Re: [PATCH v2 1/4] KVM: PPC: Allow nested guest creation when L0 hv_guest_state > L1
Date: Wed, 09 Dec 2020 11:25:24 -0300	[thread overview]
Message-ID: <87r1nzgip7.fsf@linux.ibm.com> (raw)
In-Reply-To: <20201124105953.39325-2-ravi.bangoria@linux.ibm.com>

Ravi Bangoria <ravi.bangoria@linux.ibm.com> writes:

> On powerpc, the L1 hypervisor relies on L0 to load the L2 guest
> state into the CPU via the H_ENTER_NESTED hcall. L1 prepares the
> L2 state in struct hv_guest_state and passes a pointer to it
> through the hcall. Using that pointer, L0 reads/writes that state
> directly from/to L1 memory. Thus L0 must know the hv_guest_state
> layout used by L1. Currently the version field is used for this:
> if L0's hv_guest_state.version != L1's hv_guest_state.version,
> L0 won't allow the nested KVM guest.
>
> This restriction can be loosened a bit. L0 can be taught to
> understand older layouts of hv_guest_state, provided new members
> are only ever added at the end. That is, a nested guest can be
> allowed even when L0's hv_guest_state.version > L1's
> hv_guest_state.version, though the other way around is still not
> possible.
>
> Signed-off-by: Ravi Bangoria <ravi.bangoria@linux.ibm.com>

Reviewed-by: Fabiano Rosas <farosas@linux.ibm.com>
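
Just to spell out for other readers how this is meant to be extended
(a hypothetical sketch, not part of this patch -- the version number
and the dawr1/dawrx1 names below are only assumptions): if a future
layout appends new members at the end of hv_guest_state, only the
size lookup grows a new case, and an L1 that still passes version 1
keeps working because L0 reads and writes only the first "size" bytes:

	/*
	 * Hypothetical sketch of a version 2 layout that appends
	 * dawr1/dawrx1 (assumed names) after the existing v1 members.
	 */
	static inline int hv_guest_state_size(unsigned int version)
	{
		switch (version) {
		case 1:
			return offsetofend(struct hv_guest_state, ppr);
		case 2:
			return offsetofend(struct hv_guest_state, dawrx1);
		default:
			return -1;
		}
	}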

> ---
>  arch/powerpc/include/asm/hvcall.h   | 17 +++++++--
>  arch/powerpc/kvm/book3s_hv_nested.c | 53 ++++++++++++++++++++++++-----
>  2 files changed, 59 insertions(+), 11 deletions(-)
>
> diff --git a/arch/powerpc/include/asm/hvcall.h b/arch/powerpc/include/asm/hvcall.h
> index fbb377055471..a7073fddb657 100644
> --- a/arch/powerpc/include/asm/hvcall.h
> +++ b/arch/powerpc/include/asm/hvcall.h
> @@ -524,9 +524,12 @@ struct h_cpu_char_result {
>  	u64 behaviour;
>  };
>
> -/* Register state for entering a nested guest with H_ENTER_NESTED */
> +/*
> + * Register state for entering a nested guest with H_ENTER_NESTED.
> + * New member must be added at the end.
> + */
>  struct hv_guest_state {
> -	u64 version;		/* version of this structure layout */
> +	u64 version;		/* version of this structure layout, must be first */
>  	u32 lpid;
>  	u32 vcpu_token;
>  	/* These registers are hypervisor privileged (at least for writing) */
> @@ -560,6 +563,16 @@ struct hv_guest_state {
>  /* Latest version of hv_guest_state structure */
>  #define HV_GUEST_STATE_VERSION	1
>
> +static inline int hv_guest_state_size(unsigned int version)
> +{
> +	switch (version) {
> +	case 1:
> +		return offsetofend(struct hv_guest_state, ppr);
> +	default:
> +		return -1;
> +	}
> +}
> +
>  #endif /* __ASSEMBLY__ */
>  #endif /* __KERNEL__ */
>  #endif /* _ASM_POWERPC_HVCALL_H */
> diff --git a/arch/powerpc/kvm/book3s_hv_nested.c b/arch/powerpc/kvm/book3s_hv_nested.c
> index 33b58549a9aa..2b433c3bacea 100644
> --- a/arch/powerpc/kvm/book3s_hv_nested.c
> +++ b/arch/powerpc/kvm/book3s_hv_nested.c
> @@ -215,6 +215,45 @@ static void kvmhv_nested_mmio_needed(struct kvm_vcpu *vcpu, u64 regs_ptr)
>  	}
>  }
>
> +static int kvmhv_read_guest_state_and_regs(struct kvm_vcpu *vcpu,
> +					   struct hv_guest_state *l2_hv,
> +					   struct pt_regs *l2_regs,
> +					   u64 hv_ptr, u64 regs_ptr)
> +{
> +	int size;
> +
> +	if (kvm_vcpu_read_guest(vcpu, hv_ptr, &(l2_hv->version),
> +				sizeof(l2_hv->version)))
> +		return -1;
> +
> +	if (kvmppc_need_byteswap(vcpu))
> +		l2_hv->version = swab64(l2_hv->version);
> +
> +	size = hv_guest_state_size(l2_hv->version);
> +	if (size < 0)
> +		return -1;
> +
> +	return kvm_vcpu_read_guest(vcpu, hv_ptr, l2_hv, size) ||
> +		kvm_vcpu_read_guest(vcpu, regs_ptr, l2_regs,
> +				    sizeof(struct pt_regs));
> +}
> +
> +static int kvmhv_write_guest_state_and_regs(struct kvm_vcpu *vcpu,
> +					    struct hv_guest_state *l2_hv,
> +					    struct pt_regs *l2_regs,
> +					    u64 hv_ptr, u64 regs_ptr)
> +{
> +	int size;
> +
> +	size = hv_guest_state_size(l2_hv->version);
> +	if (size < 0)
> +		return -1;
> +
> +	return kvm_vcpu_write_guest(vcpu, hv_ptr, l2_hv, size) ||
> +		kvm_vcpu_write_guest(vcpu, regs_ptr, l2_regs,
> +				     sizeof(struct pt_regs));
> +}
> +
>  long kvmhv_enter_nested_guest(struct kvm_vcpu *vcpu)
>  {
>  	long int err, r;
> @@ -235,17 +274,15 @@ long kvmhv_enter_nested_guest(struct kvm_vcpu *vcpu)
>  	hv_ptr = kvmppc_get_gpr(vcpu, 4);
>  	regs_ptr = kvmppc_get_gpr(vcpu, 5);
>  	vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
> -	err = kvm_vcpu_read_guest(vcpu, hv_ptr, &l2_hv,
> -				  sizeof(struct hv_guest_state)) ||
> -		kvm_vcpu_read_guest(vcpu, regs_ptr, &l2_regs,
> -				    sizeof(struct pt_regs));
> +	err = kvmhv_read_guest_state_and_regs(vcpu, &l2_hv, &l2_regs,
> +					      hv_ptr, regs_ptr);
>  	srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
>  	if (err)
>  		return H_PARAMETER;
>
>  	if (kvmppc_need_byteswap(vcpu))
>  		byteswap_hv_regs(&l2_hv);
> -	if (l2_hv.version != HV_GUEST_STATE_VERSION)
> +	if (l2_hv.version > HV_GUEST_STATE_VERSION)
>  		return H_P2;
>
>  	if (kvmppc_need_byteswap(vcpu))
> @@ -325,10 +362,8 @@ long kvmhv_enter_nested_guest(struct kvm_vcpu *vcpu)
>  		byteswap_pt_regs(&l2_regs);
>  	}
>  	vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
> -	err = kvm_vcpu_write_guest(vcpu, hv_ptr, &l2_hv,
> -				   sizeof(struct hv_guest_state)) ||
> -		kvm_vcpu_write_guest(vcpu, regs_ptr, &l2_regs,
> -				   sizeof(struct pt_regs));
> +	err = kvmhv_write_guest_state_and_regs(vcpu, &l2_hv, &l2_regs,
> +					       hv_ptr, regs_ptr);
>  	srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
>  	if (err)
>  		return H_AUTHORITY;
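
One more note on why relaxing the check helps in practice: L1 always
advertises the HV_GUEST_STATE_VERSION it was built against, so an L1
compiled with the v1 header keeps working on a newer L0, while a
newer L1 on an older L0 is still rejected. Roughly, on the L1 side
(a hand-written sketch with assumed names, not code from this patch):

	/* Hypothetical L1-side sketch: claim the layout L1 was built with. */
	static long l1_enter_l2(struct kvm_vcpu *vcpu, struct hv_guest_state *hvregs)
	{
		hvregs->version = HV_GUEST_STATE_VERSION; /* 1 for a v1-built L1 */
		/* ... remaining L2 state filled in by L1 ... */
		return plpar_hcall_norets(H_ENTER_NESTED, __pa(hvregs),
					  __pa(&vcpu->arch.regs));
	}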
