All of lore.kernel.org
 help / color / mirror / Atom feed
From: David Gibson <david@gibson.dropbear.id.au>
To: Alexey Kardashevskiy <aik@ozlabs.ru>
Cc: linuxppc-dev@lists.ozlabs.org, Paul Mackerras <paulus@samba.org>,
	Alexander Graf <agraf@suse.com>,
	kvm-ppc@vger.kernel.org, kvm@vger.kernel.org
Subject: Re: [PATCH kernel 3/9] KVM: PPC: Rework H_PUT_TCE/H_GET_TCE handlers
Date: Tue, 8 Dec 2015 13:18:04 +1100	[thread overview]
Message-ID: <20151208021804.GI20139@voom.fritz.box> (raw)
In-Reply-To: <1442314179-9706-4-git-send-email-aik@ozlabs.ru>

[-- Attachment #1: Type: text/plain, Size: 5925 bytes --]

On Tue, Sep 15, 2015 at 08:49:33PM +1000, Alexey Kardashevskiy wrote:
> This reworks the existing H_PUT_TCE/H_GET_TCE handlers to have one
> exit path. This allows next patch to add locks nicely.

I don't see a problem with the actual code, but it doesn't seem to
match this description: I still see multiple return statements for
h_put_tce at least.

> This moves the ioba boundary check into a helper and adds a check that
> the least significant bits of ioba (below the IOMMU page size) are zero.
> 
> The patch is pretty mechanical (only the check for the least significant
> ioba bits is added), so no change in behaviour is expected.
> 
> Signed-off-by: Alexey Kardashevskiy <aik@ozlabs.ru>
> ---
>  arch/powerpc/kvm/book3s_64_vio_hv.c | 102 +++++++++++++++++++++++-------------
>  1 file changed, 66 insertions(+), 36 deletions(-)
> 
> diff --git a/arch/powerpc/kvm/book3s_64_vio_hv.c b/arch/powerpc/kvm/book3s_64_vio_hv.c
> index 89e96b3..8ae12ac 100644
> --- a/arch/powerpc/kvm/book3s_64_vio_hv.c
> +++ b/arch/powerpc/kvm/book3s_64_vio_hv.c
> @@ -35,71 +35,101 @@
>  #include <asm/ppc-opcode.h>
>  #include <asm/kvm_host.h>
>  #include <asm/udbg.h>
> +#include <asm/iommu.h>
>  
>  #define TCES_PER_PAGE	(PAGE_SIZE / sizeof(u64))
>  
> +/*
> + * Finds a TCE table descriptor by LIOBN.
> + *
> + * WARNING: This will be called in real or virtual mode on HV KVM and virtual
> + *          mode on PR KVM
> + */
> +static struct kvmppc_spapr_tce_table *kvmppc_find_table(struct kvm_vcpu *vcpu,
> +		unsigned long liobn)
> +{
> +	struct kvm *kvm = vcpu->kvm;
> +	struct kvmppc_spapr_tce_table *stt;
> +
> +	list_for_each_entry_rcu_notrace(stt, &kvm->arch.spapr_tce_tables, list)
> +		if (stt->liobn == liobn)
> +			return stt;
> +
> +	return NULL;
> +}
> +
> +/*
> + * Validates IO address.
> + *
> + * WARNING: This will be called in real-mode on HV KVM and virtual
> + *          mode on PR KVM
> + */
> +static long kvmppc_ioba_validate(struct kvmppc_spapr_tce_table *stt,
> +		unsigned long ioba, unsigned long npages)
> +{
> +	unsigned long mask = (1ULL << IOMMU_PAGE_SHIFT_4K) - 1;
> +	unsigned long idx = ioba >> IOMMU_PAGE_SHIFT_4K;
> +	unsigned long size = stt->window_size >> IOMMU_PAGE_SHIFT_4K;
> +
> +	if ((ioba & mask) || (size + npages <= idx))
> +		return H_PARAMETER;

Not sure if it's worth adding a check for overflow in (size + npages) there.

> +
> +	return H_SUCCESS;
> +}
> +
>  /* WARNING: This will be called in real-mode on HV KVM and virtual
>   *          mode on PR KVM
>   */
>  long kvmppc_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
>  		      unsigned long ioba, unsigned long tce)
>  {
> -	struct kvm *kvm = vcpu->kvm;
> -	struct kvmppc_spapr_tce_table *stt;
> +	struct kvmppc_spapr_tce_table *stt = kvmppc_find_table(vcpu, liobn);
> +	long ret = H_TOO_HARD;
> +	unsigned long idx;
> +	struct page *page;
> +	u64 *tbl;
>  
>  	/* udbg_printf("H_PUT_TCE(): liobn=0x%lx ioba=0x%lx, tce=0x%lx\n", */
>  	/* 	    liobn, ioba, tce); */
>  
> -	list_for_each_entry(stt, &kvm->arch.spapr_tce_tables, list) {
> -		if (stt->liobn == liobn) {
> -			unsigned long idx = ioba >> SPAPR_TCE_SHIFT;
> -			struct page *page;
> -			u64 *tbl;
> +	if (!stt)
> +		return ret;
>  
> -			/* udbg_printf("H_PUT_TCE: liobn 0x%lx => stt=%p  window_size=0x%x\n", */
> -			/* 	    liobn, stt, stt->window_size); */
> -			if (ioba >= stt->window_size)
> -				return H_PARAMETER;
> +	ret = kvmppc_ioba_validate(stt, ioba, 1);
> +	if (ret)
> +		return ret;
>  
> -			page = stt->pages[idx / TCES_PER_PAGE];
> -			tbl = (u64 *)page_address(page);
> +	idx = ioba >> SPAPR_TCE_SHIFT;
> +	page = stt->pages[idx / TCES_PER_PAGE];
> +	tbl = (u64 *)page_address(page);
>  
> -			/* FIXME: Need to validate the TCE itself */
> -			/* udbg_printf("tce @ %p\n", &tbl[idx % TCES_PER_PAGE]); */
> -			tbl[idx % TCES_PER_PAGE] = tce;
> -			return H_SUCCESS;
> -		}
> -	}
> +	/* FIXME: Need to validate the TCE itself */
> +	/* udbg_printf("tce @ %p\n", &tbl[idx % TCES_PER_PAGE]); */
> +	tbl[idx % TCES_PER_PAGE] = tce;
>  
> -	/* Didn't find the liobn, punt it to userspace */
> -	return H_TOO_HARD;
> +	return ret;

So, this relies on the fact that kvmppc_ioba_validate() would have
returned H_SUCCESS some distance above.  This seems rather fragile if
you insert anything else which alters ret in between.  Since this is
the success path, I think it's clearer to explicitly return H_SUCCESS.

>  }
>  EXPORT_SYMBOL_GPL(kvmppc_h_put_tce);
>  
>  long kvmppc_h_get_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
>  		      unsigned long ioba)
>  {
> -	struct kvm *kvm = vcpu->kvm;
> -	struct kvmppc_spapr_tce_table *stt;
> +	struct kvmppc_spapr_tce_table *stt = kvmppc_find_table(vcpu, liobn);
> +	long ret = H_TOO_HARD;
>  
> -	list_for_each_entry(stt, &kvm->arch.spapr_tce_tables, list) {
> -		if (stt->liobn == liobn) {
> +
> +	if (stt) {
> +		ret = kvmppc_ioba_validate(stt, ioba, 1);
> +		if (!ret) {

This relies on the fact that H_SUCCESS == 0, I'm not sure if that's
something we're already doing elsewhere or not.


>  			unsigned long idx = ioba >> SPAPR_TCE_SHIFT;
> -			struct page *page;
> -			u64 *tbl;
> -
> -			if (ioba >= stt->window_size)
> -				return H_PARAMETER;
> -
> -			page = stt->pages[idx / TCES_PER_PAGE];
> -			tbl = (u64 *)page_address(page);
> +			struct page *page = stt->pages[idx / TCES_PER_PAGE];
> +			u64 *tbl = (u64 *)page_address(page);
>  
>  			vcpu->arch.gpr[4] = tbl[idx % TCES_PER_PAGE];
> -			return H_SUCCESS;
>  		}
>  	}
>  
> -	/* Didn't find the liobn, punt it to userspace */
> -	return H_TOO_HARD;
> +
> +	return ret;
>  }
>  EXPORT_SYMBOL_GPL(kvmppc_h_get_tce);

-- 
David Gibson			| I'll have my music baroque, and my code
david AT gibson.dropbear.id.au	| minimalist, thank you.  NOT _the_ _other_
				| _way_ _around_!
http://www.ozlabs.org/~dgibson

[-- Attachment #2: signature.asc --]
[-- Type: application/pgp-signature, Size: 819 bytes --]

WARNING: multiple messages have this Message-ID (diff)
From: David Gibson <david@gibson.dropbear.id.au>
To: Alexey Kardashevskiy <aik@ozlabs.ru>
Cc: linuxppc-dev@lists.ozlabs.org, Paul Mackerras <paulus@samba.org>,
	Alexander Graf <agraf@suse.com>,
	kvm-ppc@vger.kernel.org, kvm@vger.kernel.org
Subject: Re: [PATCH kernel 3/9] KVM: PPC: Rework H_PUT_TCE/H_GET_TCE handlers
Date: Tue, 08 Dec 2015 02:18:04 +0000	[thread overview]
Message-ID: <20151208021804.GI20139@voom.fritz.box> (raw)
In-Reply-To: <1442314179-9706-4-git-send-email-aik@ozlabs.ru>

[-- Attachment #1: Type: text/plain, Size: 5925 bytes --]

On Tue, Sep 15, 2015 at 08:49:33PM +1000, Alexey Kardashevskiy wrote:
> This reworks the existing H_PUT_TCE/H_GET_TCE handlers to have one
> exit path. This allows next patch to add locks nicely.

I don't see a problem with the actual code, but it doesn't seem to
match this description: I still see multiple return statements for
h_put_tce at least.

> This moves the ioba boundary check into a helper and adds a check that
> the least significant bits of ioba (below the IOMMU page size) are zero.
> 
> The patch is pretty mechanical (only the check for the least significant
> ioba bits is added), so no change in behaviour is expected.
> 
> Signed-off-by: Alexey Kardashevskiy <aik@ozlabs.ru>
> ---
>  arch/powerpc/kvm/book3s_64_vio_hv.c | 102 +++++++++++++++++++++++-------------
>  1 file changed, 66 insertions(+), 36 deletions(-)
> 
> diff --git a/arch/powerpc/kvm/book3s_64_vio_hv.c b/arch/powerpc/kvm/book3s_64_vio_hv.c
> index 89e96b3..8ae12ac 100644
> --- a/arch/powerpc/kvm/book3s_64_vio_hv.c
> +++ b/arch/powerpc/kvm/book3s_64_vio_hv.c
> @@ -35,71 +35,101 @@
>  #include <asm/ppc-opcode.h>
>  #include <asm/kvm_host.h>
>  #include <asm/udbg.h>
> +#include <asm/iommu.h>
>  
>  #define TCES_PER_PAGE	(PAGE_SIZE / sizeof(u64))
>  
> +/*
> + * Finds a TCE table descriptor by LIOBN.
> + *
> + * WARNING: This will be called in real or virtual mode on HV KVM and virtual
> + *          mode on PR KVM
> + */
> +static struct kvmppc_spapr_tce_table *kvmppc_find_table(struct kvm_vcpu *vcpu,
> +		unsigned long liobn)
> +{
> +	struct kvm *kvm = vcpu->kvm;
> +	struct kvmppc_spapr_tce_table *stt;
> +
> +	list_for_each_entry_rcu_notrace(stt, &kvm->arch.spapr_tce_tables, list)
> +		if (stt->liobn == liobn)
> +			return stt;
> +
> +	return NULL;
> +}
> +
> +/*
> + * Validates IO address.
> + *
> + * WARNING: This will be called in real-mode on HV KVM and virtual
> + *          mode on PR KVM
> + */
> +static long kvmppc_ioba_validate(struct kvmppc_spapr_tce_table *stt,
> +		unsigned long ioba, unsigned long npages)
> +{
> +	unsigned long mask = (1ULL << IOMMU_PAGE_SHIFT_4K) - 1;
> +	unsigned long idx = ioba >> IOMMU_PAGE_SHIFT_4K;
> +	unsigned long size = stt->window_size >> IOMMU_PAGE_SHIFT_4K;
> +
> +	if ((ioba & mask) || (size + npages <= idx))
> +		return H_PARAMETER;

Not sure if it's worth adding a check for overflow in (size + npages) there.

> +
> +	return H_SUCCESS;
> +}
> +
>  /* WARNING: This will be called in real-mode on HV KVM and virtual
>   *          mode on PR KVM
>   */
>  long kvmppc_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
>  		      unsigned long ioba, unsigned long tce)
>  {
> -	struct kvm *kvm = vcpu->kvm;
> -	struct kvmppc_spapr_tce_table *stt;
> +	struct kvmppc_spapr_tce_table *stt = kvmppc_find_table(vcpu, liobn);
> +	long ret = H_TOO_HARD;
> +	unsigned long idx;
> +	struct page *page;
> +	u64 *tbl;
>  
>  	/* udbg_printf("H_PUT_TCE(): liobn=0x%lx ioba=0x%lx, tce=0x%lx\n", */
>  	/* 	    liobn, ioba, tce); */
>  
> -	list_for_each_entry(stt, &kvm->arch.spapr_tce_tables, list) {
> -		if (stt->liobn == liobn) {
> -			unsigned long idx = ioba >> SPAPR_TCE_SHIFT;
> -			struct page *page;
> -			u64 *tbl;
> +	if (!stt)
> +		return ret;
>  
> -			/* udbg_printf("H_PUT_TCE: liobn 0x%lx => stt=%p  window_size=0x%x\n", */
> -			/* 	    liobn, stt, stt->window_size); */
> -			if (ioba >= stt->window_size)
> -				return H_PARAMETER;
> +	ret = kvmppc_ioba_validate(stt, ioba, 1);
> +	if (ret)
> +		return ret;
>  
> -			page = stt->pages[idx / TCES_PER_PAGE];
> -			tbl = (u64 *)page_address(page);
> +	idx = ioba >> SPAPR_TCE_SHIFT;
> +	page = stt->pages[idx / TCES_PER_PAGE];
> +	tbl = (u64 *)page_address(page);
>  
> -			/* FIXME: Need to validate the TCE itself */
> -			/* udbg_printf("tce @ %p\n", &tbl[idx % TCES_PER_PAGE]); */
> -			tbl[idx % TCES_PER_PAGE] = tce;
> -			return H_SUCCESS;
> -		}
> -	}
> +	/* FIXME: Need to validate the TCE itself */
> +	/* udbg_printf("tce @ %p\n", &tbl[idx % TCES_PER_PAGE]); */
> +	tbl[idx % TCES_PER_PAGE] = tce;
>  
> -	/* Didn't find the liobn, punt it to userspace */
> -	return H_TOO_HARD;
> +	return ret;

So, this relies on the fact that kvmppc_ioba_validate() would have
returned H_SUCCESS some distance above.  This seems rather fragile if
you insert anything else which alters ret in between.  Since this is
the success path, I think it's clearer to explicitly return H_SUCCESS.

>  }
>  EXPORT_SYMBOL_GPL(kvmppc_h_put_tce);
>  
>  long kvmppc_h_get_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
>  		      unsigned long ioba)
>  {
> -	struct kvm *kvm = vcpu->kvm;
> -	struct kvmppc_spapr_tce_table *stt;
> +	struct kvmppc_spapr_tce_table *stt = kvmppc_find_table(vcpu, liobn);
> +	long ret = H_TOO_HARD;
>  
> -	list_for_each_entry(stt, &kvm->arch.spapr_tce_tables, list) {
> -		if (stt->liobn == liobn) {
> +
> +	if (stt) {
> +		ret = kvmppc_ioba_validate(stt, ioba, 1);
> +		if (!ret) {

This relies on the fact that H_SUCCESS == 0, I'm not sure if that's
something we're already doing elsewhere or not.


>  			unsigned long idx = ioba >> SPAPR_TCE_SHIFT;
> -			struct page *page;
> -			u64 *tbl;
> -
> -			if (ioba >= stt->window_size)
> -				return H_PARAMETER;
> -
> -			page = stt->pages[idx / TCES_PER_PAGE];
> -			tbl = (u64 *)page_address(page);
> +			struct page *page = stt->pages[idx / TCES_PER_PAGE];
> +			u64 *tbl = (u64 *)page_address(page);
>  
>  			vcpu->arch.gpr[4] = tbl[idx % TCES_PER_PAGE];
> -			return H_SUCCESS;
>  		}
>  	}
>  
> -	/* Didn't find the liobn, punt it to userspace */
> -	return H_TOO_HARD;
> +
> +	return ret;
>  }
>  EXPORT_SYMBOL_GPL(kvmppc_h_get_tce);

-- 
David Gibson			| I'll have my music baroque, and my code
david AT gibson.dropbear.id.au	| minimalist, thank you.  NOT _the_ _other_
				| _way_ _around_!
http://www.ozlabs.org/~dgibson

[-- Attachment #2: signature.asc --]
[-- Type: application/pgp-signature, Size: 819 bytes --]

  reply	other threads:[~2015-12-08  2:45 UTC|newest]

Thread overview: 46+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2015-09-15 10:49 [PATCH kernel 0/9] KVM: PPC: Add in-kernel multitce handling Alexey Kardashevskiy
2015-09-15 10:49 ` Alexey Kardashevskiy
2015-09-15 10:49 ` [PATCH kernel 1/9] rcu: Define notrace version of list_for_each_entry_rcu Alexey Kardashevskiy
2015-09-15 10:49   ` Alexey Kardashevskiy
2015-12-08  2:05   ` David Gibson
2015-12-08  2:05     ` David Gibson
2015-09-15 10:49 ` [PATCH kernel 2/9] KVM: PPC: Make real_vmalloc_addr() public Alexey Kardashevskiy
2015-09-15 10:49   ` Alexey Kardashevskiy
2015-12-08  2:08   ` David Gibson
2015-12-08  2:08     ` David Gibson
2015-09-15 10:49 ` [PATCH kernel 3/9] KVM: PPC: Rework H_PUT_TCE/H_GET_TCE handlers Alexey Kardashevskiy
2015-09-15 10:49   ` Alexey Kardashevskiy
2015-12-08  2:18   ` David Gibson [this message]
2015-12-08  2:18     ` David Gibson
2015-09-15 10:49 ` [PATCH kernel 4/9] KVM: PPC: Use RCU for arch.spapr_tce_tables Alexey Kardashevskiy
2015-09-15 10:49   ` Alexey Kardashevskiy
2015-12-08  2:35   ` David Gibson
2015-12-08  2:35     ` David Gibson
2015-09-15 10:49 ` [PATCH kernel 5/9] KVM: PPC: Account TCE-containing pages in locked_vm Alexey Kardashevskiy
2015-09-15 10:49   ` Alexey Kardashevskiy
2015-11-30  2:06   ` Paul Mackerras
2015-11-30  2:06     ` Paul Mackerras
2015-11-30  5:09     ` Alexey Kardashevskiy
2015-11-30  5:09       ` Alexey Kardashevskiy
2015-12-08  5:18   ` David Gibson
2015-12-08  5:18     ` David Gibson
2015-09-15 10:49 ` [PATCH kernel 6/9] KVM: PPC: Replace SPAPR_TCE_SHIFT with IOMMU_PAGE_SHIFT_4K Alexey Kardashevskiy
2015-09-15 10:49   ` Alexey Kardashevskiy
2015-12-08  5:19   ` David Gibson
2015-12-08  5:19     ` David Gibson
2015-09-15 10:49 ` [PATCH kernel 7/9] KVM: PPC: Move reusable bits of H_PUT_TCE handler to helpers Alexey Kardashevskiy
2015-09-15 10:49   ` Alexey Kardashevskiy
2015-12-08  5:27   ` David Gibson
2015-12-08  5:27     ` David Gibson
2015-12-22  7:24     ` Alexey Kardashevskiy
2015-12-22  7:24       ` Alexey Kardashevskiy
2015-09-15 10:49 ` [PATCH kernel 8/9] KVM: Fix KVM_SMI chapter number Alexey Kardashevskiy
2015-09-15 10:49   ` Alexey Kardashevskiy
2015-12-08  5:29   ` David Gibson
2015-12-08  5:29     ` David Gibson
2015-09-15 10:49 ` [PATCH kernel 9/9] KVM: PPC: Add support for multiple-TCE hcalls Alexey Kardashevskiy
2015-09-15 10:49   ` Alexey Kardashevskiy
2015-12-08  5:48   ` David Gibson
2015-12-08  5:48     ` David Gibson
2015-12-22  7:42     ` Alexey Kardashevskiy
2015-12-22  7:42       ` Alexey Kardashevskiy

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=20151208021804.GI20139@voom.fritz.box \
    --to=david@gibson.dropbear.id.au \
    --cc=agraf@suse.com \
    --cc=aik@ozlabs.ru \
    --cc=kvm-ppc@vger.kernel.org \
    --cc=kvm@vger.kernel.org \
    --cc=linuxppc-dev@lists.ozlabs.org \
    --cc=paulus@samba.org \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.