From mboxrd@z Thu Jan 1 00:00:00 1970
Return-Path:
Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand
	id S1751932AbbEMGNN (ORCPT ); Wed, 13 May 2015 02:13:13 -0400
Received: from e23smtp04.au.ibm.com ([202.81.31.146]:57995 "EHLO e23smtp04.au.ibm.com"
	rhost-flags-OK-OK-OK-OK) by vger.kernel.org with ESMTP id S1751519AbbEMGNL
	(ORCPT ); Wed, 13 May 2015 02:13:11 -0400
Date: Wed, 13 May 2015 16:12:07 +1000
From: Gavin Shan
To: Alexey Kardashevskiy
Cc: linuxppc-dev@lists.ozlabs.org, David Gibson, Benjamin Herrenschmidt,
	Paul Mackerras, Alex Williamson, Gavin Shan, Wei Yang,
	linux-kernel@vger.kernel.org
Subject: Re: [PATCH kernel v10 08/34] vfio: powerpc/spapr: Use it_page_size
Message-ID: <20150513061207.GB6283@gwshan>
Reply-To: Gavin Shan
References: <1431358763-24371-1-git-send-email-aik@ozlabs.ru>
	<1431358763-24371-9-git-send-email-aik@ozlabs.ru>
MIME-Version: 1.0
Content-Type: text/plain; charset=us-ascii
Content-Disposition: inline
In-Reply-To: <1431358763-24371-9-git-send-email-aik@ozlabs.ru>
User-Agent: Mutt/1.5.23 (2014-03-12)
X-TM-AS-MML: disable
X-Content-Scanned: Fidelis XPS MAILER
x-cbid: 15051306-0013-0000-0000-0000013DE21E
Sender: linux-kernel-owner@vger.kernel.org
List-ID:
X-Mailing-List: linux-kernel@vger.kernel.org

On Tue, May 12, 2015 at 01:38:57AM +1000, Alexey Kardashevskiy wrote:
>This makes use of the it_page_size from the iommu_table struct
>as page size can differ.
>
>This replaces missing IOMMU_PAGE_SHIFT macro in commented debug code
>as recently introduced IOMMU_PAGE_XXX macros do not include
>IOMMU_PAGE_SHIFT.
>
>Signed-off-by: Alexey Kardashevskiy
>Reviewed-by: David Gibson
>[aw: for the vfio related changes]
>Acked-by: Alex Williamson

Reviewed-by: Gavin Shan

Thanks,
Gavin

>---
> drivers/vfio/vfio_iommu_spapr_tce.c | 26 +++++++++++++-------------
> 1 file changed, 13 insertions(+), 13 deletions(-)
>
>diff --git a/drivers/vfio/vfio_iommu_spapr_tce.c b/drivers/vfio/vfio_iommu_spapr_tce.c
>index 735b308..64300cc 100644
>--- a/drivers/vfio/vfio_iommu_spapr_tce.c
>+++ b/drivers/vfio/vfio_iommu_spapr_tce.c
>@@ -91,7 +91,7 @@ static int tce_iommu_enable(struct tce_container *container)
> 	 * enforcing the limit based on the max that the guest can map.
> 	 */
> 	down_write(&current->mm->mmap_sem);
>-	npages = (tbl->it_size << IOMMU_PAGE_SHIFT_4K) >> PAGE_SHIFT;
>+	npages = (tbl->it_size << tbl->it_page_shift) >> PAGE_SHIFT;
> 	locked = current->mm->locked_vm + npages;
> 	lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
> 	if (locked > lock_limit && !capable(CAP_IPC_LOCK)) {
>@@ -120,7 +120,7 @@ static void tce_iommu_disable(struct tce_container *container)
>
> 	down_write(&current->mm->mmap_sem);
> 	current->mm->locked_vm -= (container->tbl->it_size <<
>-			IOMMU_PAGE_SHIFT_4K) >> PAGE_SHIFT;
>+			container->tbl->it_page_shift) >> PAGE_SHIFT;
> 	up_write(&current->mm->mmap_sem);
> }
>
>@@ -215,7 +215,7 @@ static long tce_iommu_build(struct tce_container *container,
> 				tce, ret);
> 			break;
> 		}
>-		tce += IOMMU_PAGE_SIZE_4K;
>+		tce += IOMMU_PAGE_SIZE(tbl);
> 	}
>
> 	if (ret)
>@@ -260,8 +260,8 @@ static long tce_iommu_ioctl(void *iommu_data,
> 		if (info.argsz < minsz)
> 			return -EINVAL;
>
>-		info.dma32_window_start = tbl->it_offset << IOMMU_PAGE_SHIFT_4K;
>-		info.dma32_window_size = tbl->it_size << IOMMU_PAGE_SHIFT_4K;
>+		info.dma32_window_start = tbl->it_offset << tbl->it_page_shift;
>+		info.dma32_window_size = tbl->it_size << tbl->it_page_shift;
> 		info.flags = 0;
>
> 		if (copy_to_user((void __user *)arg, &info, minsz))
>@@ -291,8 +291,8 @@ static long tce_iommu_ioctl(void *iommu_data,
> 				VFIO_DMA_MAP_FLAG_WRITE))
> 			return -EINVAL;
>
>-		if ((param.size & ~IOMMU_PAGE_MASK_4K) ||
>-				(param.vaddr & ~IOMMU_PAGE_MASK_4K))
>+		if ((param.size & ~IOMMU_PAGE_MASK(tbl)) ||
>+				(param.vaddr & ~IOMMU_PAGE_MASK(tbl)))
> 			return -EINVAL;
>
> 		/* iova is checked by the IOMMU API */
>@@ -307,8 +307,8 @@ static long tce_iommu_ioctl(void *iommu_data,
> 			return ret;
>
> 		ret = tce_iommu_build(container, tbl,
>-				param.iova >> IOMMU_PAGE_SHIFT_4K,
>-				tce, param.size >> IOMMU_PAGE_SHIFT_4K);
>+				param.iova >> tbl->it_page_shift,
>+				tce, param.size >> tbl->it_page_shift);
>
> 		iommu_flush_tce(tbl);
>
>@@ -334,17 +334,17 @@ static long tce_iommu_ioctl(void *iommu_data,
> 		if (param.flags)
> 			return -EINVAL;
>
>-		if (param.size & ~IOMMU_PAGE_MASK_4K)
>+		if (param.size & ~IOMMU_PAGE_MASK(tbl))
> 			return -EINVAL;
>
> 		ret = iommu_tce_clear_param_check(tbl, param.iova, 0,
>-				param.size >> IOMMU_PAGE_SHIFT_4K);
>+				param.size >> tbl->it_page_shift);
> 		if (ret)
> 			return ret;
>
> 		ret = tce_iommu_clear(container, tbl,
>-				param.iova >> IOMMU_PAGE_SHIFT_4K,
>-				param.size >> IOMMU_PAGE_SHIFT_4K);
>+				param.iova >> tbl->it_page_shift,
>+				param.size >> tbl->it_page_shift);
> 		iommu_flush_tce(tbl);
>
> 		return ret;
>--
>2.4.0.rc3.8.gfb3e7d5
>
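A note on the arithmetic the patch relies on: the DMA window covers it_size IOMMU pages of
(1 << it_page_shift) bytes each, while locked-memory accounting against RLIMIT_MEMLOCK is done
in system pages of (1 << PAGE_SHIFT) bytes, hence the back-to-back shifts in tce_iommu_enable().
The standalone C sketch below only illustrates that arithmetic and is not kernel code; the
struct, the helper names and the 64K example values are assumptions made up here, not taken
from the patch.

/*
 * Standalone illustration (not kernel code): how a per-table
 * it_page_shift determines the IOMMU page size, the alignment mask,
 * and the number of system pages charged against RLIMIT_MEMLOCK.
 * The struct, helpers and example numbers are hypothetical.
 */
#include <stdio.h>

#define PAGE_SHIFT 16UL			/* assume 64K system pages, common on ppc64 */

struct iommu_table_sketch {
	unsigned long it_size;		/* number of TCE entries in the window */
	unsigned long it_offset;	/* window start, in IOMMU pages */
	unsigned long it_page_shift;	/* log2 of the IOMMU page size */
};

/* Same idea as IOMMU_PAGE_SIZE(tbl): one IOMMU page in bytes. */
static unsigned long iommu_page_size(const struct iommu_table_sketch *tbl)
{
	return 1UL << tbl->it_page_shift;
}

/* Same idea as IOMMU_PAGE_MASK(tbl): mask off the in-page offset bits. */
static unsigned long iommu_page_mask(const struct iommu_table_sketch *tbl)
{
	return ~(iommu_page_size(tbl) - 1);
}

int main(void)
{
	struct iommu_table_sketch tbl = {
		.it_size = 0x4000,	/* hypothetical: 16K entries */
		.it_offset = 0,
		.it_page_shift = 16,	/* hypothetical: 64K IOMMU pages */
	};

	/* As reported by VFIO_IOMMU_SPAPR_TCE_GET_INFO: window start and size in bytes. */
	unsigned long window_start = tbl.it_offset << tbl.it_page_shift;
	unsigned long window_size = tbl.it_size << tbl.it_page_shift;

	/* As in tce_iommu_enable(): system pages charged against RLIMIT_MEMLOCK. */
	unsigned long npages = (tbl.it_size << tbl.it_page_shift) >> PAGE_SHIFT;

	printf("window %#lx..%#lx, %lu system pages locked, page mask %#lx\n",
	       window_start, window_start + window_size, npages,
	       iommu_page_mask(&tbl));
	return 0;
}

With 64K IOMMU pages, a hard-coded IOMMU_PAGE_SHIFT_4K would under-count the locked pages by a
factor of 16, which illustrates why the per-table it_page_shift is needed once the page size
can differ.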