From mboxrd@z Thu Jan 1 00:00:00 1970
Date: Wed, 13 May 2015 16:32:42 +1000
From: Gavin Shan
To: Alexey Kardashevskiy
Cc: linuxppc-dev@lists.ozlabs.org, David Gibson, Benjamin Herrenschmidt,
	Paul Mackerras, Alex Williamson, Gavin Shan, Wei Yang,
	linux-kernel@vger.kernel.org
Subject: Re: [PATCH kernel v10 11/34] vfio: powerpc/spapr: Moving pinning/unpinning to helpers
Message-ID: <20150513063242.GA9547@gwshan>
Reply-To: Gavin Shan
References: <1431358763-24371-1-git-send-email-aik@ozlabs.ru>
	<1431358763-24371-12-git-send-email-aik@ozlabs.ru>
In-Reply-To: <1431358763-24371-12-git-send-email-aik@ozlabs.ru>
MIME-Version: 1.0
Content-Type: text/plain; charset=us-ascii
Content-Disposition: inline
User-Agent: Mutt/1.5.23 (2014-03-12)
X-Mailing-List: linux-kernel@vger.kernel.org

On Tue, May 12, 2015 at 01:39:00AM +1000, Alexey Kardashevskiy wrote:
>This is a pretty mechanical patch to make the next patches simpler.
>
>The new tce_iommu_unuse_page() helper does put_page() now, but it might
>skip that after the memory registering patch is applied.
>
>While we are here, this removes the unnecessary checks for the value
>returned by pfn_to_page(), as it cannot possibly return NULL.
>
>This moves tce_iommu_disable() later to let tce_iommu_clear() know
>whether the container has been enabled, because if it has not been,
>put_page() must not be called on TCEs from the TCE table. This situation
>is not yet possible, but it will be once the KVM acceleration patchset
>is applied.
>
>This changes the code to work with physical addresses rather than linear
>mapping addresses for better code readability. Following patches will
>add an xchg() callback for an IOMMU table which will accept/return
>physical addresses (unlike the current tce_build()), eliminating
>redundant conversions.
>
>Signed-off-by: Alexey Kardashevskiy
>[aw: for the vfio related changes]
>Acked-by: Alex Williamson
>Reviewed-by: David Gibson

Reviewed-by: Gavin Shan

>---
>Changes:
>v9:
>* changed helpers to work with physical addresses rather than linear
>(for simplicity - later ::xchg() will receive physical addresses and
>avoid additional conversions)
>
>v6:
>* tce_get_hva() returns hva via a pointer
>---
> drivers/vfio/vfio_iommu_spapr_tce.c | 61 +++++++++++++++++++++++++------------
> 1 file changed, 41 insertions(+), 20 deletions(-)
>
>diff --git a/drivers/vfio/vfio_iommu_spapr_tce.c b/drivers/vfio/vfio_iommu_spapr_tce.c
>index e21479c..115d5e6 100644
>--- a/drivers/vfio/vfio_iommu_spapr_tce.c
>+++ b/drivers/vfio/vfio_iommu_spapr_tce.c
>@@ -191,69 +191,90 @@ static void tce_iommu_release(void *iommu_data)
> 	struct tce_container *container = iommu_data;
> 
> 	WARN_ON(container->tbl && !container->tbl->it_group);
>-	tce_iommu_disable(container);
> 
> 	if (container->tbl && container->tbl->it_group)
> 		tce_iommu_detach_group(iommu_data, container->tbl->it_group);
> 
>+	tce_iommu_disable(container);
> 	mutex_destroy(&container->lock);
> 
> 	kfree(container);
> }
> 
>+static void tce_iommu_unuse_page(struct tce_container *container,
>+		unsigned long oldtce)
>+{
>+	struct page *page;
>+
>+	if (!(oldtce & (TCE_PCI_READ | TCE_PCI_WRITE)))
>+		return;
>+

It might be worthwhile to have a global helper function in iommu.h to
check whether a given TCE entry is empty, for better readability. I
would think such a helper would be useful here and there :-)
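
Something like the sketch below, perhaps. This is only an illustration
and untested; the name tce_is_valid() and placing it in
arch/powerpc/include/asm/iommu.h are just suggestions:

	/*
	 * A TCE carrying neither read nor write permission is empty,
	 * so "in use" means at least one permission bit is set.
	 */
	static inline bool tce_is_valid(unsigned long tce)
	{
		return tce & (TCE_PCI_READ | TCE_PCI_WRITE);
	}

Then the check above would read "if (!tce_is_valid(oldtce)) return;"
and the same test could replace the open-coded variants elsewhere.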
Thanks,
Gavin

>+	page = pfn_to_page(oldtce >> PAGE_SHIFT);
>+
>+	if (oldtce & TCE_PCI_WRITE)
>+		SetPageDirty(page);
>+
>+	put_page(page);
>+}
>+
> static int tce_iommu_clear(struct tce_container *container,
> 		struct iommu_table *tbl,
> 		unsigned long entry, unsigned long pages)
> {
> 	unsigned long oldtce;
>-	struct page *page;
> 
> 	for ( ; pages; --pages, ++entry) {
> 		oldtce = iommu_clear_tce(tbl, entry);
> 		if (!oldtce)
> 			continue;
> 
>-		page = pfn_to_page(oldtce >> PAGE_SHIFT);
>-		WARN_ON(!page);
>-		if (page) {
>-			if (oldtce & TCE_PCI_WRITE)
>-				SetPageDirty(page);
>-			put_page(page);
>-		}
>+		tce_iommu_unuse_page(container, oldtce);
> 	}
> 
> 	return 0;
> }
> 
>+static int tce_iommu_use_page(unsigned long tce, unsigned long *hpa)
>+{
>+	struct page *page = NULL;
>+	enum dma_data_direction direction = iommu_tce_direction(tce);
>+
>+	if (get_user_pages_fast(tce & PAGE_MASK, 1,
>+			direction != DMA_TO_DEVICE, &page) != 1)
>+		return -EFAULT;
>+
>+	*hpa = __pa((unsigned long) page_address(page));
>+
>+	return 0;
>+}
>+
> static long tce_iommu_build(struct tce_container *container,
> 		struct iommu_table *tbl,
> 		unsigned long entry, unsigned long tce, unsigned long pages)
> {
> 	long i, ret = 0;
>-	struct page *page = NULL;
>-	unsigned long hva;
>+	struct page *page;
>+	unsigned long hpa;
> 	enum dma_data_direction direction = iommu_tce_direction(tce);
> 
> 	for (i = 0; i < pages; ++i) {
> 		unsigned long offset = tce & IOMMU_PAGE_MASK(tbl) & ~PAGE_MASK;
> 
>-		ret = get_user_pages_fast(tce & PAGE_MASK, 1,
>-				direction != DMA_TO_DEVICE, &page);
>-		if (unlikely(ret != 1)) {
>-			ret = -EFAULT;
>+		ret = tce_iommu_use_page(tce, &hpa);
>+		if (ret)
> 			break;
>-		}
> 
>+		page = pfn_to_page(hpa >> PAGE_SHIFT);
> 		if (!tce_page_is_contained(page, tbl->it_page_shift)) {
> 			ret = -EPERM;
> 			break;
> 		}
> 
>-		hva = (unsigned long) page_address(page) + offset;
>-
>-		ret = iommu_tce_build(tbl, entry + i, hva, direction);
>+		hpa |= offset;
>+		ret = iommu_tce_build(tbl, entry + i, (unsigned long) __va(hpa),
>+				direction);
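
(A side note for readers rather than an objection: iommu_tce_build()
still expects a linear-mapping address, hence the __va(hpa) round trip
above until the planned ::xchg() callback takes physical addresses
directly. A minimal sketch of why the round trip is lossless, assuming
the page lives in the kernel linear mapping:

	unsigned long hva = (unsigned long) page_address(page);
	unsigned long hpa = __pa(hva);	/* linear virtual -> physical */
	/* __va() inverts __pa() for linear-mapped addresses */
	WARN_ON((unsigned long) __va(hpa) != hva);

so storing hpa and converting back at the call site changes nothing
functionally.)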
> 		if (ret) {
>-			put_page(page);
>+			tce_iommu_unuse_page(container, hpa);
> 			pr_err("iommu_tce: %s failed ioba=%lx, tce=%lx, ret=%ld\n",
> 				__func__, entry << tbl->it_page_shift,
> 				tce, ret);
>-- 
>2.4.0.rc3.8.gfb3e7d5
>