linux-kernel.vger.kernel.org archive mirror
* [PATCH] mm/hmm: remove hmm_range_dma_map and hmm_range_dma_unmap
@ 2019-11-13 13:45 Christoph Hellwig
  2019-11-13 18:49 ` Jerome Glisse
  2019-11-15  6:18 ` John Hubbard
  0 siblings, 2 replies; 4+ messages in thread
From: Christoph Hellwig @ 2019-11-13 13:45 UTC (permalink / raw)
  To: jgg, jglisse; +Cc: linux-mm, linux-kernel

These two functions have never been used since they were added.

Signed-off-by: Christoph Hellwig <hch@lst.de>
---
 include/linux/hmm.h |  23 -------
 mm/hmm.c            | 147 --------------------------------------------
 2 files changed, 170 deletions(-)

diff --git a/include/linux/hmm.h b/include/linux/hmm.h
index b4af51735232..922c8d015cdb 100644
--- a/include/linux/hmm.h
+++ b/include/linux/hmm.h
@@ -230,34 +230,11 @@ static inline uint64_t hmm_device_entry_from_pfn(const struct hmm_range *range,
  * Please see Documentation/vm/hmm.rst for how to use the range API.
  */
 long hmm_range_fault(struct hmm_range *range, unsigned int flags);
-
-long hmm_range_dma_map(struct hmm_range *range,
-		       struct device *device,
-		       dma_addr_t *daddrs,
-		       unsigned int flags);
-long hmm_range_dma_unmap(struct hmm_range *range,
-			 struct device *device,
-			 dma_addr_t *daddrs,
-			 bool dirty);
 #else
 static inline long hmm_range_fault(struct hmm_range *range, unsigned int flags)
 {
 	return -EOPNOTSUPP;
 }
-
-static inline long hmm_range_dma_map(struct hmm_range *range,
-				     struct device *device, dma_addr_t *daddrs,
-				     unsigned int flags)
-{
-	return -EOPNOTSUPP;
-}
-
-static inline long hmm_range_dma_unmap(struct hmm_range *range,
-				       struct device *device,
-				       dma_addr_t *daddrs, bool dirty)
-{
-	return -EOPNOTSUPP;
-}
 #endif
 
 /*
diff --git a/mm/hmm.c b/mm/hmm.c
index 9e9d3f4ea17c..ab883eefe847 100644
--- a/mm/hmm.c
+++ b/mm/hmm.c
@@ -698,150 +698,3 @@ long hmm_range_fault(struct hmm_range *range, unsigned int flags)
 	return (hmm_vma_walk.last - range->start) >> PAGE_SHIFT;
 }
 EXPORT_SYMBOL(hmm_range_fault);
-
-/**
- * hmm_range_dma_map - hmm_range_fault() and dma map page all in one.
- * @range:	range being faulted
- * @device:	device to map page to
- * @daddrs:	array of dma addresses for the mapped pages
- * @flags:	HMM_FAULT_*
- *
- * Return: the number of pages mapped on success (including zero), or any
- * status return from hmm_range_fault() otherwise.
- */
-long hmm_range_dma_map(struct hmm_range *range, struct device *device,
-		dma_addr_t *daddrs, unsigned int flags)
-{
-	unsigned long i, npages, mapped;
-	long ret;
-
-	ret = hmm_range_fault(range, flags);
-	if (ret <= 0)
-		return ret ? ret : -EBUSY;
-
-	npages = (range->end - range->start) >> PAGE_SHIFT;
-	for (i = 0, mapped = 0; i < npages; ++i) {
-		enum dma_data_direction dir = DMA_TO_DEVICE;
-		struct page *page;
-
-		/*
-		 * FIXME need to update DMA API to provide invalid DMA address
-		 * value instead of a function to test dma address value. This
-		 * would remove lot of dumb code duplicated accross many arch.
-		 *
-		 * For now setting it to 0 here is good enough as the pfns[]
-		 * value is what is use to check what is valid and what isn't.
-		 */
-		daddrs[i] = 0;
-
-		page = hmm_device_entry_to_page(range, range->pfns[i]);
-		if (page == NULL)
-			continue;
-
-		/* Check if range is being invalidated */
-		if (mmu_range_check_retry(range->notifier,
-					  range->notifier_seq)) {
-			ret = -EBUSY;
-			goto unmap;
-		}
-
-		/* If it is read and write than map bi-directional. */
-		if (range->pfns[i] & range->flags[HMM_PFN_WRITE])
-			dir = DMA_BIDIRECTIONAL;
-
-		daddrs[i] = dma_map_page(device, page, 0, PAGE_SIZE, dir);
-		if (dma_mapping_error(device, daddrs[i])) {
-			ret = -EFAULT;
-			goto unmap;
-		}
-
-		mapped++;
-	}
-
-	return mapped;
-
-unmap:
-	for (npages = i, i = 0; (i < npages) && mapped; ++i) {
-		enum dma_data_direction dir = DMA_TO_DEVICE;
-		struct page *page;
-
-		page = hmm_device_entry_to_page(range, range->pfns[i]);
-		if (page == NULL)
-			continue;
-
-		if (dma_mapping_error(device, daddrs[i]))
-			continue;
-
-		/* If it is read and write than map bi-directional. */
-		if (range->pfns[i] & range->flags[HMM_PFN_WRITE])
-			dir = DMA_BIDIRECTIONAL;
-
-		dma_unmap_page(device, daddrs[i], PAGE_SIZE, dir);
-		mapped--;
-	}
-
-	return ret;
-}
-EXPORT_SYMBOL(hmm_range_dma_map);
-
-/**
- * hmm_range_dma_unmap() - unmap range of that was map with hmm_range_dma_map()
- * @range: range being unmapped
- * @device: device against which dma map was done
- * @daddrs: dma address of mapped pages
- * @dirty: dirty page if it had the write flag set
- * Return: number of page unmapped on success, -EINVAL otherwise
- *
- * Note that caller MUST abide by mmu notifier or use HMM mirror and abide
- * to the sync_cpu_device_pagetables() callback so that it is safe here to
- * call set_page_dirty(). Caller must also take appropriate locks to avoid
- * concurrent mmu notifier or sync_cpu_device_pagetables() to make progress.
- */
-long hmm_range_dma_unmap(struct hmm_range *range,
-			 struct device *device,
-			 dma_addr_t *daddrs,
-			 bool dirty)
-{
-	unsigned long i, npages;
-	long cpages = 0;
-
-	/* Sanity check. */
-	if (range->end <= range->start)
-		return -EINVAL;
-	if (!daddrs)
-		return -EINVAL;
-	if (!range->pfns)
-		return -EINVAL;
-
-	npages = (range->end - range->start) >> PAGE_SHIFT;
-	for (i = 0; i < npages; ++i) {
-		enum dma_data_direction dir = DMA_TO_DEVICE;
-		struct page *page;
-
-		page = hmm_device_entry_to_page(range, range->pfns[i]);
-		if (page == NULL)
-			continue;
-
-		/* If it is read and write than map bi-directional. */
-		if (range->pfns[i] & range->flags[HMM_PFN_WRITE]) {
-			dir = DMA_BIDIRECTIONAL;
-
-			/*
-			 * See comments in function description on why it is
-			 * safe here to call set_page_dirty()
-			 */
-			if (dirty)
-				set_page_dirty(page);
-		}
-
-		/* Unmap and clear pfns/dma address */
-		dma_unmap_page(device, daddrs[i], PAGE_SIZE, dir);
-		range->pfns[i] = range->values[HMM_PFN_NONE];
-		/* FIXME see comments in hmm_vma_dma_map() */
-		daddrs[i] = 0;
-		cpages++;
-	}
-
-	return cpages;
-}
-EXPORT_SYMBOL(hmm_range_dma_unmap);
-- 
2.20.1



* Re: [PATCH] mm/hmm: remove hmm_range_dma_map and hmm_range_dma_unmap
  2019-11-13 13:45 [PATCH] mm/hmm: remove hmm_range_dma_map and hmm_range_dma_unmap Christoph Hellwig
@ 2019-11-13 18:49 ` Jerome Glisse
  2019-11-19 18:49   ` Jason Gunthorpe
  2019-11-15  6:18 ` John Hubbard
  1 sibling, 1 reply; 4+ messages in thread
From: Jerome Glisse @ 2019-11-13 18:49 UTC (permalink / raw)
  To: Christoph Hellwig; +Cc: jgg, linux-mm, linux-kernel

On Wed, Nov 13, 2019 at 02:45:28PM +0100, Christoph Hellwig wrote:
> These two functions have never been used since they were added.

The mlx5 conversion (which has been posted a few times now) uses them;
dunno what Jason's plan is on that front.


* Re: [PATCH] mm/hmm: remove hmm_range_dma_map and hmm_range_dma_unmap
  2019-11-13 13:45 [PATCH] mm/hmm: remove hmm_range_dma_map and hmm_range_dma_unmap Christoph Hellwig
  2019-11-13 18:49 ` Jerome Glisse
@ 2019-11-15  6:18 ` John Hubbard
  1 sibling, 0 replies; 4+ messages in thread
From: John Hubbard @ 2019-11-15  6:18 UTC (permalink / raw)
  To: Christoph Hellwig, jgg, jglisse; +Cc: linux-mm, linux-kernel

On 11/13/19 5:45 AM, Christoph Hellwig wrote:
> These two functions have never been used since they were added.
> 
> Signed-off-by: Christoph Hellwig <hch@lst.de>
> ---
>  include/linux/hmm.h |  23 -------
>  mm/hmm.c            | 147 --------------------------------------------
>  2 files changed, 170 deletions(-)

Reviewed-by: John Hubbard <jhubbard@nvidia.com>

Also +CC Ralph just for another look. But I know we're not using these 
routines.


thanks,
-- 
John Hubbard
NVIDIA


* Re: [PATCH] mm/hmm: remove hmm_range_dma_map and hmm_range_dma_unmap
  2019-11-13 18:49 ` Jerome Glisse
@ 2019-11-19 18:49   ` Jason Gunthorpe
  0 siblings, 0 replies; 4+ messages in thread
From: Jason Gunthorpe @ 2019-11-19 18:49 UTC (permalink / raw)
  To: Jerome Glisse; +Cc: Christoph Hellwig, linux-mm, linux-kernel

On Wed, Nov 13, 2019 at 01:49:43PM -0500, Jerome Glisse wrote:
> On Wed, Nov 13, 2019 at 02:45:28PM +0100, Christoph Hellwig wrote:
> > These two functions have never been used since they were added.
> 
> The mlx5 conversion (which has been posted a few times now) uses them;
> dunno what Jason's plan is on that front.

IMHO if ODP is going to be the only user then we should just keep the
existing ODP code.

Let's drop them until someone can come up with a series to migrate
several drivers. It is small, so it would not be hard to bring them
back if needed.
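
If a driver does want the combined behaviour in the meantime, the
pattern is easy to open-code; a minimal sketch mirroring the removed
helper is below (my_range_dma_map() is a hypothetical driver-local
name, not a kernel API; the invalidation-retry check and unwinding of
already-mapped pages are left to the caller for brevity):

#include <linux/dma-mapping.h>
#include <linux/hmm.h>

/*
 * Sketch only: fault the range, then DMA map each resulting page,
 * mirroring what hmm_range_dma_map() used to do.
 */
static long my_range_dma_map(struct hmm_range *range, struct device *dev,
			     dma_addr_t *daddrs, unsigned int flags)
{
	unsigned long i, npages = (range->end - range->start) >> PAGE_SHIFT;
	long ret;

	ret = hmm_range_fault(range, flags);
	if (ret <= 0)
		return ret ? ret : -EBUSY;

	for (i = 0; i < npages; i++) {
		enum dma_data_direction dir = DMA_TO_DEVICE;
		struct page *page;

		daddrs[i] = 0;
		page = hmm_device_entry_to_page(range, range->pfns[i]);
		if (!page)
			continue;

		/* Writable entries get mapped bi-directional. */
		if (range->pfns[i] & range->flags[HMM_PFN_WRITE])
			dir = DMA_BIDIRECTIONAL;

		daddrs[i] = dma_map_page(dev, page, 0, PAGE_SIZE, dir);
		if (dma_mapping_error(dev, daddrs[i]))
			return -EFAULT;
	}
	return npages;
}

Tearing it down is just the reverse: dma_unmap_page() every valid
entry with the same direction, calling set_page_dirty() first for
pages that were mapped writable and may have been written by the
device.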

So applied to hmm.git

Thanks,
Jason

