* [PATCH] IB/verbs: Check each operation of dma_ops individually
From: Yuval Shaia @ 2015-02-23  4:50 UTC
  To: yuval.shaia-QHcLZuEGTsvQT0dZR+AlfA, linux-rdma-u79uwXL29TY76Z2rM5mHXA

The current approach forces one to implement all ops even when some functions can use the default implementation.
As a result, for new DMA ops (e.g. a new arch) many functions just wrap the default function.
The fix is to check each DMA operation individually, so one can leave unset the ones that do not need to be overridden.
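
For example (a sketch only, with hypothetical foo_* names that are not part
of this patch), a provider that only needs to override the single-buffer
paths could then install a partial ops table:

	static struct ib_dma_mapping_ops foo_dma_mapping_ops = {
		.map_single	= foo_dma_map_single,
		.unmap_single	= foo_dma_unmap_single,
		/* every other member is left NULL; the ib_dma_*
		 * helpers now fall back to the generic dma_* calls
		 * for them */
	};

	dev->dma_ops = &foo_dma_mapping_ops;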

Signed-off-by: Yuval Shaia <yuval.shaia-QHcLZuEGTsvQT0dZR+AlfA@public.gmane.org>
---
 include/rdma/ib_verbs.h |   22 +++++++++++-----------
 1 files changed, 11 insertions(+), 11 deletions(-)

diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h
index 0d74f1d..166c01a 100644
--- a/include/rdma/ib_verbs.h
+++ b/include/rdma/ib_verbs.h
@@ -2145,7 +2145,7 @@ struct ib_mr *ib_get_dma_mr(struct ib_pd *pd, int mr_access_flags);
  */
 static inline int ib_dma_mapping_error(struct ib_device *dev, u64 dma_addr)
 {
-	if (dev->dma_ops)
+	if (dev->dma_ops && dev->dma_ops->mapping_error)
 		return dev->dma_ops->mapping_error(dev, dma_addr);
 	return dma_mapping_error(dev->dma_device, dma_addr);
 }
@@ -2161,7 +2161,7 @@ static inline u64 ib_dma_map_single(struct ib_device *dev,
 				    void *cpu_addr, size_t size,
 				    enum dma_data_direction direction)
 {
-	if (dev->dma_ops)
+	if (dev->dma_ops && dev->dma_ops->map_single)
 		return dev->dma_ops->map_single(dev, cpu_addr, size, direction);
 	return dma_map_single(dev->dma_device, cpu_addr, size, direction);
 }
@@ -2177,7 +2177,7 @@ static inline void ib_dma_unmap_single(struct ib_device *dev,
 				       u64 addr, size_t size,
 				       enum dma_data_direction direction)
 {
-	if (dev->dma_ops)
+	if (dev->dma_ops && dev->dma_ops->unmap_single)
 		dev->dma_ops->unmap_single(dev, addr, size, direction);
 	else
 		dma_unmap_single(dev->dma_device, addr, size, direction);
@@ -2215,7 +2215,7 @@ static inline u64 ib_dma_map_page(struct ib_device *dev,
 				  size_t size,
 					 enum dma_data_direction direction)
 {
-	if (dev->dma_ops)
+	if (dev->dma_ops && dev->dma_ops->map_page)
 		return dev->dma_ops->map_page(dev, page, offset, size, direction);
 	return dma_map_page(dev->dma_device, page, offset, size, direction);
 }
@@ -2231,7 +2231,7 @@ static inline void ib_dma_unmap_page(struct ib_device *dev,
 				     u64 addr, size_t size,
 				     enum dma_data_direction direction)
 {
-	if (dev->dma_ops)
+	if (dev->dma_ops && dev->dma_ops->unmap_page)
 		dev->dma_ops->unmap_page(dev, addr, size, direction);
 	else
 		dma_unmap_page(dev->dma_device, addr, size, direction);
@@ -2248,7 +2248,7 @@ static inline int ib_dma_map_sg(struct ib_device *dev,
 				struct scatterlist *sg, int nents,
 				enum dma_data_direction direction)
 {
-	if (dev->dma_ops)
+	if (dev->dma_ops && dev->dma_ops->map_sg)
 		return dev->dma_ops->map_sg(dev, sg, nents, direction);
 	return dma_map_sg(dev->dma_device, sg, nents, direction);
 }
@@ -2264,7 +2264,7 @@ static inline void ib_dma_unmap_sg(struct ib_device *dev,
 				   struct scatterlist *sg, int nents,
 				   enum dma_data_direction direction)
 {
-	if (dev->dma_ops)
+	if (dev->dma_ops && dev->dma_ops->unmap_sg)
 		dev->dma_ops->unmap_sg(dev, sg, nents, direction);
 	else
 		dma_unmap_sg(dev->dma_device, sg, nents, direction);
@@ -2325,7 +2325,7 @@ static inline void ib_dma_sync_single_for_cpu(struct ib_device *dev,
 					      size_t size,
 					      enum dma_data_direction dir)
 {
-	if (dev->dma_ops)
+	if (dev->dma_ops && dev->dma_ops->sync_single_for_cpu)
 		dev->dma_ops->sync_single_for_cpu(dev, addr, size, dir);
 	else
 		dma_sync_single_for_cpu(dev->dma_device, addr, size, dir);
@@ -2343,7 +2343,7 @@ static inline void ib_dma_sync_single_for_device(struct ib_device *dev,
 						 size_t size,
 						 enum dma_data_direction dir)
 {
-	if (dev->dma_ops)
+	if (dev->dma_ops && dev->dma_ops->sync_single_for_device)
 		dev->dma_ops->sync_single_for_device(dev, addr, size, dir);
 	else
 		dma_sync_single_for_device(dev->dma_device, addr, size, dir);
@@ -2361,7 +2361,7 @@ static inline void *ib_dma_alloc_coherent(struct ib_device *dev,
 					   u64 *dma_handle,
 					   gfp_t flag)
 {
-	if (dev->dma_ops)
+	if (dev->dma_ops && dev->dma_ops->alloc_coherent)
 		return dev->dma_ops->alloc_coherent(dev, size, dma_handle, flag);
 	else {
 		dma_addr_t handle;
@@ -2384,7 +2384,7 @@ static inline void ib_dma_free_coherent(struct ib_device *dev,
 					size_t size, void *cpu_addr,
 					u64 dma_handle)
 {
-	if (dev->dma_ops)
+	if (dev->dma_ops && dev->dma_ops->free_coherent)
 		dev->dma_ops->free_coherent(dev, size, cpu_addr, dma_handle);
 	else
 		dma_free_coherent(dev->dma_device, size, cpu_addr, dma_handle);
-- 
1.7.1


* Re: [PATCH] IB/verbs: Check each operation of dma_ops individually
From: Yuval Shaia @ 2015-04-14  8:45 UTC
  To: linux-rdma-u79uwXL29TY76Z2rM5mHXA

ping

On Sun, Feb 22, 2015 at 08:50:27PM -0800, Yuval Shaia wrote:
> The current approach forces one to implement all ops even when some functions can use the default implementation.
> As a result, for new DMA ops (e.g. a new arch) many functions just wrap the default function.
> The fix is to check each DMA operation individually, so one can leave unset the ones that do not need to be overridden.
> [...]

* Re: [PATCH] IB/verbs: Check each operation of dma_ops individually
From: Sagi Grimberg @ 2015-04-14  8:52 UTC
  To: Yuval Shaia, linux-rdma-u79uwXL29TY76Z2rM5mHXA

On 2/23/2015 6:50 AM, Yuval Shaia wrote:
> The current approach forces one to implement all ops even when some functions can use the default implementation.
> As a result, for new DMA ops (e.g. a new arch) many functions just wrap the default function.
> The fix is to check each DMA operation individually, so one can leave unset the ones that do not need to be overridden.
>

I guess this is OK, but aren't we better off with wrappers than
adding another conditional check?
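
(For context, a sketch of the wrapper boilerplate in question, using a
hypothetical foo_* name: today a driver that installs dma_ops must fill
every member, so the ones it does not care about end up as thin
pass-throughs like this:)

	static u64 foo_dma_map_page(struct ib_device *dev, struct page *page,
				    unsigned long offset, size_t size,
				    enum dma_data_direction direction)
	{
		/* pure pass-through to the generic implementation */
		return dma_map_page(dev->dma_device, page, offset, size,
				    direction);
	}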


* Re: [PATCH] IB/verbs: Check each operation of dma_ops individually
From: Jason Gunthorpe @ 2015-04-14 20:10 UTC
  To: Sagi Grimberg; +Cc: Yuval Shaia, linux-rdma-u79uwXL29TY76Z2rM5mHXA

On Tue, Apr 14, 2015 at 11:52:11AM +0300, Sagi Grimberg wrote:
> On 2/23/2015 6:50 AM, Yuval Shaia wrote:
> >The current approach forces one to implement all ops even when some functions can use the default implementation.
> >As a result, for new DMA ops (e.g. a new arch) many functions just wrap the default function.
> >The fix is to check each DMA operation individually, so one can leave unset the ones that do not need to be overridden.
> >
> 
> I guess this is OK, but aren't we better off with wrappers than
> adding another conditional check?

Yeah, the drivers should provide all the ops.

With the current scheme the core could fill in the NULL ops with
defaults at device registration; other subsystems do this.

Jason
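
(A minimal sketch of the registration-time fill-in Jason suggests, with
hypothetical ib_default_* and ib_fixup_* names rather than existing core
code: on register, replace any NULL member with a wrapper around the
generic call, so the fast-path helpers keep a single unconditional test:)

	static u64 ib_default_map_single(struct ib_device *dev, void *cpu_addr,
					 size_t size,
					 enum dma_data_direction direction)
	{
		return dma_map_single(dev->dma_device, cpu_addr, size,
				      direction);
	}

	static void ib_fixup_dma_ops(struct ib_device *dev)
	{
		if (!dev->dma_ops)
			return;	/* no override table: helpers call dma_* directly */
		if (!dev->dma_ops->map_single)
			dev->dma_ops->map_single = ib_default_map_single;
		/* ... and likewise for each remaining member ... */
	}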