* [PATCH net] gve: Fixes DMA synchronization.
@ 2019-10-28 18:23 Yangchun Fu
  2019-10-29  8:18 ` Simon Horman
  2019-10-30  0:41 ` David Miller
  0 siblings, 2 replies; 5+ messages in thread
From: Yangchun Fu @ 2019-10-28 18:23 UTC
  To: netdev; +Cc: Yangchun Fu, Catherine Sullivan

Sync the DMA buffers properly so that both the CPU and the device see
the most up-to-date data.

Signed-off-by: Yangchun Fu <yangchun@google.com>
Reviewed-by: Catherine Sullivan <csully@google.com>
---
 drivers/net/ethernet/google/gve/gve_rx.c |  2 ++
 drivers/net/ethernet/google/gve/gve_tx.c | 26 ++++++++++++++++++++++--
 2 files changed, 26 insertions(+), 2 deletions(-)

diff --git a/drivers/net/ethernet/google/gve/gve_rx.c b/drivers/net/ethernet/google/gve/gve_rx.c
index 59564ac99d2a..edec61dfc868 100644
--- a/drivers/net/ethernet/google/gve/gve_rx.c
+++ b/drivers/net/ethernet/google/gve/gve_rx.c
@@ -289,6 +289,8 @@ static bool gve_rx(struct gve_rx_ring *rx, struct gve_rx_desc *rx_desc,
 
 	len = be16_to_cpu(rx_desc->len) - GVE_RX_PAD;
 	page_info = &rx->data.page_info[idx];
+	dma_sync_single_for_cpu(&priv->pdev->dev, rx->data.qpl->page_buses[idx],
+				PAGE_SIZE, DMA_FROM_DEVICE);
 
 	/* gvnic can only receive into registered segments. If the buffer
 	 * can't be recycled, our only choice is to copy the data out of
diff --git a/drivers/net/ethernet/google/gve/gve_tx.c b/drivers/net/ethernet/google/gve/gve_tx.c
index 778b87b5a06c..d8342b7b9764 100644
--- a/drivers/net/ethernet/google/gve/gve_tx.c
+++ b/drivers/net/ethernet/google/gve/gve_tx.c
@@ -390,7 +390,23 @@ static void gve_tx_fill_seg_desc(union gve_tx_desc *seg_desc,
 	seg_desc->seg.seg_addr = cpu_to_be64(addr);
 }
 
-static int gve_tx_add_skb(struct gve_tx_ring *tx, struct sk_buff *skb)
+static inline void gve_dma_sync_for_device(struct gve_priv *priv,
+					   dma_addr_t *page_buses,
+					   u64 iov_offset, u64 iov_len)
+{
+	u64 addr;
+	dma_addr_t dma;
+
+	for (addr = iov_offset; addr < iov_offset + iov_len;
+	     addr += PAGE_SIZE) {
+		dma = page_buses[addr / PAGE_SIZE];
+		dma_sync_single_for_device(&priv->pdev->dev, dma, PAGE_SIZE,
+					   DMA_TO_DEVICE);
+	}
+}
+
+static int gve_tx_add_skb(struct gve_tx_ring *tx, struct sk_buff *skb,
+			  struct gve_priv *priv)
 {
 	int pad_bytes, hlen, hdr_nfrags, payload_nfrags, l4_hdr_offset;
 	union gve_tx_desc *pkt_desc, *seg_desc;
@@ -432,6 +448,9 @@ static int gve_tx_add_skb(struct gve_tx_ring *tx, struct sk_buff *skb)
 	skb_copy_bits(skb, 0,
 		      tx->tx_fifo.base + info->iov[hdr_nfrags - 1].iov_offset,
 		      hlen);
+	gve_dma_sync_for_device(priv, tx->tx_fifo.qpl->page_buses,
+				info->iov[hdr_nfrags - 1].iov_offset,
+				info->iov[hdr_nfrags - 1].iov_len);
 	copy_offset = hlen;
 
 	for (i = payload_iov; i < payload_nfrags + payload_iov; i++) {
@@ -445,6 +464,9 @@ static int gve_tx_add_skb(struct gve_tx_ring *tx, struct sk_buff *skb)
 		skb_copy_bits(skb, copy_offset,
 			      tx->tx_fifo.base + info->iov[i].iov_offset,
 			      info->iov[i].iov_len);
+		gve_dma_sync_for_device(priv, tx->tx_fifo.qpl->page_buses,
+					info->iov[i].iov_offset,
+					info->iov[i].iov_len);
 		copy_offset += info->iov[i].iov_len;
 	}
 
@@ -473,7 +495,7 @@ netdev_tx_t gve_tx(struct sk_buff *skb, struct net_device *dev)
 		gve_tx_put_doorbell(priv, tx->q_resources, tx->req);
 		return NETDEV_TX_BUSY;
 	}
-	nsegs = gve_tx_add_skb(tx, skb);
+	nsegs = gve_tx_add_skb(tx, skb, priv);
 
 	netdev_tx_sent_queue(tx->netdev_txq, skb->len);
 	skb_tx_timestamp(skb);
-- 
2.24.0.rc0.303.g954a862665-goog



* Re: [PATCH net] gve: Fixes DMA synchronization.
  2019-10-28 18:23 [PATCH net] gve: Fixes DMA synchronization Yangchun Fu
@ 2019-10-29  8:18 ` Simon Horman
  2019-10-30 21:50   ` Yangchun Fu
  2019-10-30  0:41 ` David Miller
  1 sibling, 1 reply; 5+ messages in thread
From: Simon Horman @ 2019-10-29  8:18 UTC
  To: Yangchun Fu; +Cc: netdev, Catherine Sullivan

Hi Yangchun,

Thanks for your patch.

On Mon, Oct 28, 2019 at 11:23:09AM -0700, Yangchun Fu wrote:
> Sync the DMA buffers properly so that both the CPU and the device see
> the most up-to-date data.
> 
> Signed-off-by: Yangchun Fu <yangchun@google.com>
> Reviewed-by: Catherine Sullivan <csully@google.com>
> ---
>  drivers/net/ethernet/google/gve/gve_rx.c |  2 ++
>  drivers/net/ethernet/google/gve/gve_tx.c | 26 ++++++++++++++++++++++--
>  2 files changed, 26 insertions(+), 2 deletions(-)
> 
> diff --git a/drivers/net/ethernet/google/gve/gve_rx.c b/drivers/net/ethernet/google/gve/gve_rx.c
> index 59564ac99d2a..edec61dfc868 100644
> --- a/drivers/net/ethernet/google/gve/gve_rx.c
> +++ b/drivers/net/ethernet/google/gve/gve_rx.c
> @@ -289,6 +289,8 @@ static bool gve_rx(struct gve_rx_ring *rx, struct gve_rx_desc *rx_desc,
>  
>  	len = be16_to_cpu(rx_desc->len) - GVE_RX_PAD;
>  	page_info = &rx->data.page_info[idx];
> +	dma_sync_single_for_cpu(&priv->pdev->dev, rx->data.qpl->page_buses[idx],
> +				PAGE_SIZE, DMA_FROM_DEVICE);
>  
>  	/* gvnic can only receive into registered segments. If the buffer
>  	 * can't be recycled, our only choice is to copy the data out of
> diff --git a/drivers/net/ethernet/google/gve/gve_tx.c b/drivers/net/ethernet/google/gve/gve_tx.c
> index 778b87b5a06c..d8342b7b9764 100644
> --- a/drivers/net/ethernet/google/gve/gve_tx.c
> +++ b/drivers/net/ethernet/google/gve/gve_tx.c
> @@ -390,7 +390,23 @@ static void gve_tx_fill_seg_desc(union gve_tx_desc *seg_desc,
>  	seg_desc->seg.seg_addr = cpu_to_be64(addr);
>  }
>  
> -static int gve_tx_add_skb(struct gve_tx_ring *tx, struct sk_buff *skb)
> +static inline void gve_dma_sync_for_device(struct gve_priv *priv,

It seems that only priv->pdev->dev is used in this function.  Perhaps it
would be better to pass that device pointer rather than all of priv.
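
Something like this, perhaps (a completely untested sketch, only to
illustrate the idea):

	static void gve_dma_sync_for_device(struct device *dev,
					    dma_addr_t *page_buses,
					    u64 iov_offset, u64 iov_len)
	{
		u64 addr;

		for (addr = iov_offset; addr < iov_offset + iov_len;
		     addr += PAGE_SIZE) {
			/* Sync every page that backs this iov before
			 * handing it to the device.
			 */
			dma_sync_single_for_device(dev,
						   page_buses[addr / PAGE_SIZE],
						   PAGE_SIZE, DMA_TO_DEVICE);
		}
	}

with the callers passing &priv->pdev->dev instead of priv.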

> +					   dma_addr_t *page_buses,
> +					   u64 iov_offset, u64 iov_len)
> +{
> +	u64 addr;
> +	dma_addr_t dma;
> +
> +	for (addr = iov_offset; addr < iov_offset + iov_len;
> +	     addr += PAGE_SIZE) {
> +		dma = page_buses[addr / PAGE_SIZE];
> +		dma_sync_single_for_device(&priv->pdev->dev, dma, PAGE_SIZE,
> +					   DMA_TO_DEVICE);
> +	}
> +}
> +
> +static int gve_tx_add_skb(struct gve_tx_ring *tx, struct sk_buff *skb,
> +			  struct gve_priv *priv)
>  {
>  	int pad_bytes, hlen, hdr_nfrags, payload_nfrags, l4_hdr_offset;
>  	union gve_tx_desc *pkt_desc, *seg_desc;
> @@ -432,6 +448,9 @@ static int gve_tx_add_skb(struct gve_tx_ring *tx, struct sk_buff *skb)
>  	skb_copy_bits(skb, 0,
>  		      tx->tx_fifo.base + info->iov[hdr_nfrags - 1].iov_offset,
>  		      hlen);
> +	gve_dma_sync_for_device(priv, tx->tx_fifo.qpl->page_buses,
> +				info->iov[hdr_nfrags - 1].iov_offset,
> +				info->iov[hdr_nfrags - 1].iov_len);
>  	copy_offset = hlen;
>  
>  	for (i = payload_iov; i < payload_nfrags + payload_iov; i++) {
> @@ -445,6 +464,9 @@ static int gve_tx_add_skb(struct gve_tx_ring *tx, struct sk_buff *skb)
>  		skb_copy_bits(skb, copy_offset,
>  			      tx->tx_fifo.base + info->iov[i].iov_offset,
>  			      info->iov[i].iov_len);
> +		gve_dma_sync_for_device(priv, tx->tx_fifo.qpl->page_buses,
> +					info->iov[i].iov_offset,
> +					info->iov[i].iov_len);
>  		copy_offset += info->iov[i].iov_len;
>  	}
>  
> @@ -473,7 +495,7 @@ netdev_tx_t gve_tx(struct sk_buff *skb, struct net_device *dev)
>  		gve_tx_put_doorbell(priv, tx->q_resources, tx->req);
>  		return NETDEV_TX_BUSY;
>  	}
> -	nsegs = gve_tx_add_skb(tx, skb);
> +	nsegs = gve_tx_add_skb(tx, skb, priv);
>  
>  	netdev_tx_sent_queue(tx->netdev_txq, skb->len);
>  	skb_tx_timestamp(skb);
> -- 
> 2.24.0.rc0.303.g954a862665-goog
> 


* Re: [PATCH net] gve: Fixes DMA synchronization.
  2019-10-28 18:23 [PATCH net] gve: Fixes DMA synchronization Yangchun Fu
  2019-10-29  8:18 ` Simon Horman
@ 2019-10-30  0:41 ` David Miller
  2019-10-30 21:51   ` Yangchun Fu
  1 sibling, 1 reply; 5+ messages in thread
From: David Miller @ 2019-10-30  0:41 UTC
  To: yangchun; +Cc: netdev, csully

From: Yangchun Fu <yangchun@google.com>
Date: Mon, 28 Oct 2019 11:23:09 -0700

> diff --git a/drivers/net/ethernet/google/gve/gve_tx.c b/drivers/net/ethernet/google/gve/gve_tx.c
> index 778b87b5a06c..d8342b7b9764 100644
> --- a/drivers/net/ethernet/google/gve/gve_tx.c
> +++ b/drivers/net/ethernet/google/gve/gve_tx.c
> @@ -390,7 +390,23 @@ static void gve_tx_fill_seg_desc(union gve_tx_desc *seg_desc,
>  	seg_desc->seg.seg_addr = cpu_to_be64(addr);
>  }
>  
> -static int gve_tx_add_skb(struct gve_tx_ring *tx, struct sk_buff *skb)
> +static inline void gve_dma_sync_for_device(struct gve_priv *priv,
> +					   dma_addr_t *page_buses,
> +					   u64 iov_offset, u64 iov_len)

Never use the inline keyword in foo.c files; let the compiler decide.
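
In other words, just:

	static void gve_dma_sync_for_device(struct gve_priv *priv,
					    dma_addr_t *page_buses,
					    u64 iov_offset, u64 iov_len)

and let the compiler inline it on its own where that is a win.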


* Re: [PATCH net] gve: Fixes DMA synchronization.
  2019-10-29  8:18 ` Simon Horman
@ 2019-10-30 21:50   ` Yangchun Fu
  0 siblings, 0 replies; 5+ messages in thread
From: Yangchun Fu @ 2019-10-30 21:50 UTC
  To: Simon Horman; +Cc: netdev, Catherine Sullivan

On Tue, Oct 29, 2019, Simon Horman wrote:
>> +static inline void gve_dma_sync_for_device(struct gve_priv *priv,
>
> It seems that only priv->pdev->dev is used in this function.  Perhaps it
> would be better to pass it to this function rather than all of priv.

Thanks for the review. I will send a v2 patch with the fix.


* Re: [PATCH net] gve: Fixes DMA synchronization.
  2019-10-30  0:41 ` David Miller
@ 2019-10-30 21:51   ` Yangchun Fu
  0 siblings, 0 replies; 5+ messages in thread
From: Yangchun Fu @ 2019-10-30 21:51 UTC
  To: David Miller; +Cc: netdev, Catherine Sullivan

On Tue, Oct 29, 2019 at 5:41 PM David Miller <davem@davemloft.net> wrote:
>> diff --git a/drivers/net/ethernet/google/gve/gve_tx.c b/drivers/net/ethernet/google/gve/gve_tx.c
>> index 778b87b5a06c..d8342b7b9764 100644
>> --- a/drivers/net/ethernet/google/gve/gve_tx.c
>> +++ b/drivers/net/ethernet/google/gve/gve_tx.c
>> @@ -390,7 +390,23 @@ static void gve_tx_fill_seg_desc(union gve_tx_desc *seg_desc,
>>       seg_desc->seg.seg_addr = cpu_to_be64(addr);
>>  }
>>
>> -static int gve_tx_add_skb(struct gve_tx_ring *tx, struct sk_buff *skb)
>> +static inline void gve_dma_sync_for_device(struct gve_priv *priv,
>> +                                        dma_addr_t *page_buses,
>> +                                        u64 iov_offset, u64 iov_len)
>
> Never use the inline keyword in foo.c files; let the compiler decide.

Thanks for the review. I will send the v2 patch with the fix.
