From: Ilias Apalodimas <ilias.apalodimas@linaro.org>
To: Lorenzo Bianconi <lorenzo@kernel.org>
Cc: netdev@vger.kernel.org, brouer@redhat.com, davem@davemloft.net,
	lorenzo.bianconi@redhat.com
Subject: Re: [PATCH v2 net-next] net: socionext: get rid of huge dma sync in netsec_alloc_rx_data
Date: Fri, 10 Jan 2020 16:56:31 +0200
Message-ID: <20200110145631.GA69461@apalos.home>
In-Reply-To: <81eeb4aaf1cbbbdcd4f58c5a7f06bdab67f20633.1578664483.git.lorenzo@kernel.org>

On Fri, Jan 10, 2020 at 02:57:44PM +0100, Lorenzo Bianconi wrote:
> The Socionext driver can run on both DMA coherent and non-coherent devices.
> Get rid of the huge dma_sync_single_for_device in netsec_alloc_rx_data since
> the driver can now let the page_pool API manage the needed DMA sync.
> 
> Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
> ---
> Changes since v1:
> - rely on original frame size for dma sync
> ---
>  drivers/net/ethernet/socionext/netsec.c | 43 +++++++++++++++----------
>  1 file changed, 26 insertions(+), 17 deletions(-)
> 
> diff --git a/drivers/net/ethernet/socionext/netsec.c b/drivers/net/ethernet/socionext/netsec.c
> index b5a9e947a4a8..45c76b437457 100644
> --- a/drivers/net/ethernet/socionext/netsec.c
> +++ b/drivers/net/ethernet/socionext/netsec.c
> @@ -243,6 +243,7 @@
>  			       NET_IP_ALIGN)
>  #define NETSEC_RX_BUF_NON_DATA (NETSEC_RXBUF_HEADROOM + \
>  				SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
> +#define NETSEC_RX_BUF_SIZE	(PAGE_SIZE - NETSEC_RX_BUF_NON_DATA)
>  
>  #define DESC_SZ	sizeof(struct netsec_de)
>  
> @@ -719,7 +720,6 @@ static void *netsec_alloc_rx_data(struct netsec_priv *priv,
>  {
>  
>  	struct netsec_desc_ring *dring = &priv->desc_ring[NETSEC_RING_RX];
> -	enum dma_data_direction dma_dir;
>  	struct page *page;
>  
>  	page = page_pool_dev_alloc_pages(dring->page_pool);
> @@ -734,9 +734,7 @@ static void *netsec_alloc_rx_data(struct netsec_priv *priv,
>  	/* Make sure the incoming payload fits in the page for XDP and non-XDP
>  	 * cases and reserve enough space for headroom + skb_shared_info
>  	 */
> -	*desc_len = PAGE_SIZE - NETSEC_RX_BUF_NON_DATA;
> -	dma_dir = page_pool_get_dma_dir(dring->page_pool);
> -	dma_sync_single_for_device(priv->dev, *dma_handle, *desc_len, dma_dir);
> +	*desc_len = NETSEC_RX_BUF_SIZE;
>  
>  	return page_address(page);
>  }
> @@ -883,6 +881,8 @@ static u32 netsec_xdp_xmit_back(struct netsec_priv *priv, struct xdp_buff *xdp)
>  static u32 netsec_run_xdp(struct netsec_priv *priv, struct bpf_prog *prog,
>  			  struct xdp_buff *xdp)
>  {
> +	struct netsec_desc_ring *dring = &priv->desc_ring[NETSEC_RING_RX];
> +	unsigned int len = xdp->data_end - xdp->data;

We need to account for the XDP program expanding the headers as well here,
so the sync length should be something like
max(xdp->data_end (before bpf), xdp->data_end (after bpf)) - xdp->data (original).
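
To illustrate (a rough, untested sketch only; the variable names are mine),
netsec_run_xdp() could snapshot the buffer boundaries before running the
program and size the recycle sync on the widest region:

	/* sketch: remember the original boundaries so the recycle path syncs
	 * everything the BPF program may have dirtied, e.g. headers pushed in
	 * front of the frame via bpf_xdp_adjust_head()
	 */
	void *orig_data = xdp->data;
	void *orig_data_end = xdp->data_end;
	unsigned int sync_len;
	u32 act;

	act = bpf_prog_run_xdp(prog, xdp);
	sync_len = max(xdp->data_end, orig_data_end) - orig_data;

	/* ... then on the XDP_TX/REDIRECT/DROP error paths: */
	__page_pool_put_page(dring->page_pool,
			     virt_to_head_page(xdp->data),
			     sync_len, true);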

>  	u32 ret = NETSEC_XDP_PASS;
>  	int err;
>  	u32 act;
> @@ -896,7 +896,9 @@ static u32 netsec_run_xdp(struct netsec_priv *priv, struct bpf_prog *prog,
>  	case XDP_TX:
>  		ret = netsec_xdp_xmit_back(priv, xdp);
>  		if (ret != NETSEC_XDP_TX)
> -			xdp_return_buff(xdp);
> +			__page_pool_put_page(dring->page_pool,
> +				     virt_to_head_page(xdp->data),
> +				     len, true);
>  		break;
>  	case XDP_REDIRECT:
>  		err = xdp_do_redirect(priv->ndev, xdp, prog);
> @@ -904,7 +906,9 @@ static u32 netsec_run_xdp(struct netsec_priv *priv, struct bpf_prog *prog,
>  			ret = NETSEC_XDP_REDIR;
>  		} else {
>  			ret = NETSEC_XDP_CONSUMED;
> -			xdp_return_buff(xdp);
> +			__page_pool_put_page(dring->page_pool,
> +				     virt_to_head_page(xdp->data),
> +				     len, true);
>  		}
>  		break;
>  	default:
> @@ -915,7 +919,9 @@ static u32 netsec_run_xdp(struct netsec_priv *priv, struct bpf_prog *prog,
>  		/* fall through -- handle aborts by dropping packet */
>  	case XDP_DROP:
>  		ret = NETSEC_XDP_CONSUMED;
> -		xdp_return_buff(xdp);
> +		__page_pool_put_page(dring->page_pool,
> +				     virt_to_head_page(xdp->data),
> +				     len, true);
>  		break;
>  	}
>  
> @@ -1014,7 +1020,8 @@ static int netsec_process_rx(struct netsec_priv *priv, int budget)
>  			 * cache state. Since we paid the allocation cost if
>  			 * building an skb fails try to put the page into cache
>  			 */
> -			page_pool_recycle_direct(dring->page_pool, page);
> +			__page_pool_put_page(dring->page_pool, page,
> +					     pkt_len, true);

Same here: a bpf prog returning an XDP_PASS verdict might have changed the lengths.
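
Something along these lines, maybe (again an untested sketch, assuming the
xdp_buff used earlier in netsec_process_rx() is still in scope at this point):

	/* sketch: after the program has run, the frame may have grown or
	 * shrunk, so size the sync on the current xdp_buff boundaries
	 * instead of the descriptor's original pkt_len
	 */
	unsigned int sync_len = xdp.data_end - xdp.data;

	__page_pool_put_page(dring->page_pool, page, sync_len, true);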

>  			netif_err(priv, drv, priv->ndev,
>  				  "rx failed to build skb\n");
>  			break;
> @@ -1272,17 +1279,19 @@ static int netsec_setup_rx_dring(struct netsec_priv *priv)
>  {
>  	struct netsec_desc_ring *dring = &priv->desc_ring[NETSEC_RING_RX];
>  	struct bpf_prog *xdp_prog = READ_ONCE(priv->xdp_prog);
> -	struct page_pool_params pp_params = { 0 };
> +	struct page_pool_params pp_params = {
> +		.order = 0,
> +		/* internal DMA mapping in page_pool */
> +		.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
> +		.pool_size = DESC_NUM,
> +		.nid = NUMA_NO_NODE,
> +		.dev = priv->dev,
> +		.dma_dir = xdp_prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE,
> +		.offset = NETSEC_RXBUF_HEADROOM,
> +		.max_len = NETSEC_RX_BUF_SIZE,
> +	};
>  	int i, err;
>  
> -	pp_params.order = 0;
> -	/* internal DMA mapping in page_pool */
> -	pp_params.flags = PP_FLAG_DMA_MAP;
> -	pp_params.pool_size = DESC_NUM;
> -	pp_params.nid = NUMA_NO_NODE;
> -	pp_params.dev = priv->dev;
> -	pp_params.dma_dir = xdp_prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE;
> -
>  	dring->page_pool = page_pool_create(&pp_params);
>  	if (IS_ERR(dring->page_pool)) {
>  		err = PTR_ERR(dring->page_pool);
> -- 
> 2.21.1
> 
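
One more note, mostly for anyone following along: with PP_FLAG_DMA_SYNC_DEV the
pool itself performs the sync-for-device when a page is recycled, starting at
pp_params.offset and capped at pp_params.max_len, so the dma_sync_size passed to
__page_pool_put_page() only needs to cover what the CPU actually touched.
Roughly (paraphrasing the page_pool internals from memory, not the exact
upstream code):

	/* approximately what the pool does on recycle */
	dma_sync_size = min(dma_sync_size, pool->p.max_len);
	dma_sync_single_range_for_device(pool->p.dev, dma_addr,
					 pool->p.offset, dma_sync_size,
					 pool->p.dma_dir);

That's why getting the length right in the XDP paths above matters: undersizing
it can leave dirty cachelines that later get written back over DMA'ed data on
non-coherent platforms, while oversizing it just burns cycles.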

Thanks
/Ilias
