From: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
To: davem@davemloft.net
Cc: Maciej Fijalkowski <maciej.fijalkowski@intel.com>,
	netdev@vger.kernel.org, nhorman@redhat.com, sassmann@redhat.com,
	Anirudh Venkataramanan <anirudh.venkataramanan@intel.com>,
	Andrew Bowers <andrewx.bowers@intel.com>,
	Jeff Kirsher <jeffrey.t.kirsher@intel.com>
Subject: [net-next 08/15] ice: Introduce bulk update for page count
Date: Mon, 25 Mar 2019 13:29:46 -0700
Message-ID: <20190325202953.32095-9-jeffrey.t.kirsher@intel.com>
In-Reply-To: <20190325202953.32095-1-jeffrey.t.kirsher@intel.com>

From: Maciej Fijalkowski <maciej.fijalkowski@intel.com>

{get,put}_page are atomic operations that we use for page count
handling. The current refcount logic increments the count when passing
an skb carrying the data from the first half of the page up to the
network stack, and recycles the second half of the page. This protects
us from losing the page, since the network stack drops its reference
to the page when it frees the skb.

Performance can be modestly improved by updating the refcount in bulk
instead of one reference at a time. During buffer initialization, raise
the page's refcount as far as possible (by USHRT_MAX - 1), track the
unused references in a new pagecnt_bias counter, and don't allow the
refcount to become less than two.
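
For illustration, the accounting can be modeled in ordinary userspace
C. The sketch below is not the driver code: every model_* name is
invented here, "refcount" stands in for page_count(page), and "bias"
for the new rx_buf->pagecnt_bias field. It shows why a single bulk
page_ref_add() at allocation lets each Rx cycle pay with a plain bias
decrement instead of an atomic get_page().

/*
 * Standalone model of the pagecnt_bias accounting (illustrative only;
 * all model_* names are invented for this sketch).
 */
#include <assert.h>
#include <limits.h>
#include <stdio.h>

struct model_rx_buf {
	unsigned int refcount;		/* models page_count(page) */
	unsigned short bias;		/* models rx_buf->pagecnt_bias */
};

/* Allocation: one bulk page_ref_add() pre-pays ~64K future uses. */
static void model_alloc(struct model_rx_buf *b)
{
	b->refcount = 1;		/* fresh page */
	b->refcount += USHRT_MAX - 1;	/* page_ref_add(page, USHRT_MAX - 1) */
	b->bias = USHRT_MAX;
}

/* Handing a buffer out costs one bias tick -- no atomic op needed. */
static void model_get_rx_buf(struct model_rx_buf *b)
{
	b->bias--;
}

/* The stack's put_page() when it frees the skb. */
static void model_stack_put(struct model_rx_buf *b)
{
	b->refcount--;
}

/*
 * Reuse test: refcount - bias is the number of references not covered
 * by the driver's pre-paid pool; more than one means someone else
 * (an in-flight skb) still owns part of the page.
 */
static int model_can_reuse(struct model_rx_buf *b)
{
	if (b->refcount - b->bias > 1)
		return 0;
	if (b->bias == 1) {		/* pool drained: restock in bulk */
		b->refcount += USHRT_MAX - 1;
		b->bias = USHRT_MAX;
	}
	return 1;
}

int main(void)
{
	struct model_rx_buf b;

	model_alloc(&b);		/* refcount=65535, bias=65535 */
	model_get_rx_buf(&b);		/* first half goes up the stack */
	assert(model_can_reuse(&b));	/* sole owner: flip to second half */
	model_stack_put(&b);		/* stack freed the first skb */
	model_get_rx_buf(&b);		/* second half goes up the stack */
	assert(model_can_reuse(&b));	/* still reusable */
	printf("refcount=%u bias=%u\n", b.refcount, (unsigned int)b.bias);
	return 0;
}

The restock branch mirrors the patch below: only when the pre-paid
pool is nearly exhausted (bias == 1) does the driver touch the atomic
refcount again, and then only once per ~64K buffer uses.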

Signed-off-by: Maciej Fijalkowski <maciej.fijalkowski@intel.com>
Signed-off-by: Anirudh Venkataramanan <anirudh.venkataramanan@intel.com>
Tested-by: Andrew Bowers <andrewx.bowers@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
---
 drivers/net/ethernet/intel/ice/ice_txrx.c | 26 +++++++++++++++++------
 drivers/net/ethernet/intel/ice/ice_txrx.h |  1 +
 2 files changed, 20 insertions(+), 7 deletions(-)

diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.c b/drivers/net/ethernet/intel/ice/ice_txrx.c
index becee476002d..d003f4d49ae6 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx.c
+++ b/drivers/net/ethernet/intel/ice/ice_txrx.c
@@ -283,7 +283,7 @@ void ice_clean_rx_ring(struct ice_ring *rx_ring)
 			continue;
 
 		dma_unmap_page(dev, rx_buf->dma, PAGE_SIZE, DMA_FROM_DEVICE);
-		__free_pages(rx_buf->page, 0);
+		__page_frag_cache_drain(rx_buf->page, rx_buf->pagecnt_bias);
 
 		rx_buf->page = NULL;
 		rx_buf->page_offset = 0;
@@ -423,6 +423,8 @@ ice_alloc_mapped_page(struct ice_ring *rx_ring, struct ice_rx_buf *bi)
 	bi->dma = dma;
 	bi->page = page;
 	bi->page_offset = 0;
+	page_ref_add(page, USHRT_MAX - 1);
+	bi->pagecnt_bias = USHRT_MAX;
 
 	return true;
 }
@@ -509,6 +511,7 @@ static bool ice_page_is_reserved(struct page *page)
 static bool ice_can_reuse_rx_page(struct ice_rx_buf *rx_buf,
 				  unsigned int truesize)
 {
+	unsigned int pagecnt_bias = rx_buf->pagecnt_bias;
 	struct page *page = rx_buf->page;
 
 	/* avoid re-using remote pages */
@@ -517,7 +520,7 @@ static bool ice_can_reuse_rx_page(struct ice_rx_buf *rx_buf,
 
 #if (PAGE_SIZE < 8192)
 	/* if we are only owner of page we can reuse it */
-	if (unlikely(page_count(page) != 1))
+	if (unlikely((page_count(page) - pagecnt_bias) > 1))
 		return false;
 
 	/* flip page offset to other buffer */
@@ -530,10 +533,14 @@ static bool ice_can_reuse_rx_page(struct ice_rx_buf *rx_buf,
 		return false;
 #endif /* PAGE_SIZE < 8192) */
 
-	/* Even if we own the page, we are not allowed to use atomic_set()
-	 * This would break get_page_unless_zero() users.
+	/* If we have drained the page fragment pool we need to update
+	 * the pagecnt_bias and page count so that we fully restock the
+	 * number of references the driver holds.
 	 */
-	get_page(page);
+	if (unlikely(pagecnt_bias == 1)) {
+		page_ref_add(page, USHRT_MAX - 1);
+		rx_buf->pagecnt_bias = USHRT_MAX;
+	}
 
 	return true;
 }
@@ -576,11 +583,12 @@ ice_add_rx_frag(struct ice_rx_buf *rx_buf, struct sk_buff *skb,
 		memcpy(__skb_put(skb, size), va, ALIGN(size, sizeof(long)));
 
 		/* page is not reserved, we can reuse buffer as-is */
-		if (likely(!ice_page_is_reserved(page)))
+		if (likely(!ice_page_is_reserved(page))) {
+			rx_buf->pagecnt_bias++;
 			return true;
+		}
 
 		/* this page cannot be reused so discard it */
-		__free_pages(page, 0);
 		return false;
 	}
 
@@ -650,6 +658,9 @@ ice_get_rx_buf(struct ice_ring *rx_ring, const unsigned int size)
 				      rx_buf->page_offset, size,
 				      DMA_FROM_DEVICE);
 
+	/* We have pulled a buffer for use, so decrement pagecnt_bias */
+	rx_buf->pagecnt_bias--;
+
 	return rx_buf;
 }
 
@@ -703,6 +714,7 @@ ice_fetch_rx_buf(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf,
 		/* we are not reusing the buffer so unmap it */
 		dma_unmap_page(rx_ring->dev, rx_buf->dma, PAGE_SIZE,
 			       DMA_FROM_DEVICE);
+		__page_frag_cache_drain(rx_buf->page, rx_buf->pagecnt_bias);
 	}
 
 	/* clear contents of buffer_info */
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.h b/drivers/net/ethernet/intel/ice/ice_txrx.h
index b7ff0ff82517..43b39e7ce470 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx.h
+++ b/drivers/net/ethernet/intel/ice/ice_txrx.h
@@ -73,6 +73,7 @@ struct ice_rx_buf {
 	dma_addr_t dma;
 	struct page *page;
 	unsigned int page_offset;
+	u16 pagecnt_bias;
 };
 
 struct ice_q_stats {
-- 
2.20.1

