* [PATCH net-next] page_pool: split types and declarations from page_pool.h
@ 2023-07-19 12:13 Yunsheng Lin
  2023-07-19 14:01 ` Jeff Johnson
                   ` (3 more replies)
  0 siblings, 4 replies; 13+ messages in thread
From: Yunsheng Lin @ 2023-07-19 12:13 UTC (permalink / raw)
  To: davem, kuba, pabeni
  Cc: netdev, linux-kernel, Yunsheng Lin, Alexander Lobakin,
	Eric Dumazet, Wei Fang, Shenwei Wang, Clark Wang, NXP Linux Team,
	Sunil Goutham, Geetha sowjanya, Subbaraya Sundeep, hariprasad,
	Saeed Mahameed, Leon Romanovsky, Alexei Starovoitov,
	Daniel Borkmann, Jesper Dangaard Brouer, John Fastabend,
	Felix Fietkau, Lorenzo Bianconi, Ryder Lee, Shayne Chen,
	Sean Wang, Kalle Valo, Matthias Brugger,
	AngeloGioacchino Del Regno, Ilias Apalodimas, linux-rdma, bpf,
	linux-wireless, linux-arm-kernel, linux-mediatek

Split types and pure function declarations from page_pool.h
and add them in page_page_types.h, so that C sources can
include page_pool.h and headers should generally only include
page_pool_types.h as suggested by jakub.
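
As a rough illustration of the intended include discipline (the foo_*
file names below are made up for this example and are not part of the
patch):

  /* foo_main.c: a C source that needs the full page pool API */
  #include <net/page_pool.h>          /* inline helpers + types */

  /* foo.h: a driver header that only stores pool pointers/params */
  #include <net/page_pool_types.h>    /* struct page_pool, page_pool_params */

  struct foo_rx_ring {
          struct page_pool *pool;     /* only the type is needed here */
  };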

Signed-off-by: Yunsheng Lin <linyunsheng@huawei.com>
Suggested-by: Jakub Kicinski <kuba@kernel.org>
CC: Alexander Lobakin <aleksander.lobakin@intel.com>
---
 MAINTAINERS                                   |   1 +
 drivers/net/ethernet/engleder/tsnep_main.c    |   1 +
 drivers/net/ethernet/freescale/fec_main.c     |   1 +
 .../marvell/octeontx2/nic/otx2_common.c       |   1 +
 .../ethernet/marvell/octeontx2/nic/otx2_pf.c  |   1 +
 .../ethernet/mellanox/mlx5/core/en/params.c   |   1 +
 .../net/ethernet/mellanox/mlx5/core/en/xdp.c  |   1 +
 drivers/net/wireless/mediatek/mt76/mt76.h     |   1 +
 include/linux/skbuff.h                        |   2 +-
 include/net/page_pool.h                       | 193 +-----------------
 include/net/page_pool_types.h                 | 193 ++++++++++++++++++
 11 files changed, 206 insertions(+), 190 deletions(-)
 create mode 100644 include/net/page_pool_types.h

diff --git a/MAINTAINERS b/MAINTAINERS
index 9a5863f1b016..2888d63e6e03 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -16003,6 +16003,7 @@ L:	netdev@vger.kernel.org
 S:	Supported
 F:	Documentation/networking/page_pool.rst
 F:	include/net/page_pool.h
+F:	include/net/page_pool_types.h
 F:	include/trace/events/page_pool.h
 F:	net/core/page_pool.c
 
diff --git a/drivers/net/ethernet/engleder/tsnep_main.c b/drivers/net/ethernet/engleder/tsnep_main.c
index 84751bb303a6..6222aaa5157f 100644
--- a/drivers/net/ethernet/engleder/tsnep_main.c
+++ b/drivers/net/ethernet/engleder/tsnep_main.c
@@ -28,6 +28,7 @@
 #include <linux/iopoll.h>
 #include <linux/bpf.h>
 #include <linux/bpf_trace.h>
+#include <net/page_pool.h>
 #include <net/xdp_sock_drv.h>
 
 #define TSNEP_RX_OFFSET (max(NET_SKB_PAD, XDP_PACKET_HEADROOM) + NET_IP_ALIGN)
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index 1b990a486059..cfc07f012254 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -38,6 +38,7 @@
 #include <linux/in.h>
 #include <linux/ip.h>
 #include <net/ip.h>
+#include <net/page_pool.h>
 #include <net/selftests.h>
 #include <net/tso.h>
 #include <linux/tcp.h>
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
index 77c8f650f7ac..b5385ae65dcb 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
@@ -7,6 +7,7 @@
 
 #include <linux/interrupt.h>
 #include <linux/pci.h>
+#include <net/page_pool.h>
 #include <net/tso.h>
 #include <linux/bitfield.h>
 
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
index fe8ea4e531b7..7eca434a0550 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
@@ -16,6 +16,7 @@
 #include <linux/bpf.h>
 #include <linux/bpf_trace.h>
 #include <linux/bitfield.h>
+#include <net/page_pool.h>
 
 #include "otx2_reg.h"
 #include "otx2_common.h"
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/params.c b/drivers/net/ethernet/mellanox/mlx5/core/en/params.c
index 5ce28ff7685f..0f152f14165b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/params.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/params.c
@@ -6,6 +6,7 @@
 #include "en/port.h"
 #include "en_accel/en_accel.h"
 #include "en_accel/ipsec.h"
+#include <net/page_pool.h>
 #include <net/xdp_sock_drv.h>
 
 static u8 mlx5e_mpwrq_min_page_shift(struct mlx5_core_dev *mdev)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
index 40589cebb773..16038c23b7d8 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
@@ -35,6 +35,7 @@
 #include "en/xdp.h"
 #include "en/params.h"
 #include <linux/bitfield.h>
+#include <net/page_pool.h>
 
 int mlx5e_xdp_max_mtu(struct mlx5e_params *params, struct mlx5e_xsk_param *xsk)
 {
diff --git a/drivers/net/wireless/mediatek/mt76/mt76.h b/drivers/net/wireless/mediatek/mt76/mt76.h
index 6b07b8fafec2..95c16f11d156 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76.h
@@ -15,6 +15,7 @@
 #include <linux/average.h>
 #include <linux/soc/mediatek/mtk_wed.h>
 #include <net/mac80211.h>
+#include <net/page_pool.h>
 #include "util.h"
 #include "testmode.h"
 
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 91ed66952580..bc4a7d45365b 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -32,7 +32,7 @@
 #include <linux/if_packet.h>
 #include <linux/llist.h>
 #include <net/flow.h>
-#include <net/page_pool.h>
+#include <net/page_pool_types.h>
 #if IS_ENABLED(CONFIG_NF_CONNTRACK)
 #include <linux/netfilter/nf_conntrack_common.h>
 #endif
diff --git a/include/net/page_pool.h b/include/net/page_pool.h
index 126f9e294389..bee12295d729 100644
--- a/include/net/page_pool.h
+++ b/include/net/page_pool.h
@@ -30,107 +30,9 @@
 #ifndef _NET_PAGE_POOL_H
 #define _NET_PAGE_POOL_H
 
-#include <linux/mm.h> /* Needed by ptr_ring */
-#include <linux/ptr_ring.h>
-#include <linux/dma-direction.h>
-
-#define PP_FLAG_DMA_MAP		BIT(0) /* Should page_pool do the DMA
-					* map/unmap
-					*/
-#define PP_FLAG_DMA_SYNC_DEV	BIT(1) /* If set all pages that the driver gets
-					* from page_pool will be
-					* DMA-synced-for-device according to
-					* the length provided by the device
-					* driver.
-					* Please note DMA-sync-for-CPU is still
-					* device driver responsibility
-					*/
-#define PP_FLAG_PAGE_FRAG	BIT(2) /* for page frag feature */
-#define PP_FLAG_ALL		(PP_FLAG_DMA_MAP |\
-				 PP_FLAG_DMA_SYNC_DEV |\
-				 PP_FLAG_PAGE_FRAG)
-
-/*
- * Fast allocation side cache array/stack
- *
- * The cache size and refill watermark is related to the network
- * use-case.  The NAPI budget is 64 packets.  After a NAPI poll the RX
- * ring is usually refilled and the max consumed elements will be 64,
- * thus a natural max size of objects needed in the cache.
- *
- * Keeping room for more objects, is due to XDP_DROP use-case.  As
- * XDP_DROP allows the opportunity to recycle objects directly into
- * this array, as it shares the same softirq/NAPI protection.  If
- * cache is already full (or partly full) then the XDP_DROP recycles
- * would have to take a slower code path.
- */
-#define PP_ALLOC_CACHE_SIZE	128
-#define PP_ALLOC_CACHE_REFILL	64
-struct pp_alloc_cache {
-	u32 count;
-	struct page *cache[PP_ALLOC_CACHE_SIZE];
-};
-
-struct page_pool_params {
-	unsigned int	flags;
-	unsigned int	order;
-	unsigned int	pool_size;
-	int		nid;  /* Numa node id to allocate from pages from */
-	struct device	*dev; /* device, for DMA pre-mapping purposes */
-	struct napi_struct *napi; /* Sole consumer of pages, otherwise NULL */
-	enum dma_data_direction dma_dir; /* DMA mapping direction */
-	unsigned int	max_len; /* max DMA sync memory size */
-	unsigned int	offset;  /* DMA addr offset */
-	void (*init_callback)(struct page *page, void *arg);
-	void *init_arg;
-};
-
-#ifdef CONFIG_PAGE_POOL_STATS
-struct page_pool_alloc_stats {
-	u64 fast; /* fast path allocations */
-	u64 slow; /* slow-path order 0 allocations */
-	u64 slow_high_order; /* slow-path high order allocations */
-	u64 empty; /* failed refills due to empty ptr ring, forcing
-		    * slow path allocation
-		    */
-	u64 refill; /* allocations via successful refill */
-	u64 waive;  /* failed refills due to numa zone mismatch */
-};
-
-struct page_pool_recycle_stats {
-	u64 cached;	/* recycling placed page in the cache. */
-	u64 cache_full; /* cache was full */
-	u64 ring;	/* recycling placed page back into ptr ring */
-	u64 ring_full;	/* page was released from page-pool because
-			 * PTR ring was full.
-			 */
-	u64 released_refcnt; /* page released because of elevated
-			      * refcnt
-			      */
-};
-
-/* This struct wraps the above stats structs so users of the
- * page_pool_get_stats API can pass a single argument when requesting the
- * stats for the page pool.
- */
-struct page_pool_stats {
-	struct page_pool_alloc_stats alloc_stats;
-	struct page_pool_recycle_stats recycle_stats;
-};
-
-int page_pool_ethtool_stats_get_count(void);
-u8 *page_pool_ethtool_stats_get_strings(u8 *data);
-u64 *page_pool_ethtool_stats_get(u64 *data, void *stats);
-
-/*
- * Drivers that wish to harvest page pool stats and report them to users
- * (perhaps via ethtool, debugfs, or another mechanism) can allocate a
- * struct page_pool_stats call page_pool_get_stats to get stats for the specified pool.
- */
-bool page_pool_get_stats(struct page_pool *pool,
-			 struct page_pool_stats *stats);
-#else
+#include <net/page_pool_types.h>
 
+#ifndef CONFIG_PAGE_POOL_STATS
 static inline int page_pool_ethtool_stats_get_count(void)
 {
 	return 0;
@@ -145,72 +47,7 @@ static inline u64 *page_pool_ethtool_stats_get(u64 *data, void *stats)
 {
 	return data;
 }
-
-#endif
-
-struct page_pool {
-	struct page_pool_params p;
-
-	struct delayed_work release_dw;
-	void (*disconnect)(void *);
-	unsigned long defer_start;
-	unsigned long defer_warn;
-
-	u32 pages_state_hold_cnt;
-	unsigned int frag_offset;
-	struct page *frag_page;
-	long frag_users;
-
-#ifdef CONFIG_PAGE_POOL_STATS
-	/* these stats are incremented while in softirq context */
-	struct page_pool_alloc_stats alloc_stats;
-#endif
-	u32 xdp_mem_id;
-
-	/*
-	 * Data structure for allocation side
-	 *
-	 * Drivers allocation side usually already perform some kind
-	 * of resource protection.  Piggyback on this protection, and
-	 * require driver to protect allocation side.
-	 *
-	 * For NIC drivers this means, allocate a page_pool per
-	 * RX-queue. As the RX-queue is already protected by
-	 * Softirq/BH scheduling and napi_schedule. NAPI schedule
-	 * guarantee that a single napi_struct will only be scheduled
-	 * on a single CPU (see napi_schedule).
-	 */
-	struct pp_alloc_cache alloc ____cacheline_aligned_in_smp;
-
-	/* Data structure for storing recycled pages.
-	 *
-	 * Returning/freeing pages is more complicated synchronization
-	 * wise, because free's can happen on remote CPUs, with no
-	 * association with allocation resource.
-	 *
-	 * Use ptr_ring, as it separates consumer and producer
-	 * effeciently, it a way that doesn't bounce cache-lines.
-	 *
-	 * TODO: Implement bulk return pages into this structure.
-	 */
-	struct ptr_ring ring;
-
-#ifdef CONFIG_PAGE_POOL_STATS
-	/* recycle stats are per-cpu to avoid locking */
-	struct page_pool_recycle_stats __percpu *recycle_stats;
 #endif
-	atomic_t pages_state_release_cnt;
-
-	/* A page_pool is strictly tied to a single RX-queue being
-	 * protected by NAPI, due to above pp_alloc_cache. This
-	 * refcnt serves purpose is to simplify drivers error handling.
-	 */
-	refcount_t user_cnt;
-
-	u64 destroy_cnt;
-};
-
-struct page *page_pool_alloc_pages(struct page_pool *pool, gfp_t gfp);
 
 static inline struct page *page_pool_dev_alloc_pages(struct page_pool *pool)
 {
@@ -219,9 +56,6 @@ static inline struct page *page_pool_dev_alloc_pages(struct page_pool *pool)
 	return page_pool_alloc_pages(pool, gfp);
 }
 
-struct page *page_pool_alloc_frag(struct page_pool *pool, unsigned int *offset,
-				  unsigned int size, gfp_t gfp);
-
 static inline struct page *page_pool_dev_alloc_frag(struct page_pool *pool,
 						    unsigned int *offset,
 						    unsigned int size)
@@ -240,21 +74,7 @@ inline enum dma_data_direction page_pool_get_dma_dir(struct page_pool *pool)
 	return pool->p.dma_dir;
 }
 
-bool page_pool_return_skb_page(struct page *page, bool napi_safe);
-
-struct page_pool *page_pool_create(const struct page_pool_params *params);
-
-struct xdp_mem_info;
-
-#ifdef CONFIG_PAGE_POOL
-void page_pool_unlink_napi(struct page_pool *pool);
-void page_pool_destroy(struct page_pool *pool);
-void page_pool_use_xdp_mem(struct page_pool *pool, void (*disconnect)(void *),
-			   struct xdp_mem_info *mem);
-void page_pool_release_page(struct page_pool *pool, struct page *page);
-void page_pool_put_page_bulk(struct page_pool *pool, void **data,
-			     int count);
-#else
+#ifndef CONFIG_PAGE_POOL
 static inline void page_pool_unlink_napi(struct page_pool *pool)
 {
 }
@@ -263,6 +83,7 @@ static inline void page_pool_destroy(struct page_pool *pool)
 {
 }
 
+struct xdp_mem_info;
 static inline void page_pool_use_xdp_mem(struct page_pool *pool,
 					 void (*disconnect)(void *),
 					 struct xdp_mem_info *mem)
@@ -279,10 +100,6 @@ static inline void page_pool_put_page_bulk(struct page_pool *pool, void **data,
 }
 #endif
 
-void page_pool_put_defragged_page(struct page_pool *pool, struct page *page,
-				  unsigned int dma_sync_size,
-				  bool allow_direct);
-
 /* pp_frag_count represents the number of writers who can update the page
  * either by updating skb->data or via DMA mappings for the device.
  * We can't rely on the page refcnt for that as we don't know who might be
@@ -391,8 +208,6 @@ static inline bool page_pool_put(struct page_pool *pool)
 	return refcount_dec_and_test(&pool->user_cnt);
 }
 
-/* Caller must provide appropriate safe context, e.g. NAPI. */
-void page_pool_update_nid(struct page_pool *pool, int new_nid);
 static inline void page_pool_nid_changed(struct page_pool *pool, int new_nid)
 {
 	if (unlikely(pool->p.nid != new_nid))
diff --git a/include/net/page_pool_types.h b/include/net/page_pool_types.h
new file mode 100644
index 000000000000..9dc189082e20
--- /dev/null
+++ b/include/net/page_pool_types.h
@@ -0,0 +1,193 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef _NET_PAGE_POOL_TYPES_H
+#define _NET_PAGE_POOL_TYPES_H
+
+#include <linux/ptr_ring.h>
+#include <linux/dma-direction.h>
+
+#define PP_FLAG_DMA_MAP		BIT(0) /* Should page_pool do the DMA
+					* map/unmap
+					*/
+#define PP_FLAG_DMA_SYNC_DEV	BIT(1) /* If set all pages that the driver gets
+					* from page_pool will be
+					* DMA-synced-for-device according to
+					* the length provided by the device
+					* driver.
+					* Please note DMA-sync-for-CPU is still
+					* device driver responsibility
+					*/
+#define PP_FLAG_PAGE_FRAG	BIT(2) /* for page frag feature */
+#define PP_FLAG_ALL		(PP_FLAG_DMA_MAP |\
+				 PP_FLAG_DMA_SYNC_DEV |\
+				 PP_FLAG_PAGE_FRAG)
+
+/*
+ * Fast allocation side cache array/stack
+ *
+ * The cache size and refill watermark is related to the network
+ * use-case.  The NAPI budget is 64 packets.  After a NAPI poll the RX
+ * ring is usually refilled and the max consumed elements will be 64,
+ * thus a natural max size of objects needed in the cache.
+ *
+ * Keeping room for more objects, is due to XDP_DROP use-case.  As
+ * XDP_DROP allows the opportunity to recycle objects directly into
+ * this array, as it shares the same softirq/NAPI protection.  If
+ * cache is already full (or partly full) then the XDP_DROP recycles
+ * would have to take a slower code path.
+ */
+#define PP_ALLOC_CACHE_SIZE	128
+#define PP_ALLOC_CACHE_REFILL	64
+struct pp_alloc_cache {
+	u32 count;
+	struct page *cache[PP_ALLOC_CACHE_SIZE];
+};
+
+struct page_pool_params {
+	unsigned int	flags;
+	unsigned int	order;
+	unsigned int	pool_size;
+	int		nid;  /* Numa node id to allocate from pages from */
+	struct device	*dev; /* device, for DMA pre-mapping purposes */
+	struct napi_struct *napi; /* Sole consumer of pages, otherwise NULL */
+	enum dma_data_direction dma_dir; /* DMA mapping direction */
+	unsigned int	max_len; /* max DMA sync memory size */
+	unsigned int	offset;  /* DMA addr offset */
+	void (*init_callback)(struct page *page, void *arg);
+	void *init_arg;
+};
+
+#ifdef CONFIG_PAGE_POOL_STATS
+struct page_pool_alloc_stats {
+	u64 fast; /* fast path allocations */
+	u64 slow; /* slow-path order 0 allocations */
+	u64 slow_high_order; /* slow-path high order allocations */
+	u64 empty; /* failed refills due to empty ptr ring, forcing
+		    * slow path allocation
+		    */
+	u64 refill; /* allocations via successful refill */
+	u64 waive;  /* failed refills due to numa zone mismatch */
+};
+
+struct page_pool_recycle_stats {
+	u64 cached;	/* recycling placed page in the cache. */
+	u64 cache_full; /* cache was full */
+	u64 ring;	/* recycling placed page back into ptr ring */
+	u64 ring_full;	/* page was released from page-pool because
+			 * PTR ring was full.
+			 */
+	u64 released_refcnt; /* page released because of elevated
+			      * refcnt
+			      */
+};
+
+/* This struct wraps the above stats structs so users of the
+ * page_pool_get_stats API can pass a single argument when requesting the
+ * stats for the page pool.
+ */
+struct page_pool_stats {
+	struct page_pool_alloc_stats alloc_stats;
+	struct page_pool_recycle_stats recycle_stats;
+};
+
+int page_pool_ethtool_stats_get_count(void);
+u8 *page_pool_ethtool_stats_get_strings(u8 *data);
+u64 *page_pool_ethtool_stats_get(u64 *data, void *stats);
+
+/*
+ * Drivers that wish to harvest page pool stats and report them to users
+ * (perhaps via ethtool, debugfs, or another mechanism) can allocate a
+ * struct page_pool_stats call page_pool_get_stats to get stats for the specified pool.
+ */
+bool page_pool_get_stats(struct page_pool *pool,
+			 struct page_pool_stats *stats);
+#endif
+
+struct page_pool {
+	struct page_pool_params p;
+
+	struct delayed_work release_dw;
+	void (*disconnect)(void *);
+	unsigned long defer_start;
+	unsigned long defer_warn;
+
+	u32 pages_state_hold_cnt;
+	unsigned int frag_offset;
+	struct page *frag_page;
+	long frag_users;
+
+#ifdef CONFIG_PAGE_POOL_STATS
+	/* these stats are incremented while in softirq context */
+	struct page_pool_alloc_stats alloc_stats;
+#endif
+	u32 xdp_mem_id;
+
+	/*
+	 * Data structure for allocation side
+	 *
+	 * Drivers allocation side usually already perform some kind
+	 * of resource protection.  Piggyback on this protection, and
+	 * require driver to protect allocation side.
+	 *
+	 * For NIC drivers this means, allocate a page_pool per
+	 * RX-queue. As the RX-queue is already protected by
+	 * Softirq/BH scheduling and napi_schedule. NAPI schedule
+	 * guarantee that a single napi_struct will only be scheduled
+	 * on a single CPU (see napi_schedule).
+	 */
+	struct pp_alloc_cache alloc ____cacheline_aligned_in_smp;
+
+	/* Data structure for storing recycled pages.
+	 *
+	 * Returning/freeing pages is more complicated synchronization
+	 * wise, because free's can happen on remote CPUs, with no
+	 * association with allocation resource.
+	 *
+	 * Use ptr_ring, as it separates consumer and producer
+	 * effeciently, it a way that doesn't bounce cache-lines.
+	 *
+	 * TODO: Implement bulk return pages into this structure.
+	 */
+	struct ptr_ring ring;
+
+#ifdef CONFIG_PAGE_POOL_STATS
+	/* recycle stats are per-cpu to avoid locking */
+	struct page_pool_recycle_stats __percpu *recycle_stats;
+#endif
+	atomic_t pages_state_release_cnt;
+
+	/* A page_pool is strictly tied to a single RX-queue being
+	 * protected by NAPI, due to above pp_alloc_cache. This
+	 * refcnt serves purpose is to simplify drivers error handling.
+	 */
+	refcount_t user_cnt;
+
+	u64 destroy_cnt;
+};
+
+struct page *page_pool_alloc_pages(struct page_pool *pool, gfp_t gfp);
+struct page *page_pool_alloc_frag(struct page_pool *pool, unsigned int *offset,
+				  unsigned int size, gfp_t gfp);
+bool page_pool_return_skb_page(struct page *page, bool napi_safe);
+struct page_pool *page_pool_create(const struct page_pool_params *params);
+
+#ifdef CONFIG_PAGE_POOL
+void page_pool_unlink_napi(struct page_pool *pool);
+void page_pool_destroy(struct page_pool *pool);
+
+struct xdp_mem_info;
+void page_pool_use_xdp_mem(struct page_pool *pool, void (*disconnect)(void *),
+			   struct xdp_mem_info *mem);
+void page_pool_release_page(struct page_pool *pool, struct page *page);
+void page_pool_put_page_bulk(struct page_pool *pool, void **data,
+			     int count);
+#endif
+
+void page_pool_put_defragged_page(struct page_pool *pool, struct page *page,
+				  unsigned int dma_sync_size,
+				  bool allow_direct);
+
+/* Caller must provide appropriate safe context, e.g. NAPI. */
+void page_pool_update_nid(struct page_pool *pool, int new_nid);
+
+#endif /* _NET_PAGE_POOL_H */
-- 
2.33.0



* Re: [PATCH net-next] page_pool: split types and declarations from page_pool.h
  2023-07-19 12:13 [PATCH net-next] page_pool: split types and declarations from page_pool.h Yunsheng Lin
@ 2023-07-19 14:01 ` Jeff Johnson
  2023-07-19 16:42 ` Alexander Lobakin
                   ` (2 subsequent siblings)
  3 siblings, 0 replies; 13+ messages in thread
From: Jeff Johnson @ 2023-07-19 14:01 UTC (permalink / raw)
  To: Yunsheng Lin, davem, kuba, pabeni
  Cc: netdev, linux-kernel, Alexander Lobakin, Eric Dumazet, Wei Fang,
	Shenwei Wang, Clark Wang, NXP Linux Team, Sunil Goutham,
	Geetha sowjanya, Subbaraya Sundeep, hariprasad, Saeed Mahameed,
	Leon Romanovsky, Alexei Starovoitov, Daniel Borkmann,
	Jesper Dangaard Brouer, John Fastabend, Felix Fietkau,
	Lorenzo Bianconi, Ryder Lee, Shayne Chen, Sean Wang, Kalle Valo,
	Matthias Brugger, AngeloGioacchino Del Regno, Ilias Apalodimas,
	linux-rdma, bpf, linux-wireless, linux-arm-kernel,
	linux-mediatek

On 7/19/2023 5:13 AM, Yunsheng Lin wrote:
> Split types and pure function declarations from page_pool.h
> and add them in page_page_types.h, so that C sources can

nit: s/page_page_types/page_pool_types/

> include page_pool.h and headers should generally only include
> page_pool_types.h as suggested by jakub.



* Re: [PATCH net-next] page_pool: split types and declarations from page_pool.h
  2023-07-19 12:13 [PATCH net-next] page_pool: split types and declarations from page_pool.h Yunsheng Lin
  2023-07-19 14:01 ` Jeff Johnson
@ 2023-07-19 16:42 ` Alexander Lobakin
  2023-07-20 11:07   ` Yunsheng Lin
  2023-07-20 18:18   ` Alexander Lobakin
  2023-07-19 17:03 ` Alexander Lobakin
  2023-07-24 15:14 ` Simon Horman
  3 siblings, 2 replies; 13+ messages in thread
From: Alexander Lobakin @ 2023-07-19 16:42 UTC (permalink / raw)
  To: Yunsheng Lin
  Cc: davem, kuba, pabeni, netdev, linux-kernel, Eric Dumazet,
	Wei Fang, Shenwei Wang, Clark Wang, NXP Linux Team,
	Sunil Goutham, Geetha sowjanya, Subbaraya Sundeep, hariprasad,
	Saeed Mahameed, Leon Romanovsky, Alexei Starovoitov,
	Daniel Borkmann, Jesper Dangaard Brouer, John Fastabend,
	Felix Fietkau, Lorenzo Bianconi, Ryder Lee, Shayne Chen,
	Sean Wang, Kalle Valo, Matthias Brugger,
	AngeloGioacchino Del Regno, Ilias Apalodimas, linux-rdma, bpf,
	linux-wireless, linux-arm-kernel, linux-mediatek

From: Yunsheng Lin <linyunsheng@huawei.com>
Date: Wed, 19 Jul 2023 20:13:37 +0800

> Split types and pure function declarations from page_pool.h
> and add them in page_page_types.h, so that C sources can
> include page_pool.h and headers should generally only include
> page_pool_types.h as suggested by jakub.
> 
> Signed-off-by: Yunsheng Lin <linyunsheng@huawei.com>
> Suggested-by: Jakub Kicinski <kuba@kernel.org>
> CC: Alexander Lobakin <aleksander.lobakin@intel.com>
Nice!

Let me take it into my tree, I assume it's safe to say it will be
accepted sooner than my patches :D

BTW, what do you think: is it better to have those two includes in the
root include/net/ folder or do something like

include/net/page_pool/
  * types.h
  * <some meaningful name>.h (let's say driver.h)

like it's done e.g. for GPIO (see include/linux/gpio/)?
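
(For reference, the GPIO split looks roughly like this; approximate
listing from memory, not exhaustive:

include/linux/gpio/
  * consumer.h - API for code that consumes GPIOs
  * driver.h   - API for GPIO controller drivers
  * machine.h  - board/machine GPIO lookup tables

so page_pool could get a similar consumer/driver-style split.)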

Thanks,
Olek


* Re: [PATCH net-next] page_pool: split types and declarations from page_pool.h
  2023-07-19 12:13 [PATCH net-next] page_pool: split types and declarations from page_pool.h Yunsheng Lin
  2023-07-19 14:01 ` Jeff Johnson
  2023-07-19 16:42 ` Alexander Lobakin
@ 2023-07-19 17:03 ` Alexander Lobakin
  2023-07-24 15:14 ` Simon Horman
  3 siblings, 0 replies; 13+ messages in thread
From: Alexander Lobakin @ 2023-07-19 17:03 UTC (permalink / raw)
  To: Yunsheng Lin
  Cc: davem, kuba, pabeni, netdev, linux-kernel, Eric Dumazet,
	Wei Fang, Shenwei Wang, Clark Wang, NXP Linux Team,
	Sunil Goutham, Geetha sowjanya, Subbaraya Sundeep, hariprasad,
	Saeed Mahameed, Leon Romanovsky, Alexei Starovoitov,
	Daniel Borkmann, Jesper Dangaard Brouer, John Fastabend,
	Felix Fietkau, Lorenzo Bianconi, Ryder Lee, Shayne Chen,
	Sean Wang, Kalle Valo, Matthias Brugger,
	AngeloGioacchino Del Regno, Ilias Apalodimas, linux-rdma, bpf,
	linux-wireless, linux-arm-kernel, linux-mediatek

From: Yunsheng Lin <linyunsheng@huawei.com>
Date: Wed, 19 Jul 2023 20:13:37 +0800

> Split types and pure function declarations from page_pool.h
> and add them in page_page_types.h, so that C sources can
> include page_pool.h and headers should generally only include
> page_pool_types.h as suggested by jakub.

[...]

> diff --git a/include/net/page_pool_types.h b/include/net/page_pool_types.h
> new file mode 100644
> index 000000000000..9dc189082e20
> --- /dev/null
> +++ b/include/net/page_pool_types.h
> @@ -0,0 +1,193 @@
> +/* SPDX-License-Identifier: GPL-2.0 */
> +
> +#ifndef _NET_PAGE_POOL_TYPES_H
> +#define _NET_PAGE_POOL_TYPES_H
> +
> +#include <linux/ptr_ring.h>
> +#include <linux/dma-direction.h>

Nit: alphabetic sorting?

> +
> +#define PP_FLAG_DMA_MAP		BIT(0) /* Should page_pool do the DMA
> +					* map/unmap
> +					*/
> +#define PP_FLAG_DMA_SYNC_DEV	BIT(1) /* If set all pages that the driver gets
> +					* from page_pool will be
> +					* DMA-synced-for-device according to
> +					* the length provided by the device
> +					* driver.
> +					* Please note DMA-sync-for-CPU is still
> +					* device driver responsibility
> +					*/
[...]

Thanks,
Olek


* Re: [PATCH net-next] page_pool: split types and declarations from page_pool.h
  2023-07-19 16:42 ` Alexander Lobakin
@ 2023-07-20 11:07   ` Yunsheng Lin
  2023-07-20 16:22     ` Jakub Kicinski
  2023-07-20 18:18   ` Alexander Lobakin
  1 sibling, 1 reply; 13+ messages in thread
From: Yunsheng Lin @ 2023-07-20 11:07 UTC (permalink / raw)
  To: Alexander Lobakin
  Cc: davem, kuba, pabeni, netdev, linux-kernel, Eric Dumazet,
	Wei Fang, Shenwei Wang, Clark Wang, NXP Linux Team,
	Sunil Goutham, Geetha sowjanya, Subbaraya Sundeep, hariprasad,
	Saeed Mahameed, Leon Romanovsky, Alexei Starovoitov,
	Daniel Borkmann, Jesper Dangaard Brouer, John Fastabend,
	Felix Fietkau, Lorenzo Bianconi, Ryder Lee, Shayne Chen,
	Sean Wang, Kalle Valo, Matthias Brugger,
	AngeloGioacchino Del Regno, Ilias Apalodimas, linux-rdma, bpf,
	linux-wireless, linux-arm-kernel, linux-mediatek

On 2023/7/20 0:42, Alexander Lobakin wrote:
> 
> BTW, what do you think: is it better to have those two includes in the
> root include/net/ folder or do something like
> 
> include/net/page_pool/
>   * types.h
>   * <some meaningful name>.h (let's say driver.h)
> 
> like it's done e.g. for GPIO (see include/linux/gpio/)?

It makes more sense to add a new dir for page pool if there are
more new headers added. As we are still keeping page_pool.h,
mirroring include/linux/gpio.h, adding a new dir for only one
header file just adds another level of dir without obvious benefit.
We can add a new dir for it if we turn out to need more header
files for page pool in the future. Does that make sense?

> 
> Thanks,
> Olek
> 
> .
> 


* Re: [PATCH net-next] page_pool: split types and declarations from page_pool.h
  2023-07-20 11:07   ` Yunsheng Lin
@ 2023-07-20 16:22     ` Jakub Kicinski
  2023-07-21 11:12       ` Yunsheng Lin
  0 siblings, 1 reply; 13+ messages in thread
From: Jakub Kicinski @ 2023-07-20 16:22 UTC (permalink / raw)
  To: Yunsheng Lin
  Cc: Alexander Lobakin, davem, pabeni, netdev, linux-kernel,
	Eric Dumazet, Wei Fang, Shenwei Wang, Clark Wang, NXP Linux Team,
	Sunil Goutham, Geetha sowjanya, Subbaraya Sundeep, hariprasad,
	Saeed Mahameed, Leon Romanovsky, Alexei Starovoitov,
	Daniel Borkmann, Jesper Dangaard Brouer, John Fastabend,
	Felix Fietkau, Lorenzo Bianconi, Ryder Lee, Shayne Chen,
	Sean Wang, Kalle Valo, Matthias Brugger,
	AngeloGioacchino Del Regno, Ilias Apalodimas, linux-rdma, bpf,
	linux-wireless, linux-arm-kernel, linux-mediatek

On Thu, 20 Jul 2023 19:07:31 +0800 Yunsheng Lin wrote:
> > BTW, what do you think: is it better to have those two includes in the
> > root include/net/ folder or do something like
> > 
> > include/net/page_pool/
> >   * types.h
> >   * <some meaningful name>.h (let's say driver.h)
> > 
> > like it's done e.g. for GPIO (see include/linux/gpio/)?  
> 
> It makes more sense to add a new dir for page pool if there are
> more new headers added. As we are still keeping page_pool.h,
> mirroring include/linux/gpio.h, adding a new dir for only one
> header file just adds another level of dir without obvious benefit.
> We can add a new dir for it if we turn out to need more header
> files for page pool in the future. Does that make sense?

It doesn't matter all that much, so to have some uniformity in
networking I think you should go with Olek's suggestion, which is also
my preference.


* Re: [PATCH net-next] page_pool: split types and declarations from page_pool.h
  2023-07-19 16:42 ` Alexander Lobakin
  2023-07-20 11:07   ` Yunsheng Lin
@ 2023-07-20 18:18   ` Alexander Lobakin
  1 sibling, 0 replies; 13+ messages in thread
From: Alexander Lobakin @ 2023-07-20 18:18 UTC (permalink / raw)
  To: Yunsheng Lin
  Cc: davem, kuba, pabeni, netdev, linux-kernel, Eric Dumazet,
	Wei Fang, Shenwei Wang, Clark Wang, NXP Linux Team,
	Sunil Goutham, Geetha sowjanya, Subbaraya Sundeep, hariprasad,
	Saeed Mahameed, Leon Romanovsky, Alexei Starovoitov,
	Daniel Borkmann, Jesper Dangaard Brouer, John Fastabend,
	Felix Fietkau, Lorenzo Bianconi, Ryder Lee, Shayne Chen,
	Sean Wang, Kalle Valo, Matthias Brugger,
	AngeloGioacchino Del Regno, Ilias Apalodimas, linux-rdma, bpf,
	linux-wireless, linux-arm-kernel, linux-mediatek

From: Alexander Lobakin <aleksander.lobakin@intel.com>
Date: Wed, 19 Jul 2023 18:42:17 +0200

> From: Yunsheng Lin <linyunsheng@huawei.com>
> Date: Wed, 19 Jul 2023 20:13:37 +0800
> 
>> Split types and pure function declarations from page_pool.h
>> and add them in page_page_types.h, so that C sources can
>> include page_pool.h and headers should generally only include
>> page_pool_types.h as suggested by jakub.
>>
>> Signed-off-by: Yunsheng Lin <linyunsheng@huawei.com>
>> Suggested-by: Jakub Kicinski <kuba@kernel.org>
>> CC: Alexander Lobakin <aleksander.lobakin@intel.com>
> Nice!
> 
> Let me take it into my tree, I assume it's safe to say it will be
> accepted sooner than my patches :D

FYI: it's already there (since yesterday), including your hybrid
allocation series, so for the next revision you could take it from there
and avoid rebasing it one more time :)
...except that it seems I'll have to rebase it once again now that you're
changing the patch to add a new folder as Jakub asked.

(it's still the same iavf-pp-frag)

> 
> BTW, what do you think: is it better to have those two includes in the
> root include/net/ folder or do something like
> 
> include/net/page_pool/
>   * types.h
>   * <some meaningful name>.h (let's say driver.h)
> 
> like it's done e.g. for GPIO (see include/linux/gpio/)?
> 
> Thanks,
> Olek

Thanks,
Olek


* Re: [PATCH net-next] page_pool: split types and declarations from page_pool.h
  2023-07-20 16:22     ` Jakub Kicinski
@ 2023-07-21 11:12       ` Yunsheng Lin
  2023-07-21 14:56         ` Jakub Kicinski
  0 siblings, 1 reply; 13+ messages in thread
From: Yunsheng Lin @ 2023-07-21 11:12 UTC (permalink / raw)
  To: Jakub Kicinski
  Cc: Alexander Lobakin, davem, pabeni, netdev, linux-kernel,
	Eric Dumazet, Wei Fang, Shenwei Wang, Clark Wang, NXP Linux Team,
	Sunil Goutham, Geetha sowjanya, Subbaraya Sundeep, hariprasad,
	Saeed Mahameed, Leon Romanovsky, Alexei Starovoitov,
	Daniel Borkmann, Jesper Dangaard Brouer, John Fastabend,
	Felix Fietkau, Lorenzo Bianconi, Ryder Lee, Shayne Chen,
	Sean Wang, Kalle Valo, Matthias Brugger,
	AngeloGioacchino Del Regno, Ilias Apalodimas, linux-rdma, bpf,
	linux-wireless, linux-arm-kernel, linux-mediatek

On 2023/7/21 0:22, Jakub Kicinski wrote:
> On Thu, 20 Jul 2023 19:07:31 +0800 Yunsheng Lin wrote:
>>> BTW, what do you think: is it better to have those two includes in the
>>> root include/net/ folder or do something like
>>>
>>> include/net/page_pool/
>>>   * types.h
>>>   * <some meaningful name>.h (let's say driver.h)
>>>
>>> like it's done e.g. for GPIO (see include/linux/gpio/)?  
>>
>> It makes more sense to add a new dir for page pool if there are
>> more new headers added. As we are still keeping page_pool.h,
>> mirroring include/linux/gpio.h, adding a new dir for only one
>> header file just adds another level of dir without obvious benefit.
>> We can add a new dir for it if we turn out to need more header
>> files for page pool in the future. Does that make sense?
> 
> It doesn't matter all that much, so to have some uniformity in
> networking I think you should go with Olek's suggestion, which is also
> my preference.

Just to be clear, include/net/page_pool.h is still there, we are not
putting page_pool.h in include/net/page_pool/ and renaming it to
something else, right? As there isn't that kind of uniformity in
include/net/* as far as I can see.

More specifically, you mean the below, right?
include/net/page_pool.h
include/net/page_pool/types.h

> .
> 


* Re: [PATCH net-next] page_pool: split types and declarations from page_pool.h
  2023-07-21 11:12       ` Yunsheng Lin
@ 2023-07-21 14:56         ` Jakub Kicinski
  2023-07-21 15:51           ` Alexander Lobakin
  0 siblings, 1 reply; 13+ messages in thread
From: Jakub Kicinski @ 2023-07-21 14:56 UTC (permalink / raw)
  To: Yunsheng Lin
  Cc: Alexander Lobakin, davem, pabeni, netdev, linux-kernel,
	Eric Dumazet, Wei Fang, Shenwei Wang, Clark Wang, NXP Linux Team,
	Sunil Goutham, Geetha sowjanya, Subbaraya Sundeep, hariprasad,
	Saeed Mahameed, Leon Romanovsky, Alexei Starovoitov,
	Daniel Borkmann, Jesper Dangaard Brouer, John Fastabend,
	Felix Fietkau, Lorenzo Bianconi, Ryder Lee, Shayne Chen,
	Sean Wang, Kalle Valo, Matthias Brugger,
	AngeloGioacchino Del Regno, Ilias Apalodimas, linux-rdma, bpf,
	linux-wireless, linux-arm-kernel, linux-mediatek

On Fri, 21 Jul 2023 19:12:25 +0800 Yunsheng Lin wrote:
> Just to be clear, include/net/page_pool.h is still there, we are not
> putting page_pool.h in include/net/page_pool/ and renaming it to
> something else, right? As there isn't that kind of uniformity in
> include/net/* as far as I can see.

Like many things the uniformity is a plan which mostly exists in my head
at this stage :) But it is somewhat inspired by include/linux/sched.*

> More specifically, you mean the below, right?
> include/net/page_pool.h
> include/net/page_pool/types.h

Yes.


* Re: [PATCH net-next] page_pool: split types and declarations from page_pool.h
  2023-07-21 14:56         ` Jakub Kicinski
@ 2023-07-21 15:51           ` Alexander Lobakin
  2023-07-22  1:29             ` Jakub Kicinski
  0 siblings, 1 reply; 13+ messages in thread
From: Alexander Lobakin @ 2023-07-21 15:51 UTC (permalink / raw)
  To: Jakub Kicinski, Yunsheng Lin
  Cc: davem, pabeni, netdev, linux-kernel, Eric Dumazet, Wei Fang,
	Shenwei Wang, Clark Wang, NXP Linux Team, Sunil Goutham,
	Geetha sowjanya, Subbaraya Sundeep, hariprasad, Saeed Mahameed,
	Leon Romanovsky, Alexei Starovoitov, Daniel Borkmann,
	Jesper Dangaard Brouer, John Fastabend, Felix Fietkau,
	Lorenzo Bianconi, Ryder Lee, Shayne Chen, Sean Wang, Kalle Valo,
	Matthias Brugger, AngeloGioacchino Del Regno, Ilias Apalodimas,
	linux-rdma, bpf, linux-wireless, linux-arm-kernel,
	linux-mediatek

From: Jakub Kicinski <kuba@kernel.org>
Date: Fri, 21 Jul 2023 07:56:15 -0700

> On Fri, 21 Jul 2023 19:12:25 +0800 Yunsheng Lin wrote:
>> Just to be clear, include/net/page_pool.h is still there, we are not
>> putting page_pool.h in include/net/page_pool/ and renaming it to
>> something else, right? As there isn't that kind of uniformity in
>> include/net/* as far as I can see.
> 
> Like many things the uniformity is a plan which mostly exists in my head
> at this stage :) But it is somewhat inspired by include/linux/sched.*
> 
>> More specifically, you mean the below, right?
>> include/net/page_pool.h
>> include/net/page_pool/types.h
> 
> Yes.

What I meant is

include/net/page_pool/types.h
include/net/page_pool/driver.h

I'm not insisting, just to be clear :)

Thanks,
Olek


* Re: [PATCH net-next] page_pool: split types and declarations from page_pool.h
  2023-07-21 15:51           ` Alexander Lobakin
@ 2023-07-22  1:29             ` Jakub Kicinski
  2023-07-24 10:18               ` Alexander Lobakin
  0 siblings, 1 reply; 13+ messages in thread
From: Jakub Kicinski @ 2023-07-22  1:29 UTC (permalink / raw)
  To: Alexander Lobakin
  Cc: Yunsheng Lin, davem, pabeni, netdev, linux-kernel, Eric Dumazet,
	Wei Fang, Shenwei Wang, Clark Wang, NXP Linux Team,
	Sunil Goutham, Geetha sowjanya, Subbaraya Sundeep, hariprasad,
	Saeed Mahameed, Leon Romanovsky, Alexei Starovoitov,
	Daniel Borkmann, Jesper Dangaard Brouer, John Fastabend,
	Felix Fietkau, Lorenzo Bianconi, Ryder Lee, Shayne Chen,
	Sean Wang, Kalle Valo, Matthias Brugger,
	AngeloGioacchino Del Regno, Ilias Apalodimas, linux-rdma, bpf,
	linux-wireless, linux-arm-kernel, linux-mediatek

On Fri, 21 Jul 2023 17:51:17 +0200 Alexander Lobakin wrote:
> >> More specifically, you mean the below, right?
> >> include/net/page_pool.h
> >> include/net/page_pool/types.h  
> > 
> > Yes.  
> 
> What I meant is
> 
> include/net/page_pool/types.h
> include/net/page_pool/driver.h
> 
> I'm not insisting, just to be clear :)

I thought we already talked about naming headers after the user :S
Unless you're _defining_ a driver in driver.h, that's not a good name.
types.h, helpers.h, functions.h, dma.h are good names.


* Re: [PATCH net-next] page_pool: split types and declarations from page_pool.h
  2023-07-22  1:29             ` Jakub Kicinski
@ 2023-07-24 10:18               ` Alexander Lobakin
  0 siblings, 0 replies; 13+ messages in thread
From: Alexander Lobakin @ 2023-07-24 10:18 UTC (permalink / raw)
  To: Jakub Kicinski
  Cc: Yunsheng Lin, davem, pabeni, netdev, linux-kernel, Eric Dumazet,
	Wei Fang, Shenwei Wang, Clark Wang, NXP Linux Team,
	Sunil Goutham, Geetha sowjanya, Subbaraya Sundeep, hariprasad,
	Saeed Mahameed, Leon Romanovsky, Alexei Starovoitov,
	Daniel Borkmann, Jesper Dangaard Brouer, John Fastabend,
	Felix Fietkau, Lorenzo Bianconi, Ryder Lee, Shayne Chen,
	Sean Wang, Kalle Valo, Matthias Brugger,
	AngeloGioacchino Del Regno, Ilias Apalodimas, linux-rdma, bpf,
	linux-wireless, linux-arm-kernel, linux-mediatek

From: Jakub Kicinski <kuba@kernel.org>
Date: Fri, 21 Jul 2023 18:29:42 -0700

> On Fri, 21 Jul 2023 17:51:17 +0200 Alexander Lobakin wrote:
>>>> More specifically, you mean the below, right?
>>>> include/net/page_pool.h
>>>> include/net/page_pool/types.h  
>>>
>>> Yes.  
>>
>> What I meant is
>>
>> include/net/page_pool/types.h
>> include/net/page_pool/driver.h
>>
>> I'm not insisting, just to be clear :)
> 
> I thought we already talked about naming headers after the user :S
> Unless you're _defining_ a driver in driver.h, that's not a good name.
> types.h, helpers.h, functions.h, dma.h are good names.

Ah, sure :) I was mostly talking about

include/net/page_pool.h
include/net/page_pool/types.h

vs

include/net/page_pool/helpers.h (or functions.h)
include/net/page_pool/types.h

I.e. whether to have a header with the same name next to the folder or
place everything in that folder.
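
As a quick sketch of how the second layout reads at an include site
(the header names are assumed here, nothing final):

	/* C source that wants the inline helpers: */
	#include <net/page_pool/helpers.h>

	/* header that only needs the struct definitions: */
	#include <net/page_pool/types.h>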

Thanks,
Olek


* Re: [PATCH net-next] page_pool: split types and declarations from page_pool.h
  2023-07-19 12:13 [PATCH net-next] page_pool: split types and declarations from page_pool.h Yunsheng Lin
                   ` (2 preceding siblings ...)
  2023-07-19 17:03 ` Alexander Lobakin
@ 2023-07-24 15:14 ` Simon Horman
  3 siblings, 0 replies; 13+ messages in thread
From: Simon Horman @ 2023-07-24 15:14 UTC (permalink / raw)
  To: Yunsheng Lin
  Cc: davem, kuba, pabeni, netdev, linux-kernel, Alexander Lobakin,
	Eric Dumazet, Wei Fang, Shenwei Wang, Clark Wang, NXP Linux Team,
	Sunil Goutham, Geetha sowjanya, Subbaraya Sundeep, hariprasad,
	Saeed Mahameed, Leon Romanovsky, Alexei Starovoitov,
	Daniel Borkmann, Jesper Dangaard Brouer, John Fastabend,
	Felix Fietkau, Lorenzo Bianconi, Ryder Lee, Shayne Chen,
	Sean Wang, Kalle Valo, Matthias Brugger,
	AngeloGioacchino Del Regno, Ilias Apalodimas, linux-rdma, bpf,
	linux-wireless, linux-arm-kernel, linux-mediatek

On Wed, Jul 19, 2023 at 08:13:37PM +0800, Yunsheng Lin wrote:

Hi Yunsheng,

...

> diff --git a/include/net/page_pool_types.h b/include/net/page_pool_types.h

...

> +struct page_pool {
> +	struct page_pool_params p;
> +
> +	struct delayed_work release_dw;
> +	void (*disconnect)(void *);
> +	unsigned long defer_start;
> +	unsigned long defer_warn;
> +
> +	u32 pages_state_hold_cnt;
> +	unsigned int frag_offset;
> +	struct page *frag_page;
> +	long frag_users;
> +
> +#ifdef CONFIG_PAGE_POOL_STATS
> +	/* these stats are incremented while in softirq context */
> +	struct page_pool_alloc_stats alloc_stats;
> +#endif
> +	u32 xdp_mem_id;
> +
> +	/*
> +	 * Data structure for allocation side
> +	 *
> +	 * Drivers allocation side usually already perform some kind
> +	 * of resource protection.  Piggyback on this protection, and
> +	 * require driver to protect allocation side.
> +	 *
> +	 * For NIC drivers this means, allocate a page_pool per
> +	 * RX-queue. As the RX-queue is already protected by
> +	 * Softirq/BH scheduling and napi_schedule. NAPI schedule
> +	 * guarantee that a single napi_struct will only be scheduled
> +	 * on a single CPU (see napi_schedule).
> +	 */
> +	struct pp_alloc_cache alloc ____cacheline_aligned_in_smp;
> +
> +	/* Data structure for storing recycled pages.
> +	 *
> +	 * Returning/freeing pages is more complicated synchronization
> +	 * wise, because free's can happen on remote CPUs, with no
> +	 * association with allocation resource.
> +	 *
> +	 * Use ptr_ring, as it separates consumer and producer
> +	 * effeciently, it a way that doesn't bounce cache-lines.

I know this is moved from elsewhere, but: effeciently -> efficiently

> +	 *
> +	 * TODO: Implement bulk return pages into this structure.
> +	 */
> +	struct ptr_ring ring;
> +
> +#ifdef CONFIG_PAGE_POOL_STATS
> +	/* recycle stats are per-cpu to avoid locking */
> +	struct page_pool_recycle_stats __percpu *recycle_stats;
> +#endif
> +	atomic_t pages_state_release_cnt;
> +
> +	/* A page_pool is strictly tied to a single RX-queue being
> +	 * protected by NAPI, due to above pp_alloc_cache. This
> +	 * refcnt serves purpose is to simplify drivers error handling.
> +	 */
> +	refcount_t user_cnt;
> +
> +	u64 destroy_cnt;
> +};

...

