bpf.vger.kernel.org archive mirror
* [PATCH] libbpf: export inline helpers as symbols for xsk
@ 2021-04-26 20:22 Kumar Kartikeya Dwivedi
  2021-04-26 23:20 ` Andrii Nakryiko
  0 siblings, 1 reply; 4+ messages in thread
From: Kumar Kartikeya Dwivedi @ 2021-04-26 20:22 UTC (permalink / raw)
  To: bpf
  Cc: Kumar Kartikeya Dwivedi, Alexei Starovoitov, Daniel Borkmann,
	Andrii Nakryiko, Martin KaFai Lau, Song Liu, Yonghong Song,
	John Fastabend, KP Singh, Björn Töpel, Magnus Karlsson,
	Jonathan Lemon, David S. Miller, Jakub Kicinski,
	Jesper Dangaard Brouer, Toke Høiland-Jørgensen, netdev

This helps people writing language bindings avoid reimplementing C
wrappers for the inline functions in the headers. We keep force-inlining
the definitions from the header for C and C++ consumers, but also export
a symbol for each helper from the library for everyone else. This keeps
the performance characteristics close to those of static inline, while
also allowing tools like Rust's bindgen to generate wrappers for these
functions.
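
For example, a binding generator or a dlopen() consumer can then resolve
these helpers as ordinary symbols. A minimal sketch, assuming a libbpf.so
built with this patch (the library name and the error handling here are
illustrative, not part of the patch):

  #include <dlfcn.h>
  #include <stdio.h>

  int main(void)
  {
  	void *h, *sym;

  	h = dlopen("libbpf.so", RTLD_NOW); /* assumed install name */
  	if (!h) {
  		fprintf(stderr, "dlopen: %s\n", dlerror());
  		return 1;
  	}
  	/* With this patch, the helper is a real exported symbol rather
  	 * than only a static inline function in xsk.h. */
  	sym = dlsym(h, "xsk_ring_prod__needs_wakeup");
  	printf("xsk_ring_prod__needs_wakeup: %p\n", sym);
  	dlclose(h);
  	return 0;
  }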

Also see
https://lore.kernel.org/bpf/CAJ8uoz0QqR97qEYYK=VVCE9A=V=k2tKnH6wNM48jeak2RAmL0A@mail.gmail.com/
for some context, and https://github.com/xdp-project/xdp-tools/pull/97
for further discussion.

extern inline is used because it is slightly better: the compiler warns
when the corresponding inline definition is missing.

The visibility attribute goes on the inline definition in the header,
since that essentially acts as a declaration of the function, while the
extern inline declaration in xsk.c ends up acting as the definition that
emits the exported symbol.
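
A minimal standalone sketch of the resulting pattern (foo_add, foo.h and
foo.c are illustrative stand-ins, not libbpf code; LIBBPF_API is libbpf's
default-visibility attribute), assuming C99/gnu11 inline semantics:

  /* foo.h -- stands in for the header (xsk.h) */
  __attribute__((visibility("default"))) inline int foo_add(int x, int y)
  {
  	return x + y;	/* inline definition, used by C/C++ includers */
  }

  /* foo.c -- stands in for the library source (xsk.c) */
  #include "foo.h"

  /* Under C99/gnu11 inline rules, this extern declaration turns the
   * inline definition above into an externally visible, out-of-line
   * definition emitted in this translation unit, so the symbol ends up
   * exported from the shared library. */
  extern inline int foo_add(int x, int y);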

Signed-off-by: Kumar Kartikeya Dwivedi <memxor@gmail.com>
---
 tools/lib/bpf/libbpf.map | 16 ++++++++++++++
 tools/lib/bpf/xsk.c      | 24 +++++++++++++++++++++
 tools/lib/bpf/xsk.h      | 45 +++++++++++++++++++++++-----------------
 3 files changed, 66 insertions(+), 19 deletions(-)

diff --git a/tools/lib/bpf/libbpf.map b/tools/lib/bpf/libbpf.map
index b9b29baf1df8..52ece4296f4b 100644
--- a/tools/lib/bpf/libbpf.map
+++ b/tools/lib/bpf/libbpf.map
@@ -361,4 +361,20 @@ LIBBPF_0.4.0 {
 		bpf_linker__new;
 		bpf_map__inner_map;
 		bpf_object__set_kversion;
+		xsk_cons_nb_avail;
+		xsk_prod_nb_free;
+		xsk_ring_cons__cancel;
+		xsk_ring_cons__comp_addr;
+		xsk_ring_cons__peek;
+		xsk_ring_cons__release;
+		xsk_ring_cons__rx_desc;
+		xsk_ring_prod__fill_addr;
+		xsk_ring_prod__needs_wakeup;
+		xsk_ring_prod__reserve;
+		xsk_ring_prod__submit;
+		xsk_ring_prod__tx_desc;
+		xsk_umem__add_offset_to_addr;
+		xsk_umem__extract_addr;
+		xsk_umem__extract_offset;
+		xsk_umem__get_data;
 } LIBBPF_0.3.0;
diff --git a/tools/lib/bpf/xsk.c b/tools/lib/bpf/xsk.c
index 95da0e19f4a5..ebe370837024 100644
--- a/tools/lib/bpf/xsk.c
+++ b/tools/lib/bpf/xsk.c
@@ -107,6 +107,30 @@ struct xdp_mmap_offsets_v1 {
 	struct xdp_ring_offset_v1 cr;
 };
 
+/* Export all inline helpers as symbols for use by language bindings. */
+extern inline __u64 *xsk_ring_prod__fill_addr(struct xsk_ring_prod *fill,
+					      __u32 idx);
+extern inline const __u64 *
+xsk_ring_cons__comp_addr(const struct xsk_ring_cons *comp, __u32 idx);
+extern inline struct xdp_desc *xsk_ring_prod__tx_desc(struct xsk_ring_prod *tx,
+						      __u32 idx);
+extern inline const struct xdp_desc *
+xsk_ring_cons__rx_desc(const struct xsk_ring_cons *rx, __u32 idx);
+extern inline int xsk_ring_prod__needs_wakeup(const struct xsk_ring_prod *r);
+extern inline __u32 xsk_prod_nb_free(struct xsk_ring_prod *r, __u32 nb);
+extern inline __u32 xsk_cons_nb_avail(struct xsk_ring_cons *r, __u32 nb);
+extern inline __u32 xsk_ring_prod__reserve(struct xsk_ring_prod *prod, __u32 nb,
+					   __u32 *idx);
+extern inline void xsk_ring_prod__submit(struct xsk_ring_prod *prod, __u32 nb);
+extern inline __u32 xsk_ring_cons__peek(struct xsk_ring_cons *cons, __u32 nb,
+					__u32 *idx);
+extern inline void xsk_ring_cons__cancel(struct xsk_ring_cons *cons, __u32 nb);
+extern inline void xsk_ring_cons__release(struct xsk_ring_cons *cons, __u32 nb);
+extern inline void *xsk_umem__get_data(void *umem_area, __u64 addr);
+extern inline __u64 xsk_umem__extract_addr(__u64 addr);
+extern inline __u64 xsk_umem__extract_offset(__u64 addr);
+extern inline __u64 xsk_umem__add_offset_to_addr(__u64 addr);
+
 int xsk_umem__fd(const struct xsk_umem *umem)
 {
 	return umem ? umem->fd : -EINVAL;
diff --git a/tools/lib/bpf/xsk.h b/tools/lib/bpf/xsk.h
index 01c12dca9c10..8ab1d0453fbe 100644
--- a/tools/lib/bpf/xsk.h
+++ b/tools/lib/bpf/xsk.h
@@ -111,15 +111,15 @@ DEFINE_XSK_RING(xsk_ring_cons);
 struct xsk_umem;
 struct xsk_socket;
 
-static inline __u64 *xsk_ring_prod__fill_addr(struct xsk_ring_prod *fill,
-					      __u32 idx)
+LIBBPF_API __always_inline __u64 *
+xsk_ring_prod__fill_addr(struct xsk_ring_prod *fill, __u32 idx)
 {
 	__u64 *addrs = (__u64 *)fill->ring;
 
 	return &addrs[idx & fill->mask];
 }
 
-static inline const __u64 *
+LIBBPF_API __always_inline const __u64 *
 xsk_ring_cons__comp_addr(const struct xsk_ring_cons *comp, __u32 idx)
 {
 	const __u64 *addrs = (const __u64 *)comp->ring;
@@ -127,15 +127,15 @@ xsk_ring_cons__comp_addr(const struct xsk_ring_cons *comp, __u32 idx)
 	return &addrs[idx & comp->mask];
 }
 
-static inline struct xdp_desc *xsk_ring_prod__tx_desc(struct xsk_ring_prod *tx,
-						      __u32 idx)
+LIBBPF_API __always_inline struct xdp_desc *
+xsk_ring_prod__tx_desc(struct xsk_ring_prod *tx, __u32 idx)
 {
 	struct xdp_desc *descs = (struct xdp_desc *)tx->ring;
 
 	return &descs[idx & tx->mask];
 }
 
-static inline const struct xdp_desc *
+LIBBPF_API __always_inline const struct xdp_desc *
 xsk_ring_cons__rx_desc(const struct xsk_ring_cons *rx, __u32 idx)
 {
 	const struct xdp_desc *descs = (const struct xdp_desc *)rx->ring;
@@ -143,12 +143,14 @@ xsk_ring_cons__rx_desc(const struct xsk_ring_cons *rx, __u32 idx)
 	return &descs[idx & rx->mask];
 }
 
-static inline int xsk_ring_prod__needs_wakeup(const struct xsk_ring_prod *r)
+LIBBPF_API __always_inline int
+xsk_ring_prod__needs_wakeup(const struct xsk_ring_prod *r)
 {
 	return *r->flags & XDP_RING_NEED_WAKEUP;
 }
 
-static inline __u32 xsk_prod_nb_free(struct xsk_ring_prod *r, __u32 nb)
+LIBBPF_API __always_inline __u32 xsk_prod_nb_free(struct xsk_ring_prod *r,
+						  __u32 nb)
 {
 	__u32 free_entries = r->cached_cons - r->cached_prod;
 
@@ -168,7 +170,8 @@ static inline __u32 xsk_prod_nb_free(struct xsk_ring_prod *r, __u32 nb)
 	return r->cached_cons - r->cached_prod;
 }
 
-static inline __u32 xsk_cons_nb_avail(struct xsk_ring_cons *r, __u32 nb)
+LIBBPF_API __always_inline __u32 xsk_cons_nb_avail(struct xsk_ring_cons *r,
+						   __u32 nb)
 {
 	__u32 entries = r->cached_prod - r->cached_cons;
 
@@ -180,7 +183,8 @@ static inline __u32 xsk_cons_nb_avail(struct xsk_ring_cons *r, __u32 nb)
 	return (entries > nb) ? nb : entries;
 }
 
-static inline __u32 xsk_ring_prod__reserve(struct xsk_ring_prod *prod, __u32 nb, __u32 *idx)
+LIBBPF_API __always_inline __u32
+xsk_ring_prod__reserve(struct xsk_ring_prod *prod, __u32 nb, __u32 *idx)
 {
 	if (xsk_prod_nb_free(prod, nb) < nb)
 		return 0;
@@ -191,7 +195,8 @@ static inline __u32 xsk_ring_prod__reserve(struct xsk_ring_prod *prod, __u32 nb,
 	return nb;
 }
 
-static inline void xsk_ring_prod__submit(struct xsk_ring_prod *prod, __u32 nb)
+LIBBPF_API __always_inline void
+xsk_ring_prod__submit(struct xsk_ring_prod *prod, __u32 nb)
 {
 	/* Make sure everything has been written to the ring before indicating
 	 * this to the kernel by writing the producer pointer.
@@ -199,7 +204,8 @@ static inline void xsk_ring_prod__submit(struct xsk_ring_prod *prod, __u32 nb)
 	libbpf_smp_store_release(prod->producer, *prod->producer + nb);
 }
 
-static inline __u32 xsk_ring_cons__peek(struct xsk_ring_cons *cons, __u32 nb, __u32 *idx)
+LIBBPF_API __always_inline __u32 xsk_ring_cons__peek(struct xsk_ring_cons *cons,
+						     __u32 nb, __u32 *idx)
 {
 	__u32 entries = xsk_cons_nb_avail(cons, nb);
 
@@ -211,36 +217,37 @@ static inline __u32 xsk_ring_cons__peek(struct xsk_ring_cons *cons, __u32 nb, __
 	return entries;
 }
 
-static inline void xsk_ring_cons__cancel(struct xsk_ring_cons *cons, __u32 nb)
+LIBBPF_API __always_inline void
+xsk_ring_cons__cancel(struct xsk_ring_cons *cons, __u32 nb)
 {
 	cons->cached_cons -= nb;
 }
 
-static inline void xsk_ring_cons__release(struct xsk_ring_cons *cons, __u32 nb)
+LIBBPF_API __always_inline void
+xsk_ring_cons__release(struct xsk_ring_cons *cons, __u32 nb)
 {
 	/* Make sure data has been read before indicating we are done
 	 * with the entries by updating the consumer pointer.
 	 */
 	libbpf_smp_store_release(cons->consumer, *cons->consumer + nb);
-
 }
 
-static inline void *xsk_umem__get_data(void *umem_area, __u64 addr)
+LIBBPF_API __always_inline void *xsk_umem__get_data(void *umem_area, __u64 addr)
 {
 	return &((char *)umem_area)[addr];
 }
 
-static inline __u64 xsk_umem__extract_addr(__u64 addr)
+LIBBPF_API __always_inline __u64 xsk_umem__extract_addr(__u64 addr)
 {
 	return addr & XSK_UNALIGNED_BUF_ADDR_MASK;
 }
 
-static inline __u64 xsk_umem__extract_offset(__u64 addr)
+LIBBPF_API __always_inline __u64 xsk_umem__extract_offset(__u64 addr)
 {
 	return addr >> XSK_UNALIGNED_BUF_OFFSET_SHIFT;
 }
 
-static inline __u64 xsk_umem__add_offset_to_addr(__u64 addr)
+LIBBPF_API __always_inline __u64 xsk_umem__add_offset_to_addr(__u64 addr)
 {
 	return xsk_umem__extract_addr(addr) + xsk_umem__extract_offset(addr);
 }
-- 
2.30.2



Thread overview: 4+ messages
2021-04-26 20:22 [PATCH] libbpf: export inline helpers as symbols for xsk Kumar Kartikeya Dwivedi
2021-04-26 23:20 ` Andrii Nakryiko
2021-04-27  6:49   ` Magnus Karlsson
2021-04-27 18:05     ` Andrii Nakryiko
