From: David Howells <dhowells@redhat.com>
To: netdev@vger.kernel.org
Cc: Marc Dionne <marc.dionne@auristor.com>,
	linux-afs@lists.infradead.org, dhowells@redhat.com,
	linux-kernel@vger.kernel.org
Subject: [PATCH net-next 11/36] rxrpc: trace: Don't use __builtin_return_address for rxrpc_peer tracing
Date: Fri, 02 Dec 2022 00:16:35 +0000
Message-ID: <166994019574.1732290.17398431845997866728.stgit@warthog.procyon.org.uk>
In-Reply-To: <166994010342.1732290.13771061038178613124.stgit@warthog.procyon.org.uk>

In rxrpc tracing, use enums to generate lists of points of interest rather
than __builtin_return_address() for the rxrpc_peer tracepoint.
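
For reference, rxrpc_peer_traces is a list of EM()/E_() pairs that the
usual preprocessor plumbing in rxrpc.h expands both to declare the enum
members and to build the { value, string } pairs consumed by
__print_symbolic() in the trace output.  A rough sketch of that pattern
(illustrative only, not the exact header text):

	/* First expansion: declare the enum of trace points. */
	#define EM(a, b) a,
	#define E_(a, b) a
	enum rxrpc_peer_trace { rxrpc_peer_traces };

	/* Second expansion: map each enum value to its display string
	 * for __print_symbolic() in TP_printk().
	 */
	#undef EM
	#undef E_
	#define EM(a, b) { a, b },
	#define E_(a, b) { a, b }

With the tracepoint changed to take the enum and the refcount, a trace
line for, say, rxrpc_peer_get_bundle comes out along the lines of
"P=00000003 GET bundle   r=2" instead of carrying a return address.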

Signed-off-by: David Howells <dhowells@redhat.com>
cc: Marc Dionne <marc.dionne@auristor.com>
cc: linux-afs@lists.infradead.org
---

 include/trace/events/rxrpc.h |   43 +++++++++++++++++++++++++-----------------
 net/rxrpc/af_rxrpc.c         |    2 +-
 net/rxrpc/ar-internal.h      |   11 ++++++-----
 net/rxrpc/call_accept.c      |    8 +++++---
 net/rxrpc/call_object.c      |    2 +-
 net/rxrpc/conn_client.c      |    8 ++++----
 net/rxrpc/conn_object.c      |    2 +-
 net/rxrpc/peer_event.c       |    8 ++++----
 net/rxrpc/peer_object.c      |   34 ++++++++++++++++-----------------
 net/rxrpc/sendmsg.c          |    2 +-
 10 files changed, 65 insertions(+), 55 deletions(-)

diff --git a/include/trace/events/rxrpc.h b/include/trace/events/rxrpc.h
index 015569845b1d..1c74143a51c1 100644
--- a/include/trace/events/rxrpc.h
+++ b/include/trace/events/rxrpc.h
@@ -63,10 +63,23 @@
 	E_(rxrpc_local_use_work,		"USE work    ")
 
 #define rxrpc_peer_traces \
-	EM(rxrpc_peer_got,			"GOT") \
-	EM(rxrpc_peer_new,			"NEW") \
-	EM(rxrpc_peer_processing,		"PRO") \
-	E_(rxrpc_peer_put,			"PUT")
+	EM(rxrpc_peer_free,			"FREE        ") \
+	EM(rxrpc_peer_get_accept,		"GET accept  ") \
+	EM(rxrpc_peer_get_activate_call,	"GET act-call") \
+	EM(rxrpc_peer_get_bundle,		"GET bundle  ") \
+	EM(rxrpc_peer_get_client_conn,		"GET cln-conn") \
+	EM(rxrpc_peer_get_input_error,		"GET inpt-err") \
+	EM(rxrpc_peer_get_keepalive,		"GET keepaliv") \
+	EM(rxrpc_peer_get_lookup_client,	"GET look-cln") \
+	EM(rxrpc_peer_get_service_conn,		"GET srv-conn") \
+	EM(rxrpc_peer_new_client,		"NEW client  ") \
+	EM(rxrpc_peer_new_prealloc,		"NEW prealloc") \
+	EM(rxrpc_peer_put_bundle,		"PUT bundle  ") \
+	EM(rxrpc_peer_put_call,			"PUT call    ") \
+	EM(rxrpc_peer_put_conn,			"PUT conn    ") \
+	EM(rxrpc_peer_put_discard_tmp,		"PUT disc-tmp") \
+	EM(rxrpc_peer_put_input_error,		"PUT inpt-err") \
+	E_(rxrpc_peer_put_keepalive,		"PUT keepaliv")
 
 #define rxrpc_conn_traces \
 	EM(rxrpc_conn_got,			"GOT") \
@@ -394,30 +407,26 @@ TRACE_EVENT(rxrpc_local,
 	    );
 
 TRACE_EVENT(rxrpc_peer,
-	    TP_PROTO(unsigned int peer_debug_id, enum rxrpc_peer_trace op,
-		     int usage, const void *where),
+	    TP_PROTO(unsigned int peer_debug_id, int ref, enum rxrpc_peer_trace why),
 
-	    TP_ARGS(peer_debug_id, op, usage, where),
+	    TP_ARGS(peer_debug_id, ref, why),
 
 	    TP_STRUCT__entry(
 		    __field(unsigned int,	peer		)
-		    __field(int,		op		)
-		    __field(int,		usage		)
-		    __field(const void *,	where		)
+		    __field(int,		ref		)
+		    __field(int,		why		)
 			     ),
 
 	    TP_fast_assign(
 		    __entry->peer = peer_debug_id;
-		    __entry->op = op;
-		    __entry->usage = usage;
-		    __entry->where = where;
+		    __entry->ref = ref;
+		    __entry->why = why;
 			   ),
 
-	    TP_printk("P=%08x %s u=%d sp=%pSR",
+	    TP_printk("P=%08x %s r=%d",
 		      __entry->peer,
-		      __print_symbolic(__entry->op, rxrpc_peer_traces),
-		      __entry->usage,
-		      __entry->where)
+		      __print_symbolic(__entry->why, rxrpc_peer_traces),
+		      __entry->ref)
 	    );
 
 TRACE_EVENT(rxrpc_conn,
diff --git a/net/rxrpc/af_rxrpc.c b/net/rxrpc/af_rxrpc.c
index 989ebca899f3..7a0dc01741e7 100644
--- a/net/rxrpc/af_rxrpc.c
+++ b/net/rxrpc/af_rxrpc.c
@@ -328,7 +328,7 @@ struct rxrpc_call *rxrpc_kernel_begin_call(struct socket *sock,
 		mutex_unlock(&call->user_mutex);
 	}
 
-	rxrpc_put_peer(cp.peer);
+	rxrpc_put_peer(cp.peer, rxrpc_peer_put_discard_tmp);
 	_leave(" = %p", call);
 	return call;
 }
diff --git a/net/rxrpc/ar-internal.h b/net/rxrpc/ar-internal.h
index dde9ce21ef48..6cb111e9761c 100644
--- a/net/rxrpc/ar-internal.h
+++ b/net/rxrpc/ar-internal.h
@@ -1063,14 +1063,15 @@ struct rxrpc_peer *rxrpc_lookup_peer_rcu(struct rxrpc_local *,
 					 const struct sockaddr_rxrpc *);
 struct rxrpc_peer *rxrpc_lookup_peer(struct rxrpc_sock *, struct rxrpc_local *,
 				     struct sockaddr_rxrpc *, gfp_t);
-struct rxrpc_peer *rxrpc_alloc_peer(struct rxrpc_local *, gfp_t);
+struct rxrpc_peer *rxrpc_alloc_peer(struct rxrpc_local *, gfp_t,
+				    enum rxrpc_peer_trace);
 void rxrpc_new_incoming_peer(struct rxrpc_sock *, struct rxrpc_local *,
 			     struct rxrpc_peer *);
 void rxrpc_destroy_all_peers(struct rxrpc_net *);
-struct rxrpc_peer *rxrpc_get_peer(struct rxrpc_peer *);
-struct rxrpc_peer *rxrpc_get_peer_maybe(struct rxrpc_peer *);
-void rxrpc_put_peer(struct rxrpc_peer *);
-void rxrpc_put_peer_locked(struct rxrpc_peer *);
+struct rxrpc_peer *rxrpc_get_peer(struct rxrpc_peer *, enum rxrpc_peer_trace);
+struct rxrpc_peer *rxrpc_get_peer_maybe(struct rxrpc_peer *, enum rxrpc_peer_trace);
+void rxrpc_put_peer(struct rxrpc_peer *, enum rxrpc_peer_trace);
+void rxrpc_put_peer_locked(struct rxrpc_peer *, enum rxrpc_peer_trace);
 
 /*
  * proc.c
diff --git a/net/rxrpc/call_accept.c b/net/rxrpc/call_accept.c
index 1b12d4e28373..f6bc3b07c3e5 100644
--- a/net/rxrpc/call_accept.c
+++ b/net/rxrpc/call_accept.c
@@ -70,7 +70,9 @@ static int rxrpc_service_prealloc_one(struct rxrpc_sock *rx,
 	head = b->peer_backlog_head;
 	tail = READ_ONCE(b->peer_backlog_tail);
 	if (CIRC_CNT(head, tail, size) < max) {
-		struct rxrpc_peer *peer = rxrpc_alloc_peer(rx->local, gfp);
+		struct rxrpc_peer *peer;
+
+		peer = rxrpc_alloc_peer(rx->local, gfp, rxrpc_peer_new_prealloc);
 		if (!peer)
 			return -ENOMEM;
 		b->peer_backlog[head] = peer;
@@ -286,7 +288,7 @@ static struct rxrpc_call *rxrpc_alloc_incoming_call(struct rxrpc_sock *rx,
 		return NULL;
 
 	if (!conn) {
-		if (peer && !rxrpc_get_peer_maybe(peer))
+		if (peer && !rxrpc_get_peer_maybe(peer, rxrpc_peer_get_service_conn))
 			peer = NULL;
 		if (!peer) {
 			peer = b->peer_backlog[peer_tail];
@@ -323,7 +325,7 @@ static struct rxrpc_call *rxrpc_alloc_incoming_call(struct rxrpc_sock *rx,
 	call->conn = conn;
 	call->security = conn->security;
 	call->security_ix = conn->security_ix;
-	call->peer = rxrpc_get_peer(conn->peer);
+	call->peer = rxrpc_get_peer(conn->peer, rxrpc_peer_get_accept);
 	call->cong_ssthresh = call->peer->cong_ssthresh;
 	call->tx_last_sent = ktime_get_real();
 	return call;
diff --git a/net/rxrpc/call_object.c b/net/rxrpc/call_object.c
index 59928f0a8fe1..1b725afd6e2c 100644
--- a/net/rxrpc/call_object.c
+++ b/net/rxrpc/call_object.c
@@ -636,7 +636,7 @@ static void rxrpc_destroy_call(struct work_struct *work)
 	rxrpc_delete_call_timer(call);
 
 	rxrpc_put_connection(call->conn);
-	rxrpc_put_peer(call->peer);
+	rxrpc_put_peer(call->peer, rxrpc_peer_put_call);
 	kmem_cache_free(rxrpc_call_jar, call);
 	if (atomic_dec_and_test(&rxnet->nr_calls))
 		wake_up_var(&rxnet->nr_calls);
diff --git a/net/rxrpc/conn_client.c b/net/rxrpc/conn_client.c
index 9a69b4c1b182..9444da235a48 100644
--- a/net/rxrpc/conn_client.c
+++ b/net/rxrpc/conn_client.c
@@ -123,7 +123,7 @@ static struct rxrpc_bundle *rxrpc_alloc_bundle(struct rxrpc_conn_parameters *cp,
 	bundle = kzalloc(sizeof(*bundle), gfp);
 	if (bundle) {
 		bundle->local		= cp->local;
-		bundle->peer		= rxrpc_get_peer(cp->peer);
+		bundle->peer		= rxrpc_get_peer(cp->peer, rxrpc_peer_get_bundle);
 		bundle->key		= cp->key;
 		bundle->exclusive	= cp->exclusive;
 		bundle->upgrade		= cp->upgrade;
@@ -145,7 +145,7 @@ struct rxrpc_bundle *rxrpc_get_bundle(struct rxrpc_bundle *bundle)
 
 static void rxrpc_free_bundle(struct rxrpc_bundle *bundle)
 {
-	rxrpc_put_peer(bundle->peer);
+	rxrpc_put_peer(bundle->peer, rxrpc_peer_put_bundle);
 	kfree(bundle);
 }
 
@@ -207,7 +207,7 @@ rxrpc_alloc_client_connection(struct rxrpc_bundle *bundle, gfp_t gfp)
 	write_unlock(&rxnet->conn_lock);
 
 	rxrpc_get_bundle(bundle);
-	rxrpc_get_peer(conn->peer);
+	rxrpc_get_peer(conn->peer, rxrpc_peer_get_client_conn);
 	rxrpc_get_local(conn->local, rxrpc_local_get_client_conn);
 	key_get(conn->key);
 
@@ -543,7 +543,7 @@ static void rxrpc_activate_one_channel(struct rxrpc_connection *conn,
 
 	rxrpc_see_call(call);
 	list_del_init(&call->chan_wait_link);
-	call->peer	= rxrpc_get_peer(conn->peer);
+	call->peer	= rxrpc_get_peer(conn->peer, rxrpc_peer_get_activate_call);
 	call->conn	= rxrpc_get_connection(conn);
 	call->cid	= conn->proto.cid | channel;
 	call->call_id	= call_id;
diff --git a/net/rxrpc/conn_object.c b/net/rxrpc/conn_object.c
index 725359afeac0..554ee5dd3325 100644
--- a/net/rxrpc/conn_object.c
+++ b/net/rxrpc/conn_object.c
@@ -362,7 +362,7 @@ static void rxrpc_destroy_connection(struct rcu_head *rcu)
 	conn->security->clear(conn);
 	key_put(conn->key);
 	rxrpc_put_bundle(conn->bundle);
-	rxrpc_put_peer(conn->peer);
+	rxrpc_put_peer(conn->peer, rxrpc_peer_put_conn);
 
 	if (atomic_dec_and_test(&conn->local->rxnet->nr_conns))
 		wake_up_var(&conn->local->rxnet->nr_conns);
diff --git a/net/rxrpc/peer_event.c b/net/rxrpc/peer_event.c
index 3f8d104ecaa7..5e97d321ac38 100644
--- a/net/rxrpc/peer_event.c
+++ b/net/rxrpc/peer_event.c
@@ -168,7 +168,7 @@ void rxrpc_error_report(struct sock *sk)
 	}
 
 	peer = rxrpc_lookup_peer_local_rcu(local, skb, &srx);
-	if (peer && !rxrpc_get_peer_maybe(peer))
+	if (peer && !rxrpc_get_peer_maybe(peer, rxrpc_peer_get_input_error))
 		peer = NULL;
 	if (!peer) {
 		rcu_read_unlock();
@@ -190,7 +190,7 @@ void rxrpc_error_report(struct sock *sk)
 out:
 	rcu_read_unlock();
 	rxrpc_free_skb(skb, rxrpc_skb_freed);
-	rxrpc_put_peer(peer);
+	rxrpc_put_peer(peer, rxrpc_peer_put_input_error);
 
 	_leave("");
 }
@@ -263,7 +263,7 @@ static void rxrpc_peer_keepalive_dispatch(struct rxrpc_net *rxnet,
 				  struct rxrpc_peer, keepalive_link);
 
 		list_del_init(&peer->keepalive_link);
-		if (!rxrpc_get_peer_maybe(peer))
+		if (!rxrpc_get_peer_maybe(peer, rxrpc_peer_get_keepalive))
 			continue;
 
 		if (__rxrpc_use_local(peer->local, rxrpc_local_use_peer_keepalive)) {
@@ -291,7 +291,7 @@ static void rxrpc_peer_keepalive_dispatch(struct rxrpc_net *rxnet,
 				      &rxnet->peer_keepalive[slot & mask]);
 			rxrpc_unuse_local(peer->local, rxrpc_local_unuse_peer_keepalive);
 		}
-		rxrpc_put_peer_locked(peer);
+		rxrpc_put_peer_locked(peer, rxrpc_peer_put_keepalive);
 	}
 
 	spin_unlock_bh(&rxnet->peer_hash_lock);
diff --git a/net/rxrpc/peer_object.c b/net/rxrpc/peer_object.c
index bcef897560e7..9e682a60a800 100644
--- a/net/rxrpc/peer_object.c
+++ b/net/rxrpc/peer_object.c
@@ -205,9 +205,9 @@ static void rxrpc_assess_MTU_size(struct rxrpc_sock *rx,
 /*
  * Allocate a peer.
  */
-struct rxrpc_peer *rxrpc_alloc_peer(struct rxrpc_local *local, gfp_t gfp)
+struct rxrpc_peer *rxrpc_alloc_peer(struct rxrpc_local *local, gfp_t gfp,
+				    enum rxrpc_peer_trace why)
 {
-	const void *here = __builtin_return_address(0);
 	struct rxrpc_peer *peer;
 
 	_enter("");
@@ -226,7 +226,7 @@ struct rxrpc_peer *rxrpc_alloc_peer(struct rxrpc_local *local, gfp_t gfp)
 		rxrpc_peer_init_rtt(peer);
 
 		peer->cong_ssthresh = RXRPC_TX_MAX_WINDOW;
-		trace_rxrpc_peer(peer->debug_id, rxrpc_peer_new, 1, here);
+		trace_rxrpc_peer(peer->debug_id, 1, why);
 	}
 
 	_leave(" = %p", peer);
@@ -282,7 +282,7 @@ static struct rxrpc_peer *rxrpc_create_peer(struct rxrpc_sock *rx,
 
 	_enter("");
 
-	peer = rxrpc_alloc_peer(local, gfp);
+	peer = rxrpc_alloc_peer(local, gfp, rxrpc_peer_new_client);
 	if (peer) {
 		memcpy(&peer->srx, srx, sizeof(*srx));
 		rxrpc_init_peer(rx, peer, hash_key);
@@ -294,6 +294,7 @@ static struct rxrpc_peer *rxrpc_create_peer(struct rxrpc_sock *rx,
 
 static void rxrpc_free_peer(struct rxrpc_peer *peer)
 {
+	trace_rxrpc_peer(peer->debug_id, 0, rxrpc_peer_free);
 	rxrpc_put_local(peer->local, rxrpc_local_put_peer);
 	kfree_rcu(peer, rcu);
 }
@@ -334,7 +335,7 @@ struct rxrpc_peer *rxrpc_lookup_peer(struct rxrpc_sock *rx,
 	/* search the peer list first */
 	rcu_read_lock();
 	peer = __rxrpc_lookup_peer_rcu(local, srx, hash_key);
-	if (peer && !rxrpc_get_peer_maybe(peer))
+	if (peer && !rxrpc_get_peer_maybe(peer, rxrpc_peer_get_lookup_client))
 		peer = NULL;
 	rcu_read_unlock();
 
@@ -352,7 +353,7 @@ struct rxrpc_peer *rxrpc_lookup_peer(struct rxrpc_sock *rx,
 
 		/* Need to check that we aren't racing with someone else */
 		peer = __rxrpc_lookup_peer_rcu(local, srx, hash_key);
-		if (peer && !rxrpc_get_peer_maybe(peer))
+		if (peer && !rxrpc_get_peer_maybe(peer, rxrpc_peer_get_lookup_client))
 			peer = NULL;
 		if (!peer) {
 			hash_add_rcu(rxnet->peer_hash,
@@ -376,27 +377,26 @@ struct rxrpc_peer *rxrpc_lookup_peer(struct rxrpc_sock *rx,
 /*
  * Get a ref on a peer record.
  */
-struct rxrpc_peer *rxrpc_get_peer(struct rxrpc_peer *peer)
+struct rxrpc_peer *rxrpc_get_peer(struct rxrpc_peer *peer, enum rxrpc_peer_trace why)
 {
-	const void *here = __builtin_return_address(0);
 	int r;
 
 	__refcount_inc(&peer->ref, &r);
-	trace_rxrpc_peer(peer->debug_id, rxrpc_peer_got, r + 1, here);
+	trace_rxrpc_peer(peer->debug_id, r + 1, why);
 	return peer;
 }
 
 /*
  * Get a ref on a peer record unless its usage has already reached 0.
  */
-struct rxrpc_peer *rxrpc_get_peer_maybe(struct rxrpc_peer *peer)
+struct rxrpc_peer *rxrpc_get_peer_maybe(struct rxrpc_peer *peer,
+					enum rxrpc_peer_trace why)
 {
-	const void *here = __builtin_return_address(0);
 	int r;
 
 	if (peer) {
 		if (__refcount_inc_not_zero(&peer->ref, &r))
-			trace_rxrpc_peer(peer->debug_id, rxrpc_peer_got, r + 1, here);
+			trace_rxrpc_peer(peer->debug_id, r + 1, why);
 		else
 			peer = NULL;
 	}
@@ -423,9 +423,8 @@ static void __rxrpc_put_peer(struct rxrpc_peer *peer)
 /*
  * Drop a ref on a peer record.
  */
-void rxrpc_put_peer(struct rxrpc_peer *peer)
+void rxrpc_put_peer(struct rxrpc_peer *peer, enum rxrpc_peer_trace why)
 {
-	const void *here = __builtin_return_address(0);
 	unsigned int debug_id;
 	bool dead;
 	int r;
@@ -433,7 +432,7 @@ void rxrpc_put_peer(struct rxrpc_peer *peer)
 	if (peer) {
 		debug_id = peer->debug_id;
 		dead = __refcount_dec_and_test(&peer->ref, &r);
-		trace_rxrpc_peer(debug_id, rxrpc_peer_put, r - 1, here);
+		trace_rxrpc_peer(debug_id, r - 1, why);
 		if (dead)
 			__rxrpc_put_peer(peer);
 	}
@@ -443,15 +442,14 @@ void rxrpc_put_peer(struct rxrpc_peer *peer)
  * Drop a ref on a peer record where the caller already holds the
  * peer_hash_lock.
  */
-void rxrpc_put_peer_locked(struct rxrpc_peer *peer)
+void rxrpc_put_peer_locked(struct rxrpc_peer *peer, enum rxrpc_peer_trace why)
 {
-	const void *here = __builtin_return_address(0);
 	unsigned int debug_id = peer->debug_id;
 	bool dead;
 	int r;
 
 	dead = __refcount_dec_and_test(&peer->ref, &r);
-	trace_rxrpc_peer(debug_id, rxrpc_peer_put, r - 1, here);
+	trace_rxrpc_peer(debug_id, r - 1, why);
 	if (dead) {
 		hash_del_rcu(&peer->hash_link);
 		list_del_init(&peer->keepalive_link);
diff --git a/net/rxrpc/sendmsg.c b/net/rxrpc/sendmsg.c
index e5fd8a95bf71..cfe0badba0b3 100644
--- a/net/rxrpc/sendmsg.c
+++ b/net/rxrpc/sendmsg.c
@@ -604,7 +604,7 @@ rxrpc_new_client_call_for_sendmsg(struct rxrpc_sock *rx, struct msghdr *msg,
 				     atomic_inc_return(&rxrpc_debug_id));
 	/* The socket is now unlocked */
 
-	rxrpc_put_peer(cp.peer);
+	rxrpc_put_peer(cp.peer, rxrpc_peer_put_discard_tmp);
 	_leave(" = %p\n", call);
 	return call;
 }


