From: David Howells <dhowells@redhat.com>
To: davem@davemloft.net
Cc: dhowells@redhat.com, netdev@vger.kernel.org,
	linux-afs@lists.infradead.org, linux-kernel@vger.kernel.org
Subject: [PATCH net-next 11/24] rxrpc: Release a call's connection ref on call disconnection
Date: Tue, 05 Jul 2016 14:13:29 +0100
Message-ID: <146772440976.21657.10583100542600642704.stgit@warthog.procyon.org.uk>
In-Reply-To: <146772433082.21657.14046392058484946464.stgit@warthog.procyon.org.uk>

When a call is disconnected, clear the call's pointer to the connection and
release the associated ref on that connection.  This means that the call no
longer pins the connection and the connection can be discarded even before
the call is.
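
In outline, the disconnect path after this patch looks like this (condensed
from the conn_object.c hunk below, with the channel release elided):

	void rxrpc_disconnect_call(struct rxrpc_call *call)
	{
		struct rxrpc_connection *conn = call->conn;

		/* ... give the channel back under conn->channel_lock ... */

		call->conn = NULL;		/* the call no longer pins the conn */
		rxrpc_put_connection(conn);	/* release the call's ref on it */
	}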

As the code currently stands, the call struct is effectively pinned by
userspace until userspace has performed a recvmsg() to retrieve the final
call state, since the sk_buffs on the receive queue pin the call to which
they're related.  This is because:

 (1) The rxrpc_call struct contains the userspace ID that recvmsg() has to
     include in the control message buffer to indicate which call is being
     referred to (see the recvmsg() sketch after this list).  This ID must
     remain valid until the terminal packet is completely read and must be
     invalidated at that point, as userspace is entitled to reuse it
     immediately.

 (2) The final ACK to the reply to a client call isn't sent until the last
     data packet is entirely read (it's probably worth altering this in
     future to send the ACK as soon as all the data has been received).
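
By way of illustration for (1), here's a minimal userspace sketch (not part
of this patch) of pulling the ID out of recvmsg()'s control messages.  It
assumes a bound AF_RXRPC socket fd and the RXRPC_USER_CALL_ID control
message documented in Documentation/networking/rxrpc.txt; error handling
and the actual data path are elided:

	#include <stdio.h>
	#include <string.h>
	#include <sys/socket.h>
	#include <linux/rxrpc.h>

	#ifndef SOL_RXRPC
	#define SOL_RXRPC 272	/* matches include/linux/socket.h */
	#endif

	static void recv_one(int fd)
	{
		unsigned long user_id = 0;
		char buf[4096], cbuf[256];
		struct iovec iov = { .iov_base = buf, .iov_len = sizeof(buf) };
		struct msghdr msg = {
			.msg_iov	= &iov,
			.msg_iovlen	= 1,
			.msg_control	= cbuf,
			.msg_controllen	= sizeof(cbuf),
		};
		struct cmsghdr *cmsg;
		ssize_t n = recvmsg(fd, &msg, 0);

		if (n < 0)
			return;

		/* Find out which call this data belongs to. */
		for (cmsg = CMSG_FIRSTHDR(&msg); cmsg;
		     cmsg = CMSG_NXTHDR(&msg, cmsg))
			if (cmsg->cmsg_level == SOL_RXRPC &&
			    cmsg->cmsg_type == RXRPC_USER_CALL_ID)
				memcpy(&user_id, CMSG_DATA(cmsg),
				       sizeof(user_id));

		/* Once the terminal packet has been read in full, the kernel
		 * invalidates user_id and userspace may reuse it at once.
		 */
		printf("call %lx: %zd bytes\n", user_id, n);
	}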


This change requires a bit of rearrangement to make sure that the call
isn't going to try to access the connection again after protocol
completion (a condensed view of the result follows the list):

 (1) Delete the error link earlier when we're releasing the call.  Possibly
     network errors should be distributed via connections in future, at the
     cost of adding an access to the rxrpc_connection struct.

 (2) Remove the call from the connection's call tree before disconnecting
     the call.  The call tree needs to go away anyway, with incoming
     packets being delivered by channel pointer instead.

 (3) The release call event should be considered last, after all other
     events have been processed, so that we don't need access to the
     connection again.

 (4) Move the channel_lock taking from rxrpc_release_call() to
     rxrpc_disconnect_call() where it will be required in future.
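
Putting (1)-(4) together, the tail of rxrpc_release_call() then runs in
this order (condensed from the call_object.c hunks below; the socket-side
unlinking under rx->call_lock is elided):

	spin_lock(&conn->params.peer->lock);
	hlist_del_init(&call->error_link);	/* (1) error link goes early */
	spin_unlock(&conn->params.peer->lock);

	write_lock_bh(&conn->lock);
	write_lock(&call->state_lock);
	/* ... mark the call locally aborted if it's not yet complete ... */
	write_unlock(&call->state_lock);
	rb_erase(&call->conn_node, &conn->calls); /* (2) leave the call tree */
	write_unlock_bh(&conn->lock);

	rxrpc_disconnect_call(call);	/* (4) takes channel_lock, drops ref */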

Signed-off-by: David Howells <dhowells@redhat.com>
---

 net/rxrpc/call_event.c  |   10 +++++-----
 net/rxrpc/call_object.c |   26 +++++++++-----------------
 net/rxrpc/conn_object.c |    8 ++++++++
 3 files changed, 22 insertions(+), 22 deletions(-)

diff --git a/net/rxrpc/call_event.c b/net/rxrpc/call_event.c
index 0ba84295f913..638d66df284a 100644
--- a/net/rxrpc/call_event.c
+++ b/net/rxrpc/call_event.c
@@ -858,11 +858,6 @@ void rxrpc_process_call(struct work_struct *work)
 	iov[0].iov_len	= sizeof(whdr);
 
 	/* deal with events of a final nature */
-	if (test_bit(RXRPC_CALL_EV_RELEASE, &call->events)) {
-		rxrpc_release_call(call);
-		clear_bit(RXRPC_CALL_EV_RELEASE, &call->events);
-	}
-
 	if (test_bit(RXRPC_CALL_EV_RCVD_ERROR, &call->events)) {
 		enum rxrpc_skb_mark mark;
 		int error;
@@ -1144,6 +1139,11 @@ void rxrpc_process_call(struct work_struct *work)
 		goto maybe_reschedule;
 	}
 
+	if (test_bit(RXRPC_CALL_EV_RELEASE, &call->events)) {
+		rxrpc_release_call(call);
+		clear_bit(RXRPC_CALL_EV_RELEASE, &call->events);
+	}
+
 	/* other events may have been raised since we started checking */
 	goto maybe_reschedule;
 
diff --git a/net/rxrpc/call_object.c b/net/rxrpc/call_object.c
index 6223a7ed831f..b43d89c89744 100644
--- a/net/rxrpc/call_object.c
+++ b/net/rxrpc/call_object.c
@@ -628,6 +628,10 @@ void rxrpc_release_call(struct rxrpc_call *call)
 	 */
 	_debug("RELEASE CALL %p (%d CONN %p)", call, call->debug_id, conn);
 
+	spin_lock(&conn->params.peer->lock);
+	hlist_del_init(&call->error_link);
+	spin_unlock(&conn->params.peer->lock);
+
 	write_lock_bh(&rx->call_lock);
 	if (!list_empty(&call->accept_link)) {
 		_debug("unlinking once-pending call %p { e=%lx f=%lx }",
@@ -643,25 +647,22 @@ void rxrpc_release_call(struct rxrpc_call *call)
 	write_unlock_bh(&rx->call_lock);
 
 	/* free up the channel for reuse */
-	spin_lock(&conn->channel_lock);
 	write_lock_bh(&conn->lock);
 	write_lock(&call->state_lock);
 
-	rxrpc_disconnect_call(call);
-
-	spin_unlock(&conn->channel_lock);
-
 	if (call->state < RXRPC_CALL_COMPLETE &&
 	    call->state != RXRPC_CALL_CLIENT_FINAL_ACK) {
 		_debug("+++ ABORTING STATE %d +++\n", call->state);
 		call->state = RXRPC_CALL_LOCALLY_ABORTED;
 		call->local_abort = RX_CALL_DEAD;
-		set_bit(RXRPC_CALL_EV_ABORT, &call->events);
-		rxrpc_queue_call(call);
 	}
 	write_unlock(&call->state_lock);
+
+	rb_erase(&call->conn_node, &conn->calls);
 	write_unlock_bh(&conn->lock);
 
+	rxrpc_disconnect_call(call);
+
 	/* clean up the Rx queue */
 	if (!skb_queue_empty(&call->rx_queue) ||
 	    !skb_queue_empty(&call->rx_oos_queue)) {
@@ -817,16 +818,7 @@ static void rxrpc_cleanup_call(struct rxrpc_call *call)
 		return;
 	}
 
-	if (call->conn) {
-		spin_lock(&call->conn->params.peer->lock);
-		hlist_del_init(&call->error_link);
-		spin_unlock(&call->conn->params.peer->lock);
-
-		write_lock_bh(&call->conn->lock);
-		rb_erase(&call->conn_node, &call->conn->calls);
-		write_unlock_bh(&call->conn->lock);
-		rxrpc_put_connection(call->conn);
-	}
+	ASSERTCMP(call->conn, ==, NULL);
 
 	/* Remove the call from the hash */
 	rxrpc_call_hash_del(call);
diff --git a/net/rxrpc/conn_object.c b/net/rxrpc/conn_object.c
index f28578b31281..0c84ce4ecc1d 100644
--- a/net/rxrpc/conn_object.c
+++ b/net/rxrpc/conn_object.c
@@ -543,11 +543,19 @@ void rxrpc_disconnect_call(struct rxrpc_call *call)
 
 	_enter("%d,%d", conn->debug_id, call->channel);
 
+	spin_lock(&conn->channel_lock);
+
 	if (conn->channels[chan] == call) {
 		rcu_assign_pointer(conn->channels[chan], NULL);
 		atomic_inc(&conn->avail_chans);
 		wake_up(&conn->channel_wq);
 	}
+
+	spin_unlock(&conn->channel_lock);
+
+	call->conn = NULL;
+	rxrpc_put_connection(conn);
+	_leave("");
 }
 
 /*
