* [PATCH net-next 0/3] sctp: Introduce sctp_flush_ctx
@ 2018-05-11 23:29 Marcelo Ricardo Leitner
  2018-05-11 23:29 ` [PATCH net-next 1/3] sctp: add sctp_flush_ctx, a context struct on outq_flush routines Marcelo Ricardo Leitner
                   ` (2 more replies)
  0 siblings, 3 replies; 4+ messages in thread
From: Marcelo Ricardo Leitner @ 2018-05-11 23:29 UTC (permalink / raw)
  To: netdev; +Cc: linux-sctp, Neil Horman, Vlad Yasevich, Xin Long

This struct will hold all the context used during the outq flush, so we
don't have to pass lots of pointers all around.

Checked on x86_64: the compiler inlines all these functions and no extra
dereference is added because of the struct.
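
For illustration only, a minimal sketch of the pattern introduced by this
series; flush_helper is a hypothetical name standing in for the outq_flush
routines changed in patch 1:

    /* Before: every helper takes the queue, the current transport, the
     * list of transports with pending chunks and the gfp flags.
     */
    static void flush_helper(struct sctp_outq *q,
                             struct sctp_transport **transport,
                             struct list_head *transport_list,
                             gfp_t gfp);

    /* After: one context struct, built once in sctp_outq_flush(), holds
     * the flush state and is passed around as a single pointer.
     */
    struct sctp_flush_ctx {
            struct sctp_outq *q;
            struct sctp_transport *transport;
            struct list_head transport_list;
            gfp_t gfp;
    };

    static void flush_helper(struct sctp_flush_ctx *ctx);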

Marcelo Ricardo Leitner (3):
  sctp: add sctp_flush_ctx, a context struct on outq_flush routines
  sctp: add asoc and packet to sctp_flush_ctx
  sctp: checkpatch fixups

 net/sctp/outqueue.c | 259 ++++++++++++++++++++++++----------------------------
 1 file changed, 119 insertions(+), 140 deletions(-)

* [PATCH net-next 1/3] sctp: add sctp_flush_ctx, a context struct on outq_flush routines
  2018-05-11 23:29 [PATCH net-next 0/3] sctp: Introduce sctp_flush_ctx Marcelo Ricardo Leitner
@ 2018-05-11 23:29 ` Marcelo Ricardo Leitner
  2018-05-11 23:30 ` [PATCH net-next 2/3] sctp: add asoc and packet to sctp_flush_ctx Marcelo Ricardo Leitner
  2018-05-11 23:30 ` [PATCH net-next 3/3] sctp: checkpatch fixups Marcelo Ricardo Leitner
  2 siblings, 0 replies; 4+ messages in thread
From: Marcelo Ricardo Leitner @ 2018-05-11 23:29 UTC (permalink / raw)
  To: netdev; +Cc: linux-sctp, Neil Horman, Vlad Yasevich, Xin Long

With this struct we avoid passing lots of variables around and having each
caller take care of updating the current transport/packet.

Signed-off-by: Marcelo Ricardo Leitner <marcelo.leitner@gmail.com>
---
 net/sctp/outqueue.c | 182 +++++++++++++++++++++++++---------------------------
 1 file changed, 88 insertions(+), 94 deletions(-)

diff --git a/net/sctp/outqueue.c b/net/sctp/outqueue.c
index c7f65bcd7bd6ee6996080d091bda1651f7bb8c44..db94a2513dd874149aa77c4936f68537e97f8855 100644
--- a/net/sctp/outqueue.c
+++ b/net/sctp/outqueue.c
@@ -791,13 +791,22 @@ static int sctp_packet_singleton(struct sctp_transport *transport,
 	return sctp_packet_transmit(&singleton, gfp);
 }
 
-static bool sctp_outq_select_transport(struct sctp_chunk *chunk,
-				       struct sctp_association *asoc,
-				       struct sctp_transport **transport,
-				       struct list_head *transport_list)
+/* Struct to hold the context during sctp outq flush */
+struct sctp_flush_ctx {
+	struct sctp_outq *q;
+	/* Current transport being used. It's NOT the same as curr active one */
+	struct sctp_transport *transport;
+	/* These transports have chunks to send. */
+	struct list_head transport_list;
+	gfp_t gfp;
+};
+
+/* transport: current transport */
+static bool sctp_outq_select_transport(struct sctp_flush_ctx *ctx,
+				       struct sctp_chunk *chunk)
 {
 	struct sctp_transport *new_transport = chunk->transport;
-	struct sctp_transport *curr = *transport;
+	struct sctp_association *asoc = ctx->q->asoc;
 	bool changed = false;
 
 	if (!new_transport) {
@@ -812,9 +821,9 @@ static bool sctp_outq_select_transport(struct sctp_chunk *chunk,
 			 * after processing ASCONFs, we may have new
 			 * transports created.
 			 */
-			if (curr && sctp_cmp_addr_exact(&chunk->dest,
-							&curr->ipaddr))
-				new_transport = curr;
+			if (ctx->transport && sctp_cmp_addr_exact(&chunk->dest,
+							&ctx->transport->ipaddr))
+				new_transport = ctx->transport;
 			else
 				new_transport = sctp_assoc_lookup_paddr(asoc,
 								  &chunk->dest);
@@ -857,37 +866,33 @@ static bool sctp_outq_select_transport(struct sctp_chunk *chunk,
 	}
 
 	/* Are we switching transports? Take care of transport locks. */
-	if (new_transport != curr) {
+	if (new_transport != ctx->transport) {
 		changed = true;
-		curr = new_transport;
-		*transport = curr;
-		if (list_empty(&curr->send_ready))
-			list_add_tail(&curr->send_ready, transport_list);
+		ctx->transport = new_transport;
+		if (list_empty(&ctx->transport->send_ready))
+			list_add_tail(&ctx->transport->send_ready,
+				      &ctx->transport_list);
 
-		sctp_packet_config(&curr->packet, asoc->peer.i.init_tag,
+		sctp_packet_config(&ctx->transport->packet, asoc->peer.i.init_tag,
 				   asoc->peer.ecn_capable);
 		/* We've switched transports, so apply the
 		 * Burst limit to the new transport.
 		 */
-		sctp_transport_burst_limited(curr);
+		sctp_transport_burst_limited(ctx->transport);
 	}
 
 	return changed;
 }
 
-static void sctp_outq_flush_ctrl(struct sctp_outq *q,
-				 struct sctp_transport **_transport,
-				 struct list_head *transport_list,
-				 gfp_t gfp)
+static void sctp_outq_flush_ctrl(struct sctp_flush_ctx *ctx)
 {
-	struct sctp_transport *transport = *_transport;
-	struct sctp_association *asoc = q->asoc;
+	struct sctp_association *asoc = ctx->q->asoc;
 	struct sctp_packet *packet = NULL;
 	struct sctp_chunk *chunk, *tmp;
 	enum sctp_xmit status;
 	int one_packet, error;
 
-	list_for_each_entry_safe(chunk, tmp, &q->control_chunk_list, list) {
+	list_for_each_entry_safe(chunk, tmp, &ctx->q->control_chunk_list, list) {
 		one_packet = 0;
 
 		/* RFC 5061, 5.3
@@ -905,11 +910,8 @@ static void sctp_outq_flush_ctrl(struct sctp_outq *q,
 		/* Pick the right transport to use. Should always be true for
 		 * the first chunk as we don't have a transport by then.
 		 */
-		if (sctp_outq_select_transport(chunk, asoc, &transport,
-					       &transport_list)) {
-			transport = *_transport;
-			packet = &transport->packet;
-		}
+		if (sctp_outq_select_transport(ctx, chunk))
+			packet = &ctx->transport->packet;
 
 		switch (chunk->chunk_hdr->type) {
 		/*
@@ -921,7 +923,8 @@ static void sctp_outq_flush_ctrl(struct sctp_outq *q,
 		case SCTP_CID_INIT:
 		case SCTP_CID_INIT_ACK:
 		case SCTP_CID_SHUTDOWN_COMPLETE:
-			error = sctp_packet_singleton(transport, chunk, gfp);
+			error = sctp_packet_singleton(ctx->transport, chunk,
+						      ctx->gfp);
 			if (error < 0) {
 				asoc->base.sk->sk_err = -error;
 				return;
@@ -957,10 +960,10 @@ static void sctp_outq_flush_ctrl(struct sctp_outq *q,
 		case SCTP_CID_I_FWD_TSN:
 		case SCTP_CID_RECONF:
 			status = sctp_packet_transmit_chunk(packet, chunk,
-							    one_packet, gfp);
+							    one_packet, ctx->gfp);
 			if (status != SCTP_XMIT_OK) {
 				/* put the chunk back */
-				list_add(&chunk->list, &q->control_chunk_list);
+				list_add(&chunk->list, &ctx->q->control_chunk_list);
 				break;
 			}
 
@@ -971,12 +974,12 @@ static void sctp_outq_flush_ctrl(struct sctp_outq *q,
 			 */
 			if (chunk->chunk_hdr->type == SCTP_CID_FWD_TSN ||
 			    chunk->chunk_hdr->type == SCTP_CID_I_FWD_TSN) {
-				sctp_transport_reset_t3_rtx(transport);
-				transport->last_time_sent = jiffies;
+				sctp_transport_reset_t3_rtx(ctx->transport);
+				ctx->transport->last_time_sent = jiffies;
 			}
 
 			if (chunk == asoc->strreset_chunk)
-				sctp_transport_reset_reconf_timer(transport);
+				sctp_transport_reset_reconf_timer(ctx->transport);
 
 			break;
 
@@ -988,41 +991,38 @@ static void sctp_outq_flush_ctrl(struct sctp_outq *q,
 }
 
 /* Returns false if new data shouldn't be sent */
-static bool sctp_outq_flush_rtx(struct sctp_outq *q,
-				struct sctp_transport **_transport,
-				struct list_head *transport_list,
-				int rtx_timeout, gfp_t gfp)
+static bool sctp_outq_flush_rtx(struct sctp_flush_ctx *ctx,
+				int rtx_timeout)
 {
-	struct sctp_transport *transport = *_transport;
-	struct sctp_packet *packet = transport ? &transport->packet : NULL;
-	struct sctp_association *asoc = q->asoc;
+	struct sctp_packet *packet = ctx->transport ? &ctx->transport->packet :
+				     NULL;
+	struct sctp_association *asoc = ctx->q->asoc;
 	int error, start_timer = 0;
 
 	if (asoc->peer.retran_path->state == SCTP_UNCONFIRMED)
 		return false;
 
-	if (transport != asoc->peer.retran_path) {
+	if (ctx->transport != asoc->peer.retran_path) {
 		/* Switch transports & prepare the packet.  */
-		transport = asoc->peer.retran_path;
-		*_transport = transport;
+		ctx->transport = asoc->peer.retran_path;
 
-		if (list_empty(&transport->send_ready))
-			list_add_tail(&transport->send_ready,
-				      transport_list);
+		if (list_empty(&ctx->transport->send_ready))
+			list_add_tail(&ctx->transport->send_ready,
+				      &ctx->transport_list);
 
-		packet = &transport->packet;
+		packet = &ctx->transport->packet;
 		sctp_packet_config(packet, asoc->peer.i.init_tag,
 				   asoc->peer.ecn_capable);
 	}
 
-	error = __sctp_outq_flush_rtx(q, packet, rtx_timeout, &start_timer,
-				      gfp);
+	error = __sctp_outq_flush_rtx(ctx->q, packet, rtx_timeout, &start_timer,
+				      ctx->gfp);
 	if (error < 0)
 		asoc->base.sk->sk_err = -error;
 
 	if (start_timer) {
-		sctp_transport_reset_t3_rtx(transport);
-		transport->last_time_sent = jiffies;
+		sctp_transport_reset_t3_rtx(ctx->transport);
+		ctx->transport->last_time_sent = jiffies;
 	}
 
 	/* This can happen on COOKIE-ECHO resend.  Only
@@ -1034,20 +1034,18 @@ static bool sctp_outq_flush_rtx(struct sctp_outq *q,
 	/* Don't send new data if there is still data
 	 * waiting to retransmit.
 	 */
-	if (!list_empty(&q->retransmit))
+	if (!list_empty(&ctx->q->retransmit))
 		return false;
 
 	return true;
 }
 
-static void sctp_outq_flush_data(struct sctp_outq *q,
-				 struct sctp_transport **_transport,
-				 struct list_head *transport_list,
-				 int rtx_timeout, gfp_t gfp)
+static void sctp_outq_flush_data(struct sctp_flush_ctx *ctx,
+				 int rtx_timeout)
 {
-	struct sctp_transport *transport = *_transport;
-	struct sctp_packet *packet = transport ? &transport->packet : NULL;
-	struct sctp_association *asoc = q->asoc;
+	struct sctp_packet *packet = ctx->transport ? &ctx->transport->packet :
+				     NULL;
+	struct sctp_association *asoc = ctx->q->asoc;
 	struct sctp_chunk *chunk;
 	enum sctp_xmit status;
 
@@ -1080,13 +1078,11 @@ static void sctp_outq_flush_data(struct sctp_outq *q,
 	 * are marked for retransmission (limited by the
 	 * current cwnd).
 	 */
-	if (!list_empty(&q->retransmit)) {
-		if (!sctp_outq_flush_rtx(q, _transport, transport_list,
-					 rtx_timeout, gfp))
+	if (!list_empty(&ctx->q->retransmit)) {
+		if (!sctp_outq_flush_rtx(ctx, rtx_timeout))
 			return;
 		/* We may have switched current transport */
-		transport = *_transport;
-		packet = &transport->packet;
+		packet = &ctx->transport->packet;
 	}
 
 	/* Apply Max.Burst limitation to the current transport in
@@ -1094,42 +1090,39 @@ static void sctp_outq_flush_data(struct sctp_outq *q,
 	 * rest it before we return, but we want to apply the limit
 	 * to the currently queued data.
 	 */
-	if (transport)
-		sctp_transport_burst_limited(transport);
+	if (ctx->transport)
+		sctp_transport_burst_limited(ctx->transport);
 
 	/* Finally, transmit new packets.  */
-	while ((chunk = sctp_outq_dequeue_data(q)) != NULL) {
+	while ((chunk = sctp_outq_dequeue_data(ctx->q)) != NULL) {
 		__u32 sid = ntohs(chunk->subh.data_hdr->stream);
 
 		/* Has this chunk expired? */
 		if (sctp_chunk_abandoned(chunk)) {
-			sctp_sched_dequeue_done(q, chunk);
+			sctp_sched_dequeue_done(ctx->q, chunk);
 			sctp_chunk_fail(chunk, 0);
 			sctp_chunk_free(chunk);
 			continue;
 		}
 
 		if (asoc->stream.out[sid].state == SCTP_STREAM_CLOSED) {
-			sctp_outq_head_data(q, chunk);
+			sctp_outq_head_data(ctx->q, chunk);
 			break;
 		}
 
-		if (sctp_outq_select_transport(chunk, asoc, &transport,
-					       &transport_list)) {
-			transport = *_transport;
-			packet = &transport->packet;
-		}
+		if (sctp_outq_select_transport(ctx, chunk))
+			packet = &ctx->transport->packet;
 
 		pr_debug("%s: outq:%p, chunk:%p[%s], tx-tsn:0x%x skb->head:%p "
 			 "skb->users:%d\n",
-			 __func__, q, chunk, chunk && chunk->chunk_hdr ?
+			 __func__, ctx->q, chunk, chunk && chunk->chunk_hdr ?
 			 sctp_cname(SCTP_ST_CHUNK(chunk->chunk_hdr->type)) :
 			 "illegal chunk", ntohl(chunk->subh.data_hdr->tsn),
 			 chunk->skb ? chunk->skb->head : NULL, chunk->skb ?
 			 refcount_read(&chunk->skb->users) : -1);
 
 		/* Add the chunk to the packet.  */
-		status = sctp_packet_transmit_chunk(packet, chunk, 0, gfp);
+		status = sctp_packet_transmit_chunk(packet, chunk, 0, ctx->gfp);
 		if (status != SCTP_XMIT_OK) {
 			/* We could not append this chunk, so put
 			 * the chunk back on the output queue.
@@ -1138,7 +1131,7 @@ static void sctp_outq_flush_data(struct sctp_outq *q,
 				 __func__, ntohl(chunk->subh.data_hdr->tsn),
 				 status);
 
-			sctp_outq_head_data(q, chunk);
+			sctp_outq_head_data(ctx->q, chunk);
 			break;
 		}
 
@@ -1156,13 +1149,13 @@ static void sctp_outq_flush_data(struct sctp_outq *q,
 		/* Only now it's safe to consider this
 		 * chunk as sent, sched-wise.
 		 */
-		sctp_sched_dequeue_done(q, chunk);
+		sctp_sched_dequeue_done(ctx->q, chunk);
 
 		list_add_tail(&chunk->transmitted_list,
-			      &transport->transmitted);
+			      &ctx->transport->transmitted);
 
-		sctp_transport_reset_t3_rtx(transport);
-		transport->last_time_sent = jiffies;
+		sctp_transport_reset_t3_rtx(ctx->transport);
+		ctx->transport->last_time_sent = jiffies;
 
 		/* Only let one DATA chunk get bundled with a
 		 * COOKIE-ECHO chunk.
@@ -1172,22 +1165,20 @@ static void sctp_outq_flush_data(struct sctp_outq *q,
 	}
 }
 
-static void sctp_outq_flush_transports(struct sctp_outq *q,
-				       struct list_head *transport_list,
-				       gfp_t gfp)
+static void sctp_outq_flush_transports(struct sctp_flush_ctx *ctx)
 {
 	struct list_head *ltransport;
 	struct sctp_packet *packet;
 	struct sctp_transport *t;
 	int error = 0;
 
-	while ((ltransport = sctp_list_dequeue(transport_list)) != NULL) {
+	while ((ltransport = sctp_list_dequeue(&ctx->transport_list)) != NULL) {
 		t = list_entry(ltransport, struct sctp_transport, send_ready);
 		packet = &t->packet;
 		if (!sctp_packet_empty(packet)) {
-			error = sctp_packet_transmit(packet, gfp);
+			error = sctp_packet_transmit(packet, ctx->gfp);
 			if (error < 0)
-				q->asoc->base.sk->sk_err = -error;
+				ctx->q->asoc->base.sk->sk_err = -error;
 		}
 
 		/* Clear the burst limited state, if any */
@@ -1204,12 +1195,15 @@ static void sctp_outq_flush_transports(struct sctp_outq *q,
  * locking concerns must be made.  Today we use the sock lock to protect
  * this function.
  */
+
 static void sctp_outq_flush(struct sctp_outq *q, int rtx_timeout, gfp_t gfp)
 {
-	/* Current transport being used. It's NOT the same as curr active one */
-	struct sctp_transport *transport = NULL;
-	/* These transports have chunks to send. */
-	LIST_HEAD(transport_list);
+	struct sctp_flush_ctx ctx = {
+		.q = q,
+		.transport = NULL,
+		.transport_list = LIST_HEAD_INIT(ctx.transport_list),
+		.gfp = gfp,
+	};
 
 	/*
 	 * 6.10 Bundling
@@ -1221,16 +1215,16 @@ static void sctp_outq_flush(struct sctp_outq *q, int rtx_timeout, gfp_t gfp)
 	 *   ...
 	 */
 
-	sctp_outq_flush_ctrl(q, &transport, &transport_list, gfp);
+	sctp_outq_flush_ctrl(&ctx);
 
 	if (q->asoc->src_out_of_asoc_ok)
 		goto sctp_flush_out;
 
-	sctp_outq_flush_data(q, &transport, &transport_list, rtx_timeout, gfp);
+	sctp_outq_flush_data(&ctx, rtx_timeout);
 
 sctp_flush_out:
 
-	sctp_outq_flush_transports(q, &transport_list, gfp);
+	sctp_outq_flush_transports(&ctx);
 }
 
 /* Update unack_data based on the incoming SACK chunk */
-- 
2.14.3

* [PATCH net-next 2/3] sctp: add asoc and packet to sctp_flush_ctx
  2018-05-11 23:29 [PATCH net-next 0/3] sctp: Introduce sctp_flush_ctx Marcelo Ricardo Leitner
  2018-05-11 23:29 ` [PATCH net-next 1/3] sctp: add sctp_flush_ctx, a context struct on outq_flush routines Marcelo Ricardo Leitner
@ 2018-05-11 23:30 ` Marcelo Ricardo Leitner
  2018-05-11 23:30 ` [PATCH net-next 3/3] sctp: checkpatch fixups Marcelo Ricardo Leitner
  2 siblings, 0 replies; 4+ messages in thread
From: Marcelo Ricardo Leitner @ 2018-05-11 23:30 UTC (permalink / raw)
  To: netdev; +Cc: linux-sctp, Neil Horman, Vlad Yasevich, Xin Long

Pre-compute these pointers so the compiler won't keep reloading them (it
cannot cache them across stores because the kernel is built with
-fno-strict-aliasing).
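
As a rough illustration only (not part of the patch; the counter access is
just an example taken from the flush code):

    /* With -fno-strict-aliasing the compiler must assume that stores
     * through other pointers may have changed q->asoc, so a chained
     * access like this can be reloaded from memory on every use:
     */
    ctx->q->asoc->stats.octrlchunks++;

    /* Caching asoc (and the current packet) in the context once, in
     * sctp_outq_flush(), shortens the chain for every later access:
     */
    ctx->asoc = q->asoc;
    ctx->asoc->stats.octrlchunks++;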

Signed-off-by: Marcelo Ricardo Leitner <marcelo.leitner@gmail.com>
---
 net/sctp/outqueue.c | 99 ++++++++++++++++++++++++-----------------------------
 1 file changed, 45 insertions(+), 54 deletions(-)

diff --git a/net/sctp/outqueue.c b/net/sctp/outqueue.c
index db94a2513dd874149aa77c4936f68537e97f8855..a594d181fa1178c34cf477e13d700f7b37e72e21 100644
--- a/net/sctp/outqueue.c
+++ b/net/sctp/outqueue.c
@@ -798,16 +798,17 @@ struct sctp_flush_ctx {
 	struct sctp_transport *transport;
 	/* These transports have chunks to send. */
 	struct list_head transport_list;
+	struct sctp_association *asoc;
+	/* Packet on the current transport above */
+	struct sctp_packet *packet;
 	gfp_t gfp;
 };
 
 /* transport: current transport */
-static bool sctp_outq_select_transport(struct sctp_flush_ctx *ctx,
+static void sctp_outq_select_transport(struct sctp_flush_ctx *ctx,
 				       struct sctp_chunk *chunk)
 {
 	struct sctp_transport *new_transport = chunk->transport;
-	struct sctp_association *asoc = ctx->q->asoc;
-	bool changed = false;
 
 	if (!new_transport) {
 		if (!sctp_chunk_is_data(chunk)) {
@@ -825,7 +826,7 @@ static bool sctp_outq_select_transport(struct sctp_flush_ctx *ctx,
 							&ctx->transport->ipaddr))
 				new_transport = ctx->transport;
 			else
-				new_transport = sctp_assoc_lookup_paddr(asoc,
+				new_transport = sctp_assoc_lookup_paddr(ctx->asoc,
 								  &chunk->dest);
 		}
 
@@ -833,7 +834,7 @@ static bool sctp_outq_select_transport(struct sctp_flush_ctx *ctx,
 		 * use the current active path.
 		 */
 		if (!new_transport)
-			new_transport = asoc->peer.active_path;
+			new_transport = ctx->asoc->peer.active_path;
 	} else {
 		__u8 type;
 
@@ -858,7 +859,7 @@ static bool sctp_outq_select_transport(struct sctp_flush_ctx *ctx,
 			if (type != SCTP_CID_HEARTBEAT &&
 			    type != SCTP_CID_HEARTBEAT_ACK &&
 			    type != SCTP_CID_ASCONF_ACK)
-				new_transport = asoc->peer.active_path;
+				new_transport = ctx->asoc->peer.active_path;
 			break;
 		default:
 			break;
@@ -867,27 +868,25 @@ static bool sctp_outq_select_transport(struct sctp_flush_ctx *ctx,
 
 	/* Are we switching transports? Take care of transport locks. */
 	if (new_transport != ctx->transport) {
-		changed = true;
 		ctx->transport = new_transport;
+		ctx->packet = &ctx->transport->packet;
+
 		if (list_empty(&ctx->transport->send_ready))
 			list_add_tail(&ctx->transport->send_ready,
 				      &ctx->transport_list);
 
-		sctp_packet_config(&ctx->transport->packet, asoc->peer.i.init_tag,
-				   asoc->peer.ecn_capable);
+		sctp_packet_config(ctx->packet,
+				   ctx->asoc->peer.i.init_tag,
+				   ctx->asoc->peer.ecn_capable);
 		/* We've switched transports, so apply the
 		 * Burst limit to the new transport.
 		 */
 		sctp_transport_burst_limited(ctx->transport);
 	}
-
-	return changed;
 }
 
 static void sctp_outq_flush_ctrl(struct sctp_flush_ctx *ctx)
 {
-	struct sctp_association *asoc = ctx->q->asoc;
-	struct sctp_packet *packet = NULL;
 	struct sctp_chunk *chunk, *tmp;
 	enum sctp_xmit status;
 	int one_packet, error;
@@ -901,7 +900,7 @@ static void sctp_outq_flush_ctrl(struct sctp_flush_ctx *ctx)
 		 * NOT use the new IP address as a source for ANY SCTP
 		 * packet except on carrying an ASCONF Chunk.
 		 */
-		if (asoc->src_out_of_asoc_ok &&
+		if (ctx->asoc->src_out_of_asoc_ok &&
 		    chunk->chunk_hdr->type != SCTP_CID_ASCONF)
 			continue;
 
@@ -910,8 +909,7 @@ static void sctp_outq_flush_ctrl(struct sctp_flush_ctx *ctx)
 		/* Pick the right transport to use. Should always be true for
 		 * the first chunk as we don't have a transport by then.
 		 */
-		if (sctp_outq_select_transport(ctx, chunk))
-			packet = &ctx->transport->packet;
+		sctp_outq_select_transport(ctx, chunk);
 
 		switch (chunk->chunk_hdr->type) {
 		/*
@@ -926,14 +924,14 @@ static void sctp_outq_flush_ctrl(struct sctp_flush_ctx *ctx)
 			error = sctp_packet_singleton(ctx->transport, chunk,
 						      ctx->gfp);
 			if (error < 0) {
-				asoc->base.sk->sk_err = -error;
+				ctx->asoc->base.sk->sk_err = -error;
 				return;
 			}
 			break;
 
 		case SCTP_CID_ABORT:
 			if (sctp_test_T_bit(chunk))
-				packet->vtag = asoc->c.my_vtag;
+				ctx->packet->vtag = ctx->asoc->c.my_vtag;
 			/* fallthru */
 
 		/* The following chunks are "response" chunks, i.e.
@@ -959,7 +957,7 @@ static void sctp_outq_flush_ctrl(struct sctp_flush_ctx *ctx)
 		case SCTP_CID_FWD_TSN:
 		case SCTP_CID_I_FWD_TSN:
 		case SCTP_CID_RECONF:
-			status = sctp_packet_transmit_chunk(packet, chunk,
+			status = sctp_packet_transmit_chunk(ctx->packet, chunk,
 							    one_packet, ctx->gfp);
 			if (status != SCTP_XMIT_OK) {
 				/* put the chunk back */
@@ -967,7 +965,7 @@ static void sctp_outq_flush_ctrl(struct sctp_flush_ctx *ctx)
 				break;
 			}
 
-			asoc->stats.octrlchunks++;
+			ctx->asoc->stats.octrlchunks++;
 			/* PR-SCTP C5) If a FORWARD TSN is sent, the
 			 * sender MUST assure that at least one T3-rtx
 			 * timer is running.
@@ -978,7 +976,7 @@ static void sctp_outq_flush_ctrl(struct sctp_flush_ctx *ctx)
 				ctx->transport->last_time_sent = jiffies;
 			}
 
-			if (chunk == asoc->strreset_chunk)
+			if (chunk == ctx->asoc->strreset_chunk)
 				sctp_transport_reset_reconf_timer(ctx->transport);
 
 			break;
@@ -994,31 +992,28 @@ static void sctp_outq_flush_ctrl(struct sctp_flush_ctx *ctx)
 static bool sctp_outq_flush_rtx(struct sctp_flush_ctx *ctx,
 				int rtx_timeout)
 {
-	struct sctp_packet *packet = ctx->transport ? &ctx->transport->packet :
-				     NULL;
-	struct sctp_association *asoc = ctx->q->asoc;
 	int error, start_timer = 0;
 
-	if (asoc->peer.retran_path->state == SCTP_UNCONFIRMED)
+	if (ctx->asoc->peer.retran_path->state == SCTP_UNCONFIRMED)
 		return false;
 
-	if (ctx->transport != asoc->peer.retran_path) {
+	if (ctx->transport != ctx->asoc->peer.retran_path) {
 		/* Switch transports & prepare the packet.  */
-		ctx->transport = asoc->peer.retran_path;
+		ctx->transport = ctx->asoc->peer.retran_path;
+		ctx->packet = &ctx->transport->packet;
 
 		if (list_empty(&ctx->transport->send_ready))
 			list_add_tail(&ctx->transport->send_ready,
 				      &ctx->transport_list);
 
-		packet = &ctx->transport->packet;
-		sctp_packet_config(packet, asoc->peer.i.init_tag,
-				   asoc->peer.ecn_capable);
+		sctp_packet_config(ctx->packet, ctx->asoc->peer.i.init_tag,
+				   ctx->asoc->peer.ecn_capable);
 	}
 
-	error = __sctp_outq_flush_rtx(ctx->q, packet, rtx_timeout, &start_timer,
-				      ctx->gfp);
+	error = __sctp_outq_flush_rtx(ctx->q, ctx->packet, rtx_timeout,
+				      &start_timer, ctx->gfp);
 	if (error < 0)
-		asoc->base.sk->sk_err = -error;
+		ctx->asoc->base.sk->sk_err = -error;
 
 	if (start_timer) {
 		sctp_transport_reset_t3_rtx(ctx->transport);
@@ -1028,7 +1023,7 @@ static bool sctp_outq_flush_rtx(struct sctp_flush_ctx *ctx,
 	/* This can happen on COOKIE-ECHO resend.  Only
 	 * one chunk can get bundled with a COOKIE-ECHO.
 	 */
-	if (packet->has_cookie_echo)
+	if (ctx->packet->has_cookie_echo)
 		return false;
 
 	/* Don't send new data if there is still data
@@ -1043,20 +1038,17 @@ static bool sctp_outq_flush_rtx(struct sctp_flush_ctx *ctx,
 static void sctp_outq_flush_data(struct sctp_flush_ctx *ctx,
 				 int rtx_timeout)
 {
-	struct sctp_packet *packet = ctx->transport ? &ctx->transport->packet :
-				     NULL;
-	struct sctp_association *asoc = ctx->q->asoc;
 	struct sctp_chunk *chunk;
 	enum sctp_xmit status;
 
 	/* Is it OK to send data chunks?  */
-	switch (asoc->state) {
+	switch (ctx->asoc->state) {
 	case SCTP_STATE_COOKIE_ECHOED:
 		/* Only allow bundling when this packet has a COOKIE-ECHO
 		 * chunk.
 		 */
-		if (!packet || !packet->has_cookie_echo)
-			return;
+		if (!ctx->packet || !ctx->packet->has_cookie_echo)
+			break;
 
 		/* fallthru */
 	case SCTP_STATE_ESTABLISHED:
@@ -1078,12 +1070,9 @@ static void sctp_outq_flush_data(struct sctp_flush_ctx *ctx,
 	 * are marked for retransmission (limited by the
 	 * current cwnd).
 	 */
-	if (!list_empty(&ctx->q->retransmit)) {
-		if (!sctp_outq_flush_rtx(ctx, rtx_timeout))
-			return;
-		/* We may have switched current transport */
-		packet = &ctx->transport->packet;
-	}
+	if (!list_empty(&ctx->q->retransmit) &&
+	    !sctp_outq_flush_rtx(ctx, rtx_timeout))
+		return;
 
 	/* Apply Max.Burst limitation to the current transport in
 	 * case it will be used for new data.  We are going to
@@ -1105,13 +1094,12 @@ static void sctp_outq_flush_data(struct sctp_flush_ctx *ctx,
 			continue;
 		}
 
-		if (asoc->stream.out[sid].state == SCTP_STREAM_CLOSED) {
+		if (ctx->asoc->stream.out[sid].state == SCTP_STREAM_CLOSED) {
 			sctp_outq_head_data(ctx->q, chunk);
 			break;
 		}
 
-		if (sctp_outq_select_transport(ctx, chunk))
-			packet = &ctx->transport->packet;
+		sctp_outq_select_transport(ctx, chunk);
 
 		pr_debug("%s: outq:%p, chunk:%p[%s], tx-tsn:0x%x skb->head:%p "
 			 "skb->users:%d\n",
@@ -1122,7 +1110,8 @@ static void sctp_outq_flush_data(struct sctp_flush_ctx *ctx,
 			 refcount_read(&chunk->skb->users) : -1);
 
 		/* Add the chunk to the packet.  */
-		status = sctp_packet_transmit_chunk(packet, chunk, 0, ctx->gfp);
+		status = sctp_packet_transmit_chunk(ctx->packet, chunk, 0,
+						    ctx->gfp);
 		if (status != SCTP_XMIT_OK) {
 			/* We could not append this chunk, so put
 			 * the chunk back on the output queue.
@@ -1139,12 +1128,12 @@ static void sctp_outq_flush_data(struct sctp_flush_ctx *ctx,
 		 * The sender MAY set the I-bit in the DATA
 		 * chunk header.
 		 */
-		if (asoc->state == SCTP_STATE_SHUTDOWN_PENDING)
+		if (ctx->asoc->state == SCTP_STATE_SHUTDOWN_PENDING)
 			chunk->chunk_hdr->flags |= SCTP_DATA_SACK_IMM;
 		if (chunk->chunk_hdr->flags & SCTP_DATA_UNORDERED)
-			asoc->stats.ouodchunks++;
+			ctx->asoc->stats.ouodchunks++;
 		else
-			asoc->stats.oodchunks++;
+			ctx->asoc->stats.oodchunks++;
 
 		/* Only now it's safe to consider this
 		 * chunk as sent, sched-wise.
@@ -1160,7 +1149,7 @@ static void sctp_outq_flush_data(struct sctp_flush_ctx *ctx,
 		/* Only let one DATA chunk get bundled with a
 		 * COOKIE-ECHO chunk.
 		 */
-		if (packet->has_cookie_echo)
+		if (ctx->packet->has_cookie_echo)
 			break;
 	}
 }
@@ -1202,6 +1191,8 @@ static void sctp_outq_flush(struct sctp_outq *q, int rtx_timeout, gfp_t gfp)
 		.q = q,
 		.transport = NULL,
 		.transport_list = LIST_HEAD_INIT(ctx.transport_list),
+		.asoc = q->asoc,
+		.packet = NULL,
 		.gfp = gfp,
 	};
 
-- 
2.14.3

* [PATCH net-next 3/3] sctp: checkpatch fixups
  2018-05-11 23:29 [PATCH net-next 0/3] sctp: Introduce sctp_flush_ctx Marcelo Ricardo Leitner
  2018-05-11 23:29 ` [PATCH net-next 1/3] sctp: add sctp_flush_ctx, a context struct on outq_flush routines Marcelo Ricardo Leitner
  2018-05-11 23:30 ` [PATCH net-next 2/3] sctp: add asoc and packet to sctp_flush_ctx Marcelo Ricardo Leitner
@ 2018-05-11 23:30 ` Marcelo Ricardo Leitner
  2 siblings, 0 replies; 4+ messages in thread
From: Marcelo Ricardo Leitner @ 2018-05-11 23:30 UTC (permalink / raw)
  To: netdev; +Cc: linux-sctp, Neil Horman, Vlad Yasevich, Xin Long

A collection of checkpatch fixups deferred from the previous patches, left
for this last patch so as not to introduce unnecessary changes while moving
code around.

Signed-off-by: Marcelo Ricardo Leitner <marcelo.leitner@gmail.com>
---
 net/sctp/outqueue.c | 20 +++++++-------------
 1 file changed, 7 insertions(+), 13 deletions(-)

diff --git a/net/sctp/outqueue.c b/net/sctp/outqueue.c
index a594d181fa1178c34cf477e13d700f7b37e72e21..9a2fa7d6d68b1d695cd745ed612eb32193f947e0 100644
--- a/net/sctp/outqueue.c
+++ b/net/sctp/outqueue.c
@@ -812,8 +812,7 @@ static void sctp_outq_select_transport(struct sctp_flush_ctx *ctx,
 
 	if (!new_transport) {
 		if (!sctp_chunk_is_data(chunk)) {
-			/*
-			 * If we have a prior transport pointer, see if
+			/* If we have a prior transport pointer, see if
 			 * the destination address of the chunk
 			 * matches the destination address of the
 			 * current transport.  If not a match, then
@@ -912,8 +911,7 @@ static void sctp_outq_flush_ctrl(struct sctp_flush_ctx *ctx)
 		sctp_outq_select_transport(ctx, chunk);
 
 		switch (chunk->chunk_hdr->type) {
-		/*
-		 * 6.10 Bundling
+		/* 6.10 Bundling
 		 *   ...
 		 *   An endpoint MUST NOT bundle INIT, INIT ACK or SHUTDOWN
 		 *   COMPLETE with any other chunks.  [Send them immediately.]
@@ -1061,8 +1059,7 @@ static void sctp_outq_flush_data(struct sctp_flush_ctx *ctx,
 		return;
 	}
 
-	/*
-	 * RFC 2960 6.1  Transmission of DATA Chunks
+	/* RFC 2960 6.1  Transmission of DATA Chunks
 	 *
 	 * C) When the time comes for the sender to transmit,
 	 * before sending new DATA chunks, the sender MUST
@@ -1101,8 +1098,7 @@ static void sctp_outq_flush_data(struct sctp_flush_ctx *ctx,
 
 		sctp_outq_select_transport(ctx, chunk);
 
-		pr_debug("%s: outq:%p, chunk:%p[%s], tx-tsn:0x%x skb->head:%p "
-			 "skb->users:%d\n",
+		pr_debug("%s: outq:%p, chunk:%p[%s], tx-tsn:0x%x skb->head:%p skb->users:%d\n",
 			 __func__, ctx->q, chunk, chunk && chunk->chunk_hdr ?
 			 sctp_cname(SCTP_ST_CHUNK(chunk->chunk_hdr->type)) :
 			 "illegal chunk", ntohl(chunk->subh.data_hdr->tsn),
@@ -1175,8 +1171,7 @@ static void sctp_outq_flush_transports(struct sctp_flush_ctx *ctx)
 	}
 }
 
-/*
- * Try to flush an outqueue.
+/* Try to flush an outqueue.
  *
  * Description: Send everything in q which we legally can, subject to
  * congestion limitations.
@@ -1196,8 +1191,7 @@ static void sctp_outq_flush(struct sctp_outq *q, int rtx_timeout, gfp_t gfp)
 		.gfp = gfp,
 	};
 
-	/*
-	 * 6.10 Bundling
+	/* 6.10 Bundling
 	 *   ...
 	 *   When bundling control chunks with DATA chunks, an
 	 *   endpoint MUST place control chunks first in the outbound
@@ -1768,7 +1762,7 @@ static int sctp_acked(struct sctp_sackhdr *sack, __u32 tsn)
 	if (TSN_lte(tsn, ctsn))
 		goto pass;
 
-	/* 3.3.4 Selective Acknowledgement (SACK) (3):
+	/* 3.3.4 Selective Acknowledgment (SACK) (3):
 	 *
 	 * Gap Ack Blocks:
 	 *  These fields contain the Gap Ack Blocks. They are repeated
-- 
2.14.3
