* [PATCH net-next 00/15] s390/qeth: updates 2018-09-17
From: Julian Wiedmann @ 2018-09-17 15:35 UTC (permalink / raw)
  To: David Miller
  Cc: netdev, linux-s390, Martin Schwidefsky, Heiko Carstens,
	Stefan Raspl, Ursula Braun, Julian Wiedmann

Hi Dave,

please apply the following patchset to net-next. This brings more restructuring
of qeth's transmit code (eliminating its last usage of skb_realloc_headroom()),
and the usual mix of minor improvements & cleanups.

Thanks,
Julian


Julian Wiedmann (15):
  s390/qeth: move L2 xmit code to core module
  s390/qeth: run non-offload L3 traffic over common xmit path
  s390/qeth: remove unused L3 xmit code
  s390/qeth: remove qeth_get_elements_no()
  s390/qeth: limit csum offload erratum to L3 devices
  s390/qeth: fix up protocol headers early
  s390/qeth: check size of required HW header cache object
  s390/qeth: prepare for copy-free TSO transmission
  s390/qeth: speed up TSO transmission
  s390/qeth: remove qeth_hdr_chk_and_bounce()
  s390/qeth: uninstall IRQ handler on device removal
  s390/qeth: invoke softirqs after napi_schedule()
  s390/qeth: fix typo in return value
  s390/qeth: fine-tune spinlocks
  s390/qeth: reduce 0-initializing when building IPA cmds

 drivers/s390/net/qeth_core.h      |  17 +-
 drivers/s390/net/qeth_core_main.c | 307 ++++++++++++++++++----------------
 drivers/s390/net/qeth_l2_main.c   | 116 +++++--------
 drivers/s390/net/qeth_l3_main.c   | 340 +++++++++++++-------------------------
 4 files changed, 334 insertions(+), 446 deletions(-)

-- 
2.16.4


* [PATCH net-next 01/15] s390/qeth: move L2 xmit code to core module
From: Julian Wiedmann @ 2018-09-17 15:35 UTC (permalink / raw)
  To: David Miller
  Cc: netdev, linux-s390, Martin Schwidefsky, Heiko Carstens,
	Stefan Raspl, Ursula Braun, Julian Wiedmann

We need the exact same transmit path for non-offload-eligible traffic on
L3 OSAs. So make it accessible from both sub-drivers.
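
A rough illustration (not part of the patch) of the calling convention this
creates; the callback name my_fill_header is made up, the real L2
implementation is qeth_l2_fill_header() in the hunks below:

  /* The callback fills only the protocol-specific parts of the HW header;
   * element counting, header allocation and QDIO submission stay in the
   * core's qeth_xmit().
   */
  static void my_fill_header(struct qeth_card *card, struct qeth_hdr *hdr,
                             struct sk_buff *skb, int ipv, int cast_type,
                             unsigned int data_len)
  {
          hdr->hdr.l2.id = QETH_HEADER_TYPE_LAYER2;
          hdr->hdr.l2.pkt_length = data_len;
          /* ... cast type, VLAN and csum flags as needed ... */
  }

  /* from the sub-driver's .ndo_start_xmit: */
  rc = qeth_xmit(card, skb, queue, ipv, cast_type, my_fill_header);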

Signed-off-by: Julian Wiedmann <jwi@linux.ibm.com>
---
 drivers/s390/net/qeth_core.h      |  5 +++
 drivers/s390/net/qeth_core_main.c | 59 +++++++++++++++++++++++++++++++
 drivers/s390/net/qeth_l2_main.c   | 74 ++++++---------------------------------
 3 files changed, 75 insertions(+), 63 deletions(-)

diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h
index 34e0d476c5c6..2110fabdcc7a 100644
--- a/drivers/s390/net/qeth_core.h
+++ b/drivers/s390/net/qeth_core.h
@@ -1052,6 +1052,11 @@ int qeth_vm_request_mac(struct qeth_card *card);
 int qeth_add_hw_header(struct qeth_card *card, struct sk_buff *skb,
 		       struct qeth_hdr **hdr, unsigned int hdr_len,
 		       unsigned int proto_len, unsigned int *elements);
+int qeth_xmit(struct qeth_card *card, struct sk_buff *skb,
+	      struct qeth_qdio_out_q *queue, int ipv, int cast_type,
+	      void (*fill_header)(struct qeth_card *card, struct qeth_hdr *hdr,
+				  struct sk_buff *skb, int ipv, int cast_type,
+				  unsigned int data_len));
 
 /* exports for OSN */
 int qeth_osn_assist(struct net_device *, void *, int);
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index de8282420f96..d2ca33a9330a 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -4176,6 +4176,65 @@ int qeth_do_send_packet(struct qeth_card *card, struct qeth_qdio_out_q *queue,
 }
 EXPORT_SYMBOL_GPL(qeth_do_send_packet);
 
+int qeth_xmit(struct qeth_card *card, struct sk_buff *skb,
+	      struct qeth_qdio_out_q *queue, int ipv, int cast_type,
+	      void (*fill_header)(struct qeth_card *card, struct qeth_hdr *hdr,
+				  struct sk_buff *skb, int ipv, int cast_type,
+				  unsigned int data_len))
+{
+	const unsigned int proto_len = IS_IQD(card) ? ETH_HLEN : 0;
+	const unsigned int hw_hdr_len = sizeof(struct qeth_hdr);
+	unsigned int frame_len = skb->len;
+	unsigned int data_offset = 0;
+	struct qeth_hdr *hdr = NULL;
+	unsigned int hd_len = 0;
+	unsigned int elements;
+	int push_len, rc;
+	bool is_sg;
+
+	rc = skb_cow_head(skb, hw_hdr_len);
+	if (rc)
+		return rc;
+
+	push_len = qeth_add_hw_header(card, skb, &hdr, hw_hdr_len, proto_len,
+				      &elements);
+	if (push_len < 0)
+		return push_len;
+	if (!push_len) {
+		/* HW header needs its own buffer element. */
+		hd_len = hw_hdr_len + proto_len;
+		data_offset = proto_len;
+	}
+	fill_header(card, hdr, skb, ipv, cast_type, frame_len);
+
+	is_sg = skb_is_nonlinear(skb);
+	if (IS_IQD(card)) {
+		rc = qeth_do_send_packet_fast(queue, skb, hdr, data_offset,
+					      hd_len);
+	} else {
+		/* TODO: drop skb_orphan() once TX completion is fast enough */
+		skb_orphan(skb);
+		rc = qeth_do_send_packet(card, queue, skb, hdr, data_offset,
+					 hd_len, elements);
+	}
+
+	if (!rc) {
+		if (card->options.performance_stats) {
+			card->perf_stats.buf_elements_sent += elements;
+			if (is_sg)
+				card->perf_stats.sg_skbs_sent++;
+		}
+	} else {
+		if (!push_len)
+			kmem_cache_free(qeth_core_header_cache, hdr);
+		if (rc == -EBUSY)
+			/* roll back to ETH header */
+			skb_pull(skb, push_len);
+	}
+	return rc;
+}
+EXPORT_SYMBOL_GPL(qeth_xmit);
+
 static int qeth_setadp_promisc_mode_cb(struct qeth_card *card,
 		struct qeth_reply *reply, unsigned long data)
 {
diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c
index b5e38531733f..715d58af5fc4 100644
--- a/drivers/s390/net/qeth_l2_main.c
+++ b/drivers/s390/net/qeth_l2_main.c
@@ -193,8 +193,9 @@ static int qeth_l2_get_cast_type(struct qeth_card *card, struct sk_buff *skb)
 	return RTN_UNICAST;
 }
 
-static void qeth_l2_fill_header(struct qeth_hdr *hdr, struct sk_buff *skb,
-				int cast_type, unsigned int data_len)
+static void qeth_l2_fill_header(struct qeth_card *card, struct qeth_hdr *hdr,
+				struct sk_buff *skb, int ipv, int cast_type,
+				unsigned int data_len)
 {
 	struct vlan_ethhdr *veth = (struct vlan_ethhdr *)skb_mac_header(skb);
 
@@ -202,6 +203,12 @@ static void qeth_l2_fill_header(struct qeth_hdr *hdr, struct sk_buff *skb,
 	hdr->hdr.l2.id = QETH_HEADER_TYPE_LAYER2;
 	hdr->hdr.l2.pkt_length = data_len;
 
+	if (skb->ip_summed == CHECKSUM_PARTIAL) {
+		qeth_tx_csum(skb, &hdr->hdr.l2.flags[1], ipv);
+		if (card->options.performance_stats)
+			card->perf_stats.tx_csum++;
+	}
+
 	/* set byte byte 3 to casting flags */
 	if (cast_type == RTN_MULTICAST)
 		hdr->hdr.l2.flags[2] |= QETH_LAYER2_FLAG_MULTICAST;
@@ -641,66 +648,6 @@ static void qeth_l2_set_rx_mode(struct net_device *dev)
 		qeth_promisc_to_bridge(card);
 }
 
-static int qeth_l2_xmit(struct qeth_card *card, struct sk_buff *skb,
-			struct qeth_qdio_out_q *queue, int cast_type, int ipv)
-{
-	const unsigned int proto_len = IS_IQD(card) ? ETH_HLEN : 0;
-	const unsigned int hw_hdr_len = sizeof(struct qeth_hdr);
-	unsigned int frame_len = skb->len;
-	unsigned int data_offset = 0;
-	struct qeth_hdr *hdr = NULL;
-	unsigned int hd_len = 0;
-	unsigned int elements;
-	int push_len, rc;
-	bool is_sg;
-
-	rc = skb_cow_head(skb, hw_hdr_len);
-	if (rc)
-		return rc;
-
-	push_len = qeth_add_hw_header(card, skb, &hdr, hw_hdr_len, proto_len,
-				      &elements);
-	if (push_len < 0)
-		return push_len;
-	if (!push_len) {
-		/* HW header needs its own buffer element. */
-		hd_len = hw_hdr_len + proto_len;
-		data_offset = proto_len;
-	}
-	qeth_l2_fill_header(hdr, skb, cast_type, frame_len);
-	if (skb->ip_summed == CHECKSUM_PARTIAL) {
-		qeth_tx_csum(skb, &hdr->hdr.l2.flags[1], ipv);
-		if (card->options.performance_stats)
-			card->perf_stats.tx_csum++;
-	}
-
-	is_sg = skb_is_nonlinear(skb);
-	if (IS_IQD(card)) {
-		rc = qeth_do_send_packet_fast(queue, skb, hdr, data_offset,
-					      hd_len);
-	} else {
-		/* TODO: drop skb_orphan() once TX completion is fast enough */
-		skb_orphan(skb);
-		rc = qeth_do_send_packet(card, queue, skb, hdr, data_offset,
-					 hd_len, elements);
-	}
-
-	if (!rc) {
-		if (card->options.performance_stats) {
-			card->perf_stats.buf_elements_sent += elements;
-			if (is_sg)
-				card->perf_stats.sg_skbs_sent++;
-		}
-	} else {
-		if (!push_len)
-			kmem_cache_free(qeth_core_header_cache, hdr);
-		if (rc == -EBUSY)
-			/* roll back to ETH header */
-			skb_pull(skb, push_len);
-	}
-	return rc;
-}
-
 static int qeth_l2_xmit_osn(struct qeth_card *card, struct sk_buff *skb,
 			    struct qeth_qdio_out_q *queue)
 {
@@ -745,7 +692,8 @@ static netdev_tx_t qeth_l2_hard_start_xmit(struct sk_buff *skb,
 	if (IS_OSN(card))
 		rc = qeth_l2_xmit_osn(card, skb, queue);
 	else
-		rc = qeth_l2_xmit(card, skb, queue, cast_type, ipv);
+		rc = qeth_xmit(card, skb, queue, ipv, cast_type,
+			       qeth_l2_fill_header);
 
 	if (!rc) {
 		card->stats.tx_packets++;
-- 
2.16.4


* [PATCH net-next 02/15] s390/qeth: run non-offload L3 traffic over common xmit path
From: Julian Wiedmann @ 2018-09-17 15:35 UTC (permalink / raw)
  To: David Miller
  Cc: netdev, linux-s390, Martin Schwidefsky, Heiko Carstens,
	Stefan Raspl, Ursula Braun, Julian Wiedmann

L3 OSAs can only offload IPv4 traffic, so use the common L2 transmit
path for all other traffic.
In particular, there's no support for TX VLAN offload, so any such packet
needs to be manually de-accelerated via ndo_features_check().
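
An editor's aside on how the de-acceleration actually happens: once
.ndo_features_check clears NETIF_F_HW_VLAN_CTAG_TX for a packet, the core's
transmit path inserts the VLAN tag into the payload in software before the
driver sees the skb. Roughly (a sketch of the core logic, not qeth code):

  if (skb_vlan_tag_present(skb) &&
      !(features & NETIF_F_HW_VLAN_CTAG_TX))
          skb = __vlan_hwaccel_push_inside(skb);  /* tag moved into the frame */

This is also why the hunk below grows the OSA device's needed_headroom by
VLAN_HLEN.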

Signed-off-by: Julian Wiedmann <jwi@linux.ibm.com>
---
 drivers/s390/net/qeth_l3_main.c | 70 ++++++++++++++++++++++++++---------------
 1 file changed, 44 insertions(+), 26 deletions(-)

diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
index ada258c01a08..2733eb901b04 100644
--- a/drivers/s390/net/qeth_l3_main.c
+++ b/drivers/s390/net/qeth_l3_main.c
@@ -1983,21 +1983,23 @@ static int qeth_l3_get_cast_type(struct sk_buff *skb)
 	rcu_read_unlock();
 
 	/* no neighbour (eg AF_PACKET), fall back to target's IP address ... */
-	if (be16_to_cpu(skb->protocol) == ETH_P_IPV6)
-		return ipv6_addr_is_multicast(&ipv6_hdr(skb)->daddr) ?
-				RTN_MULTICAST : RTN_UNICAST;
-	else if (be16_to_cpu(skb->protocol) == ETH_P_IP)
+	switch (qeth_get_ip_version(skb)) {
+	case 4:
 		return ipv4_is_multicast(ip_hdr(skb)->daddr) ?
 				RTN_MULTICAST : RTN_UNICAST;
-
-	/* ... and MAC address */
-	if (ether_addr_equal_64bits(eth_hdr(skb)->h_dest, skb->dev->broadcast))
-		return RTN_BROADCAST;
-	if (is_multicast_ether_addr(eth_hdr(skb)->h_dest))
-		return RTN_MULTICAST;
-
-	/* default to unicast */
-	return RTN_UNICAST;
+	case 6:
+		return ipv6_addr_is_multicast(&ipv6_hdr(skb)->daddr) ?
+				RTN_MULTICAST : RTN_UNICAST;
+	default:
+		/* ... and MAC address */
+		if (ether_addr_equal_64bits(eth_hdr(skb)->h_dest,
+					    skb->dev->broadcast))
+			return RTN_BROADCAST;
+		if (is_multicast_ether_addr(eth_hdr(skb)->h_dest))
+			return RTN_MULTICAST;
+		/* default to unicast */
+		return RTN_UNICAST;
+	}
 }
 
 static void qeth_l3_fill_af_iucv_hdr(struct qeth_hdr *hdr, struct sk_buff *skb,
@@ -2034,20 +2036,21 @@ static void qeth_l3_fill_header(struct qeth_card *card, struct qeth_hdr *hdr,
 				struct sk_buff *skb, int ipv, int cast_type,
 				unsigned int data_len)
 {
+	struct vlan_ethhdr *veth = vlan_eth_hdr(skb);
+
 	memset(hdr, 0, sizeof(struct qeth_hdr));
 	hdr->hdr.l3.id = QETH_HEADER_TYPE_LAYER3;
 	hdr->hdr.l3.length = data_len;
 
-	/*
-	 * before we're going to overwrite this location with next hop ip.
-	 * v6 uses passthrough, v4 sets the tag in the QDIO header.
-	 */
-	if (skb_vlan_tag_present(skb)) {
-		if ((ipv == 4) || (card->info.type == QETH_CARD_TYPE_IQD))
-			hdr->hdr.l3.ext_flags = QETH_HDR_EXT_VLAN_FRAME;
-		else
-			hdr->hdr.l3.ext_flags = QETH_HDR_EXT_INCLUDE_VLAN_TAG;
-		hdr->hdr.l3.vlan_id = skb_vlan_tag_get(skb);
+	if (ipv == 4 || IS_IQD(card)) {
+		/* NETIF_F_HW_VLAN_CTAG_TX */
+		if (skb_vlan_tag_present(skb)) {
+			hdr->hdr.l3.ext_flags |= QETH_HDR_EXT_VLAN_FRAME;
+			hdr->hdr.l3.vlan_id = skb_vlan_tag_get(skb);
+		}
+	} else if (veth->h_vlan_proto == htons(ETH_P_8021Q)) {
+		hdr->hdr.l3.ext_flags |= QETH_HDR_EXT_INCLUDE_VLAN_TAG;
+		hdr->hdr.l3.vlan_id = ntohs(veth->h_vlan_TCI);
 	}
 
 	if (!skb_is_gso(skb) && skb->ip_summed == CHECKSUM_PARTIAL) {
@@ -2373,8 +2376,11 @@ static netdev_tx_t qeth_l3_hard_start_xmit(struct sk_buff *skb,
 
 	if (IS_IQD(card) || (!skb_is_gso(skb) && ipv == 4))
 		rc = qeth_l3_xmit_offload(card, skb, queue, ipv, cast_type);
-	else
+	else if (skb_is_gso(skb))
 		rc = qeth_l3_xmit(card, skb, queue, ipv, cast_type);
+	else
+		rc = qeth_xmit(card, skb, queue, ipv, cast_type,
+			       qeth_l3_fill_header);
 
 	if (!rc) {
 		card->stats.tx_packets++;
@@ -2476,6 +2482,15 @@ qeth_l3_neigh_setup(struct net_device *dev, struct neigh_parms *np)
 	return 0;
 }
 
+static netdev_features_t qeth_l3_osa_features_check(struct sk_buff *skb,
+						    struct net_device *dev,
+						    netdev_features_t features)
+{
+	if (qeth_get_ip_version(skb) != 4)
+		features &= ~NETIF_F_HW_VLAN_CTAG_TX;
+	return qeth_features_check(skb, dev, features);
+}
+
 static const struct net_device_ops qeth_l3_netdev_ops = {
 	.ndo_open		= qeth_l3_open,
 	.ndo_stop		= qeth_l3_stop,
@@ -2496,7 +2511,7 @@ static const struct net_device_ops qeth_l3_osa_netdev_ops = {
 	.ndo_stop		= qeth_l3_stop,
 	.ndo_get_stats		= qeth_get_stats,
 	.ndo_start_xmit		= qeth_l3_hard_start_xmit,
-	.ndo_features_check	= qeth_features_check,
+	.ndo_features_check	= qeth_l3_osa_features_check,
 	.ndo_validate_addr	= eth_validate_addr,
 	.ndo_set_rx_mode	= qeth_l3_set_rx_mode,
 	.ndo_do_ioctl		= qeth_do_ioctl,
@@ -2524,6 +2539,9 @@ static int qeth_l3_setup_netdev(struct qeth_card *card)
 		}
 
 		card->dev->netdev_ops = &qeth_l3_osa_netdev_ops;
+		card->dev->needed_headroom = sizeof(struct qeth_hdr);
+		/* allow for de-acceleration of NETIF_F_HW_VLAN_CTAG_TX: */
+		card->dev->needed_headroom += VLAN_HLEN;
 
 		/*IPv6 address autoconfiguration stuff*/
 		qeth_l3_get_unique_id(card);
@@ -2545,6 +2563,7 @@ static int qeth_l3_setup_netdev(struct qeth_card *card)
 	} else if (card->info.type == QETH_CARD_TYPE_IQD) {
 		card->dev->flags |= IFF_NOARP;
 		card->dev->netdev_ops = &qeth_l3_netdev_ops;
+		card->dev->needed_headroom = sizeof(struct qeth_hdr) - ETH_HLEN;
 
 		rc = qeth_l3_iqd_read_initial_mac(card);
 		if (rc)
@@ -2556,7 +2575,6 @@ static int qeth_l3_setup_netdev(struct qeth_card *card)
 		return -ENODEV;
 
 	card->dev->ethtool_ops = &qeth_l3_ethtool_ops;
-	card->dev->needed_headroom = sizeof(struct qeth_hdr) - ETH_HLEN;
 	card->dev->features |=	NETIF_F_HW_VLAN_CTAG_TX |
 				NETIF_F_HW_VLAN_CTAG_RX |
 				NETIF_F_HW_VLAN_CTAG_FILTER;
-- 
2.16.4


* [PATCH net-next 03/15] s390/qeth: remove unused L3 xmit code
From: Julian Wiedmann @ 2018-09-17 15:35 UTC (permalink / raw)
  To: David Miller
  Cc: netdev, linux-s390, Martin Schwidefsky, Heiko Carstens,
	Stefan Raspl, Ursula Braun, Julian Wiedmann

qeth_l3_xmit() is now only used for TSOv4 traffic, so shrink it down.

Signed-off-by: Julian Wiedmann <jwi@linux.ibm.com>
---
 drivers/s390/net/qeth_l3_main.c | 71 ++++++++++-------------------------------
 1 file changed, 17 insertions(+), 54 deletions(-)

diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
index 2733eb901b04..00e6e7471f5d 100644
--- a/drivers/s390/net/qeth_l3_main.c
+++ b/drivers/s390/net/qeth_l3_main.c
@@ -2230,44 +2230,24 @@ static int qeth_l3_xmit_offload(struct qeth_card *card, struct sk_buff *skb,
 static int qeth_l3_xmit(struct qeth_card *card, struct sk_buff *skb,
 			struct qeth_qdio_out_q *queue, int ipv, int cast_type)
 {
-	int elements, len, rc;
-	__be16 *tag;
 	struct qeth_hdr *hdr = NULL;
-	int hdr_elements = 0;
 	struct sk_buff *new_skb = NULL;
 	int tx_bytes = skb->len;
 	unsigned int hd_len;
-	bool use_tso, is_sg;
-
-	/* Ignore segment size from skb_is_gso(), 1 page is always used. */
-	use_tso = skb_is_gso(skb) &&
-		  (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4);
+	int elements, rc;
+	bool is_sg;
 
 	/* create a clone with writeable headroom */
-	new_skb = skb_realloc_headroom(skb, sizeof(struct qeth_hdr_tso) +
-					    VLAN_HLEN);
+	new_skb = skb_realloc_headroom(skb, sizeof(struct qeth_hdr_tso));
 	if (!new_skb)
 		return -ENOMEM;
 
-	if (ipv == 4) {
-		skb_pull(new_skb, ETH_HLEN);
-	} else if (skb_vlan_tag_present(new_skb)) {
-		skb_push(new_skb, VLAN_HLEN);
-		skb_copy_to_linear_data(new_skb, new_skb->data + 4, 4);
-		skb_copy_to_linear_data_offset(new_skb, 4,
-					       new_skb->data + 8, 4);
-		skb_copy_to_linear_data_offset(new_skb, 8,
-					       new_skb->data + 12, 4);
-		tag = (__be16 *)(new_skb->data + 12);
-		*tag = cpu_to_be16(ETH_P_8021Q);
-		*(tag + 1) = cpu_to_be16(skb_vlan_tag_get(new_skb));
-	}
+	skb_pull(new_skb, ETH_HLEN);
 
 	/* fix hardware limitation: as long as we do not have sbal
 	 * chaining we can not send long frag lists
 	 */
-	if ((use_tso && !qeth_l3_get_elements_no_tso(card, new_skb, 1)) ||
-	    (!use_tso && !qeth_get_elements_no(card, new_skb, 0, 0))) {
+	if (!qeth_l3_get_elements_no_tso(card, new_skb, 1)) {
 		rc = skb_linearize(new_skb);
 
 		if (card->options.performance_stats) {
@@ -2280,38 +2260,23 @@ static int qeth_l3_xmit(struct qeth_card *card, struct sk_buff *skb,
 			goto out;
 	}
 
-	if (use_tso) {
-		hdr = skb_push(new_skb, sizeof(struct qeth_hdr_tso));
-		memset(hdr, 0, sizeof(struct qeth_hdr_tso));
-		qeth_l3_fill_header(card, hdr, new_skb, ipv, cast_type,
-				    new_skb->len - sizeof(struct qeth_hdr_tso));
-		qeth_tso_fill_header(card, hdr, new_skb);
-		hdr_elements++;
-	} else {
-		hdr = skb_push(new_skb, sizeof(struct qeth_hdr));
-		qeth_l3_fill_header(card, hdr, new_skb, ipv, cast_type,
-				    new_skb->len - sizeof(struct qeth_hdr));
-	}
+	hdr = skb_push(new_skb, sizeof(struct qeth_hdr_tso));
+	memset(hdr, 0, sizeof(struct qeth_hdr_tso));
+	qeth_l3_fill_header(card, hdr, new_skb, ipv, cast_type,
+			    new_skb->len - sizeof(struct qeth_hdr_tso));
+	qeth_tso_fill_header(card, hdr, new_skb);
 
-	elements = use_tso ?
-		   qeth_l3_get_elements_no_tso(card, new_skb, hdr_elements) :
-		   qeth_get_elements_no(card, new_skb, hdr_elements, 0);
+	elements = qeth_l3_get_elements_no_tso(card, new_skb, 1);
 	if (!elements) {
 		rc = -E2BIG;
 		goto out;
 	}
-	elements += hdr_elements;
+	elements++;
 
-	if (use_tso) {
-		hd_len = sizeof(struct qeth_hdr_tso) +
-			 ip_hdrlen(new_skb) + tcp_hdrlen(new_skb);
-		len = hd_len;
-	} else {
-		hd_len = 0;
-		len = sizeof(struct qeth_hdr_layer3);
-	}
+	hd_len = sizeof(struct qeth_hdr_tso) + ip_hdrlen(new_skb) +
+		 tcp_hdrlen(new_skb);
 
-	if (qeth_hdr_chk_and_bounce(new_skb, &hdr, len)) {
+	if (qeth_hdr_chk_and_bounce(new_skb, &hdr, hd_len)) {
 		rc = -EINVAL;
 		goto out;
 	}
@@ -2327,10 +2292,8 @@ static int qeth_l3_xmit(struct qeth_card *card, struct sk_buff *skb,
 			card->perf_stats.buf_elements_sent += elements;
 			if (is_sg)
 				card->perf_stats.sg_skbs_sent++;
-			if (use_tso) {
-				card->perf_stats.large_send_bytes += tx_bytes;
-				card->perf_stats.large_send_cnt++;
-			}
+			card->perf_stats.large_send_bytes += tx_bytes;
+			card->perf_stats.large_send_cnt++;
 		}
 	} else {
 		if (new_skb != skb)
-- 
2.16.4


* [PATCH net-next 04/15] s390/qeth: remove qeth_get_elements_no()
From: Julian Wiedmann @ 2018-09-17 15:35 UTC (permalink / raw)
  To: David Miller
  Cc: netdev, linux-s390, Martin Schwidefsky, Heiko Carstens,
	Stefan Raspl, Ursula Braun, Julian Wiedmann

Convert the last remaining user of qeth_get_elements_no() to
qeth_count_elements(), so this helper can be removed.
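
As an editor's illustration of the resulting pattern, the comparison against
the card limit now happens in the caller, e.g. in qeth_l2_xmit_osn() further
down:

  elements = qeth_count_elements(skb, 0);
  if (elements > QETH_MAX_BUFFER_ELEMENTS(card))
          return -E2BIG;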

Signed-off-by: Julian Wiedmann <jwi@linux.ibm.com>
---
 drivers/s390/net/qeth_core.h      |  3 +--
 drivers/s390/net/qeth_core_main.c | 39 +++++++++++----------------------------
 drivers/s390/net/qeth_l2_main.c   |  4 ++--
 drivers/s390/net/qeth_l3_main.c   |  1 -
 4 files changed, 14 insertions(+), 33 deletions(-)

diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h
index 2110fabdcc7a..0857b1286660 100644
--- a/drivers/s390/net/qeth_core.h
+++ b/drivers/s390/net/qeth_core.h
@@ -1007,8 +1007,7 @@ int qeth_query_switch_attributes(struct qeth_card *card,
 int qeth_send_control_data(struct qeth_card *, int, struct qeth_cmd_buffer *,
 	int (*reply_cb)(struct qeth_card *, struct qeth_reply*, unsigned long),
 	void *reply_param);
-int qeth_get_elements_no(struct qeth_card *card, struct sk_buff *skb,
-			 int extra_elems, int data_offset);
+unsigned int qeth_count_elements(struct sk_buff *skb, unsigned int data_offset);
 int qeth_get_elements_for_frags(struct sk_buff *);
 int qeth_do_send_packet_fast(struct qeth_qdio_out_q *queue, struct sk_buff *skb,
 			     struct qeth_hdr *hdr, unsigned int offset,
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index d2ca33a9330a..eaf01dc62e91 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -3802,7 +3802,16 @@ int qeth_get_elements_for_frags(struct sk_buff *skb)
 }
 EXPORT_SYMBOL_GPL(qeth_get_elements_for_frags);
 
-static unsigned int qeth_count_elements(struct sk_buff *skb, int data_offset)
+/**
+ * qeth_count_elements() -	Counts the number of QDIO buffer elements needed
+ *				to transmit an skb.
+ * @skb:			the skb to operate on.
+ * @data_offset:		skip this part of the skb's linear data
+ *
+ * Returns the number of pages, and thus QDIO buffer elements, needed to map the
+ * skb's data (both its linear part and paged fragments).
+ */
+unsigned int qeth_count_elements(struct sk_buff *skb, unsigned int data_offset)
 {
 	unsigned int elements = qeth_get_elements_for_frags(skb);
 	addr_t end = (addr_t)skb->data + skb_headlen(skb);
@@ -3812,33 +3821,7 @@ static unsigned int qeth_count_elements(struct sk_buff *skb, int data_offset)
 		elements += qeth_get_elements_for_range(start, end);
 	return elements;
 }
-
-/**
- * qeth_get_elements_no() -	find number of SBALEs for skb data, inc. frags.
- * @card:			qeth card structure, to check max. elems.
- * @skb:			SKB address
- * @extra_elems:		extra elems needed, to check against max.
- * @data_offset:		range starts at skb->data + data_offset
- *
- * Returns the number of pages, and thus QDIO buffer elements, needed to cover
- * skb data, including linear part and fragments. Checks if the result plus
- * extra_elems fits under the limit for the card. Returns 0 if it does not.
- * Note: extra_elems is not included in the returned result.
- */
-int qeth_get_elements_no(struct qeth_card *card,
-		     struct sk_buff *skb, int extra_elems, int data_offset)
-{
-	int elements = qeth_count_elements(skb, data_offset);
-
-	if ((elements + extra_elems) > QETH_MAX_BUFFER_ELEMENTS(card)) {
-		QETH_DBF_MESSAGE(2, "Invalid size of IP packet "
-			"(Number=%d / Length=%d). Discarded.\n",
-			elements + extra_elems, skb->len);
-		return 0;
-	}
-	return elements;
-}
-EXPORT_SYMBOL_GPL(qeth_get_elements_no);
+EXPORT_SYMBOL_GPL(qeth_count_elements);
 
 int qeth_hdr_chk_and_bounce(struct sk_buff *skb, struct qeth_hdr **hdr, int len)
 {
diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c
index 715d58af5fc4..87cb71d5dae8 100644
--- a/drivers/s390/net/qeth_l2_main.c
+++ b/drivers/s390/net/qeth_l2_main.c
@@ -658,8 +658,8 @@ static int qeth_l2_xmit_osn(struct qeth_card *card, struct sk_buff *skb,
 		return -EPROTONOSUPPORT;
 
 	hdr = (struct qeth_hdr *)skb->data;
-	elements = qeth_get_elements_no(card, skb, 0, 0);
-	if (!elements)
+	elements = qeth_count_elements(skb, 0);
+	if (elements > QETH_MAX_BUFFER_ELEMENTS(card))
 		return -E2BIG;
 	if (qeth_hdr_chk_and_bounce(skb, &hdr, sizeof(*hdr)))
 		return -EINVAL;
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
index 00e6e7471f5d..1d92584e01b3 100644
--- a/drivers/s390/net/qeth_l3_main.c
+++ b/drivers/s390/net/qeth_l3_main.c
@@ -2140,7 +2140,6 @@ static void qeth_tso_fill_header(struct qeth_card *card,
  *
  * Returns the number of pages, and thus QDIO buffer elements, needed to cover
  * skb data, including linear part and fragments, but excluding TCP header.
- * (Exclusion of TCP header distinguishes it from qeth_get_elements_no().)
  * Checks if the result plus extra_elems fits under the limit for the card.
  * Returns 0 if it does not.
  * Note: extra_elems is not included in the returned result.
-- 
2.16.4


* [PATCH net-next 05/15] s390/qeth: limit csum offload erratum to L3 devices
From: Julian Wiedmann @ 2018-09-17 15:35 UTC (permalink / raw)
  To: David Miller
  Cc: netdev, linux-s390, Martin Schwidefsky, Heiko Carstens,
	Stefan Raspl, Ursula Braun, Julian Wiedmann

Combined L3+L4 csum offload is only required for some L3 HW. So for
L2 devices, don't offload the IP header csum calculation.

Signed-off-by: Julian Wiedmann <jwi@linux.ibm.com>
Reference-ID: JUP 394553
---
 drivers/s390/net/qeth_core.h    | 5 -----
 drivers/s390/net/qeth_l3_main.c | 5 +++++
 2 files changed, 5 insertions(+), 5 deletions(-)

diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h
index 0857b1286660..b47fb95a49e9 100644
--- a/drivers/s390/net/qeth_core.h
+++ b/drivers/s390/net/qeth_core.h
@@ -892,11 +892,6 @@ static inline void qeth_tx_csum(struct sk_buff *skb, u8 *flags, int ipv)
 	if ((ipv == 4 && ip_hdr(skb)->protocol == IPPROTO_UDP) ||
 	    (ipv == 6 && ipv6_hdr(skb)->nexthdr == IPPROTO_UDP))
 		*flags |= QETH_HDR_EXT_UDP;
-	if (ipv == 4) {
-		/* some HW requires combined L3+L4 csum offload: */
-		*flags |= QETH_HDR_EXT_CSUM_HDR_REQ;
-		ip_hdr(skb)->check = 0;
-	}
 }
 
 static inline void qeth_put_buffer_pool_entry(struct qeth_card *card,
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
index 1d92584e01b3..d4a967077279 100644
--- a/drivers/s390/net/qeth_l3_main.c
+++ b/drivers/s390/net/qeth_l3_main.c
@@ -2055,6 +2055,11 @@ static void qeth_l3_fill_header(struct qeth_card *card, struct qeth_hdr *hdr,
 
 	if (!skb_is_gso(skb) && skb->ip_summed == CHECKSUM_PARTIAL) {
 		qeth_tx_csum(skb, &hdr->hdr.l3.ext_flags, ipv);
+		/* some HW requires combined L3+L4 csum offload: */
+		if (ipv == 4) {
+			hdr->hdr.l3.ext_flags |= QETH_HDR_EXT_CSUM_HDR_REQ;
+			ip_hdr(skb)->check = 0;
+		}
 		if (card->options.performance_stats)
 			card->perf_stats.tx_csum++;
 	}
-- 
2.16.4


* [PATCH net-next 06/15] s390/qeth: fix up protocol headers early
From: Julian Wiedmann @ 2018-09-17 15:36 UTC (permalink / raw)
  To: David Miller
  Cc: netdev, linux-s390, Martin Schwidefsky, Heiko Carstens,
	Stefan Raspl, Ursula Braun, Julian Wiedmann

When qeth_add_hw_header() falls back to the HW header cache, it also
copies over the necessary protocol headers. Thus any manipulation of the
protocol headers needs to happen before adding the HW header.

For current usage this doesn't matter, but it becomes relevant when
moving TSO transmission over to the faster code path.
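
An editor's sketch of the ordering being established here (it only pays off
once a later patch passes a non-zero proto_len and qeth_add_hw_header() may
copy the protocol headers into the cache object):

  qeth_l3_fixup_headers(skb);               /* adjust IP/TCP headers first ... */
  push_len = qeth_add_hw_header(card, skb, &hdr, hw_hdr_len, 0,
                                &elements); /* ... before they may get copied */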

Signed-off-by: Julian Wiedmann <jwi@linux.ibm.com>
---
 drivers/s390/net/qeth_l3_main.c | 14 +++++++++++---
 1 file changed, 11 insertions(+), 3 deletions(-)

diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
index d4a967077279..0c085f57f7e2 100644
--- a/drivers/s390/net/qeth_l3_main.c
+++ b/drivers/s390/net/qeth_l3_main.c
@@ -2056,10 +2056,8 @@ static void qeth_l3_fill_header(struct qeth_card *card, struct qeth_hdr *hdr,
 	if (!skb_is_gso(skb) && skb->ip_summed == CHECKSUM_PARTIAL) {
 		qeth_tx_csum(skb, &hdr->hdr.l3.ext_flags, ipv);
 		/* some HW requires combined L3+L4 csum offload: */
-		if (ipv == 4) {
+		if (ipv == 4)
 			hdr->hdr.l3.ext_flags |= QETH_HDR_EXT_CSUM_HDR_REQ;
-			ip_hdr(skb)->check = 0;
-		}
 		if (card->options.performance_stats)
 			card->perf_stats.tx_csum++;
 	}
@@ -2168,6 +2166,15 @@ static int qeth_l3_get_elements_no_tso(struct qeth_card *card,
 	return elements;
 }
 
+static void qeth_l3_fixup_headers(struct sk_buff *skb)
+{
+	struct iphdr *iph = ip_hdr(skb);
+
+	/* this is safe, IPv6 traffic takes a different path */
+	if (skb->ip_summed == CHECKSUM_PARTIAL)
+		iph->check = 0;
+}
+
 static int qeth_l3_xmit_offload(struct qeth_card *card, struct sk_buff *skb,
 				struct qeth_qdio_out_q *queue, int ipv,
 				int cast_type)
@@ -2188,6 +2195,7 @@ static int qeth_l3_xmit_offload(struct qeth_card *card, struct sk_buff *skb,
 	skb_pull(skb, ETH_HLEN);
 	frame_len = skb->len;
 
+	qeth_l3_fixup_headers(skb);
 	push_len = qeth_add_hw_header(card, skb, &hdr, hw_hdr_len, 0,
 				      &elements);
 	if (push_len < 0)
-- 
2.16.4


* [PATCH net-next 07/15] s390/qeth: check size of required HW header cache object
From: Julian Wiedmann @ 2018-09-17 15:36 UTC (permalink / raw)
  To: David Miller
  Cc: netdev, linux-s390, Martin Schwidefsky, Heiko Carstens,
	Stefan Raspl, Ursula Braun, Julian Wiedmann

When qeth_add_hw_header() falls back to the header cache, ensure that
the requested length doesn't exceed the object size.

For current usage this is a no-brainer, but TSO transmission will
introduce protocol headers of varying length.

Signed-off-by: Julian Wiedmann <jwi@linux.ibm.com>
---
 drivers/s390/net/qeth_core_main.c | 11 +++++++++--
 1 file changed, 9 insertions(+), 2 deletions(-)

diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index eaf01dc62e91..79ebe8a5687b 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -16,6 +16,7 @@
 #include <linux/string.h>
 #include <linux/errno.h>
 #include <linux/kernel.h>
+#include <linux/log2.h>
 #include <linux/ip.h>
 #include <linux/tcp.h>
 #include <linux/mii.h>
@@ -3844,6 +3845,8 @@ int qeth_hdr_chk_and_bounce(struct sk_buff *skb, struct qeth_hdr **hdr, int len)
 }
 EXPORT_SYMBOL_GPL(qeth_hdr_chk_and_bounce);
 
+#define QETH_HDR_CACHE_OBJ_SIZE		(sizeof(struct qeth_hdr) + ETH_HLEN)
+
 /**
  * qeth_add_hw_header() - add a HW header to an skb.
  * @skb: skb that the HW header should be added to.
@@ -3918,6 +3921,8 @@ int qeth_add_hw_header(struct qeth_card *card, struct sk_buff *skb,
 		return hdr_len;
 	}
 	/* fall back */
+	if (hdr_len + proto_len > QETH_HDR_CACHE_OBJ_SIZE)
+		return -E2BIG;
 	*hdr = kmem_cache_alloc(qeth_core_header_cache, GFP_ATOMIC);
 	if (!*hdr)
 		return -ENOMEM;
@@ -6661,8 +6666,10 @@ static int __init qeth_core_init(void)
 	rc = PTR_ERR_OR_ZERO(qeth_core_root_dev);
 	if (rc)
 		goto register_err;
-	qeth_core_header_cache = kmem_cache_create("qeth_hdr",
-			sizeof(struct qeth_hdr) + ETH_HLEN, 64, 0, NULL);
+	qeth_core_header_cache =
+		kmem_cache_create("qeth_hdr", QETH_HDR_CACHE_OBJ_SIZE,
+				  roundup_pow_of_two(QETH_HDR_CACHE_OBJ_SIZE),
+				  0, NULL);
 	if (!qeth_core_header_cache) {
 		rc = -ENOMEM;
 		goto slab_err;
-- 
2.16.4


* [PATCH net-next 08/15] s390/qeth: prepare for copy-free TSO transmission
From: Julian Wiedmann @ 2018-09-17 15:36 UTC (permalink / raw)
  To: David Miller
  Cc: netdev, linux-s390, Martin Schwidefsky, Heiko Carstens,
	Stefan Raspl, Ursula Braun, Julian Wiedmann

Add all the necessary TSO plumbing to the copy-less transmit path.
This includes calculating the correct length of the required protocol
headers, and always building a separate buffer element for the TSO
headers.

A follow-up patch will then switch TSO traffic over to this path.
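
An editor's sketch of the header sizing added below, with values taken from
the qeth_l3_xmit_offload() hunk (the L2 header has already been pulled off
at this point):

  proto_len = skb_transport_offset(skb) + tcp_hdrlen(skb) - ETH_HLEN;
  hd_len    = sizeof(struct qeth_hdr_tso) + proto_len;
  /* hd_len goes into its own buffer element; the remaining
   * frame_len - proto_len bytes are mapped as payload.
   */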

Signed-off-by: Julian Wiedmann <jwi@linux.ibm.com>
---
 drivers/s390/net/qeth_core.h      |  1 +
 drivers/s390/net/qeth_core_main.c | 10 ++++-
 drivers/s390/net/qeth_l2_main.c   |  1 -
 drivers/s390/net/qeth_l3_main.c   | 93 ++++++++++++++++++++++++++++-----------
 4 files changed, 77 insertions(+), 28 deletions(-)

diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h
index b47fb95a49e9..d86eea9db2a7 100644
--- a/drivers/s390/net/qeth_core.h
+++ b/drivers/s390/net/qeth_core.h
@@ -26,6 +26,7 @@
 #include <net/ipv6.h>
 #include <net/if_inet6.h>
 #include <net/addrconf.h>
+#include <net/tcp.h>
 
 #include <asm/debug.h>
 #include <asm/qdio.h>
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index 79ebe8a5687b..460ffdf1b200 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -3845,7 +3845,8 @@ int qeth_hdr_chk_and_bounce(struct sk_buff *skb, struct qeth_hdr **hdr, int len)
 }
 EXPORT_SYMBOL_GPL(qeth_hdr_chk_and_bounce);
 
-#define QETH_HDR_CACHE_OBJ_SIZE		(sizeof(struct qeth_hdr) + ETH_HLEN)
+#define QETH_HDR_CACHE_OBJ_SIZE		(sizeof(struct qeth_hdr_tso) + \
+					 MAX_TCP_HEADER)
 
 /**
  * qeth_add_hw_header() - add a HW header to an skb.
@@ -3880,7 +3881,11 @@ int qeth_add_hw_header(struct qeth_card *card, struct sk_buff *skb,
 	if (qeth_get_elements_for_range(start, end + contiguous) == 1) {
 		/* Push HW header into same page as first protocol header. */
 		push_ok = true;
-		__elements = qeth_count_elements(skb, 0);
+		/* ... but TSO always needs a separate element for headers: */
+		if (skb_is_gso(skb))
+			__elements = 1 + qeth_count_elements(skb, proto_len);
+		else
+			__elements = qeth_count_elements(skb, 0);
 	} else if (!proto_len && qeth_get_elements_for_range(start, end) == 1) {
 		/* Push HW header into a new page. */
 		push_ok = true;
@@ -4193,6 +4198,7 @@ int qeth_xmit(struct qeth_card *card, struct sk_buff *skb,
 		hd_len = hw_hdr_len + proto_len;
 		data_offset = proto_len;
 	}
+	memset(hdr, 0, hw_hdr_len);
 	fill_header(card, hdr, skb, ipv, cast_type, frame_len);
 
 	is_sg = skb_is_nonlinear(skb);
diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c
index 87cb71d5dae8..24b531ca2827 100644
--- a/drivers/s390/net/qeth_l2_main.c
+++ b/drivers/s390/net/qeth_l2_main.c
@@ -199,7 +199,6 @@ static void qeth_l2_fill_header(struct qeth_card *card, struct qeth_hdr *hdr,
 {
 	struct vlan_ethhdr *veth = (struct vlan_ethhdr *)skb_mac_header(skb);
 
-	memset(hdr, 0, sizeof(struct qeth_hdr));
 	hdr->hdr.l2.id = QETH_HEADER_TYPE_LAYER2;
 	hdr->hdr.l2.pkt_length = data_len;
 
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
index 0c085f57f7e2..fe55218802d7 100644
--- a/drivers/s390/net/qeth_l3_main.c
+++ b/drivers/s390/net/qeth_l3_main.c
@@ -2008,7 +2008,6 @@ static void qeth_l3_fill_af_iucv_hdr(struct qeth_hdr *hdr, struct sk_buff *skb,
 	char daddr[16];
 	struct af_iucv_trans_hdr *iucv_hdr;
 
-	memset(hdr, 0, sizeof(struct qeth_hdr));
 	hdr->hdr.l3.id = QETH_HEADER_TYPE_LAYER3;
 	hdr->hdr.l3.length = data_len;
 	hdr->hdr.l3.flags = QETH_HDR_IPV6 | QETH_CAST_UNICAST;
@@ -2038,10 +2037,22 @@ static void qeth_l3_fill_header(struct qeth_card *card, struct qeth_hdr *hdr,
 {
 	struct vlan_ethhdr *veth = vlan_eth_hdr(skb);
 
-	memset(hdr, 0, sizeof(struct qeth_hdr));
-	hdr->hdr.l3.id = QETH_HEADER_TYPE_LAYER3;
 	hdr->hdr.l3.length = data_len;
 
+	if (skb_is_gso(skb)) {
+		hdr->hdr.l3.id = QETH_HEADER_TYPE_TSO;
+	} else {
+		hdr->hdr.l3.id = QETH_HEADER_TYPE_LAYER3;
+		if (skb->ip_summed == CHECKSUM_PARTIAL) {
+			qeth_tx_csum(skb, &hdr->hdr.l3.ext_flags, ipv);
+			/* some HW requires combined L3+L4 csum offload: */
+			if (ipv == 4)
+				hdr->hdr.l3.ext_flags |= QETH_HDR_EXT_CSUM_HDR_REQ;
+			if (card->options.performance_stats)
+				card->perf_stats.tx_csum++;
+		}
+	}
+
 	if (ipv == 4 || IS_IQD(card)) {
 		/* NETIF_F_HW_VLAN_CTAG_TX */
 		if (skb_vlan_tag_present(skb)) {
@@ -2053,15 +2064,6 @@ static void qeth_l3_fill_header(struct qeth_card *card, struct qeth_hdr *hdr,
 		hdr->hdr.l3.vlan_id = ntohs(veth->h_vlan_TCI);
 	}
 
-	if (!skb_is_gso(skb) && skb->ip_summed == CHECKSUM_PARTIAL) {
-		qeth_tx_csum(skb, &hdr->hdr.l3.ext_flags, ipv);
-		/* some HW requires combined L3+L4 csum offload: */
-		if (ipv == 4)
-			hdr->hdr.l3.ext_flags |= QETH_HDR_EXT_CSUM_HDR_REQ;
-		if (card->options.performance_stats)
-			card->perf_stats.tx_csum++;
-	}
-
 	/* OSA only: */
 	if (!ipv) {
 		hdr->hdr.l3.flags = QETH_HDR_PASSTHRU;
@@ -2100,6 +2102,22 @@ static void qeth_l3_fill_header(struct qeth_card *card, struct qeth_hdr *hdr,
 	rcu_read_unlock();
 }
 
+static void qeth_l3_fill_tso_ext(struct qeth_hdr_tso *hdr,
+				 unsigned int payload_len, struct sk_buff *skb,
+				 unsigned int proto_len)
+{
+	struct qeth_hdr_ext_tso *ext = &hdr->ext;
+
+	ext->hdr_tot_len = sizeof(*ext);
+	ext->imb_hdr_no = 1;
+	ext->hdr_type = 1;
+	ext->hdr_version = 1;
+	ext->hdr_len = 28;
+	ext->payload_len = payload_len;
+	ext->mss = skb_shinfo(skb)->gso_size;
+	ext->dg_hdr_len = proto_len;
+}
+
 static void qeth_tso_fill_header(struct qeth_card *card,
 		struct qeth_hdr *qhdr, struct sk_buff *skb)
 {
@@ -2108,8 +2126,6 @@ static void qeth_tso_fill_header(struct qeth_card *card,
 	struct iphdr *iph = ip_hdr(skb);
 	struct ipv6hdr *ip6h = ipv6_hdr(skb);
 
-	/*fix header to TSO values ...*/
-	hdr->hdr.hdr.l3.id = QETH_HEADER_TYPE_TSO;
 	/*set values which are fix for the first approach ...*/
 	hdr->ext.hdr_tot_len = (__u16) sizeof(struct qeth_hdr_ext_tso);
 	hdr->ext.imb_hdr_no  = 1;
@@ -2173,20 +2189,35 @@ static void qeth_l3_fixup_headers(struct sk_buff *skb)
 	/* this is safe, IPv6 traffic takes a different path */
 	if (skb->ip_summed == CHECKSUM_PARTIAL)
 		iph->check = 0;
+	if (skb_is_gso(skb)) {
+		iph->tot_len = 0;
+		tcp_hdr(skb)->check = ~tcp_v4_check(0, iph->saddr,
+						    iph->daddr, 0);
+	}
 }
 
 static int qeth_l3_xmit_offload(struct qeth_card *card, struct sk_buff *skb,
 				struct qeth_qdio_out_q *queue, int ipv,
 				int cast_type)
 {
-	const unsigned int hw_hdr_len = sizeof(struct qeth_hdr);
-	unsigned int frame_len, elements;
+	unsigned int hw_hdr_len, proto_len, frame_len, elements;
 	unsigned char eth_hdr[ETH_HLEN];
+	bool is_tso = skb_is_gso(skb);
+	unsigned int data_offset = 0;
 	struct qeth_hdr *hdr = NULL;
 	unsigned int hd_len = 0;
 	int push_len, rc;
 	bool is_sg;
 
+	if (is_tso) {
+		hw_hdr_len = sizeof(struct qeth_hdr_tso);
+		proto_len = skb_transport_offset(skb) + tcp_hdrlen(skb) -
+			    ETH_HLEN;
+	} else {
+		hw_hdr_len = sizeof(struct qeth_hdr);
+		proto_len = 0;
+	}
+
 	/* re-use the L2 header area for the HW header: */
 	rc = skb_cow_head(skb, hw_hdr_len - ETH_HLEN);
 	if (rc)
@@ -2196,28 +2227,36 @@ static int qeth_l3_xmit_offload(struct qeth_card *card, struct sk_buff *skb,
 	frame_len = skb->len;
 
 	qeth_l3_fixup_headers(skb);
-	push_len = qeth_add_hw_header(card, skb, &hdr, hw_hdr_len, 0,
+	push_len = qeth_add_hw_header(card, skb, &hdr, hw_hdr_len, proto_len,
 				      &elements);
 	if (push_len < 0)
 		return push_len;
-	if (!push_len) {
-		/* hdr was added discontiguous from skb->data */
-		hd_len = hw_hdr_len;
+	if (is_tso || !push_len) {
+		/* HW header needs its own buffer element. */
+		hd_len = hw_hdr_len + proto_len;
+		data_offset = push_len + proto_len;
 	}
+	memset(hdr, 0, hw_hdr_len);
 
-	if (skb->protocol == htons(ETH_P_AF_IUCV))
+	if (skb->protocol == htons(ETH_P_AF_IUCV)) {
 		qeth_l3_fill_af_iucv_hdr(hdr, skb, frame_len);
-	else
+	} else {
 		qeth_l3_fill_header(card, hdr, skb, ipv, cast_type, frame_len);
+		if (is_tso)
+			qeth_l3_fill_tso_ext((struct qeth_hdr_tso *) hdr,
+					     frame_len - proto_len, skb,
+					     proto_len);
+	}
 
 	is_sg = skb_is_nonlinear(skb);
 	if (IS_IQD(card)) {
-		rc = qeth_do_send_packet_fast(queue, skb, hdr, 0, hd_len);
+		rc = qeth_do_send_packet_fast(queue, skb, hdr, data_offset,
+					      hd_len);
 	} else {
 		/* TODO: drop skb_orphan() once TX completion is fast enough */
 		skb_orphan(skb);
-		rc = qeth_do_send_packet(card, queue, skb, hdr, 0, hd_len,
-					 elements);
+		rc = qeth_do_send_packet(card, queue, skb, hdr, data_offset,
+					 hd_len, elements);
 	}
 
 	if (!rc) {
@@ -2225,6 +2264,10 @@ static int qeth_l3_xmit_offload(struct qeth_card *card, struct sk_buff *skb,
 			card->perf_stats.buf_elements_sent += elements;
 			if (is_sg)
 				card->perf_stats.sg_skbs_sent++;
+			if (is_tso) {
+				card->perf_stats.large_send_bytes += frame_len;
+				card->perf_stats.large_send_cnt++;
+			}
 		}
 	} else {
 		if (!push_len)
-- 
2.16.4


* [PATCH net-next 09/15] s390/qeth: speed up TSO transmission
From: Julian Wiedmann @ 2018-09-17 15:36 UTC (permalink / raw)
  To: David Miller
  Cc: netdev, linux-s390, Martin Schwidefsky, Heiko Carstens,
	Stefan Raspl, Ursula Braun, Julian Wiedmann

Switch TSO over to the faster transmit path, and remove all the unused
old TSO code.

Signed-off-by: Julian Wiedmann <jwi@linux.ibm.com>
---
 drivers/s390/net/qeth_core.h      |   1 -
 drivers/s390/net/qeth_core_main.c |   3 +-
 drivers/s390/net/qeth_l3_main.c   | 151 ++------------------------------------
 3 files changed, 6 insertions(+), 149 deletions(-)

diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h
index d86eea9db2a7..1c9fce609eb9 100644
--- a/drivers/s390/net/qeth_core.h
+++ b/drivers/s390/net/qeth_core.h
@@ -1004,7 +1004,6 @@ int qeth_send_control_data(struct qeth_card *, int, struct qeth_cmd_buffer *,
 	int (*reply_cb)(struct qeth_card *, struct qeth_reply*, unsigned long),
 	void *reply_param);
 unsigned int qeth_count_elements(struct sk_buff *skb, unsigned int data_offset);
-int qeth_get_elements_for_frags(struct sk_buff *);
 int qeth_do_send_packet_fast(struct qeth_qdio_out_q *queue, struct sk_buff *skb,
 			     struct qeth_hdr *hdr, unsigned int offset,
 			     unsigned int hd_len);
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index 460ffdf1b200..7426167eace2 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -3788,7 +3788,7 @@ EXPORT_SYMBOL_GPL(qeth_get_priority_queue);
  * Returns the number of pages, and thus QDIO buffer elements, needed to cover
  * fragmented part of the SKB. Returns zero for linear SKB.
  */
-int qeth_get_elements_for_frags(struct sk_buff *skb)
+static int qeth_get_elements_for_frags(struct sk_buff *skb)
 {
 	int cnt, elements = 0;
 
@@ -3801,7 +3801,6 @@ int qeth_get_elements_for_frags(struct sk_buff *skb)
 	}
 	return elements;
 }
-EXPORT_SYMBOL_GPL(qeth_get_elements_for_frags);
 
 /**
  * qeth_count_elements() -	Counts the number of QDIO buffer elements needed
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
index fe55218802d7..5d7e2921ab36 100644
--- a/drivers/s390/net/qeth_l3_main.c
+++ b/drivers/s390/net/qeth_l3_main.c
@@ -33,7 +33,6 @@
 #include <net/ipv6.h>
 #include <net/ip6_route.h>
 #include <net/ip6_fib.h>
-#include <net/ip6_checksum.h>
 #include <net/iucv/af_iucv.h>
 #include <linux/hashtable.h>
 
@@ -2118,70 +2117,6 @@ static void qeth_l3_fill_tso_ext(struct qeth_hdr_tso *hdr,
 	ext->dg_hdr_len = proto_len;
 }
 
-static void qeth_tso_fill_header(struct qeth_card *card,
-		struct qeth_hdr *qhdr, struct sk_buff *skb)
-{
-	struct qeth_hdr_tso *hdr = (struct qeth_hdr_tso *)qhdr;
-	struct tcphdr *tcph = tcp_hdr(skb);
-	struct iphdr *iph = ip_hdr(skb);
-	struct ipv6hdr *ip6h = ipv6_hdr(skb);
-
-	/*set values which are fix for the first approach ...*/
-	hdr->ext.hdr_tot_len = (__u16) sizeof(struct qeth_hdr_ext_tso);
-	hdr->ext.imb_hdr_no  = 1;
-	hdr->ext.hdr_type    = 1;
-	hdr->ext.hdr_version = 1;
-	hdr->ext.hdr_len     = 28;
-	/*insert non-fix values */
-	hdr->ext.mss = skb_shinfo(skb)->gso_size;
-	hdr->ext.dg_hdr_len = (__u16)(ip_hdrlen(skb) + tcp_hdrlen(skb));
-	hdr->ext.payload_len = (__u16)(skb->len - hdr->ext.dg_hdr_len -
-				       sizeof(struct qeth_hdr_tso));
-	tcph->check = 0;
-	if (be16_to_cpu(skb->protocol) == ETH_P_IPV6) {
-		ip6h->payload_len = 0;
-		tcph->check = ~csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr,
-					       0, IPPROTO_TCP, 0);
-	} else {
-		/*OSA want us to set these values ...*/
-		tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
-					 0, IPPROTO_TCP, 0);
-		iph->tot_len = 0;
-		iph->check = 0;
-	}
-}
-
-/**
- * qeth_l3_get_elements_no_tso() - find number of SBALEs for skb data for tso
- * @card:			   qeth card structure, to check max. elems.
- * @skb:			   SKB address
- * @extra_elems:		   extra elems needed, to check against max.
- *
- * Returns the number of pages, and thus QDIO buffer elements, needed to cover
- * skb data, including linear part and fragments, but excluding TCP header.
- * Checks if the result plus extra_elems fits under the limit for the card.
- * Returns 0 if it does not.
- * Note: extra_elems is not included in the returned result.
- */
-static int qeth_l3_get_elements_no_tso(struct qeth_card *card,
-			struct sk_buff *skb, int extra_elems)
-{
-	addr_t start = (addr_t)tcp_hdr(skb) + tcp_hdrlen(skb);
-	addr_t end = (addr_t)skb->data + skb_headlen(skb);
-	int elements = qeth_get_elements_for_frags(skb);
-
-	if (start != end)
-		elements += qeth_get_elements_for_range(start, end);
-
-	if ((elements + extra_elems) > QETH_MAX_BUFFER_ELEMENTS(card)) {
-		QETH_DBF_MESSAGE(2,
-	"Invalid size of TSO IP packet (Number=%d / Length=%d). Discarded.\n",
-				elements + extra_elems, skb->len);
-		return 0;
-	}
-	return elements;
-}
-
 static void qeth_l3_fixup_headers(struct sk_buff *skb)
 {
 	struct iphdr *iph = ip_hdr(skb);
@@ -2196,9 +2131,8 @@ static void qeth_l3_fixup_headers(struct sk_buff *skb)
 	}
 }
 
-static int qeth_l3_xmit_offload(struct qeth_card *card, struct sk_buff *skb,
-				struct qeth_qdio_out_q *queue, int ipv,
-				int cast_type)
+static int qeth_l3_xmit(struct qeth_card *card, struct sk_buff *skb,
+			struct qeth_qdio_out_q *queue, int ipv, int cast_type)
 {
 	unsigned int hw_hdr_len, proto_len, frame_len, elements;
 	unsigned char eth_hdr[ETH_HLEN];
@@ -2282,81 +2216,6 @@ static int qeth_l3_xmit_offload(struct qeth_card *card, struct sk_buff *skb,
 	return rc;
 }
 
-static int qeth_l3_xmit(struct qeth_card *card, struct sk_buff *skb,
-			struct qeth_qdio_out_q *queue, int ipv, int cast_type)
-{
-	struct qeth_hdr *hdr = NULL;
-	struct sk_buff *new_skb = NULL;
-	int tx_bytes = skb->len;
-	unsigned int hd_len;
-	int elements, rc;
-	bool is_sg;
-
-	/* create a clone with writeable headroom */
-	new_skb = skb_realloc_headroom(skb, sizeof(struct qeth_hdr_tso));
-	if (!new_skb)
-		return -ENOMEM;
-
-	skb_pull(new_skb, ETH_HLEN);
-
-	/* fix hardware limitation: as long as we do not have sbal
-	 * chaining we can not send long frag lists
-	 */
-	if (!qeth_l3_get_elements_no_tso(card, new_skb, 1)) {
-		rc = skb_linearize(new_skb);
-
-		if (card->options.performance_stats) {
-			if (rc)
-				card->perf_stats.tx_linfail++;
-			else
-				card->perf_stats.tx_lin++;
-		}
-		if (rc)
-			goto out;
-	}
-
-	hdr = skb_push(new_skb, sizeof(struct qeth_hdr_tso));
-	memset(hdr, 0, sizeof(struct qeth_hdr_tso));
-	qeth_l3_fill_header(card, hdr, new_skb, ipv, cast_type,
-			    new_skb->len - sizeof(struct qeth_hdr_tso));
-	qeth_tso_fill_header(card, hdr, new_skb);
-
-	elements = qeth_l3_get_elements_no_tso(card, new_skb, 1);
-	if (!elements) {
-		rc = -E2BIG;
-		goto out;
-	}
-	elements++;
-
-	hd_len = sizeof(struct qeth_hdr_tso) + ip_hdrlen(new_skb) +
-		 tcp_hdrlen(new_skb);
-
-	if (qeth_hdr_chk_and_bounce(new_skb, &hdr, hd_len)) {
-		rc = -EINVAL;
-		goto out;
-	}
-
-	is_sg = skb_is_nonlinear(new_skb);
-	rc = qeth_do_send_packet(card, queue, new_skb, hdr, hd_len, hd_len,
-				 elements);
-out:
-	if (!rc) {
-		if (new_skb != skb)
-			dev_kfree_skb_any(skb);
-		if (card->options.performance_stats) {
-			card->perf_stats.buf_elements_sent += elements;
-			if (is_sg)
-				card->perf_stats.sg_skbs_sent++;
-			card->perf_stats.large_send_bytes += tx_bytes;
-			card->perf_stats.large_send_cnt++;
-		}
-	} else {
-		if (new_skb != skb)
-			dev_kfree_skb_any(new_skb);
-	}
-	return rc;
-}
-
 static netdev_tx_t qeth_l3_hard_start_xmit(struct sk_buff *skb,
 					   struct net_device *dev)
 {
@@ -2392,9 +2251,7 @@ static netdev_tx_t qeth_l3_hard_start_xmit(struct sk_buff *skb,
 	}
 	netif_stop_queue(dev);
 
-	if (IS_IQD(card) || (!skb_is_gso(skb) && ipv == 4))
-		rc = qeth_l3_xmit_offload(card, skb, queue, ipv, cast_type);
-	else if (skb_is_gso(skb))
+	if (ipv == 4 || IS_IQD(card))
 		rc = qeth_l3_xmit(card, skb, queue, ipv, cast_type);
 	else
 		rc = qeth_xmit(card, skb, queue, ipv, cast_type,
@@ -2560,6 +2417,8 @@ static int qeth_l3_setup_netdev(struct qeth_card *card)
 		card->dev->needed_headroom = sizeof(struct qeth_hdr);
 		/* allow for de-acceleration of NETIF_F_HW_VLAN_CTAG_TX: */
 		card->dev->needed_headroom += VLAN_HLEN;
+		if (qeth_is_supported(card, IPA_OUTBOUND_TSO))
+			card->dev->needed_headroom = sizeof(struct qeth_hdr_tso);
 
 		/*IPv6 address autoconfiguration stuff*/
 		qeth_l3_get_unique_id(card);
-- 
2.16.4


* [PATCH net-next 10/15] s390/qeth: remove qeth_hdr_chk_and_bounce()
From: Julian Wiedmann @ 2018-09-17 15:36 UTC (permalink / raw)
  To: David Miller
  Cc: netdev, linux-s390, Martin Schwidefsky, Heiko Carstens,
	Stefan Raspl, Ursula Braun, Julian Wiedmann

Restructure the OSN xmit path to handle misaligned HW headers properly,
without shifting the packet data around.
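
An editor's note on "misaligned": the OSN skb starts with the qeth_hdr
itself, and if that header straddles a 4K page boundary it cannot live in a
single buffer element. The hunk below detects this with
qeth_get_elements_for_range(), which conceptually counts spanned pages
(sketch of the idea, not the driver's exact helper):

  static inline unsigned int pages_spanned(addr_t start, addr_t end)
  {
          return PFN_UP(end) - PFN_DOWN(start);   /* > 1: crosses a page */
  }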

Signed-off-by: Julian Wiedmann <jwi@linux.ibm.com>
---
 drivers/s390/net/qeth_core.h      |  1 -
 drivers/s390/net/qeth_core_main.c | 21 ---------------------
 drivers/s390/net/qeth_l2_main.c   | 37 ++++++++++++++++++++++++++++---------
 3 files changed, 28 insertions(+), 31 deletions(-)

diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h
index 1c9fce609eb9..be213b5c2552 100644
--- a/drivers/s390/net/qeth_core.h
+++ b/drivers/s390/net/qeth_core.h
@@ -1021,7 +1021,6 @@ void qeth_dbf_longtext(debug_info_t *id, int level, char *text, ...);
 int qeth_core_ethtool_get_link_ksettings(struct net_device *netdev,
 					 struct ethtool_link_ksettings *cmd);
 int qeth_set_access_ctrl_online(struct qeth_card *card, int fallback);
-int qeth_hdr_chk_and_bounce(struct sk_buff *, struct qeth_hdr **, int);
 int qeth_configure_cq(struct qeth_card *, enum qeth_cq);
 int qeth_hw_trap(struct qeth_card *, enum qeth_diags_trap_action);
 void qeth_trace_features(struct qeth_card *);
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index 7426167eace2..c7f7061a7205 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -3823,27 +3823,6 @@ unsigned int qeth_count_elements(struct sk_buff *skb, unsigned int data_offset)
 }
 EXPORT_SYMBOL_GPL(qeth_count_elements);
 
-int qeth_hdr_chk_and_bounce(struct sk_buff *skb, struct qeth_hdr **hdr, int len)
-{
-	int hroom, inpage, rest;
-
-	if (((unsigned long)skb->data & PAGE_MASK) !=
-	    (((unsigned long)skb->data + len - 1) & PAGE_MASK)) {
-		hroom = skb_headroom(skb);
-		inpage = PAGE_SIZE - ((unsigned long) skb->data % PAGE_SIZE);
-		rest = len - inpage;
-		if (rest > hroom)
-			return 1;
-		memmove(skb->data - rest, skb->data, skb_headlen(skb));
-		skb->data -= rest;
-		skb->tail -= rest;
-		*hdr = (struct qeth_hdr *)skb->data;
-		QETH_DBF_MESSAGE(2, "skb bounce len: %d rest: %d\n", len, rest);
-	}
-	return 0;
-}
-EXPORT_SYMBOL_GPL(qeth_hdr_chk_and_bounce);
-
 #define QETH_HDR_CACHE_OBJ_SIZE		(sizeof(struct qeth_hdr_tso) + \
 					 MAX_TCP_HEADER)
 
diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c
index 24b531ca2827..33b65471a68a 100644
--- a/drivers/s390/net/qeth_l2_main.c
+++ b/drivers/s390/net/qeth_l2_main.c
@@ -650,19 +650,38 @@ static void qeth_l2_set_rx_mode(struct net_device *dev)
 static int qeth_l2_xmit_osn(struct qeth_card *card, struct sk_buff *skb,
 			    struct qeth_qdio_out_q *queue)
 {
-	unsigned int elements;
-	struct qeth_hdr *hdr;
+	struct qeth_hdr *hdr = (struct qeth_hdr *)skb->data;
+	addr_t end = (addr_t)(skb->data + sizeof(*hdr));
+	addr_t start = (addr_t)skb->data;
+	unsigned int elements = 0;
+	unsigned int hd_len = 0;
+	int rc;
 
 	if (skb->protocol == htons(ETH_P_IPV6))
 		return -EPROTONOSUPPORT;
 
-	hdr = (struct qeth_hdr *)skb->data;
-	elements = qeth_count_elements(skb, 0);
-	if (elements > QETH_MAX_BUFFER_ELEMENTS(card))
-		return -E2BIG;
-	if (qeth_hdr_chk_and_bounce(skb, &hdr, sizeof(*hdr)))
-		return -EINVAL;
-	return qeth_do_send_packet(card, queue, skb, hdr, 0, 0, elements);
+	if (qeth_get_elements_for_range(start, end) > 1) {
+		/* Misaligned HW header, move it to its own buffer element. */
+		hdr = kmem_cache_alloc(qeth_core_header_cache, GFP_ATOMIC);
+		if (!hdr)
+			return -ENOMEM;
+		hd_len = sizeof(*hdr);
+		skb_copy_from_linear_data(skb, (char *)hdr, hd_len);
+		elements++;
+	}
+
+	elements += qeth_count_elements(skb, hd_len);
+	if (elements > QETH_MAX_BUFFER_ELEMENTS(card)) {
+		rc = -E2BIG;
+		goto out;
+	}
+
+	rc = qeth_do_send_packet(card, queue, skb, hdr, hd_len, hd_len,
+				 elements);
+out:
+	if (rc && hd_len)
+		kmem_cache_free(qeth_core_header_cache, hdr);
+	return rc;
 }
 
 static netdev_tx_t qeth_l2_hard_start_xmit(struct sk_buff *skb,
-- 
2.16.4

^ permalink raw reply related	[flat|nested] 17+ messages in thread

* [PATCH net-next 11/15] s390/qeth: uninstall IRQ handler on device removal
  2018-09-17 15:35 [PATCH net-next 00/15] s390/qeth: updates 2018-09-17 Julian Wiedmann
                   ` (9 preceding siblings ...)
  2018-09-17 15:36 ` [PATCH net-next 10/15] s390/qeth: remove qeth_hdr_chk_and_bounce() Julian Wiedmann
@ 2018-09-17 15:36 ` Julian Wiedmann
  2018-09-17 15:36 ` [PATCH net-next 12/15] s390/qeth: invoke softirqs after napi_schedule() Julian Wiedmann
                   ` (4 subsequent siblings)
  15 siblings, 0 replies; 17+ messages in thread
From: Julian Wiedmann @ 2018-09-17 15:36 UTC (permalink / raw)
  To: David Miller
  Cc: netdev, linux-s390, Martin Schwidefsky, Heiko Carstens,
	Stefan Raspl, Ursula Braun, Julian Wiedmann

When setting up, qeth installs its IRQ handler on the ccw devices. But
the IRQ handler is not cleared on removal - so even after qeth yields
control of the ccw devices, spurious interrupts would still be presented
to us.

Make (de-)installation of the IRQ handler part of the ccw channel
setup/removal helpers, and while at it also add the appropriate locking.
Shift around qeth_setup_channel() to avoid a forward declaration for
qeth_irq().
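
Roughly, the pairing this introduces looks as follows (a sketch; the
wrapper is hypothetical, the patch open-codes this in
qeth_setup_channel() and qeth_clean_channel()):

  static void channel_set_irq_handler(struct ccw_device *cdev, bool install)
  {
	/* assigning/clearing under the ccw device lock means it cannot
	 * race with an interrupt being delivered for this subchannel
	 */
	spin_lock_irq(get_ccwdev_lock(cdev));
	cdev->handler = install ? qeth_irq : NULL;
	spin_unlock_irq(get_ccwdev_lock(cdev));
  }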

Signed-off-by: Julian Wiedmann <jwi@linux.ibm.com>
---
 drivers/s390/net/qeth_core_main.c | 102 ++++++++++++++++++++------------------
 1 file changed, 54 insertions(+), 48 deletions(-)

diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index c7f7061a7205..b76e844811e1 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -901,44 +901,6 @@ static void qeth_send_control_data_cb(struct qeth_channel *channel,
 	qeth_release_buffer(channel, iob);
 }
 
-static int qeth_setup_channel(struct qeth_channel *channel, bool alloc_buffers)
-{
-	int cnt;
-
-	QETH_DBF_TEXT(SETUP, 2, "setupch");
-
-	channel->ccw = kmalloc(sizeof(struct ccw1), GFP_KERNEL | GFP_DMA);
-	if (!channel->ccw)
-		return -ENOMEM;
-	channel->state = CH_STATE_DOWN;
-	atomic_set(&channel->irq_pending, 0);
-	init_waitqueue_head(&channel->wait_q);
-
-	if (!alloc_buffers)
-		return 0;
-
-	for (cnt = 0; cnt < QETH_CMD_BUFFER_NO; cnt++) {
-		channel->iob[cnt].data =
-			kzalloc(QETH_BUFSIZE, GFP_DMA|GFP_KERNEL);
-		if (channel->iob[cnt].data == NULL)
-			break;
-		channel->iob[cnt].state = BUF_STATE_FREE;
-		channel->iob[cnt].channel = channel;
-		channel->iob[cnt].callback = qeth_send_control_data_cb;
-		channel->iob[cnt].rc = 0;
-	}
-	if (cnt < QETH_CMD_BUFFER_NO) {
-		kfree(channel->ccw);
-		while (cnt-- > 0)
-			kfree(channel->iob[cnt].data);
-		return -ENOMEM;
-	}
-	channel->io_buf_no = 0;
-	spin_lock_init(&channel->iob_lock);
-
-	return 0;
-}
-
 static int qeth_set_thread_start_bit(struct qeth_card *card,
 		unsigned long thread)
 {
@@ -1337,14 +1299,61 @@ static void qeth_free_buffer_pool(struct qeth_card *card)
 
 static void qeth_clean_channel(struct qeth_channel *channel)
 {
+	struct ccw_device *cdev = channel->ccwdev;
 	int cnt;
 
 	QETH_DBF_TEXT(SETUP, 2, "freech");
+
+	spin_lock_irq(get_ccwdev_lock(cdev));
+	cdev->handler = NULL;
+	spin_unlock_irq(get_ccwdev_lock(cdev));
+
 	for (cnt = 0; cnt < QETH_CMD_BUFFER_NO; cnt++)
 		kfree(channel->iob[cnt].data);
 	kfree(channel->ccw);
 }
 
+static int qeth_setup_channel(struct qeth_channel *channel, bool alloc_buffers)
+{
+	struct ccw_device *cdev = channel->ccwdev;
+	int cnt;
+
+	QETH_DBF_TEXT(SETUP, 2, "setupch");
+
+	channel->ccw = kmalloc(sizeof(struct ccw1), GFP_KERNEL | GFP_DMA);
+	if (!channel->ccw)
+		return -ENOMEM;
+	channel->state = CH_STATE_DOWN;
+	atomic_set(&channel->irq_pending, 0);
+	init_waitqueue_head(&channel->wait_q);
+
+	spin_lock_irq(get_ccwdev_lock(cdev));
+	cdev->handler = qeth_irq;
+	spin_unlock_irq(get_ccwdev_lock(cdev));
+
+	if (!alloc_buffers)
+		return 0;
+
+	for (cnt = 0; cnt < QETH_CMD_BUFFER_NO; cnt++) {
+		channel->iob[cnt].data =
+			kzalloc(QETH_BUFSIZE, GFP_DMA|GFP_KERNEL);
+		if (channel->iob[cnt].data == NULL)
+			break;
+		channel->iob[cnt].state = BUF_STATE_FREE;
+		channel->iob[cnt].channel = channel;
+		channel->iob[cnt].callback = qeth_send_control_data_cb;
+		channel->iob[cnt].rc = 0;
+	}
+	if (cnt < QETH_CMD_BUFFER_NO) {
+		qeth_clean_channel(channel);
+		return -ENOMEM;
+	}
+	channel->io_buf_no = 0;
+	spin_lock_init(&channel->iob_lock);
+
+	return 0;
+}
+
 static void qeth_set_single_write_queues(struct qeth_card *card)
 {
 	if ((atomic_read(&card->qdio.state) != QETH_QDIO_UNINITIALIZED) &&
@@ -1495,7 +1504,7 @@ static void qeth_core_sl_print(struct seq_file *m, struct service_level *slr)
 			CARD_BUS_ID(card), card->info.mcl_level);
 }
 
-static struct qeth_card *qeth_alloc_card(void)
+static struct qeth_card *qeth_alloc_card(struct ccwgroup_device *gdev)
 {
 	struct qeth_card *card;
 
@@ -1504,6 +1513,11 @@ static struct qeth_card *qeth_alloc_card(void)
 	if (!card)
 		goto out;
 	QETH_DBF_HEX(SETUP, 2, &card, sizeof(void *));
+
+	card->gdev = gdev;
+	CARD_RDEV(card) = gdev->cdev[0];
+	CARD_WDEV(card) = gdev->cdev[1];
+	CARD_DDEV(card) = gdev->cdev[2];
 	if (qeth_setup_channel(&card->read, true))
 		goto out_ip;
 	if (qeth_setup_channel(&card->write, true))
@@ -5773,7 +5787,7 @@ static int qeth_core_probe_device(struct ccwgroup_device *gdev)
 
 	QETH_DBF_TEXT_(SETUP, 2, "%s", dev_name(&gdev->dev));
 
-	card = qeth_alloc_card();
+	card = qeth_alloc_card(gdev);
 	if (!card) {
 		QETH_DBF_TEXT_(SETUP, 2, "1err%d", -ENOMEM);
 		rc = -ENOMEM;
@@ -5789,15 +5803,7 @@ static int qeth_core_probe_device(struct ccwgroup_device *gdev)
 			goto err_card;
 	}
 
-	card->read.ccwdev  = gdev->cdev[0];
-	card->write.ccwdev = gdev->cdev[1];
-	card->data.ccwdev  = gdev->cdev[2];
 	dev_set_drvdata(&gdev->dev, card);
-	card->gdev = gdev;
-	gdev->cdev[0]->handler = qeth_irq;
-	gdev->cdev[1]->handler = qeth_irq;
-	gdev->cdev[2]->handler = qeth_irq;
-
 	qeth_setup_card(card);
 	qeth_update_from_chp_desc(card);
 
-- 
2.16.4

^ permalink raw reply related	[flat|nested] 17+ messages in thread

* [PATCH net-next 12/15] s390/qeth: invoke softirqs after napi_schedule()
  2018-09-17 15:35 [PATCH net-next 00/15] s390/qeth: updates 2018-09-17 Julian Wiedmann
                   ` (10 preceding siblings ...)
  2018-09-17 15:36 ` [PATCH net-next 11/15] s390/qeth: uninstall IRQ handler on device removal Julian Wiedmann
@ 2018-09-17 15:36 ` Julian Wiedmann
  2018-09-17 15:36 ` [PATCH net-next 13/15] s390/qeth: fix typo in return value Julian Wiedmann
                   ` (3 subsequent siblings)
  15 siblings, 0 replies; 17+ messages in thread
From: Julian Wiedmann @ 2018-09-17 15:36 UTC (permalink / raw)
  To: David Miller
  Cc: netdev, linux-s390, Martin Schwidefsky, Heiko Carstens,
	Stefan Raspl, Ursula Braun, Julian Wiedmann

Calling napi_schedule() from process context does not ensure that the
NET_RX softirq is run in a timely fashion. So trigger it manually.

This is no big issue with the current code: a call to ndo_open() is
usually followed by an ndo_set_rx_mode() call, and for qeth this
contains a spin_unlock_bh() - except for OSN, where
qeth_l2_set_rx_mode() bails out early.
Nevertheless it's best not to depend on this behaviour, and to just fix
the issue at its source like all other drivers do. For instance see
commit 83a0c6e58901 ("i40e: Invoke softirqs after napi_reschedule").
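
As a generic sketch of the pattern (the wrapper name is made up, this
is not an existing helper):

  static void napi_schedule_now(struct napi_struct *napi)
  {
	local_bh_disable();
	napi_schedule(napi);	/* raises NET_RX_SOFTIRQ */
	local_bh_enable();	/* runs the pending softirqs on this CPU */
  }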

Fixes: a1c3ed4c9ca0 ("qeth: NAPI support for l2 and l3 discipline")
Signed-off-by: Julian Wiedmann <jwi@linux.ibm.com>
---
 drivers/s390/net/qeth_l2_main.c | 3 +++
 drivers/s390/net/qeth_l3_main.c | 3 +++
 2 files changed, 6 insertions(+)

diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c
index 33b65471a68a..6285af373bdf 100644
--- a/drivers/s390/net/qeth_l2_main.c
+++ b/drivers/s390/net/qeth_l2_main.c
@@ -755,7 +755,10 @@ static int __qeth_l2_open(struct net_device *dev)
 
 	if (qdio_stop_irq(card->data.ccwdev, 0) >= 0) {
 		napi_enable(&card->napi);
+		local_bh_disable();
 		napi_schedule(&card->napi);
+		/* kick-start the NAPI softirq: */
+		local_bh_enable();
 	} else
 		rc = -EIO;
 	return rc;
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
index 5d7e2921ab36..8930d2a9fcad 100644
--- a/drivers/s390/net/qeth_l3_main.c
+++ b/drivers/s390/net/qeth_l3_main.c
@@ -2293,7 +2293,10 @@ static int __qeth_l3_open(struct net_device *dev)
 
 	if (qdio_stop_irq(card->data.ccwdev, 0) >= 0) {
 		napi_enable(&card->napi);
+		local_bh_disable();
 		napi_schedule(&card->napi);
+		/* kick-start the NAPI softirq: */
+		local_bh_enable();
 	} else
 		rc = -EIO;
 	return rc;
-- 
2.16.4

^ permalink raw reply related	[flat|nested] 17+ messages in thread

* [PATCH net-next 13/15] s390/qeth: fix typo in return value
  2018-09-17 15:35 [PATCH net-next 00/15] s390/qeth: updates 2018-09-17 Julian Wiedmann
                   ` (11 preceding siblings ...)
  2018-09-17 15:36 ` [PATCH net-next 12/15] s390/qeth: invoke softirqs after napi_schedule() Julian Wiedmann
@ 2018-09-17 15:36 ` Julian Wiedmann
  2018-09-17 15:36 ` [PATCH net-next 14/15] s390/qeth: fine-tune spinlocks Julian Wiedmann
                   ` (2 subsequent siblings)
  15 siblings, 0 replies; 17+ messages in thread
From: Julian Wiedmann @ 2018-09-17 15:36 UTC (permalink / raw)
  To: David Miller
  Cc: netdev, linux-s390, Martin Schwidefsky, Heiko Carstens,
	Stefan Raspl, Ursula Braun, Julian Wiedmann

Assuming this was just a typo, as returning an actual negative value
from a cmd callback would make no sense either.

Signed-off-by: Julian Wiedmann <jwi@linux.ibm.com>
---
 drivers/s390/net/qeth_core_main.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index b76e844811e1..f09bef4a49ca 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -3058,7 +3058,7 @@ static int qeth_query_ipassists_cb(struct qeth_card *card,
 		QETH_DBF_TEXT(SETUP, 2, "ipaunsup");
 		card->options.ipa4.supported_funcs |= IPA_SETADAPTERPARMS;
 		card->options.ipa6.supported_funcs |= IPA_SETADAPTERPARMS;
-		return -0;
+		return 0;
 	default:
 		if (cmd->hdr.return_code) {
 			QETH_DBF_MESSAGE(1, "%s IPA_CMD_QIPASSIST: Unhandled "
-- 
2.16.4

^ permalink raw reply related	[flat|nested] 17+ messages in thread

* [PATCH net-next 14/15] s390/qeth: fine-tune spinlocks
  2018-09-17 15:35 [PATCH net-next 00/15] s390/qeth: updates 2018-09-17 Julian Wiedmann
                   ` (12 preceding siblings ...)
  2018-09-17 15:36 ` [PATCH net-next 13/15] s390/qeth: fix typo in return value Julian Wiedmann
@ 2018-09-17 15:36 ` Julian Wiedmann
  2018-09-17 15:36 ` [PATCH net-next 15/15] s390/qeth: reduce 0-initializing when building IPA cmds Julian Wiedmann
  2018-09-17 16:10 ` [PATCH net-next 00/15] s390/qeth: updates 2018-09-17 David Miller
  15 siblings, 0 replies; 17+ messages in thread
From: Julian Wiedmann @ 2018-09-17 15:36 UTC (permalink / raw)
  To: David Miller
  Cc: netdev, linux-s390, Martin Schwidefsky, Heiko Carstens,
	Stefan Raspl, Ursula Braun, Julian Wiedmann

For quite a lot of code paths it's obvious that they will never run in
IRQ context. So replace their spin_lock_irqsave() calls with
spin_lock_irq().

While at it, get rid of the redundant card pointer in struct qeth_reply
that was used by qeth_send_control_data() to access the card's lock.
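
For reference, a sketch of when each variant applies (the function and
its arguments are made up for illustration):

  static void example_locked_update(spinlock_t *lock, int *val,
				    bool maybe_irq_context)
  {
	if (maybe_irq_context) {
		unsigned long flags;

		/* preserves the caller's interrupt state */
		spin_lock_irqsave(lock, flags);
		(*val)++;
		spin_unlock_irqrestore(lock, flags);
	} else {
		/* interrupts known to be enabled; unconditionally
		 * re-enabling them on unlock is safe and cheaper
		 */
		spin_lock_irq(lock);
		(*val)++;
		spin_unlock_irq(lock);
	}
  }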

Signed-off-by: Julian Wiedmann <jwi@linux.ibm.com>
---
 drivers/s390/net/qeth_core.h      |  1 -
 drivers/s390/net/qeth_core_main.c | 53 ++++++++++++++++-----------------------
 drivers/s390/net/qeth_l2_main.c   |  5 ++--
 3 files changed, 24 insertions(+), 35 deletions(-)

diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h
index be213b5c2552..0dbe81f958f0 100644
--- a/drivers/s390/net/qeth_core.h
+++ b/drivers/s390/net/qeth_core.h
@@ -639,7 +639,6 @@ struct qeth_reply {
 	atomic_t received;
 	int rc;
 	void *param;
-	struct qeth_card *card;
 	refcount_t refcnt;
 };
 
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index f09bef4a49ca..100b6f4a3fb8 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -592,7 +592,6 @@ static struct qeth_reply *qeth_alloc_reply(struct qeth_card *card)
 	if (reply) {
 		refcount_set(&reply->refcnt, 1);
 		atomic_set(&reply->received, 0);
-		reply->card = card;
 	}
 	return reply;
 }
@@ -1541,15 +1540,14 @@ static struct qeth_card *qeth_alloc_card(struct ccwgroup_device *gdev)
 
 static int qeth_clear_channel(struct qeth_channel *channel)
 {
-	unsigned long flags;
 	struct qeth_card *card;
 	int rc;
 
 	card = CARD_FROM_CDEV(channel->ccwdev);
 	QETH_CARD_TEXT(card, 3, "clearch");
-	spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags);
+	spin_lock_irq(get_ccwdev_lock(channel->ccwdev));
 	rc = ccw_device_clear(channel->ccwdev, QETH_CLEAR_CHANNEL_PARM);
-	spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags);
+	spin_unlock_irq(get_ccwdev_lock(channel->ccwdev));
 
 	if (rc)
 		return rc;
@@ -1565,15 +1563,14 @@ static int qeth_clear_channel(struct qeth_channel *channel)
 
 static int qeth_halt_channel(struct qeth_channel *channel)
 {
-	unsigned long flags;
 	struct qeth_card *card;
 	int rc;
 
 	card = CARD_FROM_CDEV(channel->ccwdev);
 	QETH_CARD_TEXT(card, 3, "haltch");
-	spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags);
+	spin_lock_irq(get_ccwdev_lock(channel->ccwdev));
 	rc = ccw_device_halt(channel->ccwdev, QETH_HALT_CHANNEL_PARM);
-	spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags);
+	spin_unlock_irq(get_ccwdev_lock(channel->ccwdev));
 
 	if (rc)
 		return rc;
@@ -1667,7 +1664,6 @@ static int qeth_read_conf_data(struct qeth_card *card, void **buffer,
 	char *rcd_buf;
 	int ret;
 	struct qeth_channel *channel = &card->data;
-	unsigned long flags;
 
 	/*
 	 * scan for RCD command in extended SenseID data
@@ -1681,11 +1677,11 @@ static int qeth_read_conf_data(struct qeth_card *card, void **buffer,
 
 	qeth_setup_ccw(channel->ccw, ciw->cmd, ciw->count, rcd_buf);
 	channel->state = CH_STATE_RCD;
-	spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags);
+	spin_lock_irq(get_ccwdev_lock(channel->ccwdev));
 	ret = ccw_device_start_timeout(channel->ccwdev, channel->ccw,
 				       QETH_RCD_PARM, LPM_ANYPATH, 0,
 				       QETH_RCD_TIMEOUT);
-	spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags);
+	spin_unlock_irq(get_ccwdev_lock(channel->ccwdev));
 	if (!ret)
 		wait_event(card->wait_q,
 			   (channel->state == CH_STATE_RCD_DONE ||
@@ -1843,7 +1839,6 @@ static int qeth_idx_activate_get_answer(struct qeth_channel *channel,
 			struct qeth_cmd_buffer *))
 {
 	struct qeth_cmd_buffer *iob;
-	unsigned long flags;
 	int rc;
 	struct qeth_card *card;
 
@@ -1858,10 +1853,10 @@ static int qeth_idx_activate_get_answer(struct qeth_channel *channel,
 	wait_event(card->wait_q,
 		   atomic_cmpxchg(&channel->irq_pending, 0, 1) == 0);
 	QETH_DBF_TEXT(SETUP, 6, "noirqpnd");
-	spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags);
+	spin_lock_irq(get_ccwdev_lock(channel->ccwdev));
 	rc = ccw_device_start_timeout(channel->ccwdev, channel->ccw,
 				      (addr_t) iob, 0, 0, QETH_TIMEOUT);
-	spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags);
+	spin_unlock_irq(get_ccwdev_lock(channel->ccwdev));
 
 	if (rc) {
 		QETH_DBF_MESSAGE(2, "Error2 in activating channel rc=%d\n", rc);
@@ -1888,7 +1883,6 @@ static int qeth_idx_activate_channel(struct qeth_channel *channel,
 {
 	struct qeth_card *card;
 	struct qeth_cmd_buffer *iob;
-	unsigned long flags;
 	__u16 temp;
 	__u8 tmp;
 	int rc;
@@ -1928,10 +1922,10 @@ static int qeth_idx_activate_channel(struct qeth_channel *channel,
 	wait_event(card->wait_q,
 		   atomic_cmpxchg(&channel->irq_pending, 0, 1) == 0);
 	QETH_DBF_TEXT(SETUP, 6, "noirqpnd");
-	spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags);
+	spin_lock_irq(get_ccwdev_lock(channel->ccwdev));
 	rc = ccw_device_start_timeout(channel->ccwdev, channel->ccw,
 				      (addr_t) iob, 0, 0, QETH_TIMEOUT);
-	spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags);
+	spin_unlock_irq(get_ccwdev_lock(channel->ccwdev));
 
 	if (rc) {
 		QETH_DBF_MESSAGE(2, "Error1 in activating channel. rc=%d\n",
@@ -2112,7 +2106,6 @@ int qeth_send_control_data(struct qeth_card *card, int len,
 {
 	struct qeth_channel *channel = iob->channel;
 	int rc;
-	unsigned long flags;
 	struct qeth_reply *reply = NULL;
 	unsigned long timeout, event_timeout;
 	struct qeth_ipa_cmd *cmd = NULL;
@@ -2145,26 +2138,26 @@ int qeth_send_control_data(struct qeth_card *card, int len,
 	}
 	qeth_prepare_control_data(card, len, iob);
 
-	spin_lock_irqsave(&card->lock, flags);
+	spin_lock_irq(&card->lock);
 	list_add_tail(&reply->list, &card->cmd_waiter_list);
-	spin_unlock_irqrestore(&card->lock, flags);
+	spin_unlock_irq(&card->lock);
 
 	timeout = jiffies + event_timeout;
 
 	QETH_CARD_TEXT(card, 6, "noirqpnd");
-	spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags);
+	spin_lock_irq(get_ccwdev_lock(channel->ccwdev));
 	rc = ccw_device_start_timeout(channel->ccwdev, channel->ccw,
 				      (addr_t) iob, 0, 0, event_timeout);
-	spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags);
+	spin_unlock_irq(get_ccwdev_lock(channel->ccwdev));
 	if (rc) {
 		QETH_DBF_MESSAGE(2, "%s qeth_send_control_data: "
 			"ccw_device_start rc = %i\n",
 			dev_name(&channel->ccwdev->dev), rc);
 		QETH_CARD_TEXT_(card, 2, " err%d", rc);
-		spin_lock_irqsave(&card->lock, flags);
+		spin_lock_irq(&card->lock);
 		list_del_init(&reply->list);
 		qeth_put_reply(reply);
-		spin_unlock_irqrestore(&card->lock, flags);
+		spin_unlock_irq(&card->lock);
 		qeth_release_buffer(channel, iob);
 		atomic_set(&channel->irq_pending, 0);
 		wake_up(&card->wait_q);
@@ -2192,9 +2185,9 @@ int qeth_send_control_data(struct qeth_card *card, int len,
 
 time_err:
 	reply->rc = -ETIME;
-	spin_lock_irqsave(&reply->card->lock, flags);
+	spin_lock_irq(&card->lock);
 	list_del_init(&reply->list);
-	spin_unlock_irqrestore(&reply->card->lock, flags);
+	spin_unlock_irq(&card->lock);
 	atomic_inc(&reply->received);
 	rc = reply->rc;
 	qeth_put_reply(reply);
@@ -5776,7 +5769,6 @@ static int qeth_core_probe_device(struct ccwgroup_device *gdev)
 	struct device *dev;
 	int rc;
 	enum qeth_discipline_id enforced_disc;
-	unsigned long flags;
 	char dbf_name[DBF_NAME_LEN];
 
 	QETH_DBF_TEXT(SETUP, 2, "probedev");
@@ -5834,9 +5826,9 @@ static int qeth_core_probe_device(struct ccwgroup_device *gdev)
 		break;
 	}
 
-	write_lock_irqsave(&qeth_core_card_list.rwlock, flags);
+	write_lock_irq(&qeth_core_card_list.rwlock);
 	list_add_tail(&card->list, &qeth_core_card_list.list);
-	write_unlock_irqrestore(&qeth_core_card_list.rwlock, flags);
+	write_unlock_irq(&qeth_core_card_list.rwlock);
 	return 0;
 
 err_disc:
@@ -5852,7 +5844,6 @@ static int qeth_core_probe_device(struct ccwgroup_device *gdev)
 
 static void qeth_core_remove_device(struct ccwgroup_device *gdev)
 {
-	unsigned long flags;
 	struct qeth_card *card = dev_get_drvdata(&gdev->dev);
 
 	QETH_DBF_TEXT(SETUP, 2, "removedv");
@@ -5862,9 +5853,9 @@ static void qeth_core_remove_device(struct ccwgroup_device *gdev)
 		qeth_core_free_discipline(card);
 	}
 
-	write_lock_irqsave(&qeth_core_card_list.rwlock, flags);
+	write_lock_irq(&qeth_core_card_list.rwlock);
 	list_del(&card->list);
-	write_unlock_irqrestore(&qeth_core_card_list.rwlock, flags);
+	write_unlock_irq(&qeth_core_card_list.rwlock);
 	free_netdev(card->dev);
 	qeth_core_free_card(card);
 	dev_set_drvdata(&gdev->dev, NULL);
diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c
index 6285af373bdf..12858ffa28cf 100644
--- a/drivers/s390/net/qeth_l2_main.c
+++ b/drivers/s390/net/qeth_l2_main.c
@@ -1209,7 +1209,6 @@ static int qeth_osn_send_control_data(struct qeth_card *card, int len,
 			   struct qeth_cmd_buffer *iob)
 {
 	struct qeth_channel *channel = iob->channel;
-	unsigned long flags;
 	int rc = 0;
 
 	QETH_CARD_TEXT(card, 5, "osndctrd");
@@ -1218,10 +1217,10 @@ static int qeth_osn_send_control_data(struct qeth_card *card, int len,
 		   atomic_cmpxchg(&channel->irq_pending, 0, 1) == 0);
 	qeth_prepare_control_data(card, len, iob);
 	QETH_CARD_TEXT(card, 6, "osnoirqp");
-	spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags);
+	spin_lock_irq(get_ccwdev_lock(channel->ccwdev));
 	rc = ccw_device_start_timeout(channel->ccwdev, channel->ccw,
 				      (addr_t) iob, 0, 0, QETH_IPA_TIMEOUT);
-	spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags);
+	spin_unlock_irq(get_ccwdev_lock(channel->ccwdev));
 	if (rc) {
 		QETH_DBF_MESSAGE(2, "qeth_osn_send_control_data: "
 			   "ccw_device_start rc = %i\n", rc);
-- 
2.16.4

^ permalink raw reply related	[flat|nested] 17+ messages in thread

* [PATCH net-next 15/15] s390/qeth: reduce 0-initializing when building IPA cmds
  2018-09-17 15:35 [PATCH net-next 00/15] s390/qeth: updates 2018-09-17 Julian Wiedmann
                   ` (13 preceding siblings ...)
  2018-09-17 15:36 ` [PATCH net-next 14/15] s390/qeth: fine-tune spinlocks Julian Wiedmann
@ 2018-09-17 15:36 ` Julian Wiedmann
  2018-09-17 16:10 ` [PATCH net-next 00/15] s390/qeth: updates 2018-09-17 David Miller
  15 siblings, 0 replies; 17+ messages in thread
From: Julian Wiedmann @ 2018-09-17 15:36 UTC (permalink / raw)
  To: David Miller
  Cc: netdev, linux-s390, Martin Schwidefsky, Heiko Carstens,
	Stefan Raspl, Ursula Braun, Julian Wiedmann

qeth_get_ipacmd_buffer() obtains its buffers for building IPA cmds from
__qeth_get_buffer(), where they are fully cleared. So get rid of all the
additional zero-ing in various other places.

Signed-off-by: Julian Wiedmann <jwi@linux.ibm.com>
---
 drivers/s390/net/qeth_core_main.c | 15 +++++----------
 1 file changed, 5 insertions(+), 10 deletions(-)

diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index 100b6f4a3fb8..89e09e7b8fff 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -780,7 +780,6 @@ void qeth_release_buffer(struct qeth_channel *channel,
 
 	QETH_CARD_TEXT(CARD_FROM_CDEV(channel->ccwdev), 6, "relbuff");
 	spin_lock_irqsave(&channel->iob_lock, flags);
-	memset(iob->data, 0, QETH_BUFSIZE);
 	iob->state = BUF_STATE_FREE;
 	iob->callback = qeth_send_control_data_cb;
 	iob->rc = 0;
@@ -1334,8 +1333,8 @@ static int qeth_setup_channel(struct qeth_channel *channel, bool alloc_buffers)
 		return 0;
 
 	for (cnt = 0; cnt < QETH_CMD_BUFFER_NO; cnt++) {
-		channel->iob[cnt].data =
-			kzalloc(QETH_BUFSIZE, GFP_DMA|GFP_KERNEL);
+		channel->iob[cnt].data = kmalloc(QETH_BUFSIZE,
+						 GFP_KERNEL | GFP_DMA);
 		if (channel->iob[cnt].data == NULL)
 			break;
 		channel->iob[cnt].state = BUF_STATE_FREE;
@@ -2888,10 +2887,10 @@ static __u8 qeth_get_ipa_adp_type(enum qeth_link_types link_type)
 }
 
 static void qeth_fill_ipacmd_header(struct qeth_card *card,
-		struct qeth_ipa_cmd *cmd, __u8 command,
-		enum qeth_prot_versions prot)
+				    struct qeth_ipa_cmd *cmd,
+				    enum qeth_ipa_cmds command,
+				    enum qeth_prot_versions prot)
 {
-	memset(cmd, 0, sizeof(struct qeth_ipa_cmd));
 	cmd->hdr.command = command;
 	cmd->hdr.initiator = IPA_CMD_INITIATOR_HOST;
 	/* cmd->hdr.seqno is set by qeth_send_control_data() */
@@ -2903,8 +2902,6 @@ static void qeth_fill_ipacmd_header(struct qeth_card *card,
 		cmd->hdr.prim_version_no = 1;
 	cmd->hdr.param_count = 1;
 	cmd->hdr.prot_version = prot;
-	cmd->hdr.ipa_supported = 0;
-	cmd->hdr.ipa_enabled = 0;
 }
 
 struct qeth_cmd_buffer *qeth_get_ipacmd_buffer(struct qeth_card *card,
@@ -5494,8 +5491,6 @@ struct qeth_cmd_buffer *qeth_get_setassparms_cmd(struct qeth_card *card,
 		cmd->data.setassparms.hdr.assist_no = ipa_func;
 		cmd->data.setassparms.hdr.length = 8 + len;
 		cmd->data.setassparms.hdr.command_code = cmd_code;
-		cmd->data.setassparms.hdr.return_code = 0;
-		cmd->data.setassparms.hdr.seq_no = 0;
 	}
 
 	return iob;
-- 
2.16.4

^ permalink raw reply related	[flat|nested] 17+ messages in thread

* Re: [PATCH net-next 00/15] s390/qeth: updates 2018-09-17
  2018-09-17 15:35 [PATCH net-next 00/15] s390/qeth: updates 2018-09-17 Julian Wiedmann
                   ` (14 preceding siblings ...)
  2018-09-17 15:36 ` [PATCH net-next 15/15] s390/qeth: reduce 0-initializing when building IPA cmds Julian Wiedmann
@ 2018-09-17 16:10 ` David Miller
  15 siblings, 0 replies; 17+ messages in thread
From: David Miller @ 2018-09-17 16:10 UTC (permalink / raw)
  To: jwi; +Cc: netdev, linux-s390, schwidefsky, heiko.carstens, raspl, ubraun

From: Julian Wiedmann <jwi@linux.ibm.com>
Date: Mon, 17 Sep 2018 17:35:54 +0200

> please apply the following patchset to net-next. This brings more restructuring
> of qeth's transmit code (eliminating its last usage of skb_realloc_headroom()),
> and the usual mix of minor improvements & cleanups.

Series applied, thank you.

^ permalink raw reply	[flat|nested] 17+ messages in thread
