All of lore.kernel.org
 help / color / mirror / Atom feed
* [Crypto v4 03/12] support for inline tls
@ 2018-02-12 12:04 Atul Gupta
  2018-02-12 19:48 ` David Miller
  0 siblings, 1 reply; 3+ messages in thread
From: Atul Gupta @ 2018-02-12 12:04 UTC (permalink / raw)
  To: davejwatson, herbert; +Cc: sd, linux-crypto, davem, netdev, ganeshgr

Signed-off-by: Atul Gupta <atul.gupta@chelsio.com>
---
 net/tls/tls_main.c | 113 +++++++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 113 insertions(+)

diff --git a/net/tls/tls_main.c b/net/tls/tls_main.c
index e07ee3a..10a6d5d 100644
--- a/net/tls/tls_main.c
+++ b/net/tls/tls_main.c
@@ -38,6 +38,7 @@
 #include <linux/highmem.h>
 #include <linux/netdevice.h>
 #include <linux/sched/signal.h>
+#include <linux/inetdevice.h>
 
 #include <net/tls.h>
 
@@ -48,9 +49,12 @@
 enum {
 	TLS_BASE_TX,
 	TLS_SW_TX,
+	TLS_FULL_HW, /* TLS record processed Inline */
 	TLS_NUM_CONFIG,
 };
 
+static LIST_HEAD(device_list);
+static DEFINE_MUTEX(device_mutex);
 static struct proto tls_prots[TLS_NUM_CONFIG];
 
 static inline void update_sk_prot(struct sock *sk, struct tls_context *ctx)
@@ -448,6 +452,92 @@ static int tls_setsockopt(struct sock *sk, int level, int optname,
 	return do_tls_setsockopt(sk, optname, optval, optlen);
 }
 
+static struct net_device *find_netdev(struct sock *sk)
+{
+	struct net_device *netdev = NULL;
+
+	netdev = __ip_dev_find(&init_net, inet_sk(sk)->inet_rcv_saddr, false);
+	return netdev;
+}
+
+static int get_tls_prot(struct sock *sk)
+{
+	struct tls_context *ctx = tls_get_ctx(sk);
+	struct net_device *netdev;
+	struct tls_device *dev;
+
+	/* Device bound to specific IP */
+	if (inet_sk(sk)->inet_rcv_saddr) {
+		netdev = find_netdev(sk);
+		if (!netdev)
+			goto out;
+
+		/* Device supports Inline record processing */
+		if (!(netdev->features & NETIF_F_HW_TLS_INLINE))
+			goto out;
+
+		mutex_lock(&device_mutex);
+		list_for_each_entry(dev, &device_list, dev_list) {
+			if (dev->netdev && dev->netdev(dev, netdev))
+				break;
+		}
+		mutex_unlock(&device_mutex);
+
+		ctx->tx_conf = TLS_FULL_HW;
+		if (dev->prot)
+			dev->prot(dev, sk);
+	} else { /* src address not known or INADDR_ANY */
+		mutex_lock(&device_mutex);
+		list_for_each_entry(dev, &device_list, dev_list) {
+			if (dev->feature && dev->feature(dev)) {
+				ctx->tx_conf = TLS_FULL_HW;
+				break;
+			}
+		}
+		mutex_unlock(&device_mutex);
+		update_sk_prot(sk, ctx);
+	}
+out:
+	return ctx->tx_conf;
+}
+
+static int tls_hw_prot(struct sock *sk)
+{
+	/* search registered tls device for netdev */
+	return get_tls_prot(sk);
+}
+
+static void tls_hw_unhash(struct sock *sk)
+{
+	struct tls_device *dev;
+
+	mutex_lock(&device_mutex);
+	list_for_each_entry(dev, &device_list, dev_list) {
+		if (dev->unhash)
+			dev->unhash(dev, sk);
+	}
+	mutex_unlock(&device_mutex);
+	tcp_prot.unhash(sk);
+}
+
+static int tls_hw_hash(struct sock *sk)
+{
+	struct tls_device *dev;
+	int err;
+
+	err = tcp_prot.hash(sk);
+	mutex_lock(&device_mutex);
+	list_for_each_entry(dev, &device_list, dev_list) {
+		if (dev->hash)
+			err |= dev->hash(dev, sk);
+	}
+	mutex_unlock(&device_mutex);
+
+	if (err)
+		tls_hw_unhash(sk);
+	return err;
+}
+
 static int tls_init(struct sock *sk)
 {
 	struct inet_connection_sock *icsk = inet_csk(sk);
@@ -466,6 +556,9 @@ static int tls_init(struct sock *sk)
 	ctx->sk_proto_close = sk->sk_prot->close;
 
 	ctx->tx_conf = TLS_BASE_TX;
+	if (tls_hw_prot(sk) == TLS_FULL_HW)
+		goto out;
+
 	update_sk_prot(sk, ctx);
 out:
 	return rc;
@@ -487,7 +580,27 @@ static void build_protos(struct proto *prot, struct proto *base)
 	prot[TLS_SW_TX] = prot[TLS_BASE_TX];
 	prot[TLS_SW_TX].sendmsg		= tls_sw_sendmsg;
 	prot[TLS_SW_TX].sendpage	= tls_sw_sendpage;
+
+	prot[TLS_FULL_HW]               = prot[TLS_BASE_TX];
+	prot[TLS_FULL_HW].hash          = tls_hw_hash;
+	prot[TLS_FULL_HW].unhash        = tls_hw_unhash;
+}
+
+void tls_register_device(struct tls_device *device)
+{
+	mutex_lock(&device_mutex);
+	list_add_tail(&device->dev_list, &device_list);
+	mutex_unlock(&device_mutex);
+}
+EXPORT_SYMBOL(tls_register_device);
+
+void tls_unregister_device(struct tls_device *device)
+{
+	mutex_lock(&device_mutex);
+	list_del(&device->dev_list);
+	mutex_unlock(&device_mutex);
 }
+EXPORT_SYMBOL(tls_unregister_device);
 
 static int __init tls_register(void)
 {
-- 
1.8.3.1

^ permalink raw reply related	[flat|nested] 3+ messages in thread

* Re: [Crypto v4 03/12] support for inline tls
  2018-02-12 12:04 [Crypto v4 03/12] support for inline tls Atul Gupta
@ 2018-02-12 19:48 ` David Miller
  2018-02-13 18:20   ` Atul Gupta
  0 siblings, 1 reply; 3+ messages in thread
From: David Miller @ 2018-02-12 19:48 UTC (permalink / raw)
  To: atul.gupta; +Cc: davejwatson, herbert, sd, linux-crypto, netdev, ganeshgr

From: Atul Gupta <atul.gupta@chelsio.com>
Date: Mon, 12 Feb 2018 17:34:28 +0530

> +static int get_tls_prot(struct sock *sk)
> +{
> +	struct tls_context *ctx = tls_get_ctx(sk);
> +	struct net_device *netdev;
> +	struct tls_device *dev;
> +
> +	/* Device bound to specific IP */
> +	if (inet_sk(sk)->inet_rcv_saddr) {
> +		netdev = find_netdev(sk);
> +		if (!netdev)
> +			goto out;
> +
> +		/* Device supports Inline record processing */
> +		if (!(netdev->features & NETIF_F_HW_TLS_INLINE))
> +			goto out;
> +
> +		mutex_lock(&device_mutex);
> +		list_for_each_entry(dev, &device_list, dev_list) {
> +			if (dev->netdev && dev->netdev(dev, netdev))
> +				break;
> +		}
> +		mutex_unlock(&device_mutex);
> +
> +		ctx->tx_conf = TLS_FULL_HW;
> +		if (dev->prot)
> +			dev->prot(dev, sk);

What if the same IP address is configured on multiple interfaces?

> +	} else { /* src address not known or INADDR_ANY */
> +		mutex_lock(&device_mutex);
> +		list_for_each_entry(dev, &device_list, dev_list) {
> +			if (dev->feature && dev->feature(dev)) {
> +				ctx->tx_conf = TLS_FULL_HW;
> +				break;
> +			}
> +		}
> +		mutex_unlock(&device_mutex);
> +		update_sk_prot(sk, ctx);

And I think this is even more of a stretch.  Just because you find
an inline TLS device on the global list doesn't mean traffic will
necessarily flow through it once the connection is fully established
and therefore be able to provide inline TLS offloading.

^ permalink raw reply	[flat|nested] 3+ messages in thread

* RE: [Crypto v4 03/12] support for inline tls
  2018-02-12 19:48 ` David Miller
@ 2018-02-13 18:20   ` Atul Gupta
  0 siblings, 0 replies; 3+ messages in thread
From: Atul Gupta @ 2018-02-13 18:20 UTC (permalink / raw)
  To: David Miller; +Cc: davejwatson, herbert, sd, linux-crypto, netdev, Ganesh GR



-----Original Message-----
From: David Miller [mailto:davem@davemloft.net] 
Sent: Tuesday, February 13, 2018 1:19 AM
To: Atul Gupta <atul.gupta@chelsio.com>
Cc: davejwatson@fb.com; herbert@gondor.apana.org.au; sd@queasysnail.net; linux-crypto@vger.kernel.org; netdev@vger.kernel.org; Ganesh GR <ganeshgr@chelsio.com>
Subject: Re: [Crypto v4 03/12] support for inline tls

From: Atul Gupta <atul.gupta@chelsio.com>
Date: Mon, 12 Feb 2018 17:34:28 +0530

> +static int get_tls_prot(struct sock *sk) {
> +	struct tls_context *ctx = tls_get_ctx(sk);
> +	struct net_device *netdev;
> +	struct tls_device *dev;
> +
> +	/* Device bound to specific IP */
> +	if (inet_sk(sk)->inet_rcv_saddr) {
> +		netdev = find_netdev(sk);
> +		if (!netdev)
> +			goto out;
> +
> +		/* Device supports Inline record processing */
> +		if (!(netdev->features & NETIF_F_HW_TLS_INLINE))
> +			goto out;
> +
> +		mutex_lock(&device_mutex);
> +		list_for_each_entry(dev, &device_list, dev_list) {
> +			if (dev->netdev && dev->netdev(dev, netdev))
> +				break;
> +		}
> +		mutex_unlock(&device_mutex);
> +
> +		ctx->tx_conf = TLS_FULL_HW;
> +		if (dev->prot)
> +			dev->prot(dev, sk);

What if the same IP address is configured on multiple interfaces?

Thanks, I overlooked this point.
The checks above were based on the premise that the device chosen is indeed the one with Inline TLS enabled: the net_device corresponds to the specific IP address, the feature is configured for the device via ethtool, and the net_device corresponds to an Inline TLS driver registered with net tls.

The case with the same IP configured on multiple interfaces looks similar to the INADDR_ANY case below.

The TLS_FULL_HW and modified hash routines handle devices with and without Inline TLS support. The first Inline TLS capable device updates sk_prot for TLS_FULL_HW. tls_hw_hash listens on all interfaces and runs the device-specific routine; the listen, however, may succeed on a device on which the connect is initiated but which lacks the Inline TLS capability, and such a connection continues in TLS_BASE_TX (non-tls-offload) mode. On the other hand, if an Inline TLS capable device establishes the connection, it updates the prot as required for offload mode to continue.

> +	} else { /* src address not known or INADDR_ANY */
> +		mutex_lock(&device_mutex);
> +		list_for_each_entry(dev, &device_list, dev_list) {
> +			if (dev->feature && dev->feature(dev)) {
> +				ctx->tx_conf = TLS_FULL_HW;
> +				break;
> +			}
> +		}
> +		mutex_unlock(&device_mutex);
> +		update_sk_prot(sk, ctx);

And I think this is even more of a stretch.  Just because you find an inline TLS device on the global list doesn't mean traffic will necessarily flow through it once the connection is fully established and therefore be able to provide inline TLS offloading.

^ permalink raw reply	[flat|nested] 3+ messages in thread

end of thread, other threads:[~2018-02-13 18:20 UTC | newest]

Thread overview: 3+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2018-02-12 12:04 [Crypto v4 03/12] support for inline tls Atul Gupta
2018-02-12 19:48 ` David Miller
2018-02-13 18:20   ` Atul Gupta

This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.