All of lore.kernel.org
 help / color / mirror / Atom feed
* [PATCH net] tcp: ensure proper barriers in lockless contexts
@ 2015-11-12 16:43 Eric Dumazet
  2015-11-15  1:20 ` Herbert Xu
  2015-11-15 23:37 ` David Miller
  0 siblings, 2 replies; 7+ messages in thread
From: Eric Dumazet @ 2015-11-12 16:43 UTC (permalink / raw)
  To: David Miller; +Cc: netdev

From: Eric Dumazet <edumazet@google.com>

Some functions access TCP sockets without holding a lock and
might output non consistent data, depending on compiler and or
architecture.

tcp_diag_get_info(), tcp_get_info(), tcp_poll(), get_tcp4_sock() ...

Introduce sk_state_load() and sk_state_store() to fix the issues,
and more clearly document where this lack of locking is happening.

Signed-off-by: Eric Dumazet <edumazet@google.com>
---
 include/net/sock.h              |   25 +++++++++++++++++++++++++
 net/ipv4/inet_connection_sock.c |    4 ++--
 net/ipv4/tcp.c                  |   21 +++++++++++----------
 net/ipv4/tcp_diag.c             |    2 +-
 net/ipv4/tcp_ipv4.c             |   14 ++++++++------
 net/ipv6/tcp_ipv6.c             |   19 +++++++++++++++----
 6 files changed, 62 insertions(+), 23 deletions(-)

diff --git a/include/net/sock.h b/include/net/sock.h
index bbf7c2cf15b4..7f89e4ba18d1 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -2226,6 +2226,31 @@ static inline bool sk_listener(const struct sock *sk)
 	return (1 << sk->sk_state) & (TCPF_LISTEN | TCPF_NEW_SYN_RECV);
 }
 
+/**
+ * sk_state_load - read sk->sk_state for lockless contexts
+ * @sk: socket pointer
+ *
+ * Paired with sk_state_store(). Used in places we do not hold socket lock :
+ * tcp_diag_get_info(), tcp_get_info(), tcp_poll(), get_tcp4_sock() ...
+ */
+static inline int sk_state_load(const struct sock *sk)
+{
+	return smp_load_acquire(&sk->sk_state);
+}
+
+/**
+ * sk_state_store - update sk->sk_state
+ * @sk: socket pointer
+ * @newstate: new state
+ *
+ * Paired with sk_state_load(). Should be used in contexts where
+ * state change might impact lockless readers.
+ */
+static inline void sk_state_store(struct sock *sk, int newstate)
+{
+	smp_store_release(&sk->sk_state, newstate);
+}
+
 void sock_enable_timestamp(struct sock *sk, int flag);
 int sock_get_timestamp(struct sock *, struct timeval __user *);
 int sock_get_timestampns(struct sock *, struct timespec __user *);
diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
index 1feb15f23de8..46b9c887bede 100644
--- a/net/ipv4/inet_connection_sock.c
+++ b/net/ipv4/inet_connection_sock.c
@@ -563,7 +563,7 @@ static void reqsk_timer_handler(unsigned long data)
 	int max_retries, thresh;
 	u8 defer_accept;
 
-	if (sk_listener->sk_state != TCP_LISTEN)
+	if (sk_state_load(sk_listener) != TCP_LISTEN)
 		goto drop;
 
 	max_retries = icsk->icsk_syn_retries ? : sysctl_tcp_synack_retries;
@@ -749,7 +749,7 @@ int inet_csk_listen_start(struct sock *sk, int backlog)
 	 * It is OK, because this socket enters to hash table only
 	 * after validation is complete.
 	 */
-	sk->sk_state = TCP_LISTEN;
+	sk_state_store(sk, TCP_LISTEN);
 	if (!sk->sk_prot->get_port(sk, inet->inet_num)) {
 		inet->inet_sport = htons(inet->inet_num);
 
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 0cfa7c0c1e80..c1728771cf89 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -451,11 +451,14 @@ unsigned int tcp_poll(struct file *file, struct socket *sock, poll_table *wait)
 	unsigned int mask;
 	struct sock *sk = sock->sk;
 	const struct tcp_sock *tp = tcp_sk(sk);
+	int state;
 
 	sock_rps_record_flow(sk);
 
 	sock_poll_wait(file, sk_sleep(sk), wait);
-	if (sk->sk_state == TCP_LISTEN)
+
+	state = sk_state_load(sk);
+	if (state == TCP_LISTEN)
 		return inet_csk_listen_poll(sk);
 
 	/* Socket is not locked. We are protected from async events
@@ -492,14 +495,14 @@ unsigned int tcp_poll(struct file *file, struct socket *sock, poll_table *wait)
 	 * NOTE. Check for TCP_CLOSE is added. The goal is to prevent
 	 * blocking on fresh not-connected or disconnected socket. --ANK
 	 */
-	if (sk->sk_shutdown == SHUTDOWN_MASK || sk->sk_state == TCP_CLOSE)
+	if (sk->sk_shutdown == SHUTDOWN_MASK || state == TCP_CLOSE)
 		mask |= POLLHUP;
 	if (sk->sk_shutdown & RCV_SHUTDOWN)
 		mask |= POLLIN | POLLRDNORM | POLLRDHUP;
 
 	/* Connected or passive Fast Open socket? */
-	if (sk->sk_state != TCP_SYN_SENT &&
-	    (sk->sk_state != TCP_SYN_RECV || tp->fastopen_rsk)) {
+	if (state != TCP_SYN_SENT &&
+	    (state != TCP_SYN_RECV || tp->fastopen_rsk)) {
 		int target = sock_rcvlowat(sk, 0, INT_MAX);
 
 		if (tp->urg_seq == tp->copied_seq &&
@@ -507,9 +510,6 @@ unsigned int tcp_poll(struct file *file, struct socket *sock, poll_table *wait)
 		    tp->urg_data)
 			target++;
 
-		/* Potential race condition. If read of tp below will
-		 * escape above sk->sk_state, we can be illegally awaken
-		 * in SYN_* states. */
 		if (tp->rcv_nxt - tp->copied_seq >= target)
 			mask |= POLLIN | POLLRDNORM;
 
@@ -1934,7 +1934,7 @@ void tcp_set_state(struct sock *sk, int state)
 	/* Change state AFTER socket is unhashed to avoid closed
 	 * socket sitting in hash tables.
 	 */
-	sk->sk_state = state;
+	sk_state_store(sk, state);
 
 #ifdef STATE_TRACE
 	SOCK_DEBUG(sk, "TCP sk=%p, State %s -> %s\n", sk, statename[oldstate], statename[state]);
@@ -2644,7 +2644,8 @@ void tcp_get_info(struct sock *sk, struct tcp_info *info)
 	if (sk->sk_type != SOCK_STREAM)
 		return;
 
-	info->tcpi_state = sk->sk_state;
+	info->tcpi_state = sk_state_load(sk);
+
 	info->tcpi_ca_state = icsk->icsk_ca_state;
 	info->tcpi_retransmits = icsk->icsk_retransmits;
 	info->tcpi_probes = icsk->icsk_probes_out;
@@ -2672,7 +2673,7 @@ void tcp_get_info(struct sock *sk, struct tcp_info *info)
 	info->tcpi_snd_mss = tp->mss_cache;
 	info->tcpi_rcv_mss = icsk->icsk_ack.rcv_mss;
 
-	if (sk->sk_state == TCP_LISTEN) {
+	if (info->tcpi_state == TCP_LISTEN) {
 		info->tcpi_unacked = sk->sk_ack_backlog;
 		info->tcpi_sacked = sk->sk_max_ack_backlog;
 	} else {
diff --git a/net/ipv4/tcp_diag.c b/net/ipv4/tcp_diag.c
index 479f34946177..b31604086edd 100644
--- a/net/ipv4/tcp_diag.c
+++ b/net/ipv4/tcp_diag.c
@@ -21,7 +21,7 @@ static void tcp_diag_get_info(struct sock *sk, struct inet_diag_msg *r,
 {
 	struct tcp_info *info = _info;
 
-	if (sk->sk_state == TCP_LISTEN) {
+	if (sk_state_load(sk) == TCP_LISTEN) {
 		r->idiag_rqueue = sk->sk_ack_backlog;
 		r->idiag_wqueue = sk->sk_max_ack_backlog;
 	} else if (sk->sk_type == SOCK_STREAM) {
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 950e28c0cdf2..5183172dd6b5 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -2158,6 +2158,7 @@ static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i)
 	__u16 destp = ntohs(inet->inet_dport);
 	__u16 srcp = ntohs(inet->inet_sport);
 	int rx_queue;
+	int state;
 
 	if (icsk->icsk_pending == ICSK_TIME_RETRANS ||
 	    icsk->icsk_pending == ICSK_TIME_EARLY_RETRANS ||
@@ -2175,17 +2176,18 @@ static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i)
 		timer_expires = jiffies;
 	}
 
-	if (sk->sk_state == TCP_LISTEN)
+	state = sk_state_load(sk);
+	if (state == TCP_LISTEN)
 		rx_queue = sk->sk_ack_backlog;
 	else
-		/*
-		 * because we dont lock socket, we might find a transient negative value
+		/* Because we don't lock the socket,
+		 * we might find a transient negative value.
 		 */
 		rx_queue = max_t(int, tp->rcv_nxt - tp->copied_seq, 0);
 
 	seq_printf(f, "%4d: %08X:%04X %08X:%04X %02X %08X:%08X %02X:%08lX "
 			"%08X %5u %8d %lu %d %pK %lu %lu %u %u %d",
-		i, src, srcp, dest, destp, sk->sk_state,
+		i, src, srcp, dest, destp, state,
 		tp->write_seq - tp->snd_una,
 		rx_queue,
 		timer_active,
@@ -2199,8 +2201,8 @@ static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i)
 		jiffies_to_clock_t(icsk->icsk_ack.ato),
 		(icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
 		tp->snd_cwnd,
-		sk->sk_state == TCP_LISTEN ?
-		    (fastopenq ? fastopenq->max_qlen : 0) :
+		state == TCP_LISTEN ?
+		    fastopenq->max_qlen :
 		    (tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh));
 }
 
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 5baa8e754e41..3349da9a2996 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -1690,6 +1690,8 @@ static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
 	const struct tcp_sock *tp = tcp_sk(sp);
 	const struct inet_connection_sock *icsk = inet_csk(sp);
 	const struct fastopen_queue *fastopenq = &icsk->icsk_accept_queue.fastopenq;
+	int rx_queue;
+	int state;
 
 	dest  = &sp->sk_v6_daddr;
 	src   = &sp->sk_v6_rcv_saddr;
@@ -1710,6 +1712,15 @@ static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
 		timer_expires = jiffies;
 	}
 
+	state = sk_state_load(sp);
+	if (state == TCP_LISTEN)
+		rx_queue = sp->sk_ack_backlog;
+	else
+		/* Because we don't lock the socket,
+		 * we might find a transient negative value.
+		 */
+		rx_queue = max_t(int, tp->rcv_nxt - tp->copied_seq, 0);
+
 	seq_printf(seq,
 		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
 		   "%02X %08X:%08X %02X:%08lX %08X %5u %8d %lu %d %pK %lu %lu %u %u %d\n",
@@ -1718,9 +1729,9 @@ static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
 		   src->s6_addr32[2], src->s6_addr32[3], srcp,
 		   dest->s6_addr32[0], dest->s6_addr32[1],
 		   dest->s6_addr32[2], dest->s6_addr32[3], destp,
-		   sp->sk_state,
-		   tp->write_seq-tp->snd_una,
-		   (sp->sk_state == TCP_LISTEN) ? sp->sk_ack_backlog : (tp->rcv_nxt - tp->copied_seq),
+		   state,
+		   tp->write_seq - tp->snd_una,
+		   rx_queue,
 		   timer_active,
 		   jiffies_delta_to_clock_t(timer_expires - jiffies),
 		   icsk->icsk_retransmits,
@@ -1732,7 +1743,7 @@ static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
 		   jiffies_to_clock_t(icsk->icsk_ack.ato),
 		   (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
 		   tp->snd_cwnd,
-		   sp->sk_state == TCP_LISTEN ?
+		   state == TCP_LISTEN ?
 			fastopenq->max_qlen :
 			(tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh)
 		   );

^ permalink raw reply related	[flat|nested] 7+ messages in thread

* Re: [PATCH net] tcp: ensure proper barriers in lockless contexts
  2015-11-12 16:43 [PATCH net] tcp: ensure proper barriers in lockless contexts Eric Dumazet
@ 2015-11-15  1:20 ` Herbert Xu
  2015-11-15  3:21   ` Eric Dumazet
  2015-11-15 23:37 ` David Miller
  1 sibling, 1 reply; 7+ messages in thread
From: Herbert Xu @ 2015-11-15  1:20 UTC (permalink / raw)
  To: Eric Dumazet; +Cc: davem, netdev

Eric Dumazet <eric.dumazet@gmail.com> wrote:
> From: Eric Dumazet <edumazet@google.com>
> 
> Some functions access TCP sockets without holding a lock and
> might output non consistent data, depending on compiler and or
> architecture.
> 
> tcp_diag_get_info(), tcp_get_info(), tcp_poll(), get_tcp4_sock() ...

For the information gathering ones such as tcp_diag_get_info I'm
wondering whether we really need these memory barriers.  After all,
if it's truly lockless then surely the TCP socket state can change
again after you load the state the first time, in which case the
barrier becomes completely meaningless.

Cheers,
-- 
Email: Herbert Xu <herbert@gondor.apana.org.au>
Home Page: http://gondor.apana.org.au/~herbert/
PGP Key: http://gondor.apana.org.au/~herbert/pubkey.txt

^ permalink raw reply	[flat|nested] 7+ messages in thread

* Re: [PATCH net] tcp: ensure proper barriers in lockless contexts
  2015-11-15  1:20 ` Herbert Xu
@ 2015-11-15  3:21   ` Eric Dumazet
  2015-11-15  3:32     ` Herbert Xu
  0 siblings, 1 reply; 7+ messages in thread
From: Eric Dumazet @ 2015-11-15  3:21 UTC (permalink / raw)
  To: Herbert Xu; +Cc: davem, netdev

On Sun, 2015-11-15 at 09:20 +0800, Herbert Xu wrote:
> Eric Dumazet <eric.dumazet@gmail.com> wrote:
> > From: Eric Dumazet <edumazet@google.com>
> > 
> > Some functions access TCP sockets without holding a lock and
> > might output non consistent data, depending on compiler and or
> > architecture.
> > 
> > tcp_diag_get_info(), tcp_get_info(), tcp_poll(), get_tcp4_sock() ...
> 
> For the information gathering ones such as tcp_diag_get_info I'm
> wondering whether we really need these memory barriers.  After all,
> if it's truly lockless then surely the TCP socket state can change
> again after you load the state the first time, in which case the
> barrier becomes completely meaningless.

Not a big deal, no crash or kernel instability, just making the
information a bit more consistent.

They are not truly needed, but the patch avoids some discrepancies when
an observer gets the data, for a minimum cost ( compiler barrier() in
most cases)

Like : 

       state = sk_state_load(sp);
       if (state == TCP_LISTEN)
               rx_queue = sp->sk_ack_backlog;
       else
               /* Because we don't lock the socket,
                * we might find a transient negative value.
                */
               rx_queue = max_t(int, tp->rcv_nxt - tp->copied_seq, 0);

or :

       if (sk_state_load(sk) == TCP_LISTEN) {
                r->idiag_rqueue = sk->sk_ack_backlog;
                r->idiag_wqueue = sk->sk_max_ack_backlog;


This patch makes sure sk_ack_backlog is written before sk_state is set
to TCP_LISTEN, otherwise one could see a '0' listener backlog.

We had a spurious kernel log for a similar issue that was solved in
commit f985c65c908f6b26c30019a83dc5ea295f5fcf62
("tcp: avoid spurious SYN flood detection at listen() time")

Sure, we can live with a spurious log, but experience showed that taking
care of this early could avoid lot of hassle, say in 12 months when
people start using linux-4.4 

Thanks.

^ permalink raw reply	[flat|nested] 7+ messages in thread

* Re: [PATCH net] tcp: ensure proper barriers in lockless contexts
  2015-11-15  3:21   ` Eric Dumazet
@ 2015-11-15  3:32     ` Herbert Xu
  0 siblings, 0 replies; 7+ messages in thread
From: Herbert Xu @ 2015-11-15  3:32 UTC (permalink / raw)
  To: Eric Dumazet; +Cc: davem, netdev

On Sat, Nov 14, 2015 at 07:21:41PM -0800, Eric Dumazet wrote:
>
> This patch makes sure sk_ack_backlog is written before sk_state is set
> to TCP_LISTEN, otherwise one could see a '0' listener backlog.
> 
> We had a spurious kernel log for a similar issue that was solved in
> commit f985c65c908f6b26c30019a83dc5ea295f5fcf62
> ("tcp: avoid spurious SYN flood detection at listen() time")
> 
> Sure, we can live with a spurious log, but experience showed that taking
> care of this early could avoid lot of hassle, say in 12 months when
> people start using linux-4.4 

Oh I have no problems whatsoever with adding barriers where they
are needed, such as in this particular spot.  What I do have an
issue with though is the fact that your patch in its current form
may give future TCP developers a false sense of security.

I know that you as the person who added these helpers know exactly
what they do and don't do.  But the next guy who comes along may
not have that complete understanding and they may think that these
helpers give them the right to do things locklessly.

So personally I'd prefer explicit barriers in the spots where they
are needed with detailed comments as opposed to these helpers which
appear to offer guarantees that they can't really give.

Or perhaps give these helpers names that make people think twice,
e.g., tcp_load_state_unsafe.

Cheers,
-- 
Email: Herbert Xu <herbert@gondor.apana.org.au>
Home Page: http://gondor.apana.org.au/~herbert/
PGP Key: http://gondor.apana.org.au/~herbert/pubkey.txt

^ permalink raw reply	[flat|nested] 7+ messages in thread

* Re: [PATCH net] tcp: ensure proper barriers in lockless contexts
  2015-11-12 16:43 [PATCH net] tcp: ensure proper barriers in lockless contexts Eric Dumazet
  2015-11-15  1:20 ` Herbert Xu
@ 2015-11-15 23:37 ` David Miller
  2015-11-17  1:45   ` Paul Gortmaker
  1 sibling, 1 reply; 7+ messages in thread
From: David Miller @ 2015-11-15 23:37 UTC (permalink / raw)
  To: eric.dumazet; +Cc: netdev

From: Eric Dumazet <eric.dumazet@gmail.com>
Date: Thu, 12 Nov 2015 08:43:18 -0800

> From: Eric Dumazet <edumazet@google.com>
> 
> Some functions access TCP sockets without holding a lock and
> might output non consistent data, depending on compiler and or
> architecture.
> 
> tcp_diag_get_info(), tcp_get_info(), tcp_poll(), get_tcp4_sock() ...
> 
> Introduce sk_state_load() and sk_state_store() to fix the issues,
> and more clearly document where this lack of locking is happening.
> 
> Signed-off-by: Eric Dumazet <edumazet@google.com>

Applied, thanks.

^ permalink raw reply	[flat|nested] 7+ messages in thread

* Re: [PATCH net] tcp: ensure proper barriers in lockless contexts
  2015-11-15 23:37 ` David Miller
@ 2015-11-17  1:45   ` Paul Gortmaker
  2015-11-17  1:55     ` David Miller
  0 siblings, 1 reply; 7+ messages in thread
From: Paul Gortmaker @ 2015-11-17  1:45 UTC (permalink / raw)
  To: David Miller; +Cc: eric.dumazet, netdev, linux-next

On Sun, Nov 15, 2015 at 6:37 PM, David Miller <davem@davemloft.net> wrote:
> From: Eric Dumazet <eric.dumazet@gmail.com>
> Date: Thu, 12 Nov 2015 08:43:18 -0800
>
>> From: Eric Dumazet <edumazet@google.com>
>>
>> Some functions access TCP sockets without holding a lock and
>> might output non consistent data, depending on compiler and or
>> architecture.
>>
>> tcp_diag_get_info(), tcp_get_info(), tcp_poll(), get_tcp4_sock() ...
>>
>> Introduce sk_state_load() and sk_state_store() to fix the issues,
>> and more clearly document where this lack of locking is happening.
>>
>> Signed-off-by: Eric Dumazet <edumazet@google.com>
>
> Applied, thanks.

Just a heads up that this breaks all arm64 builds in linux-next from
Monday; bisect says:

00fd38d938db3f1ab1c486549afc450cb7e751b1 is the first bad commit
commit 00fd38d938db3f1ab1c486549afc450cb7e751b1
Author: Eric Dumazet <edumazet@google.com>
Date:   Thu Nov 12 08:43:18 2015 -0800

    tcp: ensure proper barriers in lockless contexts

Here is one of the linux-next fails:

http://kisskb.ellerman.id.au/kisskb/buildresult/12548450/

Paul.
--


> --
> To unsubscribe from this list: send the line "unsubscribe netdev" in
> the body of a message to majordomo@vger.kernel.org
> More majordomo info at  http://vger.kernel.org/majordomo-info.html

^ permalink raw reply	[flat|nested] 7+ messages in thread

* Re: [PATCH net] tcp: ensure proper barriers in lockless contexts
  2015-11-17  1:45   ` Paul Gortmaker
@ 2015-11-17  1:55     ` David Miller
  0 siblings, 0 replies; 7+ messages in thread
From: David Miller @ 2015-11-17  1:55 UTC (permalink / raw)
  To: paul.gortmaker; +Cc: eric.dumazet, netdev, linux-next

From: Paul Gortmaker <paul.gortmaker@windriver.com>
Date: Mon, 16 Nov 2015 20:45:25 -0500

> Just a heads up that this breaks all arm64 builds in linux-next from
> Monday; bisect says:
> 
> 00fd38d938db3f1ab1c486549afc450cb7e751b1 is the first bad commit
> commit 00fd38d938db3f1ab1c486549afc450cb7e751b1
> Author: Eric Dumazet
> <edumazet@google.com>http://kisskb.ellerman.id.au/kisskb/buildresult/12548450/
> Date:   Thu Nov 12 08:43:18 2015 -0800
> 
>     tcp: ensure proper barriers in lockless contexts
> 
> Here is one of the linux-next fails:
> 
> http://kisskb.ellerman.id.au/kisskb/buildresult/12548450/

Thanks for the report Paul.

I think Eric's patch is correct.  I see what ARM is trying to do here,
but it really has to accomodate const arguments to smp_load_acquire().

^ permalink raw reply	[flat|nested] 7+ messages in thread

end of thread, other threads:[~2015-11-17  1:55 UTC | newest]

Thread overview: 7+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2015-11-12 16:43 [PATCH net] tcp: ensure proper barriers in lockless contexts Eric Dumazet
2015-11-15  1:20 ` Herbert Xu
2015-11-15  3:21   ` Eric Dumazet
2015-11-15  3:32     ` Herbert Xu
2015-11-15 23:37 ` David Miller
2015-11-17  1:45   ` Paul Gortmaker
2015-11-17  1:55     ` David Miller

This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.