All of lore.kernel.org
 help / color / mirror / Atom feed
* [PATCH] new match extension `flow'
@ 2004-10-28  2:05 Josh Samuelson
  2004-10-28 20:15 ` Josh Samuelson
  2004-10-29 19:32 ` Pablo Neira
  0 siblings, 2 replies; 10+ messages in thread
From: Josh Samuelson @ 2004-10-28  2:05 UTC (permalink / raw)
  To: netfilter-devel

[-- Attachment #1: Type: text/plain, Size: 1967 bytes --]

Greetings,

The attached diff files add to iptables the ability to match max
connections allowed for various IP protocols: generic IP, ICMP, TCP
and UDP.

kernel changes:
The patch file is for 2.6.9.
net/ipv4/netfilter/ip_conntrack_standalone.c must be fixed by a
patch I sent to the list earlier; see line 819 for the problem.
It modifies some files relevant to ip_conntrack and adds a ipt_flow.c
match module.  When a new conntrack is created, a flow structure will
have counters incremented for the type of protocol being used, indexed
by the original direction source IP address.  All current flows can
be viewed in the "/proc/net/ip_conntrack_flow" file.  The current
number of flows can be viewed from the
"/proc/sys/net/ipv4/netfilter/ip_conntrack_flow_count" file.

# cat /proc/net/ip_conntrack_flow 
192.168.1.7 IP: 1 ICMP: 0 TCP: 1 UDP: 0
192.168.1.254 IP: 1 ICMP: 0 TCP: 0 UDP: 0
192.168.1.7 IP: 1 ICMP: 0 TCP: 1 UDP: 0
192.168.1.55 IP: 1 ICMP: 0 TCP: 1 UDP: 0
192.168.1.121 IP: 2 ICMP: 0 TCP: 0 UDP: 2
# cat /proc/sys/net/ipv4/netfilter/ip_conntrack_flow_count
5
#

iptables changes:
The patch is for iptables-1.2.11.  It adds the source to allow
iptables to have the flow match via the shared library API that
iptables defines.  The module adds the following options:
--maxip n   (which can only be used when no protocol is specified)
--maxicmp n (allowed with -p icmp)
--maxtcp n  (allowed with -p tcp)
--maxudp n  (allowed with -p udp)
The flow match module can only be used in the filter table.

An example of the usage:

iptables -A FORWARD -p tcp -s 192.168.1.0/24 -m flow --maxtcp 150 \
-m state --state NEW -j REJECT --reject-with tcp-reset

This would deny new TCP connections from all hosts routing
through the machine from the 192.168.1.0/24 network that already
have 150 connections.

Hope some people find this useful!  Original idea from John Dunning,
see "Flow count module" from Tue Oct 12 14:54:20 CEST 2004.

Questions, comments?

-Josh

[-- Attachment #2: linux-2.6.9-flow-20041027.diff --]
[-- Type: text/plain, Size: 20127 bytes --]

diff -Pru linux-2.6.9/include/linux/netfilter_ipv4/ip_conntrack.h linux-2.6.9-flow/include/linux/netfilter_ipv4/ip_conntrack.h
--- linux-2.6.9/include/linux/netfilter_ipv4/ip_conntrack.h	2004-10-18 16:55:21.000000000 -0500
+++ linux-2.6.9-flow/include/linux/netfilter_ipv4/ip_conntrack.h	2004-10-27 18:22:39.867487432 -0500
@@ -6,6 +6,7 @@
 
 #include <linux/config.h>
 #include <linux/netfilter_ipv4/ip_conntrack_tuple.h>
+#include <linux/netfilter_ipv4/ip_conntrack_flow.h>
 #include <linux/bitops.h>
 #include <linux/compiler.h>
 #include <asm/atomic.h>
@@ -291,6 +292,7 @@
 }
 
 extern unsigned int ip_conntrack_htable_size;
+extern unsigned int ip_conntrack_flow_htable_size;
  
 struct ip_conntrack_stat
 {
diff -Pru linux-2.6.9/include/linux/netfilter_ipv4/ip_conntrack_core.h linux-2.6.9-flow/include/linux/netfilter_ipv4/ip_conntrack_core.h
--- linux-2.6.9/include/linux/netfilter_ipv4/ip_conntrack_core.h	2004-10-18 16:55:35.000000000 -0500
+++ linux-2.6.9-flow/include/linux/netfilter_ipv4/ip_conntrack_core.h	2004-10-27 18:22:39.867487432 -0500
@@ -17,6 +17,9 @@
 
 struct ip_conntrack_protocol;
 
+extern u_int32_t
+hash_flow(u_int32_t ip);
+
 extern int
 ip_ct_get_tuple(const struct iphdr *iph,
 		const struct sk_buff *skb,
@@ -46,6 +49,7 @@
 }
 
 extern struct list_head *ip_conntrack_hash;
+extern struct list_head *ip_conntrack_flow_hash;
 extern struct list_head ip_conntrack_expect_list;
 DECLARE_RWLOCK_EXTERN(ip_conntrack_lock);
 DECLARE_RWLOCK_EXTERN(ip_conntrack_expect_tuple_lock);
diff -Pru linux-2.6.9/include/linux/netfilter_ipv4/ip_conntrack_flow.h linux-2.6.9-flow/include/linux/netfilter_ipv4/ip_conntrack_flow.h
--- linux-2.6.9/include/linux/netfilter_ipv4/ip_conntrack_flow.h	1969-12-31 18:00:00.000000000 -0600
+++ linux-2.6.9-flow/include/linux/netfilter_ipv4/ip_conntrack_flow.h	2004-10-27 18:22:39.868487280 -0500
@@ -0,0 +1,27 @@
+#ifndef _IP_CONNTRACK_FLOW_H
+#define _IP_CONNTRACK_FLOW_H
+
+/* A `flow' is a structure containing the IP connection count
+  on various IP protocols.
+*/
+
+struct ip_conntrack_flow
+{
+#ifdef __KERNEL__
+	struct list_head list;
+	atomic_t use;
+#else
+	struct
+	{
+		void *next,
+		     *prev;
+	} list;
+	u_int32_t use;
+#endif /* __KERNEL__ */
+	u_int32_t ip_ct_dir_original_ip;
+	u_int16_t icmp,
+		  tcp,
+		  udp;
+};
+
+#endif /* _IP_CONNTRACK_FLOW_H */
diff -Pru linux-2.6.9/include/linux/netfilter_ipv4/ipt_flow.h linux-2.6.9-flow/include/linux/netfilter_ipv4/ipt_flow.h
--- linux-2.6.9/include/linux/netfilter_ipv4/ipt_flow.h	1969-12-31 18:00:00.000000000 -0600
+++ linux-2.6.9-flow/include/linux/netfilter_ipv4/ipt_flow.h	2004-10-27 18:22:39.868487280 -0500
@@ -0,0 +1,21 @@
+#ifndef _IPT_FLOW_H
+#define _IPT_FLOW_H
+
+typedef enum
+{
+	IPFLOW_IP = 1,
+	IPFLOW_ICMP = 2,
+	IPFLOW_TCP = 4,
+	IPFLOW_UDP = 8
+} ipflow_t;
+
+struct ipt_flow_info
+{
+	ipflow_t  proto;
+	u_int16_t max_ip,
+		  max_icmp,
+		  max_tcp,
+		  max_udp;
+};
+
+#endif /* IPT_FLOW_H */
diff -Pru linux-2.6.9/include/linux/sysctl.h linux-2.6.9-flow/include/linux/sysctl.h
--- linux-2.6.9/include/linux/sysctl.h	2004-10-18 16:54:31.000000000 -0500
+++ linux-2.6.9-flow/include/linux/sysctl.h	2004-10-27 18:22:39.871486824 -0500
@@ -426,6 +426,7 @@
  	NET_IPV4_NF_CONNTRACK_SCTP_TIMEOUT_SHUTDOWN_RECD=25,
  	NET_IPV4_NF_CONNTRACK_SCTP_TIMEOUT_SHUTDOWN_ACK_SENT=26,
 	NET_IPV4_NF_CONNTRACK_COUNT=27,
+	NET_IPV4_NF_CONNTRACK_FLOW_COUNT=28,
 };
  
 /* /proc/sys/net/ipv6 */
diff -Pru linux-2.6.9/net/ipv4/netfilter/Kconfig linux-2.6.9-flow/net/ipv4/netfilter/Kconfig
--- linux-2.6.9/net/ipv4/netfilter/Kconfig	2004-10-18 16:54:55.000000000 -0500
+++ linux-2.6.9-flow/net/ipv4/netfilter/Kconfig	2004-10-27 18:22:39.873486520 -0500
@@ -342,6 +342,15 @@
 	  If you want to compile it as a module, say M here and read
 	  Documentation/modules.txt.  If unsure, say `N'.
 
+config IP_NF_MATCH_FLOW
+	tristate 'flow match support (EXPERIMENTAL)'
+	depends on IP_NF_CONNTRACK && IP_NF_FILTER && EXPERIMENTAL
+	help
+	  This option adds a 'flow' match.  With this match you can
+	  specify the maximum allowed connections for original direction
+	  conntracks.  The matches can be made against generic IP, ICMP,
+	  TCP or UDP flows.
+
 # `filter', generic and specific targets
 config IP_NF_FILTER
 	tristate "Packet filtering"
diff -Pru linux-2.6.9/net/ipv4/netfilter/Makefile linux-2.6.9-flow/net/ipv4/netfilter/Makefile
--- linux-2.6.9/net/ipv4/netfilter/Makefile	2004-10-18 16:53:43.000000000 -0500
+++ linux-2.6.9-flow/net/ipv4/netfilter/Makefile	2004-10-27 18:22:39.874486368 -0500
@@ -67,6 +67,7 @@
 obj-$(CONFIG_IP_NF_MATCH_ADDRTYPE) += ipt_addrtype.o
 obj-$(CONFIG_IP_NF_MATCH_PHYSDEV) += ipt_physdev.o
 obj-$(CONFIG_IP_NF_MATCH_COMMENT) += ipt_comment.o
+obj-$(CONFIG_IP_NF_MATCH_FLOW) += ipt_flow.o
 
 # targets
 obj-$(CONFIG_IP_NF_TARGET_REJECT) += ipt_REJECT.o
diff -Pru linux-2.6.9/net/ipv4/netfilter/ip_conntrack_core.c linux-2.6.9-flow/net/ipv4/netfilter/ip_conntrack_core.c
--- linux-2.6.9/net/ipv4/netfilter/ip_conntrack_core.c	2004-10-18 16:53:05.000000000 -0500
+++ linux-2.6.9-flow/net/ipv4/netfilter/ip_conntrack_core.c	2004-10-27 20:17:20.991397592 -0500
@@ -62,16 +62,21 @@
 
 /* ip_conntrack_standalone needs this */
 atomic_t ip_conntrack_count = ATOMIC_INIT(0);
+atomic_t ip_conntrack_flow_count = ATOMIC_INIT(0);
 EXPORT_SYMBOL(ip_conntrack_count);
+EXPORT_SYMBOL(ip_conntrack_flow_count);
 
 void (*ip_conntrack_destroyed)(struct ip_conntrack *conntrack) = NULL;
 LIST_HEAD(ip_conntrack_expect_list);
 struct ip_conntrack_protocol *ip_ct_protos[MAX_IP_CT_PROTO];
 static LIST_HEAD(helpers);
 unsigned int ip_conntrack_htable_size = 0;
+unsigned int ip_conntrack_flow_htable_size = 0;
 int ip_conntrack_max;
 struct list_head *ip_conntrack_hash;
+struct list_head *ip_conntrack_flow_hash;
 static kmem_cache_t *ip_conntrack_cachep;
+static kmem_cache_t *ip_conntrack_flow_cachep;
 static kmem_cache_t *ip_conntrack_expect_cachep;
 struct ip_conntrack ip_conntrack_untracked;
 unsigned int ip_ct_log_invalid;
@@ -100,6 +105,112 @@
 	                     ip_conntrack_hash_rnd) % ip_conntrack_htable_size);
 }
 
+u_int32_t
+hash_flow(u_int32_t ip)
+{
+	return(jhash_1word(ip, ip_conntrack_hash_rnd) % ip_conntrack_flow_htable_size);
+}
+
+#define INCREMENT_WITHOUT_OVERFLOW(c) if (c < ((1 << (sizeof(c) * 8)) - 1)) c++
+
+int
+ip_conntrack_flow_inc(struct ip_conntrack_tuple_hash *hash)
+{
+	unsigned int flow_hash;
+	u_int32_t ip;
+	struct list_head *list;
+	struct ip_conntrack_flow *flow = NULL;
+
+	ip = hash->tuple.src.ip;
+	flow_hash = hash_flow(ip);
+	READ_LOCK(&ip_conntrack_lock);
+	list_for_each(list, &ip_conntrack_flow_hash[flow_hash]) {
+		if (((struct ip_conntrack_flow *) list)->ip_ct_dir_original_ip == ip) {
+			flow = (struct ip_conntrack_flow *) list;
+			break;
+		}
+	}
+	READ_UNLOCK(&ip_conntrack_lock);
+	WRITE_LOCK(&ip_conntrack_lock);
+	if (!flow) {
+		flow = kmem_cache_alloc(ip_conntrack_flow_cachep, GFP_ATOMIC);
+		if (flow) {
+			atomic_inc(&ip_conntrack_flow_count);
+			memset(flow, 0, sizeof(struct ip_conntrack_flow));
+			atomic_set(&flow->use, 0);
+			flow->ip_ct_dir_original_ip = ip;
+			list = (struct list_head *) flow;
+			list_add(list, &ip_conntrack_flow_hash[flow_hash]);
+		}
+	}
+	if (flow) {
+		atomic_inc(&flow->use);
+		switch (hash->tuple.dst.protonum) {
+			case IPPROTO_ICMP:
+				INCREMENT_WITHOUT_OVERFLOW(flow->icmp);
+				break;
+			case IPPROTO_TCP:
+				INCREMENT_WITHOUT_OVERFLOW(flow->tcp);
+				break;
+			case IPPROTO_UDP:
+				INCREMENT_WITHOUT_OVERFLOW(flow->udp);
+				break;
+		}
+	}
+	WRITE_UNLOCK(&ip_conntrack_lock);
+	return(flow == NULL);
+}
+
+void
+ip_conntrack_flow_dec(struct ip_conntrack_tuple_hash *hash)
+{
+	unsigned int flow_hash;
+	u_int32_t ip;
+	struct list_head *list;
+	struct ip_conntrack_flow *flow = NULL;
+
+	ip = hash->tuple.src.ip;
+	flow_hash = hash_flow(ip);
+	READ_LOCK(&ip_conntrack_lock);
+	list_for_each(list, &ip_conntrack_flow_hash[flow_hash]) {
+		if (((struct ip_conntrack_flow *) list)->ip_ct_dir_original_ip == ip) {
+			flow = (struct ip_conntrack_flow *) list;
+			break;
+		}
+	}
+	READ_UNLOCK(&ip_conntrack_lock);
+	if (flow) {
+		WRITE_LOCK(&ip_conntrack_lock);
+		atomic_dec(&flow->use);
+		switch (hash->tuple.dst.protonum) {
+			case IPPROTO_ICMP:
+				if (flow->icmp) {
+					flow->icmp--;
+				}
+				break;
+			case IPPROTO_TCP:
+				if (flow->tcp) {
+					flow->tcp--;
+				}
+				break;
+			case IPPROTO_UDP:
+				if (flow->udp) {
+					flow->udp--;
+				}
+				break;
+		}
+		if (atomic_read(&flow->use) == 0) {
+			list = (struct list_head *) flow;
+			list_del(list);
+			kmem_cache_free(ip_conntrack_flow_cachep, flow);
+			atomic_dec(&ip_conntrack_flow_count);
+		}
+		WRITE_UNLOCK(&ip_conntrack_lock);
+	} else {
+		printk(KERN_WARNING "conntrack being destroyed, yet not found on flow list\n");
+	}
+}
+
 int
 ip_ct_get_tuple(const struct iphdr *iph,
 		const struct sk_buff *skb,
@@ -288,6 +399,9 @@
 	IP_NF_ASSERT(atomic_read(&nfct->use) == 0);
 	IP_NF_ASSERT(!timer_pending(&ct->timeout));
 
+	/* flow entry: delete flow here */
+	ip_conntrack_flow_dec(&ct->tuplehash[IP_CT_DIR_ORIGINAL]);
+
 	/* To make sure we don't get any weird locking issues here:
 	 * destroy_conntrack() MUST NOT be called with a write lock
 	 * to ip_conntrack_lock!!! -HW */
@@ -618,6 +732,11 @@
 end:	atomic_inc(&ip_conntrack_count);
 	WRITE_UNLOCK(&ip_conntrack_lock);
 
+	/* flow entry: insert flow here */
+	if (ip_conntrack_flow_inc(&conntrack->tuplehash[IP_CT_DIR_ORIGINAL])) {
+		printk(KERN_WARNING "flow cache alloc failed, cannot track new flows\n");
+	}
+
 ret:	return &conntrack->tuplehash[IP_CT_DIR_ORIGINAL];
 }
 
@@ -1304,8 +1423,10 @@
 	}
 
 	kmem_cache_destroy(ip_conntrack_cachep);
+	kmem_cache_destroy(ip_conntrack_flow_cachep);
 	kmem_cache_destroy(ip_conntrack_expect_cachep);
 	vfree(ip_conntrack_hash);
+	vfree(ip_conntrack_flow_hash);
 	nf_unregister_sockopt(&so_getorigdst);
 }
 
@@ -1330,6 +1451,7 @@
 		if (ip_conntrack_htable_size < 16)
 			ip_conntrack_htable_size = 16;
 	}
+	ip_conntrack_flow_htable_size = ip_conntrack_htable_size / 2;
 	ip_conntrack_max = 8 * ip_conntrack_htable_size;
 
 	printk("ip_conntrack version %s (%u buckets, %d max)"
@@ -1350,12 +1472,27 @@
 		goto err_unreg_sockopt;
 	}
 
+	ip_conntrack_flow_hash = vmalloc(sizeof(struct list_head)
+					* ip_conntrack_flow_htable_size);
+	if (!ip_conntrack_flow_hash) {
+		printk(KERN_ERR "Unable to create ip_conntrack_flow_hash\n");
+		goto err_free_hash;
+	}
+
 	ip_conntrack_cachep = kmem_cache_create("ip_conntrack",
 	                                        sizeof(struct ip_conntrack), 0,
 	                                        SLAB_HWCACHE_ALIGN, NULL, NULL);
 	if (!ip_conntrack_cachep) {
 		printk(KERN_ERR "Unable to create ip_conntrack slab cache\n");
-		goto err_free_hash;
+		goto err_free_flow_hash;
+	}
+
+	ip_conntrack_flow_cachep = kmem_cache_create("ip_conntrack_flow",
+	                                        sizeof(struct ip_conntrack_flow), 0,
+	                                        SLAB_HWCACHE_ALIGN, NULL, NULL);
+	if (!ip_conntrack_flow_cachep) {
+		printk(KERN_ERR "Unable to create ip_conntrack_flow slab cache\n");
+		goto err_free_conntrack_slab;
 	}
 
 	ip_conntrack_expect_cachep = kmem_cache_create("ip_conntrack_expect",
@@ -1363,7 +1500,7 @@
 					0, SLAB_HWCACHE_ALIGN, NULL, NULL);
 	if (!ip_conntrack_expect_cachep) {
 		printk(KERN_ERR "Unable to create ip_expect slab cache\n");
-		goto err_free_conntrack_slab;
+		goto err_free_conntrack_flow_slab;
 	}
 
 	/* Don't NEED lock here, but good form anyway. */
@@ -1379,6 +1516,9 @@
 	for (i = 0; i < ip_conntrack_htable_size; i++)
 		INIT_LIST_HEAD(&ip_conntrack_hash[i]);
 
+	for (i = 0; i < ip_conntrack_flow_htable_size; i++)
+		INIT_LIST_HEAD(&ip_conntrack_flow_hash[i]);
+
 	/* For use by ipt_REJECT */
 	ip_ct_attach = ip_conntrack_attach;
 
@@ -1390,8 +1530,12 @@
 
 	return ret;
 
+err_free_conntrack_flow_slab:
+	kmem_cache_destroy(ip_conntrack_flow_cachep);
 err_free_conntrack_slab:
 	kmem_cache_destroy(ip_conntrack_cachep);
+err_free_flow_hash:
+	vfree(ip_conntrack_flow_hash);
 err_free_hash:
 	vfree(ip_conntrack_hash);
 err_unreg_sockopt:
diff -Pru linux-2.6.9/net/ipv4/netfilter/ip_conntrack_standalone.c linux-2.6.9-flow/net/ipv4/netfilter/ip_conntrack_standalone.c
--- linux-2.6.9/net/ipv4/netfilter/ip_conntrack_standalone.c	2004-10-27 18:44:12.967906288 -0500
+++ linux-2.6.9-flow/net/ipv4/netfilter/ip_conntrack_standalone.c	2004-10-27 18:22:39.881485304 -0500
@@ -46,6 +46,7 @@
 MODULE_LICENSE("GPL");
 
 extern atomic_t ip_conntrack_count;
+extern atomic_t ip_conntrack_flow_count;
 DECLARE_PER_CPU(struct ip_conntrack_stat, ip_conntrack_stat);
 
 static int kill_proto(const struct ip_conntrack *i, void *data)
@@ -172,7 +173,7 @@
 	.stop  = ct_seq_stop,
 	.show  = ct_seq_show
 };
-  
+
 static int ct_open(struct inode *inode, struct file *file)
 {
 	return seq_open(file, &ct_seq_ops);
@@ -186,6 +187,70 @@
 	.release = seq_release
 };
   
+static void *ct_flow_seq_start(struct seq_file *s, loff_t *pos)
+{
+	if (*pos >= ip_conntrack_flow_htable_size)
+		return NULL;
+	return &ip_conntrack_flow_hash[*pos];
+}
+  
+static void ct_flow_seq_stop(struct seq_file *s, void *v)
+{
+}
+
+static void *ct_flow_seq_next(struct seq_file *s, void *v, loff_t *pos)
+{
+	(*pos)++;
+	if (*pos >= ip_conntrack_flow_htable_size)
+		return NULL;
+	return &ip_conntrack_flow_hash[*pos];
+}
+
+static int ct_flow_seq_show(struct seq_file *s, void *v)
+{
+	int ret = 0;
+	struct ip_conntrack_flow *flow;
+	struct list_head *list;
+
+	list = (struct list_head *) v;
+	list = list->next;
+	READ_LOCK(&ip_conntrack_lock);
+	list_for_each(list, (struct list_head *) v) {
+		flow = (struct ip_conntrack_flow *) list;
+		if (seq_printf(s, "%u.%u.%u.%u IP: %u ICMP: %hu TCP: %hu UDP: %hu\n",
+			       NIPQUAD(flow->ip_ct_dir_original_ip),
+			       atomic_read(&flow->use),
+			       flow->icmp,
+			       flow->tcp,
+			       flow->udp)) {
+			ret = -ENOSPC;
+			break;
+		}
+	}
+	READ_UNLOCK(&ip_conntrack_lock);
+	return(ret);
+}
+  
+static struct seq_operations ct_flow_seq_ops = {
+	.start = ct_flow_seq_start,
+	.next  = ct_flow_seq_next,
+	.stop  = ct_flow_seq_stop,
+	.show  = ct_flow_seq_show
+};
+
+static int ct_flow_open(struct inode *inode, struct file *file)
+{
+	return seq_open(file, &ct_flow_seq_ops);
+}
+
+static struct file_operations ct_flow_file_ops = {
+	.owner   = THIS_MODULE,
+	.open    = ct_flow_open,
+	.read    = seq_read,
+	.llseek  = seq_lseek,
+	.release = seq_release
+};
+  
 /* expects */
 static void *exp_seq_start(struct seq_file *s, loff_t *pos)
 {
@@ -351,6 +416,7 @@
 	.llseek  = seq_lseek,
 	.release = seq_release_private,
 };
+
 #endif
 
 static unsigned int ip_confirm(unsigned int hooknum,
@@ -530,6 +596,14 @@
 		.proc_handler	= &proc_dointvec,
 	},
 	{
+		.ctl_name	= NET_IPV4_NF_CONNTRACK_FLOW_COUNT,
+		.procname	= "ip_conntrack_flow_count",
+		.data		= &ip_conntrack_flow_count,
+		.maxlen		= sizeof(int),
+		.mode		= 0444,
+		.proc_handler	= &proc_dointvec,
+	},
+	{
 		.ctl_name	= NET_IPV4_NF_CONNTRACK_BUCKETS,
 		.procname	= "ip_conntrack_buckets",
 		.data		= &ip_conntrack_htable_size,
@@ -725,7 +799,7 @@
 static int init_or_cleanup(int init)
 {
 #ifdef CONFIG_PROC_FS
-	struct proc_dir_entry *proc, *proc_exp, *proc_stat;
+	struct proc_dir_entry *proc, *proc_flow, *proc_exp, *proc_stat;
 #endif
 	int ret = 0;
 
@@ -739,9 +813,12 @@
 	proc = proc_net_fops_create("ip_conntrack", 0440, &ct_file_ops);
 	if (!proc) goto cleanup_init;
 
+	proc_flow = proc_net_fops_create("ip_conntrack_flow", 0440, &ct_flow_file_ops);
+	if (!proc_flow) goto cleanup_proc;
+
 	proc_exp = proc_net_fops_create("ip_conntrack_expect", 0440,
 					&exp_file_ops);
-	if (!proc_exp) goto cleanup_proc;
+	if (!proc_exp) goto cleanup_proc_flow;
 
 	proc_stat = create_proc_entry("ip_conntrack", S_IRUGO, proc_net_stat);
 	if (!proc_stat)
@@ -815,8 +892,10 @@
  cleanup_proc_stat:
 #ifdef CONFIG_PROC_FS
 	proc_net_remove("ip_conntrack_stat");
-cleanup_proc_exp:
+ cleanup_proc_exp:
 	proc_net_remove("ip_conntrack_expect");
+ cleanup_proc_flow:
+	proc_net_remove("ip_conntrack_flow");
  cleanup_proc:
 	proc_net_remove("ip_conntrack");
  cleanup_init:
@@ -875,6 +954,7 @@
 {
 }
 
+EXPORT_SYMBOL(hash_flow);
 EXPORT_SYMBOL(ip_conntrack_protocol_register);
 EXPORT_SYMBOL(ip_conntrack_protocol_unregister);
 EXPORT_SYMBOL(invert_tuplepr);
@@ -900,6 +980,7 @@
 EXPORT_SYMBOL(ip_conntrack_expect_list);
 EXPORT_SYMBOL(ip_conntrack_lock);
 EXPORT_SYMBOL(ip_conntrack_hash);
+EXPORT_SYMBOL(ip_conntrack_flow_hash);
 EXPORT_SYMBOL(ip_conntrack_untracked);
 EXPORT_SYMBOL_GPL(ip_conntrack_find_get);
 EXPORT_SYMBOL_GPL(ip_conntrack_put);
diff -Pru linux-2.6.9/net/ipv4/netfilter/ipt_flow.c linux-2.6.9-flow/net/ipv4/netfilter/ipt_flow.c
--- linux-2.6.9/net/ipv4/netfilter/ipt_flow.c	1969-12-31 18:00:00.000000000 -0600
+++ linux-2.6.9-flow/net/ipv4/netfilter/ipt_flow.c	2004-10-27 18:22:39.882485152 -0500
@@ -0,0 +1,127 @@
+/* Kernel module to match [IP|ICMP|TCP|UDP] flow counts. */
+
+/* (C) 2004 Josh Samuelson <josamue1@wsc.edu>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/skbuff.h>
+#include <linux/netfilter_ipv4/ip_conntrack.h>
+#include <linux/netfilter_ipv4/ip_conntrack_core.h>
+#include <linux/netfilter_ipv4/ip_tables.h>
+#include <linux/netfilter_ipv4/ipt_flow.h>
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Josh Samuelson <josamue1@wsc.edu>");
+MODULE_DESCRIPTION("iptables connection flow match module");
+
+static int
+match(const struct sk_buff *skb,
+	const struct net_device *in,
+	const struct net_device *out,
+	const void *matchinfo,
+	int offset,
+	int *hotdrop)
+{
+	const struct ipt_flow_info *finfo = matchinfo;
+	struct list_head *list;
+	struct ip_conntrack_flow *flow = NULL;
+	u_int16_t proto;
+	u_int32_t flow_hash,
+		  ip;
+	int ret = 0;
+
+	proto = skb->nh.iph->protocol;
+	ip = skb->nh.iph->saddr;
+	flow_hash = hash_flow(ip);
+	READ_LOCK(&ip_conntrack_lock);
+	list_for_each(list, &ip_conntrack_flow_hash[flow_hash]) {
+		if (((struct ip_conntrack_flow *) list)->ip_ct_dir_original_ip == ip) {
+			flow = (struct ip_conntrack_flow *) list;
+			break;
+		}
+	}
+	if (flow) {
+		if ((atomic_read(&flow->use) > finfo->max_ip) && (finfo->proto & IPFLOW_IP))
+			ret = 1;
+		switch (proto) {
+			case IPPROTO_ICMP:
+				if ((flow->icmp > finfo->max_icmp) && (finfo->proto & IPFLOW_ICMP))
+					ret = 1;
+				break;
+			case IPPROTO_TCP:
+				if ((flow->tcp > finfo->max_tcp) && (finfo->proto & IPFLOW_TCP))
+					ret = 1;
+				break;
+			case IPPROTO_UDP:
+				if ((flow->udp > finfo->max_udp) && (finfo->proto & IPFLOW_UDP))
+					ret = 1;
+				break;
+		}
+	}
+	READ_UNLOCK(&ip_conntrack_lock);
+	return(ret);
+}
+
+static int check(const char *tablename,
+	const struct ipt_ip *ip,
+	void *matchinfo,
+	unsigned int matchsize,
+	unsigned int hook_mask)
+{
+	const struct ipt_flow_info *finfo = matchinfo;
+
+	if (matchsize != IPT_ALIGN(sizeof(struct ipt_flow_info)))
+		return 0;
+
+	if (strcmp(tablename, "filter") != 0) {
+		printk(KERN_WARNING "flow: can only be used in \"filter\" table, not \"%s\"\n", tablename);
+		return(0);
+	}
+
+	switch (ip->proto) {
+		case IPPROTO_IP:
+			if (finfo->max_icmp || finfo->max_tcp || finfo->max_udp)
+				return(0);
+			break;
+		case IPPROTO_ICMP:
+			if (finfo->max_ip || finfo->max_tcp || finfo->max_udp)
+				return(0);
+			break;
+		case IPPROTO_TCP:
+			if (finfo->max_ip || finfo->max_icmp || finfo->max_udp)
+				return(0);
+			break;
+		case IPPROTO_UDP:
+			if (finfo->max_ip || finfo->max_icmp || finfo->max_tcp)
+				return(0);
+			break;
+		default:
+			return(0);
+	}
+	return 1;
+}
+
+static struct ipt_match flow_match = {
+	.name		= "flow",
+	.match		= &match,
+	.checkentry	= &check,
+	.me		= THIS_MODULE,
+};
+
+static int __init init(void)
+{
+	need_ip_conntrack();
+	return ipt_register_match(&flow_match);
+}
+
+static void __exit fini(void)
+{
+	ipt_unregister_match(&flow_match);
+}
+
+module_init(init);
+module_exit(fini);

[-- Attachment #3: iptables-1.2.11-flow-20041027.diff --]
[-- Type: text/plain, Size: 6145 bytes --]

diff -Pru iptables-1.2.11/extensions/Makefile iptables-flow/extensions/Makefile
--- iptables-1.2.11/extensions/Makefile	2004-06-17 05:22:54.000000000 -0500
+++ iptables-flow/extensions/Makefile	2004-10-25 13:42:19.000000000 -0500
@@ -5,7 +5,7 @@
 # header files are present in the include/linux directory of this iptables
 # package (HW)
 #
-PF_EXT_SLIB:=ah connlimit connmark conntrack dscp ecn esp helper icmp iprange length limit mac mark multiport owner physdev pkttype realm rpc sctp standard state tcp tcpmss tos ttl udp unclean CLASSIFY CONNMARK DNAT DSCP ECN LOG MARK MASQUERADE MIRROR NETMAP NOTRACK REDIRECT REJECT SAME SNAT TARPIT TCPMSS TOS TRACE TTL ULOG
+PF_EXT_SLIB:=ah connlimit connmark conntrack dscp ecn esp flow helper icmp iprange length limit mac mark multiport owner physdev pkttype realm rpc sctp standard state tcp tcpmss tos ttl udp unclean CLASSIFY CONNMARK DNAT DSCP ECN LOG MARK MASQUERADE MIRROR NETMAP NOTRACK REDIRECT REJECT SAME SNAT TARPIT TCPMSS TOS TRACE TTL ULOG
 PF6_EXT_SLIB:=eui64 hl icmpv6 length limit mac mark multiport owner standard tcp udp HL LOG MARK TRACE
 
 # Optionals
diff -Pru iptables-1.2.11/extensions/libipt_flow.c iptables-flow/extensions/libipt_flow.c
--- iptables-1.2.11/extensions/libipt_flow.c	1969-12-31 18:00:00.000000000 -0600
+++ iptables-flow/extensions/libipt_flow.c	2004-10-27 12:47:54.763557256 -0500
@@ -0,0 +1,154 @@
+/* Shared library add-on to iptables to add [IP|ICMP|TCP|UDP] flow count match support. */
+
+/* (C) 2004 Josh Samuelson <josamue1@wsc.edu>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <getopt.h>
+#include <iptables.h>
+#include <linux/netfilter_ipv4/ip_conntrack.h>
+#include <linux/netfilter_ipv4/ipt_flow.h>
+
+/* Function which prints out usage message. */
+static void
+help(void)
+{
+	printf(
+"flow v%s options:\n"
+" --maxip n\n"
+" --maxicmp n\n"
+" --maxtcp n\n"
+" --maxudp n\n"
+"\n", IPTABLES_VERSION);
+}
+
+/* Initialize the match. */
+static void
+init(struct ipt_entry_match *m, unsigned int *nfcache)
+{
+	/* Can't cache this */
+	*nfcache |= NFC_UNKNOWN;
+}
+
+/* Function which parses command options; returns true if it
+   ate an option */
+static int
+parse(int c, char **argv, int invert, unsigned int *flags,
+      const struct ipt_entry *entry,
+      unsigned int *nfcache,
+      struct ipt_entry_match **match)
+{
+	unsigned int count;
+	struct ipt_flow_info *finfo = (struct ipt_flow_info *)(*match)->data;
+
+	if(invert || check_inverse(optarg, &invert, &optind, 0)) {
+		exit_error(PARAMETER_PROBLEM, "flow options cannot be inverted");
+	}
+	switch (c) {
+		case 0:
+			if(string_to_number(argv[optind - 1], 1, 65535, &count) == -1)
+				exit_error(PARAMETER_PROBLEM, "maxip value out of range");
+			finfo->max_ip = count;
+			finfo->proto |= IPFLOW_IP;
+			*flags = 1;
+			break;
+		case 1:
+			if(string_to_number(argv[optind - 1], 1, 65535, &count) == -1)
+				exit_error(PARAMETER_PROBLEM, "maxicmp value out of range");
+			finfo->max_icmp = count;
+			finfo->proto |= IPFLOW_ICMP;
+			*flags = 1;
+			break;
+		case 6:
+			if(string_to_number(argv[optind - 1], 1, 65535, &count) == -1)
+				exit_error(PARAMETER_PROBLEM, "maxtcp value out of range");
+			finfo->max_tcp = count;
+			finfo->proto |= IPFLOW_TCP;
+			*flags = 1;
+			break;
+		case 17:
+			if(string_to_number(argv[optind - 1], 1, 65535, &count) == -1)
+				exit_error(PARAMETER_PROBLEM, "maxudp value out of range");
+			finfo->max_udp = count;
+			finfo->proto |= IPFLOW_UDP;
+			*flags = 1;
+			break;
+
+		default:
+			return 0;
+	}
+	return 1;
+}
+
+/* Final check; must have specified --max[icmp|tcp|udp]. */
+static void final_check(unsigned int flags)
+{
+	if (!flags)
+		exit_error(PARAMETER_PROBLEM, "You must specify `--max[ip|icmp|tcp|udp]'");
+}
+
+void flow_print(struct ipt_flow_info *finfo)
+{
+	if(finfo->max_ip)
+		printf("--maxip %i ", finfo->max_ip);
+	if(finfo->max_icmp)
+		printf("--maxicmp %i ", finfo->max_icmp);
+	if(finfo->max_tcp)
+		printf("--maxtcp %i ", finfo->max_tcp);
+	if(finfo->max_udp)
+		printf("--maxudp %i ", finfo->max_udp);
+}
+
+/* Prints out the matchinfo. */
+static void
+print(const struct ipt_ip *ip,
+      const struct ipt_entry_match *match,
+      int numeric)
+{
+	struct ipt_flow_info *finfo = (struct ipt_flow_info *)match->data;
+
+	printf("flow ");
+	flow_print(finfo);
+}
+
+/* Saves the matchinfo in parsable form to stdout. */
+static void save(const struct ipt_ip *ip, const struct ipt_entry_match *match)
+{
+	struct ipt_flow_info *finfo = (struct ipt_flow_info *)match->data;
+
+	flow_print(finfo);
+}
+
+static struct option opts[] = {
+	{ .name = "maxip", .has_arg = 1, .flag =0, .val = 0 },
+	{ .name = "maxicmp", .has_arg = 1, .flag =0, .val = 1 },
+	{ .name = "maxtcp", .has_arg = 1, .flag = 0, .val = 6 },
+	{ .name = "maxudp", .has_arg = 1, .flag = 0, .val = 17 },
+	{0}
+};
+
+static
+struct iptables_match flow
+= {	.next		= NULL,
+	.name		= "flow",
+	.version	= IPTABLES_VERSION,
+	.size		= IPT_ALIGN(sizeof(struct ipt_flow_info)),
+	.userspacesize	= IPT_ALIGN(sizeof(struct ipt_flow_info)),
+	.help		= &help,
+	.init		= &init,
+	.parse		= &parse,
+	.final_check	= &final_check,
+	.print		= &print,
+	.save		= &save,
+	.extra_opts	= opts
+};
+
+void _init(void)
+{
+	register_match(&flow);
+}
diff -Pru iptables-1.2.11/extensions/libipt_flow.man iptables-flow/extensions/libipt_flow.man
--- iptables-1.2.11/extensions/libipt_flow.man	1969-12-31 18:00:00.000000000 -0600
+++ iptables-flow/extensions/libipt_flow.man	2004-10-27 11:40:46.000000000 -0500
@@ -0,0 +1,10 @@
+This module, when combined with connection tracking, allows access to
+IP, ICMP, TCP and UDP flow counts per source IP address (IP_CT_DIR_ORIGINAL
+state from ip_conntrack)  This match module can only be used in the filter
+table.
+.TP
+.BI "--maxip "   "n"
+.BI "--maxicmp " "n"
+.BI "--maxtcp "  "n"
+.BI "--maxudp "  "n"
+Where n is the max number of connections to allow from some specified source.

^ permalink raw reply	[flat|nested] 10+ messages in thread

* Re: [PATCH] new match extension `flow'
  2004-10-28  2:05 [PATCH] new match extension `flow' Josh Samuelson
@ 2004-10-28 20:15 ` Josh Samuelson
  2004-10-29 19:32 ` Pablo Neira
  1 sibling, 0 replies; 10+ messages in thread
From: Josh Samuelson @ 2004-10-28 20:15 UTC (permalink / raw)
  To: netfilter-devel

[-- Attachment #1: Type: text/plain, Size: 181 bytes --]

After thinking about it for a bit, this match could be used in more tables
than just the filter table.  These patches make it so the prior patch
works in all tables but raw.

-Josh

[-- Attachment #2: linux-2.6.9-flow-20041028-noraw.diff --]
[-- Type: text/plain, Size: 675 bytes --]

diff -Pru linux-2.6.9-flow-20041027/net/ipv4/netfilter/ipt_flow.c linux-2.6.9-flow-20041028/net/ipv4/netfilter/ipt_flow.c
--- linux-2.6.9-flow-20041027/net/ipv4/netfilter/ipt_flow.c	2004-10-28 13:49:20.430784272 -0500
+++ linux-2.6.9-flow-20041028/net/ipv4/netfilter/ipt_flow.c	2004-10-28 13:49:54.800559272 -0500
@@ -77,8 +77,8 @@
 	if (matchsize != IPT_ALIGN(sizeof(struct ipt_flow_info)))
 		return 0;
 
-	if (strcmp(tablename, "filter") != 0) {
-		printk(KERN_WARNING "flow: can only be used in \"filter\" table, not \"%s\"\n", tablename);
+	if (strcmp(tablename, "raw") == 0) {
+		printk(KERN_WARNING "flow: can not by used in the \"raw\" table\n");
 		return(0);
 	}
 

[-- Attachment #3: iptables-1.2.11-flow-20041028-noraw.diff --]
[-- Type: text/plain, Size: 665 bytes --]

diff -Pru iptables-1.2.11-flow-20041027/extensions/libipt_flow.man iptables-1.2.11-flow-20041028/extensions/libipt_flow.man
--- iptables-1.2.11-flow-20041027/extensions/libipt_flow.man	2004-10-28 14:56:07.752578760 -0500
+++ iptables-1.2.11-flow-20041028/extensions/libipt_flow.man	2004-10-28 14:54:42.386556368 -0500
@@ -1,6 +1,6 @@
 This module, when combined with connection tracking, allows access to
 IP, ICMP, TCP and UDP flow counts per source IP address (IP_CT_DIR_ORIGINAL
-state from ip_conntrack)  This match module can only be used in the filter
+state from ip_conntrack)  This match module can not be used in the raw
 table.
 .TP
 .BI "--maxip "   "n"

^ permalink raw reply	[flat|nested] 10+ messages in thread

* Re: [PATCH] new match extension `flow'
  2004-10-28  2:05 [PATCH] new match extension `flow' Josh Samuelson
  2004-10-28 20:15 ` Josh Samuelson
@ 2004-10-29 19:32 ` Pablo Neira
  2004-10-31  6:38   ` Josh Samuelson
  1 sibling, 1 reply; 10+ messages in thread
From: Pablo Neira @ 2004-10-29 19:32 UTC (permalink / raw)
  To: Josh Samuelson; +Cc: netfilter-devel

Josh Samuelson wrote:

>Greetings,
>
>The attached diff files add to iptables the ability to match max
>connections allowed for various IP protocols: generic IP, ICMP, TCP
>and UDP.
>
>kernel changes:
>The patch file is for 2.6.9.
>net/ipv4/netfilter/ip_conntrack_standalone.c must be fixed by a
>patch I sent to the list earlier; see line 819 for the problem.
>It modifies some files relevant to ip_conntrack and adds a ipt_flow.c
>match module.  When a new conntrack is created, a flow structure will
>have counters incremented for the type of protocol being used, indexed
>by the original direction source IP address.  All current flows can
>be viewed in the "/proc/net/ip_conntrack_flow" file.  The current
>number of flows can be viewed from the
>"/proc/sys/net/ipv4/netfilter/ip_conntrack_flow_count" file.
>
># cat /proc/net/ip_conntrack_flow 
>192.168.1.7 IP: 1 ICMP: 0 TCP: 1 UDP: 0
>192.168.1.254 IP: 1 ICMP: 0 TCP: 0 UDP: 0
>192.168.1.7 IP: 1 ICMP: 0 TCP: 1 UDP: 0
>192.168.1.55 IP: 1 ICMP: 0 TCP: 1 UDP: 0
>192.168.1.121 IP: 2 ICMP: 0 TCP: 0 UDP: 2
># cat /proc/sys/net/ipv4/netfilter/ip_conntrack_flow_count
>5
>#
>
>iptables changes:
>The patch is for iptables-1.2.11.  It adds the source to allow
>iptables to have the flow match via the shared library API that
>iptables defines.  The module adds the following options:
>--maxip n   (which can only be used when no protocol is specified)
>--maxicmp n (allowed with -p icmp)
>--maxtcp n  (allowed with -p tcp)
>--maxudp n  (allowed with -p udp)
>The flow match module can only be used in the filter table.
>
>An example of the usage:
>
>iptables -A FORWARD -p tcp -s 192.168.1.0/24 -m flow --maxtcp 150 \
>-m state --state NEW -j REJECT --reject-with tcp-reset
>
>This would deny new TCP connections from all hosts routing
>through the machine from the 192.168.1.0/24 network that already
>have 150 connections.
>
>Hope some people find this useful!  Original idea from John Dunning,
>see "Flow count module" from Tue Oct 12 14:54:20 CEST 2004.
>
>Questions, comments?
>  
>

cool, nice work. But can't we do a match which counts new connections? I
bet that the problem is that we don't know when a connection is closed;
maybe we could add a new ip_conntrack_status bit (something like
IPS_CLOSED_BIT) which would be set when a connection is closed (i.e. in
TCP tracking when in the TIME_WAIT/FIN_WAIT states). I don't much like
the idea of adding more stuff to the core of the conntrack — better to
keep things simpler.

Pablo

^ permalink raw reply	[flat|nested] 10+ messages in thread

* Re: [PATCH] new match extension `flow'
  2004-10-29 19:32 ` Pablo Neira
@ 2004-10-31  6:38   ` Josh Samuelson
  2004-10-31 14:41     ` Pablo Neira
  0 siblings, 1 reply; 10+ messages in thread
From: Josh Samuelson @ 2004-10-31  6:38 UTC (permalink / raw)
  To: Pablo Neira; +Cc: netfilter-devel

[-- Attachment #1: Type: text/plain, Size: 2127 bytes --]

On Fri, Oct 29, 2004 at 09:32:44PM +0200, Pablo Neira wrote:
> cool, nice work. But can't we do a match which counts new connections? I 
> bet that
> the problem is that we don't know when a connection is closed, maybe we 
> could add a
> new ip_conntrack_status (something like IPS_CLOSED_BIT) which would be 
> set when
> a connection is closed, i.e. in tcp tracking when in time 
> TIME_WAIT/FIN_WAIT). I don't
> like so much the idea of adding more stuff to the core of the conntrack 
> to keep things simpler.
> 
> Pablo

That's what I thought was elegant about this approach, the simplicity.
Besides the added code needed to set up the memory structures to store
the counters and the proc entries, there are only two entry points for
the added code.  One in init_conntrack() -> ip_conntrack_flow_inc(), the
other in destroy_conntrack() -> ip_conntrack_flow_dec().
Counter increment/decrement happen closest to when the state
of the connection tracking actually changes.  The cost of this
code path is always going to contain a hash computation, a linked list
search to find the structure, then increment/decrement the relevant
counter.  New and unused old flow structures will have an
allocation/deallocation and linked list add/delete overhead added to the
former stated cost.

Later, the match modules that could use the data from these flow structures
only have to compute a hash/LL search for the structure, then compare the
data.  No looping through the ip_conntrack_hash to count protocol types,
meanwhile checking states/timeouts or even invalidating the existence
of conntracks the match function believes exist against what the
connection tracking core actually has; executing said process for
each matched packet.

It could be argued that not everyone will want their connection tracking
core to have this added functionality because they simply don't need it.
I couldn't agree more!  Which makes me wonder why I didn't make the changes
to the conntrack_core a boolean sub-option initially.  Well, it is now!

I hope that this change eases your feelings about the slight conntrack_core
tampering.

Cheers,
-Josh

[-- Attachment #2: linux-2.6.9-flow-20041030.diff --]
[-- Type: text/plain, Size: 22387 bytes --]

diff -Pru linux-2.6.9/include/linux/netfilter_ipv4/ip_conntrack.h linux-2.6.9-flow-20041030/include/linux/netfilter_ipv4/ip_conntrack.h
--- linux-2.6.9/include/linux/netfilter_ipv4/ip_conntrack.h	2004-10-18 16:55:21.000000000 -0500
+++ linux-2.6.9-flow-20041030/include/linux/netfilter_ipv4/ip_conntrack.h	2004-10-30 16:46:31.436032840 -0500
@@ -6,6 +6,9 @@
 
 #include <linux/config.h>
 #include <linux/netfilter_ipv4/ip_conntrack_tuple.h>
+#ifdef CONFIG_IP_NF_CT_FLOW
+#include <linux/netfilter_ipv4/ip_conntrack_flow.h>
+#endif
 #include <linux/bitops.h>
 #include <linux/compiler.h>
 #include <asm/atomic.h>
@@ -291,6 +294,9 @@
 }
 
 extern unsigned int ip_conntrack_htable_size;
+#ifdef CONFIG_IP_NF_CT_FLOW
+extern unsigned int ip_conntrack_flow_htable_size;
+#endif
  
 struct ip_conntrack_stat
 {
diff -Pru linux-2.6.9/include/linux/netfilter_ipv4/ip_conntrack_core.h linux-2.6.9-flow-20041030/include/linux/netfilter_ipv4/ip_conntrack_core.h
--- linux-2.6.9/include/linux/netfilter_ipv4/ip_conntrack_core.h	2004-10-18 16:55:35.000000000 -0500
+++ linux-2.6.9-flow-20041030/include/linux/netfilter_ipv4/ip_conntrack_core.h	2004-10-30 16:46:59.585753432 -0500
@@ -17,6 +17,11 @@
 
 struct ip_conntrack_protocol;
 
+#ifdef CONFIG_IP_NF_CT_FLOW
+extern u_int32_t
+hash_flow(u_int32_t ip);
+#endif
+
 extern int
 ip_ct_get_tuple(const struct iphdr *iph,
 		const struct sk_buff *skb,
@@ -46,6 +51,9 @@
 }
 
 extern struct list_head *ip_conntrack_hash;
+#ifdef CONFIG_IP_NF_CT_FLOW
+extern struct list_head *ip_conntrack_flow_hash;
+#endif
 extern struct list_head ip_conntrack_expect_list;
 DECLARE_RWLOCK_EXTERN(ip_conntrack_lock);
 DECLARE_RWLOCK_EXTERN(ip_conntrack_expect_tuple_lock);
diff -Pru linux-2.6.9/include/linux/netfilter_ipv4/ip_conntrack_flow.h linux-2.6.9-flow-20041030/include/linux/netfilter_ipv4/ip_conntrack_flow.h
--- linux-2.6.9/include/linux/netfilter_ipv4/ip_conntrack_flow.h	1969-12-31 18:00:00.000000000 -0600
+++ linux-2.6.9-flow-20041030/include/linux/netfilter_ipv4/ip_conntrack_flow.h	2004-10-29 20:30:24.000000000 -0500
@@ -0,0 +1,27 @@
+#ifndef _IP_CONNTRACK_FLOW_H
+#define _IP_CONNTRACK_FLOW_H
+
+/* A `flow' is a structure containing the IP connection count
+  on various IP protocols.
+*/
+
+struct ip_conntrack_flow
+{
+#ifdef __KERNEL__
+	struct list_head list;
+	atomic_t use;
+#else
+	struct
+	{
+		void *next,
+		     *prev;
+	} list;
+	u_int32_t use;
+#endif /* __KERNEL__ */
+	u_int32_t ip_ct_dir_original_ip;
+	u_int16_t icmp,
+		  tcp,
+		  udp;
+};
+
+#endif /* _IP_CONNTRACK_FLOW_H */
diff -Pru linux-2.6.9/include/linux/netfilter_ipv4/ipt_flow.h linux-2.6.9-flow-20041030/include/linux/netfilter_ipv4/ipt_flow.h
--- linux-2.6.9/include/linux/netfilter_ipv4/ipt_flow.h	1969-12-31 18:00:00.000000000 -0600
+++ linux-2.6.9-flow-20041030/include/linux/netfilter_ipv4/ipt_flow.h	2004-10-29 20:30:24.000000000 -0500
@@ -0,0 +1,21 @@
+#ifndef _IPT_FLOW_H
+#define _IPT_FLOW_H
+
+typedef enum
+{
+	IPFLOW_IP = 1,
+	IPFLOW_ICMP = 2,
+	IPFLOW_TCP = 4,
+	IPFLOW_UDP = 8
+} ipflow_t;
+
+struct ipt_flow_info
+{
+	ipflow_t  proto;
+	u_int16_t max_ip,
+		  max_icmp,
+		  max_tcp,
+		  max_udp;
+};
+
+#endif /* IPT_FLOW_H */
diff -Pru linux-2.6.9/include/linux/sysctl.h linux-2.6.9-flow-20041030/include/linux/sysctl.h
--- linux-2.6.9/include/linux/sysctl.h	2004-10-18 16:54:31.000000000 -0500
+++ linux-2.6.9-flow-20041030/include/linux/sysctl.h	2004-10-29 20:30:24.000000000 -0500
@@ -426,6 +426,7 @@
  	NET_IPV4_NF_CONNTRACK_SCTP_TIMEOUT_SHUTDOWN_RECD=25,
  	NET_IPV4_NF_CONNTRACK_SCTP_TIMEOUT_SHUTDOWN_ACK_SENT=26,
 	NET_IPV4_NF_CONNTRACK_COUNT=27,
+	NET_IPV4_NF_CONNTRACK_FLOW_COUNT=28,
 };
  
 /* /proc/sys/net/ipv6 */
diff -Pru linux-2.6.9/net/ipv4/netfilter/Kconfig linux-2.6.9-flow-20041030/net/ipv4/netfilter/Kconfig
--- linux-2.6.9/net/ipv4/netfilter/Kconfig	2004-10-18 16:54:55.000000000 -0500
+++ linux-2.6.9-flow-20041030/net/ipv4/netfilter/Kconfig	2004-10-30 21:25:54.522656320 -0500
@@ -32,6 +32,25 @@
 
 	  If unsure, say `N'.
 
+config IP_NF_CT_FLOW
+	bool "Connection tracking protocol flow counters (EXPERIMENTAL)"
+	depends on IP_NF_CONNTRACK && EXPERIMENTAL
+	help
+	  If this option is enabled, the connection tracking code will
+	  keep protocol flow counters indexed by the original direction
+	  source IP address.  The protocol counters include generic IP,
+	  ICMP, TCP and UDP.
+
+	  These counters can be read from "/proc/net/ip_conntrack_flow".
+
+	  The number of entries being tracked can be read from
+	  "/proc/sys/net/ipv4/netfilter/ip_conntrack_flow_count".
+
+	  These counters can be used in the "Connection tracking protocol
+	  flow counters" match, see below.
+
+	  If unsure, say N.
+
 config IP_NF_CT_PROTO_SCTP
 	tristate  'SCTP protocol connection tracking support (EXPERIMENTAL)'
 	depends on IP_NF_CONNTRACK && EXPERIMENTAL
@@ -279,6 +298,22 @@
 
 	  To compile it as a module, choose M here.  If unsure, say N.
 
+config IP_NF_MATCH_FLOW
+	tristate 'Connection tracking protocol flow counters match support (EXPERIMENTAL)'
+	depends on IP_NF_IPTABLES && IP_NF_CT_FLOW && (IP_NF_FILTER || IP_NF_NAT || IP_NF_MANGLE) && EXPERIMENTAL
+	help
+	  `flow' matching allows you to match a packet when the specified
+	  number of known protocol connections from a original direction
+	  source IP address is exceeded.  The matches can be made against
+	  generic IP, ICMP, TCP or UDP flow counters.  This match can be
+	  used in all tables but raw.
+
+	  For example, this match allows you to control the number and type
+	  of connections (flows) from hosts in a known local network routing
+	  through the machine.
+
+	  To compile it as a module, choose M here.  If unsure, say N.
+
 config IP_NF_MATCH_OWNER
 	tristate "Owner match support"
 	depends on IP_NF_IPTABLES
diff -Pru linux-2.6.9/net/ipv4/netfilter/Makefile linux-2.6.9-flow-20041030/net/ipv4/netfilter/Makefile
--- linux-2.6.9/net/ipv4/netfilter/Makefile	2004-10-18 16:53:43.000000000 -0500
+++ linux-2.6.9-flow-20041030/net/ipv4/netfilter/Makefile	2004-10-29 20:30:24.000000000 -0500
@@ -67,6 +67,7 @@
 obj-$(CONFIG_IP_NF_MATCH_ADDRTYPE) += ipt_addrtype.o
 obj-$(CONFIG_IP_NF_MATCH_PHYSDEV) += ipt_physdev.o
 obj-$(CONFIG_IP_NF_MATCH_COMMENT) += ipt_comment.o
+obj-$(CONFIG_IP_NF_MATCH_FLOW) += ipt_flow.o
 
 # targets
 obj-$(CONFIG_IP_NF_TARGET_REJECT) += ipt_REJECT.o
diff -Pru linux-2.6.9/net/ipv4/netfilter/ip_conntrack_core.c linux-2.6.9-flow-20041030/net/ipv4/netfilter/ip_conntrack_core.c
--- linux-2.6.9/net/ipv4/netfilter/ip_conntrack_core.c	2004-10-18 16:53:05.000000000 -0500
+++ linux-2.6.9-flow-20041030/net/ipv4/netfilter/ip_conntrack_core.c	2004-10-30 22:17:03.919037144 -0500
@@ -76,6 +76,14 @@
 struct ip_conntrack ip_conntrack_untracked;
 unsigned int ip_ct_log_invalid;
 
+#ifdef CONFIG_IP_NF_CT_FLOW
+atomic_t ip_conntrack_flow_count = ATOMIC_INIT(0);
+EXPORT_SYMBOL(ip_conntrack_flow_count);
+unsigned int ip_conntrack_flow_htable_size = 0;
+struct list_head *ip_conntrack_flow_hash;
+static kmem_cache_t *ip_conntrack_flow_cachep;
+#endif
+
 DEFINE_PER_CPU(struct ip_conntrack_stat, ip_conntrack_stat);
 
 inline void 
@@ -100,6 +108,114 @@
 	                     ip_conntrack_hash_rnd) % ip_conntrack_htable_size);
 }
 
+#ifdef CONFIG_IP_NF_CT_FLOW
+u_int32_t
+hash_flow(u_int32_t ip)
+{
+	return(jhash_1word(ip, ip_conntrack_hash_rnd) % ip_conntrack_flow_htable_size);
+}
+
+#define INCREMENT_WITHOUT_OVERFLOW(c) if (c < ((1 << (sizeof(c) * 8)) - 1)) c++
+
+int
+ip_conntrack_flow_inc(struct ip_conntrack_tuple_hash *hash)
+{
+	unsigned int flow_hash;
+	u_int32_t ip;
+	struct list_head *list;
+	struct ip_conntrack_flow *flow = NULL;
+
+	ip = hash->tuple.src.ip;
+	flow_hash = hash_flow(ip);
+	READ_LOCK(&ip_conntrack_lock);
+	list_for_each(list, &ip_conntrack_flow_hash[flow_hash]) {
+		if (((struct ip_conntrack_flow *) list)->ip_ct_dir_original_ip == ip) {
+			flow = (struct ip_conntrack_flow *) list;
+			break;
+		}
+	}
+	READ_UNLOCK(&ip_conntrack_lock);
+	WRITE_LOCK(&ip_conntrack_lock);
+	if (!flow) {
+		flow = kmem_cache_alloc(ip_conntrack_flow_cachep, GFP_ATOMIC);
+		if (flow) {
+			atomic_inc(&ip_conntrack_flow_count);
+			memset(flow, 0, sizeof(struct ip_conntrack_flow));
+			atomic_set(&flow->use, 0);
+			flow->ip_ct_dir_original_ip = ip;
+			list = (struct list_head *) flow;
+			list_add(list, &ip_conntrack_flow_hash[flow_hash]);
+		}
+	}
+	if (flow) {
+		atomic_inc(&flow->use);
+		switch (hash->tuple.dst.protonum) {
+			case IPPROTO_ICMP:
+				INCREMENT_WITHOUT_OVERFLOW(flow->icmp);
+				break;
+			case IPPROTO_TCP:
+				INCREMENT_WITHOUT_OVERFLOW(flow->tcp);
+				break;
+			case IPPROTO_UDP:
+				INCREMENT_WITHOUT_OVERFLOW(flow->udp);
+				break;
+		}
+	}
+	WRITE_UNLOCK(&ip_conntrack_lock);
+	return(flow == NULL);
+}
+
+void
+ip_conntrack_flow_dec(struct ip_conntrack_tuple_hash *hash)
+{
+	unsigned int flow_hash;
+	u_int32_t ip;
+	struct list_head *list;
+	struct ip_conntrack_flow *flow = NULL;
+
+	ip = hash->tuple.src.ip;
+	flow_hash = hash_flow(ip);
+	READ_LOCK(&ip_conntrack_lock);
+	list_for_each(list, &ip_conntrack_flow_hash[flow_hash]) {
+		if (((struct ip_conntrack_flow *) list)->ip_ct_dir_original_ip == ip) {
+			flow = (struct ip_conntrack_flow *) list;
+			break;
+		}
+	}
+	READ_UNLOCK(&ip_conntrack_lock);
+	if (flow) {
+		WRITE_LOCK(&ip_conntrack_lock);
+		atomic_dec(&flow->use);
+		switch (hash->tuple.dst.protonum) {
+			case IPPROTO_ICMP:
+				if (flow->icmp) {
+					flow->icmp--;
+				}
+				break;
+			case IPPROTO_TCP:
+				if (flow->tcp) {
+					flow->tcp--;
+				}
+				break;
+			case IPPROTO_UDP:
+				if (flow->udp) {
+					flow->udp--;
+				}
+				break;
+		}
+		if (atomic_read(&flow->use) == 0) {
+			list = (struct list_head *) flow;
+			list_del(list);
+			kmem_cache_free(ip_conntrack_flow_cachep, flow);
+			atomic_dec(&ip_conntrack_flow_count);
+		}
+		WRITE_UNLOCK(&ip_conntrack_lock);
+	} else {
+		printk(KERN_WARNING "conntrack being destroyed, yet not found on flow list\n");
+	}
+}
+#endif /* CONFIG_IP_NF_CT_FLOW */
+
 int
 ip_ct_get_tuple(const struct iphdr *iph,
 		const struct sk_buff *skb,
@@ -288,6 +404,11 @@
 	IP_NF_ASSERT(atomic_read(&nfct->use) == 0);
 	IP_NF_ASSERT(!timer_pending(&ct->timeout));
 
+#ifdef CONFIG_IP_NF_CT_FLOW
+	/* flow entry: delete flow here */
+	ip_conntrack_flow_dec(&ct->tuplehash[IP_CT_DIR_ORIGINAL]);
+#endif
+
 	/* To make sure we don't get any weird locking issues here:
 	 * destroy_conntrack() MUST NOT be called with a write lock
 	 * to ip_conntrack_lock!!! -HW */
@@ -618,6 +739,13 @@
 end:	atomic_inc(&ip_conntrack_count);
 	WRITE_UNLOCK(&ip_conntrack_lock);
 
+#ifdef CONFIG_IP_NF_CT_FLOW
+	/* flow entry: insert flow here */
+	if (ip_conntrack_flow_inc(&conntrack->tuplehash[IP_CT_DIR_ORIGINAL])) {
+		printk(KERN_WARNING "flow cache alloc failed, cannot track new flows\n");
+	}
+#endif
+
 ret:	return &conntrack->tuplehash[IP_CT_DIR_ORIGINAL];
 }
 
@@ -1304,8 +1432,14 @@
 	}
 
 	kmem_cache_destroy(ip_conntrack_cachep);
+#ifdef CONFIG_IP_NF_CT_FLOW
+	kmem_cache_destroy(ip_conntrack_flow_cachep);
+#endif
 	kmem_cache_destroy(ip_conntrack_expect_cachep);
 	vfree(ip_conntrack_hash);
+#ifdef CONFIG_IP_NF_CT_FLOW
+	vfree(ip_conntrack_flow_hash);
+#endif
 	nf_unregister_sockopt(&so_getorigdst);
 }
 
@@ -1337,6 +1471,15 @@
 	       ip_conntrack_htable_size, ip_conntrack_max,
 	       sizeof(struct ip_conntrack));
 
+#ifdef CONFIG_IP_NF_CT_FLOW
+	ip_conntrack_flow_htable_size = ip_conntrack_htable_size / 2;
+
+	printk("  protocol flow counters (%u buckets, %d max)"
+	       " - %Zd bytes per CT_DO source\n",
+	       ip_conntrack_flow_htable_size, ip_conntrack_max,
+	       sizeof(struct ip_conntrack_flow));
+#endif
+
 	ret = nf_register_sockopt(&so_getorigdst);
 	if (ret != 0) {
 		printk(KERN_ERR "Unable to register netfilter socket option\n");
@@ -1350,20 +1493,47 @@
 		goto err_unreg_sockopt;
 	}
 
+#ifdef CONFIG_IP_NF_CT_FLOW
+	ip_conntrack_flow_hash = vmalloc(sizeof(struct list_head)
+					* ip_conntrack_flow_htable_size);
+	if (!ip_conntrack_flow_hash) {
+		printk(KERN_ERR "Unable to create ip_conntrack_flow_hash\n");
+		goto err_free_hash;
+	}
+#endif
+
 	ip_conntrack_cachep = kmem_cache_create("ip_conntrack",
 	                                        sizeof(struct ip_conntrack), 0,
 	                                        SLAB_HWCACHE_ALIGN, NULL, NULL);
 	if (!ip_conntrack_cachep) {
 		printk(KERN_ERR "Unable to create ip_conntrack slab cache\n");
+#ifdef CONFIG_IP_NF_CT_FLOW
+		goto err_free_flow_hash;
+#else
 		goto err_free_hash;
+#endif
+	}
+
+#ifdef CONFIG_IP_NF_CT_FLOW
+	ip_conntrack_flow_cachep = kmem_cache_create("ip_conntrack_flow",
+	                                        sizeof(struct ip_conntrack_flow), 0,
+	                                        SLAB_HWCACHE_ALIGN, NULL, NULL);
+	if (!ip_conntrack_flow_cachep) {
+		printk(KERN_ERR "Unable to create ip_conntrack_flow slab cache\n");
+		goto err_free_conntrack_slab;
 	}
+#endif
 
 	ip_conntrack_expect_cachep = kmem_cache_create("ip_conntrack_expect",
 					sizeof(struct ip_conntrack_expect),
 					0, SLAB_HWCACHE_ALIGN, NULL, NULL);
 	if (!ip_conntrack_expect_cachep) {
 		printk(KERN_ERR "Unable to create ip_expect slab cache\n");
+#ifdef CONFIG_IP_NF_CT_FLOW
+		goto err_free_conntrack_flow_slab;
+#else
 		goto err_free_conntrack_slab;
+#endif
 	}
 
 	/* Don't NEED lock here, but good form anyway. */
@@ -1379,6 +1549,11 @@
 	for (i = 0; i < ip_conntrack_htable_size; i++)
 		INIT_LIST_HEAD(&ip_conntrack_hash[i]);
 
+#ifdef CONFIG_IP_NF_CT_FLOW
+	for (i = 0; i < ip_conntrack_flow_htable_size; i++)
+		INIT_LIST_HEAD(&ip_conntrack_flow_hash[i]);
+#endif
+
 	/* For use by ipt_REJECT */
 	ip_ct_attach = ip_conntrack_attach;
 
@@ -1390,8 +1565,16 @@
 
 	return ret;
 
+#ifdef CONFIG_IP_NF_CT_FLOW
+err_free_conntrack_flow_slab:
+	kmem_cache_destroy(ip_conntrack_flow_cachep);
+#endif
 err_free_conntrack_slab:
 	kmem_cache_destroy(ip_conntrack_cachep);
+#ifdef CONFIG_IP_NF_CT_FLOW
+err_free_flow_hash:
+	vfree(ip_conntrack_flow_hash);
+#endif
 err_free_hash:
 	vfree(ip_conntrack_hash);
 err_unreg_sockopt:
diff -Pru linux-2.6.9/net/ipv4/netfilter/ip_conntrack_standalone.c linux-2.6.9-flow-20041030/net/ipv4/netfilter/ip_conntrack_standalone.c
--- linux-2.6.9/net/ipv4/netfilter/ip_conntrack_standalone.c	2004-10-30 21:51:12.798843256 -0500
+++ linux-2.6.9-flow-20041030/net/ipv4/netfilter/ip_conntrack_standalone.c	2004-10-30 16:45:52.609935304 -0500
@@ -46,6 +46,9 @@
 MODULE_LICENSE("GPL");
 
 extern atomic_t ip_conntrack_count;
+#ifdef CONFIG_IP_NF_CT_FLOW
+extern atomic_t ip_conntrack_flow_count;
+#endif
 DECLARE_PER_CPU(struct ip_conntrack_stat, ip_conntrack_stat);
 
 static int kill_proto(const struct ip_conntrack *i, void *data)
@@ -172,7 +175,7 @@
 	.stop  = ct_seq_stop,
 	.show  = ct_seq_show
 };
-  
+
 static int ct_open(struct inode *inode, struct file *file)
 {
 	return seq_open(file, &ct_seq_ops);
@@ -186,6 +189,72 @@
 	.release = seq_release
 };
   
+#ifdef CONFIG_IP_NF_CT_FLOW
+static void *ct_flow_seq_start(struct seq_file *s, loff_t *pos)
+{
+	if (*pos >= ip_conntrack_flow_htable_size)
+		return NULL;
+	return &ip_conntrack_flow_hash[*pos];
+}
+  
+static void ct_flow_seq_stop(struct seq_file *s, void *v)
+{
+}
+
+static void *ct_flow_seq_next(struct seq_file *s, void *v, loff_t *pos)
+{
+	(*pos)++;
+	if (*pos >= ip_conntrack_flow_htable_size)
+		return NULL;
+	return &ip_conntrack_flow_hash[*pos];
+}
+
+static int ct_flow_seq_show(struct seq_file *s, void *v)
+{
+	int ret = 0;
+	struct ip_conntrack_flow *flow;
+	struct list_head *list;
+
+	list = (struct list_head *) v;
+	list = list->next;
+	READ_LOCK(&ip_conntrack_lock);
+	list_for_each(list, (struct list_head *) v) {
+		flow = (struct ip_conntrack_flow *) list;
+		if (seq_printf(s, "%u.%u.%u.%u IP: %u ICMP: %hu TCP: %hu UDP: %hu\n",
+			       NIPQUAD(flow->ip_ct_dir_original_ip),
+			       atomic_read(&flow->use),
+			       flow->icmp,
+			       flow->tcp,
+			       flow->udp)) {
+			ret = -ENOSPC;
+			break;
+		}
+	}
+	READ_UNLOCK(&ip_conntrack_lock);
+	return(ret);
+}
+  
+static struct seq_operations ct_flow_seq_ops = {
+	.start = ct_flow_seq_start,
+	.next  = ct_flow_seq_next,
+	.stop  = ct_flow_seq_stop,
+	.show  = ct_flow_seq_show
+};
+
+static int ct_flow_open(struct inode *inode, struct file *file)
+{
+	return seq_open(file, &ct_flow_seq_ops);
+}
+
+static struct file_operations ct_flow_file_ops = {
+	.owner   = THIS_MODULE,
+	.open    = ct_flow_open,
+	.read    = seq_read,
+	.llseek  = seq_lseek,
+	.release = seq_release
+};
+#endif /* CONFIG_IP_NF_CT_FLOW */
+  
 /* expects */
 static void *exp_seq_start(struct seq_file *s, loff_t *pos)
 {
@@ -351,6 +420,7 @@
 	.llseek  = seq_lseek,
 	.release = seq_release_private,
 };
+
 #endif
 
 static unsigned int ip_confirm(unsigned int hooknum,
@@ -529,6 +599,16 @@
 		.mode		= 0444,
 		.proc_handler	= &proc_dointvec,
 	},
+#ifdef CONFIG_IP_NF_CT_FLOW
+	{
+		.ctl_name	= NET_IPV4_NF_CONNTRACK_FLOW_COUNT,
+		.procname	= "ip_conntrack_flow_count",
+		.data		= &ip_conntrack_flow_count,
+		.maxlen		= sizeof(int),
+		.mode		= 0444,
+		.proc_handler	= &proc_dointvec,
+	},
+#endif
 	{
 		.ctl_name	= NET_IPV4_NF_CONNTRACK_BUCKETS,
 		.procname	= "ip_conntrack_buckets",
@@ -725,7 +805,11 @@
 static int init_or_cleanup(int init)
 {
 #ifdef CONFIG_PROC_FS
-	struct proc_dir_entry *proc, *proc_exp, *proc_stat;
+	struct proc_dir_entry *proc,
+#ifdef CONFIG_IP_NF_CT_FLOW
+	*proc_flow,
+#endif
+	*proc_exp, *proc_stat;
 #endif
 	int ret = 0;
 
@@ -739,9 +823,18 @@
 	proc = proc_net_fops_create("ip_conntrack", 0440, &ct_file_ops);
 	if (!proc) goto cleanup_init;
 
+#ifdef CONFIG_IP_NF_CT_FLOW
+	proc_flow = proc_net_fops_create("ip_conntrack_flow", 0440, &ct_flow_file_ops);
+	if (!proc_flow) goto cleanup_proc;
+#endif
+
 	proc_exp = proc_net_fops_create("ip_conntrack_expect", 0440,
 					&exp_file_ops);
+#ifdef CONFIG_IP_NF_CT_FLOW
+	if (!proc_exp) goto cleanup_proc_flow;
+#else
 	if (!proc_exp) goto cleanup_proc;
+#endif
 
 	proc_stat = create_proc_entry("ip_conntrack", S_IRUGO, proc_net_stat);
 	if (!proc_stat)
@@ -815,8 +908,12 @@
  cleanup_proc_stat:
 #ifdef CONFIG_PROC_FS
 	proc_net_remove("ip_conntrack_stat");
-cleanup_proc_exp:
+ cleanup_proc_exp:
 	proc_net_remove("ip_conntrack_expect");
+#ifdef CONFIG_IP_NF_CT_FLOW
+ cleanup_proc_flow:
+	proc_net_remove("ip_conntrack_flow");
+#endif
  cleanup_proc:
 	proc_net_remove("ip_conntrack");
  cleanup_init:
@@ -875,6 +972,9 @@
 {
 }
 
+#ifdef CONFIG_IP_NF_CT_FLOW
+EXPORT_SYMBOL(hash_flow);
+#endif
 EXPORT_SYMBOL(ip_conntrack_protocol_register);
 EXPORT_SYMBOL(ip_conntrack_protocol_unregister);
 EXPORT_SYMBOL(invert_tuplepr);
@@ -900,6 +1000,9 @@
 EXPORT_SYMBOL(ip_conntrack_expect_list);
 EXPORT_SYMBOL(ip_conntrack_lock);
 EXPORT_SYMBOL(ip_conntrack_hash);
+#ifdef CONFIG_IP_NF_CT_FLOW
+EXPORT_SYMBOL(ip_conntrack_flow_hash);
+#endif
 EXPORT_SYMBOL(ip_conntrack_untracked);
 EXPORT_SYMBOL_GPL(ip_conntrack_find_get);
 EXPORT_SYMBOL_GPL(ip_conntrack_put);
diff -Pru linux-2.6.9/net/ipv4/netfilter/ipt_flow.c linux-2.6.9-flow-20041030/net/ipv4/netfilter/ipt_flow.c
--- linux-2.6.9/net/ipv4/netfilter/ipt_flow.c	1969-12-31 18:00:00.000000000 -0600
+++ linux-2.6.9-flow-20041030/net/ipv4/netfilter/ipt_flow.c	2004-10-30 01:03:48.000000000 -0500
@@ -0,0 +1,127 @@
+/* Kernel module to match [IP|ICMP|TCP|UDP] flow counts. */
+
+/* (C) 2004 Josh Samuelson <josamue1@wsc.edu>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/skbuff.h>
+#include <linux/netfilter_ipv4/ip_conntrack.h>
+#include <linux/netfilter_ipv4/ip_conntrack_core.h>
+#include <linux/netfilter_ipv4/ip_tables.h>
+#include <linux/netfilter_ipv4/ipt_flow.h>
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Josh Samuelson <josamue1@wsc.edu>");
+MODULE_DESCRIPTION("iptables connection flow match module");
+
+static int
+match(const struct sk_buff *skb,
+	const struct net_device *in,
+	const struct net_device *out,
+	const void *matchinfo,
+	int offset,
+	int *hotdrop)
+{
+	const struct ipt_flow_info *finfo = matchinfo;
+	struct list_head *list;
+	struct ip_conntrack_flow *flow = NULL;
+	u_int16_t proto;
+	u_int32_t flow_hash,
+		  ip;
+	int ret = 0;
+
+	proto = skb->nh.iph->protocol;
+	ip = skb->nh.iph->saddr;
+	flow_hash = hash_flow(ip);
+	READ_LOCK(&ip_conntrack_lock);
+	list_for_each(list, &ip_conntrack_flow_hash[flow_hash]) {
+		if (((struct ip_conntrack_flow *) list)->ip_ct_dir_original_ip == ip) {
+			flow = (struct ip_conntrack_flow *) list;
+			break;
+		}
+	}
+	if (flow) {
+		if ((atomic_read(&flow->use) > finfo->max_ip) && (finfo->proto & IPFLOW_IP))
+			ret = 1;
+		switch (proto) {
+			case IPPROTO_ICMP:
+				if ((flow->icmp > finfo->max_icmp) && (finfo->proto & IPFLOW_ICMP))
+					ret = 1;
+				break;
+			case IPPROTO_TCP:
+				if ((flow->tcp > finfo->max_tcp) && (finfo->proto & IPFLOW_TCP))
+					ret = 1;
+				break;
+			case IPPROTO_UDP:
+				if ((flow->udp > finfo->max_udp) && (finfo->proto & IPFLOW_UDP))
+					ret = 1;
+				break;
+		}
+	}
+	READ_UNLOCK(&ip_conntrack_lock);
+	return(ret);
+}
+
+static int check(const char *tablename,
+	const struct ipt_ip *ip,
+	void *matchinfo,
+	unsigned int matchsize,
+	unsigned int hook_mask)
+{
+	const struct ipt_flow_info *finfo = matchinfo;
+
+	if (matchsize != IPT_ALIGN(sizeof(struct ipt_flow_info)))
+		return 0;
+
+	if (strcmp(tablename, "raw") == 0) {
+		printk(KERN_WARNING "flow: can not by used in the \"raw\" table\n");
+		return(0);
+	}
+
+	switch (ip->proto) {
+		case IPPROTO_IP:
+			if (finfo->max_icmp || finfo->max_tcp || finfo->max_udp)
+				return(0);
+			break;
+		case IPPROTO_ICMP:
+			if (finfo->max_ip || finfo->max_tcp || finfo->max_udp)
+				return(0);
+			break;
+		case IPPROTO_TCP:
+			if (finfo->max_ip || finfo->max_icmp || finfo->max_udp)
+				return(0);
+			break;
+		case IPPROTO_UDP:
+			if (finfo->max_ip || finfo->max_icmp || finfo->max_tcp)
+				return(0);
+			break;
+		default:
+			return(0);
+	}
+	return 1;
+}
+
+static struct ipt_match flow_match = {
+	.name		= "flow",
+	.match		= &match,
+	.checkentry	= &check,
+	.me		= THIS_MODULE,
+};
+
+static int __init init(void)
+{
+	need_ip_conntrack();
+	return ipt_register_match(&flow_match);
+}
+
+static void __exit fini(void)
+{
+	ipt_unregister_match(&flow_match);
+}
+
+module_init(init);
+module_exit(fini);

^ permalink raw reply	[flat|nested] 10+ messages in thread

* Re: [PATCH] new match extension `flow'
  2004-10-31  6:38   ` Josh Samuelson
@ 2004-10-31 14:41     ` Pablo Neira
  2004-11-04  2:20       ` Josh Samuelson
  0 siblings, 1 reply; 10+ messages in thread
From: Pablo Neira @ 2004-10-31 14:41 UTC (permalink / raw)
  To: Josh Samuelson; +Cc: netfilter-devel

Josh,

Josh Samuelson wrote:

>On Fri, Oct 29, 2004 at 09:32:44PM +0200, Pablo Neira wrote:
>  
>
>>cool, nice work. But can't we do a match which counts new connections? I 
>>bet that
>>the problem is that we don't know when a connection is closed, maybe we 
>>could add a
>>new ip_conntrack_status (something like IPS_CLOSED_BIT) which would be 
>>set when
>>a connection is closed, i.e. in tcp tracking when in time 
>>TIME_WAIT/FIN_WAIT). I don't
>>like so much the idea of adding more stuff to the core of the conntrack 
>>to keep things simpler.
>>
>>Pablo
>>    
>>
>
>That's what I thought was elegant about this approach, the simplicity.
>Besides the added code needed to set up the memory structures to store
>the counters and the proc entries, there are only two entry points for
>the added code.  One in init_conntrack() -> ip_conntrack_flow_inc(), the
>other in destroy_conntrack() -> ip_conntrack_flow_dec().
>Counter increment/decrement happen closest to when the state
>of the connection tracking actually change.  The cost of this
>code path is always going to contain a hash computation, a linked list
>search to find the structure, then increment/decrement the relevant
>counter.  New and unused old flow structures will have an
>allocation/deallocation and linked list add/delete overhead added to the
>former stated cost.
>
>Later, the match modules that could use the data from these flow structures
>only have to compute a hash/LL search for the structure, then compare the
>data.  No looping through the ip_conntrack_hash to count protocol types,
>meanwhile checking states/timeouts or even invalidating the existence
>of conntracks the match function believes exist against what the
>connection tracking core actually has; executing said process for
>each matched packet.
>  
>

Sure, your solution is smart but I still think we can do this 
stuff without adding it to the core of the conntrack. I think that this 
is a perfect client of the event notification API which is *still* in 
development (expect changes).

See: 
https://lists.netfilter.org/pipermail/netfilter-devel/2004-October/017132.html

What I have in mind:

a) Move the flow API as is into a kernel module that implements a match.
b) register two functions which catch IPCT_NEW/RELATED and IPCT_DESTROY 
events. These functions keep updated the hash table with info about 
new/closed connections.

Another issue, AFAICS with your approach we can limit the number of 
connections per machine, but not per network, i.e. limit to 256 tcp 
connections for the whole 192.168.20.0/24, that's a different problem to 
resolve, though.

Pablo

^ permalink raw reply	[flat|nested] 10+ messages in thread

* Re: [PATCH] new match extension `flow'
  2004-10-31 14:41     ` Pablo Neira
@ 2004-11-04  2:20       ` Josh Samuelson
  2004-11-06 15:19         ` Pablo Neira
  2004-11-13 22:12         ` Pablo Neira
  0 siblings, 2 replies; 10+ messages in thread
From: Josh Samuelson @ 2004-11-04  2:20 UTC (permalink / raw)
  To: Pablo Neira; +Cc: netfilter-devel

[-- Attachment #1: Type: text/plain, Size: 1637 bytes --]

On Sun, Oct 31, 2004 at 03:41:49PM +0100, Pablo Neira wrote:
> 
> 
> Josh,
> 
>Sure, your solution is smart but I still think we can do this 
> stuff without adding it to the core of the conntrack. I think that this
> is a perfect client of the event notification API which is *still* in
> development (expect changes).
> 
> Another issue, AFAICS with your approach we can limit the number of
> connections per machine, but not per network, i.e. limit to 256 tcp
> connections for the whole 192.168.20.0/24, that's a different problem to
> resolve though..
> 
>  Pablo
> 

Hi Pablo,

You'll have to excuse my newness to the netfilter-devel scene and the
list in general.  Thank you for pointing out the thread on the connection
tracking events patch.  I had no idea work was being done there.
Initial impression: Slick!

I took your recommendations and changed the patch to use the connection
tracking events API.  I also altered the flow match module to track
the number of connections based on a network/mask option if one is
specified.

The kernel patch can only be applied after your CTE patch.  It should
patch fine on 2.6.9, but 2.6.10-rc1 will reject a hunk for
net/ipv4/netfilter/Kconfig because I added the lines necessary for
IP_NF_CONNTRACK_EVENTS; easy enough to fix by hand though.

The iptables patch should apply fine to vanilla iptables-1.2.11.

I've only done some initial testing with this thus far: one machine
routing through a patched machine.  Hopefully here in the next
few days I can put this on a machine that routes a few hundred
nodes to put some more load on it for testing purposes.

Cheers,
-Josh

[-- Attachment #2: linux-2.6.9-flow-20041103.diff --]
[-- Type: text/plain, Size: 23188 bytes --]

diff -Pru linux-2.6.9/include/linux/netfilter_ipv4/ip_cte_flow.h linux-2.6.9-flow/include/linux/netfilter_ipv4/ip_cte_flow.h
--- linux-2.6.9/include/linux/netfilter_ipv4/ip_cte_flow.h	1969-12-31 18:00:00.000000000 -0600
+++ linux-2.6.9-flow/include/linux/netfilter_ipv4/ip_cte_flow.h	2004-11-03 19:10:13.000000000 -0600
@@ -0,0 +1,28 @@
+#ifndef _IP_CTE_FLOW_H
+#define _IP_CTE_FLOW_H
+
+#define BIT_SET(b) (1 << (b))
+#define INCREMENT_WITHOUT_OVERFLOW(c) if (c < ~(1 << ((sizeof(c) * 8) - 1))) c++
+
+/* A `ip_cte_flow' is a structure containing the IP connection count
+   on various IP protocols.
+*/
+
+struct ip_cte_flow
+{
+	 struct list_head list;
+	 u_int32_t ip_ct_dir_original_ip,
+		   ip,
+		   icmp,
+		   tcp,
+		   udp;
+};
+
+extern struct list_head *ip_cte_flow_hash;
+extern unsigned int ip_cte_flow_htable_size;
+DECLARE_RWLOCK_EXTERN(ip_cte_flow_lock);
+extern int ip_cte_flow_register_notifier(struct notifier_block *nb);
+extern int ip_cte_flow_unregister_notifier(struct notifier_block *nb);
+extern u_int32_t ip_cte_hash_flow_ip(u_int32_t ip);
+
+#endif /* _IP_CTE_FLOW_H */
diff -Pru linux-2.6.9/include/linux/netfilter_ipv4/ipt_flow.h linux-2.6.9-flow/include/linux/netfilter_ipv4/ipt_flow.h
--- linux-2.6.9/include/linux/netfilter_ipv4/ipt_flow.h	1969-12-31 18:00:00.000000000 -0600
+++ linux-2.6.9-flow/include/linux/netfilter_ipv4/ipt_flow.h	2004-11-03 17:02:37.000000000 -0600
@@ -0,0 +1,41 @@
+#ifndef _IPT_FLOW_H
+#define _IPT_FLOW_H
+
+#define BIT_SET(b) (1 << (b))
+
+typedef enum
+{
+	IPFLOW_IP,
+	IPFLOW_ICMP,
+	IPFLOW_TCP,
+	IPFLOW_UDP,
+	IPFLOW_NETWORK_MASK = 31
+} ipflow_bits_t;
+
+struct ipt_flow_nm
+{
+	u_int32_t ip,
+		  icmp,
+		  tcp,
+		  udp;
+};
+
+struct ipt_flow_info
+{
+#ifdef __KERNEL__
+	struct list_head list;
+#else
+	struct
+	{
+		void *next,
+		     *prev;
+	} list;
+#endif
+	u_int32_t proto,
+		  network,
+		  mask,
+		  max;
+	struct ipt_flow_nm *nm;
+};
+
+#endif /* IPT_FLOW_H */
diff -Pru linux-2.6.9/net/ipv4/netfilter/Kconfig linux-2.6.9-flow/net/ipv4/netfilter/Kconfig
--- linux-2.6.9/net/ipv4/netfilter/Kconfig	2004-10-18 16:54:55.000000000 -0500
+++ linux-2.6.9-flow/net/ipv4/netfilter/Kconfig	2004-11-02 11:01:58.000000000 -0600
@@ -32,6 +32,28 @@
 
 	  If unsure, say `N'.
 
+config IP_NF_CONNTRACK_EVENTS
+	bool "Connection tracking events"
+	depends on IP_NF_CONNTRACK
+	help
+	  If unsure, say `N'.
+	  
+config IP_NF_CTE_FLOW
+	tristate "Connection tracking events: protocol flow counters (EXPERIMENTAL)"
+	depends on IP_NF_CONNTRACK_EVENTS && EXPERIMENTAL
+	help
+	  This option uses the connection tracking event notifiers to
+	  keep protocol flow counters indexed by the original direction
+	  source IP address.  The protocol counters include generic IP,
+	  ICMP, TCP and UDP.
+
+	  These counters can be read from "/proc/net/ip_cte_flow".
+
+	  These counters can be used in the "protocol flow counters match
+	  support", see below.
+
+	  To compile it as a module, choose M here.  If unsure, say N.
+
 config IP_NF_CT_PROTO_SCTP
 	tristate  'SCTP protocol connection tracking support (EXPERIMENTAL)'
 	depends on IP_NF_CONNTRACK && EXPERIMENTAL
@@ -279,6 +301,22 @@
 
 	  To compile it as a module, choose M here.  If unsure, say N.
 
+config IP_NF_CTE_MATCH_FLOW
+       tristate 'protocol flow counters match support (EXPERIMENTAL)'
+       depends on IP_NF_IPTABLES && IP_NF_CTE_FLOW && (IP_NF_FILTER || IP_NF_NAT || IP_NF_MANGLE) && EXPERIMENTAL
+       help
+         `flow' matching allows you to match a packet when the specified
+         number of known protocol connections from a original direction
+         source IP address is exceeded.  The matches can be made against
+         generic IP, ICMP, TCP or UDP flow counters.  This match can be
+         used in all tables but raw.
+
+         For example, this match allows you to control the number and type
+         of connections (flows) from hosts in a known local network routing
+         through the machine.
+
+         To compile it as a module, choose M here.  If unsure, say N.
+
 config IP_NF_MATCH_OWNER
 	tristate "Owner match support"
 	depends on IP_NF_IPTABLES
diff -Pru linux-2.6.9/net/ipv4/netfilter/Makefile linux-2.6.9-flow/net/ipv4/netfilter/Makefile
--- linux-2.6.9/net/ipv4/netfilter/Makefile	2004-10-18 16:53:43.000000000 -0500
+++ linux-2.6.9-flow/net/ipv4/netfilter/Makefile	2004-11-02 10:58:21.000000000 -0600
@@ -34,6 +34,9 @@
 obj-$(CONFIG_IP_NF_NAT_FTP) += ip_nat_ftp.o
 obj-$(CONFIG_IP_NF_NAT_IRC) += ip_nat_irc.o
 
+# connection tracking event objects
+obj-$(CONFIG_IP_NF_CTE_FLOW) += ip_cte_flow.o
+
 # generic IP tables 
 obj-$(CONFIG_IP_NF_IPTABLES) += ip_tables.o
 
@@ -67,6 +70,7 @@
 obj-$(CONFIG_IP_NF_MATCH_ADDRTYPE) += ipt_addrtype.o
 obj-$(CONFIG_IP_NF_MATCH_PHYSDEV) += ipt_physdev.o
 obj-$(CONFIG_IP_NF_MATCH_COMMENT) += ipt_comment.o
+obj-$(CONFIG_IP_NF_CTE_MATCH_FLOW) += ipt_flow.o
 
 # targets
 obj-$(CONFIG_IP_NF_TARGET_REJECT) += ipt_REJECT.o
diff -Pru linux-2.6.9/net/ipv4/netfilter/ip_cte_flow.c linux-2.6.9-flow/net/ipv4/netfilter/ip_cte_flow.c
--- linux-2.6.9/net/ipv4/netfilter/ip_cte_flow.c	1969-12-31 18:00:00.000000000 -0600
+++ linux-2.6.9-flow/net/ipv4/netfilter/ip_cte_flow.c	2004-11-03 20:12:09.000000000 -0600
@@ -0,0 +1,377 @@
+/* Kernel module to track [IP|ICMP|TCP|UDP] flow counts. */
+
+/* (C) 2004 Josh Samuelson <josamue1@wsc.edu>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/vmalloc.h>
+#include <linux/jhash.h>
+#include <linux/random.h>
+#include <linux/in.h>
+#include <linux/notifier.h>
+#ifdef CONFIG_PROC_FS
+#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
+#endif
+#include <linux/netfilter_ipv4/ip_conntrack.h>
+#include <linux/netfilter_ipv4/ip_conntrack_core.h>
+#include <linux/netfilter_ipv4/ip_cte_flow.h>
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Josh Samuelson <josamue1@wsc.edu>");
+MODULE_DESCRIPTION("protocol flow count tracking via connection tracking events");
+
+DECLARE_RWLOCK(ip_cte_flow_lock);
+struct list_head *ip_cte_flow_hash;
+unsigned int ip_cte_flow_htable_size = 0;
+/* static atomic_t ip_cte_flow_count = ATOMIC_INIT(0); */
+static int ip_cte_flow_hash_rnd;
+static kmem_cache_t *ip_cte_flow_cachep;
+static struct notifier_block *ip_cte_flow_nb_chain;
+
+#ifdef CONFIG_PROC_FS
+static void *ip_cte_flow_seq_start(struct seq_file *s, loff_t *pos)
+{
+	if (*pos >= ip_cte_flow_htable_size)
+		return NULL;
+	return &ip_cte_flow_hash[*pos];
+}
+  
+static void ip_cte_flow_seq_stop(struct seq_file *s, void *v)
+{
+}
+
+static void *ip_cte_flow_seq_next(struct seq_file *s, void *v, loff_t *pos)
+{
+	(*pos)++;
+	if (*pos >= ip_cte_flow_htable_size)
+		return NULL;
+	return &ip_cte_flow_hash[*pos];
+}
+
+static int ip_cte_flow_seq_show(struct seq_file *s, void *v)
+{
+	int ret = 0;
+	struct list_head *list;
+	struct ip_cte_flow *flow;
+
+	list = (struct list_head *) v;
+	list = list->next;
+	READ_LOCK(&ip_cte_flow_lock);
+	list_for_each(list, (struct list_head *) v) {
+		flow = (struct ip_cte_flow *) list;
+		if (seq_printf(s, "%u.%u.%u.%u IP: %u ICMP: %hu "
+				  "TCP: %hu UDP: %hu\n",
+				  NIPQUAD(flow->ip_ct_dir_original_ip),
+				  flow->ip,
+				  flow->icmp,
+				  flow->tcp,
+				  flow->udp)) {
+			ret = -ENOSPC;
+			break;
+		}
+	}
+	READ_UNLOCK(&ip_cte_flow_lock);
+	return ret;
+}
+  
+static struct seq_operations ip_cte_flow_seq_ops = {
+	.start = ip_cte_flow_seq_start,
+	.next  = ip_cte_flow_seq_next,
+	.stop  = ip_cte_flow_seq_stop,
+	.show  = ip_cte_flow_seq_show
+};
+
+static int ip_cte_flow_open(struct inode *inode, struct file *file)
+{
+	return seq_open(file, &ip_cte_flow_seq_ops);
+}
+
+static struct file_operations ip_cte_flow_file_ops = {
+	.owner   = THIS_MODULE,
+	.open    = ip_cte_flow_open,
+	.read    = seq_read,
+	.llseek  = seq_lseek,
+	.release = seq_release
+};
+#endif /* CONFIG_PROC_FS */
+  
+int
+ip_cte_flow_register_notifier(struct notifier_block *nb)
+{
+	return notifier_chain_register(&ip_cte_flow_nb_chain, nb);
+}
+
+int
+ip_cte_flow_unregister_notifier(struct notifier_block *nb)
+{
+	return notifier_chain_unregister(&ip_cte_flow_nb_chain, nb);
+}
+
+void
+ip_cte_flow_event(enum ip_conntrack_events event, struct ip_conntrack *ct)
+{
+	notifier_call_chain(&ip_cte_flow_nb_chain, event, ct);
+}
+
+u_int32_t
+ip_cte_hash_flow_ip(u_int32_t ip)
+{
+	return(jhash_1word(ip, ip_cte_flow_hash_rnd) % ip_cte_flow_htable_size);
+}
+
+static int
+ip_cte_flow_inc(struct ip_conntrack_tuple_hash *hash)
+{
+	unsigned int flow_hash;
+	u_int32_t ip;
+	struct list_head *list;
+	struct ip_cte_flow *flow = NULL;
+
+	ip = hash->tuple.src.ip;
+	flow_hash = ip_cte_hash_flow_ip(ip);
+	READ_LOCK(&ip_cte_flow_lock);
+	list_for_each(list, &ip_cte_flow_hash[flow_hash]) {
+		if (((struct ip_cte_flow *) list)->ip_ct_dir_original_ip ==
+		    ip) {
+			flow = (struct ip_cte_flow *) list;
+			break;
+		}
+	}
+	READ_UNLOCK(&ip_cte_flow_lock);
+	WRITE_LOCK(&ip_cte_flow_lock);
+	if (!flow) {
+		flow = kmem_cache_alloc(ip_cte_flow_cachep, GFP_ATOMIC);
+		if (flow) {
+			/* atomic_inc(&ip_cte_flow_count); */
+			memset(flow, 0, sizeof(struct ip_cte_flow));
+			flow->ip_ct_dir_original_ip = ip;
+			list = (struct list_head *) flow;
+			list_add(list, &ip_cte_flow_hash[flow_hash]);
+		}
+	}
+	if (flow) {
+		INCREMENT_WITHOUT_OVERFLOW(flow->ip);
+		switch (hash->tuple.dst.protonum) {
+			case IPPROTO_ICMP:
+				INCREMENT_WITHOUT_OVERFLOW(flow->icmp);
+				break;
+			case IPPROTO_TCP:
+				INCREMENT_WITHOUT_OVERFLOW(flow->tcp);
+				break;
+			case IPPROTO_UDP:
+				INCREMENT_WITHOUT_OVERFLOW(flow->udp);
+				break;
+		}
+	}
+	WRITE_UNLOCK(&ip_cte_flow_lock);
+	return(flow == NULL);
+}
+
+static void
+ip_cte_flow_dec(struct ip_conntrack_tuple_hash *hash)
+{
+	unsigned int flow_hash;
+	u_int32_t ip;
+	struct list_head *list;
+	struct ip_cte_flow *flow = NULL;
+
+	ip = hash->tuple.src.ip;
+	flow_hash = ip_cte_hash_flow_ip(ip);
+	READ_LOCK(&ip_cte_flow_lock);
+	list_for_each(list, &ip_cte_flow_hash[flow_hash]) {
+		if (((struct ip_cte_flow *) list)->ip_ct_dir_original_ip ==
+		    ip) {
+			flow = (struct ip_cte_flow *) list;
+			break;
+		}
+	}
+	READ_UNLOCK(&ip_cte_flow_lock);
+	if (flow) {
+		WRITE_LOCK(&ip_cte_flow_lock);
+		if(flow->ip)
+			flow->ip--;
+		switch (hash->tuple.dst.protonum) {
+			case IPPROTO_ICMP:
+				if (flow->icmp)
+					flow->icmp--;
+				break;
+			case IPPROTO_TCP:
+				if (flow->tcp)
+					flow->tcp--;
+				break;
+			case IPPROTO_UDP:
+				if (flow->udp)
+					flow->udp--;
+				break;
+		}
+		if (flow->ip == 0) {
+			list = (struct list_head *) flow;
+			list_del(list);
+			kmem_cache_free(ip_cte_flow_cachep, flow);
+			/* atomic_dec(&ip_cte_flow_count); */
+		}
+		WRITE_UNLOCK(&ip_cte_flow_lock);
+	} else {
+		printk(KERN_WARNING "conntrack being destroyed, "
+				    "yet not found on flow list\n"
+				    "%u src: %u.%u.%u.%u dst: %u.%u.%u.%u\n",
+				    hash->tuple.dst.protonum,
+				    NIPQUAD(hash->tuple.src.ip),
+				    NIPQUAD(hash->tuple.dst.ip));
+	}
+}
+
+static void
+ip_cte_flow_existing(void)
+{
+	unsigned int i;
+	struct list_head *list;
+	struct ip_conntrack_tuple_hash *hash;
+
+	READ_LOCK(&ip_conntrack_lock);
+	for (i = 0; i < ip_conntrack_htable_size; i++) {
+		list_for_each(list, &ip_conntrack_hash[i]) {
+			hash = (struct ip_conntrack_tuple_hash *) list;
+			if (!DIRECTION(hash))
+				ip_cte_flow_inc(hash);
+		}
+	}
+	READ_UNLOCK(&ip_conntrack_lock);
+}
+
+static void
+ip_cte_flow_destroy(void)
+{
+	unsigned int i;
+	struct list_head *list;
+	struct ip_cte_flow *flow;
+
+	WRITE_LOCK(&ip_cte_flow_lock);
+	for (i = 0; i < ip_cte_flow_htable_size; i++) {
+		list = ip_cte_flow_hash[i].next;
+		while (list != &ip_cte_flow_hash[i]) {
+				flow = (struct ip_cte_flow *) list;
+				list = list->next;
+				list_del((struct list_head *) flow);
+				kmem_cache_free(ip_cte_flow_cachep, flow);
+		}
+	}
+	WRITE_UNLOCK(&ip_cte_flow_lock);
+}
+
+static int ip_cte_flow_notifier(struct notifier_block *nb,
+	unsigned long ips,
+	void *v)
+{
+	struct ip_conntrack *ct = v;
+
+	switch (ips)
+	{
+		case BIT_SET(IPCT_NEW):
+		case BIT_SET(IPCT_RELATED):
+			ip_cte_flow_inc(&ct->tuplehash[IP_CT_DIR_ORIGINAL]);
+			ip_cte_flow_event(ips, ct);
+			break;
+		case BIT_SET(IPCT_DESTROY):
+			ip_cte_flow_dec(&ct->tuplehash[IP_CT_DIR_ORIGINAL]);
+			ip_cte_flow_event(ips, ct);
+			break;
+	}
+	return NOTIFY_OK;
+}
+
+static struct notifier_block ip_cte_flow_nb = {
+	.notifier_call = ip_cte_flow_notifier,
+	.next = NULL,
+	.priority = 0
+};
+
+static int __init init(void)
+{
+	unsigned int i;
+#ifdef CONFIG_PROC_FS
+	struct proc_dir_entry *proc_flow;
+#endif
+
+	need_ip_conntrack();
+
+	ip_cte_flow_htable_size = ip_conntrack_htable_size / 2;
+
+	get_random_bytes(&ip_cte_flow_hash_rnd, 4);
+
+	ip_cte_flow_hash = vmalloc(sizeof(struct list_head)
+					* ip_cte_flow_htable_size);
+	if (!ip_cte_flow_hash) {
+		printk(KERN_ERR "Unable to create ip_cte_flow_hash\n");
+		goto err;
+	}
+
+	for (i = 0; i < ip_cte_flow_htable_size; i++)
+		INIT_LIST_HEAD(&ip_cte_flow_hash[i]);
+
+	ip_cte_flow_cachep = kmem_cache_create("ip_cte_flow",
+						sizeof(struct ip_cte_flow), 0,
+						SLAB_HWCACHE_ALIGN, NULL, NULL);
+	if (!ip_cte_flow_cachep) {
+		printk(KERN_ERR "Unable to create ip_cte_flow slab cache\n");
+		goto err_free_hash;
+ 	}
+
+#ifdef CONFIG_PROC_FS
+	proc_flow = proc_net_fops_create("ip_cte_flow", 0440, &ip_cte_flow_file_ops);
+	if (!proc_flow) goto err_free_slab;
+#endif
+
+	if (ip_conntrack_register_notifier(&ip_cte_flow_nb)) {
+		goto cleanup_proc;
+	}
+
+	ip_cte_flow_existing();
+
+	printk("connection tracking events: protocol flow counters "
+	       "(%u buckets) - %Zd bytes per IP_CT_DIR_ORIGINAL source\n",
+	       ip_cte_flow_htable_size,
+	       sizeof(struct ip_cte_flow));
+
+	return 0;
+
+cleanup_proc:
+#ifdef CONFIG_PROC_FS
+	proc_net_remove("ip_cte_flow");
+#endif
+err_free_slab:
+ 	kmem_cache_destroy(ip_cte_flow_cachep);
+err_free_hash:
+	vfree(ip_cte_flow_hash);
+err:
+	return -ENOMEM;
+}
+
+static void __exit fini(void)
+{
+	ip_cte_flow_destroy();
+
+	if (ip_conntrack_unregister_notifier(&ip_cte_flow_nb))
+		printk(KERN_ERR "ip_conntrack_unregister_notifier() "
+				"failed, huh?\n");
+#ifdef CONFIG_PROC_FS
+	proc_net_remove("ip_cte_flow");
+#endif
+	kmem_cache_destroy(ip_cte_flow_cachep);
+	vfree(ip_cte_flow_hash);
+}
+
+module_init(init);
+module_exit(fini);
+
+EXPORT_SYMBOL(ip_cte_flow_hash);
+EXPORT_SYMBOL(ip_cte_flow_htable_size);
+EXPORT_SYMBOL(ip_cte_flow_lock);
+EXPORT_SYMBOL(ip_cte_flow_register_notifier);
+EXPORT_SYMBOL(ip_cte_flow_unregister_notifier);
+EXPORT_SYMBOL(ip_cte_hash_flow_ip);
diff -Pru linux-2.6.9/net/ipv4/netfilter/ipt_flow.c linux-2.6.9-flow/net/ipv4/netfilter/ipt_flow.c
--- linux-2.6.9/net/ipv4/netfilter/ipt_flow.c	1969-12-31 18:00:00.000000000 -0600
+++ linux-2.6.9-flow/net/ipv4/netfilter/ipt_flow.c	2004-11-03 20:12:00.000000000 -0600
@@ -0,0 +1,318 @@
+/* Kernel module to match [IP|ICMP|TCP|UDP] flow counts. */
+
+/* (C) 2004 Josh Samuelson <josamue1@wsc.edu>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/skbuff.h>
+#include <linux/vmalloc.h>
+#include <linux/notifier.h>
+#include <linux/list.h>
+#include <linux/netfilter_ipv4/ip_tables.h>
+#include <linux/netfilter_ipv4/ip_conntrack.h>
+#include <linux/netfilter_ipv4/ip_cte_flow.h>
+#include <linux/netfilter_ipv4/ipt_flow.h>
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Josh Samuelson <josamue1@wsc.edu>");
+MODULE_DESCRIPTION("protocol flow count match module");
+
+
+DECLARE_RWLOCK(ipt_flow_lock);
+LIST_HEAD(ipt_flow_notifier_list);
+static atomic_t ipt_flow_notifier_list_count = ATOMIC_INIT(0);
+static struct notifier_block ipt_flow_nb;
+
+static void
+ipt_flow_nw_inc(struct ip_conntrack *ct,
+	struct ipt_flow_info *finfo)
+{
+	INCREMENT_WITHOUT_OVERFLOW(finfo->nm->ip);
+	switch (ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.protonum) {
+		case IPPROTO_ICMP:
+			INCREMENT_WITHOUT_OVERFLOW(finfo->nm->icmp);
+			break;
+		case IPPROTO_TCP:
+			INCREMENT_WITHOUT_OVERFLOW(finfo->nm->tcp);
+			break;
+		case IPPROTO_UDP:
+			INCREMENT_WITHOUT_OVERFLOW(finfo->nm->udp);
+			break;
+	}
+}
+
+static void
+ipt_flow_nw_dec(struct ip_conntrack *ct,
+	struct ipt_flow_info *finfo)
+{
+	if (finfo->nm->ip)
+		finfo->nm->ip--;
+	switch (ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.protonum) {
+		case IPPROTO_ICMP:
+			if (finfo->nm->icmp)
+				finfo->nm->icmp--;
+			break;
+		case IPPROTO_TCP:
+			if (finfo->nm->tcp)
+				finfo->nm->tcp--;
+			break;
+		case IPPROTO_UDP:
+			if (finfo->nm->udp)
+				finfo->nm->udp--;
+			break;
+	}
+}
+
+static void
+ipt_flow_existing(struct ipt_flow_info *finfo)
+{
+        unsigned int i;
+        struct list_head *list;
+        struct ip_cte_flow *flow;
+
+        READ_LOCK(&ip_cte_flow_lock);
+        for (i = 0; i < ip_cte_flow_htable_size; i++) {
+                list_for_each(list, &ip_cte_flow_hash[i]) {
+                        flow = (struct ip_cte_flow *) list;
+			if ((flow->ip_ct_dir_original_ip & finfo->mask) ==
+			    finfo->network) {
+				finfo->nm->ip += flow->ip;
+				finfo->nm->icmp += flow->icmp;
+				finfo->nm->tcp += flow->tcp;
+				finfo->nm->udp += flow->udp;
+			}
+                }
+        }
+        READ_UNLOCK(&ip_cte_flow_lock);
+}
+
+int
+ipt_flow_nm_notifier(struct notifier_block *self,
+	unsigned long event, void *vct)
+{
+	struct list_head *list;
+	struct ipt_flow_info *finfo;
+	struct ip_conntrack *ct = vct;
+	u_int32_t ip;
+
+	READ_LOCK(&ipt_flow_lock);
+	ip = ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.ip;
+	list_for_each(list, &ipt_flow_notifier_list) {
+		finfo = (struct ipt_flow_info *) list;
+		if ((ip & finfo->mask) == finfo->network) {
+			switch (event) {
+				case BIT_SET(IPCT_NEW):
+				case BIT_SET(IPCT_RELATED):
+					ipt_flow_nw_inc(ct, finfo);
+					break;
+				case BIT_SET(IPCT_DESTROY):
+					ipt_flow_nw_dec(ct, finfo);
+					break;
+			}
+		}
+	}
+	READ_UNLOCK(&ipt_flow_lock);
+}
+
+static int
+match(const struct sk_buff *skb,
+	const struct net_device *in,
+	const struct net_device *out,
+	const void *matchinfo,
+	int offset,
+	int *hotdrop)
+{
+	const struct ipt_flow_info *finfo = matchinfo;
+	struct list_head *list;
+	struct ip_cte_flow *flow = NULL;
+	u_int16_t proto;
+	u_int32_t flow_hash,
+		  ip;
+	int ret = 0;
+
+	proto = skb->nh.iph->protocol;
+	ip = skb->nh.iph->saddr;
+	if ((finfo->proto & BIT_SET(IPFLOW_NETWORK_MASK)) &&
+	    ((ip & finfo->mask) == finfo->network)) {
+		if ((finfo->nm->ip >= finfo->max) &&
+		    (finfo->proto & BIT_SET(IPFLOW_IP)))
+			ret = 1;
+		switch (proto) {
+			case IPPROTO_ICMP:
+				if ((finfo->nm->icmp >= finfo->max) &&
+				    (finfo->proto & BIT_SET(IPFLOW_ICMP)))
+					ret = 1;
+				break;
+			case IPPROTO_TCP:
+				if ((finfo->nm->tcp >= finfo->max) &&
+				    (finfo->proto & BIT_SET(IPFLOW_TCP)))
+					ret = 1;
+				break;
+			case IPPROTO_UDP:
+				if ((finfo->nm->udp >= finfo->max) &&
+				    (finfo->proto & BIT_SET(IPFLOW_UDP)))
+					ret = 1;
+				break;
+		}
+	} else {
+		flow_hash = ip_cte_hash_flow_ip(ip);
+		READ_LOCK(&ip_cte_flow_lock);
+		list_for_each(list, &ip_cte_flow_hash[flow_hash]) {
+			if (((struct ip_cte_flow *)
+			    list)->ip_ct_dir_original_ip == ip) {
+				flow = (struct ip_cte_flow *) list;
+				break;
+			}
+		}
+		if (flow) {
+			if ((flow->ip >= finfo->max) &&
+			    (finfo->proto & BIT_SET(IPFLOW_IP)))
+				ret = 1;
+			switch (proto) {
+				case IPPROTO_ICMP:
+					if ((flow->icmp >= finfo->max) &&
+					    (finfo->proto &
+					    BIT_SET(IPFLOW_ICMP)))
+						ret = 1;
+					break;
+				case IPPROTO_TCP:
+					if ((flow->tcp >= finfo->max) &&
+					    (finfo->proto &
+					    BIT_SET(IPFLOW_TCP)))
+						ret = 1;
+					break;
+				case IPPROTO_UDP:
+					if ((flow->udp >= finfo->max) &&
+					    (finfo->proto &
+					    BIT_SET(IPFLOW_UDP)))
+						ret = 1;
+					break;
+			}
+		}
+		READ_UNLOCK(&ip_cte_flow_lock);
+	}
+	return(ret);
+}
+
+static int check(const char *tablename,
+	const struct ipt_ip *ip,
+	void *matchinfo,
+	unsigned int matchsize,
+	unsigned int hook_mask)
+{
+	struct list_head *list;
+	struct ipt_flow_info *finfo = matchinfo;
+
+	if (matchsize != IPT_ALIGN(sizeof(struct ipt_flow_info)))
+		return 0;
+
+	if (strcmp(tablename, "raw") == 0) {
+		printk(KERN_WARNING "flow: can not by used in the \"raw\" table\n");
+		return 0;
+	}
+
+	switch (ip->proto & ~BIT_SET(IPFLOW_NETWORK_MASK)) {
+		case IPPROTO_IP:
+			if ((finfo->proto & BIT_SET(IPFLOW_IP)) &&
+			    (finfo->proto &
+			    ~BIT_SET(IPFLOW_NETWORK_MASK) &
+			    ~BIT_SET(IPFLOW_IP)))
+				return 0;
+			break;
+		case IPPROTO_ICMP:
+			if ((finfo->proto & BIT_SET(IPFLOW_ICMP)) &&
+			    (finfo->proto &
+			    ~BIT_SET(IPFLOW_NETWORK_MASK) &
+			    ~BIT_SET(IPFLOW_ICMP)))
+				return 0;
+			break;
+		case IPPROTO_TCP:
+			if ((finfo->proto & BIT_SET(IPFLOW_TCP)) &&
+			    (finfo->proto &
+			    ~BIT_SET(IPFLOW_NETWORK_MASK) &
+			    ~BIT_SET(IPFLOW_TCP)))
+				return 0;
+			break;
+		case IPPROTO_UDP:
+			if ((finfo->proto & BIT_SET(IPFLOW_UDP)) &&
+			    (finfo->proto &
+			    ~BIT_SET(IPFLOW_NETWORK_MASK) &
+			    ~BIT_SET(IPFLOW_UDP)))
+				return 0;
+			break;
+		default:
+			return 0;
+	}
+
+	if ((finfo->proto & BIT_SET(IPFLOW_NETWORK_MASK))) {
+		finfo->nm = vmalloc(sizeof(struct ipt_flow_nm));
+		if (!finfo->nm)
+			return 0;
+		memset(finfo->nm, 0, sizeof(struct ipt_flow_nm));
+		WRITE_LOCK(&ipt_flow_lock);
+		ipt_flow_existing(finfo);
+		list = (struct list_head *) finfo;
+		list_add(list, &ipt_flow_notifier_list);
+		if (atomic_read(&ipt_flow_notifier_list_count) == 0) {
+			ipt_flow_nb.notifier_call = ipt_flow_nm_notifier;
+			if (ip_cte_flow_register_notifier(&ipt_flow_nb)) {
+				vfree(finfo->nm);
+				return 0;
+			}
+		}
+		atomic_inc(&ipt_flow_notifier_list_count);
+		WRITE_UNLOCK(&ipt_flow_lock);
+	}
+	return 1;
+}
+
+void destroy(void *matchinfo,
+	unsigned int matchsize)
+{
+	struct list_head *list;
+	struct ipt_flow_info *finfo = matchinfo;
+
+	if (matchsize != IPT_ALIGN(sizeof(struct ipt_flow_info)))
+		return;
+
+	if ((finfo->proto & BIT_SET(IPFLOW_NETWORK_MASK)) && finfo->nm) {
+		WRITE_LOCK(&ipt_flow_lock);
+		atomic_dec(&ipt_flow_notifier_list_count);
+		if (atomic_read(&ipt_flow_notifier_list_count) == 0) {
+			if (ip_cte_flow_unregister_notifier(&ipt_flow_nb))
+				printk(KERN_ERR
+				       "ip_cte_flow_unregister_notifier failed"
+				       ", huh?\n");
+		}
+		list = (struct list_head *) finfo;
+		list_del(list);
+		WRITE_UNLOCK(&ipt_flow_lock);
+		vfree(finfo->nm);
+	}
+}
+
+static struct ipt_match flow_match = {
+	.name		= "flow",
+	.match		= &match,
+	.checkentry	= &check,
+	.destroy	= &destroy,
+	.me		= THIS_MODULE,
+};
+
+static int __init init(void)
+{
+	need_ip_conntrack();
+	return ipt_register_match(&flow_match);
+}
+
+static void __exit fini(void)
+{
+	ipt_unregister_match(&flow_match);
+}
+
+module_init(init);
+module_exit(fini);

[-- Attachment #3: iptables-1.2.11-flow-20041103.diff --]
[-- Type: text/plain, Size: 7097 bytes --]

diff -Pru iptables-1.2.11/extensions/Makefile iptables-1.2.11-flow/extensions/Makefile
--- iptables-1.2.11/extensions/Makefile	2004-06-17 05:22:54.000000000 -0500
+++ iptables-1.2.11-flow/extensions/Makefile	2004-11-02 11:24:37.000000000 -0600
@@ -5,7 +5,7 @@
 # header files are present in the include/linux directory of this iptables
 # package (HW)
 #
-PF_EXT_SLIB:=ah connlimit connmark conntrack dscp ecn esp helper icmp iprange length limit mac mark multiport owner physdev pkttype realm rpc sctp standard state tcp tcpmss tos ttl udp unclean CLASSIFY CONNMARK DNAT DSCP ECN LOG MARK MASQUERADE MIRROR NETMAP NOTRACK REDIRECT REJECT SAME SNAT TARPIT TCPMSS TOS TRACE TTL ULOG
+PF_EXT_SLIB:=ah connlimit connmark conntrack dscp ecn esp flow helper icmp iprange length limit mac mark multiport owner physdev pkttype realm rpc sctp standard state tcp tcpmss tos ttl udp unclean CLASSIFY CONNMARK DNAT DSCP ECN LOG MARK MASQUERADE MIRROR NETMAP NOTRACK REDIRECT REJECT SAME SNAT TARPIT TCPMSS TOS TRACE TTL ULOG
 PF6_EXT_SLIB:=eui64 hl icmpv6 length limit mac mark multiport owner standard tcp udp HL LOG MARK TRACE
 
 # Optionals
diff -Pru iptables-1.2.11/extensions/libipt_flow.c iptables-1.2.11-flow/extensions/libipt_flow.c
--- iptables-1.2.11/extensions/libipt_flow.c	1969-12-31 18:00:00.000000000 -0600
+++ iptables-1.2.11-flow/extensions/libipt_flow.c	2004-11-03 19:01:06.706838768 -0600
@@ -0,0 +1,184 @@
+/* Shared library iptables add-on to add [IP|ICMP|TCP|UDP] flow count
+ * match support.
+ */
+
+/* (C) 2004 Josh Samuelson <josamue1@wsc.edu>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <getopt.h>
+#include <iptables.h>
+#include <linux/netfilter_ipv4/ipt_flow.h>
+
+#define MAX_SIZE(c) ~(1 << ((sizeof(c) * 8) - 1))
+
+/* Function which prints out usage message. */
+static void
+help(void)
+{
+	printf("flow v%s options:\n"
+	       " --maxip n\n"
+	       " --maxicmp n\n"
+	       " --maxtcp n\n"
+	       " --maxudp n\n"
+	       " --nm n.n.n.n/mm or n.n.n.n/m.m.m.m\n"
+	       "\n", IPTABLES_VERSION);
+}
+
+/* Initialize the match. */
+static void
+init(struct ipt_entry_match *m, unsigned int *nfcache)
+{
+	/* Can't cache this */
+	*nfcache |= NFC_UNKNOWN;
+}
+
+static void
+parse_network_mask(const char *nm, u_int32_t *network, u_int32_t *mask)
+{
+	unsigned int naddrs;
+	struct in_addr *addr,
+	               addr_mask;
+
+	parse_hostnetworkmask(nm, &addr, &addr_mask, &naddrs);
+	if (naddrs > 1)
+		exit_error(PARAMETER_PROBLEM, "multiple IP addresses not allowed");
+	if (naddrs == 1) {
+		*network = addr[0].s_addr;
+		*mask = addr_mask.s_addr;
+	}
+}
+
+/* Function which parses command options; returns true if it
+   ate an option */
+static int
+parse(int c, char **argv, int invert, unsigned int *flags,
+      const struct ipt_entry *entry,
+      unsigned int *nfcache,
+      struct ipt_entry_match **match)
+{
+	int ret;
+	u_int32_t count;
+	struct ipt_flow_info *finfo = (struct ipt_flow_info *)(*match)->data;
+
+	if (invert || check_inverse(optarg, &invert, &optind, 0)) {
+		exit_error(PARAMETER_PROBLEM, "flow options cannot be inverted");
+	}
+
+	if (c < 256) {
+		ret = string_to_number(argv[optind - 1], 1, MAX_SIZE(count), &count);
+		if (ret == -1) {
+			exit_error(PARAMETER_PROBLEM, "value out of range");
+		}
+	}
+
+	switch (c) {
+		case 0:
+			finfo->proto |= BIT_SET(IPFLOW_IP);
+			*flags = 1;
+			break;
+		case 1:
+			finfo->proto |= BIT_SET(IPFLOW_ICMP);
+			*flags = 1;
+			break;
+		case 6:
+			finfo->proto |= BIT_SET(IPFLOW_TCP);
+			*flags = 1;
+			break;
+		case 17:
+			finfo->proto |= BIT_SET(IPFLOW_UDP);
+			*flags = 1;
+			break;
+		case 256:
+			parse_network_mask(argv[optind - 1], &finfo->network, &finfo->mask);
+			finfo->proto |= BIT_SET(IPFLOW_NETWORK_MASK);
+			break;
+		default:
+			return 0;
+	}
+	finfo->max = count;
+	return 1;
+}
+
+/* Final check; must have specified --max[ip|icmp|tcp|udp]. */
+static void final_check(unsigned int flags)
+{
+	if (!flags)
+		exit_error(PARAMETER_PROBLEM, "You must specify `--max[ip|icmp|tcp|udp]'");
+}
+
+void flow_print(struct ipt_flow_info *finfo)
+{
+	if ((finfo->proto & BIT_SET(IPFLOW_IP)) &&
+	    finfo->max)
+		printf("--maxip %i ", finfo->max);
+	if ((finfo->proto & BIT_SET(IPFLOW_ICMP)) &&
+	    finfo->max)
+		printf("--maxicmp %i ", finfo->max);
+	if ((finfo->proto & BIT_SET(IPFLOW_TCP)) &&
+	    finfo->max)
+		printf("--maxtcp %i ", finfo->max);
+	if ((finfo->proto & BIT_SET(IPFLOW_UDP)) &&
+	    finfo->max)
+		printf("--maxudp %i ", finfo->max);
+	if (finfo->proto & BIT_SET(IPFLOW_NETWORK_MASK)) {
+		printf("--nm %s%s ", addr_to_dotted((struct in_addr *) &finfo->network),
+		       mask_to_dotted((struct in_addr *) &finfo->mask));
+	}
+}
+
+/* Prints out the matchinfo. */
+static void
+print(const struct ipt_ip *ip,
+      const struct ipt_entry_match *match,
+      int numeric)
+{
+	struct ipt_flow_info *finfo = (struct ipt_flow_info *)match->data;
+
+	printf("flow ");
+	flow_print(finfo);
+}
+
+/* Saves the matchinfo in parsable form to stdout. */
+static void save(const struct ipt_ip *ip, const struct ipt_entry_match *match)
+{
+	struct ipt_flow_info *finfo = (struct ipt_flow_info *)match->data;
+
+	flow_print(finfo);
+}
+
+static struct option opts[] = {
+	{ .name = "maxip", .has_arg = 1, .flag =0, .val = 0 },
+	{ .name = "maxicmp", .has_arg = 1, .flag =0, .val = 1 },
+	{ .name = "maxtcp", .has_arg = 1, .flag = 0, .val = 6 },
+	{ .name = "maxudp", .has_arg = 1, .flag = 0, .val = 17 },
+	{ .name = "nm", .has_arg = 1, .flag = 0, .val = 256 },
+	{0}
+};
+
+static
+struct iptables_match flow = {
+	.next		= NULL,
+	.name		= "flow",
+	.version	= IPTABLES_VERSION,
+	.size		= IPT_ALIGN(sizeof(struct ipt_flow_info)),
+	.userspacesize	= IPT_ALIGN(sizeof(struct ipt_flow_info)),
+	.help		= &help,
+	.init		= &init,
+	.parse		= &parse,
+	.final_check	= &final_check,
+	.print		= &print,
+	.save		= &save,
+	.extra_opts	= opts
+};
+
+void _init(void)
+{
+	register_match(&flow);
+}
diff -Pru iptables-1.2.11/extensions/libipt_flow.man iptables-1.2.11-flow/extensions/libipt_flow.man
--- iptables-1.2.11/extensions/libipt_flow.man	1969-12-31 18:00:00.000000000 -0600
+++ iptables-1.2.11-flow/extensions/libipt_flow.man	2004-11-03 16:40:15.000000000 -0600
@@ -0,0 +1,13 @@
+This module, when combined with protocol flow counters (via connection
+tracking events), allows access to IP, ICMP, TCP and UDP flow counts
+per source IP address (IP_CT_DIR_ORIGINAL state from ip_conntrack).
+This match module can not be used in the raw table.
+.TP
+.BI "--maxip "   "n"
+.BI "--maxicmp " "n"
+.BI "--maxtcp "  "n"
+.BI "--maxudp "  "n"
+.BI "--nm "      "n.n.n.n/mm or n.n.n.n/m.m.m.m"
+Where n is the max number of connections to allow from some specified source.
+n.n.n.n is a network.  mm or m.m.m.m are the mask bits which should be
+ANDed with a source, the result of which is compared to the network portion.

^ permalink raw reply	[flat|nested] 10+ messages in thread

* Re: [PATCH] new match extension `flow'
  2004-11-04  2:20       ` Josh Samuelson
@ 2004-11-06 15:19         ` Pablo Neira
  2004-11-08  2:52           ` Josh Samuelson
  2004-11-13 22:12         ` Pablo Neira
  1 sibling, 1 reply; 10+ messages in thread
From: Pablo Neira @ 2004-11-06 15:19 UTC (permalink / raw)
  To: Josh Samuelson; +Cc: netfilter-devel

Hi Josh,

Josh Samuelson wrote:

>You'll have to excuse my newness to the netfilter-devel scene and the
>list in general.  Thank you for pointing out the thread on the connection
>tracking events patch.  I had no idea work was being done there.
>Initial impression: Slick!
>
>I took your recommendations and changed the patch to use the connection
>tracking events API.  I also altered the flow match module to track
>the number of connections based on a network/mask option if one is
>specified.
>
>The kernel patch can only be applied after your CTE patch.  It should
>patch fine on 2.6.9, but 2.6.10-rc1 will reject a hunk for
>net/ipv4/netfilter/Kconfig because I added the lines necessary for
>IP_NF_CONNTRACK_EVENTS; easy enough to fix by hand though.
>
>The iptables patch should apply fine to vanilla iptables-1.2.11.
>  
>

nice work!

>I've only done some initial testing with this thus far: one machine
>routing through a patched machine.  Hopefully here in the next
>few days I can put this on a machine that routes a few hundred
>nodes to put some more load on it for testing purposes.
>  
>

please, don't forget that we are still discussing the event API. As this 
new match goes on top of it, we can hold this patch until the event API 
is pushed forward. Then we'll come back to this point.

Some comments:

>diff -Pru linux-2.6.9/include/linux/netfilter_ipv4/ip_cte_flow.h linux-2.6.9-flow/include/linux/netfilter_ipv4/ip_cte_flow.h
>--- linux-2.6.9/include/linux/netfilter_ipv4/ip_cte_flow.h	1969-12-31 18:00:00.000000000 -0600
>+++ linux-2.6.9-flow/include/linux/netfilter_ipv4/ip_cte_flow.h	2004-11-03 19:10:13.000000000 -0600
>  
>

I see two possibilities here:

a) move ip_cte_flow.[h|c] to ipt_flow.[h|c], matches always fit in a file.
b) rename ip_cte_flow to ip_conntrack_flow_stats, this could be a module 
which generates stats about current connections going through the firewall.

I need to give more spins to this issue.

Any comments?

Pablo

^ permalink raw reply	[flat|nested] 10+ messages in thread

* Re: [PATCH] new match extension `flow'
  2004-11-06 15:19         ` Pablo Neira
@ 2004-11-08  2:52           ` Josh Samuelson
  2004-11-10 18:12             ` Pablo Neira
  0 siblings, 1 reply; 10+ messages in thread
From: Josh Samuelson @ 2004-11-08  2:52 UTC (permalink / raw)
  To: Pablo Neira; +Cc: netfilter-devel

Hi Pablo,

On Sat, Nov 06, 2004 at 04:19:22PM +0100, Pablo Neira wrote:
> Hi Josh,
> please, don't forget that we are still discussing the event API. As this 
> new match goes on top of it, we can hold this patch until the event API 
> is pushed forward. Then we'll go back this point.

Indeed.

> 
> Some comments:
> 
> >diff -Pru linux-2.6.9/include/linux/netfilter_ipv4/ip_cte_flow.h 
> >linux-2.6.9-flow/include/linux/netfilter_ipv4/ip_cte_flow.h
> >--- linux-2.6.9/include/linux/netfilter_ipv4/ip_cte_flow.h	1969-12-31 
> >18:00:00.000000000 -0600
> >+++ linux-2.6.9-flow/include/linux/netfilter_ipv4/ip_cte_flow.h 2004-11-03 
> >19:10:13.000000000 -0600
> > 
> >
> 
> I see two possibilities here:
> 
> a) move ip_cte_flow.[h|c] to ipt_flow.[h|c], matches always fit in a file.
> b) rename ip_cte_flow to ip_conntrack_flow_stats, this could be a module 
> which generates stats about current connections going through the firewall.
> 
> I need to give more spins to this issue.
> 
> Any comments?
> 
> Pablo

In regards to ip_cte_flow.[h|c], I wasn't sure how to handle this module
with respect to the filesystem namespace.  Those files don't provide
any of the match functionality; it just tracks the flows from the CTE
API, exports a few functions, the linked list of flows and provides
"/proc/net/ip_cte_flow" file.  All of which I'm sure you know by the
source, but just to provide some context for those who perhaps haven't
glanced at it.  The main reason I called ip_cte_flow was because it's
built on/requires 'CTE' functionality.  I figure there is the
potential for a lot of modules needing the CTE API and perhaps the need
to separate those files that require it into a differing filesystem
namespace that can't really be classified as a match/target, etc?
If you prefer ip_conntrack_flow_stats, I'm really not partial to
anything.

The ipt_flow.[h|c] file builds on top of the prior module to provide the
match functionality and to track network/mask based flows.  I separated the
two because I can see the need to track the flows via /proc outside of the
iptables match.  I.e. to just have a quick glance at who may be responsible
for a sudden burst of flows.  Or to allow for other match modules
to build on top of it in ways that my simple match module lacks, etc.

Those are my thoughts on why I did things the way I did.  :)

Cheers,
Josh

^ permalink raw reply	[flat|nested] 10+ messages in thread

* Re: [PATCH] new match extension `flow'
  2004-11-08  2:52           ` Josh Samuelson
@ 2004-11-10 18:12             ` Pablo Neira
  0 siblings, 0 replies; 10+ messages in thread
From: Pablo Neira @ 2004-11-10 18:12 UTC (permalink / raw)
  To: Josh Samuelson; +Cc: netfilter-devel

Josh,

Josh Samuelson wrote:

>>Some comments:
>>
>>    
>>
>>>diff -Pru linux-2.6.9/include/linux/netfilter_ipv4/ip_cte_flow.h 
>>>linux-2.6.9-flow/include/linux/netfilter_ipv4/ip_cte_flow.h
>>>--- linux-2.6.9/include/linux/netfilter_ipv4/ip_cte_flow.h	1969-12-31 
>>>18:00:00.000000000 -0600
>>>+++ linux-2.6.9-flow/include/linux/netfilter_ipv4/ip_cte_flow.h 2004-11-03 
>>>19:10:13.000000000 -0600
>>>
>>>
>>>      
>>>
>>I see two possibilities here:
>>
>>a) move ip_cte_flow.[h|c] to ipt_flow.[h|c], matches always fit in a file.
>>b) rename ip_cte_flow to ip_conntrack_flow_stats, this could be a module 
>>which generates stats about current connections going through the firewall.
>>
>>I need to give more spins to this issue.
>>
>>Any comments?
>>
>>Pablo
>>    
>>
>
>In regards to ip_cte_flow.[h|c], I wasn't sure how to handle this module
>with respect to the filesystem namespace.  Those files don't provide
>any of the match functionality; it just tracks the flows from the CTE
>API, exports a few functions, the linked list of flows and provides
>"/proc/net/ip_cte_flow" file.  All of which I'm sure you know by the
>source, but just to provide some context for those who perhaps haven't
>glanced at it.  The main reason I called ip_cte_flow was because it's
>built on/requires 'CTE' functionality.  I figure there is the
>potential for a lot of modules needing the CTE API
>

At this moment I can't imagine any other match/target which could use 
this CTE API, do you have anything in mind? I mean, I think that if only 
the flow match uses it (and no one else could benefit from it), it should 
go inside the flow match. In case that someone else could use it, then 
it makes sense for me to provide such API.

> and perhaps the need
>to separate those files that require it into a differing filesystem
>namespace that can't really be classified as a match/target, etc?
>If you prefer ip_conntrack_flow_stats, I'm really not partial to
>anything.
>  
>

I don't mind the name either. I think that maybe it could be used 
to generate statistics about the current connections going through the 
firewall, in that case it makes sense creating a module in the namespace 
of ip_conntrack_whatever.

But I think that those statistics could be done with a user space 
program via the ctnetlink API (netlink sockets), so this makes me think 
that we should put all the stuff in a single ipt_flow file.

>The ipt_flow.[h|c] file builds on top of the prior module to provide the
>match functionality and to track network/mask based flows.  I separated the
>two because I can see the need to track the flows via /proc outside of the
>iptables match.  I.e. to just have a quick glance at who may be responsible
>for a sudden burst of flows.  Or to allow for other match modules
>to build on top of it in ways that my simple match module lacks, etc.
>
>Those are my thoughts on why I did things the way I did.  :)
>  
>

nice, see your next email :)

Pablo

^ permalink raw reply	[flat|nested] 10+ messages in thread

* Re: [PATCH] new match extension `flow'
  2004-11-04  2:20       ` Josh Samuelson
  2004-11-06 15:19         ` Pablo Neira
@ 2004-11-13 22:12         ` Pablo Neira
  1 sibling, 0 replies; 10+ messages in thread
From: Pablo Neira @ 2004-11-13 22:12 UTC (permalink / raw)
  To: Josh Samuelson; +Cc: netfilter-devel

Josh,

Some minor comments about the code that I had in mind.

Josh Samuelson wrote:

>diff -Pru linux-2.6.9/include/linux/netfilter_ipv4/ip_cte_flow.h linux-2.6.9-flow/include/linux/netfilter_ipv4/ip_cte_flow.h
>--- linux-2.6.9/include/linux/netfilter_ipv4/ip_cte_flow.h	1969-12-31 18:00:00.000000000 -0600
>+++ linux-2.6.9-flow/include/linux/netfilter_ipv4/ip_cte_flow.h	2004-11-03 19:10:13.000000000 -0600
>@@ -0,0 +1,28 @@
>+#ifndef _IP_CTE_FLOW_H
>+#define _IP_CTE_FLOW_H
>+
>+#define BIT_SET(b) (1 << (b))
>+#define INCREMENT_WITHOUT_OVERFLOW(c) if (c < ~(1 << ((sizeof(c) * 8) - 1))) c++
>+
>+/* A `ip_cte_flow' is a structure containing the IP connection count
>+   on various IP protocols.
>+*/
>+
>+struct ip_cte_flow
>+{
>+	 struct list_head list;
>+	 u_int32_t ip_ct_dir_original_ip,
>+		   ip,
>+		   icmp,
>+		   tcp,
>+		   udp;
>+};
>+
>+extern struct list_head *ip_cte_flow_hash;
>+extern unsigned int ip_cte_flow_htable_size;
>+DECLARE_RWLOCK_EXTERN(ip_cte_flow_lock);
>+extern int ip_cte_flow_register_notifier(struct notifier_block *nb);
>+extern int ip_cte_flow_unregister_notifier(struct notifier_block *nb);
>+extern u_int32_t ip_cte_hash_flow_ip(u_int32_t ip);
>+
>+#endif /* _IP_CTE_FLOW_H */
>diff -Pru linux-2.6.9/include/linux/netfilter_ipv4/ipt_flow.h linux-2.6.9-flow/include/linux/netfilter_ipv4/ipt_flow.h
>--- linux-2.6.9/include/linux/netfilter_ipv4/ipt_flow.h	1969-12-31 18:00:00.000000000 -0600
>+++ linux-2.6.9-flow/include/linux/netfilter_ipv4/ipt_flow.h	2004-11-03 17:02:37.000000000 -0600
>@@ -0,0 +1,41 @@
>+#ifndef _IPT_FLOW_H
>+#define _IPT_FLOW_H
>+
>+#define BIT_SET(b) (1 << (b))
>  
>

What if we use test_bit instead?

>+
>+typedef enum
>+{
>+	IPFLOW_IP,
>+	IPFLOW_ICMP,
>+	IPFLOW_TCP,
>+	IPFLOW_UDP,
>+	IPFLOW_NETWORK_MASK = 31
>+} ipflow_bits_t;
>+
>+struct ipt_flow_nm
>+{
>+	u_int32_t ip,
>+		  icmp,
>+		  tcp,
>+		  udp;
>+};
>+
>+struct ipt_flow_info
>+{
>+#ifdef __KERNEL__
>+	struct list_head list;
>+#else
>+	struct
>+	{
>+		void *next,
>+		     *prev;
>+	} list;
>+#endif
>+	u_int32_t proto,
>+		  network,
>+		  mask,
>+		  max;
>+	struct ipt_flow_nm *nm;
>+};
>+
>+#endif /* IPT_FLOW_H */
>diff -Pru linux-2.6.9/net/ipv4/netfilter/Kconfig linux-2.6.9-flow/net/ipv4/netfilter/Kconfig
>--- linux-2.6.9/net/ipv4/netfilter/Kconfig	2004-10-18 16:54:55.000000000 -0500
>+++ linux-2.6.9-flow/net/ipv4/netfilter/Kconfig	2004-11-02 11:01:58.000000000 -0600
>@@ -32,6 +32,28 @@
> 
> 	  If unsure, say `N'.
> 
>+config IP_NF_CONNTRACK_EVENTS
>+	bool "Connection tracking events"
>+	depends on IP_NF_CONNTRACK
>+	help
>+	  If unsure, say `N'.
>+	  
>+config IP_NF_CTE_FLOW
>+	tristate "Connection tracking events: protocol flow counters (EXPERIMENTAL)"
>+	depends on IP_NF_CONNTRACK_EVENTS && EXPERIMENTAL
>+	help
>+	  This option uses the connection tracking event notifiers to
>+	  keep protocol flow counters indexed by the original direction
>+	  source IP address.  The protocol counters include generic IP,
>+	  ICMP, TCP and UDP.
>+
>+	  These counters can be read from "/proc/net/ip_cte_flow".
>+
>+	  These counters can be used in the "protocol flow counters match
>+	  support", see below.
>+
>+	  To compile it as a module, choose M here.  If unsure, say N.
>+
> config IP_NF_CT_PROTO_SCTP
> 	tristate  'SCTP protocol connection tracking support (EXPERIMENTAL)'
> 	depends on IP_NF_CONNTRACK && EXPERIMENTAL
>@@ -279,6 +301,22 @@
> 
> 	  To compile it as a module, choose M here.  If unsure, say N.
> 
>+config IP_NF_CTE_MATCH_FLOW
>+       tristate 'protocol flow counters match support (EXPERIMENTAL)'
>+       depends on IP_NF_IPTABLES && IP_NF_CTE_FLOW && (IP_NF_FILTER || IP_NF_NAT || IP_NF_MANGLE) && EXPERIMENTAL
>+       help
>+         `flow' matching allows you to match a packet when the specified
>+         number of known protocol connections from a original direction
>+         source IP address is exceeded.  The matches can be made against
>+         generic IP, ICMP, TCP or UDP flow counters.  This match can be
>+         used in all tables but raw.
>+
>+         For example, this match allows you to control the number and type
>+         of connections (flows) from hosts in a known local network routing
>+         through the machine.
>+
>+         To compile it as a module, choose M here.  If unsure, say N.
>+
> config IP_NF_MATCH_OWNER
> 	tristate "Owner match support"
> 	depends on IP_NF_IPTABLES
>diff -Pru linux-2.6.9/net/ipv4/netfilter/Makefile linux-2.6.9-flow/net/ipv4/netfilter/Makefile
>--- linux-2.6.9/net/ipv4/netfilter/Makefile	2004-10-18 16:53:43.000000000 -0500
>+++ linux-2.6.9-flow/net/ipv4/netfilter/Makefile	2004-11-02 10:58:21.000000000 -0600
>@@ -34,6 +34,9 @@
> obj-$(CONFIG_IP_NF_NAT_FTP) += ip_nat_ftp.o
> obj-$(CONFIG_IP_NF_NAT_IRC) += ip_nat_irc.o
> 
>+# connection tracking event objects
>+obj-$(CONFIG_IP_NF_CTE_FLOW) += ip_cte_flow.o
>+
> # generic IP tables 
> obj-$(CONFIG_IP_NF_IPTABLES) += ip_tables.o
> 
>@@ -67,6 +70,7 @@
> obj-$(CONFIG_IP_NF_MATCH_ADDRTYPE) += ipt_addrtype.o
> obj-$(CONFIG_IP_NF_MATCH_PHYSDEV) += ipt_physdev.o
> obj-$(CONFIG_IP_NF_MATCH_COMMENT) += ipt_comment.o
>+obj-$(CONFIG_IP_NF_CTE_MATCH_FLOW) += ipt_flow.o
> 
> # targets
> obj-$(CONFIG_IP_NF_TARGET_REJECT) += ipt_REJECT.o
>diff -Pru linux-2.6.9/net/ipv4/netfilter/ip_cte_flow.c linux-2.6.9-flow/net/ipv4/netfilter/ip_cte_flow.c
>--- linux-2.6.9/net/ipv4/netfilter/ip_cte_flow.c	1969-12-31 18:00:00.000000000 -0600
>+++ linux-2.6.9-flow/net/ipv4/netfilter/ip_cte_flow.c	2004-11-03 20:12:09.000000000 -0600
>@@ -0,0 +1,377 @@
>+/* Kernel module to track [IP|ICMP|TCP|UDP] flow counts. */
>+
>+/* (C) 2004 Josh Samuelson <josamue1@wsc.edu>
>+ *
>+ * This program is free software; you can redistribute it and/or modify
>+ * it under the terms of the GNU General Public License version 2 as
>+ * published by the Free Software Foundation.
>+ */
>+
>+#include <linux/module.h>
>+#include <linux/vmalloc.h>
>+#include <linux/jhash.h>
>+#include <linux/random.h>
>+#include <linux/in.h>
>+#include <linux/notifier.h>
>+#ifdef CONFIG_PROC_FS
>+#include <linux/proc_fs.h>
>+#include <linux/seq_file.h>
>+#endif
>+#include <linux/netfilter_ipv4/ip_conntrack.h>
>+#include <linux/netfilter_ipv4/ip_conntrack_core.h>
>+#include <linux/netfilter_ipv4/ip_cte_flow.h>
>+
>+MODULE_LICENSE("GPL");
>+MODULE_AUTHOR("Josh Samuelson <josamue1@wsc.edu>");
>+MODULE_DESCRIPTION("protocol flow count tracking via connection tracking events");
>+
>+DECLARE_RWLOCK(ip_cte_flow_lock);
>+struct list_head *ip_cte_flow_hash;
>+unsigned int ip_cte_flow_htable_size = 0;
>+/* static atomic_t ip_cte_flow_count = ATOMIC_INIT(0); */
>+static int ip_cte_flow_hash_rnd;
>+static kmem_cache_t *ip_cte_flow_cachep;
>+static struct notifier_block *ip_cte_flow_nb_chain;
>+
>+#ifdef CONFIG_PROC_FS
>+static void *ip_cte_flow_seq_start(struct seq_file *s, loff_t *pos)
>+{
>+	if (*pos >= ip_cte_flow_htable_size)
>+		return NULL;
>+	return &ip_cte_flow_hash[*pos];
>+}
>+  
>+static void ip_cte_flow_seq_stop(struct seq_file *s, void *v)
>+{
>+}
>+
>+static void *ip_cte_flow_seq_next(struct seq_file *s, void *v, loff_t *pos)
>+{
>+	(*pos)++;
>+	if (*pos >= ip_cte_flow_htable_size)
>+		return NULL;
>+	return &ip_cte_flow_hash[*pos];
>+}
>+
>+static int ip_cte_flow_seq_show(struct seq_file *s, void *v)
>+{
>+	int ret = 0;
>+	struct list_head *list;
>+	struct ip_cte_flow *flow;
>+
>+	list = (struct list_head *) v;
>+	list = list->next;
>+	READ_LOCK(&ip_cte_flow_lock);
>+	list_for_each(list, (struct list_head *) v) {
>+		flow = (struct ip_cte_flow *) list;
>+		if (seq_printf(s, "%u.%u.%u.%u IP: %u ICMP: %hu "
>+				  "TCP: %hu UDP: %hu\n",
>+				  NIPQUAD(flow->ip_ct_dir_original_ip),
>+				  flow->ip,
>+				  flow->icmp,
>+				  flow->tcp,
>+				  flow->udp)) {
>+			ret = -ENOSPC;
>+			break;
>+		}
>+	}
>+	READ_UNLOCK(&ip_cte_flow_lock);
>+	return ret;
>+}
>+  
>+static struct seq_operations ip_cte_flow_seq_ops = {
>+	.start = ip_cte_flow_seq_start,
>+	.next  = ip_cte_flow_seq_next,
>+	.stop  = ip_cte_flow_seq_stop,
>+	.show  = ip_cte_flow_seq_show
>+};
>+
>+static int ip_cte_flow_open(struct inode *inode, struct file *file)
>+{
>+	return seq_open(file, &ip_cte_flow_seq_ops);
>+}
>+
>+static struct file_operations ip_cte_flow_file_ops = {
>+	.owner   = THIS_MODULE,
>+	.open    = ip_cte_flow_open,
>+	.read    = seq_read,
>+	.llseek  = seq_lseek,
>+	.release = seq_release
>+};
>+#endif /* CONFIG_PROC_FS */
>+  
>+int
>+ip_cte_flow_register_notifier(struct notifier_block *nb)
>+{
>+	return notifier_chain_register(&ip_cte_flow_nb_chain, nb);
>+}
>+
>+int
>+ip_cte_flow_unregister_notifier(struct notifier_block *nb)
>+{
>+	return notifier_chain_unregister(&ip_cte_flow_nb_chain, nb);
>+}
>+
>+void
>+ip_cte_flow_event(enum ip_conntrack_events event, struct ip_conntrack *ct)
>+{
>+	notifier_call_chain(&ip_cte_flow_nb_chain, event, ct);
>+}
>+
>+u_int32_t
>+ip_cte_hash_flow_ip(u_int32_t ip)
>+{
>+	return(jhash_1word(ip, ip_cte_flow_hash_rnd) % ip_cte_flow_htable_size);
>+}
>+
>+static int
>+ip_cte_flow_inc(struct ip_conntrack_tuple_hash *hash)
>+{
>+	unsigned int flow_hash;
>+	u_int32_t ip;
>+	struct list_head *list;
>+	struct ip_cte_flow *flow = NULL;
>+
>+	ip = hash->tuple.src.ip;
>+	flow_hash = ip_cte_hash_flow_ip(ip);
>+	READ_LOCK(&ip_cte_flow_lock);
>+	list_for_each(list, &ip_cte_flow_hash[flow_hash]) {
>+		if (((struct ip_cte_flow *) list)->ip_ct_dir_original_ip ==
>+		    ip) {
>+			flow = (struct ip_cte_flow *) list;
>+			break;
>+		}
>+	}
>+	READ_UNLOCK(&ip_cte_flow_lock);
>  
>

"Code which grabs a read lock, searches a list, fails to find what if 
wants, drops the read lock, grabs a write lock and insert the object has 
a race condition". --Rusty's unreliable guide to locking

So we have to fix this :)

>+	WRITE_LOCK(&ip_cte_flow_lock);
>+	if (!flow) {
>+		flow = kmem_cache_alloc(ip_cte_flow_cachep, GFP_ATOMIC);
>+		if (flow) {
>+			/* atomic_inc(&ip_cte_flow_count); */
>+			memset(flow, 0, sizeof(struct ip_cte_flow));
>+			flow->ip_ct_dir_original_ip = ip;
>+			list = (struct list_head *) flow;
>+			list_add(list, &ip_cte_flow_hash[flow_hash]);
>+		}
>+	}
>+	if (flow) {
>+		INCREMENT_WITHOUT_OVERFLOW(flow->ip);
>+		switch (hash->tuple.dst.protonum) {
>+			case IPPROTO_ICMP:
>+				INCREMENT_WITHOUT_OVERFLOW(flow->icmp);
>+				break;
>+			case IPPROTO_TCP:
>+				INCREMENT_WITHOUT_OVERFLOW(flow->tcp);
>+				break;
>+			case IPPROTO_UDP:
>+				INCREMENT_WITHOUT_OVERFLOW(flow->udp);
>+				break;
>+		}
>+	}
>+	WRITE_UNLOCK(&ip_cte_flow_lock);
>+	return(flow == NULL);
>+}
>+
>+static void
>+ip_cte_flow_dec(struct ip_conntrack_tuple_hash *hash)
>+{
>+	unsigned int flow_hash;
>+	u_int32_t ip;
>+	struct list_head *list;
>+	struct ip_cte_flow *flow = NULL;
>+
>+	ip = hash->tuple.src.ip;
>+	flow_hash = ip_cte_hash_flow_ip(ip);
>+	READ_LOCK(&ip_cte_flow_lock);
>+	list_for_each(list, &ip_cte_flow_hash[flow_hash]) {
>+		if (((struct ip_cte_flow *) list)->ip_ct_dir_original_ip ==
>+		    ip) {
>+			flow = (struct ip_cte_flow *) list;
>+			break;
>+		}
>+	}
>+	READ_UNLOCK(&ip_cte_flow_lock);
>  
>

Same problem

>+	if (flow) {
>+		WRITE_LOCK(&ip_cte_flow_lock);
>+		if(flow->ip)
>+			flow->ip--;
>+		switch (hash->tuple.dst.protonum) {
>+			case IPPROTO_ICMP:
>+				if (flow->icmp)
>+					flow->icmp--;
>+				break;
>+			case IPPROTO_TCP:
>+				if (flow->tcp)
>+					flow->tcp--;
>+				break;
>+			case IPPROTO_UDP:
>+				if (flow->udp)
>+					flow->udp--;
>+				break;
>+		}
>+		if (flow->ip == 0) {
>+			list = (struct list_head *) flow;
>+			list_del(list);
>+			kmem_cache_free(ip_cte_flow_cachep, flow);
>+			/* atomic_dec(&ip_cte_flow_count); */
>+		}
>+		WRITE_UNLOCK(&ip_cte_flow_lock);
>+	} else {
>+		printk(KERN_WARNING "conntrack being destroyed, "
>+				    "yet not found on flow list\n"
>+				    "%u src: %u.%u.%u.%u dst: %u.%u.%u.%u\n",
>+				    hash->tuple.dst.protonum,
>+				    NIPQUAD(hash->tuple.src.ip),
>+				    NIPQUAD(hash->tuple.dst.ip));
>+	}
>+}
>+
>+static void
>+ip_cte_flow_existing(void)
>+{
>+	unsigned int i;
>+	struct list_head *list;
>+	struct ip_conntrack_tuple_hash *hash;
>+
>+	READ_LOCK(&ip_conntrack_lock);
>+	for (i = 0; i < ip_conntrack_htable_size; i++) {
>+		list_for_each(list, &ip_conntrack_hash[i]) {
>+			hash = (struct ip_conntrack_tuple_hash *) list;
>+			if (!DIRECTION(hash))
>+				ip_cte_flow_inc(hash);
>+		}
>+	}
>+	READ_UNLOCK(&ip_conntrack_lock);
>+}
>+
>+static void
>+ip_cte_flow_destroy(void)
>+{
>+	unsigned int i;
>+	struct list_head *list;
>+	struct ip_cte_flow *flow;
>+
>+	WRITE_LOCK(&ip_cte_flow_lock);
>+	for (i = 0; i < ip_cte_flow_htable_size; i++) {
>+		list = ip_cte_flow_hash[i].next;
>+		while (list != &ip_cte_flow_hash[i]) {
>+				flow = (struct ip_cte_flow *) list;
>+				list = list->next;
>+				list_del((struct list_head *) flow);
>+				kmem_cache_free(ip_cte_flow_cachep, flow);
>+		}
>+	}
>+	WRITE_UNLOCK(&ip_cte_flow_lock);
>+}
>+
>+static int ip_cte_flow_notifier(struct notifier_block *nb,
>+	unsigned long ips,
>+	void *v)
>+{
>+	struct ip_conntrack *ct = v;
>+
>+	switch (ips)
>+	{
>+		case BIT_SET(IPCT_NEW):
>+		case BIT_SET(IPCT_RELATED):
>+			ip_cte_flow_inc(&ct->tuplehash[IP_CT_DIR_ORIGINAL]);
>+			ip_cte_flow_event(ips, ct);
>+			break;
>+		case BIT_SET(IPCT_DESTROY):
>+			ip_cte_flow_dec(&ct->tuplehash[IP_CT_DIR_ORIGINAL]);
>+			ip_cte_flow_event(ips, ct);
>+			break;
>+	}
>+	return NOTIFY_OK;
>+}
>+
>+static struct notifier_block ip_cte_flow_nb = {
>+	.notifier_call = ip_cte_flow_notifier,
>+	.next = NULL,
>+	.priority = 0
>+};
>+
>+static int __init init(void)
>+{
>+	unsigned int i;
>+#ifdef CONFIG_PROC_FS
>+	struct proc_dir_entry *proc_flow;
>+#endif
>+
>+	need_ip_conntrack();
>+
>+	ip_cte_flow_htable_size = ip_conntrack_htable_size / 2;
>+
>+	get_random_bytes(&ip_cte_flow_hash_rnd, 4);
>+
>+	ip_cte_flow_hash = vmalloc(sizeof(struct list_head)
>+					* ip_cte_flow_htable_size);
>+	if (!ip_cte_flow_hash) {
>+		printk(KERN_ERR "Unable to create ip_cte_flow_hash\n");
>+		goto err;
>+	}
>+
>+	for (i = 0; i < ip_cte_flow_htable_size; i++)
>+		INIT_LIST_HEAD(&ip_cte_flow_hash[i]);
>+
>+	ip_cte_flow_cachep = kmem_cache_create("ip_cte_flow",
>+						sizeof(struct ip_cte_flow), 0,
>+						SLAB_HWCACHE_ALIGN, NULL, NULL);
>+	if (!ip_cte_flow_cachep) {
>+		printk(KERN_ERR "Unable to create ip_cte_flow slab cache\n");
>+		goto err_free_hash;
>+ 	}
>+
>+#ifdef CONFIG_PROC_FS
>+	proc_flow = proc_net_fops_create("ip_cte_flow", 0440, &ip_cte_flow_file_ops);
>+	if (!proc_flow) goto err_free_slab;
>+#endif
>+
>+	if (ip_conntrack_register_notifier(&ip_cte_flow_nb)) {
>+		goto cleanup_proc;
>+	}
>+
>+	ip_cte_flow_existing();
>+
>+	printk("connection tracking events: protocol flow counters "
>+	       "(%u buckets) - %Zd bytes per IP_CT_DIR_ORIGINAL source\n",
>+	       ip_cte_flow_htable_size,
>+	       sizeof(struct ip_cte_flow));
>+
>+	return 0;
>+
>+cleanup_proc:
>+#ifdef CONFIG_PROC_FS
>+	proc_net_remove("ip_cte_flow");
>+#endif
>+err_free_slab:
>+ 	kmem_cache_destroy(ip_cte_flow_cachep);
>+err_free_hash:
>+	vfree(ip_cte_flow_hash);
>+err:
>+	return -ENOMEM;
>+}
>+
>+static void __exit fini(void)
>+{
>+	ip_cte_flow_destroy();
>+
>+	if (ip_conntrack_unregister_notifier(&ip_cte_flow_nb))
>+		printk(KERN_ERR "ip_conntrack_unregister_notifier() "
>+				"failed, huh?\n");
>+#ifdef CONFIG_PROC_FS
>+	proc_net_remove("ip_cte_flow");
>+#endif
>+	kmem_cache_destroy(ip_cte_flow_cachep);
>+	vfree(ip_cte_flow_hash);
>+}
>+
>+module_init(init);
>+module_exit(fini);
>+
>+EXPORT_SYMBOL(ip_cte_flow_hash);
>+EXPORT_SYMBOL(ip_cte_flow_htable_size);
>+EXPORT_SYMBOL(ip_cte_flow_lock);
>+EXPORT_SYMBOL(ip_cte_flow_register_notifier);
>+EXPORT_SYMBOL(ip_cte_flow_unregister_notifier);
>+EXPORT_SYMBOL(ip_cte_hash_flow_ip);
>diff -Pru linux-2.6.9/net/ipv4/netfilter/ipt_flow.c linux-2.6.9-flow/net/ipv4/netfilter/ipt_flow.c
>--- linux-2.6.9/net/ipv4/netfilter/ipt_flow.c	1969-12-31 18:00:00.000000000 -0600
>+++ linux-2.6.9-flow/net/ipv4/netfilter/ipt_flow.c	2004-11-03 20:12:00.000000000 -0600
>@@ -0,0 +1,318 @@
>+/* Kernel module to match [IP|ICMP|TCP|UDP] flow counts. */
>+
>+/* (C) 2004 Josh Samuelson <josamue1@wsc.edu>
>+ *
>+ * This program is free software; you can redistribute it and/or modify
>+ * it under the terms of the GNU General Public License version 2 as
>+ * published by the Free Software Foundation.
>+ */
>+
>+#include <linux/module.h>
>+#include <linux/skbuff.h>
>+#include <linux/vmalloc.h>
>+#include <linux/notifier.h>
>+#include <linux/list.h>
>+#include <linux/netfilter_ipv4/ip_tables.h>
>+#include <linux/netfilter_ipv4/ip_conntrack.h>
>+#include <linux/netfilter_ipv4/ip_cte_flow.h>
>+#include <linux/netfilter_ipv4/ipt_flow.h>
>+
>+MODULE_LICENSE("GPL");
>+MODULE_AUTHOR("Josh Samuelson <josamue1@wsc.edu>");
>+MODULE_DESCRIPTION("protocol flow count match module");
>+
>+
>+DECLARE_RWLOCK(ipt_flow_lock);
>+LIST_HEAD(ipt_flow_notifier_list);
>+static atomic_t ipt_flow_notifier_list_count = ATOMIC_INIT(0);
>+static struct notifier_block ipt_flow_nb;
>+
>+static void
>+ipt_flow_nw_inc(struct ip_conntrack *ct,
>+	struct ipt_flow_info *finfo)
>+{
>+	INCREMENT_WITHOUT_OVERFLOW(finfo->nm->ip);
>+	switch (ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.protonum) {
>+		case IPPROTO_ICMP:
>+			INCREMENT_WITHOUT_OVERFLOW(finfo->nm->icmp);
>+			break;
>+		case IPPROTO_TCP:
>+			INCREMENT_WITHOUT_OVERFLOW(finfo->nm->tcp);
>+			break;
>+		case IPPROTO_UDP:
>+			INCREMENT_WITHOUT_OVERFLOW(finfo->nm->udp);
>+			break;
>+	}
>+}
>+
>+static void
>+ipt_flow_nw_dec(struct ip_conntrack *ct,
>+	struct ipt_flow_info *finfo)
>+{
>+	if (finfo->nm->ip)
>+		finfo->nm->ip--;
>+	switch (ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.protonum) {
>+		case IPPROTO_ICMP:
>+			if (finfo->nm->icmp)
>+				finfo->nm->icmp--;
>+			break;
>+		case IPPROTO_TCP:
>+			if (finfo->nm->tcp)
>+				finfo->nm->tcp--;
>+			break;
>+		case IPPROTO_UDP:
>+			if (finfo->nm->udp)
>+				finfo->nm->udp--;
>+			break;
>+	}
>+}
>+
>+static void
>+ipt_flow_existing(struct ipt_flow_info *finfo)
>+{
>+        unsigned int i;
>+        struct list_head *list;
>+        struct ip_cte_flow *flow;
>+
>+        READ_LOCK(&ip_cte_flow_lock);
>+        for (i = 0; i < ip_cte_flow_htable_size; i++) {
>+                list_for_each(list, &ip_cte_flow_hash[i]) {
>+                        flow = (struct ip_cte_flow *) list;
>+			if ((flow->ip_ct_dir_original_ip & finfo->mask) ==
>+			    finfo->network) {
>+				finfo->nm->ip += flow->ip;
>+				finfo->nm->icmp += flow->icmp;
>+				finfo->nm->tcp += flow->tcp;
>+				finfo->nm->udp += flow->udp;
>+			}
>+                }
>+        }
>+        READ_UNLOCK(&ip_cte_flow_lock);
>+}
>+
>+int
>+ipt_flow_nm_notifier(struct notifier_block *self,
>+	unsigned long event, void *vct)
>+{
>+	struct list_head *list;
>+	struct ipt_flow_info *finfo;
>+	struct ip_conntrack *ct = vct;
>+	u_int32_t ip;
>+
>+	READ_LOCK(&ipt_flow_lock);
>+	ip = ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.ip;
>+	list_for_each(list, &ipt_flow_notifier_list) {
>+		finfo = (struct ipt_flow_info *) list;
>+		if ((ip & finfo->mask) == finfo->network) {
>+			switch (event) {
>+				case BIT_SET(IPCT_NEW):
>+				case BIT_SET(IPCT_RELATED):
>+					ipt_flow_nw_inc(ct, finfo);
>+					break;
>+				case BIT_SET(IPCT_DESTROY):
>+					ipt_flow_nw_dec(ct, finfo);
>+					break;
>+			}
>+		}
>+	}
>+	READ_UNLOCK(&ipt_flow_lock);
>+}
>+
>+static int
>+match(const struct sk_buff *skb,
>+	const struct net_device *in,
>+	const struct net_device *out,
>+	const void *matchinfo,
>+	int offset,
>+	int *hotdrop)
>+{
>+	const struct ipt_flow_info *finfo = matchinfo;
>+	struct list_head *list;
>+	struct ip_cte_flow *flow = NULL;
>+	u_int16_t proto;
>+	u_int32_t flow_hash,
>+		  ip;
>+	int ret = 0;
>+
>+	proto = skb->nh.iph->protocol;
>+	ip = skb->nh.iph->saddr;
>+	if ((finfo->proto & BIT_SET(IPFLOW_NETWORK_MASK)) &&
>+	    ((ip & finfo->mask) == finfo->network)) {
>+		if ((finfo->nm->ip >= finfo->max) &&
>+		    (finfo->proto & BIT_SET(IPFLOW_IP)))
>+			ret = 1;
>+		switch (proto) {
>+			case IPPROTO_ICMP:
>+				if ((finfo->nm->icmp >= finfo->max) &&
>+				    (finfo->proto & BIT_SET(IPFLOW_ICMP)))
>+					ret = 1;
>+				break;
>+			case IPPROTO_TCP:
>+				if ((finfo->nm->tcp >= finfo->max) &&
>+				    (finfo->proto & BIT_SET(IPFLOW_TCP)))
>+					ret = 1;
>+				break;
>+			case IPPROTO_UDP:
>+				if ((finfo->nm->udp >= finfo->max) &&
>+				    (finfo->proto & BIT_SET(IPFLOW_UDP)))
>+					ret = 1;
>+				break;
>+		}
>+	} else {
>+		flow_hash = ip_cte_hash_flow_ip(ip);
>+		READ_LOCK(&ip_cte_flow_lock);
>+		list_for_each(list, &ip_cte_flow_hash[flow_hash]) {
>+			if (((struct ip_cte_flow *)
>+			    list)->ip_ct_dir_original_ip == ip) {
>+				flow = (struct ip_cte_flow *) list;
>+				break;
>+			}
>+		}
>+		if (flow) {
>+			if ((flow->ip >= finfo->max) &&
>+			    (finfo->proto & BIT_SET(IPFLOW_IP)))
>+				ret = 1;
>+			switch (proto) {
>+				case IPPROTO_ICMP:
>+					if ((flow->icmp >= finfo->max) &&
>+					    (finfo->proto &
>+					    BIT_SET(IPFLOW_ICMP)))
>+						ret = 1;
>+					break;
>+				case IPPROTO_TCP:
>+					if ((flow->tcp >= finfo->max) &&
>+					    (finfo->proto &
>+					    BIT_SET(IPFLOW_TCP)))
>+						ret = 1;
>+					break;
>+				case IPPROTO_UDP:
>+					if ((flow->udp >= finfo->max) &&
>+					    (finfo->proto &
>+					    BIT_SET(IPFLOW_UDP)))
>+						ret = 1;
>+					break;
>+			}
>+		}
>+		READ_UNLOCK(&ip_cte_flow_lock);
>+	}
>+	return(ret);
>+}
>+
>+static int check(const char *tablename,
>+	const struct ipt_ip *ip,
>+	void *matchinfo,
>+	unsigned int matchsize,
>+	unsigned int hook_mask)
>+{
>+	struct list_head *list;
>+	struct ipt_flow_info *finfo = matchinfo;
>+
>+	if (matchsize != IPT_ALIGN(sizeof(struct ipt_flow_info)))
>+		return 0;
>+
>+	if (strcmp(tablename, "raw") == 0) {
>+		printk(KERN_WARNING "flow: can not by used in the \"raw\" table\n");
>+		return 0;
>+	}
>+
>+	switch (ip->proto & ~BIT_SET(IPFLOW_NETWORK_MASK)) {
>+		case IPPROTO_IP:
>+			if ((finfo->proto & BIT_SET(IPFLOW_IP)) &&
>+			    (finfo->proto &
>+			    ~BIT_SET(IPFLOW_NETWORK_MASK) &
>+			    ~BIT_SET(IPFLOW_IP)))
>+				return 0;
>+			break;
>+		case IPPROTO_ICMP:
>+			if ((finfo->proto & BIT_SET(IPFLOW_ICMP)) &&
>+			    (finfo->proto &
>+			    ~BIT_SET(IPFLOW_NETWORK_MASK) &
>+			    ~BIT_SET(IPFLOW_ICMP)))
>+				return 0;
>+			break;
>+		case IPPROTO_TCP:
>+			if ((finfo->proto & BIT_SET(IPFLOW_TCP)) &&
>+			    (finfo->proto &
>+			    ~BIT_SET(IPFLOW_NETWORK_MASK) &
>+			    ~BIT_SET(IPFLOW_TCP)))
>+				return 0;
>+			break;
>+		case IPPROTO_UDP:
>+			if ((finfo->proto & BIT_SET(IPFLOW_UDP)) &&
>+			    (finfo->proto &
>+			    ~BIT_SET(IPFLOW_NETWORK_MASK) &
>+			    ~BIT_SET(IPFLOW_UDP)))
>+				return 0;
>+			break;
>+		default:
>+			return 0;
>+	}
>+
>+	if ((finfo->proto & BIT_SET(IPFLOW_NETWORK_MASK))) {
>+		finfo->nm = vmalloc(sizeof(struct ipt_flow_nm));
>  
>

I think that we can use kmalloc instead, the use of vmalloc is 
restricted to big allocations, like big arrays.

>+		if (!finfo->nm)
>+			return 0;
>+		memset(finfo->nm, 0, sizeof(struct ipt_flow_nm));
>+		WRITE_LOCK(&ipt_flow_lock);
>+		ipt_flow_existing(finfo);
>+		list = (struct list_head *) finfo;
>+		list_add(list, &ipt_flow_notifier_list);
>+		if (atomic_read(&ipt_flow_notifier_list_count) == 0) {
>+			ipt_flow_nb.notifier_call = ipt_flow_nm_notifier;
>+			if (ip_cte_flow_register_notifier(&ipt_flow_nb)) {
>+				vfree(finfo->nm);
>+				return 0;
>+			}
>+		}
>+		atomic_inc(&ipt_flow_notifier_list_count);
>+		WRITE_UNLOCK(&ipt_flow_lock);
>+	}
>+	return 1;
>+}
>+
>+void destroy(void *matchinfo,
>+	unsigned int matchsize)
>+{
>+	struct list_head *list;
>+	struct ipt_flow_info *finfo = matchinfo;
>+
>+	if (matchsize != IPT_ALIGN(sizeof(struct ipt_flow_info)))
>+		return;
>+
>+	if ((finfo->proto & BIT_SET(IPFLOW_NETWORK_MASK)) && finfo->nm) {
>+		WRITE_LOCK(&ipt_flow_lock);
>+		atomic_dec(&ipt_flow_notifier_list_count);
>+		if (atomic_read(&ipt_flow_notifier_list_count) == 0) {
>+			if (ip_cte_flow_unregister_notifier(&ipt_flow_nb))
>+				printk(KERN_ERR
>+				       "ip_cte_flow_unregister_notifier failed"
>+				       ", huh?\n");
>+		}
>+		list = (struct list_head *) finfo;
>+		list_del(list);
>+		WRITE_UNLOCK(&ipt_flow_lock);
>+		vfree(finfo->nm);
>+	}
>+}
>+
>+static struct ipt_match flow_match = {
>+	.name		= "flow",
>+	.match		= &match,
>+	.checkentry	= &check,
>+	.destroy	= &destroy,
>+	.me		= THIS_MODULE,
>+};
>+
>+static int __init init(void)
>+{
>+	need_ip_conntrack();
>+	return ipt_register_match(&flow_match);
>+}
>+
>+static void __exit fini(void)
>+{
>+	ipt_unregister_match(&flow_match);
>+}
>+
>+module_init(init);
>+module_exit(fini);
>  
>

--Pablo

^ permalink raw reply	[flat|nested] 10+ messages in thread

end of thread, other threads:[~2004-11-13 22:12 UTC | newest]

Thread overview: 10+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2004-10-28  2:05 [PATCH] new match extension `flow' Josh Samuelson
2004-10-28 20:15 ` Josh Samuelson
2004-10-29 19:32 ` Pablo Neira
2004-10-31  6:38   ` Josh Samuelson
2004-10-31 14:41     ` Pablo Neira
2004-11-04  2:20       ` Josh Samuelson
2004-11-06 15:19         ` Pablo Neira
2004-11-08  2:52           ` Josh Samuelson
2004-11-10 18:12             ` Pablo Neira
2004-11-13 22:12         ` Pablo Neira

This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.