All of lore.kernel.org
 help / color / mirror / Atom feed
* [B.A.T.M.A.N.] [PATCH 1/7] batmand: Import hash table version from alfred
@ 2014-06-17 19:55 Sven Eckelmann
  2014-06-17 19:55 ` [B.A.T.M.A.N.] [PATCH 2/7] batmand: Use memleak/error path free implementation of hash_resize Sven Eckelmann
                   ` (5 more replies)
  0 siblings, 6 replies; 8+ messages in thread
From: Sven Eckelmann @ 2014-06-17 19:55 UTC (permalink / raw)
  To: b.a.t.m.a.n; +Cc: Sven Eckelmann

All new functionality, cleanup and similar changes to the hash implementation
were done in alfred. So import the newest version to keep it in sync.

Signed-off-by: Sven Eckelmann <sven@narfation.org>
---
 hash.c | 88 +++++++++++++++++++++++++++++++++++++++++-------------------------
 hash.h | 69 ++++++++++++++++++++++++++++++---------------------
 2 files changed, 96 insertions(+), 61 deletions(-)

diff --git a/hash.c b/hash.c
index 2a9f557..7271b89 100644
--- a/hash.c
+++ b/hash.c
@@ -20,11 +20,11 @@
  */
 
 
-#include <stdio.h>	/* NULL */
 #include "hash.h"
+#include <stdlib.h>
+#include <stdio.h>
 #include "allocate.h"
 
-
 /* clears the hash */
 void hash_init(struct hashtable_t *hash)
 {
@@ -70,9 +70,17 @@ void hash_destroy(struct hashtable_t *hash)
 	debugFree(hash, 1303);
 }
 
+/* free hash_it_t pointer when stopping hash_iterate early */
+void hash_iterate_free(struct hash_it_t *iter_in)
+{
+	debugFree(iter_in, 1304);
+}
+
 /* iterate though the hash. first element is selected with iter_in NULL.
- * use the returned iterator to access the elements until hash_it_t returns NULL. */
-struct hash_it_t *hash_iterate(struct hashtable_t *hash, struct hash_it_t *iter_in)
+ * use the returned iterator to access the elements until hash_it_t returns
+ * NULL. */
+struct hash_it_t *hash_iterate(struct hashtable_t *hash,
+			       struct hash_it_t *iter_in)
 {
 	struct hash_it_t *iter;
 
@@ -81,14 +89,17 @@ struct hash_it_t *hash_iterate(struct hashtable_t *hash, struct hash_it_t *iter_
 		iter->index =  -1;
 		iter->bucket = NULL;
 		iter->prev_bucket = NULL;
-	} else
-		iter= iter_in;
+	} else {
+		iter = iter_in;
+	}
 
-	/* sanity checks first (if our bucket got deleted in the last iteration): */
+	/* sanity checks first (if our bucket got deleted in the last
+	 * iteration): */
 	if (iter->bucket != NULL) {
 		if (iter->first_bucket != NULL) {
 
-			/* we're on the first element and it got removed after the last iteration. */
+			/* we're on the first element and it got removed after
+			 * the last iteration. */
 			if ((*iter->first_bucket) != iter->bucket) {
 
 				/* there are still other elements in the list */
@@ -105,9 +116,10 @@ struct hash_it_t *hash_iterate(struct hashtable_t *hash, struct hash_it_t *iter_
 
 		} else if (iter->prev_bucket != NULL) {
 
-			/* we're not on the first element, and the bucket got removed after the last iteration.
-			* the last bucket's next pointer is not pointing to our actual bucket anymore.
-			* select the next. */
+			/* we're not on the first element, and the bucket got
+			 * removed after the last iteration. The last bucket's
+			 * next pointer is not pointing to our actual bucket
+			 * anymore. Select the next. */
 			if (iter->prev_bucket->next != iter->bucket)
 				iter->bucket = iter->prev_bucket;
 
@@ -124,7 +136,8 @@ struct hash_it_t *hash_iterate(struct hashtable_t *hash, struct hash_it_t *iter_
 			return iter;
 		}
 	}
-	/* if not returned yet, we've reached the last one on the index and have to search forward */
+	/* if not returned yet, we've reached the last one on the index and
+	 * have to search forward */
 
 	iter->index++;
 	/* go through the entries of the hash table */
@@ -142,12 +155,13 @@ struct hash_it_t *hash_iterate(struct hashtable_t *hash, struct hash_it_t *iter_
 	}
 
 	/* nothing to iterate over anymore */
-	debugFree(iter, 1304);
+	hash_iterate_free(iter);
 	return NULL;
 }
 
 /* allocates and clears the hash */
-struct hashtable_t *hash_new(int size, hashdata_compare_cb compare, hashdata_choose_cb choose)
+struct hashtable_t *hash_new(int size, hashdata_compare_cb compare,
+			     hashdata_choose_cb choose)
 {
 	struct hashtable_t *hash;
 
@@ -155,8 +169,8 @@ struct hashtable_t *hash_new(int size, hashdata_compare_cb compare, hashdata_cho
 	if (!hash)
 		return NULL;
 
-	hash->size= size;
-	hash->table= debugMalloc(sizeof(struct element_t *) * size, 303);
+	hash->size = size;
+	hash->table = debugMalloc(sizeof(struct element_t *)*size, 303);
 
 	if (!hash->table) {
 		debugFree(hash, 1305);
@@ -183,11 +197,11 @@ int hash_add(struct hashtable_t *hash, void *data)
 			return -1;
 
 		prev_bucket = bucket;
-		bucket= bucket->next;
+		bucket = bucket->next;
 	}
 
 	/* found the tail of the list, add new element */
-	bucket = debugMalloc(sizeof(struct element_t),304);
+	bucket = debugMalloc(sizeof(struct element_t), 304);
 
 	if (!bucket)
 		return -1;
@@ -197,17 +211,17 @@ int hash_add(struct hashtable_t *hash, void *data)
 	bucket->next = NULL;
 
 	/* and link it */
-	if (prev_bucket == NULL) {
+	if (prev_bucket == NULL)
 		hash->table[index] = bucket;
-	} else {
+	else
 		prev_bucket->next = bucket;
-	}
 
 	hash->elements++;
 	return 0;
 }
 
-/* finds data, based on the key in keydata. returns the found data on success, or NULL on error */
+/* finds data, based on the key in keydata. returns the found data on success,
+ * or NULL on error */
 void *hash_find(struct hashtable_t *hash, void *keydata)
 {
 	int index;
@@ -226,20 +240,21 @@ void *hash_find(struct hashtable_t *hash, void *keydata)
 	return NULL;
 }
 
-/* remove bucket (this might be used in hash_iterate() if you already found the bucket
- * you want to delete and don't need the overhead to find it again with hash_remove().
- * But usually, you don't want to use this function, as it fiddles with hash-internals. */
+/* remove bucket (this might be used in hash_iterate() if you already found
+ * the bucket you want to delete and don't need the overhead to find it again
+ * with hash_remove(). But usually, you don't want to use this function, as it
+ * fiddles with hash-internals. */
 void *hash_remove_bucket(struct hashtable_t *hash, struct hash_it_t *hash_it_t)
 {
 	void *data_save;
 
-	data_save = hash_it_t->bucket->data;	/* save the pointer to the data */
+	/* save the pointer to the data */
+	data_save = hash_it_t->bucket->data;
 
-	if (hash_it_t->prev_bucket != NULL) {
+	if (hash_it_t->prev_bucket != NULL)
 		hash_it_t->prev_bucket->next = hash_it_t->bucket->next;
-	} else if (hash_it_t->first_bucket != NULL) {
+	else if (hash_it_t->first_bucket != NULL)
 		(*hash_it_t->first_bucket) = hash_it_t->bucket->next;
-	}
 
 	debugFree(hash_it_t->bucket, 1306);
 
@@ -261,7 +276,12 @@ void *hash_remove(struct hashtable_t *hash, void *data)
 
 	while (hash_it_t.bucket != NULL) {
 		if (hash->compare(hash_it_t.bucket->data, data)) {
-			hash_it_t.first_bucket = (hash_it_t.bucket == hash->table[hash_it_t.index] ? &hash->table[ hash_it_t.index ] : NULL);
+			int bucket_same;
+			bucket_same = (hash_it_t.bucket ==
+				       hash->table[hash_it_t.index]);
+			hash_it_t.first_bucket = (bucket_same ?
+						  &hash->table[hash_it_t.index] :
+						  NULL);
 			return hash_remove_bucket(hash, &hash_it_t);
 		}
 
@@ -272,7 +292,8 @@ void *hash_remove(struct hashtable_t *hash, void *data)
 	return NULL;
 }
 
-/* resize the hash, returns the pointer to the new hash or NULL on error. removes the old hash on success. */
+/* resize the hash, returns the pointer to the new hash or NULL on error.
+ * removes the old hash on success. */
 struct hashtable_t *hash_resize(struct hashtable_t *hash, int size)
 {
 	struct hashtable_t *new_hash;
@@ -292,8 +313,9 @@ struct hashtable_t *hash_resize(struct hashtable_t *hash, int size)
 			bucket = bucket->next;
 		}
 	}
-
-	hash_delete(hash, NULL);	/* remove hash and eventual overflow buckets but not the content itself. */
+	/* remove hash and eventual overflow buckets but not the
+	 * content itself. */
+	hash_delete(hash, NULL);
 	return new_hash;
 }
 
diff --git a/hash.h b/hash.h
index 66d7162..1682eb8 100644
--- a/hash.h
+++ b/hash.h
@@ -28,8 +28,8 @@ typedef int (*hashdata_choose_cb)(void *, int);
 typedef void (*hashdata_free_cb)(void *);
 
 struct element_t {
-	void *data;						/* pointer to the data */
-	struct element_t *next;			/* overflow bucket pointer */
+	void *data;		/* pointer to the data */
+	struct element_t *next;	/* overflow bucket pointer */
 };
 
 struct hash_it_t {
@@ -40,56 +40,69 @@ struct hash_it_t {
 };
 
 struct hashtable_t {
-	struct element_t **table;					/* the hashtable itself, with the buckets */
-	int elements;								/* number of elements registered */
-	int size;									/* size of hashtable */
-	hashdata_compare_cb compare;			    /* callback to a compare function.
-												 * should compare 2 element datas for their keys,
-												 * return 0 if same and not 0 if not same */
-	hashdata_choose_cb choose;					/* the hashfunction, should return an index based
-												 * on the key in the data of the first argument
-												 * and the size the second */
+	struct element_t **table;	/* the hashtable itself, with the
+					 * buckets */
+	int elements;			/* number of elements registered */
+	int size;			/* size of hashtable */
+	hashdata_compare_cb compare;	/* callback to a compare function.
+					 * should compare 2 element datas for
+					 * their keys, return 0 if same and not
+					 *  0 if not same */
+	hashdata_choose_cb choose;	/* the hashfunction, should return an
+					 * index based on the key in the data
+					 * of the first argument and the size
+					 * the second */
 };
 
 /* clears the hash */
-void 				 hash_init(struct hashtable_t *hash);
+void hash_init(struct hashtable_t *hash);
 
 /* allocates and clears the hash */
-struct hashtable_t	*hash_new(int size, hashdata_compare_cb compare, hashdata_choose_cb choose);
+struct hashtable_t *hash_new(int size, hashdata_compare_cb compare,
+				 hashdata_choose_cb choose);
 
-/* remove bucket (this might be used in hash_iterate() if you already found the bucket
- * you want to delete and don't need the overhead to find it again with hash_remove().
- * But usually, you don't want to use this function, as it fiddles with hash-internals. */
-void 				*hash_remove_bucket(struct hashtable_t *hash, struct hash_it_t *hash_it_t);
+/* remove bucket (this might be used in hash_iterate() if you already found
+ * the bucket you want to delete and don't need the overhead to find it again
+ * with hash_remove(). But usually, you don't want to use this function, as it
+ * fiddles with hash-internals. */
+void *hash_remove_bucket(struct hashtable_t *hash,
+			 struct hash_it_t *hash_it_t);
 
 /* remove the hash structure. if hashdata_free_cb != NULL,
  * this function will be called to remove the elements inside of the hash.
  * if you don't remove the elements, memory might be leaked. */
-void 				 hash_delete(struct hashtable_t *hash, hashdata_free_cb free_cb);
+void hash_delete(struct hashtable_t *hash, hashdata_free_cb free_cb);
 
 /* free only the hashtable and the hash itself. */
-void 				 hash_destroy(struct hashtable_t *hash);
+void hash_destroy(struct hashtable_t *hash);
 
 /* adds data to the hashtable. returns 0 on success, -1 on error */
-int 				 hash_add(struct hashtable_t *hash, void *data);
+int hash_add(struct hashtable_t *hash, void *data);
 
 /* removes data from hash, if found. returns pointer do data on success,
  * so you can remove the used structure yourself, or NULL on error .
  * data could be the structure you use with just the key filled,
  * we just need the key for comparing. */
-void 				*hash_remove(struct hashtable_t *hash, void *data);
+void *hash_remove(struct hashtable_t *hash, void *data);
 
-/* finds data, based on the key in keydata. returns the found data on success, or NULL on error */
-void 				*hash_find(struct hashtable_t *hash, void *keydata);
+/* finds data, based on the key in keydata. returns the found data on success,
+ * or NULL on error */
+void *hash_find(struct hashtable_t *hash, void *keydata);
 
-/* resize the hash, returns the pointer to the new hash or NULL on error. removes the old hash on success */
-struct hashtable_t	*hash_resize(struct hashtable_t *hash, int size);
+/* resize the hash, returns the pointer to the new hash or NULL on error.
+ * removes the old hash on success */
+struct hashtable_t *hash_resize(struct hashtable_t *hash, int size);
 
 /* print the hash table for debugging */
-void 				 hash_debug( struct hashtable_t *hash);
+void hash_debug(struct hashtable_t *hash);
 
 /* iterate though the hash. first element is selected with iter_in NULL.
- * use the returned iterator to access the elements until hash_it_t returns NULL. */
-struct hash_it_t 	*hash_iterate(struct hashtable_t *hash, struct hash_it_t *iter_in);
+ * use the returned iterator to access the elements until hash_it_t
+ * returns NULL. */
+struct hash_it_t *hash_iterate(struct hashtable_t *hash,
+			       struct hash_it_t *iter_in);
+
+/* free hash_it_t pointer when stopping hash_iterate early */
+void hash_iterate_free(struct hash_it_t *iter_in);
 
 #endif
-- 
2.0.0


^ permalink raw reply related	[flat|nested] 8+ messages in thread

* [B.A.T.M.A.N.] [PATCH 2/7] batmand: Use memleak/error path free implementation of hash_resize
  2014-06-17 19:55 [B.A.T.M.A.N.] [PATCH 1/7] batmand: Import hash table version from alfred Sven Eckelmann
@ 2014-06-17 19:55 ` Sven Eckelmann
  2014-06-17 19:56 ` [B.A.T.M.A.N.] [PATCH 3/7] batmand: Fix bitarray 1 bit shift type Sven Eckelmann
                   ` (4 subsequent siblings)
  5 siblings, 0 replies; 8+ messages in thread
From: Sven Eckelmann @ 2014-06-17 19:55 UTC (permalink / raw)
  To: b.a.t.m.a.n; +Cc: Sven Eckelmann

The current implementation of hash_resize uses hash_add directly to initialize
a new hash table. But hash_add has two possible situations when it returns an
error and hash_resize would leak the data:

 * data already exists
 * malloc fails

The check for the duplicated data is not really harmful (besides increasing the
time to re-add elements) but the malloc can potentially return an error. This
malloc is unnecessary and just takes extra time. Instead the bucket from the
old hash table can be re-used.

Signed-off-by: Sven Eckelmann <sven@narfation.org>
---
 hash.c | 72 ++++++++++++++++++++++++++++++++++++++++--------------------------
 1 file changed, 44 insertions(+), 28 deletions(-)

diff --git a/hash.c b/hash.c
index 7271b89..1c551dc 100644
--- a/hash.c
+++ b/hash.c
@@ -63,6 +63,40 @@ void hash_delete(struct hashtable_t *hash, hashdata_free_cb free_cb)
 	hash_destroy(hash);
 }
 
+/* adds data to the hashtable and reuse bucket.
+ * returns 0 on success, -1 on error  */
+static int hash_add_bucket(struct hashtable_t *hash, void *data,
+			   struct element_t *bucket, int check_duplicate)
+{
+	int index;
+	struct element_t *bucket_it, *prev_bucket = NULL;
+
+	index = hash->choose(data, hash->size);
+	bucket_it = hash->table[index];
+
+	while (bucket_it != NULL) {
+		if (check_duplicate &&
+		    hash->compare(bucket_it->data, data))
+			return -1;
+
+		prev_bucket = bucket_it;
+		bucket_it = bucket_it->next;
+	}
+
+	/* init the new bucket */
+	bucket->data = data;
+	bucket->next = NULL;
+
+	/* and link it */
+	if (prev_bucket == NULL)
+		hash->table[index] = bucket;
+	else
+		prev_bucket->next = bucket;
+
+	hash->elements++;
+	return 0;
+}
+
 /* free only the hashtable and the hash itself. */
 void hash_destroy(struct hashtable_t *hash)
 {
@@ -186,19 +220,8 @@ struct hashtable_t *hash_new(int size, hashdata_compare_cb compare,
 /* adds data to the hashtable. returns 0 on success, -1 on error */
 int hash_add(struct hashtable_t *hash, void *data)
 {
-	int index;
-	struct element_t *bucket, *prev_bucket = NULL;
-
-	index = hash->choose(data, hash->size);
-	bucket = hash->table[index];
-
-	while (bucket != NULL) {
-		if (hash->compare(bucket->data, data))
-			return -1;
-
-		prev_bucket = bucket;
-		bucket = bucket->next;
-	}
+	int ret;
+	struct element_t *bucket;
 
 	/* found the tail of the list, add new element */
 	bucket = debugMalloc(sizeof(struct element_t), 304);
@@ -206,18 +229,11 @@ int hash_add(struct hashtable_t *hash, void *data)
 	if (!bucket)
 		return -1;
 
-	/* init the new bucket */
-	bucket->data = data;
-	bucket->next = NULL;
-
-	/* and link it */
-	if (prev_bucket == NULL)
-		hash->table[index] = bucket;
-	else
-		prev_bucket->next = bucket;
+	ret = hash_add_bucket(hash, data, bucket, 1);
+	if (ret < 0)
+		debugFree(bucket, 1307);
 
-	hash->elements++;
-	return 0;
+	return ret;
 }
 
 /* finds data, based on the key in keydata. returns the found data on success,
@@ -307,10 +323,10 @@ struct hashtable_t *hash_resize(struct hashtable_t *hash, int size)
 
 	/* copy the elements */
 	for (i = 0; i < hash->size; i++) {
-		bucket = hash->table[i];
-		while (bucket != NULL) {
-			hash_add(new_hash, bucket->data);
-			bucket = bucket->next;
+		while (hash->table[i]) {
+			bucket = hash->table[i];
+			hash->table[i] = bucket->next;
+			hash_add_bucket(new_hash, bucket->data, bucket, 0);
 		}
 	}
 	/* remove hash and eventual overflow buckets but not the
-- 
2.0.0


^ permalink raw reply related	[flat|nested] 8+ messages in thread

* [B.A.T.M.A.N.] [PATCH 3/7] batmand: Fix bitarray 1 bit shift type
  2014-06-17 19:55 [B.A.T.M.A.N.] [PATCH 1/7] batmand: Import hash table version from alfred Sven Eckelmann
  2014-06-17 19:55 ` [B.A.T.M.A.N.] [PATCH 2/7] batmand: Use memleak/error path free implementation of hash_resize Sven Eckelmann
@ 2014-06-17 19:56 ` Sven Eckelmann
  2014-06-17 19:56 ` [B.A.T.M.A.N.] [PATCH 4/7] batmand: Free socket when setsockopt failed Sven Eckelmann
                   ` (3 subsequent siblings)
  5 siblings, 0 replies; 8+ messages in thread
From: Sven Eckelmann @ 2014-06-17 19:56 UTC (permalink / raw)
  To: b.a.t.m.a.n; +Cc: Sven Eckelmann

The default type of an integer constant is int. This reduced the possible bits
for a shift to 32. But the size of uintmax_t is most likely larger (64 bit).
Thus the upper 32 bits cannot be accessed correctly with this bitarray
implementation.

Signed-off-by: Sven Eckelmann <sven@narfation.org>
---
 bitarray.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/bitarray.c b/bitarray.c
index 133f6e7..354eddd 100644
--- a/bitarray.c
+++ b/bitarray.c
@@ -49,7 +49,7 @@ uint8_t get_bit_status( TYPE_OF_WORD *seq_bits, uint16_t last_seqno, uint16_t cu
 		word_offset= ( last_seqno - curr_seqno ) % WORD_BIT_SIZE;	/* which position in the selected word */
 		word_num   = ( last_seqno - curr_seqno ) / WORD_BIT_SIZE;	/* which word */
 
-		if ( seq_bits[word_num] & 1<<word_offset )   /* get position status */
+		if ( seq_bits[word_num] & (1ULL << word_offset))   /* get position status */
 			return 1;
 		else
 			return 0;
@@ -72,7 +72,7 @@ void bit_mark( TYPE_OF_WORD *seq_bits, int32_t n ) {
 	word_offset= n%WORD_BIT_SIZE;	/* which position in the selected word */
 	word_num   = n/WORD_BIT_SIZE;	/* which word */
 
-	seq_bits[word_num]|= 1<<word_offset;	/* turn the position on */
+	seq_bits[word_num] |= 1ULL << word_offset;	/* turn the position on */
 }
 
 /* shift the packet array p by n places. */
-- 
2.0.0


^ permalink raw reply related	[flat|nested] 8+ messages in thread

* [B.A.T.M.A.N.] [PATCH 4/7] batmand: Free socket when setsockopt failed
  2014-06-17 19:55 [B.A.T.M.A.N.] [PATCH 1/7] batmand: Import hash table version from alfred Sven Eckelmann
  2014-06-17 19:55 ` [B.A.T.M.A.N.] [PATCH 2/7] batmand: Use memleak/error path free implementation of hash_resize Sven Eckelmann
  2014-06-17 19:56 ` [B.A.T.M.A.N.] [PATCH 3/7] batmand: Fix bitarray 1 bit shift type Sven Eckelmann
@ 2014-06-17 19:56 ` Sven Eckelmann
  2014-06-17 19:56 ` [B.A.T.M.A.N.] [PATCH 5/7] batmand: Don't cause NULL ptr dereference when searching through HNAs Sven Eckelmann
                   ` (2 subsequent siblings)
  5 siblings, 0 replies; 8+ messages in thread
From: Sven Eckelmann @ 2014-06-17 19:56 UTC (permalink / raw)
  To: b.a.t.m.a.n; +Cc: Sven Eckelmann

client_to_gw_tun calls setsockopt which can fail. In this case it jumps to the
error handling and cleanup code but doesn't close the udp_sock. This has to be
done to avoid leaking file descriptors.

Signed-off-by: Sven Eckelmann <sven@narfation.org>
---
 posix/tunnel.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/posix/tunnel.c b/posix/tunnel.c
index 08c56a2..8e7611b 100644
--- a/posix/tunnel.c
+++ b/posix/tunnel.c
@@ -201,7 +201,7 @@ void *client_to_gw_tun(void *arg)
 	sock_opts = 1;
         if (setsockopt(udp_sock, SOL_SOCKET, SO_REUSEADDR, &sock_opts, sizeof(sock_opts)) < 0) {
                debug_output(0, "Error - can't set options on udp socket: %s\n", strerror(errno));
-		goto out;
+		goto udp_out;
         }
 
 	if (bind(udp_sock, (struct sockaddr *)&my_addr, sizeof(struct sockaddr_in)) < 0) {
-- 
2.0.0


^ permalink raw reply related	[flat|nested] 8+ messages in thread

* [B.A.T.M.A.N.] [PATCH 5/7] batmand: Don't cause NULL ptr dereference when searching through HNAs
  2014-06-17 19:55 [B.A.T.M.A.N.] [PATCH 1/7] batmand: Import hash table version from alfred Sven Eckelmann
                   ` (2 preceding siblings ...)
  2014-06-17 19:56 ` [B.A.T.M.A.N.] [PATCH 4/7] batmand: Free socket when setsockopt failed Sven Eckelmann
@ 2014-06-17 19:56 ` Sven Eckelmann
  2014-06-17 19:56 ` [B.A.T.M.A.N.] [PATCH 6/7] batmand: Don't try to schedule_own_packet with no if_incoming Sven Eckelmann
  2014-06-17 19:56 ` [B.A.T.M.A.N.] [PATCH 7/7] batmand: Don't dereference orig_node before checking for NULL Sven Eckelmann
  5 siblings, 0 replies; 8+ messages in thread
From: Sven Eckelmann @ 2014-06-17 19:56 UTC (permalink / raw)
  To: b.a.t.m.a.n; +Cc: Sven Eckelmann

The _hna_global_add function searches through the list of HNAs to check if
there is already a hna for this orig pointer. Otherwise it is automatically
allocated.

This was implemented by setting hna_orig_ptr to NULL after the comparison. This
will most likely crash the program because the list_for_each_entry
implementation uses hna_orig_ptr to find the next entry before doing the loop
condition check. The next entry cannot be found by dereferencing NULL+epsilon.

Signed-off-by: Sven Eckelmann <sven@narfation.org>
---
 hna.c | 9 +++++----
 1 file changed, 5 insertions(+), 4 deletions(-)

diff --git a/hna.c b/hna.c
index f0d64e1..0f28bfe 100644
--- a/hna.c
+++ b/hna.c
@@ -322,6 +322,7 @@ static void _hna_global_add(struct orig_node *orig_node, struct hna_element *hna
 	struct hna_orig_ptr *hna_orig_ptr = NULL;
 	struct orig_node *old_orig_node = NULL;
 	struct hashtable_t *swaphash;
+	int found = 0;
 
 	hna_global_entry = ((struct hna_global_entry *)hash_find(hna_global_hash, hna_element));
 
@@ -354,14 +355,14 @@ static void _hna_global_add(struct orig_node *orig_node, struct hna_element *hna
 		return;
 
 	list_for_each_entry(hna_orig_ptr, &hna_global_entry->orig_list, list) {
-		if (hna_orig_ptr->orig_node == orig_node)
+		if (hna_orig_ptr->orig_node == orig_node) {
+			found = 1;
 			break;
-
-		hna_orig_ptr = NULL;
+		}
 	}
 
 	/* append the given orig node to the list */
-	if (!hna_orig_ptr) {
+	if (!found) {
 		hna_orig_ptr = debugMalloc(sizeof(struct hna_orig_ptr), 704);
 
 		if (!hna_orig_ptr)
-- 
2.0.0


^ permalink raw reply related	[flat|nested] 8+ messages in thread

* [B.A.T.M.A.N.] [PATCH 6/7] batmand: Don't try to schedule_own_packet with no if_incoming
  2014-06-17 19:55 [B.A.T.M.A.N.] [PATCH 1/7] batmand: Import hash table version from alfred Sven Eckelmann
                   ` (3 preceding siblings ...)
  2014-06-17 19:56 ` [B.A.T.M.A.N.] [PATCH 5/7] batmand: Don't cause NULL ptr dereference when searching through HNAs Sven Eckelmann
@ 2014-06-17 19:56 ` Sven Eckelmann
  2014-06-17 19:56 ` [B.A.T.M.A.N.] [PATCH 7/7] batmand: Don't dereference orig_node before checking for NULL Sven Eckelmann
  5 siblings, 0 replies; 8+ messages in thread
From: Sven Eckelmann @ 2014-06-17 19:56 UTC (permalink / raw)
  To: b.a.t.m.a.n; +Cc: Sven Eckelmann

send_outstanding_packets checks if a forw_node has a correct if_incoming.
Otherwise it jumps to packet_free to deallocate the packet infrastructure. But
this also schedules packets with the batman interfaces as target incoming_if.
This is known to be NULL but is dereferenced in schedule_own_packet.

This NULL dereference should be avoided.

Signed-off-by: Sven Eckelmann <sven@narfation.org>
---
 schedule.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/schedule.c b/schedule.c
index 3e65d68..03cdb78 100644
--- a/schedule.c
+++ b/schedule.c
@@ -359,7 +359,7 @@ void send_outstanding_packets(uint32_t curr_time)
 
 packet_free:	list_del((struct list_head *)&forw_list, forw_pos, &forw_list);
 
-		if (forw_node->own)
+		if (forw_node->own && forw_node->if_incoming)
 			schedule_own_packet(forw_node->if_incoming);
 
 		debugFree(forw_node->pack_buff, 1501);
-- 
2.0.0


^ permalink raw reply related	[flat|nested] 8+ messages in thread

* [B.A.T.M.A.N.] [PATCH 7/7] batmand: Don't dereference orig_node before checking for NULL
  2014-06-17 19:55 [B.A.T.M.A.N.] [PATCH 1/7] batmand: Import hash table version from alfred Sven Eckelmann
                   ` (4 preceding siblings ...)
  2014-06-17 19:56 ` [B.A.T.M.A.N.] [PATCH 6/7] batmand: Don't try to schedule_own_packet with no if_incoming Sven Eckelmann
@ 2014-06-17 19:56 ` Sven Eckelmann
  2014-06-29 17:25   ` Elektra
  5 siblings, 1 reply; 8+ messages in thread
From: Sven Eckelmann @ 2014-06-17 19:56 UTC (permalink / raw)
  To: b.a.t.m.a.n; +Cc: Sven Eckelmann

The update_route function first stores the orig_node->router in an extra
variable and later checks if orig_node is NULL. This is not only a potential
cause of a crash but can also cause new compilers to drop the NULL check
completely [1].

[1] https://gcc.gnu.org/onlinedocs/gcc-3.4.3/gcc/Optimize-Options.html#index-fdelete_002dnull_002dpointer_002dchecks-399

Signed-off-by: Sven Eckelmann <sven@narfation.org>
---
 batman.c | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/batman.c b/batman.c
index 89b3bf1..c3dfa7f 100644
--- a/batman.c
+++ b/batman.c
@@ -353,10 +353,9 @@ void update_routes(struct orig_node *orig_node, struct neigh_node *neigh_node, u
 	prof_start(PROF_update_routes);
 	debug_output(4, "update_routes() \n");
 
-	old_router = orig_node->router;
-
 	/* also handles orig_node->router == NULL and neigh_node == NULL */
 	if ((orig_node != NULL) && (orig_node->router != neigh_node)) {
+		old_router = orig_node->router;
 
 		if ( ( orig_node != NULL ) && ( neigh_node != NULL ) ) {
 			addr_to_string( orig_node->orig, orig_str, ADDR_STR_LEN );
@@ -415,7 +414,8 @@ void update_routes(struct orig_node *orig_node, struct neigh_node *neigh_node, u
 		orig_node->router = neigh_node;
 
 	} else if (orig_node != NULL) {
-		hna_global_update(orig_node, hna_recv_buff, hna_buff_len, old_router);
+		hna_global_update(orig_node, hna_recv_buff, hna_buff_len,
+				  orig_node->router);
 	}
 
 	prof_stop(PROF_update_routes);
-- 
2.0.0


^ permalink raw reply related	[flat|nested] 8+ messages in thread

* Re: [B.A.T.M.A.N.] [PATCH 7/7] batmand: Don't dereference orig_node before checking for NULL
  2014-06-17 19:56 ` [B.A.T.M.A.N.] [PATCH 7/7] batmand: Don't dereference orig_node before checking for NULL Sven Eckelmann
@ 2014-06-29 17:25   ` Elektra
  0 siblings, 0 replies; 8+ messages in thread
From: Elektra @ 2014-06-29 17:25 UTC (permalink / raw)
  To: b.a.t.m.a.n; +Cc: Sven Eckelmann

Hello Sven –

I have applied your pending patches in 036ae...

Many thanks for your kind support!

Cheers,
Elektra

^ permalink raw reply	[flat|nested] 8+ messages in thread

end of thread, other threads:[~2014-06-29 17:25 UTC | newest]

Thread overview: 8+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2014-06-17 19:55 [B.A.T.M.A.N.] [PATCH 1/7] batmand: Import hash table version from alfred Sven Eckelmann
2014-06-17 19:55 ` [B.A.T.M.A.N.] [PATCH 2/7] batmand: Use memleak/error path free implementation of hash_resize Sven Eckelmann
2014-06-17 19:56 ` [B.A.T.M.A.N.] [PATCH 3/7] batmand: Fix bitarray 1 bit shift type Sven Eckelmann
2014-06-17 19:56 ` [B.A.T.M.A.N.] [PATCH 4/7] batmand: Free socket when setsockopt failed Sven Eckelmann
2014-06-17 19:56 ` [B.A.T.M.A.N.] [PATCH 5/7] batmand: Don't cause NULL ptr dereference when searching through HNAs Sven Eckelmann
2014-06-17 19:56 ` [B.A.T.M.A.N.] [PATCH 6/7] batmand: Don't try to schedule_own_packet with no if_incoming Sven Eckelmann
2014-06-17 19:56 ` [B.A.T.M.A.N.] [PATCH 7/7] batmand: Don't dereference orig_node before checking for NULL Sven Eckelmann
2014-06-29 17:25   ` Elektra

This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.