All of lore.kernel.org
 help / color / mirror / Atom feed
* [PATCH nft 1/3] mnl: add nft_batch_continue() helper
@ 2014-07-15 15:47 Pablo Neira Ayuso
  2014-07-15 15:47 ` [PATCH nft 2/3] mnl: add nft_nlmsg_batch_current() helper Pablo Neira Ayuso
  2014-07-15 15:47 ` [PATCH nft 3/3] src: rework batching logic to fix possible use of uninitialized pages Pablo Neira Ayuso
  0 siblings, 2 replies; 5+ messages in thread
From: Pablo Neira Ayuso @ 2014-07-15 15:47 UTC (permalink / raw)
  To: netfilter-devel; +Cc: kaber, ycnian

Save some LOC with this function that wraps the typical handling
after pushing the netlink message into the batch.

Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
---
 src/mnl.c |   45 +++++++++++++++++----------------------------
 1 file changed, 17 insertions(+), 28 deletions(-)

diff --git a/src/mnl.c b/src/mnl.c
index a843fdc..d9ccff3 100644
--- a/src/mnl.c
+++ b/src/mnl.c
@@ -126,6 +126,12 @@ static void mnl_batch_page_add(void)
 	mnl_nlmsg_batch_next(batch);
 }
 
+static void nft_batch_continue(void)
+{
+	if (!mnl_nlmsg_batch_next(batch))
+		mnl_batch_page_add();
+}
+
 static uint32_t mnl_batch_put(int type)
 {
 	struct nlmsghdr *nlh;
@@ -140,9 +146,7 @@ static uint32_t mnl_batch_put(int type)
 	nfg->nfgen_family = AF_INET;
 	nfg->version = NFNETLINK_V0;
 	nfg->res_id = NFNL_SUBSYS_NFTABLES;
-
-	if (!mnl_nlmsg_batch_next(batch))
-		mnl_batch_page_add();
+	nft_batch_continue();
 
 	return nlh->nlmsg_seq;
 }
@@ -303,8 +307,7 @@ int mnl_nft_rule_batch_add(struct nft_rule *nlr, unsigned int flags,
 			NLM_F_CREATE | flags, seqnum);
 
 	nft_rule_nlmsg_build_payload(nlh, nlr);
-	if (!mnl_nlmsg_batch_next(batch))
-		mnl_batch_page_add();
+	nft_batch_continue();
 
 	return 0;
 }
@@ -320,9 +323,7 @@ int mnl_nft_rule_batch_del(struct nft_rule *nlr, unsigned int flags,
 			0, seqnum);
 
 	nft_rule_nlmsg_build_payload(nlh, nlr);
-
-	if (!mnl_nlmsg_batch_next(batch))
-		mnl_batch_page_add();
+	nft_batch_continue();
 
 	return 0;
 }
@@ -431,9 +432,7 @@ int mnl_nft_chain_batch_add(struct mnl_socket *nf_sock, struct nft_chain *nlc,
 			nft_chain_attr_get_u32(nlc, NFT_CHAIN_ATTR_FAMILY),
 			NLM_F_CREATE | flags, seqnum);
 	nft_chain_nlmsg_build_payload(nlh, nlc);
-
-	if (!mnl_nlmsg_batch_next(batch))
-		mnl_batch_page_add();
+	nft_batch_continue();
 
 	return 0;
 }
@@ -462,9 +461,7 @@ int mnl_nft_chain_batch_del(struct mnl_socket *nf_sock, struct nft_chain *nlc,
 			nft_chain_attr_get_u32(nlc, NFT_CHAIN_ATTR_FAMILY),
 			NLM_F_ACK, seqnum);
 	nft_chain_nlmsg_build_payload(nlh, nlc);
-
-	if (!mnl_nlmsg_batch_next(batch))
-		mnl_batch_page_add();
+	nft_batch_continue();
 
 	return 0;
 }
@@ -560,9 +557,7 @@ int mnl_nft_table_batch_add(struct mnl_socket *nf_sock, struct nft_table *nlt,
 			nft_table_attr_get_u32(nlt, NFT_TABLE_ATTR_FAMILY),
 			flags, seqnum);
 	nft_table_nlmsg_build_payload(nlh, nlt);
-
-	if (!mnl_nlmsg_batch_next(batch))
-		mnl_batch_page_add();
+	nft_batch_continue();
 
 	return 0;
 }
@@ -591,9 +586,7 @@ int mnl_nft_table_batch_del(struct mnl_socket *nf_sock, struct nft_table *nlt,
 			nft_table_attr_get_u32(nlt, NFT_TABLE_ATTR_FAMILY),
 			NLM_F_ACK, seqnum);
 	nft_table_nlmsg_build_payload(nlh, nlt);
-
-	if (!mnl_nlmsg_batch_next(batch))
-		mnl_batch_page_add();
+	nft_batch_continue();
 
 	return 0;
 }
@@ -709,8 +702,7 @@ int mnl_nft_set_batch_add(struct mnl_socket *nf_sock, struct nft_set *nls,
 			nft_set_attr_get_u32(nls, NFT_SET_ATTR_FAMILY),
 			NLM_F_CREATE | flags, seqnum);
 	nft_set_nlmsg_build_payload(nlh, nls);
-	if (!mnl_nlmsg_batch_next(batch))
-		mnl_batch_page_add();
+	nft_batch_continue();
 
 	return 0;
 }
@@ -725,8 +717,7 @@ int mnl_nft_set_batch_del(struct mnl_socket *nf_sock, struct nft_set *nls,
 			nft_set_attr_get_u32(nls, NFT_SET_ATTR_FAMILY),
 			flags, seqnum);
 	nft_set_nlmsg_build_payload(nlh, nls);
-	if (!mnl_nlmsg_batch_next(batch))
-		mnl_batch_page_add();
+	nft_batch_continue();
 
 	return 0;
 }
@@ -853,8 +844,7 @@ int mnl_nft_setelem_batch_add(struct mnl_socket *nf_sock, struct nft_set *nls,
 			nft_set_attr_get_u32(nls, NFT_SET_ATTR_FAMILY),
 			NLM_F_CREATE | flags, seqnum);
 	nft_set_elems_nlmsg_build_payload(nlh, nls);
-	if (!mnl_nlmsg_batch_next(batch))
-		mnl_batch_page_add();
+	nft_batch_continue();
 
 	return 0;
 }
@@ -869,8 +859,7 @@ int mnl_nft_setelem_batch_del(struct mnl_socket *nf_sock, struct nft_set *nls,
 			nft_set_attr_get_u32(nls, NFT_SET_ATTR_FAMILY),
 			0, seqnum);
 	nft_set_elems_nlmsg_build_payload(nlh, nls);
-	if (!mnl_nlmsg_batch_next(batch))
-		mnl_batch_page_add();
+	nft_batch_continue();
 
 	return 0;
 }
-- 
1.7.10.4


^ permalink raw reply related	[flat|nested] 5+ messages in thread

* [PATCH nft 2/3] mnl: add nft_nlmsg_batch_current() helper
  2014-07-15 15:47 [PATCH nft 1/3] mnl: add nft_batch_continue() helper Pablo Neira Ayuso
@ 2014-07-15 15:47 ` Pablo Neira Ayuso
  2014-07-15 15:47 ` [PATCH nft 3/3] src: rework batching logic to fix possible use of uninitialized pages Pablo Neira Ayuso
  1 sibling, 0 replies; 5+ messages in thread
From: Pablo Neira Ayuso @ 2014-07-15 15:47 UTC (permalink / raw)
  To: netfilter-devel; +Cc: kaber, ycnian

Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
---
 src/mnl.c |   31 ++++++++++++++++++-------------
 1 file changed, 18 insertions(+), 13 deletions(-)

diff --git a/src/mnl.c b/src/mnl.c
index d9ccff3..e4f0339 100644
--- a/src/mnl.c
+++ b/src/mnl.c
@@ -106,13 +106,18 @@ struct batch_page {
 	struct mnl_nlmsg_batch *batch;
 };
 
+static void *nft_nlmsg_batch_current(void)
+{
+	return mnl_nlmsg_batch_current(batch);
+}
+
 static void mnl_batch_page_add(void)
 {
 	struct batch_page *batch_page;
 	struct nlmsghdr *last_nlh;
 
 	/* Get the last message not fitting in the batch */
-	last_nlh = mnl_nlmsg_batch_current(batch);
+	last_nlh = nft_nlmsg_batch_current();
 
 	batch_page = xmalloc(sizeof(struct batch_page));
 	batch_page->batch = batch;
@@ -121,7 +126,7 @@ static void mnl_batch_page_add(void)
 	batch = mnl_batch_alloc();
 
 	/* Copy the last message not fitting to the new batch page */
-	memcpy(mnl_nlmsg_batch_current(batch), last_nlh, last_nlh->nlmsg_len);
+	memcpy(nft_nlmsg_batch_current(), last_nlh, last_nlh->nlmsg_len);
 	/* No overflow may happen as this is a new empty batch page */
 	mnl_nlmsg_batch_next(batch);
 }
@@ -137,7 +142,7 @@ static uint32_t mnl_batch_put(int type)
 	struct nlmsghdr *nlh;
 	struct nfgenmsg *nfg;
 
-	nlh = mnl_nlmsg_put_header(mnl_nlmsg_batch_current(batch));
+	nlh = mnl_nlmsg_put_header(nft_nlmsg_batch_current());
 	nlh->nlmsg_type = type;
 	nlh->nlmsg_flags = NLM_F_REQUEST;
 	nlh->nlmsg_seq = mnl_seqnum_alloc();
@@ -301,7 +306,7 @@ int mnl_nft_rule_batch_add(struct nft_rule *nlr, unsigned int flags,
 {
 	struct nlmsghdr *nlh;
 
-	nlh = nft_rule_nlmsg_build_hdr(mnl_nlmsg_batch_current(batch),
+	nlh = nft_rule_nlmsg_build_hdr(nft_nlmsg_batch_current(),
 			NFT_MSG_NEWRULE,
 			nft_rule_attr_get_u32(nlr, NFT_RULE_ATTR_FAMILY),
 			NLM_F_CREATE | flags, seqnum);
@@ -317,7 +322,7 @@ int mnl_nft_rule_batch_del(struct nft_rule *nlr, unsigned int flags,
 {
 	struct nlmsghdr *nlh;
 
-	nlh = nft_rule_nlmsg_build_hdr(mnl_nlmsg_batch_current(batch),
+	nlh = nft_rule_nlmsg_build_hdr(nft_nlmsg_batch_current(),
 			NFT_MSG_DELRULE,
 			nft_rule_attr_get_u32(nlr, NFT_RULE_ATTR_FAMILY),
 			0, seqnum);
@@ -427,7 +432,7 @@ int mnl_nft_chain_batch_add(struct mnl_socket *nf_sock, struct nft_chain *nlc,
 {
 	struct nlmsghdr *nlh;
 
-	nlh = nft_chain_nlmsg_build_hdr(mnl_nlmsg_batch_current(batch),
+	nlh = nft_chain_nlmsg_build_hdr(nft_nlmsg_batch_current(),
 			NFT_MSG_NEWCHAIN,
 			nft_chain_attr_get_u32(nlc, NFT_CHAIN_ATTR_FAMILY),
 			NLM_F_CREATE | flags, seqnum);
@@ -456,7 +461,7 @@ int mnl_nft_chain_batch_del(struct mnl_socket *nf_sock, struct nft_chain *nlc,
 {
 	struct nlmsghdr *nlh;
 
-	nlh = nft_chain_nlmsg_build_hdr(mnl_nlmsg_batch_current(batch),
+	nlh = nft_chain_nlmsg_build_hdr(nft_nlmsg_batch_current(),
 			NFT_MSG_DELCHAIN,
 			nft_chain_attr_get_u32(nlc, NFT_CHAIN_ATTR_FAMILY),
 			NLM_F_ACK, seqnum);
@@ -552,7 +557,7 @@ int mnl_nft_table_batch_add(struct mnl_socket *nf_sock, struct nft_table *nlt,
 {
 	struct nlmsghdr *nlh;
 
-	nlh = nft_table_nlmsg_build_hdr(mnl_nlmsg_batch_current(batch),
+	nlh = nft_table_nlmsg_build_hdr(nft_nlmsg_batch_current(),
 			NFT_MSG_NEWTABLE,
 			nft_table_attr_get_u32(nlt, NFT_TABLE_ATTR_FAMILY),
 			flags, seqnum);
@@ -581,7 +586,7 @@ int mnl_nft_table_batch_del(struct mnl_socket *nf_sock, struct nft_table *nlt,
 {
 	struct nlmsghdr *nlh;
 
-	nlh = nft_table_nlmsg_build_hdr(mnl_nlmsg_batch_current(batch),
+	nlh = nft_table_nlmsg_build_hdr(nft_nlmsg_batch_current(),
 			NFT_MSG_DELTABLE,
 			nft_table_attr_get_u32(nlt, NFT_TABLE_ATTR_FAMILY),
 			NLM_F_ACK, seqnum);
@@ -697,7 +702,7 @@ int mnl_nft_set_batch_add(struct mnl_socket *nf_sock, struct nft_set *nls,
 {
 	struct nlmsghdr *nlh;
 
-	nlh = nft_set_nlmsg_build_hdr(mnl_nlmsg_batch_current(batch),
+	nlh = nft_set_nlmsg_build_hdr(nft_nlmsg_batch_current(),
 			NFT_MSG_NEWSET,
 			nft_set_attr_get_u32(nls, NFT_SET_ATTR_FAMILY),
 			NLM_F_CREATE | flags, seqnum);
@@ -712,7 +717,7 @@ int mnl_nft_set_batch_del(struct mnl_socket *nf_sock, struct nft_set *nls,
 {
 	struct nlmsghdr *nlh;
 
-	nlh = nft_set_nlmsg_build_hdr(mnl_nlmsg_batch_current(batch),
+	nlh = nft_set_nlmsg_build_hdr(nft_nlmsg_batch_current(),
 			NFT_MSG_DELSET,
 			nft_set_attr_get_u32(nls, NFT_SET_ATTR_FAMILY),
 			flags, seqnum);
@@ -839,7 +844,7 @@ int mnl_nft_setelem_batch_add(struct mnl_socket *nf_sock, struct nft_set *nls,
 {
 	struct nlmsghdr *nlh;
 
-	nlh = nft_set_elem_nlmsg_build_hdr(mnl_nlmsg_batch_current(batch),
+	nlh = nft_set_elem_nlmsg_build_hdr(nft_nlmsg_batch_current(),
 			NFT_MSG_NEWSETELEM,
 			nft_set_attr_get_u32(nls, NFT_SET_ATTR_FAMILY),
 			NLM_F_CREATE | flags, seqnum);
@@ -854,7 +859,7 @@ int mnl_nft_setelem_batch_del(struct mnl_socket *nf_sock, struct nft_set *nls,
 {
 	struct nlmsghdr *nlh;
 
-	nlh = nft_set_elem_nlmsg_build_hdr(mnl_nlmsg_batch_current(batch),
+	nlh = nft_set_elem_nlmsg_build_hdr(nft_nlmsg_batch_current(),
 			NFT_MSG_DELSETELEM,
 			nft_set_attr_get_u32(nls, NFT_SET_ATTR_FAMILY),
 			0, seqnum);
-- 
1.7.10.4


^ permalink raw reply related	[flat|nested] 5+ messages in thread

* [PATCH nft 3/3] src: rework batching logic to fix possible use of uninitialized pages
  2014-07-15 15:47 [PATCH nft 1/3] mnl: add nft_batch_continue() helper Pablo Neira Ayuso
  2014-07-15 15:47 ` [PATCH nft 2/3] mnl: add nft_nlmsg_batch_current() helper Pablo Neira Ayuso
@ 2014-07-15 15:47 ` Pablo Neira Ayuso
  2014-07-22 11:02   ` Yanchuan Nian
  1 sibling, 1 reply; 5+ messages in thread
From: Pablo Neira Ayuso @ 2014-07-15 15:47 UTC (permalink / raw)
  To: netfilter-devel; +Cc: kaber, ycnian

This patch reworks the batching logic in several aspects:

1) New batch pages are now always added into the batch page list in
   first place. Then, in the send path, if the last batch page is
   empty, it is removed from the batch list.

2) nft_batch_page_add() is only called if the current batch page is
   full. Therefore, it is guaranteed to find a valid netlink message
   in the batch page when moving the tail that didn't fit into a new
   batch page.

3) The batch paging is initialized and released from the nft_netlink()
   path.

4) No more global struct mnl_nlmsg_batch *batch that points to the
   current batch page. Instead, it is retrieved from the tail of the
   batch list, which indicates the current batch page.

This patch fixes a crash due to access of an uninitialized memory area
caused by calling batch_page_add() with an empty batch in the send path,
and a memleak of the batch page contents. Reported in:

http://patchwork.ozlabs.org/patch/367085/
http://patchwork.ozlabs.org/patch/367774/

The patch is larger, but this saves the zeroing of the batch page area.

Reported-by: Yanchuan Nian <ycnian@gmail.com>
Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
---
 src/main.c    |   14 +++++-----
 src/mnl.c     |   82 ++++++++++++++++++++++++++++++++-------------------------
 src/netlink.c |    1 -
 3 files changed, 54 insertions(+), 43 deletions(-)

diff --git a/src/main.c b/src/main.c
index 30ea2c6..552a35d 100644
--- a/src/main.c
+++ b/src/main.c
@@ -173,6 +173,8 @@ static int nft_netlink(struct parser_state *state, struct list_head *msgs)
 	bool batch_supported = netlink_batch_supported();
 	int ret = 0;
 
+	mnl_batch_init();
+
 	batch_seqnum = mnl_batch_begin();
 	list_for_each_entry(cmd, &state->cmds, list) {
 		memset(&ctx, 0, sizeof(ctx));
@@ -182,16 +184,14 @@ static int nft_netlink(struct parser_state *state, struct list_head *msgs)
 		init_list_head(&ctx.list);
 		ret = do_command(&ctx, cmd);
 		if (ret < 0)
-			return ret;
+			goto out;
 	}
 	mnl_batch_end();
 
-	if (mnl_batch_ready())
-		ret = netlink_batch_send(&err_list);
-	else {
-		mnl_batch_reset();
+	if (!mnl_batch_ready())
 		goto out;
-	}
+
+	ret = netlink_batch_send(&err_list);
 
 	list_for_each_entry_safe(err, tmp, &err_list, head) {
 		list_for_each_entry(cmd, &state->cmds, list) {
@@ -208,6 +208,8 @@ static int nft_netlink(struct parser_state *state, struct list_head *msgs)
 		}
 	}
 out:
+	mnl_batch_reset();
+
 	list_for_each_entry_safe(cmd, next, &state->cmds, list) {
 		list_del(&cmd->list);
 		cmd_free(cmd);
diff --git a/src/mnl.c b/src/mnl.c
index e4f0339..42b11f5 100644
--- a/src/mnl.c
+++ b/src/mnl.c
@@ -82,8 +82,6 @@ nft_mnl_talk(struct mnl_socket *nf_sock, const void *data, unsigned int len,
  */
 #define BATCH_PAGE_SIZE getpagesize() * 32
 
-static struct mnl_nlmsg_batch *batch;
-
 static struct mnl_nlmsg_batch *mnl_batch_alloc(void)
 {
 	static char *buf;
@@ -93,11 +91,6 @@ static struct mnl_nlmsg_batch *mnl_batch_alloc(void)
 	return mnl_nlmsg_batch_start(buf, BATCH_PAGE_SIZE);
 }
 
-void mnl_batch_init(void)
-{
-	batch = mnl_batch_alloc();
-}
-
 static LIST_HEAD(batch_page_list);
 static int batch_num_pages;
 
@@ -106,35 +99,53 @@ struct batch_page {
 	struct mnl_nlmsg_batch *batch;
 };
 
+void mnl_batch_init(void)
+{
+	struct batch_page *batch_page;
+
+	batch_page = xmalloc(sizeof(struct batch_page));
+	batch_page->batch = mnl_batch_alloc();
+	batch_num_pages++;
+	list_add_tail(&batch_page->head, &batch_page_list);
+}
+
+static struct batch_page *nft_batch_page_current(void)
+{
+	return list_entry(batch_page_list.prev, struct batch_page, head);
+}
+
 static void *nft_nlmsg_batch_current(void)
 {
-	return mnl_nlmsg_batch_current(batch);
+	return mnl_nlmsg_batch_current(nft_batch_page_current()->batch);
 }
 
-static void mnl_batch_page_add(void)
+static void nft_batch_page_add(void)
 {
-	struct batch_page *batch_page;
 	struct nlmsghdr *last_nlh;
 
 	/* Get the last message not fitting in the batch */
 	last_nlh = nft_nlmsg_batch_current();
-
-	batch_page = xmalloc(sizeof(struct batch_page));
-	batch_page->batch = batch;
-	list_add_tail(&batch_page->head, &batch_page_list);
-	batch_num_pages++;
-	batch = mnl_batch_alloc();
-
+	/* Add new batch page */
+	mnl_batch_init();
 	/* Copy the last message not fitting to the new batch page */
 	memcpy(nft_nlmsg_batch_current(), last_nlh, last_nlh->nlmsg_len);
 	/* No overflow may happen as this is a new empty batch page */
-	mnl_nlmsg_batch_next(batch);
+	mnl_nlmsg_batch_next(nft_batch_page_current()->batch);
+}
+
+static void nft_batch_page_release(struct batch_page *batch_page)
+{
+	list_del(&batch_page->head);
+	xfree(mnl_nlmsg_batch_head(batch_page->batch));
+	mnl_nlmsg_batch_stop(batch_page->batch);
+	xfree(batch_page);
+	batch_num_pages--;
 }
 
 static void nft_batch_continue(void)
 {
-	if (!mnl_nlmsg_batch_next(batch))
-		mnl_batch_page_add();
+	if (!mnl_nlmsg_batch_next(nft_batch_page_current()->batch))
+		nft_batch_page_add();
 }
 
 static uint32_t mnl_batch_put(int type)
@@ -171,12 +182,16 @@ bool mnl_batch_ready(void)
 	/* Check if the batch only contains the initial and trailing batch
 	 * messages. In that case, the batch is empty.
 	 */
-	return mnl_nlmsg_batch_size(batch) != (NLMSG_HDRLEN+sizeof(struct nfgenmsg)) * 2;
+	return mnl_nlmsg_batch_size(nft_batch_page_current()->batch) !=
+	       (NLMSG_HDRLEN+sizeof(struct nfgenmsg)) * 2;
 }
 
 void mnl_batch_reset(void)
 {
-	mnl_nlmsg_batch_reset(batch);
+	struct batch_page *batch_page, *next;
+
+	list_for_each_entry_safe(batch_page, next, &batch_page_list, head)
+		nft_batch_page_release(batch_page);
 }
 
 static void mnl_err_list_node_add(struct list_head *err_list, int error,
@@ -226,12 +241,12 @@ static ssize_t mnl_nft_socket_sendmsg(const struct mnl_socket *nl)
 		.msg_iov	= iov,
 		.msg_iovlen	= batch_num_pages,
 	};
-	struct batch_page *batch_page, *next;
+	struct batch_page *batch_page;
 	int i = 0;
 
 	mnl_set_sndbuffer(nl);
 
-	list_for_each_entry_safe(batch_page, next, &batch_page_list, head) {
+	list_for_each_entry(batch_page, &batch_page_list, head) {
 		iov[i].iov_base = mnl_nlmsg_batch_head(batch_page->batch);
 		iov[i].iov_len = mnl_nlmsg_batch_size(batch_page->batch);
 		i++;
@@ -243,10 +258,6 @@ static ssize_t mnl_nft_socket_sendmsg(const struct mnl_socket *nl)
 					  sizeof(struct nfgenmsg));
 		}
 #endif
-		list_del(&batch_page->head);
-		xfree(batch_page->batch);
-		xfree(batch_page);
-		batch_num_pages--;
 	}
 
 	return sendmsg(mnl_socket_get_fd(nl), &msg, 0);
@@ -262,12 +273,13 @@ int mnl_batch_talk(struct mnl_socket *nl, struct list_head *err_list)
 		.tv_usec	= 0
 	};
 
-	if (!mnl_nlmsg_batch_is_empty(batch))
-		mnl_batch_page_add();
+	/* Remove last page from the batch if it's empty */
+	if (mnl_nlmsg_batch_is_empty(nft_batch_page_current()->batch))
+		nft_batch_page_release(nft_batch_page_current());
 
 	ret = mnl_nft_socket_sendmsg(nl);
 	if (ret == -1)
-		goto err;
+		return -1;
 
 	FD_ZERO(&readfds);
 	FD_SET(fd, &readfds);
@@ -275,14 +287,14 @@ int mnl_batch_talk(struct mnl_socket *nl, struct list_head *err_list)
 	/* receive and digest all the acknowledgments from the kernel. */
 	ret = select(fd+1, &readfds, NULL, NULL, &tv);
 	if (ret == -1)
-		goto err;
+		return -1;
 
 	while (ret > 0 && FD_ISSET(fd, &readfds)) {
 		struct nlmsghdr *nlh = (struct nlmsghdr *)rcv_buf;
 
 		ret = mnl_socket_recvfrom(nl, rcv_buf, sizeof(rcv_buf));
 		if (ret == -1)
-			goto err;
+			return -1;
 
 		ret = mnl_cb_run(rcv_buf, ret, 0, portid, NULL, NULL);
 		/* Continue on error, make sure we get all acknowledgments */
@@ -291,13 +303,11 @@ int mnl_batch_talk(struct mnl_socket *nl, struct list_head *err_list)
 
 		ret = select(fd+1, &readfds, NULL, NULL, &tv);
 		if (ret == -1)
-			goto err;
+			return -1;
 
 		FD_ZERO(&readfds);
 		FD_SET(fd, &readfds);
 	}
-err:
-	mnl_nlmsg_batch_reset(batch);
 	return ret;
 }
 
diff --git a/src/netlink.c b/src/netlink.c
index afad5a4..0176cb4 100644
--- a/src/netlink.c
+++ b/src/netlink.c
@@ -59,7 +59,6 @@ static void __init netlink_open_sock(void)
 {
 	nf_sock = nfsock_open();
 	fcntl(mnl_socket_get_fd(nf_sock), F_SETFL, O_NONBLOCK);
-	mnl_batch_init();
 }
 
 static void __exit netlink_close_sock(void)
-- 
1.7.10.4


^ permalink raw reply related	[flat|nested] 5+ messages in thread

* Re: [PATCH nft 3/3] src: rework batching logic to fix possible use of uninitialized pages
  2014-07-15 15:47 ` [PATCH nft 3/3] src: rework batching logic to fix possible use of uninitialized pages Pablo Neira Ayuso
@ 2014-07-22 11:02   ` Yanchuan Nian
  2014-07-22 12:18     ` Yanchuan Nian
  0 siblings, 1 reply; 5+ messages in thread
From: Yanchuan Nian @ 2014-07-22 11:02 UTC (permalink / raw)
  To: Pablo Neira Ayuso; +Cc: netfilter-devel, kaber

On Tue, Jul 15, 2014 at 05:47:18PM +0200, Pablo Neira Ayuso wrote:
> This patch reworks the batching logic in several aspects:
> 
> 1) New batch pages are now always added into the batch page list in
>    first place. Then, in the send path, if the last batch page is
>    empty, it is removed from the batch list.
> 
> 2) nft_batch_page_add() is only called if the current batch page is
>    full. Therefore, it is guaranteed to find a valid netlink message
>    in the batch page when moving the tail that didn't fit into a new
>    batch page.
> 
> 3) The batch paging is initialized and released from the nft_netlink()
>    path.
> 
> 4) No more global struct mnl_nlmsg_batch *batch that points to the
>    current batch page. Instead, it is retrieved from the tail of the
>    batch list, which indicates the current batch page.
> 
> This patch fixes a crash due to access of uninitialized memory area in
> due to calling batch_page_add() with an empty batch in the send path,
> and the memleak of the batch page contents. Reported in:
> 
> http://patchwork.ozlabs.org/patch/367085/
> http://patchwork.ozlabs.org/patch/367774/
> 
> The patch is larger, but this save the zeroing of the batch page area.

Tested, it works fine. Just a little question: would it be better to release
all batch pages except the current one after each message is sent?

> 
> Reported-by: Yanchuan Nian <ycnian@gmail.com>
> Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
> ---
>  src/main.c    |   14 +++++-----
>  src/mnl.c     |   82 ++++++++++++++++++++++++++++++++-------------------------
>  src/netlink.c |    1 -
>  3 files changed, 54 insertions(+), 43 deletions(-)
> 
> diff --git a/src/main.c b/src/main.c
> index 30ea2c6..552a35d 100644
> --- a/src/main.c
> +++ b/src/main.c
> @@ -173,6 +173,8 @@ static int nft_netlink(struct parser_state *state, struct list_head *msgs)
>  	bool batch_supported = netlink_batch_supported();
>  	int ret = 0;
>  
> +	mnl_batch_init();
> +
>  	batch_seqnum = mnl_batch_begin();
>  	list_for_each_entry(cmd, &state->cmds, list) {
>  		memset(&ctx, 0, sizeof(ctx));
> @@ -182,16 +184,14 @@ static int nft_netlink(struct parser_state *state, struct list_head *msgs)
>  		init_list_head(&ctx.list);
>  		ret = do_command(&ctx, cmd);
>  		if (ret < 0)
> -			return ret;
> +			goto out;
>  	}
>  	mnl_batch_end();
>  
> -	if (mnl_batch_ready())
> -		ret = netlink_batch_send(&err_list);
> -	else {
> -		mnl_batch_reset();
> +	if (!mnl_batch_ready())
>  		goto out;
> -	}
> +
> +	ret = netlink_batch_send(&err_list);
>  
>  	list_for_each_entry_safe(err, tmp, &err_list, head) {
>  		list_for_each_entry(cmd, &state->cmds, list) {
> @@ -208,6 +208,8 @@ static int nft_netlink(struct parser_state *state, struct list_head *msgs)
>  		}
>  	}
>  out:
> +	mnl_batch_reset();
> +
>  	list_for_each_entry_safe(cmd, next, &state->cmds, list) {
>  		list_del(&cmd->list);
>  		cmd_free(cmd);
> diff --git a/src/mnl.c b/src/mnl.c
> index e4f0339..42b11f5 100644
> --- a/src/mnl.c
> +++ b/src/mnl.c
> @@ -82,8 +82,6 @@ nft_mnl_talk(struct mnl_socket *nf_sock, const void *data, unsigned int len,
>   */
>  #define BATCH_PAGE_SIZE getpagesize() * 32
>  
> -static struct mnl_nlmsg_batch *batch;
> -
>  static struct mnl_nlmsg_batch *mnl_batch_alloc(void)
>  {
>  	static char *buf;
> @@ -93,11 +91,6 @@ static struct mnl_nlmsg_batch *mnl_batch_alloc(void)
>  	return mnl_nlmsg_batch_start(buf, BATCH_PAGE_SIZE);
>  }
>  
> -void mnl_batch_init(void)
> -{
> -	batch = mnl_batch_alloc();
> -}
> -
>  static LIST_HEAD(batch_page_list);
>  static int batch_num_pages;
>  
> @@ -106,35 +99,53 @@ struct batch_page {
>  	struct mnl_nlmsg_batch *batch;
>  };
>  
> +void mnl_batch_init(void)
> +{
> +	struct batch_page *batch_page;
> +
> +	batch_page = xmalloc(sizeof(struct batch_page));
> +	batch_page->batch = mnl_batch_alloc();
> +	batch_num_pages++;
> +	list_add_tail(&batch_page->head, &batch_page_list);
> +}
> +
> +static struct batch_page *nft_batch_page_current(void)
> +{
> +	return list_entry(batch_page_list.prev, struct batch_page, head);
> +}
> +
>  static void *nft_nlmsg_batch_current(void)
>  {
> -	return mnl_nlmsg_batch_current(batch);
> +	return mnl_nlmsg_batch_current(nft_batch_page_current()->batch);
>  }
>  
> -static void mnl_batch_page_add(void)
> +static void nft_batch_page_add(void)
>  {
> -	struct batch_page *batch_page;
>  	struct nlmsghdr *last_nlh;
>  
>  	/* Get the last message not fitting in the batch */
>  	last_nlh = nft_nlmsg_batch_current();
> -
> -	batch_page = xmalloc(sizeof(struct batch_page));
> -	batch_page->batch = batch;
> -	list_add_tail(&batch_page->head, &batch_page_list);
> -	batch_num_pages++;
> -	batch = mnl_batch_alloc();
> -
> +	/* Add new batch page */
> +	mnl_batch_init();
>  	/* Copy the last message not fitting to the new batch page */
>  	memcpy(nft_nlmsg_batch_current(), last_nlh, last_nlh->nlmsg_len);
>  	/* No overflow may happen as this is a new empty batch page */
> -	mnl_nlmsg_batch_next(batch);
> +	mnl_nlmsg_batch_next(nft_batch_page_current()->batch);
> +}
> +
> +static void nft_batch_page_release(struct batch_page *batch_page)
> +{
> +	list_del(&batch_page->head);
> +	xfree(mnl_nlmsg_batch_head(batch_page->batch));
> +	mnl_nlmsg_batch_stop(batch_page->batch);
> +	xfree(batch_page);
> +	batch_num_pages--;
>  }
>  
>  static void nft_batch_continue(void)
>  {
> -	if (!mnl_nlmsg_batch_next(batch))
> -		mnl_batch_page_add();
> +	if (!mnl_nlmsg_batch_next(nft_batch_page_current()->batch))
> +		nft_batch_page_add();
>  }
>  
>  static uint32_t mnl_batch_put(int type)
> @@ -171,12 +182,16 @@ bool mnl_batch_ready(void)
>  	/* Check if the batch only contains the initial and trailing batch
>  	 * messages. In that case, the batch is empty.
>  	 */
> -	return mnl_nlmsg_batch_size(batch) != (NLMSG_HDRLEN+sizeof(struct nfgenmsg)) * 2;
> +	return mnl_nlmsg_batch_size(nft_batch_page_current()->batch) !=
> +	       (NLMSG_HDRLEN+sizeof(struct nfgenmsg)) * 2;
>  }
>  
>  void mnl_batch_reset(void)
>  {
> -	mnl_nlmsg_batch_reset(batch);
> +	struct batch_page *batch_page, *next;
> +
> +	list_for_each_entry_safe(batch_page, next, &batch_page_list, head)
> +		nft_batch_page_release(batch_page);
>  }
>  
>  static void mnl_err_list_node_add(struct list_head *err_list, int error,
> @@ -226,12 +241,12 @@ static ssize_t mnl_nft_socket_sendmsg(const struct mnl_socket *nl)
>  		.msg_iov	= iov,
>  		.msg_iovlen	= batch_num_pages,
>  	};
> -	struct batch_page *batch_page, *next;
> +	struct batch_page *batch_page;
>  	int i = 0;
>  
>  	mnl_set_sndbuffer(nl);
>  
> -	list_for_each_entry_safe(batch_page, next, &batch_page_list, head) {
> +	list_for_each_entry(batch_page, &batch_page_list, head) {
>  		iov[i].iov_base = mnl_nlmsg_batch_head(batch_page->batch);
>  		iov[i].iov_len = mnl_nlmsg_batch_size(batch_page->batch);
>  		i++;
> @@ -243,10 +258,6 @@ static ssize_t mnl_nft_socket_sendmsg(const struct mnl_socket *nl)
>  					  sizeof(struct nfgenmsg));
>  		}
>  #endif
> -		list_del(&batch_page->head);
> -		xfree(batch_page->batch);
> -		xfree(batch_page);
> -		batch_num_pages--;
>  	}
>  
>  	return sendmsg(mnl_socket_get_fd(nl), &msg, 0);
> @@ -262,12 +273,13 @@ int mnl_batch_talk(struct mnl_socket *nl, struct list_head *err_list)
>  		.tv_usec	= 0
>  	};
>  
> -	if (!mnl_nlmsg_batch_is_empty(batch))
> -		mnl_batch_page_add();
> +	/* Remove last page from the batch if it's empty */
> +	if (mnl_nlmsg_batch_is_empty(nft_batch_page_current()->batch))
> +		nft_batch_page_release(nft_batch_page_current());
>  
>  	ret = mnl_nft_socket_sendmsg(nl);
>  	if (ret == -1)
> -		goto err;
> +		return -1;
>  
>  	FD_ZERO(&readfds);
>  	FD_SET(fd, &readfds);
> @@ -275,14 +287,14 @@ int mnl_batch_talk(struct mnl_socket *nl, struct list_head *err_list)
>  	/* receive and digest all the acknowledgments from the kernel. */
>  	ret = select(fd+1, &readfds, NULL, NULL, &tv);
>  	if (ret == -1)
> -		goto err;
> +		return -1;
>  
>  	while (ret > 0 && FD_ISSET(fd, &readfds)) {
>  		struct nlmsghdr *nlh = (struct nlmsghdr *)rcv_buf;
>  
>  		ret = mnl_socket_recvfrom(nl, rcv_buf, sizeof(rcv_buf));
>  		if (ret == -1)
> -			goto err;
> +			return -1;
>  
>  		ret = mnl_cb_run(rcv_buf, ret, 0, portid, NULL, NULL);
>  		/* Continue on error, make sure we get all acknowledgments */
> @@ -291,13 +303,11 @@ int mnl_batch_talk(struct mnl_socket *nl, struct list_head *err_list)
>  
>  		ret = select(fd+1, &readfds, NULL, NULL, &tv);
>  		if (ret == -1)
> -			goto err;
> +			return -1;
>  
>  		FD_ZERO(&readfds);
>  		FD_SET(fd, &readfds);
>  	}
> -err:
> -	mnl_nlmsg_batch_reset(batch);
>  	return ret;
>  }
>  
> diff --git a/src/netlink.c b/src/netlink.c
> index afad5a4..0176cb4 100644
> --- a/src/netlink.c
> +++ b/src/netlink.c
> @@ -59,7 +59,6 @@ static void __init netlink_open_sock(void)
>  {
>  	nf_sock = nfsock_open();
>  	fcntl(mnl_socket_get_fd(nf_sock), F_SETFL, O_NONBLOCK);
> -	mnl_batch_init();
>  }
>  
>  static void __exit netlink_close_sock(void)
> -- 
> 1.7.10.4

^ permalink raw reply	[flat|nested] 5+ messages in thread

* Re: [PATCH nft 3/3] src: rework batching logic to fix possible use of uninitialized pages
  2014-07-22 11:02   ` Yanchuan Nian
@ 2014-07-22 12:18     ` Yanchuan Nian
  0 siblings, 0 replies; 5+ messages in thread
From: Yanchuan Nian @ 2014-07-22 12:18 UTC (permalink / raw)
  To: Pablo Neira Ayuso; +Cc: netfilter-devel, kaber

On Tue, Jul 22, 2014 at 07:02:03PM +0800, Yanchuan Nian wrote:
> On Tue, Jul 15, 2014 at 05:47:18PM +0200, Pablo Neira Ayuso wrote:
> > This patch reworks the batching logic in several aspects:
> > 
> > 1) New batch pages are now always added into the batch page list in
> >    first place. Then, in the send path, if the last batch page is
> >    empty, it is removed from the batch list.
> > 
> > 2) nft_batch_page_add() is only called if the current batch page is
> >    full. Therefore, it is guaranteed to find a valid netlink message
> >    in the batch page when moving the tail that didn't fit into a new
> >    batch page.
> > 
> > 3) The batch paging is initialized and released from the nft_netlink()
> >    path.
> > 
> > 4) No more global struct mnl_nlmsg_batch *batch that points to the
> >    current batch page. Instead, it is retrieved from the tail of the
> >    batch list, which indicates the current batch page.
> > 
> > This patch fixes a crash due to access of an uninitialized memory area
> > caused by calling batch_page_add() with an empty batch in the send path,
> > and the memleak of the batch page contents. Reported in:
> > 
> > http://patchwork.ozlabs.org/patch/367085/
> > http://patchwork.ozlabs.org/patch/367774/
> > 
> > The patch is larger, but this saves the zeroing of the batch page area.
> 
> Tested, it works fine. Just a little question: is it better to release
> all batches but the current after each message sending?
> 
My misunderstanding. No questions now.

> > 
> > Reported-by: Yanchuan Nian <ycnian@gmail.com>
> > Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
> > ---
> >  src/main.c    |   14 +++++-----
> >  src/mnl.c     |   82 ++++++++++++++++++++++++++++++++-------------------------
> >  src/netlink.c |    1 -
> >  3 files changed, 54 insertions(+), 43 deletions(-)
> > 
> > diff --git a/src/main.c b/src/main.c
> > index 30ea2c6..552a35d 100644
> > --- a/src/main.c
> > +++ b/src/main.c
> > @@ -173,6 +173,8 @@ static int nft_netlink(struct parser_state *state, struct list_head *msgs)
> >  	bool batch_supported = netlink_batch_supported();
> >  	int ret = 0;
> >  
> > +	mnl_batch_init();
> > +
> >  	batch_seqnum = mnl_batch_begin();
> >  	list_for_each_entry(cmd, &state->cmds, list) {
> >  		memset(&ctx, 0, sizeof(ctx));
> > @@ -182,16 +184,14 @@ static int nft_netlink(struct parser_state *state, struct list_head *msgs)
> >  		init_list_head(&ctx.list);
> >  		ret = do_command(&ctx, cmd);
> >  		if (ret < 0)
> > -			return ret;
> > +			goto out;
> >  	}
> >  	mnl_batch_end();
> >  
> > -	if (mnl_batch_ready())
> > -		ret = netlink_batch_send(&err_list);
> > -	else {
> > -		mnl_batch_reset();
> > +	if (!mnl_batch_ready())
> >  		goto out;
> > -	}
> > +
> > +	ret = netlink_batch_send(&err_list);
> >  
> >  	list_for_each_entry_safe(err, tmp, &err_list, head) {
> >  		list_for_each_entry(cmd, &state->cmds, list) {
> > @@ -208,6 +208,8 @@ static int nft_netlink(struct parser_state *state, struct list_head *msgs)
> >  		}
> >  	}
> >  out:
> > +	mnl_batch_reset();
> > +
> >  	list_for_each_entry_safe(cmd, next, &state->cmds, list) {
> >  		list_del(&cmd->list);
> >  		cmd_free(cmd);
> > diff --git a/src/mnl.c b/src/mnl.c
> > index e4f0339..42b11f5 100644
> > --- a/src/mnl.c
> > +++ b/src/mnl.c
> > @@ -82,8 +82,6 @@ nft_mnl_talk(struct mnl_socket *nf_sock, const void *data, unsigned int len,
> >   */
> >  #define BATCH_PAGE_SIZE getpagesize() * 32
> >  
> > -static struct mnl_nlmsg_batch *batch;
> > -
> >  static struct mnl_nlmsg_batch *mnl_batch_alloc(void)
> >  {
> >  	static char *buf;
> > @@ -93,11 +91,6 @@ static struct mnl_nlmsg_batch *mnl_batch_alloc(void)
> >  	return mnl_nlmsg_batch_start(buf, BATCH_PAGE_SIZE);
> >  }
> >  
> > -void mnl_batch_init(void)
> > -{
> > -	batch = mnl_batch_alloc();
> > -}
> > -
> >  static LIST_HEAD(batch_page_list);
> >  static int batch_num_pages;
> >  
> > @@ -106,35 +99,53 @@ struct batch_page {
> >  	struct mnl_nlmsg_batch *batch;
> >  };
> >  
> > +void mnl_batch_init(void)
> > +{
> > +	struct batch_page *batch_page;
> > +
> > +	batch_page = xmalloc(sizeof(struct batch_page));
> > +	batch_page->batch = mnl_batch_alloc();
> > +	batch_num_pages++;
> > +	list_add_tail(&batch_page->head, &batch_page_list);
> > +}
> > +
> > +static struct batch_page *nft_batch_page_current(void)
> > +{
> > +	return list_entry(batch_page_list.prev, struct batch_page, head);
> > +}
> > +
> >  static void *nft_nlmsg_batch_current(void)
> >  {
> > -	return mnl_nlmsg_batch_current(batch);
> > +	return mnl_nlmsg_batch_current(nft_batch_page_current()->batch);
> >  }
> >  
> > -static void mnl_batch_page_add(void)
> > +static void nft_batch_page_add(void)
> >  {
> > -	struct batch_page *batch_page;
> >  	struct nlmsghdr *last_nlh;
> >  
> >  	/* Get the last message not fitting in the batch */
> >  	last_nlh = nft_nlmsg_batch_current();
> > -
> > -	batch_page = xmalloc(sizeof(struct batch_page));
> > -	batch_page->batch = batch;
> > -	list_add_tail(&batch_page->head, &batch_page_list);
> > -	batch_num_pages++;
> > -	batch = mnl_batch_alloc();
> > -
> > +	/* Add new batch page */
> > +	mnl_batch_init();
> >  	/* Copy the last message not fitting to the new batch page */
> >  	memcpy(nft_nlmsg_batch_current(), last_nlh, last_nlh->nlmsg_len);
> >  	/* No overflow may happen as this is a new empty batch page */
> > -	mnl_nlmsg_batch_next(batch);
> > +	mnl_nlmsg_batch_next(nft_batch_page_current()->batch);
> > +}
> > +
> > +static void nft_batch_page_release(struct batch_page *batch_page)
> > +{
> > +	list_del(&batch_page->head);
> > +	xfree(mnl_nlmsg_batch_head(batch_page->batch));
> > +	mnl_nlmsg_batch_stop(batch_page->batch);
> > +	xfree(batch_page);
> > +	batch_num_pages--;
> >  }
> >  
> >  static void nft_batch_continue(void)
> >  {
> > -	if (!mnl_nlmsg_batch_next(batch))
> > -		mnl_batch_page_add();
> > +	if (!mnl_nlmsg_batch_next(nft_batch_page_current()->batch))
> > +		nft_batch_page_add();
> >  }
> >  
> >  static uint32_t mnl_batch_put(int type)
> > @@ -171,12 +182,16 @@ bool mnl_batch_ready(void)
> >  	/* Check if the batch only contains the initial and trailing batch
> >  	 * messages. In that case, the batch is empty.
> >  	 */
> > -	return mnl_nlmsg_batch_size(batch) != (NLMSG_HDRLEN+sizeof(struct nfgenmsg)) * 2;
> > +	return mnl_nlmsg_batch_size(nft_batch_page_current()->batch) !=
> > +	       (NLMSG_HDRLEN+sizeof(struct nfgenmsg)) * 2;
> >  }
> >  
> >  void mnl_batch_reset(void)
> >  {
> > -	mnl_nlmsg_batch_reset(batch);
> > +	struct batch_page *batch_page, *next;
> > +
> > +	list_for_each_entry_safe(batch_page, next, &batch_page_list, head)
> > +		nft_batch_page_release(batch_page);
> >  }
> >  
> >  static void mnl_err_list_node_add(struct list_head *err_list, int error,
> > @@ -226,12 +241,12 @@ static ssize_t mnl_nft_socket_sendmsg(const struct mnl_socket *nl)
> >  		.msg_iov	= iov,
> >  		.msg_iovlen	= batch_num_pages,
> >  	};
> > -	struct batch_page *batch_page, *next;
> > +	struct batch_page *batch_page;
> >  	int i = 0;
> >  
> >  	mnl_set_sndbuffer(nl);
> >  
> > -	list_for_each_entry_safe(batch_page, next, &batch_page_list, head) {
> > +	list_for_each_entry(batch_page, &batch_page_list, head) {
> >  		iov[i].iov_base = mnl_nlmsg_batch_head(batch_page->batch);
> >  		iov[i].iov_len = mnl_nlmsg_batch_size(batch_page->batch);
> >  		i++;
> > @@ -243,10 +258,6 @@ static ssize_t mnl_nft_socket_sendmsg(const struct mnl_socket *nl)
> >  					  sizeof(struct nfgenmsg));
> >  		}
> >  #endif
> > -		list_del(&batch_page->head);
> > -		xfree(batch_page->batch);
> > -		xfree(batch_page);
> > -		batch_num_pages--;
> >  	}
> >  
> >  	return sendmsg(mnl_socket_get_fd(nl), &msg, 0);
> > @@ -262,12 +273,13 @@ int mnl_batch_talk(struct mnl_socket *nl, struct list_head *err_list)
> >  		.tv_usec	= 0
> >  	};
> >  
> > -	if (!mnl_nlmsg_batch_is_empty(batch))
> > -		mnl_batch_page_add();
> > +	/* Remove last page from the batch if it's empty */
> > +	if (mnl_nlmsg_batch_is_empty(nft_batch_page_current()->batch))
> > +		nft_batch_page_release(nft_batch_page_current());
> >  
> >  	ret = mnl_nft_socket_sendmsg(nl);
> >  	if (ret == -1)
> > -		goto err;
> > +		return -1;
> >  
> >  	FD_ZERO(&readfds);
> >  	FD_SET(fd, &readfds);
> > @@ -275,14 +287,14 @@ int mnl_batch_talk(struct mnl_socket *nl, struct list_head *err_list)
> >  	/* receive and digest all the acknowledgments from the kernel. */
> >  	ret = select(fd+1, &readfds, NULL, NULL, &tv);
> >  	if (ret == -1)
> > -		goto err;
> > +		return -1;
> >  
> >  	while (ret > 0 && FD_ISSET(fd, &readfds)) {
> >  		struct nlmsghdr *nlh = (struct nlmsghdr *)rcv_buf;
> >  
> >  		ret = mnl_socket_recvfrom(nl, rcv_buf, sizeof(rcv_buf));
> >  		if (ret == -1)
> > -			goto err;
> > +			return -1;
> >  
> >  		ret = mnl_cb_run(rcv_buf, ret, 0, portid, NULL, NULL);
> >  		/* Continue on error, make sure we get all acknowledgments */
> > @@ -291,13 +303,11 @@ int mnl_batch_talk(struct mnl_socket *nl, struct list_head *err_list)
> >  
> >  		ret = select(fd+1, &readfds, NULL, NULL, &tv);
> >  		if (ret == -1)
> > -			goto err;
> > +			return -1;
> >  
> >  		FD_ZERO(&readfds);
> >  		FD_SET(fd, &readfds);
> >  	}
> > -err:
> > -	mnl_nlmsg_batch_reset(batch);
> >  	return ret;
> >  }
> >  
> > diff --git a/src/netlink.c b/src/netlink.c
> > index afad5a4..0176cb4 100644
> > --- a/src/netlink.c
> > +++ b/src/netlink.c
> > @@ -59,7 +59,6 @@ static void __init netlink_open_sock(void)
> >  {
> >  	nf_sock = nfsock_open();
> >  	fcntl(mnl_socket_get_fd(nf_sock), F_SETFL, O_NONBLOCK);
> > -	mnl_batch_init();
> >  }
> >  
> >  static void __exit netlink_close_sock(void)
> > -- 
> > 1.7.10.4

^ permalink raw reply	[flat|nested] 5+ messages in thread

end of thread, other threads:[~2014-07-22 12:15 UTC | newest]

Thread overview: 5+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2014-07-15 15:47 [PATCH nft 1/3] mnl: add nft_batch_continue() helper Pablo Neira Ayuso
2014-07-15 15:47 ` [PATCH nft 2/3] mnl: add nft_nlmsg_batch_current() helper Pablo Neira Ayuso
2014-07-15 15:47 ` [PATCH nft 3/3] src: rework batching logic to fix possible use of uninitialized pages Pablo Neira Ayuso
2014-07-22 11:02   ` Yanchuan Nian
2014-07-22 12:18     ` Yanchuan Nian

This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.