From: wangyunjian <wangyunjian@huawei.com>
To: <mst@redhat.com>, <jasowang@redhat.com>
Cc: <virtualization@lists.linux-foundation.org>,
	<netdev@vger.kernel.org>, <jerry.lilijun@huawei.com>,
	<chenchanghu@huawei.com>, <xudingke@huawei.com>,
	Yunjian Wang <wangyunjian@huawei.com>
Subject: [PATCH net v2] tun: fix incorrect ubuf refcount on error path
Date: Wed, 9 Dec 2020 20:41:43 +0800
Message-ID: <1607517703-18472-1-git-send-email-wangyunjian@huawei.com>
In-Reply-To: <1606982459-41752-1-git-send-email-wangyunjian@huawei.com>

From: Yunjian Wang <wangyunjian@huawei.com>

Once the ubuf_info callback has been attached to the skb, that callback
(vhost_net_zerocopy_callback) is invoked to decrease the refcount when
the skb is freed. But if an error occurs later in tun_get_user(), the
error handling in vhost handle_tx() will try to decrease the same
refcount again, so the reference is dropped twice. Fix this by delaying
the copy of ubuf_info into the skb until we are sure no error can
occur.
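
The following standalone sketch (hypothetical names such as struct ubuf,
zerocopy_callback() and tun_get_user_old(); not kernel code) only
illustrates why the reference ends up dropped twice when the skb is
freed on the tun error path and vhost's error handling then drops it
again:

  #include <stdio.h>

  struct ubuf { int refcnt; };

  /* stands in for vhost_net_zerocopy_callback(): runs when the skb is freed */
  static void zerocopy_callback(struct ubuf *u)
  {
      u->refcnt--;
  }

  /* stands in for tun_get_user() before this patch: the callback is
   * already attached when a later check fails, so freeing the skb
   * drops the reference once, and the error is still returned */
  static int tun_get_user_old(struct ubuf *u)
  {
      zerocopy_callback(u);   /* skb freed on the error path */
      return -1;              /* error propagated back to vhost */
  }

  int main(void)
  {
      struct ubuf u = { .refcnt = 1 };

      /* stands in for vhost handle_tx(): its error path drops the ref too */
      if (tun_get_user_old(&u) < 0)
          u.refcnt--;

      printf("refcnt = %d\n", u.refcnt);  /* prints -1: dropped twice */
      return 0;
  }

With tun_copy_ubuf_info() called only after all checks have passed, at
most one of the two decrements can run for a given packet.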

Fixes: 4477138fa0ae ("tun: properly test for IFF_UP")
Fixes: 90e33d459407 ("tun: enable napi_gro_frags() for TUN/TAP driver")
Signed-off-by: Yunjian Wang <wangyunjian@huawei.com>
---
v2:
   Updated code: fix by delaying the copy of ubuf_info until no error
   can occur
---
 drivers/net/tun.c | 29 +++++++++++++++++++----------
 1 file changed, 19 insertions(+), 10 deletions(-)

diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 2dc1988a8973..2ea822328e73 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -1637,6 +1637,20 @@ static struct sk_buff *tun_build_skb(struct tun_struct *tun,
 	return NULL;
 }
 
+/* copy ubuf_info for callback when skb has no error */
+static inline void tun_copy_ubuf_info(struct sk_buff *skb, bool zerocopy, void *msg_control)
+{
+	if (zerocopy) {
+		skb_shinfo(skb)->destructor_arg = msg_control;
+		skb_shinfo(skb)->tx_flags |= SKBTX_DEV_ZEROCOPY;
+		skb_shinfo(skb)->tx_flags |= SKBTX_SHARED_FRAG;
+	} else if (msg_control) {
+		struct ubuf_info *uarg = msg_control;
+
+		uarg->callback(uarg, false);
+	}
+}
+
 /* Get packet from user space buffer */
 static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
 			    void *msg_control, struct iov_iter *from,
@@ -1812,16 +1826,6 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
 		break;
 	}
 
-	/* copy skb_ubuf_info for callback when skb has no error */
-	if (zerocopy) {
-		skb_shinfo(skb)->destructor_arg = msg_control;
-		skb_shinfo(skb)->tx_flags |= SKBTX_DEV_ZEROCOPY;
-		skb_shinfo(skb)->tx_flags |= SKBTX_SHARED_FRAG;
-	} else if (msg_control) {
-		struct ubuf_info *uarg = msg_control;
-		uarg->callback(uarg, false);
-	}
-
 	skb_reset_network_header(skb);
 	skb_probe_transport_header(skb);
 	skb_record_rx_queue(skb, tfile->queue_index);
@@ -1830,6 +1834,7 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
 		struct bpf_prog *xdp_prog;
 		int ret;
 
+		tun_copy_ubuf_info(skb, zerocopy, msg_control);
 		local_bh_disable();
 		rcu_read_lock();
 		xdp_prog = rcu_dereference(tun->xdp_prog);
@@ -1881,6 +1886,7 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
 			return -ENOMEM;
 		}
 
+		tun_copy_ubuf_info(skb, zerocopy, msg_control);
 		local_bh_disable();
 		napi_gro_frags(&tfile->napi);
 		local_bh_enable();
@@ -1889,6 +1895,7 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
 		struct sk_buff_head *queue = &tfile->sk.sk_write_queue;
 		int queue_len;
 
+		tun_copy_ubuf_info(skb, zerocopy, msg_control);
 		spin_lock_bh(&queue->lock);
 		__skb_queue_tail(queue, skb);
 		queue_len = skb_queue_len(queue);
@@ -1899,8 +1906,10 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
 
 		local_bh_enable();
 	} else if (!IS_ENABLED(CONFIG_4KSTACKS)) {
+		tun_copy_ubuf_info(skb, zerocopy, msg_control);
 		tun_rx_batched(tun, tfile, skb, more);
 	} else {
+		tun_copy_ubuf_info(skb, zerocopy, msg_control);
 		netif_rx_ni(skb);
 	}
 	rcu_read_unlock();
-- 
2.23.0

