* [Qemu-devel] [PATCH v3 0/7] port network layer onto glib
@ 2013-06-27  3:38 Liu Ping Fan
  2013-06-27  3:38 ` [Qemu-devel] [PATCH v3 1/7] net: force NetQueue opaque to be NetClientState Liu Ping Fan
                   ` (6 more replies)
  0 siblings, 7 replies; 10+ messages in thread
From: Liu Ping Fan @ 2013-06-27  3:38 UTC (permalink / raw)
  To: qemu-devel; +Cc: mdroth, Stefan Hajnoczi

This series resolves only the net core re-entrancy problem; the
glib-related patches are deferred to a future series.

v2->v3:
  fix net layer re-entrancy detection using TLS
  fix the netdev property's nc reference problem

v1->v2:
  introduce NetQueue re-entrancy detection and defer nested calls to a BH


Liu Ping Fan (7):
  net: force NetQueue opaque to be NetClientState
  net: distinguish & defer nested call to BH
  net: introduce lock to protect NetQueue
  net: introduce lock to protect NetClientState's peer's access
  net: introduce lock to protect net clients
  net: using refcnt to manage NetClientState
  net: hub use lock to protect ports list

 hw/core/qdev-properties-system.c |  35 ++++++++++++
 include/net/net.h                |  11 ++++
 include/net/queue.h              |   2 +-
 net/hub.c                        |  28 +++++++++-
 net/net.c                        | 113 ++++++++++++++++++++++++++++++++++++---
 net/queue.c                      |  74 ++++++++++++++++++++++---
 net/slirp.c                      |   3 +-
 7 files changed, 250 insertions(+), 16 deletions(-)

-- 
1.8.1.4


* [Qemu-devel] [PATCH v3 1/7] net: force NetQueue opaque to be NetClientState
  2013-06-27  3:38 [Qemu-devel] [PATCH v3 0/7] port network layer onto glib Liu Ping Fan
@ 2013-06-27  3:38 ` Liu Ping Fan
  2013-06-27  3:38 ` [Qemu-devel] [PATCH v3 2/7] net: distinguish & defer nested call to BH Liu Ping Fan
                   ` (5 subsequent siblings)
  6 siblings, 0 replies; 10+ messages in thread
From: Liu Ping Fan @ 2013-06-27  3:38 UTC (permalink / raw)
  To: qemu-devel; +Cc: mdroth, Stefan Hajnoczi

qemu_net_client_setup() is the only caller of qemu_new_net_queue(), and
it always passes in a NetClientState. By forcing the opaque pointer to
be a NetClientState, we can ref/unref the NetQueue's owner.
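
A minimal sketch of what the typed owner buys us, assembled from later
patches in this series (the BH deferral of patch 2 and the
netclient_ref()/netclient_unref() helpers of patch 6); this is an
illustration, not code added by this patch:

    static void schedule_deferred_flush(NetQueue *queue)
    {
        NetQueueBH *q_bh = g_slice_new(NetQueueBH);

        q_bh->bh = qemu_bh_new(qemu_net_queue_send_bh, q_bh);
        /* possible only because queue->nc is a NetClientState, not void * */
        q_bh->nc = queue->nc;
        netclient_ref(q_bh->nc);  /* pin the owner until the BH has run */
        qemu_bh_schedule(q_bh->bh);
    }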

Signed-off-by: Liu Ping Fan <pingfank@linux.vnet.ibm.com>
---
 include/net/queue.h |  2 +-
 net/queue.c         | 10 +++++-----
 2 files changed, 6 insertions(+), 6 deletions(-)

diff --git a/include/net/queue.h b/include/net/queue.h
index fc02b33..1d9e4c3 100644
--- a/include/net/queue.h
+++ b/include/net/queue.h
@@ -34,7 +34,7 @@ typedef void (NetPacketSent) (NetClientState *sender, ssize_t ret);
 #define QEMU_NET_PACKET_FLAG_NONE  0
 #define QEMU_NET_PACKET_FLAG_RAW  (1<<0)
 
-NetQueue *qemu_new_net_queue(void *opaque);
+NetQueue *qemu_new_net_queue(NetClientState *nc);
 
 void qemu_del_net_queue(NetQueue *queue);
 
diff --git a/net/queue.c b/net/queue.c
index 859d02a..1937345 100644
--- a/net/queue.c
+++ b/net/queue.c
@@ -49,7 +49,7 @@ struct NetPacket {
 };
 
 struct NetQueue {
-    void *opaque;
+    NetClientState *nc;
     uint32_t nq_maxlen;
     uint32_t nq_count;
 
@@ -58,13 +58,13 @@ struct NetQueue {
     unsigned delivering : 1;
 };
 
-NetQueue *qemu_new_net_queue(void *opaque)
+NetQueue *qemu_new_net_queue(NetClientState *nc)
 {
     NetQueue *queue;
 
     queue = g_malloc0(sizeof(NetQueue));
 
-    queue->opaque = opaque;
+    queue->nc = nc;
     queue->nq_maxlen = 10000;
     queue->nq_count = 0;
 
@@ -154,7 +154,7 @@ static ssize_t qemu_net_queue_deliver(NetQueue *queue,
     ssize_t ret = -1;
 
     queue->delivering = 1;
-    ret = qemu_deliver_packet(sender, flags, data, size, queue->opaque);
+    ret = qemu_deliver_packet(sender, flags, data, size, queue->nc);
     queue->delivering = 0;
 
     return ret;
@@ -169,7 +169,7 @@ static ssize_t qemu_net_queue_deliver_iov(NetQueue *queue,
     ssize_t ret = -1;
 
     queue->delivering = 1;
-    ret = qemu_deliver_packet_iov(sender, flags, iov, iovcnt, queue->opaque);
+    ret = qemu_deliver_packet_iov(sender, flags, iov, iovcnt, queue->nc);
     queue->delivering = 0;
 
     return ret;
-- 
1.8.1.4


* [Qemu-devel] [PATCH v3 2/7] net: distinguish & defer nested call to BH
  2013-06-27  3:38 [Qemu-devel] [PATCH v3 0/7] port network layer onto glib Liu Ping Fan
  2013-06-27  3:38 ` [Qemu-devel] [PATCH v3 1/7] net: force NetQueue opaque to be NetClientState Liu Ping Fan
@ 2013-06-27  3:38 ` Liu Ping Fan
  2013-07-04  2:40   ` liu ping fan
  2013-06-27  3:38 ` [Qemu-devel] [PATCH v3 3/7] net: introduce lock to protect NetQueue Liu Ping Fan
                   ` (4 subsequent siblings)
  6 siblings, 1 reply; 10+ messages in thread
From: Liu Ping Fan @ 2013-06-27  3:38 UTC (permalink / raw)
  To: qemu-devel; +Cc: mdroth, Stefan Hajnoczi

When the network layer can run on multiple threads, a nested call caused
by nc->receive() can raise issues such as deadlock, so postpone it to a
BH. We distinguish the nested call "A->B->A" from the parallel call
"A->B, B->A" by using the TLS variable net_enter. Here A and B each
stand for a NetClientState.
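
A standalone sketch of the detection idea, using plain __thread where
the patch uses the DEFINE_TLS()/tls_var() macros from "qemu/tls.h": a
nested call on the same thread sees a depth of 2 and defers, while a
parallel call on another thread sees its own counter, still 1, and
delivers directly:

    #include <assert.h>
    #include <stdio.h>

    static __thread int net_enter;

    static void send_packet(int reenter_once)
    {
        int depth = ++net_enter;

        assert(depth <= 2);
        if (depth == 2) {
            /* nested A->B->A: queue the packet, flush later from a BH */
            printf("depth %d: deferred\n", depth);
        } else {
            printf("depth %d: delivering\n", depth);
            if (reenter_once) {
                send_packet(0);  /* models nc->receive() re-entering us */
            }
        }
        --net_enter;
    }

    int main(void)
    {
        send_packet(1);
        return 0;
    }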

Signed-off-by: Liu Ping Fan <pingfank@linux.vnet.ibm.com>
---
 net/queue.c | 48 ++++++++++++++++++++++++++++++++++++++++++++++--
 1 file changed, 46 insertions(+), 2 deletions(-)

diff --git a/net/queue.c b/net/queue.c
index 1937345..d508b7a 100644
--- a/net/queue.c
+++ b/net/queue.c
@@ -24,6 +24,9 @@
 #include "net/queue.h"
 #include "qemu/queue.h"
 #include "net/net.h"
+#include "block/aio.h"
+#include "qemu/main-loop.h"
+#include "qemu/tls.h"
 
 /* The delivery handler may only return zero if it will call
  * qemu_net_queue_flush() when it determines that it is once again able
@@ -58,6 +61,23 @@ struct NetQueue {
     unsigned delivering : 1;
 };
 
+typedef struct NetQueueBH {
+    QEMUBH *bh;
+    NetClientState *nc;
+} NetQueueBH;
+
+static void qemu_net_queue_send_bh(void *opaque)
+{
+    NetQueueBH *q_bh = opaque;
+    NetQueue *queue = q_bh->nc->send_queue;
+
+    qemu_net_queue_flush(queue);
+    qemu_bh_delete(q_bh->bh);
+    g_slice_free(NetQueueBH, q_bh);
+}
+
+DEFINE_TLS(int, net_enter) = 0;
+
 NetQueue *qemu_new_net_queue(NetClientState *nc)
 {
     NetQueue *queue;
@@ -183,9 +203,20 @@ ssize_t qemu_net_queue_send(NetQueue *queue,
                             NetPacketSent *sent_cb)
 {
     ssize_t ret;
+    int reenter;
 
-    if (queue->delivering || !qemu_can_send_packet(sender)) {
+    reenter = ++tls_var(net_enter);
+    assert(reenter <= 2);
+    if (queue->delivering || !qemu_can_send_packet(sender) ||
+        reenter == 2) {
         qemu_net_queue_append(queue, sender, flags, data, size, sent_cb);
+        if (reenter == 2) {
+            NetQueueBH *que_bh = g_slice_new(NetQueueBH);
+            que_bh->bh = qemu_bh_new(qemu_net_queue_send_bh, que_bh);
+            que_bh->nc = queue->nc;
+            qemu_bh_schedule(que_bh->bh);
+            --tls_var(net_enter);
+        }
         return 0;
     }
 
@@ -196,6 +227,7 @@ ssize_t qemu_net_queue_send(NetQueue *queue,
     }
 
     qemu_net_queue_flush(queue);
+    --tls_var(net_enter);
 
     return ret;
 }
@@ -208,9 +240,20 @@ ssize_t qemu_net_queue_send_iov(NetQueue *queue,
                                 NetPacketSent *sent_cb)
 {
     ssize_t ret;
+    int reenter;
 
-    if (queue->delivering || !qemu_can_send_packet(sender)) {
+    reenter = ++tls_var(net_enter);
+    assert(reenter <= 2);
+    if (queue->delivering || !qemu_can_send_packet(sender) ||
+        reenter == 2) {
         qemu_net_queue_append_iov(queue, sender, flags, iov, iovcnt, sent_cb);
+        if (reenter == 2) {
+            NetQueueBH *que_bh = g_slice_new(NetQueueBH);
+            que_bh->bh = qemu_bh_new(qemu_net_queue_send_bh, que_bh);
+            que_bh->nc = queue->nc;
+            qemu_bh_schedule(que_bh->bh);
+            --tls_var(net_enter);
+        }
         return 0;
     }
 
@@ -221,6 +264,7 @@ ssize_t qemu_net_queue_send_iov(NetQueue *queue,
     }
 
     qemu_net_queue_flush(queue);
+    --tls_var(net_enter);
 
     return ret;
 }
-- 
1.8.1.4


* [Qemu-devel] [PATCH v3 3/7] net: introduce lock to protect NetQueue
  2013-06-27  3:38 [Qemu-devel] [PATCH v3 0/7] port network layer onto glib Liu Ping Fan
  2013-06-27  3:38 ` [Qemu-devel] [PATCH v3 1/7] net: force NetQueue opaque to be NetClientState Liu Ping Fan
  2013-06-27  3:38 ` [Qemu-devel] [PATCH v3 2/7] net: distinguish & defer nested call to BH Liu Ping Fan
@ 2013-06-27  3:38 ` Liu Ping Fan
  2013-06-27  3:38 ` [Qemu-devel] [PATCH v3 4/7] net: introduce lock to protect NetClientState's peer's access Liu Ping Fan
                   ` (3 subsequent siblings)
  6 siblings, 0 replies; 10+ messages in thread
From: Liu Ping Fan @ 2013-06-27  3:38 UTC (permalink / raw)
  To: qemu-devel; +Cc: mdroth, Stefan Hajnoczi

A NetQueue can be accessed by its nc and that nc's peers at the same
time, so a lock is needed to protect it.
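
A sketch of the race this lock closes, assuming the threading model the
series is building toward (a sender on a vCPU or device thread, the
deferred flush from patch 2 on the main loop); illustration only:

    thread A (tx path)                 main loop (BH from patch 2)
    ------------------                 ----------------------------
    qemu_net_queue_append(q, ...)      qemu_net_queue_send_bh(...)
      lock(q->lock)                      qemu_net_queue_flush(q)
      QTAILQ_INSERT_TAIL(...)              lock(q->lock)
      unlock(q->lock)                      ... pop and deliver ...
                                           unlock(q->lock)

Without q->lock the two QTAILQ manipulations could interleave and
corrupt the packet list. The lock stays internal to net/queue.c, so
callers never take it directly.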

Signed-off-by: Liu Ping Fan <pingfank@linux.vnet.ibm.com>
---
 include/net/net.h |  1 +
 net/queue.c       | 13 +++++++++++++
 2 files changed, 14 insertions(+)

diff --git a/include/net/net.h b/include/net/net.h
index 43d85a1..2f72b26 100644
--- a/include/net/net.h
+++ b/include/net/net.h
@@ -5,6 +5,7 @@
 #include "qemu-common.h"
 #include "qapi/qmp/qdict.h"
 #include "qemu/option.h"
+#include "qemu/thread.h"
 #include "net/queue.h"
 #include "migration/vmstate.h"
 #include "qapi-types.h"
diff --git a/net/queue.c b/net/queue.c
index d508b7a..26399a1 100644
--- a/net/queue.c
+++ b/net/queue.c
@@ -56,6 +56,7 @@ struct NetQueue {
     uint32_t nq_maxlen;
     uint32_t nq_count;
 
+    QemuMutex lock;
     QTAILQ_HEAD(packets, NetPacket) packets;
 
     unsigned delivering : 1;
@@ -88,6 +89,7 @@ NetQueue *qemu_new_net_queue(NetClientState *nc)
     queue->nq_maxlen = 10000;
     queue->nq_count = 0;
 
+    qemu_mutex_init(&queue->lock);
     QTAILQ_INIT(&queue->packets);
 
     queue->delivering = 0;
@@ -116,7 +118,9 @@ static void qemu_net_queue_append(NetQueue *queue,
 {
     NetPacket *packet;
 
+    qemu_mutex_lock(&queue->lock);
     if (queue->nq_count >= queue->nq_maxlen && !sent_cb) {
+        qemu_mutex_unlock(&queue->lock);
         return; /* drop if queue full and no callback */
     }
     packet = g_malloc(sizeof(NetPacket) + size);
@@ -128,6 +132,7 @@ static void qemu_net_queue_append(NetQueue *queue,
 
     queue->nq_count++;
     QTAILQ_INSERT_TAIL(&queue->packets, packet, entry);
+    qemu_mutex_unlock(&queue->lock);
 }
 
 static void qemu_net_queue_append_iov(NetQueue *queue,
@@ -141,7 +146,9 @@ static void qemu_net_queue_append_iov(NetQueue *queue,
     size_t max_len = 0;
     int i;
 
+    qemu_mutex_lock(&queue->lock);
     if (queue->nq_count >= queue->nq_maxlen && !sent_cb) {
+        qemu_mutex_unlock(&queue->lock);
         return; /* drop if queue full and no callback */
     }
     for (i = 0; i < iovcnt; i++) {
@@ -163,6 +170,7 @@ static void qemu_net_queue_append_iov(NetQueue *queue,
 
     queue->nq_count++;
     QTAILQ_INSERT_TAIL(&queue->packets, packet, entry);
+    qemu_mutex_unlock(&queue->lock);
 }
 
 static ssize_t qemu_net_queue_deliver(NetQueue *queue,
@@ -273,6 +281,7 @@ void qemu_net_queue_purge(NetQueue *queue, NetClientState *from)
 {
     NetPacket *packet, *next;
 
+    qemu_mutex_lock(&queue->lock);
     QTAILQ_FOREACH_SAFE(packet, &queue->packets, entry, next) {
         if (packet->sender == from) {
             QTAILQ_REMOVE(&queue->packets, packet, entry);
@@ -280,10 +289,12 @@ void qemu_net_queue_purge(NetQueue *queue, NetClientState *from)
             g_free(packet);
         }
     }
+    qemu_mutex_unlock(&queue->lock);
 }
 
 bool qemu_net_queue_flush(NetQueue *queue)
 {
+    qemu_mutex_lock(&queue->lock);
     while (!QTAILQ_EMPTY(&queue->packets)) {
         NetPacket *packet;
         int ret;
@@ -300,6 +311,7 @@ bool qemu_net_queue_flush(NetQueue *queue)
         if (ret == 0) {
             queue->nq_count++;
             QTAILQ_INSERT_HEAD(&queue->packets, packet, entry);
+            qemu_mutex_unlock(&queue->lock);
             return false;
         }
 
@@ -309,5 +321,6 @@ bool qemu_net_queue_flush(NetQueue *queue)
 
         g_free(packet);
     }
+    qemu_mutex_unlock(&queue->lock);
     return true;
 }
-- 
1.8.1.4


* [Qemu-devel] [PATCH v3 4/7] net: introduce lock to protect NetClientState's peer's access
  2013-06-27  3:38 [Qemu-devel] [PATCH v3 0/7] port network layer onto glib Liu Ping Fan
                   ` (2 preceding siblings ...)
  2013-06-27  3:38 ` [Qemu-devel] [PATCH v3 3/7] net: introduce lock to protect NetQueue Liu Ping Fan
@ 2013-06-27  3:38 ` Liu Ping Fan
  2013-06-27  3:38 ` [Qemu-devel] [PATCH v3 5/7] net: introduce lock to protect net clients Liu Ping Fan
                   ` (2 subsequent siblings)
  6 siblings, 0 replies; 10+ messages in thread
From: Liu Ping Fan @ 2013-06-27  3:38 UTC (permalink / raw)
  To: qemu-devel; +Cc: mdroth, Stefan Hajnoczi

Introduce nc->peer_lock to shield against the race between readers and
the deleter of nc->peer. With it, once the deleter has finished, no new
qemu_send_packet_xx() call can append a packet to peer->send_queue, so
no new reference from packet->sender to nc can appear in
nc->peer->send_queue.

Lock ordering: take peer_lock outside, then NetQueue->lock inside.
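
A condensed sketch of the ordering on the send path, assembled from the
hunks below (illustration, not new code):

    qemu_mutex_lock(&sender->peer_lock);        /* outer lock */
    if (sender->link_down || !sender->peer) {
        qemu_mutex_unlock(&sender->peer_lock);
        return size;                            /* peer already detached */
    }
    queue = sender->peer->send_queue;
    sz = qemu_net_queue_send(queue, sender, flags, buf, size, sent_cb);
                                        /* takes NetQueue->lock inside */
    qemu_mutex_unlock(&sender->peer_lock);
    return sz;

The detach path likewise takes peer_lock before purging the peer's
queue, so peer_lock is always acquired outside NetQueue->lock and the
order cannot invert.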

Signed-off-by: Liu Ping Fan <pingfank@linux.vnet.ibm.com>
---
 include/net/net.h |  7 ++++++
 net/net.c         | 64 +++++++++++++++++++++++++++++++++++++++++++++++++++----
 2 files changed, 67 insertions(+), 4 deletions(-)

diff --git a/include/net/net.h b/include/net/net.h
index 2f72b26..27aa5cb 100644
--- a/include/net/net.h
+++ b/include/net/net.h
@@ -67,6 +67,11 @@ struct NetClientState {
     NetClientInfo *info;
     int link_down;
     QTAILQ_ENTRY(NetClientState) next;
+    /* Protect racy access to peer, but only between reader and writer;
+     * writer vs. writer races are still resolved under the big lock.
+     * Lock ordering: hold peer_lock outside, then NetQueue->lock inside.
+     */
+    QemuMutex peer_lock;
     NetClientState *peer;
     NetQueue *send_queue;
     char *model;
@@ -79,6 +84,7 @@ struct NetClientState {
 
 typedef struct NICState {
     NetClientState *ncs;
+    NetClientState **pending_peer;
     NICConf *conf;
     void *opaque;
     bool peer_deleted;
@@ -106,6 +112,7 @@ NetClientState *qemu_find_vlan_client_by_name(Monitor *mon, int vlan_id,
                                               const char *client_str);
 typedef void (*qemu_nic_foreach)(NICState *nic, void *opaque);
 void qemu_foreach_nic(qemu_nic_foreach func, void *opaque);
+int qemu_can_send_packet_nolock(NetClientState *sender);
 int qemu_can_send_packet(NetClientState *nc);
 ssize_t qemu_sendv_packet(NetClientState *nc, const struct iovec *iov,
                           int iovcnt);
diff --git a/net/net.c b/net/net.c
index 43a74e4..6fc50d8 100644
--- a/net/net.c
+++ b/net/net.c
@@ -204,6 +204,7 @@ static void qemu_net_client_setup(NetClientState *nc,
         nc->peer = peer;
         peer->peer = nc;
     }
+    qemu_mutex_init(&nc->peer_lock);
     QTAILQ_INSERT_TAIL(&net_clients, nc, next);
 
     nc->send_queue = qemu_new_net_queue(nc);
@@ -243,6 +244,7 @@ NICState *qemu_new_nic(NetClientInfo *info,
     nic->ncs = (void *)nic + info->size;
     nic->conf = conf;
     nic->opaque = opaque;
+    nic->pending_peer = g_malloc0(sizeof(NetClientState *) * queues);
 
     for (i = 0; i < queues; i++) {
         qemu_net_client_setup(&nic->ncs[i], info, peers[i], model, name,
@@ -301,6 +303,35 @@ static void qemu_free_net_client(NetClientState *nc)
     }
 }
 
+/* Eliminate the reference and synchronize with the exit of rx/tx actions.
+ * Also flush out the peer's queue.
+ */
+static void qemu_net_client_detach_flush(NetClientState *nc)
+{
+    NetClientState *peer;
+
+    qemu_mutex_lock(&nc->peer_lock);
+    peer = nc->peer;
+    qemu_mutex_unlock(&nc->peer_lock);
+
+    /* writer of peer's peer field */
+    if (peer) {
+        /* exclude the race with tx to @nc */
+        qemu_mutex_lock(&peer->peer_lock);
+        peer->peer = NULL;
+        qemu_mutex_unlock(&peer->peer_lock);
+    }
+
+    /* writer of self's peer field */
+    /* exclude the race with tx from @nc */
+    qemu_mutex_lock(&nc->peer_lock);
+    nc->peer = NULL;
+    if (peer) {
+        qemu_net_queue_purge(peer->send_queue, nc);
+    }
+    qemu_mutex_unlock(&nc->peer_lock);
+}
+
 void qemu_del_net_client(NetClientState *nc)
 {
     NetClientState *ncs[MAX_QUEUE_NUM];
@@ -331,7 +362,9 @@ void qemu_del_net_client(NetClientState *nc)
         }
 
         for (i = 0; i < queues; i++) {
+            qemu_net_client_detach_flush(ncs[i]);
             qemu_cleanup_net_client(ncs[i]);
+            nic->pending_peer[i] = ncs[i];
         }
 
         return;
@@ -340,6 +373,7 @@ void qemu_del_net_client(NetClientState *nc)
     assert(nc->info->type != NET_CLIENT_OPTIONS_KIND_NIC);
 
     for (i = 0; i < queues; i++) {
+        qemu_net_client_detach_flush(ncs[i]);
         qemu_cleanup_net_client(ncs[i]);
         qemu_free_net_client(ncs[i]);
     }
@@ -352,17 +386,19 @@ void qemu_del_nic(NICState *nic)
     /* If this is a peer NIC and peer has already been deleted, free it now. */
     if (nic->peer_deleted) {
         for (i = 0; i < queues; i++) {
-            qemu_free_net_client(qemu_get_subqueue(nic, i)->peer);
+            qemu_free_net_client(nic->pending_peer[i]);
         }
     }
 
     for (i = queues - 1; i >= 0; i--) {
         NetClientState *nc = qemu_get_subqueue(nic, i);
 
+        qemu_net_client_detach_flush(nc);
         qemu_cleanup_net_client(nc);
         qemu_free_net_client(nc);
     }
 
+    g_free(nic->pending_peer);
     g_free(nic);
 }
 
@@ -379,7 +415,7 @@ void qemu_foreach_nic(qemu_nic_foreach func, void *opaque)
     }
 }
 
-int qemu_can_send_packet(NetClientState *sender)
+int qemu_can_send_packet_nolock(NetClientState *sender)
 {
     if (!sender->peer) {
         return 1;
@@ -394,6 +430,16 @@ int qemu_can_send_packet(NetClientState *sender)
     return 1;
 }
 
+int qemu_can_send_packet(NetClientState *sender)
+{
+    int ret;
+
+    qemu_mutex_lock(&sender->peer_lock);
+    ret = qemu_can_send_packet_nolock(sender);
+    qemu_mutex_unlock(&sender->peer_lock);
+    return ret;
+}
+
 ssize_t qemu_deliver_packet(NetClientState *sender,
                             unsigned flags,
                             const uint8_t *data,
@@ -457,19 +503,24 @@ static ssize_t qemu_send_packet_async_with_flags(NetClientState *sender,
                                                  NetPacketSent *sent_cb)
 {
     NetQueue *queue;
+    ssize_t sz;
 
 #ifdef DEBUG_NET
     printf("qemu_send_packet_async:\n");
     hex_dump(stdout, buf, size);
 #endif
 
+    qemu_mutex_lock(&sender->peer_lock);
     if (sender->link_down || !sender->peer) {
+        qemu_mutex_unlock(&sender->peer_lock);
         return size;
     }
 
     queue = sender->peer->send_queue;
 
-    return qemu_net_queue_send(queue, sender, flags, buf, size, sent_cb);
+    sz = qemu_net_queue_send(queue, sender, flags, buf, size, sent_cb);
+    qemu_mutex_unlock(&sender->peer_lock);
+    return sz;
 }
 
 ssize_t qemu_send_packet_async(NetClientState *sender,
@@ -537,16 +588,21 @@ ssize_t qemu_sendv_packet_async(NetClientState *sender,
                                 NetPacketSent *sent_cb)
 {
     NetQueue *queue;
+    ssize_t sz;
 
+    qemu_mutex_lock(&sender->peer_lock);
     if (sender->link_down || !sender->peer) {
+        qemu_mutex_unlock(&sender->peer_lock);
         return iov_size(iov, iovcnt);
     }
 
     queue = sender->peer->send_queue;
 
-    return qemu_net_queue_send_iov(queue, sender,
+    sz = qemu_net_queue_send_iov(queue, sender,
                                    QEMU_NET_PACKET_FLAG_NONE,
                                    iov, iovcnt, sent_cb);
+    qemu_mutex_unlock(&sender->peer_lock);
+    return sz;
 }
 
 ssize_t
-- 
1.8.1.4


* [Qemu-devel] [PATCH v3 5/7] net: introduce lock to protect net clients
  2013-06-27  3:38 [Qemu-devel] [PATCH v3 0/7] port network layer onto glib Liu Ping Fan
                   ` (3 preceding siblings ...)
  2013-06-27  3:38 ` [Qemu-devel] [PATCH v3 4/7] net: introduce lock to protect NetClientState's peer's access Liu Ping Fan
@ 2013-06-27  3:38 ` Liu Ping Fan
  2013-07-04  2:48   ` liu ping fan
  2013-06-27  3:38 ` [Qemu-devel] [PATCH v3 6/7] net: using refcnt to manage NetClientState Liu Ping Fan
  2013-06-27  3:38 ` [Qemu-devel] [PATCH v3 7/7] net: hub use lock to protect ports list Liu Ping Fan
  6 siblings, 1 reply; 10+ messages in thread
From: Liu Ping Fan @ 2013-06-27  3:38 UTC (permalink / raw)
  To: qemu-devel; +Cc: mdroth, Stefan Hajnoczi

Signed-off-by: Liu Ping Fan <pingfank@linux.vnet.ibm.com>
---
 net/net.c | 20 ++++++++++++++++++++
 1 file changed, 20 insertions(+)

diff --git a/net/net.c b/net/net.c
index 6fc50d8..9f951b8 100644
--- a/net/net.c
+++ b/net/net.c
@@ -45,6 +45,7 @@
 # define CONFIG_NET_BRIDGE
 #endif
 
+static QemuMutex net_clients_lock;
 static QTAILQ_HEAD(, NetClientState) net_clients;
 
 int default_net = 1;
@@ -165,6 +166,7 @@ static char *assign_name(NetClientState *nc1, const char *model)
     char buf[256];
     int id = 0;
 
+    qemu_mutex_lock(&net_clients_lock);
     QTAILQ_FOREACH(nc, &net_clients, next) {
         if (nc == nc1) {
             continue;
@@ -173,6 +175,7 @@ static char *assign_name(NetClientState *nc1, const char *model)
             id++;
         }
     }
+    qemu_mutex_unlock(&net_clients_lock);
 
     snprintf(buf, sizeof(buf), "%s.%d", model, id);
 
@@ -205,7 +208,9 @@ static void qemu_net_client_setup(NetClientState *nc,
         peer->peer = nc;
     }
     qemu_mutex_init(&nc->peer_lock);
+    qemu_mutex_lock(&net_clients_lock);
     QTAILQ_INSERT_TAIL(&net_clients, nc, next);
+    qemu_mutex_unlock(&net_clients_lock);
 
     nc->send_queue = qemu_new_net_queue(nc);
     nc->destructor = destructor;
@@ -281,7 +286,9 @@ void *qemu_get_nic_opaque(NetClientState *nc)
 
 static void qemu_cleanup_net_client(NetClientState *nc)
 {
+    qemu_mutex_lock(&net_clients_lock);
     QTAILQ_REMOVE(&net_clients, nc, next);
+    qemu_mutex_unlock(&net_clients_lock);
 
     if (nc->info->cleanup) {
         nc->info->cleanup(nc);
@@ -406,6 +413,7 @@ void qemu_foreach_nic(qemu_nic_foreach func, void *opaque)
 {
     NetClientState *nc;
 
+    qemu_mutex_lock(&net_clients_lock);
     QTAILQ_FOREACH(nc, &net_clients, next) {
         if (nc->info->type == NET_CLIENT_OPTIONS_KIND_NIC) {
             if (nc->queue_index == 0) {
@@ -413,6 +421,7 @@ void qemu_foreach_nic(qemu_nic_foreach func, void *opaque)
             }
         }
     }
+    qemu_mutex_unlock(&net_clients_lock);
 }
 
 int qemu_can_send_packet_nolock(NetClientState *sender)
@@ -615,13 +624,16 @@ NetClientState *qemu_find_netdev(const char *id)
 {
     NetClientState *nc;
 
+    qemu_mutex_lock(&net_clients_lock);
     QTAILQ_FOREACH(nc, &net_clients, next) {
         if (nc->info->type == NET_CLIENT_OPTIONS_KIND_NIC)
             continue;
         if (!strcmp(nc->name, id)) {
+            qemu_mutex_unlock(&net_clients_lock);
             return nc;
         }
     }
+    qemu_mutex_unlock(&net_clients_lock);
 
     return NULL;
 }
@@ -632,6 +644,7 @@ int qemu_find_net_clients_except(const char *id, NetClientState **ncs,
     NetClientState *nc;
     int ret = 0;
 
+    qemu_mutex_lock(&net_clients_lock);
     QTAILQ_FOREACH(nc, &net_clients, next) {
         if (nc->info->type == type) {
             continue;
@@ -643,6 +656,7 @@ int qemu_find_net_clients_except(const char *id, NetClientState **ncs,
             ret++;
         }
     }
+    qemu_mutex_unlock(&net_clients_lock);
 
     return ret;
 }
@@ -1024,6 +1038,7 @@ void do_info_network(Monitor *mon, const QDict *qdict)
 
     net_hub_info(mon);
 
+    qemu_mutex_lock(&net_clients_lock);
     QTAILQ_FOREACH(nc, &net_clients, next) {
         peer = nc->peer;
         type = nc->info->type;
@@ -1041,6 +1056,7 @@ void do_info_network(Monitor *mon, const QDict *qdict)
             print_net_client(mon, peer);
         }
     }
+    qemu_mutex_unlock(&net_clients_lock);
 }
 
 void qmp_set_link(const char *name, bool up, Error **errp)
@@ -1094,6 +1110,7 @@ void net_cleanup(void)
             qemu_del_net_client(nc);
         }
     }
+    qemu_mutex_destroy(&net_clients_lock);
 }
 
 void net_check_clients(void)
@@ -1115,6 +1132,7 @@ void net_check_clients(void)
 
     net_hub_check_clients();
 
+    qemu_mutex_lock(&net_clients_lock);
     QTAILQ_FOREACH(nc, &net_clients, next) {
         if (!nc->peer) {
             fprintf(stderr, "Warning: %s %s has no peer\n",
@@ -1122,6 +1140,7 @@ void net_check_clients(void)
                     "nic" : "netdev", nc->name);
         }
     }
+    qemu_mutex_unlock(&net_clients_lock);
 
     /* Check that all NICs requested via -net nic actually got created.
      * NICs created via -device don't need to be checked here because
@@ -1179,6 +1198,7 @@ int net_init_clients(void)
 #endif
     }
 
+    qemu_mutex_init(&net_clients_lock);
     QTAILQ_INIT(&net_clients);
 
     if (qemu_opts_foreach(qemu_find_opts("netdev"), net_init_netdev, NULL, 1) == -1)
-- 
1.8.1.4


* [Qemu-devel] [PATCH v3 6/7] net: using refcnt to manage NetClientState
  2013-06-27  3:38 [Qemu-devel] [PATCH v3 0/7] port network layer onto glib Liu Ping Fan
                   ` (4 preceding siblings ...)
  2013-06-27  3:38 ` [Qemu-devel] [PATCH v3 5/7] net: introduce lock to protect net clients Liu Ping Fan
@ 2013-06-27  3:38 ` Liu Ping Fan
  2013-06-27  3:38 ` [Qemu-devel] [PATCH v3 7/7] net: hub use lock to protect ports list Liu Ping Fan
  6 siblings, 0 replies; 10+ messages in thread
From: Liu Ping Fan @ 2013-06-27  3:38 UTC (permalink / raw)
  To: qemu-devel; +Cc: mdroth, Stefan Hajnoczi

With a refcount, users of a NetClientState can safely run concurrently
with its deleter.
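
A usage sketch of the new rules, based on the hunks below: lookups such
as qemu_find_netdev() now return with a reference held, so the caller
owns a reference it must drop when done. The helper name here is
illustrative only:

    static void example_delete_netdev(const char *id)
    {
        NetClientState *nc = qemu_find_netdev(id); /* ref held on success */

        if (!nc) {
            return;
        }
        qemu_del_net_client(nc);  /* detach, flush and drop internal refs */
        netclient_unref(nc);      /* drop the reference from the lookup */
    }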

Signed-off-by: Liu Ping Fan <pingfank@linux.vnet.ibm.com>
---
 hw/core/qdev-properties-system.c | 35 +++++++++++++++++++++++++++++++++++
 include/net/net.h                |  3 +++
 net/hub.c                        |  3 +++
 net/net.c                        | 31 ++++++++++++++++++++++++++++---
 net/queue.c                      |  3 +++
 net/slirp.c                      |  3 ++-
 6 files changed, 74 insertions(+), 4 deletions(-)

diff --git a/hw/core/qdev-properties-system.c b/hw/core/qdev-properties-system.c
index 0eada32..6632a24 100644
--- a/hw/core/qdev-properties-system.c
+++ b/hw/core/qdev-properties-system.c
@@ -181,6 +181,7 @@ static int parse_netdev(DeviceState *dev, const char *str, void **ptr)
     int queues, i = 0;
     int ret;
 
+    /* On return, a reference is held on each peer */
     queues = qemu_find_net_clients_except(str, peers,
                                           NET_CLIENT_OPTIONS_KIND_NIC,
                                           MAX_QUEUE_NUM);
@@ -236,10 +237,28 @@ static void set_netdev(Object *obj, Visitor *v, void *opaque,
     set_pointer(obj, v, opaque, parse_netdev, name, errp);
 }
 
+static void release_netdev(Object *obj, const char *name, void *opaque)
+{
+    DeviceState *dev = DEVICE(obj);
+    Property *prop = opaque;
+    NICPeers *peers_ptr = qdev_get_prop_ptr(dev, prop);
+    NICConf *conf = container_of(peers_ptr, NICConf, peers);
+    NetClientState **ptr = peers_ptr->ncs;
+    int i;
+
+    for (i = 0; i < conf->queues; i++) {
+        if (ptr[i]) {
+            netclient_unref(ptr[i]);
+            ptr[i] = NULL;
+        }
+    }
+}
+
 PropertyInfo qdev_prop_netdev = {
     .name  = "netdev",
     .get   = get_netdev,
     .set   = set_netdev,
+    .release = release_netdev,
 };
 
 /* --- vlan --- */
@@ -302,6 +321,7 @@ static void set_vlan(Object *obj, Visitor *v, void *opaque,
         return;
     }
 
+    /* takes a reference, released when the property is unset */
     hubport = net_hub_port_find(id);
     if (!hubport) {
         error_set(errp, QERR_INVALID_PARAMETER_VALUE,
@@ -311,11 +331,26 @@ static void set_vlan(Object *obj, Visitor *v, void *opaque,
     *ptr = hubport;
 }
 
+static void release_vlan(Object *obj, const char *name, void *opaque)
+{
+    DeviceState *dev = DEVICE(obj);
+    Property *prop = opaque;
+    NICPeers *peers_ptr = qdev_get_prop_ptr(dev, prop);
+    /* With the vlan property, the peer cannot be multi-queue */
+    NetClientState **ptr = &peers_ptr->ncs[0];
+
+    if (*ptr) {
+        netclient_unref(*ptr);
+        *ptr = NULL;
+    }
+}
+
 PropertyInfo qdev_prop_vlan = {
     .name  = "vlan",
     .print = print_vlan,
     .get   = get_vlan,
     .set   = set_vlan,
+    .release = release_vlan,
 };
 
 int qdev_prop_set_drive(DeviceState *dev, const char *name,
diff --git a/include/net/net.h b/include/net/net.h
index 27aa5cb..9ab1fc7 100644
--- a/include/net/net.h
+++ b/include/net/net.h
@@ -64,6 +64,7 @@ typedef struct NetClientInfo {
 } NetClientInfo;
 
 struct NetClientState {
+    int ref;
     NetClientInfo *info;
     int link_down;
     QTAILQ_ENTRY(NetClientState) next;
@@ -93,6 +94,8 @@ typedef struct NICState {
 NetClientState *qemu_find_netdev(const char *id);
 int qemu_find_net_clients_except(const char *id, NetClientState **ncs,
                                  NetClientOptionsKind type, int max);
+void netclient_ref(NetClientState *nc);
+void netclient_unref(NetClientState *nc);
 NetClientState *qemu_new_net_client(NetClientInfo *info,
                                     NetClientState *peer,
                                     const char *model,
diff --git a/net/hub.c b/net/hub.c
index df32074..9c6c559 100644
--- a/net/hub.c
+++ b/net/hub.c
@@ -201,6 +201,7 @@ NetClientState *net_hub_find_client_by_name(int hub_id, const char *name)
                 peer = port->nc.peer;
 
                 if (peer && strcmp(peer->name, name) == 0) {
+                    netclient_ref(peer);
                     return peer;
                 }
             }
@@ -223,6 +224,7 @@ NetClientState *net_hub_port_find(int hub_id)
             QLIST_FOREACH(port, &hub->ports, next) {
                 nc = port->nc.peer;
                 if (!nc) {
+                    netclient_ref(&port->nc);
                     return &(port->nc);
                 }
             }
@@ -231,6 +233,7 @@ NetClientState *net_hub_port_find(int hub_id)
     }
 
     nc = net_hub_add_port(hub_id, NULL);
+    netclient_ref(nc);
     return nc;
 }
 
diff --git a/net/net.c b/net/net.c
index 9f951b8..1ff41a7 100644
--- a/net/net.c
+++ b/net/net.c
@@ -206,6 +206,11 @@ static void qemu_net_client_setup(NetClientState *nc,
         assert(!peer->peer);
         nc->peer = peer;
         peer->peer = nc;
+        /* This pair of references reflects that the peers can see each
+         * other; both are released together in qemu_net_client_detach_flush().
+         */
+        netclient_ref(peer);
+        netclient_ref(nc);
     }
     qemu_mutex_init(&nc->peer_lock);
     qemu_mutex_lock(&net_clients_lock);
@@ -226,6 +231,7 @@ NetClientState *qemu_new_net_client(NetClientInfo *info,
     assert(info->size >= sizeof(NetClientState));
 
     nc = g_malloc0(info->size);
+    netclient_ref(nc);
     qemu_net_client_setup(nc, info, peer, model, name,
                           qemu_net_client_destructor);
 
@@ -310,6 +316,18 @@ static void qemu_free_net_client(NetClientState *nc)
     }
 }
 
+void netclient_ref(NetClientState *nc)
+{
+    __sync_add_and_fetch(&nc->ref, 1);
+}
+
+void netclient_unref(NetClientState *nc)
+{
+    if (__sync_sub_and_fetch(&nc->ref, 1) == 0) {
+        qemu_free_net_client(nc);
+    }
+}
+
 /* Eliminate the reference and synchronize with the exit of rx/tx actions.
  * Also flush out the peer's queue.
  */
@@ -335,8 +353,10 @@ static void qemu_net_client_detach_flush(NetClientState *nc)
     nc->peer = NULL;
     if (peer) {
         qemu_net_queue_purge(peer->send_queue, nc);
+        netclient_unref(peer);
     }
     qemu_mutex_unlock(&nc->peer_lock);
+    netclient_unref(nc);
 }
 
 void qemu_del_net_client(NetClientState *nc)
@@ -382,7 +402,7 @@ void qemu_del_net_client(NetClientState *nc)
     for (i = 0; i < queues; i++) {
         qemu_net_client_detach_flush(ncs[i]);
         qemu_cleanup_net_client(ncs[i]);
-        qemu_free_net_client(ncs[i]);
+        netclient_unref(ncs[i]);
     }
 }
 
@@ -393,7 +413,7 @@ void qemu_del_nic(NICState *nic)
     /* If this is a peer NIC and peer has already been deleted, free it now. */
     if (nic->peer_deleted) {
         for (i = 0; i < queues; i++) {
-            qemu_free_net_client(nic->pending_peer[i]);
+            netclient_unref(nic->pending_peer[i]);
         }
     }
 
@@ -402,7 +422,7 @@ void qemu_del_nic(NICState *nic)
 
         qemu_net_client_detach_flush(nc);
         qemu_cleanup_net_client(nc);
-        qemu_free_net_client(nc);
+        netclient_unref(nc);
     }
 
     g_free(nic->pending_peer);
@@ -629,6 +649,7 @@ NetClientState *qemu_find_netdev(const char *id)
         if (nc->info->type == NET_CLIENT_OPTIONS_KIND_NIC)
             continue;
         if (!strcmp(nc->name, id)) {
+            netclient_ref(nc);
             qemu_mutex_unlock(&net_clients_lock);
             return nc;
         }
@@ -652,6 +673,7 @@ int qemu_find_net_clients_except(const char *id, NetClientState **ncs,
         if (!strcmp(nc->name, id)) {
             if (ret < max) {
                 ncs[ret] = nc;
+                netclient_ref(nc);
             }
             ret++;
         }
@@ -962,9 +984,11 @@ void net_host_device_remove(Monitor *mon, const QDict *qdict)
     }
     if (!net_host_check_device(nc->model)) {
         monitor_printf(mon, "invalid host network device %s\n", device);
+        netclient_unref(nc);
         return;
     }
     qemu_del_net_client(nc);
+    netclient_unref(nc);
 }
 
 void netdev_add(QemuOpts *opts, Error **errp)
@@ -1020,6 +1044,7 @@ void qmp_netdev_del(const char *id, Error **errp)
     }
 
     qemu_del_net_client(nc);
+    netclient_unref(nc);
     qemu_opts_del(opts);
 }
 
diff --git a/net/queue.c b/net/queue.c
index 26399a1..b6905c1 100644
--- a/net/queue.c
+++ b/net/queue.c
@@ -74,6 +74,7 @@ static void qemu_net_queue_send_bh(void *opaque)
 
     qemu_net_queue_flush(queue);
     qemu_bh_delete(q_bh->bh);
+    netclient_unref(q_bh->nc);
     g_slice_free(NetQueueBH, q_bh);
 }
 
@@ -222,6 +223,7 @@ ssize_t qemu_net_queue_send(NetQueue *queue,
             NetQueueBH *que_bh = g_slice_new(NetQueueBH);
             que_bh->bh = qemu_bh_new(qemu_net_queue_send_bh, que_bh);
             que_bh->nc = queue->nc;
+            netclient_ref(queue->nc);
             qemu_bh_schedule(que_bh->bh);
             --tls_var(net_enter);
         }
@@ -259,6 +261,7 @@ ssize_t qemu_net_queue_send_iov(NetQueue *queue,
             NetQueueBH *que_bh = g_slice_new(NetQueueBH);
             que_bh->bh = qemu_bh_new(qemu_net_queue_send_bh, que_bh);
             que_bh->nc = queue->nc;
+            netclient_ref(queue->nc);
             qemu_bh_schedule(que_bh->bh);
             --tls_var(net_enter);
         }
diff --git a/net/slirp.c b/net/slirp.c
index b3f35d5..e541548 100644
--- a/net/slirp.c
+++ b/net/slirp.c
@@ -346,7 +346,7 @@ void net_slirp_hostfwd_remove(Monitor *mon, const QDict *qdict)
 
     err = slirp_remove_hostfwd(QTAILQ_FIRST(&slirp_stacks)->slirp, is_udp,
                                host_addr, host_port);
-
+    netclient_unref(&s->nc);
     monitor_printf(mon, "host forwarding rule for %s %s\n", src_str,
                    err ? "not found" : "removed");
     return;
@@ -437,6 +437,7 @@ void net_slirp_hostfwd_add(Monitor *mon, const QDict *qdict)
     }
     if (s) {
         slirp_hostfwd(s, redir_str, 0);
+        netclient_unref(&s->nc);
     }
 
 }
-- 
1.8.1.4


* [Qemu-devel] [PATCH v3 7/7] net: hub use lock to protect ports list
  2013-06-27  3:38 [Qemu-devel] [PATCH v3 0/7] port network layer onto glib Liu Ping Fan
                   ` (5 preceding siblings ...)
  2013-06-27  3:38 ` [Qemu-devel] [PATCH v3 6/7] net: using refcnt to manage NetClientState Liu Ping Fan
@ 2013-06-27  3:38 ` Liu Ping Fan
  6 siblings, 0 replies; 10+ messages in thread
From: Liu Ping Fan @ 2013-06-27  3:38 UTC (permalink / raw)
  To: qemu-devel; +Cc: mdroth, Stefan Hajnoczi

Hub ports will run on multiple threads, so use a lock to protect the
hub's ports list.
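
A condensed sketch of the pattern applied throughout hub.c, taken from
the hunks below: every walk of hub->ports runs under ports_lock, and
every early return unlocks first. Holding the lock across
qemu_send_packet() relies on patch 2 deferring a nested delivery to a
BH instead of recursing while the lock is held:

    qemu_mutex_lock(&hub->ports_lock);
    QLIST_FOREACH(port, &hub->ports, next) {
        if (port == source_port) {
            continue;
        }
        qemu_send_packet(&port->nc, buf, len);  /* may defer via BH */
    }
    qemu_mutex_unlock(&hub->ports_lock);
    return len;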

Signed-off-by: Liu Ping Fan <pingfank@linux.vnet.ibm.com>
---
 net/hub.c | 25 ++++++++++++++++++++++++-
 1 file changed, 24 insertions(+), 1 deletion(-)

diff --git a/net/hub.c b/net/hub.c
index 9c6c559..2970f8e 100644
--- a/net/hub.c
+++ b/net/hub.c
@@ -37,6 +37,7 @@ struct NetHub {
     int id;
     QLIST_ENTRY(NetHub) next;
     int num_ports;
+    QemuMutex ports_lock;
     QLIST_HEAD(, NetHubPort) ports;
 };
 
@@ -47,6 +48,7 @@ static ssize_t net_hub_receive(NetHub *hub, NetHubPort *source_port,
 {
     NetHubPort *port;
 
+    qemu_mutex_lock(&hub->ports_lock);
     QLIST_FOREACH(port, &hub->ports, next) {
         if (port == source_port) {
             continue;
@@ -54,6 +56,7 @@ static ssize_t net_hub_receive(NetHub *hub, NetHubPort *source_port,
 
         qemu_send_packet(&port->nc, buf, len);
     }
+    qemu_mutex_unlock(&hub->ports_lock);
     return len;
 }
 
@@ -63,6 +66,7 @@ static ssize_t net_hub_receive_iov(NetHub *hub, NetHubPort *source_port,
     NetHubPort *port;
     ssize_t len = iov_size(iov, iovcnt);
 
+    qemu_mutex_lock(&hub->ports_lock);
     QLIST_FOREACH(port, &hub->ports, next) {
         if (port == source_port) {
             continue;
@@ -70,6 +74,7 @@ static ssize_t net_hub_receive_iov(NetHub *hub, NetHubPort *source_port,
 
         qemu_sendv_packet(&port->nc, iov, iovcnt);
     }
+    qemu_mutex_unlock(&hub->ports_lock);
     return len;
 }
 
@@ -80,6 +85,7 @@ static NetHub *net_hub_new(int id)
     hub = g_malloc(sizeof(*hub));
     hub->id = id;
     hub->num_ports = 0;
+    qemu_mutex_init(&hub->ports_lock);
     QLIST_INIT(&hub->ports);
 
     QLIST_INSERT_HEAD(&hubs, hub, next);
@@ -93,16 +99,19 @@ static int net_hub_port_can_receive(NetClientState *nc)
     NetHubPort *src_port = DO_UPCAST(NetHubPort, nc, nc);
     NetHub *hub = src_port->hub;
 
+    qemu_mutex_lock(&hub->ports_lock);
     QLIST_FOREACH(port, &hub->ports, next) {
         if (port == src_port) {
             continue;
         }
 
         if (qemu_can_send_packet(&port->nc)) {
+            qemu_mutex_unlock(&hub->ports_lock);
             return 1;
         }
     }
 
+    qemu_mutex_unlock(&hub->ports_lock);
     return 0;
 }
 
@@ -155,8 +164,9 @@ static NetHubPort *net_hub_port_new(NetHub *hub, const char *name)
     port = DO_UPCAST(NetHubPort, nc, nc);
     port->id = id;
     port->hub = hub;
-
+    qemu_mutex_lock(&hub->ports_lock);
     QLIST_INSERT_HEAD(&hub->ports, port, next);
+    qemu_mutex_unlock(&hub->ports_lock);
 
     return port;
 }
@@ -197,14 +207,17 @@ NetClientState *net_hub_find_client_by_name(int hub_id, const char *name)
 
     QLIST_FOREACH(hub, &hubs, next) {
         if (hub->id == hub_id) {
+            qemu_mutex_lock(&hub->ports_lock);
             QLIST_FOREACH(port, &hub->ports, next) {
                 peer = port->nc.peer;
 
                 if (peer && strcmp(peer->name, name) == 0) {
                     netclient_ref(peer);
+                    qemu_mutex_unlock(&hub->ports_lock);
                     return peer;
                 }
             }
+            qemu_mutex_unlock(&hub->ports_lock);
         }
     }
     return NULL;
@@ -221,13 +234,16 @@ NetClientState *net_hub_port_find(int hub_id)
 
     QLIST_FOREACH(hub, &hubs, next) {
         if (hub->id == hub_id) {
+            qemu_mutex_lock(&hub->ports_lock);
             QLIST_FOREACH(port, &hub->ports, next) {
                 nc = port->nc.peer;
                 if (!nc) {
                     netclient_ref(&port->nc);
+                    qemu_mutex_unlock(&hub->ports_lock);
                     return &(port->nc);
                 }
             }
+            qemu_mutex_unlock(&hub->ports_lock);
             break;
         }
     }
@@ -247,12 +263,14 @@ void net_hub_info(Monitor *mon)
 
     QLIST_FOREACH(hub, &hubs, next) {
         monitor_printf(mon, "hub %d\n", hub->id);
+        qemu_mutex_lock(&hub->ports_lock);
         QLIST_FOREACH(port, &hub->ports, next) {
             if (port->nc.peer) {
                 monitor_printf(mon, " \\ ");
                 print_net_client(mon, port->nc.peer);
             }
         }
+        qemu_mutex_unlock(&hub->ports_lock);
     }
 }
 
@@ -309,6 +327,7 @@ void net_hub_check_clients(void)
     QLIST_FOREACH(hub, &hubs, next) {
         int has_nic = 0, has_host_dev = 0;
 
+        qemu_mutex_lock(&hub->ports_lock);
         QLIST_FOREACH(port, &hub->ports, next) {
             peer = port->nc.peer;
             if (!peer) {
@@ -331,6 +350,7 @@ void net_hub_check_clients(void)
                 break;
             }
         }
+        qemu_mutex_unlock(&hub->ports_lock);
         if (has_host_dev && !has_nic) {
             fprintf(stderr, "Warning: vlan %d with no nics\n", hub->id);
         }
@@ -346,12 +366,15 @@ bool net_hub_flush(NetClientState *nc)
 {
     NetHubPort *port;
     NetHubPort *source_port = DO_UPCAST(NetHubPort, nc, nc);
+    NetHub *hub = source_port->hub;
     int ret = 0;
 
+    qemu_mutex_lock(&hub->ports_lock);
     QLIST_FOREACH(port, &source_port->hub->ports, next) {
         if (port != source_port) {
             ret += qemu_net_queue_flush(port->nc.send_queue);
         }
     }
+    qemu_mutex_unlock(&hub->ports_lock);
     return ret ? true : false;
 }
-- 
1.8.1.4


* Re: [Qemu-devel] [PATCH v3 2/7] net: distinguish & defer nested call to BH
  2013-06-27  3:38 ` [Qemu-devel] [PATCH v3 2/7] net: distinguish & defer nested call to BH Liu Ping Fan
@ 2013-07-04  2:40   ` liu ping fan
  0 siblings, 0 replies; 10+ messages in thread
From: liu ping fan @ 2013-07-04  2:40 UTC (permalink / raw)
  To: qemu-devel; +Cc: Paolo Bonzini, mdroth, Stefan Hajnoczi

On Thu, Jun 27, 2013 at 11:38 AM, Liu Ping Fan <qemulist@gmail.com> wrote:
> When the network layer can run on multiple threads, a nested call caused
> by nc->receive() can raise issues such as deadlock, so postpone it to a
> BH. We distinguish the nested call "A->B->A" from the parallel call
> "A->B, B->A" by using the TLS variable net_enter. Here A and B each
> stand for a NetClientState.
>
As Paolo said, that commit log was poor. Here is an abstract of the
scenario and how this patch fixes it.

Once the network layer can run on multiple threads, a nested call caused
by NetClientState->receive() can raise issues such as recursive locking.
For example, a packet fed to net_slirp_receive() will finally cause the
slirp nc to call its peer's receive method. Hence, if there is a local
lock on the frontend nc's packet-sending path, this gives rise to a
recursive-lock issue. By pushing the nested call to receive() out to a
BH, we can avoid it. (As for slirp itself, to avoid an AB-BA deadlock
across frontend and backend, we will ensure that slirp calls its peer's
receive only after dropping whatever locks the slirp system holds.)

We distinguish the nested call "A->B->A" from the parallel call
"A->B, B->A" by using the TLS variable net_enter. Here A and B each
stand for a NetClientState.
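
A sketch of the slirp scenario, with a hypothetical per-nc tx lock to
make the recursion concrete (the lock is what a multi-threaded frontend
would hold; it is illustrative, not existing code):

    frontend tx path:
      lock(front->tx_lock)
        qemu_send_packet(front, ...)        /* net_enter becomes 1 */
          -> net_slirp_receive(...)         /* slirp handles the packet */
            -> qemu_send_packet(&slirp->nc) /* slirp answers at once */
                 net_enter is 2: append packet, schedule BH, return
      unlock(front->tx_lock)
    ...
    main loop, later:
      BH -> qemu_net_queue_flush() -> front->receive()
        lock(front->tx_lock)                /* safe: no longer recursive */

Without the deferral, the inner call would reach front->receive() and
try to take front->tx_lock while this thread already holds it.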

Thanks and regards
Pingfan

> Signed-off-by: Liu Ping Fan <pingfank@linux.vnet.ibm.com>
> ---
>  net/queue.c | 48 ++++++++++++++++++++++++++++++++++++++++++++++--
>  1 file changed, 46 insertions(+), 2 deletions(-)
>
> diff --git a/net/queue.c b/net/queue.c
> index 1937345..d508b7a 100644
> --- a/net/queue.c
> +++ b/net/queue.c
> @@ -24,6 +24,9 @@
>  #include "net/queue.h"
>  #include "qemu/queue.h"
>  #include "net/net.h"
> +#include "block/aio.h"
> +#include "qemu/main-loop.h"
> +#include "qemu/tls.h"
>
>  /* The delivery handler may only return zero if it will call
>   * qemu_net_queue_flush() when it determines that it is once again able
> @@ -58,6 +61,23 @@ struct NetQueue {
>      unsigned delivering : 1;
>  };
>
> +typedef struct NetQueueBH {
> +    QEMUBH *bh;
> +    NetClientState *nc;
> +} NetQueueBH;
> +
> +static void qemu_net_queue_send_bh(void *opaque)
> +{
> +    NetQueueBH *q_bh = opaque;
> +    NetQueue *queue = q_bh->nc->send_queue;
> +
> +    qemu_net_queue_flush(queue);
> +    qemu_bh_delete(q_bh->bh);
> +    g_slice_free(NetQueueBH, q_bh);
> +}
> +
> +DEFINE_TLS(int, net_enter) = 0;
> +
>  NetQueue *qemu_new_net_queue(NetClientState *nc)
>  {
>      NetQueue *queue;
> @@ -183,9 +203,20 @@ ssize_t qemu_net_queue_send(NetQueue *queue,
>                              NetPacketSent *sent_cb)
>  {
>      ssize_t ret;
> +    int reenter;
>
> -    if (queue->delivering || !qemu_can_send_packet(sender)) {
> +    reenter = ++tls_var(net_enter);
> +    assert(reenter <= 2);
> +    if (queue->delivering || !qemu_can_send_packet(sender) ||
> +        reenter == 2) {
>          qemu_net_queue_append(queue, sender, flags, data, size, sent_cb);
> +        if (reenter == 2) {
> +            NetQueueBH *que_bh = g_slice_new(NetQueueBH);
> +            que_bh->bh = qemu_bh_new(qemu_net_queue_send_bh, que_bh);
> +            que_bh->nc = queue->nc;
> +            qemu_bh_schedule(que_bh->bh);
> +            --tls_var(net_enter);
> +        }
>          return 0;
>      }
>
> @@ -196,6 +227,7 @@ ssize_t qemu_net_queue_send(NetQueue *queue,
>      }
>
>      qemu_net_queue_flush(queue);
> +    --tls_var(net_enter);
>
>      return ret;
>  }
> @@ -208,9 +240,20 @@ ssize_t qemu_net_queue_send_iov(NetQueue *queue,
>                                  NetPacketSent *sent_cb)
>  {
>      ssize_t ret;
> +    int reenter;
>
> -    if (queue->delivering || !qemu_can_send_packet(sender)) {
> +    reenter = ++tls_var(net_enter);
> +    assert(reenter <= 2);
> +    if (queue->delivering || !qemu_can_send_packet(sender) ||
> +        reenter == 2) {
>          qemu_net_queue_append_iov(queue, sender, flags, iov, iovcnt, sent_cb);
> +        if (reenter == 2) {
> +            NetQueueBH *que_bh = g_slice_new(NetQueueBH);
> +            que_bh->bh = qemu_bh_new(qemu_net_queue_send_bh, que_bh);
> +            que_bh->nc = queue->nc;
> +            qemu_bh_schedule(que_bh->bh);
> +            --tls_var(net_enter);
> +        }
>          return 0;
>      }
>
> @@ -221,6 +264,7 @@ ssize_t qemu_net_queue_send_iov(NetQueue *queue,
>      }
>
>      qemu_net_queue_flush(queue);
> +    --tls_var(net_enter);
>
>      return ret;
>  }
> --
> 1.8.1.4
>


* Re: [Qemu-devel] [PATCH v3 5/7] net: introduce lock to protect net clients
  2013-06-27  3:38 ` [Qemu-devel] [PATCH v3 5/7] net: introduce lock to protect net clients Liu Ping Fan
@ 2013-07-04  2:48   ` liu ping fan
  0 siblings, 0 replies; 10+ messages in thread
From: liu ping fan @ 2013-07-04  2:48 UTC (permalink / raw)
  To: qemu-devel; +Cc: Paolo Bonzini, mdroth, Stefan Hajnoczi

On Thu, Jun 27, 2013 at 11:38 AM, Liu Ping Fan <qemulist@gmail.com> wrote:
Appending to the commit log:

NetClientStates are added to and removed from net_clients in parallel,
so use a lock to protect the list.

Stefanha,
I will add locking in net_cleanup() as discussed in another thread.


Regards,
Pingfan

> Signed-off-by: Liu Ping Fan <pingfank@linux.vnet.ibm.com>
> ---
>  net/net.c | 20 ++++++++++++++++++++
>  1 file changed, 20 insertions(+)
>
> diff --git a/net/net.c b/net/net.c
> index 6fc50d8..9f951b8 100644
> --- a/net/net.c
> +++ b/net/net.c
> @@ -45,6 +45,7 @@
>  # define CONFIG_NET_BRIDGE
>  #endif
>
> +static QemuMutex net_clients_lock;
>  static QTAILQ_HEAD(, NetClientState) net_clients;
>
>  int default_net = 1;
> @@ -165,6 +166,7 @@ static char *assign_name(NetClientState *nc1, const char *model)
>      char buf[256];
>      int id = 0;
>
> +    qemu_mutex_lock(&net_clients_lock);
>      QTAILQ_FOREACH(nc, &net_clients, next) {
>          if (nc == nc1) {
>              continue;
> @@ -173,6 +175,7 @@ static char *assign_name(NetClientState *nc1, const char *model)
>              id++;
>          }
>      }
> +    qemu_mutex_unlock(&net_clients_lock);
>
>      snprintf(buf, sizeof(buf), "%s.%d", model, id);
>
> @@ -205,7 +208,9 @@ static void qemu_net_client_setup(NetClientState *nc,
>          peer->peer = nc;
>      }
>      qemu_mutex_init(&nc->peer_lock);
> +    qemu_mutex_lock(&net_clients_lock);
>      QTAILQ_INSERT_TAIL(&net_clients, nc, next);
> +    qemu_mutex_unlock(&net_clients_lock);
>
>      nc->send_queue = qemu_new_net_queue(nc);
>      nc->destructor = destructor;
> @@ -281,7 +286,9 @@ void *qemu_get_nic_opaque(NetClientState *nc)
>
>  static void qemu_cleanup_net_client(NetClientState *nc)
>  {
> +    qemu_mutex_lock(&net_clients_lock);
>      QTAILQ_REMOVE(&net_clients, nc, next);
> +    qemu_mutex_unlock(&net_clients_lock);
>
>      if (nc->info->cleanup) {
>          nc->info->cleanup(nc);
> @@ -406,6 +413,7 @@ void qemu_foreach_nic(qemu_nic_foreach func, void *opaque)
>  {
>      NetClientState *nc;
>
> +    qemu_mutex_lock(&net_clients_lock);
>      QTAILQ_FOREACH(nc, &net_clients, next) {
>          if (nc->info->type == NET_CLIENT_OPTIONS_KIND_NIC) {
>              if (nc->queue_index == 0) {
> @@ -413,6 +421,7 @@ void qemu_foreach_nic(qemu_nic_foreach func, void *opaque)
>              }
>          }
>      }
> +    qemu_mutex_unlock(&net_clients_lock);
>  }
>
>  int qemu_can_send_packet_nolock(NetClientState *sender)
> @@ -615,13 +624,16 @@ NetClientState *qemu_find_netdev(const char *id)
>  {
>      NetClientState *nc;
>
> +    qemu_mutex_lock(&net_clients_lock);
>      QTAILQ_FOREACH(nc, &net_clients, next) {
>          if (nc->info->type == NET_CLIENT_OPTIONS_KIND_NIC)
>              continue;
>          if (!strcmp(nc->name, id)) {
> +            qemu_mutex_unlock(&net_clients_lock);
>              return nc;
>          }
>      }
> +    qemu_mutex_unlock(&net_clients_lock);
>
>      return NULL;
>  }
> @@ -632,6 +644,7 @@ int qemu_find_net_clients_except(const char *id, NetClientState **ncs,
>      NetClientState *nc;
>      int ret = 0;
>
> +    qemu_mutex_lock(&net_clients_lock);
>      QTAILQ_FOREACH(nc, &net_clients, next) {
>          if (nc->info->type == type) {
>              continue;
> @@ -643,6 +656,7 @@ int qemu_find_net_clients_except(const char *id, NetClientState **ncs,
>              ret++;
>          }
>      }
> +    qemu_mutex_unlock(&net_clients_lock);
>
>      return ret;
>  }
> @@ -1024,6 +1038,7 @@ void do_info_network(Monitor *mon, const QDict *qdict)
>
>      net_hub_info(mon);
>
> +    qemu_mutex_lock(&net_clients_lock);
>      QTAILQ_FOREACH(nc, &net_clients, next) {
>          peer = nc->peer;
>          type = nc->info->type;
> @@ -1041,6 +1056,7 @@ void do_info_network(Monitor *mon, const QDict *qdict)
>              print_net_client(mon, peer);
>          }
>      }
> +    qemu_mutex_unlock(&net_clients_lock);
>  }
>
>  void qmp_set_link(const char *name, bool up, Error **errp)
> @@ -1094,6 +1110,7 @@ void net_cleanup(void)
>              qemu_del_net_client(nc);
>          }
>      }
> +    qemu_mutex_destroy(&net_clients_lock);
>  }
>
>  void net_check_clients(void)
> @@ -1115,6 +1132,7 @@ void net_check_clients(void)
>
>      net_hub_check_clients();
>
> +    qemu_mutex_lock(&net_clients_lock);
>      QTAILQ_FOREACH(nc, &net_clients, next) {
>          if (!nc->peer) {
>              fprintf(stderr, "Warning: %s %s has no peer\n",
> @@ -1122,6 +1140,7 @@ void net_check_clients(void)
>                      "nic" : "netdev", nc->name);
>          }
>      }
> +    qemu_mutex_unlock(&net_clients_lock);
>
>      /* Check that all NICs requested via -net nic actually got created.
>       * NICs created via -device don't need to be checked here because
> @@ -1179,6 +1198,7 @@ int net_init_clients(void)
>  #endif
>      }
>
> +    qemu_mutex_init(&net_clients_lock);
>      QTAILQ_INIT(&net_clients);
>
>      if (qemu_opts_foreach(qemu_find_opts("netdev"), net_init_netdev, NULL, 1) == -1)
> --
> 1.8.1.4
>

