From: Stefano Stabellini <sstabellini@kernel.org>
To: xen-devel@lists.xen.org
Cc: linux-kernel@vger.kernel.org, sstabellini@kernel.org,
	jgross@suse.com, boris.ostrovsky@oracle.com,
	Stefano Stabellini <stefano@aporeto.com>
Subject: [PATCH v4 05/13] xen/pvcalls: implement connect command
Date: Fri, 15 Sep 2017 16:00:32 -0700
Message-ID: <1505516440-11111-5-git-send-email-sstabellini@kernel.org>
In-Reply-To: <1505516440-11111-1-git-send-email-sstabellini@kernel.org>

Send PVCALLS_CONNECT to the backend. Allocate a new ring and evtchn for
the active socket.

Introduce fields in struct sock_mapping to keep track of active sockets.
Introduce a waitqueue to allow the frontend to wait on data coming from
the backend on the active socket (recvmsg command).
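
For illustration, a sketch of how a waiter might test that queue for
work (the helper below is hypothetical; the real check lands with the
recvmsg patch later in the series; in_cons, in_prod and in_error are
fields of struct pvcalls_data_intf):

    static bool sketch_read_todo(struct sock_mapping *map)
    {
            struct pvcalls_data_intf *intf = map->active.ring;
            RING_IDX cons = intf->in_cons;
            RING_IDX prod = intf->in_prod;

            /* read the indexes before acting on the data they cover */
            virt_rmb();
            return (prod - cons) > 0 || intf->in_error != 0;
    }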

Two mutexes (one for reads and one for writes) will be used to protect
the active socket's in and out rings from concurrent accesses.
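
A sketch of how a later recvmsg path might pair in_mutex with the
waitqueue (again hypothetical, reusing the sketch_read_todo() helper
above; the real implementation comes later in the series):

    static int sketch_recvmsg_wait(struct sock_mapping *map)
    {
            int ret;

            mutex_lock(&map->active.in_mutex);
            ret = wait_event_interruptible(map->active.inflight_conn_req,
                                           sketch_read_todo(map));
            if (ret == 0 && map->active.ring->in_error)
                    ret = map->active.ring->in_error;
            /* ... consume bytes from map->active.data.in here ... */
            mutex_unlock(&map->active.in_mutex);
            return ret;
    }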

Signed-off-by: Stefano Stabellini <stefano@aporeto.com>
CC: boris.ostrovsky@oracle.com
CC: jgross@suse.com
---
 drivers/xen/pvcalls-front.c | 163 ++++++++++++++++++++++++++++++++++++++++++++
 drivers/xen/pvcalls-front.h |   2 +
 2 files changed, 165 insertions(+)

diff --git a/drivers/xen/pvcalls-front.c b/drivers/xen/pvcalls-front.c
index 1bad1b1..ef511b6 100644
--- a/drivers/xen/pvcalls-front.c
+++ b/drivers/xen/pvcalls-front.c
@@ -13,6 +13,10 @@
  */
 
 #include <linux/module.h>
+#include <linux/net.h>
+#include <linux/socket.h>
+
+#include <net/sock.h>
 
 #include <xen/events.h>
 #include <xen/grant_table.h>
@@ -57,6 +61,18 @@ struct sock_mapping {
 	bool active_socket;
 	struct list_head list;
 	struct socket *sock;
+	union {
+		struct {
+			int irq;
+			grant_ref_t ref;
+			struct pvcalls_data_intf *ring;
+			struct pvcalls_data data;
+			struct mutex in_mutex;
+			struct mutex out_mutex;
+
+			wait_queue_head_t inflight_conn_req;
+		} active;
+	};
 };
 
 static inline int get_request(struct pvcalls_bedata *bedata, int *req_id)
@@ -114,6 +130,18 @@ static irqreturn_t pvcalls_front_event_handler(int irq, void *dev_id)
 	return IRQ_HANDLED;
 }
 
+static irqreturn_t pvcalls_front_conn_handler(int irq, void *sock_map)
+{
+	struct sock_mapping *map = sock_map;
+
+	if (map == NULL)
+		return IRQ_HANDLED;
+
+	wake_up_interruptible(&map->active.inflight_conn_req);
+
+	return IRQ_HANDLED;
+}
+
 int pvcalls_front_socket(struct socket *sock)
 {
 	struct pvcalls_bedata *bedata;
@@ -191,6 +219,133 @@ int pvcalls_front_socket(struct socket *sock)
 	return ret;
 }
 
+static int create_active(struct sock_mapping *map, int *evtchn)
+{
+	void *bytes;
+	int ret = -ENOMEM, irq = -1, i;
+
+	*evtchn = -1;
+	init_waitqueue_head(&map->active.inflight_conn_req);
+
+	map->active.ring = (struct pvcalls_data_intf *)
+		__get_free_page(GFP_KERNEL | __GFP_ZERO);
+	if (map->active.ring == NULL)
+		goto out_error;
+	map->active.ring->ring_order = PVCALLS_RING_ORDER;
+	bytes = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
+					PVCALLS_RING_ORDER);
+	if (bytes == NULL)
+		goto out_error;
+	for (i = 0; i < (1 << PVCALLS_RING_ORDER); i++)
+		map->active.ring->ref[i] = gnttab_grant_foreign_access(
+			pvcalls_front_dev->otherend_id,
+			pfn_to_gfn(virt_to_pfn(bytes) + i), 0);
+
+	map->active.ref = gnttab_grant_foreign_access(
+		pvcalls_front_dev->otherend_id,
+		pfn_to_gfn(virt_to_pfn((void *)map->active.ring)), 0);
+
+	map->active.data.in = bytes;
+	map->active.data.out = bytes +
+		XEN_FLEX_RING_SIZE(PVCALLS_RING_ORDER);
+
+	ret = xenbus_alloc_evtchn(pvcalls_front_dev, evtchn);
+	if (ret)
+		goto out_error;
+	irq = bind_evtchn_to_irqhandler(*evtchn, pvcalls_front_conn_handler,
+					0, "pvcalls-frontend", map);
+	if (irq < 0) {
+		ret = irq;
+		goto out_error;
+	}
+
+	map->active.irq = irq;
+	map->active_socket = true;
+	mutex_init(&map->active.in_mutex);
+	mutex_init(&map->active.out_mutex);
+
+	return 0;
+
+out_error:
+	if (irq >= 0)
+		unbind_from_irqhandler(irq, map);
+	else if (*evtchn >= 0)
+		xenbus_free_evtchn(pvcalls_front_dev, *evtchn);
+	free_pages((unsigned long)map->active.data.in, PVCALLS_RING_ORDER);
+	free_page((unsigned long)map->active.ring);
+	return ret;
+}
+
+int pvcalls_front_connect(struct socket *sock, struct sockaddr *addr,
+				int addr_len, int flags)
+{
+	struct pvcalls_bedata *bedata;
+	struct sock_mapping *map = NULL;
+	struct xen_pvcalls_request *req;
+	int notify, req_id, ret, evtchn;
+
+	pvcalls_enter;
+	if (!pvcalls_front_dev) {
+		pvcalls_exit;
+		return -ENETUNREACH;
+	}
+	if (addr->sa_family != AF_INET || sock->type != SOCK_STREAM) {
+		pvcalls_exit;
+		return -ENOTSUPP;
+	}
+
+	bedata = dev_get_drvdata(&pvcalls_front_dev->dev);
+
+	map = (struct sock_mapping *) sock->sk->sk_send_head;
+	if (!map) {
+		pvcalls_exit;
+		return -ENOTSOCK;
+	}
+
+	spin_lock(&bedata->socket_lock);
+	ret = get_request(bedata, &req_id);
+	if (ret < 0) {
+		spin_unlock(&bedata->socket_lock);
+		pvcalls_exit;
+		return ret;
+	}
+	ret = create_active(map, &evtchn);
+	if (ret < 0) {
+		spin_unlock(&bedata->socket_lock);
+		pvcalls_exit;
+		return ret;
+	}
+
+	req = RING_GET_REQUEST(&bedata->ring, req_id);
+	req->req_id = req_id;
+	req->cmd = PVCALLS_CONNECT;
+	req->u.connect.id = (uintptr_t)map;
+	memcpy(req->u.connect.addr, addr, sizeof(*addr));
+	req->u.connect.len = addr_len;
+	req->u.connect.flags = flags;
+	req->u.connect.ref = map->active.ref;
+	req->u.connect.evtchn = evtchn;
+
+	map->sock = sock;
+
+	bedata->ring.req_prod_pvt++;
+	RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&bedata->ring, notify);
+	spin_unlock(&bedata->socket_lock);
+
+	if (notify)
+		notify_remote_via_irq(bedata->irq);
+
+	wait_event(bedata->inflight_req,
+		   READ_ONCE(bedata->rsp[req_id].req_id) == req_id);
+
+	ret = bedata->rsp[req_id].ret;
+	/* read ret, then set this rsp slot to be reused */
+	smp_mb();
+	WRITE_ONCE(bedata->rsp[req_id].req_id, PVCALLS_INVALID_ID);
+	pvcalls_exit;
+	return ret;
+}
+
 static const struct xenbus_device_id pvcalls_front_ids[] = {
 	{ "pvcalls" },
 	{ "" }
@@ -207,6 +362,14 @@ static int pvcalls_front_remove(struct xenbus_device *dev)
 	if (bedata->irq >= 0)
 		unbind_from_irqhandler(bedata->irq, dev);
 
+	list_for_each_entry_safe(map, n, &bedata->socket_mappings, list) {
+		map->sock->sk->sk_send_head = NULL;
+		if (map->active_socket) {
+			map->active.ring->in_error = -EBADF;
+			wake_up_interruptible(&map->active.inflight_conn_req);
+		}
+	}
+
 	smp_mb();
 	while (atomic_read(&pvcalls_refcount) > 0)
 		cpu_relax();
diff --git a/drivers/xen/pvcalls-front.h b/drivers/xen/pvcalls-front.h
index b7dabed..63b0417 100644
--- a/drivers/xen/pvcalls-front.h
+++ b/drivers/xen/pvcalls-front.h
@@ -4,5 +4,7 @@
 #include <linux/net.h>
 
 int pvcalls_front_socket(struct socket *sock);
+int pvcalls_front_connect(struct socket *sock, struct sockaddr *addr,
+			  int addr_len, int flags);
 
 #endif
-- 
1.9.1
