From: Tianyu Lan <ltykernel@gmail.com>
To: kys@microsoft.com, haiyangz@microsoft.com,
	sthemmin@microsoft.com, wei.liu@kernel.org, decui@microsoft.com,
	tglx@linutronix.de, mingo@redhat.com, bp@alien8.de,
	x86@kernel.org, hpa@zytor.com, arnd@arndb.de,
	dave.hansen@linux.intel.com, luto@kernel.org,
	peterz@infradead.org, akpm@linux-foundation.org,
	kirill.shutemov@linux.intel.com, rppt@kernel.org,
	hannes@cmpxchg.org, cai@lca.pw, krish.sadhukhan@oracle.com,
	saravanand@fb.com, Tianyu.Lan@microsoft.com,
	konrad.wilk@oracle.com, hch@lst.de, m.szyprowski@samsung.com,
	robin.murphy@arm.com, boris.ostrovsky@oracle.com,
	jgross@suse.com, sstabellini@kernel.org, joro@8bytes.org,
	will@kernel.org, xen-devel@lists.xenproject.org,
	davem@davemloft.net, kuba@kernel.org, jejb@linux.ibm.com,
	martin.petersen@oracle.com
Cc: iommu@lists.linux-foundation.org, linux-arch@vger.kernel.org,
	linux-hyperv@vger.kernel.org, linux-kernel@vger.kernel.org,
	linux-scsi@vger.kernel.org, netdev@vger.kernel.org,
	vkuznets@redhat.com, thomas.lendacky@amd.com,
	brijesh.singh@amd.com, sunilmut@microsoft.com
Subject: [RFC PATCH V3 11/11] HV/Storvsc: Add Isolation VM support for storvsc driver
Date: Sun, 30 May 2021 11:06:28 -0400	[thread overview]
Message-ID: <20210530150628.2063957-12-ltykernel@gmail.com> (raw)
In-Reply-To: <20210530150628.2063957-1-ltykernel@gmail.com>

From: Tianyu Lan <Tianyu.Lan@microsoft.com>

In an Isolation VM, all memory shared with the host must be marked
visible to the host via a hypercall. vmbus_establish_gpadl() already
does this for the storvsc rx/tx ring buffers, but the page buffers
used by vmbus_sendpacket_mpb_desc() still need to be handled. Use the
DMA API to map/unmap this memory when sending and receiving packets;
the Hyper-V DMA ops callback uses swiotlb to allocate a bounce buffer
and copy data to/from it.

Signed-off-by: Tianyu Lan <Tianyu.Lan@microsoft.com>
---
 drivers/scsi/storvsc_drv.c | 63 +++++++++++++++++++++++++++++++++++---
 1 file changed, 58 insertions(+), 5 deletions(-)
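
As an illustration of the flow described in the commit message (a
simplified sketch only, assuming the struct hv_dma_range bookkeeping
introduced elsewhere in this series; the scatterlist walk and error
unwinding in the hunks below are omitted), each Hyper-V page of the
data buffer is mapped before the packet is sent and unmapped when the
completion arrives:

	/* storvsc_queuecommand(): map one HV page and record the mapping. */
	dma = dma_map_page(&dev->device, pfn_to_page(hvpfn),
			   offset_in_hvpg, size, scmnd->sc_data_direction);
	if (dma_mapping_error(&dev->device, dma))
		return -ENOMEM;
	cmd_request->dma_range[i].dma = dma;
	cmd_request->dma_range[i].mapping_size = size;
	payload->range.pfn_array[i] = dma >> HV_HYP_PAGE_SHIFT;

	/* storvsc_on_channel_callback(): unmap once the host completes I/O. */
	dma_unmap_page(&device->device, request->dma_range[i].dma,
		       request->dma_range[i].mapping_size,
		       request->vstor_packet.vm_srb.data_in == READ_TYPE ?
		       DMA_FROM_DEVICE : DMA_TO_DEVICE);

On an Isolation VM, dma_map_page() ends up in the Hyper-V DMA ops,
which bounce the data through swiotlb memory that has already been
made visible to the host.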

diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c
index 403753929320..32da419c134e 100644
--- a/drivers/scsi/storvsc_drv.c
+++ b/drivers/scsi/storvsc_drv.c
@@ -21,6 +21,8 @@
 #include <linux/device.h>
 #include <linux/hyperv.h>
 #include <linux/blkdev.h>
+#include <linux/io.h>
+#include <linux/dma-mapping.h>
 #include <scsi/scsi.h>
 #include <scsi/scsi_cmnd.h>
 #include <scsi/scsi_host.h>
@@ -427,6 +429,8 @@ struct storvsc_cmd_request {
 	u32 payload_sz;
 
 	struct vstor_packet vstor_packet;
+	u32 hvpg_count;
+	struct hv_dma_range *dma_range;
 };
 
 
@@ -1267,6 +1271,7 @@ static void storvsc_on_channel_callback(void *context)
 	struct hv_device *device;
 	struct storvsc_device *stor_device;
 	struct Scsi_Host *shost;
+	int i;
 
 	if (channel->primary_channel != NULL)
 		device = channel->primary_channel->device_obj;
@@ -1321,6 +1326,17 @@ static void storvsc_on_channel_callback(void *context)
 				request = (struct storvsc_cmd_request *)scsi_cmd_priv(scmnd);
 			}
 
+			if (request->dma_range) {
+				for (i = 0; i < request->hvpg_count; i++)
+					dma_unmap_page(&device->device,
+							request->dma_range[i].dma,
+							request->dma_range[i].mapping_size,
+							request->vstor_packet.vm_srb.data_in
+							     == READ_TYPE ?
+							DMA_FROM_DEVICE : DMA_TO_DEVICE);
+				kfree(request->dma_range);
+			}
+
 			storvsc_on_receive(stor_device, packet, request);
 			continue;
 		}
@@ -1817,7 +1833,9 @@ static int storvsc_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scmnd)
 		unsigned int hvpgoff, hvpfns_to_add;
 		unsigned long offset_in_hvpg = offset_in_hvpage(sgl->offset);
 		unsigned int hvpg_count = HVPFN_UP(offset_in_hvpg + length);
+		dma_addr_t dma;
 		u64 hvpfn;
+		u32 size;
 
 		if (hvpg_count > MAX_PAGE_BUFFER_COUNT) {
 
@@ -1831,6 +1849,13 @@ static int storvsc_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scmnd)
 		payload->range.len = length;
 		payload->range.offset = offset_in_hvpg;
 
+		cmd_request->dma_range = kcalloc(hvpg_count,
+				 sizeof(*cmd_request->dma_range),
+				 GFP_ATOMIC);
+		if (!cmd_request->dma_range) {
+			ret = -ENOMEM;
+			goto free_payload;
+		}
 
 		for (i = 0; sgl != NULL; sgl = sg_next(sgl)) {
 			/*
@@ -1854,9 +1879,30 @@ static int storvsc_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scmnd)
 			 * last sgl should be reached at the same time that
 			 * the PFN array is filled.
 			 */
-			while (hvpfns_to_add--)
-				payload->range.pfn_array[i++] =	hvpfn++;
+			while (hvpfns_to_add--) {
+				size = min(HV_HYP_PAGE_SIZE - offset_in_hvpg,
+					   (unsigned long)length);
+				dma = dma_map_page(&dev->device,
+							 pfn_to_page(hvpfn++),
+							 offset_in_hvpg, size,
+							 scmnd->sc_data_direction);
+				if (dma_mapping_error(&dev->device, dma)) {
+					ret = -ENOMEM;
+					goto free_dma_range;
+				}
+
+				if (offset_in_hvpg) {
+					payload->range.offset = dma & ~HV_HYP_PAGE_MASK;
+					offset_in_hvpg = 0;
+				}
+
+				cmd_request->dma_range[i].dma = dma;
+				cmd_request->dma_range[i].mapping_size = size;
+				payload->range.pfn_array[i++] = dma >> HV_HYP_PAGE_SHIFT;
+				length -= size;
+			}
 		}
+		cmd_request->hvpg_count = hvpg_count;
 	}
 
 	cmd_request->payload = payload;
@@ -1867,13 +1913,20 @@ static int storvsc_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scmnd)
 	put_cpu();
 
 	if (ret == -EAGAIN) {
-		if (payload_sz > sizeof(cmd_request->mpb))
-			kfree(payload);
 		/* no more space */
-		return SCSI_MLQUEUE_DEVICE_BUSY;
+		ret = SCSI_MLQUEUE_DEVICE_BUSY;
+		goto free_dma_range;
 	}
 
 	return 0;
+
+free_dma_range:
+	kfree(cmd_request->dma_range);
+
+free_payload:
+	if (payload_sz > sizeof(cmd_request->mpb))
+		kfree(payload);
+	return ret;
 }
 
 static struct scsi_host_template scsi_driver = {
-- 
2.25.1


Thread overview: 98+ messages
2021-05-30 15:06 [RFC PATCH V3 00/11] x86/Hyper-V: Add Hyper-V Isolation VM support Tianyu Lan
2021-05-30 15:06 ` Tianyu Lan
2021-05-30 15:06 ` [RFC PATCH V3 01/11] x86/HV: Initialize GHCB page in Isolation VM Tianyu Lan
2021-05-30 15:06   ` Tianyu Lan
2021-06-07  6:41   ` Christoph Hellwig
2021-06-07  6:41     ` Christoph Hellwig
2021-06-07  8:14     ` Tianyu Lan
2021-06-07  8:14       ` Tianyu Lan
2021-06-09 12:38   ` Joerg Roedel
2021-06-09 12:38     ` Joerg Roedel
2021-06-10 14:13     ` Tianyu Lan
2021-06-10 14:13       ` Tianyu Lan
2021-05-30 15:06 ` [RFC PATCH V3 02/11] x86/HV: Initialize shared memory boundary in the " Tianyu Lan
2021-05-30 15:06   ` Tianyu Lan
2021-05-30 15:06 ` [RFC PATCH V3 03/11] x86/Hyper-V: Add new hvcall guest address host visibility support Tianyu Lan
2021-05-30 15:06   ` Tianyu Lan
2021-05-30 18:25   ` Borislav Petkov
2021-05-30 18:25     ` Borislav Petkov
2021-05-31  4:08     ` Tianyu Lan
2021-05-31  4:08       ` Tianyu Lan
2021-06-10  9:47   ` Vitaly Kuznetsov
2021-06-10  9:47     ` Vitaly Kuznetsov
2021-06-10 14:18     ` Tianyu Lan
2021-06-10 14:18       ` Tianyu Lan
2021-05-30 15:06 ` [RFC PATCH V3 04/11] HV: Add Write/Read MSR registers via ghcb Tianyu Lan
2021-05-30 15:06   ` Tianyu Lan
2021-06-09 12:46   ` Joerg Roedel
2021-06-09 12:46     ` Joerg Roedel
2021-06-10 14:15     ` Tianyu Lan
2021-06-10 14:15       ` Tianyu Lan
2021-05-30 15:06 ` [RFC PATCH V3 05/11] HV: Add ghcb hvcall support for SNP VM Tianyu Lan
2021-05-30 15:06   ` Tianyu Lan
2021-06-09 12:49   ` Joerg Roedel
2021-06-09 12:49     ` Joerg Roedel
2021-05-30 15:06 ` [RFC PATCH V3 06/11] HV/Vmbus: Add SNP support for VMbus channel initiate message Tianyu Lan
2021-05-30 15:06   ` Tianyu Lan
2021-05-30 15:06 ` [RFC PATCH V3 07/11] HV/Vmbus: Initialize VMbus ring buffer for Isolation VM Tianyu Lan
2021-05-30 15:06   ` Tianyu Lan
2021-05-30 15:06 ` [RFC PATCH V3 08/11] swiotlb: Add bounce buffer remap address setting function Tianyu Lan
2021-05-30 15:06   ` Tianyu Lan
2021-06-07  6:43   ` Christoph Hellwig
2021-06-07  6:43     ` Christoph Hellwig
2021-06-07 14:56     ` Tianyu Lan
2021-06-07 14:56       ` Tianyu Lan
2021-06-10 14:25       ` Tianyu Lan
2021-06-10 14:25         ` Tianyu Lan
2021-06-14  7:12       ` Christoph Hellwig
2021-06-14  7:12         ` Christoph Hellwig
2021-06-14 13:29         ` Tom Lendacky
2021-06-14 13:29           ` Tom Lendacky via iommu
2021-06-14 13:37         ` Tianyu Lan
2021-06-14 13:37           ` Tianyu Lan
2021-06-14 13:42           ` Tianyu Lan
2021-06-14 13:42             ` Tianyu Lan
2021-06-14 13:49     ` Robin Murphy
2021-06-14 13:49       ` Robin Murphy
2021-06-14 15:32       ` Christoph Hellwig
2021-06-14 15:32         ` Christoph Hellwig
2021-06-15 15:24         ` Tianyu Lan
2021-06-15 15:24           ` Tianyu Lan
2021-07-12  9:40           ` Tianyu Lan
2021-07-12  9:40             ` Tianyu Lan
2021-05-30 15:06 ` [RFC PATCH V3 09/11] HV/IOMMU: Enable swiotlb bounce buffer for Isolation VM Tianyu Lan
2021-05-30 15:06   ` Tianyu Lan
2021-06-02  1:16   ` Boris Ostrovsky
2021-06-02  1:16     ` Boris Ostrovsky
2021-06-02 15:01     ` Tianyu Lan
2021-06-02 15:01       ` Tianyu Lan
2021-06-02 16:02       ` Boris Ostrovsky
2021-06-02 16:02         ` Boris Ostrovsky
2021-06-03 15:37         ` Tianyu Lan
2021-06-03 15:37           ` Tianyu Lan
2021-06-03 17:04           ` Boris Ostrovsky
2021-06-03 17:04             ` Boris Ostrovsky
2021-06-07  6:44             ` Christoph Hellwig
2021-06-07  6:44               ` Christoph Hellwig
2021-05-30 15:06 ` [RFC PATCH V3 10/11] HV/Netvsc: Add Isolation VM support for netvsc driver Tianyu Lan
2021-05-30 15:06   ` Tianyu Lan
2021-06-07  6:50   ` Christoph Hellwig
2021-06-07  6:50     ` Christoph Hellwig
2021-06-07 15:21     ` Tianyu Lan
2021-06-07 15:21       ` Tianyu Lan
2021-06-14  7:09       ` Christoph Hellwig
2021-06-14  7:09         ` Christoph Hellwig
2021-06-14 14:04         ` Tianyu Lan
2021-06-14 14:04           ` Tianyu Lan
2021-06-14 15:33           ` Christoph Hellwig
2021-06-14 15:33             ` Christoph Hellwig
2021-06-15 14:31             ` Tianyu Lan
2021-06-15 14:31               ` Tianyu Lan
2021-06-10  9:52   ` Vitaly Kuznetsov
2021-06-10  9:52     ` Vitaly Kuznetsov
2021-05-30 15:06 ` Tianyu Lan [this message]
2021-05-30 15:06   ` [RFC PATCH V3 11/11] HV/Storvsc: Add Isolation VM support for storvsc driver Tianyu Lan
2021-06-07  6:46   ` Christoph Hellwig
2021-06-07  6:46     ` Christoph Hellwig
2021-06-07 14:59     ` Tianyu Lan
2021-06-07 14:59       ` Tianyu Lan
