From: Tianyu Lan <ltykernel@gmail.com>
To: kys@microsoft.com, haiyangz@microsoft.com,
	sthemmin@microsoft.com, wei.liu@kernel.org, jejb@linux.ibm.com,
	martin.petersen@oracle.com
Cc: Tianyu Lan <Tianyu.Lan@microsoft.com>,
	linux-hyperv@vger.kernel.org, linux-scsi@vger.kernel.org,
	linux-kernel@vger.kernel.org, vkuznets@redhat.com,
	thomas.lendacky@amd.com, brijesh.singh@amd.com,
	sunilmut@microsoft.com
Subject: [RFC V2 PATCH 12/12] HV/Storvsc: Add Isolation VM support for storvsc driver
Date: Tue, 13 Apr 2021 11:22:17 -0400
Message-ID: <20210413152217.3386288-13-ltykernel@gmail.com>
In-Reply-To: <20210413152217.3386288-1-ltykernel@gmail.com>

From: Tianyu Lan <Tianyu.Lan@microsoft.com>

In an Isolation VM, all memory shared with the host must be marked
visible to the host via a hvcall. vmbus_establish_gpadl() already
does this for the channel ring buffers, but the page buffers passed
to vmbus_sendpacket_mpb_desc() still need to be handled. Use the DMA
API to map/unmap this memory when sending and receiving packets; the
Hyper-V DMA ops callback will use swiotlb to allocate a bounce buffer
and copy data to/from it.
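
For reference, the per-page pattern applied below can be sketched as
follows. This is a minimal illustration only, assuming the Hyper-V DMA
ops introduced in patch 10/12 route map/unmap through swiotlb; the
example_* helpers are hypothetical names used for this sketch, not
driver symbols.

	#include <linux/dma-mapping.h>
	#include <linux/mm.h>

	/*
	 * Map one HV-sized page for the host. With swiotlb-backed DMA
	 * ops, dma_map_page_attrs() bounces the data into a
	 * host-visible buffer and returns the DMA address of that
	 * bounce buffer; the PFN placed in the MPB payload must be
	 * derived from this address, not from the original (encrypted)
	 * guest page.
	 */
	static dma_addr_t example_map_hvpg(struct device *dev,
					   unsigned long hvpfn,
					   unsigned long offset, u32 size,
					   enum dma_data_direction dir)
	{
		return dma_map_page_attrs(dev, pfn_to_page(hvpfn),
					  offset, size, dir, 0);
	}

	/*
	 * On I/O completion, unmapping copies the bounced data back
	 * into the guest page (for reads) and releases the swiotlb
	 * slot.
	 */
	static void example_unmap_hvpg(struct device *dev, dma_addr_t dma,
				       u32 size, enum dma_data_direction dir)
	{
		dma_unmap_page_attrs(dev, dma, size, dir, 0);
	}

storvsc_queuecommand() applies the map step once per HV page of the
scatterlist and records each DMA address/size pair, and
storvsc_on_channel_callback() reverses it when the request completes.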

Signed-off-by: Tianyu Lan <Tianyu.Lan@microsoft.com>
---
 drivers/scsi/storvsc_drv.c | 71 +++++++++++++++++++++++++++++++++++++-
 1 file changed, 70 insertions(+), 1 deletion(-)

diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c
index 2e4fa77445fd..d271578b1811 100644
--- a/drivers/scsi/storvsc_drv.c
+++ b/drivers/scsi/storvsc_drv.c
@@ -21,6 +21,8 @@
 #include <linux/device.h>
 #include <linux/hyperv.h>
 #include <linux/blkdev.h>
+#include <linux/io.h>
+#include <linux/dma-mapping.h>
 #include <scsi/scsi.h>
 #include <scsi/scsi_cmnd.h>
 #include <scsi/scsi_host.h>
@@ -414,6 +416,11 @@ static void storvsc_on_channel_callback(void *context);
 #define STORVSC_IDE_MAX_TARGETS				1
 #define STORVSC_IDE_MAX_CHANNELS			1
 
+struct dma_range {
+	dma_addr_t dma;		/* mapped (bounce buffer) DMA address */
+	u32 mapping_size;	/* size of the mapping in bytes */
+};
+
 struct storvsc_cmd_request {
 	struct scsi_cmnd *cmd;
 
@@ -427,6 +434,8 @@ struct storvsc_cmd_request {
 	u32 payload_sz;
 
 	struct vstor_packet vstor_packet;
+	u32 hvpg_count;
+	struct dma_range *dma_range;
 };
 
 
@@ -1236,6 +1245,7 @@ static void storvsc_on_channel_callback(void *context)
 	const struct vmpacket_descriptor *desc;
 	struct hv_device *device;
 	struct storvsc_device *stor_device;
+	int i;
 
 	if (channel->primary_channel != NULL)
 		device = channel->primary_channel->device_obj;
@@ -1249,6 +1259,8 @@ static void storvsc_on_channel_callback(void *context)
 	foreach_vmbus_pkt(desc, channel) {
 		void *packet = hv_pkt_data(desc);
 		struct storvsc_cmd_request *request;
+		enum dma_data_direction dir;
+		u32 attrs = 0;
 		u64 cmd_rqst;
 
 		cmd_rqst = vmbus_request_addr(&channel->requestor,
@@ -1261,6 +1273,20 @@
 
 		request = (struct storvsc_cmd_request *)(unsigned long)cmd_rqst;
 
+		if (request->vstor_packet.vm_srb.data_in == READ_TYPE)
+			dir = DMA_FROM_DEVICE;
+		else
+			dir = DMA_TO_DEVICE;
+
+		if (request->dma_range) {
+			for (i = 0; i < request->hvpg_count; i++)
+				dma_unmap_page_attrs(&device->device,
+						request->dma_range[i].dma,
+						request->dma_range[i].mapping_size,
+						dir, attrs);
+			kfree(request->dma_range);
+		}
+
 		if (request == &stor_device->init_request ||
 		    request == &stor_device->reset_request) {
 			memcpy(&request->vstor_packet, packet,
@@ -1682,8 +1710,10 @@ static int storvsc_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scmnd)
 	struct vmscsi_request *vm_srb;
 	struct scatterlist *cur_sgl;
 	struct vmbus_packet_mpb_array  *payload;
+	enum dma_data_direction dir;
 	u32 payload_sz;
 	u32 length;
+	u32 attrs = 0;
 
 	if (vmstor_proto_version <= VMSTOR_PROTO_VERSION_WIN8) {
 		/*
@@ -1722,14 +1752,17 @@ static int storvsc_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scmnd)
 	case DMA_TO_DEVICE:
 		vm_srb->data_in = WRITE_TYPE;
 		vm_srb->win8_extension.srb_flags |= SRB_FLAGS_DATA_OUT;
+		dir = DMA_TO_DEVICE;
 		break;
 	case DMA_FROM_DEVICE:
 		vm_srb->data_in = READ_TYPE;
 		vm_srb->win8_extension.srb_flags |= SRB_FLAGS_DATA_IN;
+		dir = DMA_FROM_DEVICE;
 		break;
 	case DMA_NONE:
 		vm_srb->data_in = UNKNOWN_TYPE;
 		vm_srb->win8_extension.srb_flags |= SRB_FLAGS_NO_DATA_TRANSFER;
+		dir = DMA_NONE;
 		break;
 	default:
 		/*
@@ -1786,6 +1819,12 @@ static int storvsc_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scmnd)
 		hvpgoff = sgl->offset >> HV_HYP_PAGE_SHIFT;
 
 		cur_sgl = sgl;
+
+		cmd_request->dma_range = kcalloc(hvpg_count,
+				sizeof(*cmd_request->dma_range), GFP_ATOMIC);
+		if (!cmd_request->dma_range)
+			return SCSI_MLQUEUE_DEVICE_BUSY;
+
 		for (i = 0; i < hvpg_count; i++) {
 			/*
 			 * 'i' is the index of hv pages in the payload and
@@ -1805,6 +1844,8 @@ static int storvsc_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scmnd)
 			 */
 			unsigned int hvpgoff_in_page =
 				(i + hvpgoff) % NR_HV_HYP_PAGES_IN_PAGE;
+			dma_addr_t dma;
+			u32 size;
 
 			/*
 			 * Two cases that we need to fetch a page:
@@ -1817,8 +1858,28 @@ static int storvsc_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scmnd)
 				cur_sgl = sg_next(cur_sgl);
 			}
 
-			payload->range.pfn_array[i] = hvpfn + hvpgoff_in_page;
+			size = min(HV_HYP_PAGE_SIZE - offset_in_hvpg, (unsigned long)length);
+			dma = dma_map_page_attrs(&dev->device,
+						 pfn_to_page(hvpfn),
+						 offset_in_hvpg, size,
+						 dir, attrs);
+			if (dma_mapping_error(&dev->device, dma)) {
+				pr_warn("dma map error.\n");
+				ret = SCSI_MLQUEUE_DEVICE_BUSY;
+				goto free_dma_range;
+			}
+
+			if (offset_in_hvpg) {
+				payload->range.offset = dma & ~HV_HYP_PAGE_MASK;
+				offset_in_hvpg = 0;
+			}
+
+			cmd_request->dma_range[i].dma = dma;
+			cmd_request->dma_range[i].mapping_size = size;
+			payload->range.pfn_array[i] = dma >> HV_HYP_PAGE_SHIFT;
+			length -= size;
 		}
+		cmd_request->hvpg_count = hvpg_count;
 	}
 
 	cmd_request->payload = payload;
@@ -1836,6 +1897,16 @@
 	}
 
 	return 0;
+
+free_dma_range:
+	/* Unmap the pages already mapped before the failure. */
+	while (i-- > 0)
+		dma_unmap_page_attrs(&dev->device,
+				     cmd_request->dma_range[i].dma,
+				     cmd_request->dma_range[i].mapping_size,
+				     dir, attrs);
+	kfree(cmd_request->dma_range);
+	return ret;
 }
 
 static struct scsi_host_template scsi_driver = {
-- 
2.25.1


Thread overview: 20+ messages

2021-04-13 15:22 [RFC V2 PATCH 00/12] x86/Hyper-V: Add Hyper-V Isolation VM support Tianyu Lan
2021-04-13 15:22 ` [RFC V2 PATCH 1/12] x86/HV: Initialize GHCB page in Isolation VM Tianyu Lan
2021-04-13 15:22 ` [RFC V2 PATCH 2/12] x86/HV: Initialize shared memory boundary " Tianyu Lan
2021-04-13 15:22 ` [RFC V2 PATCH 3/12] x86/Hyper-V: Add new hvcall guest address host visibility support Tianyu Lan
2021-04-13 15:22 ` [RFC V2 PATCH 4/12] HV: Add Write/Read MSR registers via ghcb Tianyu Lan
2021-04-13 15:22 ` [RFC V2 PATCH 5/12] HV: Add ghcb hvcall support for SNP VM Tianyu Lan
2021-04-13 15:22 ` [RFC V2 PATCH 6/12] HV/Vmbus: Add SNP support for VMbus channel initiate message Tianyu Lan
2021-04-13 15:22 ` [RFC V2 PATCH 7/12] HV/Vmbus: Initialize VMbus ring buffer for Isolation VM Tianyu Lan
2021-04-13 15:22 ` [RFC V2 PATCH 8/12] UIO/Hyper-V: Not load UIO HV driver in the isolation VM Tianyu Lan
2021-04-13 15:59   ` Greg KH
2021-04-13 16:00   ` Greg KH
2021-04-14 15:20     ` Tianyu Lan
2021-04-14 15:36       ` Greg KH
2021-04-13 15:22 ` [RFC V2 PATCH 9/12] swiotlb: Add bounce buffer remap address setting function Tianyu Lan
2021-04-14  6:43   ` Christoph Hellwig
2021-04-14 14:12     ` Tianyu Lan
2021-04-13 15:22 ` [RFC V2 PATCH 10/12] HV/IOMMU: Add Hyper-V dma ops support Tianyu Lan
2021-04-13 15:22 ` [RFC V2 PATCH 11/12] HV/Netvsc: Add Isolation VM support for netvsc driver Tianyu Lan
2021-04-18  9:53   ` Leon Romanovsky
2021-04-13 15:22 ` Tianyu Lan [this message]
