From: Michael Kelley <mikelley@microsoft.com>
To: hpa@zytor.com, kys@microsoft.com, haiyangz@microsoft.com,
wei.liu@kernel.org, decui@microsoft.com, luto@kernel.org,
peterz@infradead.org, davem@davemloft.net, edumazet@google.com,
kuba@kernel.org, pabeni@redhat.com, lpieralisi@kernel.org,
robh@kernel.org, kw@linux.com, bhelgaas@google.com,
arnd@arndb.de, hch@infradead.org, m.szyprowski@samsung.com,
robin.murphy@arm.com, thomas.lendacky@amd.com,
brijesh.singh@amd.com, tglx@linutronix.de, mingo@redhat.com,
bp@alien8.de, dave.hansen@linux.intel.com,
Tianyu.Lan@microsoft.com, kirill.shutemov@linux.intel.com,
sathyanarayanan.kuppuswamy@linux.intel.com, ak@linux.intel.com,
isaku.yamahata@intel.com, dan.j.williams@intel.com,
jane.chu@oracle.com, seanjc@google.com, tony.luck@intel.com,
x86@kernel.org, linux-kernel@vger.kernel.org,
linux-hyperv@vger.kernel.org, netdev@vger.kernel.org,
linux-pci@vger.kernel.org, linux-arch@vger.kernel.org,
iommu@lists.linux.dev
Cc: mikelley@microsoft.com
Subject: [Patch v4 12/13] PCI: hv: Add hypercalls to read/write MMIO space
Date: Thu, 1 Dec 2022 19:30:30 -0800 [thread overview]
Message-ID: <1669951831-4180-13-git-send-email-mikelley@microsoft.com> (raw)
In-Reply-To: <1669951831-4180-1-git-send-email-mikelley@microsoft.com>
To support PCI pass-thru devices in Confidential VMs, Hyper-V
has added hypercalls to read and write MMIO space. Add the
appropriate definitions to hyperv-tlfs.h and implement
functions to make the hypercalls. These functions are used
in a subsequent patch.
Co-developed-by: Dexuan Cui <decui@microsoft.com>
Signed-off-by: Dexuan Cui <decui@microsoft.com>
Signed-off-by: Michael Kelley <mikelley@microsoft.com>
Reviewed-by: Haiyang Zhang <haiyangz@microsoft.com>
---
arch/x86/include/asm/hyperv-tlfs.h | 3 ++
drivers/pci/controller/pci-hyperv.c | 64 +++++++++++++++++++++++++++++++++++++
include/asm-generic/hyperv-tlfs.h | 22 +++++++++++++
3 files changed, 89 insertions(+)
diff --git a/arch/x86/include/asm/hyperv-tlfs.h b/arch/x86/include/asm/hyperv-tlfs.h
index 6d9368e..4d67c38 100644
--- a/arch/x86/include/asm/hyperv-tlfs.h
+++ b/arch/x86/include/asm/hyperv-tlfs.h
@@ -117,6 +117,9 @@
/* Recommend using enlightened VMCS */
#define HV_X64_ENLIGHTENED_VMCS_RECOMMENDED BIT(14)
+/* Use hypercalls for MMIO config space access */
+#define HV_X64_USE_MMIO_HYPERCALLS BIT(21)
+
/*
* CPU management features identification.
* These are HYPERV_CPUID_CPU_MANAGEMENT_FEATURES.EAX bits.
diff --git a/drivers/pci/controller/pci-hyperv.c b/drivers/pci/controller/pci-hyperv.c
index 084f531..bbe6e36 100644
--- a/drivers/pci/controller/pci-hyperv.c
+++ b/drivers/pci/controller/pci-hyperv.c
@@ -1041,6 +1041,70 @@ static int wslot_to_devfn(u32 wslot)
return PCI_DEVFN(slot_no.bits.dev, slot_no.bits.func);
}
+static void hv_pci_read_mmio(struct device *dev, phys_addr_t gpa, int size, u32 *val)
+{
+ struct hv_mmio_read_input *in;
+ struct hv_mmio_read_output *out;
+ u64 ret;
+
+ /*
+ * Read 'size' bytes of MMIO space at 'gpa' via the HVCALL_MMIO_READ
+ * hypercall. Must be called with interrupts disabled so it is safe
+ * to use the per-cpu input argument page; use it for both input and
+ * output. On hypercall failure, *val is left unmodified.
+ */
+ in = *this_cpu_ptr(hyperv_pcpu_input_arg);
+ out = *this_cpu_ptr(hyperv_pcpu_input_arg) + sizeof(*in); /* output area follows input in the same page */
+ in->gpa = gpa;
+ in->size = size;
+
+ ret = hv_do_hypercall(HVCALL_MMIO_READ, in, out);
+ if (hv_result_success(ret)) {
+ switch (size) { /* narrow the returned bytes to the requested access width */
+ case 1:
+ *val = *(u8 *)(out->data);
+ break;
+ case 2:
+ *val = *(u16 *)(out->data);
+ break;
+ default: /* size 4; callers presumably pass only 1, 2, or 4 -- confirm at call sites */
+ *val = *(u32 *)(out->data);
+ break;
+ }
+ } else
+ dev_err(dev, "MMIO read hypercall error %llx addr %llx size %d\n",
+ ret, gpa, size);
+}
+
+static void hv_pci_write_mmio(struct device *dev, phys_addr_t gpa, int size, u32 val)
+{
+ struct hv_mmio_write_input *in;
+ u64 ret;
+
+ /*
+ * Write the low 'size' bytes of 'val' to MMIO space at 'gpa' via the
+ * HVCALL_MMIO_WRITE hypercall. Must be called with interrupts
+ * disabled so it is safe to use the per-cpu input argument memory.
+ */
+ in = *this_cpu_ptr(hyperv_pcpu_input_arg);
+ in->gpa = gpa;
+ in->size = size;
+ switch (size) { /* store 'val' into the input page at the requested access width */
+ case 1:
+ *(u8 *)(in->data) = val;
+ break;
+ case 2:
+ *(u16 *)(in->data) = val;
+ break;
+ default: /* size 4; callers presumably pass only 1, 2, or 4 -- confirm at call sites */
+ *(u32 *)(in->data) = val;
+ break;
+ }
+
+ ret = hv_do_hypercall(HVCALL_MMIO_WRITE, in, NULL); /* write has no output argument */
+ if (!hv_result_success(ret))
+ dev_err(dev, "MMIO write hypercall error %llx addr %llx size %d\n",
+ ret, gpa, size);
+}
+
/*
* PCI Configuration Space for these root PCI buses is implemented as a pair
* of pages in memory-mapped I/O space. Writing to the first page chooses
diff --git a/include/asm-generic/hyperv-tlfs.h b/include/asm-generic/hyperv-tlfs.h
index b17c6ee..fcab07b 100644
--- a/include/asm-generic/hyperv-tlfs.h
+++ b/include/asm-generic/hyperv-tlfs.h
@@ -168,6 +168,8 @@ struct ms_hyperv_tsc_page {
#define HVCALL_FLUSH_GUEST_PHYSICAL_ADDRESS_SPACE 0x00af
#define HVCALL_FLUSH_GUEST_PHYSICAL_ADDRESS_LIST 0x00b0
#define HVCALL_MODIFY_SPARSE_GPA_PAGE_HOST_VISIBILITY 0x00db
+#define HVCALL_MMIO_READ 0x0106
+#define HVCALL_MMIO_WRITE 0x0107
/* Extended hypercalls */
#define HV_EXT_CALL_QUERY_CAPABILITIES 0x8001
@@ -790,4 +792,24 @@ struct hv_memory_hint {
union hv_gpa_page_range ranges[];
} __packed;
+/* Data structures for HVCALL_MMIO_READ and HVCALL_MMIO_WRITE */
+#define HV_HYPERCALL_MMIO_MAX_DATA_LENGTH 64 /* max bytes per MMIO hypercall; NOTE(review): assumed per TLFS -- confirm */
+
+struct hv_mmio_read_input {
+ u64 gpa; /* guest physical address to read */
+ u32 size; /* number of bytes to read */
+ u32 reserved; /* pad; keeps struct size at 16 bytes */
+} __packed;
+
+struct hv_mmio_read_output {
+ u8 data[HV_HYPERCALL_MMIO_MAX_DATA_LENGTH]; /* bytes returned by the hypervisor */
+} __packed;
+
+struct hv_mmio_write_input {
+ u64 gpa; /* guest physical address to write */
+ u32 size; /* number of bytes to write from data[] */
+ u32 reserved; /* pad; keeps data[] 8-byte aligned at offset 16 */
+ u8 data[HV_HYPERCALL_MMIO_MAX_DATA_LENGTH]; /* bytes to be written */
+} __packed;
+
#endif
--
1.8.3.1
next prev parent reply other threads:[~2022-12-02 3:34 UTC|newest]
Thread overview: 39+ messages / expand[flat|nested] mbox.gz Atom feed top
2022-12-02 3:30 [Patch v4 00/13] Add PCI pass-thru support to Hyper-V Confidential VMs Michael Kelley
2022-12-02 3:30 ` [Patch v4 01/13] x86/ioapic: Gate decrypted mapping on cc_platform_has() attribute Michael Kelley
2022-12-06 19:22 ` Borislav Petkov
2022-12-06 19:54 ` Michael Kelley (LINUX)
2022-12-29 11:39 ` Borislav Petkov
2022-12-29 16:02 ` Michael Kelley (LINUX)
2022-12-06 19:27 ` Sathyanarayanan Kuppuswamy
2022-12-02 3:30 ` [Patch v4 02/13] x86/hyperv: Reorder code in prep for subsequent patch Michael Kelley
2022-12-02 3:30 ` [Patch v4 03/13] Drivers: hv: Explicitly request decrypted in vmap_pfn() calls Michael Kelley
2022-12-29 12:05 ` Borislav Petkov
2022-12-02 3:30 ` [Patch v4 04/13] x86/mm: Handle decryption/re-encryption of bss_decrypted consistently Michael Kelley
2022-12-29 12:17 ` Borislav Petkov
2022-12-29 16:25 ` Michael Kelley (LINUX)
2023-01-09 19:10 ` Borislav Petkov
2023-01-09 19:14 ` Michael Kelley (LINUX)
2022-12-29 16:54 ` Bjorn Helgaas
2022-12-29 17:12 ` Borislav Petkov
2022-12-02 3:30 ` [Patch v4 05/13] init: Call mem_encrypt_init() after Hyper-V hypercall init is done Michael Kelley
2022-12-06 19:37 ` Sathyanarayanan Kuppuswamy
2022-12-06 20:13 ` Michael Kelley (LINUX)
2022-12-06 20:30 ` Sathyanarayanan Kuppuswamy
2022-12-02 3:30 ` [Patch v4 06/13] x86/hyperv: Change vTOM handling to use standard coco mechanisms Michael Kelley
2023-01-09 16:38 ` Borislav Petkov
2023-01-09 17:37 ` Michael Kelley (LINUX)
2023-01-09 18:07 ` Borislav Petkov
2022-12-02 3:30 ` [Patch v4 07/13] swiotlb: Remove bounce buffer remapping for Hyper-V Michael Kelley
2023-01-09 18:05 ` Borislav Petkov
2022-12-02 3:30 ` [Patch v4 08/13] Drivers: hv: vmbus: Remove second mapping of VMBus monitor pages Michael Kelley
2022-12-02 3:30 ` [Patch v4 09/13] Drivers: hv: vmbus: Remove second way of mapping ring buffers Michael Kelley
2022-12-02 3:30 ` [Patch v4 10/13] hv_netvsc: Remove second mapping of send and recv buffers Michael Kelley
2022-12-02 3:30 ` [Patch v4 11/13] Drivers: hv: Don't remap addresses that are above shared_gpa_boundary Michael Kelley
2022-12-02 3:30 ` Michael Kelley [this message]
2022-12-02 3:30 ` [Patch v4 13/13] PCI: hv: Enable PCI pass-thru devices in Confidential VMs Michael Kelley
2023-01-09 18:47 ` [Patch v4 00/13] Add PCI pass-thru support to Hyper-V " Borislav Petkov
2023-01-09 19:35 ` Michael Kelley (LINUX)
2023-01-12 14:03 ` Wei Liu
2023-01-19 17:58 ` Dexuan Cui
2023-01-20 11:58 ` Borislav Petkov
2023-01-20 12:42 ` Wei Liu
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=1669951831-4180-13-git-send-email-mikelley@microsoft.com \
--to=mikelley@microsoft.com \
--cc=Tianyu.Lan@microsoft.com \
--cc=ak@linux.intel.com \
--cc=arnd@arndb.de \
--cc=bhelgaas@google.com \
--cc=bp@alien8.de \
--cc=brijesh.singh@amd.com \
--cc=dan.j.williams@intel.com \
--cc=dave.hansen@linux.intel.com \
--cc=davem@davemloft.net \
--cc=decui@microsoft.com \
--cc=edumazet@google.com \
--cc=haiyangz@microsoft.com \
--cc=hch@infradead.org \
--cc=hpa@zytor.com \
--cc=iommu@lists.linux.dev \
--cc=isaku.yamahata@intel.com \
--cc=jane.chu@oracle.com \
--cc=kirill.shutemov@linux.intel.com \
--cc=kuba@kernel.org \
--cc=kw@linux.com \
--cc=kys@microsoft.com \
--cc=linux-arch@vger.kernel.org \
--cc=linux-hyperv@vger.kernel.org \
--cc=linux-kernel@vger.kernel.org \
--cc=linux-pci@vger.kernel.org \
--cc=lpieralisi@kernel.org \
--cc=luto@kernel.org \
--cc=m.szyprowski@samsung.com \
--cc=mingo@redhat.com \
--cc=netdev@vger.kernel.org \
--cc=pabeni@redhat.com \
--cc=peterz@infradead.org \
--cc=robh@kernel.org \
--cc=robin.murphy@arm.com \
--cc=sathyanarayanan.kuppuswamy@linux.intel.com \
--cc=seanjc@google.com \
--cc=tglx@linutronix.de \
--cc=thomas.lendacky@amd.com \
--cc=tony.luck@intel.com \
--cc=wei.liu@kernel.org \
--cc=x86@kernel.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).