From: Brijesh Singh <brijesh.singh@amd.com>
To: linux-kernel@vger.kernel.org, x86@kernel.org,
	linux-efi@vger.kernel.org, linuxppc-dev@lists.ozlabs.org,
	kvm@vger.kernel.org
Cc: "Thomas Gleixner" <tglx@linutronix.de>,
	"Ingo Molnar" <mingo@redhat.com>,
	"H . Peter Anvin" <hpa@zytor.com>, "Borislav Petkov" <bp@suse.de>,
	"Andy Lutomirski" <luto@kernel.org>,
	"Tony Luck" <tony.luck@intel.com>,
	"Piotr Luc" <piotr.luc@intel.com>,
	"Tom Lendacky" <thomas.lendacky@amd.com>,
	"Fenghua Yu" <fenghua.yu@intel.com>,
	"Lu Baolu" <baolu.lu@linux.intel.com>,
	"Reza Arbab" <arbab@linux.vnet.ibm.com>,
	"David Howells" <dhowells@redhat.com>,
	"Matt Fleming" <matt@codeblueprint.co.uk>,
	"Kirill A . Shutemov" <kirill.shutemov@linux.intel.com>,
	"Laura Abbott" <labbott@redhat.com>,
	"Ard Biesheuvel" <ard.biesheuvel@linaro.org>,
	"Andrew Morton" <akpm@linux-foundation.org>,
	"Eric Biederman" <ebiederm@xmission.com>,
	"Benjamin Herrenschmidt" <benh@kernel.crashing.org>,
	"Paul Mackerras" <paulus@samba.org>,
	"Konrad Rzeszutek Wilk" <konrad.wilk@oracle.com>,
	"Jonathan Corbet" <corbet@lwn.net>,
	"Dave Airlie" <airlied@redhat.com>,
	"Kees Cook" <keescook@chromium.org>,
	"Paolo Bonzini" <pbonzini@redhat.com>,
	"Radim Krčmář" <rkrcmar@redhat.com>,
	"Arnd Bergmann" <arnd@arndb.de>, "Tejun Heo" <tj@kernel.org>,
	"Christoph Lameter" <cl@linux.com>,
	"Brijesh Singh" <brijesh.singh@amd.com>
Subject: [RFC Part1 PATCH v3 12/17] x86/mm: DMA support for SEV memory encryption
Date: Mon, 24 Jul 2017 14:07:52 -0500	[thread overview]
Message-ID: <20170724190757.11278-13-brijesh.singh@amd.com> (raw)
In-Reply-To: <20170724190757.11278-1-brijesh.singh@amd.com>

From: Tom Lendacky <thomas.lendacky@amd.com>

When SEV is active, DMA to memory mapped as encrypted does not work:
data written by a device is not encrypted, and data read by a device is
not decrypted. For DMA to function properly when SEV is active, the
SWIOTLB bounce buffers must be used.
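
No driver change is needed to pick this up: once the DMA ops below are
installed, an ordinary coherent allocation is serviced by
sme_alloc()/sme_free(). A minimal sketch of the consumer side
(hypothetical driver code, for illustration only, not part of this
patch):

	void *buf;
	dma_addr_t buf_dma;

	/* Serviced by sme_alloc() once the SEV DMA ops are installed */
	buf = dma_alloc_coherent(dev, buf_size, &buf_dma, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	/* ... device DMAs via buf_dma, CPU uses buf (mapped decrypted) ... */

	dma_free_coherent(dev, buf_size, buf, buf_dma);	/* via sme_free() */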

Signed-off-by: Tom Lendacky <thomas.lendacky@amd.com>
Signed-off-by: Brijesh Singh <brijesh.singh@amd.com>
---
 arch/x86/mm/mem_encrypt.c | 86 +++++++++++++++++++++++++++++++++++++++++++++++
 lib/swiotlb.c             |  5 +--
 2 files changed, 89 insertions(+), 2 deletions(-)

diff --git a/arch/x86/mm/mem_encrypt.c b/arch/x86/mm/mem_encrypt.c
index 1e4643e..5e5d460 100644
--- a/arch/x86/mm/mem_encrypt.c
+++ b/arch/x86/mm/mem_encrypt.c
@@ -191,8 +191,86 @@ void __init sme_early_init(void)
 	/* Update the protection map with memory encryption mask */
 	for (i = 0; i < ARRAY_SIZE(protection_map); i++)
 		protection_map[i] = pgprot_encrypted(protection_map[i]);
+
+	if (sev_active())
+		swiotlb_force = SWIOTLB_FORCE;
+}
+
+static void *sme_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
+		       gfp_t gfp, unsigned long attrs)
+{
+	unsigned long dma_mask;
+	unsigned int order;
+	struct page *page;
+	void *vaddr = NULL;
+
+	dma_mask = dma_alloc_coherent_mask(dev, gfp);
+	order = get_order(size);
+
+	/*
+	 * Memory will be memset to zero after marking decrypted, so don't
+	 * bother clearing it before.
+	 */
+	gfp &= ~__GFP_ZERO;
+
+	page = alloc_pages_node(dev_to_node(dev), gfp, order);
+	if (page) {
+		dma_addr_t addr;
+
+		/*
+		 * Since we will be clearing the encryption bit, check the
+		 * mask with it already cleared.
+		 */
+		addr = __sme_clr(phys_to_dma(dev, page_to_phys(page)));
+		if ((addr + size) > dma_mask) {
+			__free_pages(page, get_order(size));
+		} else {
+			vaddr = page_address(page);
+			*dma_handle = addr;
+		}
+	}
+
+	if (!vaddr)
+		vaddr = swiotlb_alloc_coherent(dev, size, dma_handle, gfp);
+
+	if (!vaddr)
+		return NULL;
+
+	/* Clear the SME encryption bit for DMA use if not swiotlb area */
+	if (!is_swiotlb_buffer(dma_to_phys(dev, *dma_handle))) {
+		set_memory_decrypted((unsigned long)vaddr, 1 << order);
+		memset(vaddr, 0, PAGE_SIZE << order);
+		*dma_handle = __sme_clr(*dma_handle);
+	}
+
+	return vaddr;
+}
+
+static void sme_free(struct device *dev, size_t size, void *vaddr,
+		     dma_addr_t dma_handle, unsigned long attrs)
+{
+	/* Set the SME encryption bit for re-use if not swiotlb area */
+	if (!is_swiotlb_buffer(dma_to_phys(dev, dma_handle)))
+		set_memory_encrypted((unsigned long)vaddr,
+				     1 << get_order(size));
+
+	swiotlb_free_coherent(dev, size, vaddr, dma_handle);
 }
 
+static const struct dma_map_ops sme_dma_ops = {
+	.alloc                  = sme_alloc,
+	.free                   = sme_free,
+	.map_page               = swiotlb_map_page,
+	.unmap_page             = swiotlb_unmap_page,
+	.map_sg                 = swiotlb_map_sg_attrs,
+	.unmap_sg               = swiotlb_unmap_sg_attrs,
+	.sync_single_for_cpu    = swiotlb_sync_single_for_cpu,
+	.sync_single_for_device = swiotlb_sync_single_for_device,
+	.sync_sg_for_cpu        = swiotlb_sync_sg_for_cpu,
+	.sync_sg_for_device     = swiotlb_sync_sg_for_device,
+	.mapping_error          = swiotlb_dma_mapping_error,
+};
+
 /* Architecture __weak replacement functions */
 void __init mem_encrypt_init(void)
 {
@@ -202,6 +280,14 @@ void __init mem_encrypt_init(void)
 	/* Call into SWIOTLB to update the SWIOTLB DMA buffers */
 	swiotlb_update_mem_attributes();
 
+	/*
+	 * With SEV, DMA operations cannot use encryption. New DMA ops
+	 * are required in order to mark the DMA areas as decrypted or
+	 * to use bounce buffers.
+	 */
+	if (sev_active())
+		dma_ops = &sme_dma_ops;
+
 	pr_info("AMD Secure Memory Encryption (SME) active\n");
 }
 
diff --git a/lib/swiotlb.c b/lib/swiotlb.c
index 8c6c83e..85fed2f 100644
--- a/lib/swiotlb.c
+++ b/lib/swiotlb.c
@@ -507,8 +507,9 @@ phys_addr_t swiotlb_tbl_map_single(struct device *hwdev,
 	if (no_iotlb_memory)
 		panic("Can not allocate SWIOTLB buffer earlier and can't now provide you with the DMA bounce buffer");
 
-	if (sme_active())
-		pr_warn_once("SME is active and system is using DMA bounce buffers\n");
+	if (sme_active() || sev_active())
+		pr_warn_once("%s is active and system is using DMA bounce buffers\n",
+			     sme_active() ? "SME" : "SEV");
 
 	mask = dma_get_seg_boundary(hwdev);
 
-- 
2.9.4
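
As background on the bounce-buffer path the ops above fall back to:
under SEV the device may only touch pages mapped decrypted, so streaming
DMA is staged through the (decrypted) SWIOTLB pool. A conceptual sketch
follows (illustration only; start_device_dma() is a hypothetical device
call, and this is not the actual lib/swiotlb.c code):

	/*
	 * Conceptual sketch: "bounce" is a slot in the decrypted SWIOTLB
	 * pool, "orig" is the driver's buffer in encrypted memory. The
	 * device only ever sees the bounce slot; the kernel copies
	 * between the two around the DMA operation.
	 */
	static void bounce_to_device(struct device *dev, void *orig,
				     void *bounce, size_t size)
	{
		memcpy(bounce, orig, size);	/* stage a plaintext copy */
		start_device_dma(dev, virt_to_phys(bounce), size); /* hypothetical */
	}

	static void bounce_from_device(void *orig, void *bounce, size_t size)
	{
		memcpy(orig, bounce, size);	/* copy back after DMA completes */
	}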

Thread overview: 226+ messages
2017-07-24 19:07 [RFC Part1 PATCH v3 00/17] x86: Secure Encrypted Virtualization (AMD) Brijesh Singh
2017-07-24 19:07 ` [RFC Part1 PATCH v3 01/17] Documentation/x86: Add AMD Secure Encrypted Virtualization (SEV) description Brijesh Singh
2017-07-25  5:45   ` Borislav Petkov
2017-07-25 14:59     ` Brijesh Singh
2017-07-24 19:07 ` [RFC Part1 PATCH v3 02/17] x86/CPU/AMD: Add the Secure Encrypted Virtualization CPU feature Brijesh Singh
2017-07-25 10:26   ` Borislav Petkov
2017-07-25 14:29     ` Tom Lendacky
2017-07-25 14:36       ` Borislav Petkov
2017-07-25 14:58         ` Tom Lendacky
2017-07-25 15:13           ` Borislav Petkov
2017-07-25 15:29             ` Tom Lendacky
2017-07-25 15:33               ` Borislav Petkov
2017-08-09 18:17                 ` Tom Lendacky
2017-08-17  8:12                   ` Borislav Petkov
2017-07-24 19:07 ` [RFC Part1 PATCH v3 03/17] x86/mm: Secure Encrypted Virtualization (SEV) support Brijesh Singh
2017-07-26  4:28   ` Borislav Petkov
2017-07-26 16:47     ` Tom Lendacky
2017-07-27 13:39       ` Borislav Petkov
2017-07-24 19:07 ` [RFC Part1 PATCH v3 04/17] x86/mm: Don't attempt to encrypt initrd under SEV Brijesh Singh
2017-07-26 14:44   ` Borislav Petkov
2017-07-24 19:07 ` [RFC Part1 PATCH v3 05/17] x86, realmode: Don't decrypt trampoline area under SEV Brijesh Singh
2017-07-26 16:03   ` Borislav Petkov
2017-08-10 13:03     ` Tom Lendacky
2017-07-24 19:07 ` [RFC Part1 PATCH v3 06/17] x86/mm: Use encrypted access of boot related data with SEV Brijesh Singh
2017-07-27 13:31   ` Borislav Petkov
2017-08-17 18:05     ` Tom Lendacky
2017-07-24 19:07 ` [RFC Part1 PATCH v3 07/17] x86/mm: Include SEV for encryption memory attribute changes Brijesh Singh
2017-07-27 14:58   ` Borislav Petkov
2017-07-28  8:47     ` David Laight
2017-08-17 18:21       ` Tom Lendacky
2017-08-17 18:10     ` Tom Lendacky
2017-07-24 19:07 ` [RFC Part1 PATCH v3 08/17] x86/efi: Access EFI data as encrypted when SEV is active Brijesh Singh
2017-07-28 10:31   ` Borislav Petkov
2017-08-17 18:42     ` Tom Lendacky
2017-07-24 19:07 ` [RFC Part1 PATCH v3 09/17] resource: Consolidate resource walking code Brijesh Singh
2017-07-28 15:23   ` Borislav Petkov
2017-08-17 18:55     ` Tom Lendacky
2017-08-17 19:03       ` Tom Lendacky
2017-07-24 19:07 ` [RFC Part1 PATCH v3 10/17] resource: Provide resource struct in resource walk callback Brijesh Singh
2017-07-31  8:26   ` Borislav Petkov
2017-07-31 22:19   ` Kees Cook
2017-07-24 19:07 ` [RFC Part1 PATCH v3 11/17] x86/mm, resource: Use PAGE_KERNEL protection for ioremap of memory pages Brijesh Singh
2017-08-02  4:02   ` Borislav Petkov
2017-08-17 19:22     ` Tom Lendacky
2017-07-24 19:07 ` [RFC Part1 PATCH v3 12/17] x86/mm: DMA support for SEV memory encryption Brijesh Singh [this message]
2017-08-07  3:48   ` Borislav Petkov
2017-08-17 19:35     ` Tom Lendacky
2017-07-24 19:07 ` [RFC Part1 PATCH v3 13/17] x86/io: Unroll string I/O when SEV is active Brijesh Singh
2017-07-25  9:51   ` David Laight
2017-07-26 10:45     ` Arnd Bergmann
2017-07-26 19:24       ` Brijesh Singh
2017-07-26 19:26         ` H. Peter Anvin
2017-07-26 20:07           ` Brijesh Singh
2017-07-27  7:45             ` David Laight
2017-08-22 16:52             ` Borislav Petkov
2017-09-15 12:24               ` Borislav Petkov
2017-09-15 14:13                 ` Brijesh Singh
2017-09-15 14:40                   ` Borislav Petkov
2017-09-15 14:48                     ` Brijesh Singh
2017-09-15 16:22                       ` Borislav Petkov
2017-09-15 16:27                         ` Brijesh Singh
2017-07-24 19:07 ` [RFC Part1 PATCH v3 14/17] x86/boot: Add early boot support when running with SEV active Brijesh Singh
2017-08-23 15:30   ` Borislav Petkov
2017-08-24 18:54     ` Tom Lendacky
2017-08-25 12:54       ` Borislav Petkov
2017-07-24 19:07 ` [RFC Part1 PATCH v3 15/17] x86: Add support for changing memory encryption attribute in early boot Brijesh Singh
2017-08-28 10:51   ` Borislav Petkov
2017-08-28 11:49     ` Brijesh Singh
2017-07-24 19:07 ` [RFC Part1 PATCH v3 16/17] X86/KVM: Provide support to create Guest and HV shared per-CPU variables Brijesh Singh
2017-08-29 10:22   ` Borislav Petkov
2017-08-30 16:18     ` Brijesh Singh
2017-08-30 17:46       ` Borislav Petkov
2017-09-01 22:52         ` Brijesh Singh
2017-09-02  3:21           ` Andy Lutomirski
2017-09-03  2:34             ` Brijesh Singh
2017-09-04 17:05           ` Borislav Petkov
2017-09-04 17:47             ` Brijesh Singh
2017-07-24 19:07 ` [RFC Part1 PATCH v3 17/17] X86/KVM: Clear encryption attribute when SEV is active Brijesh Singh
2017-08-31 15:06   ` Borislav Petkov
