From: Brijesh Singh <brijesh.singh@amd.com>
To: linux-kernel@vger.kernel.org, kvm@vger.kernel.org
Cc: "Brijesh Singh" <brijesh.singh@amd.com>,
	"Thomas Gleixner" <tglx@linutronix.de>,
	"Ingo Molnar" <mingo@redhat.com>,
	"H. Peter Anvin" <hpa@zytor.com>,
	"Paolo Bonzini" <pbonzini@redhat.com>,
	"Radim Krčmář" <rkrcmar@redhat.com>,
	"Joerg Roedel" <joro@8bytes.org>, "Borislav Petkov" <bp@suse.de>,
	"Tom Lendacky" <thomas.lendacky@amd.com>,
	x86@kernel.org
Subject: [Part2 PATCH v4 22/29] KVM: SVM: Add support for SEV DEBUG_ENCRYPT command
Date: Tue, 19 Sep 2017 15:46:20 -0500
Message-ID: <20170919204627.3875-23-brijesh.singh@amd.com>
In-Reply-To: <20170919204627.3875-1-brijesh.singh@amd.com>

The command copies plaintext into guest memory and encrypts it using
the VM encryption key. The command will be used for debug purposes
(e.g. setting breakpoints through gdbserver).
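
As a rough illustration, a userspace VMM could drive the command as in
the minimal sketch below. This is only a sketch: it assumes the uapi
definitions added earlier in this series (KVM_MEMORY_ENCRYPT_OP,
KVM_SEV_DBG_ENCRYPT, struct kvm_sev_cmd and struct kvm_sev_dbg) are
visible through <linux/kvm.h>, and the exact field names may differ.

  #include <string.h>
  #include <sys/ioctl.h>
  #include <linux/kvm.h>

  /*
   * Sketch only; structure and field names follow this patch series.
   * Write 'len' plaintext bytes from 'src' into encrypted guest memory
   * mapped at host virtual address 'dst' (e.g. to plant an int3
   * breakpoint). 'vm_fd' is the KVM VM fd, 'sev_fd' an fd for the SEV
   * device (/dev/sev).
   */
  static int dbg_encrypt(int vm_fd, int sev_fd, const void *src,
                         void *dst, unsigned int len)
  {
      struct kvm_sev_dbg dbg;
      struct kvm_sev_cmd cmd;

      memset(&dbg, 0, sizeof(dbg));
      dbg.src_uaddr = (unsigned long)src;   /* plaintext source */
      dbg.dst_uaddr = (unsigned long)dst;   /* guest memory to update */
      dbg.len = len;

      memset(&cmd, 0, sizeof(cmd));
      cmd.id = KVM_SEV_DBG_ENCRYPT;
      cmd.data = (unsigned long)&dbg;
      cmd.sev_fd = sev_fd;

      return ioctl(vm_fd, KVM_MEMORY_ENCRYPT_OP, &cmd);
  }

A debugger-style breakpoint insert would typically pair this with
KVM_SEV_DBG_DECRYPT: first read and save the original instruction
bytes, then encrypt the breakpoint opcode into place.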

Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: Paolo Bonzini <pbonzini@redhat.com>
Cc: "Radim Krčmář" <rkrcmar@redhat.com>
Cc: Joerg Roedel <joro@8bytes.org>
Cc: Borislav Petkov <bp@suse.de>
Cc: Tom Lendacky <thomas.lendacky@amd.com>
Cc: x86@kernel.org
Cc: kvm@vger.kernel.org
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Brijesh Singh <brijesh.singh@amd.com>
---
 arch/x86/kvm/svm.c | 132 +++++++++++++++++++++++++++++++++++++++++++++++++----
 1 file changed, 124 insertions(+), 8 deletions(-)

diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 4d51ccb462db..16b338d9dc87 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -6203,6 +6203,104 @@ static int __sev_dbg_decrypt(struct kvm *kvm, unsigned long paddr,
 	return ret;
 }
 
+static int __sev_dbg_encrypt(struct kvm *kvm, unsigned long __user vaddr,
+			     unsigned long paddr, unsigned long __user dst_vaddr,
+			     unsigned long dst_paddr, int size, int *error)
+{
+	struct page *src_tpage = NULL;
+	struct page *dst_tpage = NULL;
+	int ret, len = size;
+
+	/*
+	 * If the source buffer is not 16-byte aligned, copy the data from the
+	 * source buffer into a page-aligned intermediate buffer (src_tpage)
+	 * and use this intermediate buffer as the source buffer.
+	 */
+	if (!IS_ALIGNED(vaddr, 16)) {
+		src_tpage = alloc_page(GFP_KERNEL);
+		if (!src_tpage)
+			return -ENOMEM;
+
+		if (copy_from_user(page_address(src_tpage),
+			(void __user *)(uintptr_t)vaddr, size)) {
+			__free_page(src_tpage);
+			return -EFAULT;
+		}
+		paddr = __sme_page_pa(src_tpage);
+
+		/* flush the caches to ensure that DRAM has recent contents */
+		clflush_cache_range(page_address(src_tpage), PAGE_SIZE);
+	}
+
+	/*
+	 * If the destination buffer or the length is not 16-byte aligned then:
+	 *  - decrypt the relevant portion of the destination buffer into the
+	 *    intermediate buffer (dst_tpage)
+	 *  - copy the source data into the intermediate buffer
+	 *  - use the intermediate buffer as the source buffer
+	 */
+	if (!IS_ALIGNED(dst_vaddr, 16) ||
+	    !IS_ALIGNED(size, 16)) {
+		int dst_offset;
+
+		dst_tpage = alloc_page(GFP_KERNEL);
+		if (!dst_tpage) {
+			ret = -ENOMEM;
+			goto e_free;
+		}
+
+		/* decrypt destination buffer into intermediate buffer */
+		ret = __sev_dbg_decrypt(kvm,
+					round_down(dst_paddr, 16),
+					0,
+					(unsigned long)page_address(dst_tpage),
+					__sme_page_pa(dst_tpage),
+					round_up(size, 16),
+					error);
+		if (ret)
+			goto e_free;
+
+		dst_offset = dst_paddr & 15;
+
+		/*
+		 * Copy the source data into the intermediate buffer.
+		 */
+		if (src_tpage) {
+			memcpy(page_address(dst_tpage) + dst_offset,
+			       page_address(src_tpage), size);
+		} else {
+			if (copy_from_user(page_address(dst_tpage) + dst_offset,
+					   (void __user *)(uintptr_t)vaddr, size)) {
+				ret = -EFAULT;
+				goto e_free;
+			}
+		}
+
+		/* use intermediate buffer as source */
+		paddr = __sme_page_pa(dst_tpage);
+
+		/*
+		 * Cache accesses between x86 and the PSP are not coherent. Flush
+		 * the caches to ensure that the PSP sees the updated contents.
+		 */
+		clflush_cache_range(page_address(dst_tpage), PAGE_SIZE);
+
+		/* now both length and destination are 16-byte aligned */
+		dst_paddr = round_down(dst_paddr, 16);
+		len = round_up(size, 16);
+	}
+
+	ret = __sev_issue_dbg_cmd(kvm, paddr, dst_paddr, len, error, true);
+
+e_free:
+	if (src_tpage)
+		__free_page(src_tpage);
+	if (dst_tpage)
+		__free_page(dst_tpage);
+	return ret;
+}
+
 static int sev_dbg_crypt(struct kvm *kvm, struct kvm_sev_cmd *argp, bool dec)
 {
 	unsigned long vaddr, vaddr_end, next_vaddr;
@@ -6240,9 +6338,10 @@ static int sev_dbg_crypt(struct kvm *kvm, struct kvm_sev_cmd *argp, bool dec)
 		}
 
 		/*
-		 * Cache access between te PSP and x86 are not coherent, hence we
-		 * flush the caches to buffers shared with PSP to ensure that we
-		 * will be able to see the PSP updates.
+		 * The PSP will access the guest memory range with C=1, but the
+		 * hypervisor cache may still hold that memory with C=0. Flush
+		 * the caches so that the data is accessed with the correct
+		 * C-bit on both the PSP and x86 sides.
 		 */
 		sev_clflush_pages(src_p, 1);
 		sev_clflush_pages(dst_p, 1);
@@ -6255,11 +6354,19 @@ static int sev_dbg_crypt(struct kvm *kvm, struct kvm_sev_cmd *argp, bool dec)
 		d_off = dst_vaddr & ~PAGE_MASK;
 		len = min_t(size_t, (PAGE_SIZE - s_off), size);
 
-		ret = __sev_dbg_decrypt(kvm,
-				       __sme_page_pa(src_p[0]) + s_off,
-				       dst_vaddr, 0,
-				       __sme_page_pa(dst_p[0]) + d_off,
-				       len, &argp->error);
+		if (dec)
+			ret = __sev_dbg_decrypt(kvm,
+						__sme_page_pa(src_p[0]) + s_off,
+						dst_vaddr, 0,
+						__sme_page_pa(dst_p[0]) + d_off,
+						len, &argp->error);
+		else
+			ret = __sev_dbg_encrypt(kvm,
+						vaddr,
+						__sme_page_pa(src_p[0]) + s_off,
+						dst_vaddr,
+						__sme_page_pa(dst_p[0]) + d_off,
+						len, &argp->error);
 
 		sev_unpin_memory(kvm, src_p, 1);
 		sev_unpin_memory(kvm, dst_p, 1);
@@ -6280,6 +6387,11 @@ static int sev_dbg_decrypt(struct kvm *kvm, struct kvm_sev_cmd *argp)
 	return sev_dbg_crypt(kvm, argp, true);
 }
 
+static int sev_dbg_encrypt(struct kvm *kvm, struct kvm_sev_cmd *argp)
+{
+	return sev_dbg_crypt(kvm, argp, false);
+}
+
 static int svm_mem_enc_op(struct kvm *kvm, void __user *argp)
 {
 	struct kvm_sev_cmd sev_cmd;
@@ -6319,6 +6431,10 @@ static int svm_mem_enc_op(struct kvm *kvm, void __user *argp)
 		r = sev_dbg_decrypt(kvm, &sev_cmd);
 		break;
 	}
+	case KVM_SEV_DBG_ENCRYPT: {
+		r = sev_dbg_encrypt(kvm, &sev_cmd);
+		break;
+	}
 	default:
 		break;
 	}
-- 
2.9.5
