From: Brijesh Singh <brijesh.singh@amd.com>
To: linux-kernel@vger.kernel.org, kvm@vger.kernel.org
Cc: "Brijesh Singh" <brijesh.singh@amd.com>,
	"Thomas Gleixner" <tglx@linutronix.de>,
	"Ingo Molnar" <mingo@redhat.com>,
	"H. Peter Anvin" <hpa@zytor.com>,
	"Paolo Bonzini" <pbonzini@redhat.com>,
	"Radim Krčmář" <rkrcmar@redhat.com>,
	"Joerg Roedel" <joro@8bytes.org>, "Borislav Petkov" <bp@suse.de>,
	"Tom Lendacky" <thomas.lendacky@amd.com>,
	x86@kernel.org
Subject: [Part2 PATCH v4 21/29] KVM: SVM: Add support for SEV DEBUG_DECRYPT command
Date: Tue, 19 Sep 2017 15:46:19 -0500
Message-ID: <20170919204627.3875-22-brijesh.singh@amd.com>
In-Reply-To: <20170919204627.3875-1-brijesh.singh@amd.com>

The DEBUG_DECRYPT command is used to decrypt a guest memory region for
debug purposes.
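
For illustration only, a minimal userspace sketch of how a VMM might issue
this command through the KVM_MEMORY_ENCRYPT_OP ioctl added earlier in this
series. The helper name is hypothetical, and the kvm_sev_cmd/kvm_sev_dbg
layouts are assumed from the uapi definitions introduced by this series:

  /*
   * Hypothetical helper (not part of this patch): decrypt 'len' bytes of
   * encrypted guest memory, mapped at userspace address 'src_uaddr', into
   * the plaintext buffer 'dst'.
   */
  #include <stdint.h>
  #include <sys/ioctl.h>
  #include <linux/kvm.h>

  static int sev_dbg_decrypt_region(int vm_fd, int sev_fd,
                                    uint64_t src_uaddr, void *dst,
                                    uint32_t len, uint32_t *fw_error)
  {
          struct kvm_sev_dbg dbg = {
                  .src_uaddr = src_uaddr,                /* encrypted guest memory (HVA) */
                  .dst_uaddr = (uint64_t)(uintptr_t)dst, /* plaintext destination */
                  .len       = len,
          };
          struct kvm_sev_cmd cmd = {
                  .id     = KVM_SEV_DBG_DECRYPT,
                  .data   = (uint64_t)(uintptr_t)&dbg,
                  .sev_fd = sev_fd,                      /* open handle to /dev/sev */
          };
          int ret = ioctl(vm_fd, KVM_MEMORY_ENCRYPT_OP, &cmd);

          if (fw_error)
                  *fw_error = cmd.error;                 /* SEV firmware status code */
          return ret;
  }

Neither the source, the destination nor the length needs to be 16-byte
aligned; the kernel side below transparently bounces unaligned requests
through a temporary page.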

Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: Paolo Bonzini <pbonzini@redhat.com>
Cc: "Radim Krčmář" <rkrcmar@redhat.com>
Cc: Joerg Roedel <joro@8bytes.org>
Cc: Borislav Petkov <bp@suse.de>
Cc: Tom Lendacky <thomas.lendacky@amd.com>
Cc: x86@kernel.org
Cc: kvm@vger.kernel.org
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Brijesh Singh <brijesh.singh@amd.com>
---
 arch/x86/kvm/svm.c | 180 ++++++++++++++++++++++++++++++++++++++++++++++++++++-
 1 file changed, 179 insertions(+), 1 deletion(-)

diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 7a6e82c48142..4d51ccb462db 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -1622,7 +1622,6 @@ static struct page **sev_pin_memory(struct kvm *kvm, unsigned long uaddr,
 
 	*n = npages;
 	sev->locked = locked;
-
 	return pages;
 err:
 	if (pinned > 0)
@@ -6106,6 +6105,181 @@ static int sev_guest_status(struct kvm *kvm, struct kvm_sev_cmd *argp)
 	return ret;
 }
 
+static int __sev_issue_dbg_cmd(struct kvm *kvm, unsigned long src,
+			       unsigned long dst, int size,
+			       int *error, bool enc)
+{
+	struct kvm_sev_info *sev = &kvm->arch.sev_info;
+	struct sev_data_dbg *data;
+	int ret;
+
+	data = kzalloc(sizeof(*data), GFP_KERNEL);
+	if (!data)
+		return -ENOMEM;
+
+	data->handle = sev->handle;
+	data->dst_addr = dst;
+	data->src_addr = src;
+	data->len = size;
+
+	ret = sev_issue_cmd(kvm,
+			    enc ? SEV_CMD_DBG_ENCRYPT : SEV_CMD_DBG_DECRYPT,
+			    data, error);
+	kfree(data);
+	return ret;
+}
+
+/*
+ * Decrypt source memory into a userspace or kernel buffer. If the destination
+ * buffer or the length is not 16-byte aligned, an intermediate buffer is used.
+ */
+static int __sev_dbg_decrypt(struct kvm *kvm, unsigned long paddr,
+			     unsigned long __user dst_uaddr,
+			     unsigned long dst_kaddr, unsigned long dst_paddr,
+			     int size, int *error)
+{
+	int ret, offset = 0, len = size;
+	struct page *tpage = NULL;
+
+	/*
+	 * The debug command works with 16-byte aligned inputs; check whether
+	 * all inputs (src, dst and len) are 16-byte aligned. If any input is
+	 * not aligned, we decrypt more than requested into a temporary buffer
+	 * and copy the relevant portion of data into the destination buffer.
+	 */
+	if (!IS_ALIGNED(paddr, 	   16) ||
+	    !IS_ALIGNED(dst_paddr, 16) ||
+	    !IS_ALIGNED(size, 	   16)) {
+		tpage = (void *)alloc_page(GFP_KERNEL);
+		if (!tpage)
+			return -ENOMEM;
+
+		dst_paddr = __sme_page_pa(tpage);
+
+		/*
+		 * If the source buffer is not aligned, the offset is used
+		 * when copying the data from the temporary buffer into the
+		 * destination buffer.
+		 */
+		offset = paddr & 15;
+
+		/* It is safe to read more than the requested size. */
+		len = round_up(size + offset, 16);
+
+		paddr = round_down(paddr, 16);
+
+		/*
+		 * Cache accesses from the PSP are coherent with x86 but not
+		 * the other way around. Hence, flush the destination caches
+		 * to ensure that x86 is able to see the PSP updates.
+		 */
+		clflush_cache_range(page_address(tpage), PAGE_SIZE);
+	}
+
+	ret = __sev_issue_dbg_cmd(kvm, paddr, dst_paddr, len, error, false);
+
+	/*
+	 * If a temporary buffer was used, copy the data from the temporary
+	 * buffer into the destination buffer.
+	 */
+	if (!ret && tpage) {
+
+		/*
+		 * If the destination buffer is a userspace buffer then use
+		 * copy_to_user(), otherwise memcpy().
+		 */
+		if (dst_uaddr) {
+			if (copy_to_user((void __user *)(uintptr_t)dst_uaddr,
+					 page_address(tpage) + offset, size))
+				ret = -EFAULT;
+		} else {
+			memcpy((void *)dst_kaddr, page_address(tpage) + offset, size);
+		}
+	}
+
+	if (tpage)
+		__free_page(tpage);
+
+	return ret;
+}
+
+static int sev_dbg_crypt(struct kvm *kvm, struct kvm_sev_cmd *argp, bool dec)
+{
+	unsigned long vaddr, vaddr_end, next_vaddr;
+	unsigned long dst_vaddr, dst_vaddr_end;
+	struct page **src_p, **dst_p;
+	struct kvm_sev_dbg debug;
+	unsigned long n;
+	int ret, size;
+
+	if (!sev_guest(kvm))
+		return -ENOTTY;
+
+	if (copy_from_user(&debug, (void __user *)(uintptr_t)argp->data,
+			   sizeof(struct kvm_sev_dbg)))
+		return -EFAULT;
+
+	vaddr = debug.src_uaddr;
+	size = debug.len;
+	vaddr_end = vaddr + size;
+	dst_vaddr = debug.dst_uaddr;
+	dst_vaddr_end = dst_vaddr + size;
+
+	for (; vaddr < vaddr_end; vaddr = next_vaddr) {
+		int len, s_off, d_off;
+
+		/* Lock the userspace source and destination pages. */
+		src_p = sev_pin_memory(kvm, vaddr & PAGE_MASK, PAGE_SIZE, &n, 0);
+		if (!src_p)
+			return -EFAULT;
+
+		dst_p = sev_pin_memory(kvm, dst_vaddr & PAGE_MASK, PAGE_SIZE, &n, 1);
+		if (!dst_p) {
+			sev_unpin_memory(kvm, src_p, n);
+			return -EFAULT;
+		}
+
+		/*
+		 * Cache accesses between the PSP and x86 are not coherent;
+		 * hence, flush the caches of the buffers shared with the PSP
+		 * to ensure that we will be able to see the PSP updates.
+		 */
+		sev_clflush_pages(src_p, 1);
+		sev_clflush_pages(dst_p, 1);
+
+		/*
+		 * Since the user buffers may not be page aligned, calculate
+		 * the offset within the page.
+		 */
+		s_off = vaddr & ~PAGE_MASK;
+		d_off = dst_vaddr & ~PAGE_MASK;
+		len = min_t(size_t, (PAGE_SIZE - s_off), size);
+
+		ret = __sev_dbg_decrypt(kvm,
+				       __sme_page_pa(src_p[0]) + s_off,
+				       dst_vaddr, 0,
+				       __sme_page_pa(dst_p[0]) + d_off,
+				       len, &argp->error);
+
+		sev_unpin_memory(kvm, src_p, 1);
+		sev_unpin_memory(kvm, dst_p, 1);
+
+		if (ret)
+			goto err;
+
+		next_vaddr = vaddr + len;
+		dst_vaddr = dst_vaddr + len;
+		size -= len;
+	}
+err:
+	return ret;
+}
+
+static int sev_dbg_decrypt(struct kvm *kvm, struct kvm_sev_cmd *argp)
+{
+	return sev_dbg_crypt(kvm, argp, true);
+}
+
 static int svm_mem_enc_op(struct kvm *kvm, void __user *argp)
 {
 	struct kvm_sev_cmd sev_cmd;
@@ -6141,6 +6315,10 @@ static int svm_mem_enc_op(struct kvm *kvm, void __user *argp)
 		r = sev_guest_status(kvm, &sev_cmd);
 		break;
 	}
+	case KVM_SEV_DBG_DECRYPT: {
+		r = sev_dbg_decrypt(kvm, &sev_cmd);
+		break;
+	}
 	default:
 		break;
 	}
-- 
2.9.5

Thread overview: 56+ messages
2017-09-19 20:45 [Part2 PATCH v4 00/29] x86: Secure Encrypted Virtualization (AMD) Brijesh Singh
2017-09-19 20:45 ` [Part2 PATCH v4 01/29] Documentation/virtual/kvm: Add AMD Secure Encrypted Virtualization (SEV) Brijesh Singh
2017-09-29  6:54   ` Borislav Petkov
2017-10-02 11:52     ` Brijesh Singh
2017-10-02 13:21       ` Borislav Petkov
2017-10-04 10:11   ` [PATCH v5] " Borislav Petkov
2017-09-19 20:46 ` [Part2 PATCH v4 02/29] x86/CPU/AMD: Add the Secure Encrypted Virtualization CPU feature Brijesh Singh
2017-09-29 12:19   ` Borislav Petkov
2017-09-29 22:44     ` Brijesh Singh
2017-09-30  8:02       ` Borislav Petkov
2017-09-19 20:46 ` [Part2 PATCH v4 03/29] kvm: svm: prepare for new bit definition in nested_ctl Brijesh Singh
2017-09-19 20:46 ` [Part2 PATCH v4 04/29] kvm: svm: Add SEV feature definitions to KVM Brijesh Singh
2017-09-19 20:46 ` [Part2 PATCH v4 05/29] crypto: ccp: Add Platform Security Processor (PSP) device support Brijesh Singh
2017-09-29 15:16   ` Borislav Petkov
2017-09-29 16:23     ` [Part2 PATCH v4.1 05/30] " Brijesh Singh
2017-09-30  8:30       ` [PATCH] crypto: ccp: Build the AMD secure processor driver only with AMD CPU support Borislav Petkov
2017-09-30 14:06         ` Brijesh Singh
2017-09-30 15:53           ` Borislav Petkov
2017-09-30 15:55     ` [Part2 PATCH v4 05/29] crypto: ccp: Add Platform Security Processor (PSP) device support Brijesh Singh
2017-09-30 16:11       ` Borislav Petkov
2017-10-01 20:05         ` Brijesh Singh
2017-10-03 16:17           ` Borislav Petkov
2017-10-03 16:19             ` Brijesh Singh
2017-10-02 16:43   ` [Part2 Patch v4.2] " Brijesh Singh
2017-10-04  6:36     ` P J P
2017-10-04  9:15       ` Borislav Petkov
2017-09-19 20:46 ` [Part2 PATCH v4 06/29] ccp: crypto: Define SEV key management command id Brijesh Singh
2017-09-19 20:46 ` [Part2 PATCH v4 07/29] crypto: ccp: Add Secure Encrypted Virtualization (SEV) command support Brijesh Singh
2017-09-22 21:35   ` [PATCH] crypto: ccp: fix eno.cocci warnings kbuild test robot
2017-09-22 21:35   ` [Part2 PATCH v4 07/29] crypto: ccp: Add Secure Encrypted Virtualization (SEV) command support kbuild test robot
2017-09-19 20:46 ` [Part2 PATCH v4 08/29] KVM: SVM: Prepare to reserve asid for SEV guest Brijesh Singh
2017-09-19 20:46 ` [Part2 PATCH v4 09/29] KVM: SVM: Reserve ASID range " Brijesh Singh
2017-09-19 20:46 ` [Part2 PATCH v4 10/29] KVM: X86: Extend CPUID range to include new leaf Brijesh Singh
2017-09-19 20:46 ` [Part2 PATCH v4 11/29] KVM: Introduce KVM_MEMORY_ENCRYPT_OP ioctl Brijesh Singh
2017-09-19 20:46 ` [Part2 PATCH v4 12/29] KVM: Introduce KVM_MEMORY_ENCRYPT_REGISTER_REGION ioctl Brijesh Singh
2017-09-19 20:46 ` [Part2 PATCH v4 13/29] KVM: Define SEV key management command id Brijesh Singh
2017-09-19 20:46 ` [Part2 PATCH v4 14/29] KVM: SVM: Add KVM_SEV_INIT command Brijesh Singh
2017-09-19 20:46 ` [Part2 PATCH v4 15/29] KVM: SVM: VMRUN should use assosiated ASID when SEV is enabled Brijesh Singh
2017-09-19 20:46 ` [Part2 PATCH v4 16/29] KVM: SVM: Add support for SEV LAUNCH_START command Brijesh Singh
2017-09-22 22:13   ` [PATCH] KVM: SVM: fix eno.cocci warnings kbuild test robot
2017-09-22 22:13   ` [Part2 PATCH v4 16/29] KVM: SVM: Add support for SEV LAUNCH_START command kbuild test robot
2017-09-19 20:46 ` [Part2 PATCH v4 17/29] KVM: SVM: Add support for SEV LAUNCH_UPDATE_DATA command Brijesh Singh
2017-09-19 20:46 ` [Part2 PATCH v4 18/29] KVM: SVM: Add support for SEV LAUNCH_MEASURE command Brijesh Singh
2017-09-19 20:46 ` [Part2 PATCH v4 19/29] KVM: SVM: Add support for SEV LAUNCH_FINISH command Brijesh Singh
2017-09-19 20:46 ` [Part2 PATCH v4 20/29] KVM: svm: Add support for SEV GUEST_STATUS command Brijesh Singh
2017-09-19 20:46 ` Brijesh Singh [this message]
2017-09-19 20:46 ` [Part2 PATCH v4 22/29] KVM: SVM: Add support for SEV DEBUG_ENCRYPT command Brijesh Singh
2017-09-19 20:46 ` [Part2 PATCH v4 23/29] KVM: SVM: Add support for SEV LAUNCH_SECRET command Brijesh Singh
2017-09-19 20:46 ` [Part2 PATCH v4 24/29] KVM: SVM: Pin guest memory when SEV is active Brijesh Singh
2017-09-19 20:46 ` [Part2 PATCH v4 25/29] KVM: X86: Add memory encryption enabled ops Brijesh Singh
2017-09-19 20:46 ` [Part2 PATCH v4 26/29] KVM: SVM: Clear C-bit from the page fault address Brijesh Singh
2017-09-19 20:46 ` [Part2 PATCH v4 27/29] KVM: SVM: Do not install #UD intercept when SEV is enabled Brijesh Singh
2017-09-19 20:46 ` [Part2 PATCH v4 28/29] KVM: X86: Restart the guest when insn_len is zero and " Brijesh Singh
2017-09-19 20:46 ` [Part2 PATCH v4 29/29] KVM: X86: Add CONFIG_KVM_AMD_SEV Brijesh Singh
2017-09-22 20:37   ` kbuild test robot
2017-09-22 22:18   ` kbuild test robot
