From: Brijesh Singh <brijesh.singh@amd.com>
To: linux-kernel@vger.kernel.org, x86@kernel.org, kvm@vger.kernel.org
Cc: "Thomas Gleixner" <tglx@linutronix.de>,
"Borislav Petkov" <bp@suse.de>, "Joerg Roedel" <joro@8bytes.org>,
"Michael S . Tsirkin" <mst@redhat.com>,
"Paolo Bonzini" <pbonzini@redhat.com>,
"Radim Krčmář" <rkrcmar@redhat.com>,
"Tom Lendacky" <thomas.lendacky@amd.com>,
"Brijesh Singh" <brijesh.singh@amd.com>
Subject: [RFC Part2 PATCH v3 21/26] KVM: SVM: Add support for SEV DEBUG_ENCRYPT command
Date: Mon, 24 Jul 2017 15:02:58 -0500 [thread overview]
Message-ID: <20170724200303.12197-22-brijesh.singh@amd.com> (raw)
In-Reply-To: <20170724200303.12197-1-brijesh.singh@amd.com>
The command copies plaintext into guest memory and encrypts it using
the VM encryption key. The command will be used for debug purposes
(e.g. setting a breakpoint through gdbserver).
Signed-off-by: Brijesh Singh <brijesh.singh@amd.com>
---
arch/x86/kvm/svm.c | 174 +++++++++++++++++++++++++++++++++++++++++++++++++++++
1 file changed, 174 insertions(+)
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 933384a..75dcaa9 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -6214,6 +6214,176 @@ static int sev_dbg_decrypt(struct kvm *kvm, struct kvm_sev_cmd *argp)
return ret;
}
+/*
+ * __sev_dbg_enc() - encrypt one span of guest memory with the VM encryption
+ * key via the SEV DEBUG_ENCRYPT firmware command.
+ *
+ * @kvm:       the SEV guest
+ * @vaddr:     userspace virtual address of the plaintext source
+ * @paddr:     system physical address backing @vaddr (presumably includes the
+ *             SME C-bit via __sme_page_pa() — confirm against caller)
+ * @dst_vaddr: userspace virtual address of the encrypted destination
+ * @dst_paddr: system physical address backing @dst_vaddr
+ * @size:      number of bytes to encrypt; caller limits this to one page
+ * @error:     out parameter receiving the SEV firmware error code
+ *
+ * Returns 0 on success, negative errno on failure.
+ */
+static int __sev_dbg_enc(struct kvm *kvm, unsigned long __user vaddr,
+ unsigned long paddr, unsigned long __user dst_vaddr,
+ unsigned long dst_paddr, int size, int *error)
+{
+ struct page *src_tpage = NULL;
+ struct page *dst_tpage = NULL;
+ int ret, len = size;
+
+ /*
+ * Debug encrypt command works with 16-byte aligned inputs. Function
+ * handles the alignment issue as below:
+ *
+ * case 1
+ * If source buffer is not 16-byte aligned then we copy the data from
+ * source buffer into a PAGE aligned intermediate (src_tpage) buffer
+ * and use this intermediate buffer as source buffer
+ *
+ * case 2
+ * If destination buffer or length is not 16-byte aligned then:
+ * - decrypt portion of destination buffer into intermediate buffer
+ * (dst_tpage)
+ * - copy the source data into intermediate buffer
+ * - use the intermediate buffer as source buffer
+ */
+
+ /* If source is not aligned (case 1) */
+ if (!IS_ALIGNED(vaddr, 16)) {
+ src_tpage = alloc_page(GFP_KERNEL);
+ if (!src_tpage)
+ return -ENOMEM;
+
+ if (copy_from_user(page_address(src_tpage),
+ (uint8_t *)vaddr, size)) {
+ __free_page(src_tpage);
+ return -EFAULT;
+ }
+ /* The page-aligned bounce buffer becomes the command's source. */
+ paddr = __sme_page_pa(src_tpage);
+
+ /* flush the caches to ensure that DRAM has recent contents */
+ clflush_cache_range(page_address(src_tpage), PAGE_SIZE);
+ }
+
+ /* If destination buffer or length is not aligned (case 2) */
+ if (!IS_ALIGNED(dst_vaddr, 16) || !IS_ALIGNED(size, 16)) {
+ int dst_offset;
+
+ dst_tpage = alloc_page(GFP_KERNEL);
+ if (!dst_tpage) {
+ ret = -ENOMEM;
+ goto e_free;
+ }
+
+ /*
+ * decrypt destination buffer into intermediate buffer.
+ * Round down/up to the surrounding 16-byte-aligned region so
+ * the firmware sees aligned inputs; bytes outside [dst, dst+size)
+ * are decrypted and later re-encrypted unchanged.
+ */
+ ret = __sev_dbg_dec(kvm,
+ round_down(dst_paddr, 16),
+ 0,
+ (unsigned long)page_address(dst_tpage),
+ __sme_page_pa(dst_tpage),
+ round_up(size, 16),
+ error);
+ if (ret)
+ goto e_free;
+
+ /* offset of the real destination within the aligned region */
+ dst_offset = dst_paddr & 15;
+
+ /*
+ * modify the intermediate buffer with data from source
+ * buffer.
+ */
+ if (src_tpage)
+ memcpy(page_address(dst_tpage) + dst_offset,
+ page_address(src_tpage), size);
+ else {
+ if (copy_from_user(page_address(dst_tpage) + dst_offset,
+ (void *) vaddr, size)) {
+ ret = -EFAULT;
+ goto e_free;
+ }
+ }
+
+
+ /* use intermediate buffer as source */
+ paddr = __sme_page_pa(dst_tpage);
+
+ /* flush the caches to ensure that DRAM gets recent updates */
+ clflush_cache_range(page_address(dst_tpage), PAGE_SIZE);
+
+ /* now we have length and destination buffer aligned */
+ dst_paddr = round_down(dst_paddr, 16);
+ len = round_up(size, 16);
+ }
+
+ ret = __sev_dbg_enc_dec(kvm, paddr, dst_paddr, len, error, true);
+e_free:
+ if (src_tpage)
+ __free_page(src_tpage);
+ if (dst_tpage)
+ __free_page(dst_tpage);
+ return ret;
+}
+
+/*
+ * sev_dbg_encrypt() - handle the KVM_SEV_DBG_ENCRYPT ioctl command.
+ *
+ * Copies userspace plaintext (debug.src_addr) into guest memory
+ * (debug.dst_addr), encrypting it with the VM encryption key. Walks the
+ * request one source page at a time, pinning the source and destination
+ * pages around each __sev_dbg_enc() call.
+ *
+ * Returns 0 on success (including a zero-length request, which is a no-op),
+ * negative errno on failure; the firmware error code is reported through
+ * argp->error.
+ */
+static int sev_dbg_encrypt(struct kvm *kvm, struct kvm_sev_cmd *argp)
+{
+	unsigned long vaddr, vaddr_end, dst_vaddr, next_vaddr;
+	struct kvm_sev_dbg debug;
+	int ret = 0, size;	/* init ret: a zero-length request skips the loop */
+
+	if (!sev_guest(kvm))
+		return -ENOTTY;
+
+	if (copy_from_user(&debug, (void *)argp->data,
+			   sizeof(struct kvm_sev_dbg)))
+		return -EFAULT;
+
+	size = debug.length;
+	vaddr = debug.src_addr;
+	vaddr_end = vaddr + size;
+	dst_vaddr = debug.dst_addr;
+
+	for (; vaddr < vaddr_end; vaddr = next_vaddr) {
+		unsigned long n;
+		int s_off, d_off, len;
+		struct page **srcpage, **dstpage;
+
+		/* lock the user memory */
+		srcpage = sev_pin_memory(vaddr & PAGE_MASK, PAGE_SIZE, &n, 0);
+		if (!srcpage)
+			return -EFAULT;
+
+		dstpage = sev_pin_memory(dst_vaddr & PAGE_MASK, PAGE_SIZE,
+					 &n, 1);
+		if (!dstpage) {
+			sev_unpin_memory(srcpage, 1);
+			return -EFAULT;
+		}
+
+		/* flush the caches to ensure that DRAM has recent contents */
+		sev_clflush_pages(srcpage, 1);
+		sev_clflush_pages(dstpage, 1);
+
+		/*
+		 * since user buffer may not be page aligned, calculate the
+		 * offset within the page.
+		 */
+		s_off = vaddr & ~PAGE_MASK;
+		d_off = dst_vaddr & ~PAGE_MASK;
+		/*
+		 * NOTE(review): len is bounded only by the source-page
+		 * remainder; if d_off > s_off the write may cross the single
+		 * pinned destination page — confirm whether callers guarantee
+		 * matching offsets, else also clamp by (PAGE_SIZE - d_off).
+		 */
+		len = min_t(size_t, (PAGE_SIZE - s_off), size);
+
+		ret = __sev_dbg_enc(kvm,
+				    vaddr,
+				    __sme_page_pa(srcpage[0]) + s_off,
+				    dst_vaddr,
+				    __sme_page_pa(dstpage[0]) + d_off,
+				    len, &argp->error);
+
+		/* unlock the user memory */
+		sev_unpin_memory(srcpage, 1);
+		sev_unpin_memory(dstpage, 1);
+
+		if (ret)
+			goto err;
+
+		next_vaddr = vaddr + len;
+		dst_vaddr = dst_vaddr + len;
+		size -= len;
+	}
+err:
+	return ret;
+}
+
static int svm_memory_encryption_op(struct kvm *kvm, void __user *argp)
{
struct kvm_sev_cmd sev_cmd;
@@ -6253,6 +6423,10 @@ static int svm_memory_encryption_op(struct kvm *kvm, void __user *argp)
r = sev_dbg_decrypt(kvm, &sev_cmd);
break;
}
+ case KVM_SEV_DBG_ENCRYPT: {
+ r = sev_dbg_encrypt(kvm, &sev_cmd);
+ break;
+ }
default:
break;
}
--
2.9.4
next prev parent reply other threads:[~2017-07-24 20:11 UTC|newest]
Thread overview: 89+ messages / expand[flat|nested] mbox.gz Atom feed top
2017-07-24 20:02 [RFC Part2 PATCH v3 00/26] x86: Secure Encrypted Virtualization (AMD) Brijesh Singh
2017-07-24 20:02 ` [RFC Part2 PATCH v3 01/26] Documentation/virtual/kvm: Add AMD Secure Encrypted Virtualization (SEV) Brijesh Singh
2017-09-05 17:21 ` Borislav Petkov
2017-09-05 21:39 ` Brijesh Singh
2017-09-05 22:06 ` Borislav Petkov
2017-09-06 16:41 ` Borislav Petkov
2017-09-06 20:54 ` Brijesh Singh
2017-07-24 20:02 ` [RFC Part2 PATCH v3 02/26] crypto: ccp: Add Platform Security Processor (PSP) device support Brijesh Singh
2017-07-25 8:29 ` Kamil Konieczny
2017-07-25 15:00 ` Brijesh Singh
2017-09-06 17:00 ` Borislav Petkov
2017-09-06 20:38 ` Brijesh Singh
2017-09-06 20:46 ` Borislav Petkov
2017-09-06 21:26 ` Gary R Hook
2017-09-07 10:34 ` Borislav Petkov
2017-09-07 14:27 ` Borislav Petkov
2017-09-07 22:19 ` Brijesh Singh
2017-09-07 23:15 ` Gary R Hook
2017-09-08 8:22 ` Borislav Petkov
2017-09-08 8:40 ` Borislav Petkov
2017-09-08 13:54 ` Brijesh Singh
2017-09-08 16:06 ` Brijesh Singh
2017-07-24 20:02 ` [RFC Part2 PATCH v3 03/26] crypto: ccp: Add Secure Encrypted Virtualization (SEV) " Brijesh Singh
2017-09-12 14:02 ` Borislav Petkov
2017-09-12 15:32 ` Brijesh Singh
2017-09-12 16:29 ` Borislav Petkov
2017-09-13 14:17 ` Borislav Petkov
2017-09-13 15:18 ` Brijesh Singh
2017-07-24 20:02 ` [RFC Part2 PATCH v3 04/26] KVM: SVM: Prepare to reserve asid for SEV guest Brijesh Singh
2017-09-12 19:54 ` Borislav Petkov
2017-07-24 20:02 ` [RFC Part2 PATCH v3 05/26] KVM: SVM: Reserve ASID range " Brijesh Singh
2017-09-12 20:04 ` Borislav Petkov
2017-09-12 20:24 ` Brijesh Singh
2017-09-12 20:28 ` Borislav Petkov
2017-07-24 20:02 ` [RFC Part2 PATCH v3 06/26] KVM: SVM: Prepare for new bit definition in nested_ctl Brijesh Singh
2017-09-12 20:06 ` Borislav Petkov
2017-07-24 20:02 ` [RFC Part2 PATCH v3 07/26] KVM: SVM: Add SEV feature definitions to KVM Brijesh Singh
2017-09-12 20:08 ` Borislav Petkov
2017-07-24 20:02 ` [RFC Part2 PATCH v3 08/26] KVM: X86: Extend CPUID range to include new leaf Brijesh Singh
2017-09-12 20:12 ` Borislav Petkov
2017-07-24 20:02 ` [RFC Part2 PATCH v3 09/26] KVM: Introduce KVM_MEMORY_ENCRYPT_OP ioctl Brijesh Singh
2017-09-12 20:19 ` Borislav Petkov
2017-07-24 20:02 ` [RFC Part2 PATCH v3 10/26] KVM: Introduce KVM_MEMORY_ENCRYPT_REGISTER/UNREGISTER_RAM ioctl Brijesh Singh
2017-09-12 20:29 ` Borislav Petkov
2017-09-12 20:50 ` Brijesh Singh
2017-09-12 21:08 ` Borislav Petkov
2017-07-24 20:02 ` [RFC Part2 PATCH v3 11/26] KVM: X86: Extend struct kvm_arch to include SEV information Brijesh Singh
2017-09-13 13:37 ` Borislav Petkov
2017-09-13 15:14 ` Brijesh Singh
2017-09-13 15:21 ` Borislav Petkov
2017-07-24 20:02 ` [RFC Part2 PATCH v3 12/26] KVM: Define SEV key management command id Brijesh Singh
2017-09-13 13:45 ` Borislav Petkov
2017-07-24 20:02 ` [RFC Part2 PATCH v3 13/26] KVM: SVM: Add KVM_SEV_INIT command Brijesh Singh
2017-09-13 15:06 ` Borislav Petkov
2017-09-13 16:23 ` Brijesh Singh
2017-09-13 16:37 ` Borislav Petkov
2017-07-24 20:02 ` [RFC Part2 PATCH v3 14/26] KVM: SVM: VMRUN should use assosiated ASID when SEV is enabled Brijesh Singh
2017-09-13 15:37 ` Borislav Petkov
2017-07-24 20:02 ` [RFC Part2 PATCH v3 15/26] KVM: SVM: Add support for SEV LAUNCH_START command Brijesh Singh
2017-09-13 17:25 ` Borislav Petkov
2017-09-13 18:23 ` Brijesh Singh
2017-09-13 18:37 ` Borislav Petkov
2017-09-13 18:58 ` Brijesh Singh
2017-09-13 21:02 ` Borislav Petkov
2017-07-24 20:02 ` [RFC Part2 PATCH v3 16/26] KVM: SVM: Add support for SEV LAUNCH_UPDATE_DATA command Brijesh Singh
2017-09-13 17:55 ` Borislav Petkov
2017-09-13 19:45 ` Brijesh Singh
2017-09-13 21:07 ` Borislav Petkov
2017-07-24 20:02 ` [RFC Part2 PATCH v3 17/26] KVM: SVM: Add support for SEV LAUNCH_MEASURE command Brijesh Singh
2017-09-14 10:20 ` Borislav Petkov
2017-07-24 20:02 ` [RFC Part2 PATCH v3 18/26] KVM: SVM: Add support for SEV LAUNCH_FINISH command Brijesh Singh
2017-09-14 10:24 ` Borislav Petkov
2017-07-24 20:02 ` [RFC Part2 PATCH v3 19/26] KVM: svm: Add support for SEV GUEST_STATUS command Brijesh Singh
2017-09-14 10:35 ` Borislav Petkov
2017-09-14 11:25 ` Brijesh Singh
2017-07-24 20:02 ` [RFC Part2 PATCH v3 20/26] KVM: SVM: Add support for SEV DEBUG_DECRYPT command Brijesh Singh
2017-09-14 11:08 ` Borislav Petkov
2017-07-24 20:02 ` Brijesh Singh [this message]
2017-09-14 13:32 ` [RFC Part2 PATCH v3 21/26] KVM: SVM: Add support for SEV DEBUG_ENCRYPT command Borislav Petkov
2017-07-24 20:02 ` [RFC Part2 PATCH v3 22/26] KVM: SVM: Pin guest memory when SEV is active Brijesh Singh
2017-09-14 14:00 ` Borislav Petkov
2017-07-24 20:03 ` [RFC Part2 PATCH v3 23/26] KVM: X86: Add memory encryption enabled ops Brijesh Singh
2017-09-14 14:09 ` Borislav Petkov
2017-07-24 20:03 ` [RFC Part2 PATCH v3 24/26] KVM: SVM: Clear C-bit from the page fault address Brijesh Singh
2017-09-14 14:35 ` Borislav Petkov
2017-07-24 20:03 ` [RFC Part2 PATCH v3 25/26] KVM: SVM: Do not install #UD intercept when SEV is enabled Brijesh Singh
2017-09-14 14:56 ` Borislav Petkov
2017-07-24 20:03 ` [RFC Part2 PATCH v3 26/26] KVM: X86: Restart the guest when insn_len is zero and " Brijesh Singh
2017-09-14 15:40 ` Borislav Petkov
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20170724200303.12197-22-brijesh.singh@amd.com \
--to=brijesh.singh@amd.com \
--cc=bp@suse.de \
--cc=joro@8bytes.org \
--cc=kvm@vger.kernel.org \
--cc=linux-kernel@vger.kernel.org \
--cc=mst@redhat.com \
--cc=pbonzini@redhat.com \
--cc=rkrcmar@redhat.com \
--cc=tglx@linutronix.de \
--cc=thomas.lendacky@amd.com \
--cc=x86@kernel.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).