From: Bharata B Rao <bharata@linux.ibm.com>
To: Ram Pai <linuxram@us.ibm.com>
Cc: linuxppc-dev@lists.ozlabs.org, kvm-ppc@vger.kernel.org,
farosas@linux.ibm.com
Subject: Re: [RFC v1 2/2] KVM: PPC: Book3S HV: abstract secure VM related calls.
Date: Mon, 12 Oct 2020 20:58:36 +0530 [thread overview]
Message-ID: <20201012152836.GK185637@in.ibm.com> (raw)
In-Reply-To: <1602487663-7321-3-git-send-email-linuxram@us.ibm.com>
On Mon, Oct 12, 2020 at 12:27:43AM -0700, Ram Pai wrote:
> Abstract the secure VM related calls into generic calls.
>
> These generic calls will call the corresponding method of the
> backend that provides the implementation to support secure VM.
>
> Currently there is only the ultravisor based implementation.
> Modify that implementation to act as a backend to the generic calls.
>
> This plumbing will provide the flexibility to add more backends
> in the future.
>
> Signed-off-by: Ram Pai <linuxram@us.ibm.com>
> ---
> arch/powerpc/include/asm/kvm_book3s_uvmem.h | 100 -----------
> arch/powerpc/include/asm/kvmppc_svm_backend.h | 250 ++++++++++++++++++++++++++
> arch/powerpc/kvm/book3s_64_mmu_radix.c | 6 +-
> arch/powerpc/kvm/book3s_hv.c | 28 +--
> arch/powerpc/kvm/book3s_hv_uvmem.c | 78 ++++++--
> 5 files changed, 327 insertions(+), 135 deletions(-)
> delete mode 100644 arch/powerpc/include/asm/kvm_book3s_uvmem.h
> create mode 100644 arch/powerpc/include/asm/kvmppc_svm_backend.h
>
> diff --git a/arch/powerpc/include/asm/kvmppc_svm_backend.h b/arch/powerpc/include/asm/kvmppc_svm_backend.h
> new file mode 100644
> index 0000000..be60d80
> --- /dev/null
> +++ b/arch/powerpc/include/asm/kvmppc_svm_backend.h
> @@ -0,0 +1,250 @@
> +/* SPDX-License-Identifier: GPL-2.0-only */
> +/*
> + *
> + * Copyright IBM Corp. 2020
> + *
> + * Authors: Ram Pai <linuxram@us.ibm.com>
> + */
> +
> +#ifndef __POWERPC_KVMPPC_SVM_BACKEND_H__
> +#define __POWERPC_KVMPPC_SVM_BACKEND_H__
> +
> +#include <linux/mutex.h>
> +#include <linux/timer.h>
> +#include <linux/types.h>
> +#include <linux/kvm_types.h>
> +#include <linux/kvm_host.h>
> +#include <linux/bug.h>
> +#ifdef CONFIG_PPC_BOOK3S
> +#include <asm/kvm_book3s.h>
> +#else
> +#include <asm/kvm_booke.h>
> +#endif
> +#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
> +#include <asm/paca.h>
> +#include <asm/xive.h>
> +#include <asm/cpu_has_feature.h>
> +#endif
> +
> +struct kvmppc_hmm_backend {
Though we started with HMM initially, what we ended up with eventually
has nothing to do with HMM. Please don't introduce hmm again :-)
> + /* initialize */
> + int (*kvmppc_secmem_init)(void);
> +
> + /* cleanup */
> + void (*kvmppc_secmem_free)(void);
> +
> + /* is memory available */
> + bool (*kvmppc_secmem_available)(void);
> +
> + /* allocate a protected/secure page for the secure VM */
> + unsigned long (*kvmppc_svm_page_in)(struct kvm *kvm,
> + unsigned long gra,
> + unsigned long flags,
> + unsigned long page_shift);
> +
> + /* recover the protected/secure page from the secure VM */
> + unsigned long (*kvmppc_svm_page_out)(struct kvm *kvm,
> + unsigned long gra,
> + unsigned long flags,
> + unsigned long page_shift);
> +
> + /* initiate the transition of a VM to secure VM */
> + unsigned long (*kvmppc_svm_init_start)(struct kvm *kvm);
> +
> + /* finalize the transition of a secure VM */
> + unsigned long (*kvmppc_svm_init_done)(struct kvm *kvm);
> +
> + /* share the page on page fault */
> + int (*kvmppc_svm_page_share)(struct kvm *kvm, unsigned long gfn);
> +
> + /* abort the transition to a secure VM */
> + unsigned long (*kvmppc_svm_init_abort)(struct kvm *kvm);
> +
> + /* add a memory slot */
> + int (*kvmppc_svm_memslot_create)(struct kvm *kvm,
> + const struct kvm_memory_slot *new);
> +
> + /* free a memory slot */
> + void (*kvmppc_svm_memslot_delete)(struct kvm *kvm,
> + const struct kvm_memory_slot *old);
> +
> + /* drop pages allocated to the secure VM */
> + void (*kvmppc_svm_drop_pages)(const struct kvm_memory_slot *free,
> + struct kvm *kvm, bool skip_page_out);
> +};
Since the structure name already has the kvmppc_ prefix, maybe you can
drop it from the member names to make the field names shorter?
> +
> +extern const struct kvmppc_hmm_backend *kvmppc_svm_backend;
> +
> +static inline int kvmppc_svm_page_share(struct kvm *kvm, unsigned long gfn)
> +{
> + if (!kvmppc_svm_backend)
> + return -ENODEV;
> +
> + return kvmppc_svm_backend->kvmppc_svm_page_share(kvm,
> + gfn);
> +}
> +
> +static inline void kvmppc_svm_drop_pages(const struct kvm_memory_slot *memslot,
> + struct kvm *kvm, bool skip_page_out)
> +{
> + if (!kvmppc_svm_backend)
> + return;
> +
> + kvmppc_svm_backend->kvmppc_svm_drop_pages(memslot,
> + kvm, skip_page_out);
> +}
> +
> +static inline int kvmppc_svm_page_in(struct kvm *kvm,
> + unsigned long gpa,
> + unsigned long flags,
> + unsigned long page_shift)
> +{
> + if (!kvmppc_svm_backend)
> + return -ENODEV;
> +
> + return kvmppc_svm_backend->kvmppc_svm_page_in(kvm,
> + gpa, flags, page_shift);
> +}
> +
> +static inline int kvmppc_svm_page_out(struct kvm *kvm,
> + unsigned long gpa,
> + unsigned long flags,
> + unsigned long page_shift)
> +{
> + if (!kvmppc_svm_backend)
> + return -ENODEV;
> +
> + return kvmppc_svm_backend->kvmppc_svm_page_out(kvm,
> + gpa, flags, page_shift);
> +}
> +
> +static inline int kvmppc_svm_init_start(struct kvm *kvm)
> +{
> + if (!kvmppc_svm_backend)
> + return -ENODEV;
> +
> + return kvmppc_svm_backend->kvmppc_svm_init_start(kvm);
> +}
> +
> +static inline int kvmppc_svm_init_done(struct kvm *kvm)
> +{
> + if (!kvmppc_svm_backend)
> + return -ENODEV;
> +
> + return kvmppc_svm_backend->kvmppc_svm_init_done(kvm);
> +}
> +
> +static inline int kvmppc_svm_init_abort(struct kvm *kvm)
> +{
> + if (!kvmppc_svm_backend)
> + return -ENODEV;
> +
> + return kvmppc_svm_backend->kvmppc_svm_init_abort(kvm);
> +}
> +
> +static inline void kvmppc_svm_memslot_create(struct kvm *kvm,
> + const struct kvm_memory_slot *memslot)
> +{
> + if (!kvmppc_svm_backend)
> + return;
> +
> + kvmppc_svm_backend->kvmppc_svm_memslot_create(kvm,
> + memslot);
> +}
> +
> +static inline void kvmppc_svm_memslot_delete(struct kvm *kvm,
> + const struct kvm_memory_slot *memslot)
> +{
> + if (!kvmppc_svm_backend)
> + return;
> +
> + kvmppc_svm_backend->kvmppc_svm_memslot_delete(kvm,
> + memslot);
> +}
> +
> +static inline int kvmppc_secmem_init(void)
> +{
> +#ifdef CONFIG_PPC_UV
> + extern const struct kvmppc_hmm_backend kvmppc_uvmem_backend;
> +
> + kvmppc_svm_backend = NULL;
> + if (kvmhv_on_pseries()) {
> + /* @TODO add the protected memory backend */
> + return 0;
> + }
> +
> + kvmppc_svm_backend = &kvmppc_uvmem_backend;
> +
> + if (!kvmppc_svm_backend->kvmppc_secmem_init) {
You have a function named kvmppc_secmem_init() and a callback field
with the same name, which can be confusing.
> + pr_err("KVM-HV: kvmppc_svm_backend has no %s\n", __func__);
> + goto err;
> + }
> + if (!kvmppc_svm_backend->kvmppc_secmem_free) {
> + pr_err("KVM-HV: kvmppc_svm_backend has no kvmppc_secmem_free()\n");
> + goto err;
> + }
> + if (!kvmppc_svm_backend->kvmppc_secmem_available) {
> + pr_err("KVM-HV: kvmppc_svm_backend has no kvmppc_secmem_available()\n");
> + goto err;
> + }
> + if (!kvmppc_svm_backend->kvmppc_svm_page_in) {
> + pr_err("KVM-HV: kvmppc_svm_backend has no kvmppc_svm_page_in()\n");
> + goto err;
> + }
> + if (!kvmppc_svm_backend->kvmppc_svm_page_out) {
> + pr_err("KVM-HV: kvmppc_svm_backend has no kvmppc_svm_page_out()\n");
> + goto err;
> + }
> + if (!kvmppc_svm_backend->kvmppc_svm_init_start) {
> + pr_err("KVM-HV: kvmppc_svm_backend has no kvmppc_svm_init_start()\n");
> + goto err;
> + }
> + if (!kvmppc_svm_backend->kvmppc_svm_init_done) {
> + pr_err("KVM-HV: kvmppc_svm_backend has no kvmppc_svm_init_done()\n");
> + goto err;
> + }
> + if (!kvmppc_svm_backend->kvmppc_svm_page_share) {
> + pr_err("KVM-HV: kvmppc_svm_backend has no kvmppc_svm_page_share()\n");
> + goto err;
> + }
> + if (!kvmppc_svm_backend->kvmppc_svm_init_abort) {
> + pr_err("KVM-HV: kvmppc_svm_backend has no kvmppc_svm_init_abort()\n");
> + goto err;
> + }
> + if (!kvmppc_svm_backend->kvmppc_svm_memslot_create) {
> + pr_err("KVM-HV: kvmppc_svm_backend has no kvmppc_svm_memslot_create()\n");
> + goto err;
> + }
> + if (!kvmppc_svm_backend->kvmppc_svm_memslot_delete) {
> + pr_err("KVM-HV: kvmppc_svm_backend has no kvmppc_svm_memslot_delete()\n");
> + goto err;
> + }
> + if (!kvmppc_svm_backend->kvmppc_svm_drop_pages) {
> + pr_err("KVM-HV: kvmppc_svm_backend has no kvmppc_svm_drop_pages()\n");
> + goto err;
> + }
Do you really need to check each and every callback like above?
If so, may be the check can be optimized?
Regards,
Bharata.
next prev parent reply other threads:[~2020-10-12 15:31 UTC|newest]
Thread overview: 7+ messages / expand[flat|nested] mbox.gz Atom feed top
2020-10-12 7:27 [RFC v1 0/2] Plumbing to support multiple secure memory backends Ram Pai
2020-10-12 7:27 ` [RFC v1 1/2] KVM: PPC: Book3S HV: rename all variables in book3s_hv_uvmem.c Ram Pai
2020-10-12 7:27 ` [RFC v1 2/2] KVM: PPC: Book3S HV: abstract secure VM related calls Ram Pai
2020-10-12 15:28 ` Bharata B Rao [this message]
2020-10-12 16:13 ` Ram Pai
2020-10-14 6:31 ` [RFC v1 0/2] Plumbing to support multiple secure memory backends Christoph Hellwig
2020-10-14 19:30 ` Ram Pai
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20201012152836.GK185637@in.ibm.com \
--to=bharata@linux.ibm.com \
--cc=farosas@linux.ibm.com \
--cc=kvm-ppc@vger.kernel.org \
--cc=linuxppc-dev@lists.ozlabs.org \
--cc=linuxram@us.ibm.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).