From: isaku.yamahata@intel.com
To: kvm@vger.kernel.org, linux-kernel@vger.kernel.org
Cc: isaku.yamahata@intel.com, isaku.yamahata@gmail.com,
	Paolo Bonzini <pbonzini@redhat.com>,
	erdemaktas@google.com, Sean Christopherson <seanjc@google.com>,
	Sagi Shahar <sagis@google.com>
Subject: [RFC PATCH 03/13] KVM: TDX: Pass page level to cache flush before TDX SEAMCALL
Date: Sun,  7 Aug 2022 15:18:36 -0700
Message-ID: <01bfd080f85afffcffeba96692be726294faa18d.1659854957.git.isaku.yamahata@intel.com>
In-Reply-To: <cover.1659854957.git.isaku.yamahata@intel.com>

From: Xiaoyao Li <xiaoyao.li@intel.com>

tdh_mem_page_aug() will support 2MB large pages in the near future.  The
cache flush before the SEAMCALL then needs to cover 2MB instead of 4KB.
Introduce a helper function that takes the page level, so that the flush
size matches the page size, in preparation for large page support.

Signed-off-by: Xiaoyao Li <xiaoyao.li@intel.com>
Signed-off-by: Isaku Yamahata <isaku.yamahata@intel.com>
---
 arch/x86/kvm/vmx/tdx_ops.h | 22 ++++++++++++++--------
 1 file changed, 14 insertions(+), 8 deletions(-)
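
As an illustration for reviewers, below is a rough sketch of how a
2MB-aware caller is expected to use the new helper once the page level
is plumbed through.  This is hypothetical: the actual conversion of
tdh_mem_page_aug() happens later in this series (patch 04/13), and the
function name here is a placeholder.

/*
 * Hypothetical sketch, not part of this patch: once callers pass the
 * S-EPT level, the flush covers the whole mapping.  TDX S-EPT levels
 * start at 0 for 4KB pages while KVM's enum pg_level starts at
 * PG_LEVEL_4K == 1, hence the "+ 1" conversion.  Encoding the level
 * into the GPA operand follows the existing tdh_mem_sept_add() pattern.
 */
static inline u64 tdh_mem_page_aug_level(hpa_t tdr, gpa_t gpa, int level,
					 hpa_t hpa,
					 struct tdx_module_output *out)
{
	tdx_clflush_page(hpa, level + 1);
	return seamcall_sept_retry(TDH_MEM_PAGE_AUG, gpa | level, tdr, hpa,
				   0, out);
}

For a 2MB mapping this flushes KVM_HPAGE_SIZE(PG_LEVEL_2M) == 2MB worth
of cache lines instead of a single 4KB page.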

diff --git a/arch/x86/kvm/vmx/tdx_ops.h b/arch/x86/kvm/vmx/tdx_ops.h
index a50bc1445cc2..9accf2fe04ae 100644
--- a/arch/x86/kvm/vmx/tdx_ops.h
+++ b/arch/x86/kvm/vmx/tdx_ops.h
@@ -6,6 +6,7 @@
 
 #include <linux/compiler.h>
 
+#include <asm/pgtable_types.h>
 #include <asm/cacheflush.h>
 #include <asm/asm.h>
 #include <asm/kvm_host.h>
@@ -18,6 +19,11 @@
 
 void pr_tdx_error(u64 op, u64 error_code, const struct tdx_module_output *out);
 
+static inline void tdx_clflush_page(hpa_t addr, enum pg_level level)
+{
+	clflush_cache_range(__va(addr), KVM_HPAGE_SIZE(level));
+}
+
 /*
 * Although seamcall_lock protects seamcall to avoid contention inside the TDX
  * module, it doesn't protect TDH.VP.ENTER.  With zero-step attack mitigation,
@@ -40,21 +46,21 @@ static inline u64 seamcall_sept_retry(u64 op, u64 rcx, u64 rdx, u64 r8, u64 r9,
 
 static inline u64 tdh_mng_addcx(hpa_t tdr, hpa_t addr)
 {
-	clflush_cache_range(__va(addr), PAGE_SIZE);
+	tdx_clflush_page(addr, PG_LEVEL_4K);
 	return __seamcall(TDH_MNG_ADDCX, addr, tdr, 0, 0, NULL);
 }
 
 static inline u64 tdh_mem_page_add(hpa_t tdr, gpa_t gpa, hpa_t hpa, hpa_t source,
 				   struct tdx_module_output *out)
 {
-	clflush_cache_range(__va(hpa), PAGE_SIZE);
+	tdx_clflush_page(hpa, PG_LEVEL_4K);
 	return seamcall_sept_retry(TDH_MEM_PAGE_ADD, gpa, tdr, hpa, source, out);
 }
 
 static inline u64 tdh_mem_sept_add(hpa_t tdr, gpa_t gpa, int level, hpa_t page,
 				   struct tdx_module_output *out)
 {
-	clflush_cache_range(__va(page), PAGE_SIZE);
+	tdx_clflush_page(page, PG_LEVEL_4K);
 	return seamcall_sept_retry(TDH_MEM_SEPT_ADD, gpa | level, tdr, page, 0,
 				   out);
 }
@@ -67,21 +73,21 @@ static inline u64 tdh_mem_sept_remove(hpa_t tdr, gpa_t gpa, int level,
 
 static inline u64 tdh_vp_addcx(hpa_t tdvpr, hpa_t addr)
 {
-	clflush_cache_range(__va(addr), PAGE_SIZE);
+	tdx_clflush_page(addr, PG_LEVEL_4K);
 	return __seamcall(TDH_VP_ADDCX, addr, tdvpr, 0, 0, NULL);
 }
 
 static inline u64 tdh_mem_page_relocate(hpa_t tdr, gpa_t gpa, hpa_t hpa,
 					struct tdx_module_output *out)
 {
-	clflush_cache_range(__va(hpa), PAGE_SIZE);
+	tdx_clflush_page(hpa, PG_LEVEL_4K);
 	return __seamcall(TDH_MEM_PAGE_RELOCATE, gpa, tdr, hpa, 0, out);
 }
 
 static inline u64 tdh_mem_page_aug(hpa_t tdr, gpa_t gpa, hpa_t hpa,
 				   struct tdx_module_output *out)
 {
-	clflush_cache_range(__va(hpa), PAGE_SIZE);
+	tdx_clflush_page(hpa, PG_LEVEL_4K);
 	return seamcall_sept_retry(TDH_MEM_PAGE_AUG, gpa, tdr, hpa, 0, out);
 }
 
@@ -99,13 +105,13 @@ static inline u64 tdh_mng_key_config(hpa_t tdr)
 
 static inline u64 tdh_mng_create(hpa_t tdr, int hkid)
 {
-	clflush_cache_range(__va(tdr), PAGE_SIZE);
+	tdx_clflush_page(tdr, PG_LEVEL_4K);
 	return __seamcall(TDH_MNG_CREATE, tdr, hkid, 0, 0, NULL);
 }
 
 static inline u64 tdh_vp_create(hpa_t tdr, hpa_t tdvpr)
 {
-	clflush_cache_range(__va(tdvpr), PAGE_SIZE);
+	tdx_clflush_page(tdvpr, PG_LEVEL_4K);
 	return __seamcall(TDH_VP_CREATE, tdvpr, tdr, 0, 0, NULL);
 }
 
-- 
2.25.1


Thread overview: 16+ messages
2022-08-07 22:18 [RFC PATCH 00/13] KVM TDX: TDP MMU: large page support isaku.yamahata
2022-08-07 22:18 ` [RFC PATCH 01/13] KVM: Update lpage info when private/shared memory are mixed isaku.yamahata
2022-08-07 22:18 ` [RFC PATCH 02/13] KVM: TDP_MMU: Go to next level if smaller private mapping exists isaku.yamahata
2022-08-07 22:18 ` isaku.yamahata [this message]
2022-08-07 22:18 ` [RFC PATCH 04/13] KVM: TDX: Pass KVM page level to tdh_mem_page_add() and tdh_mem_page_aug() isaku.yamahata
2022-08-07 22:18 ` [RFC PATCH 05/13] KVM: TDX: Pass size to tdx_measure_page() isaku.yamahata
2022-08-07 22:18 ` [RFC PATCH 06/13] KVM: TDX: Pass size to reclaim_page() isaku.yamahata
2022-08-07 22:18 ` [RFC PATCH 07/13] KVM: TDX: Update tdx_sept_{set,drop}_private_spte() to support large page isaku.yamahata
2022-08-07 22:18 ` [RFC PATCH 08/13] KVM: MMU: Introduce level info in PFERR code isaku.yamahata
2022-08-07 22:18 ` [RFC PATCH 09/13] KVM: TDX: Pin pages via get_page() right before ADD/AUG'ed to TDs isaku.yamahata
2022-08-07 22:18 ` [RFC PATCH 10/13] KVM: MMU: Pass desired page level in err code for page fault handler isaku.yamahata
2022-08-07 22:18 ` [RFC PATCH 11/13] KVM: TDP_MMU: Split the large page when zap leaf isaku.yamahata
2022-08-07 22:18 ` [RFC PATCH 12/13] KVM: TDX: Split a large page when 4KB page within it converted to shared isaku.yamahata
2022-08-07 22:18 ` [RFC PATCH 13/13] KVM: x86: remove struct kvm_arch.tdp_max_page_level isaku.yamahata
2022-08-08  5:40   ` Xiaoyao Li
2022-08-11 22:56     ` Isaku Yamahata
