From: isaku.yamahata@intel.com
To: kvm@vger.kernel.org, linux-kernel@vger.kernel.org
Cc: isaku.yamahata@intel.com, isaku.yamahata@gmail.com,
	Paolo Bonzini <pbonzini@redhat.com>,
	erdemaktas@google.com, Sean Christopherson <seanjc@google.com>,
	Sagi Shahar <sagis@google.com>, Kai Huang <kai.huang@intel.com>,
	chen.bo@intel.com, hang.yuan@intel.com, tina.zhang@intel.com,
	Xiaoyao Li <xiaoyao.li@intel.com>,
	Binbin Wu <binbin.wu@linux.intel.com>
Subject: [PATCH v8 02/14] KVM: TDX: Flush cache based on page size before TDX SEAMCALL
Date: Mon, 26 Feb 2024 00:29:16 -0800
Message-ID: <544c18765a2778afc4d3964629f116985554d1ee.1708933624.git.isaku.yamahata@intel.com>
In-Reply-To: <cover.1708933624.git.isaku.yamahata@intel.com>

From: Xiaoyao Li <xiaoyao.li@intel.com>

tdh_mem_page_aug() will support 2MB large pages in the near future.  In
that case the cache flush also needs to cover 2MB instead of 4KB.
Introduce a helper function that flushes the cache based on the page size,
in preparation for large pages.

Signed-off-by: Xiaoyao Li <xiaoyao.li@intel.com>
Signed-off-by: Isaku Yamahata <isaku.yamahata@intel.com>
Reviewed-by: Binbin Wu <binbin.wu@linux.intel.com>
---
v6:
- Catch up with the tdx_seamcall() change
---
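Note (illustration only, not part of the diff below): once a later patch in
this series passes the KVM page level down to tdh_mem_page_aug(), the caller
is expected to look roughly like the sketch below.  The added "level"
parameter, the argument struct layout, and the way the level is folded into
the GPA operand are assumptions for illustration, not the actual follow-up
patch.

static inline u64 tdh_mem_page_aug_sketch(hpa_t tdr, gpa_t gpa,
					  enum pg_level level, hpa_t hpa,
					  struct tdx_module_args *out)
{
	struct tdx_module_args in = {
		/* Assumed encoding: SEPT level in the low bits of the GPA. */
		.rcx = gpa | pg_level_to_tdx_sept_level(level),
		.rdx = tdr,
		.r8 = hpa,
	};

	/* Flush 4KB or 2MB depending on the mapping level. */
	tdx_clflush_page(hpa, level);
	return tdx_seamcall_sept(TDH_MEM_PAGE_AUG, &in, out);
}

Since KVM_HPAGE_SIZE(PG_LEVEL_4K) is 4KB and KVM_HPAGE_SIZE(PG_LEVEL_2M) is
2MB, the helper keeps the flushed range in sync with the mapping size.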
 arch/x86/kvm/vmx/tdx_ops.h | 22 ++++++++++++++--------
 1 file changed, 14 insertions(+), 8 deletions(-)

diff --git a/arch/x86/kvm/vmx/tdx_ops.h b/arch/x86/kvm/vmx/tdx_ops.h
index d27f281152cb..3af124711e98 100644
--- a/arch/x86/kvm/vmx/tdx_ops.h
+++ b/arch/x86/kvm/vmx/tdx_ops.h
@@ -6,6 +6,7 @@
 
 #include <linux/compiler.h>
 
+#include <asm/pgtable_types.h>
 #include <asm/cacheflush.h>
 #include <asm/asm.h>
 #include <asm/kvm_host.h>
@@ -50,6 +51,11 @@ static inline int pg_level_to_tdx_sept_level(enum pg_level level)
 	return level - 1;
 }
 
+static inline void tdx_clflush_page(hpa_t addr, enum pg_level level)
+{
+	clflush_cache_range(__va(addr), KVM_HPAGE_SIZE(level));
+}
+
 /*
  * TDX module acquires its internal lock for resources.  It doesn't spin to get
  * locks because of its restrictions of allowed execution time.  Instead, it
@@ -87,7 +93,7 @@ static inline u64 tdh_mng_addcx(hpa_t tdr, hpa_t addr)
 		.rdx = tdr,
 	};
 
-	clflush_cache_range(__va(addr), PAGE_SIZE);
+	tdx_clflush_page(addr, PG_LEVEL_4K);
 	return tdx_seamcall(TDH_MNG_ADDCX, &in, NULL);
 }
 
@@ -101,7 +107,7 @@ static inline u64 tdh_mem_page_add(hpa_t tdr, gpa_t gpa, hpa_t hpa, hpa_t source
 		.r9 = source,
 	};
 
-	clflush_cache_range(__va(hpa), PAGE_SIZE);
+	tdx_clflush_page(hpa, PG_LEVEL_4K);
 	return tdx_seamcall_sept(TDH_MEM_PAGE_ADD, &in, out);
 }
 
@@ -114,7 +120,7 @@ static inline u64 tdh_mem_sept_add(hpa_t tdr, gpa_t gpa, int level, hpa_t page,
 		.r8 = page,
 	};
 
-	clflush_cache_range(__va(page), PAGE_SIZE);
+	tdx_clflush_page(page, PG_LEVEL_4K);
 	return tdx_seamcall_sept(TDH_MEM_SEPT_ADD, &in, out);
 }
 
@@ -147,7 +153,7 @@ static inline u64 tdh_vp_addcx(hpa_t tdvpr, hpa_t addr)
 		.rdx = tdvpr,
 	};
 
-	clflush_cache_range(__va(addr), PAGE_SIZE);
+	tdx_clflush_page(addr, PG_LEVEL_4K);
 	return tdx_seamcall(TDH_VP_ADDCX, &in, NULL);
 }
 
@@ -160,7 +166,7 @@ static inline u64 tdh_mem_page_relocate(hpa_t tdr, gpa_t gpa, hpa_t hpa,
 		.r8 = hpa,
 	};
 
-	clflush_cache_range(__va(hpa), PAGE_SIZE);
+	tdx_clflush_page(hpa, PG_LEVEL_4K);
 	return tdx_seamcall_sept(TDH_MEM_PAGE_RELOCATE, &in, out);
 }
 
@@ -173,7 +179,7 @@ static inline u64 tdh_mem_page_aug(hpa_t tdr, gpa_t gpa, hpa_t hpa,
 		.r8 = hpa,
 	};
 
-	clflush_cache_range(__va(hpa), PAGE_SIZE);
+	tdx_clflush_page(hpa, PG_LEVEL_4K);
 	return tdx_seamcall_sept(TDH_MEM_PAGE_AUG, &in, out);
 }
 
@@ -204,7 +210,7 @@ static inline u64 tdh_mng_create(hpa_t tdr, int hkid)
 		.rdx = hkid,
 	};
 
-	clflush_cache_range(__va(tdr), PAGE_SIZE);
+	tdx_clflush_page(tdr, PG_LEVEL_4K);
 	return tdx_seamcall(TDH_MNG_CREATE, &in, NULL);
 }
 
@@ -215,7 +221,7 @@ static inline u64 tdh_vp_create(hpa_t tdr, hpa_t tdvpr)
 		.rdx = tdr,
 	};
 
-	clflush_cache_range(__va(tdvpr), PAGE_SIZE);
+	tdx_clflush_page(tdvpr, PG_LEVEL_4K);
 	return tdx_seamcall(TDH_VP_CREATE, &in, NULL);
 }
 
-- 
2.25.1


