From: Sean Christopherson <sean.j.christopherson@intel.com>
To: "Paolo Bonzini" <pbonzini@redhat.com>,
"Radim Krčmář" <rkrcmar@redhat.com>
Cc: Sean Christopherson <sean.j.christopherson@intel.com>,
Vitaly Kuznetsov <vkuznets@redhat.com>,
Wanpeng Li <wanpengli@tencent.com>,
Jim Mattson <jmattson@google.com>, Joerg Roedel <joro@8bytes.org>,
kvm@vger.kernel.org, linux-kernel@vger.kernel.org
Subject: [PATCH 07/16] KVM: x86/mmu: Refactor handling of forced 4k pages in page faults
Date: Fri, 6 Dec 2019 15:57:20 -0800
Message-ID: <20191206235729.29263-8-sean.j.christopherson@intel.com>
In-Reply-To: <20191206235729.29263-1-sean.j.christopherson@intel.com>

Refactor the page fault handlers and mapping_level() to track the max
allowed page level instead of only tracking if a 4k page is mandatory
due to one restriction or another. This paves the way for cleanly
consolidating tdp_page_fault() and nonpaging_page_fault(), and for
eliminating a redundant check on mmu_gfn_lpage_is_disallowed().
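
In short, taking tdp_page_fault() as the example (a condensed excerpt of
the diff below, not the literal hunk): callers used to seed a bool and
clamp the returned level themselves, whereas now they seed the ceiling
and mapping_level() does the clamping, lowering *max_levelp when the
memslot disallows large pages.

	/* Old: out-param is a bool; the caller clamps the level itself. */
	force_pt_level = lpage_disallowed || max_level == PT_PAGE_TABLE_LEVEL;
	level = mapping_level(vcpu, gfn, &force_pt_level);
	if (likely(!force_pt_level)) {
		if (level > max_level)
			level = max_level;
		...
	}

	/* New: out-param is the max allowed level; mapping_level() clamps. */
	if (lpage_disallowed)
		max_level = PT_PAGE_TABLE_LEVEL;

	level = mapping_level(vcpu, gfn, &max_level);
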
No functional change intended.

Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
---
 arch/x86/kvm/mmu/mmu.c         | 45 ++++++++++++++--------------------
 arch/x86/kvm/mmu/paging_tmpl.h | 16 +++++++-----
 2 files changed, 29 insertions(+), 32 deletions(-)

diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index bd1711201181..877924cbb75b 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -1324,18 +1324,19 @@ gfn_to_memslot_dirty_bitmap(struct kvm_vcpu *vcpu, gfn_t gfn,
 }
 
 static int mapping_level(struct kvm_vcpu *vcpu, gfn_t large_gfn,
-			 bool *force_pt_level)
+			 int *max_levelp)
 {
-	int host_level, max_level;
+	int host_level, max_level = *max_levelp;
 	struct kvm_memory_slot *slot;
 
-	if (unlikely(*force_pt_level))
+	if (unlikely(max_level == PT_PAGE_TABLE_LEVEL))
 		return PT_PAGE_TABLE_LEVEL;
 
 	slot = kvm_vcpu_gfn_to_memslot(vcpu, large_gfn);
-	*force_pt_level = !memslot_valid_for_gpte(slot, true);
-	if (unlikely(*force_pt_level))
+	if (!memslot_valid_for_gpte(slot, true)) {
+		*max_levelp = PT_PAGE_TABLE_LEVEL;
 		return PT_PAGE_TABLE_LEVEL;
+	}
 
 	host_level = host_mapping_level(vcpu->kvm, large_gfn);
 
@@ -4169,9 +4170,10 @@ static int nonpaging_page_fault(struct kvm_vcpu *vcpu, gpa_t gpa,
 	unsigned long mmu_seq;
 	gfn_t gfn = gpa >> PAGE_SHIFT;
 	bool write = error_code & PFERR_WRITE_MASK;
-	bool force_pt_level, map_writable;
+	bool map_writable;
 	bool exec = error_code & PFERR_FETCH_MASK;
 	bool lpage_disallowed = exec && is_nx_huge_page_enabled();
+	int max_level;
 
 	/* Note, paging is disabled, ergo gva == gpa. */
 	pgprintk("%s: gva %lx error %x\n", __func__, gpa, error_code);
@@ -4187,19 +4189,12 @@ static int nonpaging_page_fault(struct kvm_vcpu *vcpu, gpa_t gpa,
 	MMU_WARN_ON(!VALID_PAGE(vcpu->arch.mmu->root_hpa));
 
-	force_pt_level = lpage_disallowed;
-	level = mapping_level(vcpu, gfn, &force_pt_level);
-	if (likely(!force_pt_level)) {
-		/*
-		 * This path builds a PAE pagetable - so we can map
-		 * 2mb pages at maximum. Therefore check if the level
-		 * is larger than that.
-		 */
-		if (level > PT_DIRECTORY_LEVEL)
-			level = PT_DIRECTORY_LEVEL;
-
+	/* This path builds a PAE pagetable, we can map 2mb pages at maximum. */
+	max_level = lpage_disallowed ? PT_PAGE_TABLE_LEVEL : PT_DIRECTORY_LEVEL;
+
+	level = mapping_level(vcpu, gfn, &max_level);
+	if (level > PT_PAGE_TABLE_LEVEL)
 		gfn &= ~(KVM_PAGES_PER_HPAGE(level) - 1);
-	}
 
 	if (fast_page_fault(vcpu, gpa, level, error_code))
 		return RET_PF_RETRY;
 
@@ -4219,7 +4214,7 @@ static int nonpaging_page_fault(struct kvm_vcpu *vcpu, gpa_t gpa,
 		goto out_unlock;
 	if (make_mmu_pages_available(vcpu) < 0)
 		goto out_unlock;
-	if (likely(!force_pt_level))
+	if (likely(max_level > PT_PAGE_TABLE_LEVEL))
 		transparent_hugepage_adjust(vcpu, gfn, &pfn, &level);
 	r = __direct_map(vcpu, gpa, write, map_writable, level, pfn,
 			 prefault, false);
@@ -4273,7 +4268,6 @@ static int tdp_page_fault(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code,
 	kvm_pfn_t pfn;
 	int r;
 	int level;
-	bool force_pt_level;
 	gfn_t gfn = gpa >> PAGE_SHIFT;
 	unsigned long mmu_seq;
 	int write = error_code & PFERR_WRITE_MASK;
@@ -4301,13 +4295,12 @@ static int tdp_page_fault(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code,
 		break;
 	}
 
-	force_pt_level = lpage_disallowed || max_level == PT_PAGE_TABLE_LEVEL;
-	level = mapping_level(vcpu, gfn, &force_pt_level);
-	if (likely(!force_pt_level)) {
-		if (level > max_level)
-			level = max_level;
+	if (lpage_disallowed)
+		max_level = PT_PAGE_TABLE_LEVEL;
+
+	level = mapping_level(vcpu, gfn, &max_level);
+	if (level > PT_PAGE_TABLE_LEVEL)
 		gfn &= ~(KVM_PAGES_PER_HPAGE(level) - 1);
-	}
 
 	if (fast_page_fault(vcpu, gpa, level, error_code))
 		return RET_PF_RETRY;
@@ -4327,7 +4320,7 @@ static int tdp_page_fault(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code,
 		goto out_unlock;
 	if (make_mmu_pages_available(vcpu) < 0)
 		goto out_unlock;
-	if (likely(!force_pt_level))
+	if (likely(max_level > PT_PAGE_TABLE_LEVEL))
 		transparent_hugepage_adjust(vcpu, gfn, &pfn, &level);
 	r = __direct_map(vcpu, gpa, write, map_writable, level, pfn,
 			 prefault, lpage_disallowed);
diff --git a/arch/x86/kvm/mmu/paging_tmpl.h b/arch/x86/kvm/mmu/paging_tmpl.h
index c1d7b866a03f..1938a6e4e631 100644
--- a/arch/x86/kvm/mmu/paging_tmpl.h
+++ b/arch/x86/kvm/mmu/paging_tmpl.h
@@ -778,7 +778,7 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gpa_t addr, u32 error_code,
 	bool map_writable, is_self_change_mapping;
 	bool lpage_disallowed = (error_code & PFERR_FETCH_MASK) &&
 				is_nx_huge_page_enabled();
-	bool force_pt_level = lpage_disallowed;
+	int max_level;
 
 	pgprintk("%s: addr %lx err %x\n", __func__, addr, error_code);
 
@@ -818,14 +818,18 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gpa_t addr, u32 error_code,
 	is_self_change_mapping = FNAME(is_self_change_mapping)(vcpu,
 	      &walker, user_fault, &vcpu->arch.write_fault_to_shadow_pgtable);
 
+	max_level = lpage_disallowed ? PT_PAGE_TABLE_LEVEL :
+				       PT_MAX_HUGEPAGE_LEVEL;
+
 	if (walker.level >= PT_DIRECTORY_LEVEL && !is_self_change_mapping) {
-		level = mapping_level(vcpu, walker.gfn, &force_pt_level);
-		if (likely(!force_pt_level)) {
+		level = mapping_level(vcpu, walker.gfn, &max_level);
+		if (likely(max_level > PT_DIRECTORY_LEVEL)) {
 			level = min(walker.level, level);
 			walker.gfn = walker.gfn & ~(KVM_PAGES_PER_HPAGE(level) - 1);
 		}
-	} else
-		force_pt_level = true;
+	} else {
+		max_level = PT_PAGE_TABLE_LEVEL;
+	}
 
 	mmu_seq = vcpu->kvm->mmu_notifier_seq;
 	smp_rmb();
@@ -865,7 +869,7 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gpa_t addr, u32 error_code,
 	kvm_mmu_audit(vcpu, AUDIT_PRE_PAGE_FAULT);
 	if (make_mmu_pages_available(vcpu) < 0)
 		goto out_unlock;
-	if (!force_pt_level)
+	if (max_level > PT_PAGE_TABLE_LEVEL)
 		transparent_hugepage_adjust(vcpu, walker.gfn, &pfn, &level);
 	r = FNAME(fetch)(vcpu, addr, &walker, write_fault,
 			 level, pfn, map_writable, prefault, lpage_disallowed);
--
2.24.0