From 4fb32ad7c949d5ec6b6ea364d3388b50bf674c9c Mon Sep 17 00:00:00 2001 From: Peter Xu Date: Wed, 30 Jun 2021 12:20:12 -0400 Subject: [PATCH] mm/mprotect: Optimize layout of may_avoid_write_fault() Firstly move VM_WRITE check to be outside of !MM_CP_DIRTY_ACCT chunk, so as to make it clear that we won't accidentally set the write bit to !VM_WRITE vmas. The old logic is hard to read in that it was written in reversed logic. Straighten it out by moving the soft-dirty and uffd-wp checks earlier. Make the NUMA check even earlier than those as it's a cheap check and straightforward. Make the only "return true" case to be either the MM_CP_DIRTY_ACCT (which stands for the VM_SHARED cases when write bit can be applied), or the special anonymous page when we exclusively own it. Signed-off-by: Peter Xu --- mm/mprotect.c | 39 +++++++++++++++++++++++++-------------- 1 file changed, 25 insertions(+), 14 deletions(-) diff --git a/mm/mprotect.c b/mm/mprotect.c index 4cb240fd9936..3977bfd55f62 100644 --- a/mm/mprotect.c +++ b/mm/mprotect.c @@ -40,17 +40,20 @@ static bool may_avoid_write_fault(pte_t pte, struct vm_area_struct *vma, unsigned long cp_flags) { /* - * The dirty accountable bit indicates that we can always make the page - * writable regardless of the number of references. + * It is unclear whether this optimization can be done safely for NUMA + * pages. */ - if (!(cp_flags & MM_CP_DIRTY_ACCT)) { - /* Otherwise, we must have exclusive access to the page. */ - if (!(vma_is_anonymous(vma) && (vma->vm_flags & VM_WRITE))) - return false; + if (cp_flags & MM_CP_PROT_NUMA) + return false; - if (page_count(pte_page(pte)) != 1) - return false; - } + /* + * Never apply write bit if VM_WRITE not set. Note that this is + * actually checked for VM_SHARED when MM_CP_DIRTY_ACCT is set, so + * logically we only need to check it for !MM_CP_DIRTY_ACCT, but just + * make it even more obvious. 
+ */ + if (!(vma->vm_flags & VM_WRITE)) + return false; /* * Don't do this optimization for clean pages as we need to be notified @@ -71,13 +74,21 @@ static bool may_avoid_write_fault(pte_t pte, struct vm_area_struct *vma, return false; /* - * It is unclear whether this optimization can be done safely for NUMA - * pages. + * MM_CP_DIRTY_ACCT indicates that we can always make the page writable + * regardless of the number of references. Time to set the write bit. */ - if (cp_flags & MM_CP_PROT_NUMA) - return false; + if (cp_flags & MM_CP_DIRTY_ACCT) + return true; + + /* + * Otherwise it means !MM_CP_DIRTY_ACCT. We can only apply write bit + * early if it's an anonymous page and we exclusively own it. + */ + if (vma_is_anonymous(vma) && (page_count(pte_page(pte)) == 1)) + return true; - return true; + /* Don't play any trick */ + return false; } static unsigned long change_pte_range(struct vm_area_struct *vma, pmd_t *pmd, -- 2.31.1