From: Peter Zijlstra <a.p.zijlstra@chello.nl>
To: linux-mm@kvack.org, linux-kernel@vger.kernel.org
Cc: Hugh Dickins <hugh@veritas.com>, Andrew Morton <akpm@osdl.org>,
David Howells <dhowells@redhat.com>,
Peter Zijlstra <a.p.zijlstra@chello.nl>,
Christoph Lameter <christoph@lameter.com>,
Martin Bligh <mbligh@google.com>, Nick Piggin <npiggin@suse.de>,
Linus Torvalds <torvalds@osdl.org>
Subject: [PATCH 4/6] mm: optimize the new mprotect() code a bit
Date: Mon, 19 Jun 2006 19:53:26 +0200
Message-ID: <20060619175326.24655.90153.sendpatchset@lappy>
In-Reply-To: <20060619175243.24655.76005.sendpatchset@lappy>
mprotect() resets the page protections, which could result in extra write
faults for those pages whose dirty state we track using write faults and
which are already dirty.
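
To make the scenario concrete: with the new dirty-page tracking, a shared
file mapping is write-protected so that the first store faults and marks the
page dirty. mprotect() then rebuilds the ptes from the protection map, and
without this patch an already-dirty page is left write-protected again,
costing one more write fault that tells us nothing new. A minimal userspace
sketch of that case (illustrative only, not part of the patch; the file path
is arbitrary):

/*
 * Dirty a shared, dirty-accountable file mapping, change its
 * protections, and store to it again.
 */
#include <fcntl.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/tmp/scratch", O_RDWR | O_CREAT, 0600);
	char *p;

	if (fd < 0 || ftruncate(fd, 4096) < 0)
		return 1;

	p = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (p == MAP_FAILED)
		return 1;

	p[0] = 1;			/* write fault marks the pte dirty */
	mprotect(p, 4096, PROT_READ);	/* drop write permission */
	mprotect(p, 4096, PROT_READ | PROT_WRITE);

	/*
	 * The pte is still dirty; with this patch it is made writable
	 * here as well, so this second store takes no fault.
	 */
	p[1] = 1;

	munmap(p, 4096);
	close(fd);
	return 0;
}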
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
---
mm/mprotect.c | 33 +++++++++++++++++++++++----------
1 file changed, 23 insertions(+), 10 deletions(-)
Index: 2.6-mm/mm/mprotect.c
===================================================================
--- 2.6-mm.orig/mm/mprotect.c 2006-06-19 16:19:42.000000000 +0200
+++ 2.6-mm/mm/mprotect.c 2006-06-19 16:20:42.000000000 +0200
@@ -28,7 +28,8 @@
 #include <asm/tlbflush.h>
 
 static void change_pte_range(struct mm_struct *mm, pmd_t *pmd,
-		unsigned long addr, unsigned long end, pgprot_t newprot)
+		unsigned long addr, unsigned long end, pgprot_t newprot,
+		int is_accountable)
 {
 	pte_t *pte, oldpte;
 	spinlock_t *ptl;
@@ -43,7 +44,13 @@ static void change_pte_range(struct mm_s
 			 * bits by wiping the pte and then setting the new pte
 			 * into place.
 			 */
-			ptent = pte_modify(ptep_get_and_clear(mm, addr, pte), newprot);
+			ptent = ptep_get_and_clear(mm, addr, pte);
+			ptent = pte_modify(ptent, newprot);
+			/* Avoid taking write faults for pages we know to be
+			 * dirty.
+			 */
+			if (is_accountable && pte_dirty(ptent))
+				ptent = pte_mkwrite(ptent);
 			set_pte_at(mm, addr, pte, ptent);
 			lazy_mmu_prot_update(ptent);
 #ifdef CONFIG_MIGRATION
@@ -67,7 +74,8 @@ static void change_pte_range(struct mm_s
 }
 
 static inline void change_pmd_range(struct mm_struct *mm, pud_t *pud,
-		unsigned long addr, unsigned long end, pgprot_t newprot)
+		unsigned long addr, unsigned long end, pgprot_t newprot,
+		int is_accountable)
 {
 	pmd_t *pmd;
 	unsigned long next;
@@ -77,12 +85,13 @@ static inline void change_pmd_range(stru
 		next = pmd_addr_end(addr, end);
 		if (pmd_none_or_clear_bad(pmd))
 			continue;
-		change_pte_range(mm, pmd, addr, next, newprot);
+		change_pte_range(mm, pmd, addr, next, newprot, is_accountable);
 	} while (pmd++, addr = next, addr != end);
 }
 
 static inline void change_pud_range(struct mm_struct *mm, pgd_t *pgd,
-		unsigned long addr, unsigned long end, pgprot_t newprot)
+		unsigned long addr, unsigned long end, pgprot_t newprot,
+		int is_accountable)
 {
 	pud_t *pud;
 	unsigned long next;
@@ -92,12 +101,13 @@ static inline void change_pud_range(stru
 		next = pud_addr_end(addr, end);
 		if (pud_none_or_clear_bad(pud))
 			continue;
-		change_pmd_range(mm, pud, addr, next, newprot);
+		change_pmd_range(mm, pud, addr, next, newprot, is_accountable);
 	} while (pud++, addr = next, addr != end);
 }
 
 static void change_protection(struct vm_area_struct *vma,
-		unsigned long addr, unsigned long end, pgprot_t newprot)
+		unsigned long addr, unsigned long end, pgprot_t newprot,
+		int is_accountable)
 {
 	struct mm_struct *mm = vma->vm_mm;
 	pgd_t *pgd;
@@ -111,7 +121,7 @@ static void change_protection(struct vm_
 		next = pgd_addr_end(addr, end);
 		if (pgd_none_or_clear_bad(pgd))
 			continue;
-		change_pud_range(mm, pgd, addr, next, newprot);
+		change_pud_range(mm, pgd, addr, next, newprot, is_accountable);
 	} while (pgd++, addr = next, addr != end);
 	flush_tlb_range(vma, start, end);
 }
@@ -129,6 +139,7 @@ mprotect_fixup(struct vm_area_struct *vm
 	pgprot_t newprot;
 	pgoff_t pgoff;
 	int error;
+	int is_accountable = 0;
 
 	if (newflags == oldflags) {
 		*pprev = vma;
@@ -184,8 +195,10 @@ success:
 	if (is_shared_writable(newflags) && vma->vm_file)
 		mapping = vma->vm_file->f_mapping;
 	if ((mapping && mapping_cap_account_dirty(mapping)) ||
-	    (vma->vm_ops && vma->vm_ops->page_mkwrite))
+	    (vma->vm_ops && vma->vm_ops->page_mkwrite)) {
 		mask &= ~VM_SHARED;
+		is_accountable = 1;
+	}
 
 	newprot = protection_map[newflags & mask];
 
@@ -198,7 +211,7 @@ success:
 	if (is_vm_hugetlb_page(vma))
 		hugetlb_change_protection(vma, start, end, newprot);
 	else
-		change_protection(vma, start, end, newprot);
+		change_protection(vma, start, end, newprot, is_accountable);
 	vm_stat_account(mm, oldflags, vma->vm_file, -nrpages);
 	vm_stat_account(mm, newflags, vma->vm_file, nrpages);
 	return 0;
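
For review, the new per-pte step reduces to the sketch below (the helper
name is hypothetical; pte locking, lazy_mmu_prot_update() and the
CONFIG_MIGRATION case are omitted):

/* Simplified shape of the inner loop of change_pte_range() above. */
static void change_one_pte(struct mm_struct *mm, unsigned long addr,
			   pte_t *pte, pgprot_t newprot, int is_accountable)
{
	pte_t ptent = ptep_get_and_clear(mm, addr, pte);	/* SMP-safe wipe */

	ptent = pte_modify(ptent, newprot);	/* apply the new protections */
	if (is_accountable && pte_dirty(ptent))
		ptent = pte_mkwrite(ptent);	/* already dirty: keep writable */
	set_pte_at(mm, addr, pte, ptent);
}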