Use correct page protection for put_dirty_page

Message ID 20030511080841.GA31266@averell
State New, archived
Series
  • Use correct page protection for put_dirty_page

Commit Message

Andi Kleen May 11, 2003, 8:08 a.m. UTC
put_dirty_page must use the page protection of the stack VMA, not the
hardcoded PAGE_COPY. They can differ, e.g. when the stack is set
non-executable via VM_STACK_FLAGS.

I added a fallback path for now because I'm not sure the stack VMA has
always been grown down to cover the address at this point; if the printk
ever triggers, a stack extension may also be needed here.

-Andi
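
In short, the change lets the new PTE's protection follow the stack VMA
instead of a fixed constant, falling back to PAGE_COPY only when no VMA
covers the address yet; a condensed sketch of the patch at the bottom of
this page:

	/* Use the stack VMA's protection rather than hardcoded PAGE_COPY;
	 * fall back only if the stack VMA does not cover 'address' yet. */
	vma = find_vma(tsk->mm, address);
	prot = vma ? vma->vm_page_prot : PAGE_COPY;
	set_pte(pte, pte_mkdirty(pte_mkwrite(mk_pte(page, prot))));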


Comments

William Lee Irwin III May 11, 2003, 8:30 a.m. UTC | #1
On Sun, May 11, 2003 at 10:08:41AM +0200, Andi Kleen wrote:
> put_dirty_page must use the page protection of the stack VMA, not the
> hardcoded PAGE_COPY. They can differ, e.g. when the stack is set
> non-executable via VM_STACK_FLAGS.
> I added a fallback path for now because I'm not sure the stack VMA has
> always been grown down to cover the address at this point; if the printk
> ever triggers, a stack extension may also be needed here.

We know which vma is involved at the callsite and what we just set its
vma->vm_page_prot to; I suggest this patch instead.


-- wli


diff -prauN linux-2.5.69-1/fs/exec.c exec-2.5.69-1/fs/exec.c
--- linux-2.5.69-1/fs/exec.c	Wed Apr 16 15:34:51 2003
+++ exec-2.5.69-1/fs/exec.c	Sun May 11 01:27:47 2003
@@ -314,7 +314,7 @@
 	}
 	lru_cache_add_active(page);
 	flush_dcache_page(page);
-	set_pte(pte, pte_mkdirty(pte_mkwrite(mk_pte(page, PAGE_COPY))));
+	set_pte(pte, pte_mkdirty(pte_mkwrite(mk_pte(page, protection_map[VM_STACK_FLAGS & 0x7]))));
 	pte_chain = page_add_rmap(page, pte, pte_chain);
 	pte_unmap(pte);
 	tsk->mm->rss++;
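
For reference, protection_map[VM_STACK_FLAGS & 0x7] indexes the
per-architecture protection table by the low VM_READ/VM_WRITE/VM_EXEC
bits of the flags. A small standalone model of that indexing (the VM_*
values match the kernel's, but the string table only stands in for the
real pgprot_t entries, which also have shared-mapping variants):

#include <stdio.h>

/*
 * Userspace model only: shows how the low three vm_flags bits select a
 * protection entry, and why a single hardcoded protection cannot track
 * whether VM_EXEC is present in VM_STACK_FLAGS.
 */
#define VM_READ		0x1
#define VM_WRITE	0x2
#define VM_EXEC		0x4

static const char *prot_model[8] = {
	"---", "r--", "-w-", "rw-", "--x", "r-x", "-wx", "rwx"
};

int main(void)
{
	unsigned long exec_stack = VM_READ | VM_WRITE | VM_EXEC;
	unsigned long nx_stack = VM_READ | VM_WRITE;

	printf("executable stack flags     -> %s\n", prot_model[exec_stack & 0x7]);
	printf("non-executable stack flags -> %s\n", prot_model[nx_stack & 0x7]);
	return 0;
}
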
Andrew Morton May 11, 2003, 8:32 a.m. UTC | #2
Andi Kleen <ak@muc.de> wrote:
>
> put_dirty_page must use the page protection of the stack VMA, not the
>  hardcoded PAGE_COPY. They can differ, e.g. when the stack is set
>  non-executable via VM_STACK_FLAGS.

OK.  It seems a bit inefficient to go looking up the vma immediately after
having created it.

How about we simply pass the desired protection in to put_dirty_page()?


 arch/ia64/ia32/binfmt_elf32.c  |    4 ++--
 arch/s390/kernel/compat_exec.c |    5 ++---
 arch/x86_64/ia32/ia32_binfmt.c |    6 ++----
 fs/exec.c                      |   13 +++++++------
 include/linux/mm.h             |    3 ++-
 5 files changed, 15 insertions(+), 16 deletions(-)

diff -puN arch/ia64/ia32/binfmt_elf32.c~put_dirty_page-protection-fix arch/ia64/ia32/binfmt_elf32.c
--- 25/arch/ia64/ia32/binfmt_elf32.c~put_dirty_page-protection-fix	2003-05-11 01:23:04.000000000 -0700
+++ 25-akpm/arch/ia64/ia32/binfmt_elf32.c	2003-05-11 01:23:50.000000000 -0700
@@ -12,6 +12,7 @@
 #include <linux/config.h>
 
 #include <linux/types.h>
+#include <linux/mm.h>
 
 #include <asm/param.h>
 #include <asm/signal.h>
@@ -40,7 +41,6 @@
 #define CLOCKS_PER_SEC	IA32_CLOCKS_PER_SEC
 
 extern void ia64_elf32_init (struct pt_regs *regs);
-extern void put_dirty_page (struct task_struct * tsk, struct page *page, unsigned long address);
 
 static void elf32_set_personality (void);
 
@@ -200,7 +200,7 @@ ia32_setup_arg_pages (struct linux_binpr
 		struct page *page = bprm->page[i];
 		if (page) {
 			bprm->page[i] = NULL;
-			put_dirty_page(current, page, stack_base);
+			put_dirty_page(current, page, stack_base, PAGE_COPY);
 		}
 		stack_base += PAGE_SIZE;
 	}
diff -puN arch/ia64/mm/init.c~put_dirty_page-protection-fix arch/ia64/mm/init.c
diff -puN arch/s390/kernel/compat_exec.c~put_dirty_page-protection-fix arch/s390/kernel/compat_exec.c
--- 25/arch/s390/kernel/compat_exec.c~put_dirty_page-protection-fix	2003-05-11 01:23:04.000000000 -0700
+++ 25-akpm/arch/s390/kernel/compat_exec.c	2003-05-11 01:24:20.000000000 -0700
@@ -18,6 +18,7 @@
 #include <linux/smp_lock.h>
 #include <linux/init.h>
 #include <linux/pagemap.h>
+#include <linux/mm.h>
 #include <linux/highmem.h>
 #include <linux/spinlock.h>
 #include <linux/binfmts.h>
@@ -32,8 +33,6 @@
 #endif
 
 
-extern void put_dirty_page(struct task_struct * tsk, struct page *page, unsigned long address);
-
 #undef STACK_TOP
 #define STACK_TOP TASK31_SIZE
 
@@ -81,7 +80,7 @@ int setup_arg_pages32(struct linux_binpr
 		struct page *page = bprm->page[i];
 		if (page) {
 			bprm->page[i] = NULL;
-			put_dirty_page(current,page,stack_base);
+			put_dirty_page(current,page,stack_base,PAGE_COPY);
 		}
 		stack_base += PAGE_SIZE;
 	}
diff -puN arch/x86_64/ia32/ia32_binfmt.c~put_dirty_page-protection-fix arch/x86_64/ia32/ia32_binfmt.c
--- 25/arch/x86_64/ia32/ia32_binfmt.c~put_dirty_page-protection-fix	2003-05-11 01:23:04.000000000 -0700
+++ 25-akpm/arch/x86_64/ia32/ia32_binfmt.c	2003-05-11 01:24:38.000000000 -0700
@@ -13,6 +13,7 @@
 #include <linux/sched.h>
 #include <linux/string.h>
 #include <linux/binfmts.h>
+#include <linux/mm.h>
 #include <asm/segment.h> 
 #include <asm/ptrace.h>
 #include <asm/processor.h>
@@ -272,9 +273,6 @@ static void elf32_init(struct pt_regs *r
 	set_thread_flag(TIF_IA32); 
 }
 
-extern void put_dirty_page(struct task_struct * tsk, struct page *page, unsigned long address);
- 
-
 int setup_arg_pages(struct linux_binprm *bprm)
 {
 	unsigned long stack_base;
@@ -319,7 +317,7 @@ int setup_arg_pages(struct linux_binprm 
 		struct page *page = bprm->page[i];
 		if (page) {
 			bprm->page[i] = NULL;
-			put_dirty_page(current,page,stack_base);
+			put_dirty_page(current,page,stack_base,PAGE_COPY_EXEC);
 		}
 		stack_base += PAGE_SIZE;
 	}
diff -puN fs/exec.c~put_dirty_page-protection-fix fs/exec.c
--- 25/fs/exec.c~put_dirty_page-protection-fix	2003-05-11 01:23:05.000000000 -0700
+++ 25-akpm/fs/exec.c	2003-05-11 01:26:16.000000000 -0700
@@ -287,7 +287,8 @@ int copy_strings_kernel(int argc,char **
  *
  * tsk->mmap_sem is held for writing.
  */
-void put_dirty_page(struct task_struct * tsk, struct page *page, unsigned long address)
+void put_dirty_page(struct task_struct *tsk, struct page *page,
+			unsigned long address, pgprot_t prot)
 {
 	pgd_t * pgd;
 	pmd_t * pmd;
@@ -295,7 +296,8 @@ void put_dirty_page(struct task_struct *
 	struct pte_chain *pte_chain;
 
 	if (page_count(page) != 1)
-		printk(KERN_ERR "mem_map disagrees with %p at %08lx\n", page, address);
+		printk(KERN_ERR "mem_map disagrees with %p at %08lx\n",
+				page, address);
 
 	pgd = pgd_offset(tsk->mm, address);
 	pte_chain = pte_chain_alloc(GFP_KERNEL);
@@ -314,7 +316,7 @@ void put_dirty_page(struct task_struct *
 	}
 	lru_cache_add_active(page);
 	flush_dcache_page(page);
-	set_pte(pte, pte_mkdirty(pte_mkwrite(mk_pte(page, PAGE_COPY))));
+	set_pte(pte, pte_mkdirty(pte_mkwrite(mk_pte(page, prot))));
 	pte_chain = page_add_rmap(page, pte, pte_chain);
 	pte_unmap(pte);
 	tsk->mm->rss++;
@@ -421,7 +423,8 @@ int setup_arg_pages(struct linux_binprm 
 		struct page *page = bprm->page[i];
 		if (page) {
 			bprm->page[i] = NULL;
-			put_dirty_page(current,page,stack_base);
+			put_dirty_page(current, page, stack_base,
+					mpnt->vm_page_prot);
 		}
 		stack_base += PAGE_SIZE;
 	}
@@ -434,8 +437,6 @@ int setup_arg_pages(struct linux_binprm 
 
 #else
 
-#define put_dirty_page(tsk, page, address)
-#define setup_arg_pages(bprm)			(0)
 static inline void free_arg_pages(struct linux_binprm *bprm)
 {
 	int i;
diff -puN include/linux/mm.h~put_dirty_page-protection-fix include/linux/mm.h
--- 25/include/linux/mm.h~put_dirty_page-protection-fix	2003-05-11 01:23:05.000000000 -0700
+++ 25-akpm/include/linux/mm.h	2003-05-11 01:27:12.000000000 -0700
@@ -421,7 +421,8 @@ extern int handle_mm_fault(struct mm_str
 extern int make_pages_present(unsigned long addr, unsigned long end);
 extern int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write);
 extern long sys_remap_file_pages(unsigned long start, unsigned long size, unsigned long prot, unsigned long pgoff, unsigned long nonblock);
-
+void put_dirty_page(struct task_struct *tsk, struct page *page,
+			unsigned long address, pgprot_t prot);
 
 int get_user_pages(struct task_struct *tsk, struct mm_struct *mm, unsigned long start,
 		int len, int write, int force, struct page **pages, struct vm_area_struct **vmas);
Andi Kleen May 11, 2003, 8:36 a.m. UTC | #3
On Sun, May 11, 2003 at 10:30:35AM +0200, William Lee Irwin III wrote:
> We know which vma is involved at the callsite and what we just set its
> vma->vm_page_prot to; I suggest this patch instead.

No, that won't work. On x86-64, VM_STACK_FLAGS looks at the current
thread state to see whether it's a 32-bit or 64-bit process, and that
is not decided yet at that point.

-Andi
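
For context, a rough sketch of the kind of definition being described;
the macro body and the vm_stack_flags/vm_stack_flags32 names are
illustrative rather than quoted from the x86-64 headers:

/*
 * Illustrative only: on x86-64, VM_STACK_FLAGS is not a compile-time
 * constant but consults the current thread's 32-bit personality, and
 * exec() has not established that yet when put_dirty_page() runs.
 */
#define VM_STACK_FLAGS \
	(test_thread_flag(TIF_IA32) ? vm_stack_flags32 : vm_stack_flags)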
Andi Kleen May 11, 2003, 8:37 a.m. UTC | #4
On Sun, May 11, 2003 at 10:32:26AM +0200, Andrew Morton wrote:
> Andi Kleen <ak@muc.de> wrote:
> >
> > put_dirty_page must use the page protection of the stack VMA, not the
> >  hardcoded PAGE_COPY. They can differ, e.g. when the stack is set
> >  non-executable via VM_STACK_FLAGS.
> 
> OK.  It seems a bit inefficient to go looking up the vma immediately after
> having created it.
> 
> How about we simply pass the desired protection in to put_dirty_page()?

Fine by me.

-Andi

Patch

--- linux-2.5.69-amd64/fs/exec.c-o	2003-04-20 21:21:50.000000000 +0200
+++ linux-2.5.69-amd64/fs/exec.c	2003-05-11 08:38:35.000000000 +0200
@@ -292,7 +292,9 @@ 
 	pgd_t * pgd;
 	pmd_t * pmd;
 	pte_t * pte;
+	pgprot_t prot;
 	struct pte_chain *pte_chain;
+	struct vm_area_struct *vma;
 
 	if (page_count(page) != 1)
 		printk(KERN_ERR "mem_map disagrees with %p at %08lx\n", page, address);
@@ -314,7 +316,13 @@ 
 	}
 	lru_cache_add_active(page);
 	flush_dcache_page(page);
-	set_pte(pte, pte_mkdirty(pte_mkwrite(mk_pte(page, PAGE_COPY))));
+	vma = find_vma(tsk->mm, address);
+	if (!vma) {
+		printk(KERN_ERR "put_dirty_page: stack vma not extended yet %lx?\n", address);
+		prot = PAGE_COPY;
+	} else
+		prot = vma->vm_page_prot;
+	set_pte(pte, pte_mkdirty(pte_mkwrite(mk_pte(page, prot))));
 	pte_chain = page_add_rmap(page, pte, pte_chain);
 	pte_unmap(pte);
 	tsk->mm->rss++;