From: Andrew Morton <akpm@zip.com.au>
To: Linus Torvalds <torvalds@transmeta.com>
Cc: lkml <linux-kernel@vger.kernel.org>
Subject: [patch 9/12] sync get_user_pages with 2.4
Date: Fri, 09 Aug 2002 17:57:27 -0700
Message-ID: <3D5464F7.179A69A0@zip.com.au>

Forward port of get_user_pages() change from 2.4.

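For reference, the calling convention of the function being touched here, from
a caller's point of view (a minimal sketch; `uaddr', `page' and the single-page
case are illustrative, not part of this patch):

	struct page *page;
	unsigned long uaddr;	/* some user-space address */
	int ret;

	down_read(&current->mm->mmap_sem);
	ret = get_user_pages(current, current->mm,
			uaddr & PAGE_MASK,	/* start */
			1,			/* len, in pages */
			1,			/* write access wanted */
			0,			/* don't force */
			&page, NULL);
	up_read(&current->mm->mmap_sem);
	if (ret != 1)
		return -EFAULT;
	/* ... access the pinned page ... */
	page_cache_release(page);	/* drop the ref get_user_pages() took */
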
- If the vma is marked as a VM_IO area then fail the lookup.

  This prevents the kernel deadlocks which occur when applications that
  have frame buffers mapped try to dump core.  It also prevents a kernel
  oops when a debugger is attached to a process which has an IO mmap
  (see the mmap sketch after this list).

- Check that the mapped page is inside mem_map[] (pfn_valid); see the
  pfn_valid() sketch below.

- Inline follow_page() and remove the preempt_disable()s.  It has
  only a single callsite and is always called with mm->page_table_lock
  held (see the spin_lock() sketch below).
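
On the first change: a driver which maps an IO aperture sets VM_IO in its
mmap handler, so there are no mem_map[] pages behind the mapping.  A minimal
sketch in the 2.4/2.5 style (mydrv_mmap and `phys' are made-up names, and
error handling is abbreviated):

	static int mydrv_mmap(struct file *file, struct vm_area_struct *vma)
	{
		unsigned long size = vma->vm_end - vma->vm_start;

		vma->vm_flags |= VM_IO;	/* no struct pages behind this range */
		if (remap_page_range(vma->vm_start, phys, size,
					vma->vm_page_prot))
			return -EAGAIN;
		return 0;
	}

With this patch get_user_pages() refuses such a vma up front rather than
deadlocking or oopsing on it.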

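On the second change: pfn_valid() checks that a page frame number has a
mem_map[] entry at all.  On flat-memory i386 it is just a bounds check,
roughly (the exact definition is per-architecture):

	#define pfn_valid(pfn)	((pfn) < max_mapnr)

Without it, pfn_to_page() on a pte which points into an IO aperture would
hand back a struct page pointer into nowhere.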

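On the third change: the preempt_disable() pairs are redundant once the
caller holds mm->page_table_lock, because spin_lock() already disables
preemption on CONFIG_PREEMPT kernels.  Roughly:

	#define spin_lock(lock)			\
	do {					\
		preempt_disable();		\
		_raw_spin_lock(lock);		\
	} while (0)

pte_offset_map() may take a kmap_atomic() slot on highmem configurations,
which must not be preempted; holding the page table spinlock covers that.
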
 memory.c |   45 +++++++++++++++++++++++++--------------------
 1 files changed, 25 insertions, 20 deletions

--- 2.5.30/mm/memory.c~get_user_pages-sync	Fri Aug  9 17:36:46 2002
+++ 2.5.30-akpm/mm/memory.c	Fri Aug  9 17:36:46 2002
@@ -432,9 +432,11 @@ void zap_page_range(struct vm_area_struc
 }
 
 /*
- * Do a quick page-table lookup for a single page. 
+ * Do a quick page-table lookup for a single page.
+ * mm->page_table_lock must be held.
  */
-static struct page * follow_page(struct mm_struct *mm, unsigned long address, int write) 
+static inline struct page *
+follow_page(struct mm_struct *mm, unsigned long address, int write) 
 {
 	pgd_t *pgd;
 	pmd_t *pmd;
@@ -449,19 +451,14 @@ static struct page * follow_page(struct 
 	if (pmd_none(*pmd) || pmd_bad(*pmd))
 		goto out;
 
-	preempt_disable();
 	ptep = pte_offset_map(pmd, address);
-	if (!ptep) {
-		preempt_enable();
+	if (!ptep)
 		goto out;
-	}
 
 	pte = *ptep;
 	pte_unmap(ptep);
-	preempt_enable();
 	if (pte_present(pte)) {
-		if (!write ||
-		    (pte_write(pte) && pte_dirty(pte))) {
+		if (!write || (pte_write(pte) && pte_dirty(pte))) {
 			pfn = pte_pfn(pte);
 			if (pfn_valid(pfn))
 				return pfn_to_page(pfn);
@@ -478,13 +475,17 @@ out:
  * with IO-aperture pages in kiobufs.
  */
 
-static inline struct page * get_page_map(struct page *page)
+static inline struct page *get_page_map(struct page *page)
 {
+	if (!pfn_valid(page_to_pfn(page)))
+		return 0;
 	return page;
 }
 
-int get_user_pages(struct task_struct *tsk, struct mm_struct *mm, unsigned long start,
-		int len, int write, int force, struct page **pages, struct vm_area_struct **vmas)
+
+int get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
+		unsigned long start, int len, int write, int force,
+		struct page **pages, struct vm_area_struct **vmas)
 {
 	int i;
 	unsigned int flags;
@@ -496,14 +497,14 @@ int get_user_pages(struct task_struct *t
 	flags = write ? (VM_WRITE | VM_MAYWRITE) : (VM_READ | VM_MAYREAD);
 	flags &= force ? (VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);
 	i = 0;
-	
 
 	do {
 		struct vm_area_struct *	vma;
 
 		vma = find_extend_vma(mm, start);
 
-		if ( !vma || !(flags & vma->vm_flags) )
+		if (!vma || (pages && (vma->vm_flags & VM_IO))
+				|| !(flags & vma->vm_flags))
 			return i ? : -EFAULT;
 
 		spin_lock(&mm->page_table_lock);
@@ -511,7 +512,7 @@ int get_user_pages(struct task_struct *t
 			struct page *map;
 			while (!(map = follow_page(mm, start, write))) {
 				spin_unlock(&mm->page_table_lock);
-				switch (handle_mm_fault(mm, vma, start, write)) {
+				switch (handle_mm_fault(mm,vma,start,write)) {
 				case VM_FAULT_MINOR:
 					tsk->min_flt++;
 					break;
@@ -529,11 +530,14 @@ int get_user_pages(struct task_struct *t
 			}
 			if (pages) {
 				pages[i] = get_page_map(map);
-				/* FIXME: call the correct function,
-				 * depending on the type of the found page
-				 */
-				if (pages[i])
-					page_cache_get(pages[i]);
+				if (!pages[i]) {
+					spin_unlock(&mm->page_table_lock);
+					while (i--)
+						page_cache_release(pages[i]);
+					i = -EFAULT;
+					goto out;
+				}
+				page_cache_get(pages[i]);
 			}
 			if (vmas)
 				vmas[i] = vma;
@@ -543,6 +547,7 @@ int get_user_pages(struct task_struct *t
 		} while(len && start < vma->vm_end);
 		spin_unlock(&mm->page_table_lock);
 	} while(len);
+out:
 	return i;
 }
 

