mm-commits.vger.kernel.org archive mirror
* + mm-memremap_pages-convert-to-struct-range-fix.patch added to -mm tree
@ 2020-09-29  0:51 akpm
From: akpm @ 2020-09-29  0:51 UTC
  To: dan.carpenter, dan.j.williams, jgg, jglisse, joao.m.martins,
	Julia.Lawall, Markus.Elfring, mm-commits, rcampbell,
	vishal.l.verma, weiyongjun1


The patch titled
     Subject: mm/hmm/test: use after free in dmirror_allocate_chunk()
has been added to the -mm tree.  Its filename is
     mm-memremap_pages-convert-to-struct-range-fix.patch

This patch should soon appear at
    https://ozlabs.org/~akpm/mmots/broken-out/mm-memremap_pages-convert-to-struct-range-fix.patch
and later at
    https://ozlabs.org/~akpm/mmotm/broken-out/mm-memremap_pages-convert-to-struct-range-fix.patch

Before you just go and hit "reply", please:
   a) Consider who else should be cc'ed
   b) Prefer to cc a suitable mailing list as well
   c) Ideally: find the original patch on the mailing list and do a
      reply-to-all to that, adding suitable additional cc's

*** Remember to use Documentation/process/submit-checklist.rst when testing your code ***

The -mm tree is included in linux-next and is updated
there every 3-4 working days

------------------------------------------------------
From: Dan Carpenter <dan.carpenter@oracle.com>
Subject: mm/hmm/test: use after free in dmirror_allocate_chunk()

The error handling code does this:

err_free:
	kfree(devmem);
        ^^^^^^^^^^^^^
err_release:
	release_mem_region(devmem->pagemap.range.start, range_len(&devmem->pagemap.range));
                           ^^^^^^^^
The problem is that by the time we use "devmem->pagemap.range.start", the
"devmem" pointer is either NULL or has already been freed.

Neither the allocation nor the call to request_free_mem_region() needs to
be done under the lock, so I moved both to the start of the function.
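
In short, the resources are now acquired before the lock is taken and are
released in reverse order on failure, so "devmem" is always valid when its
range is used.  A condensed sketch of the resulting flow (the chunk-array
bookkeeping and pagemap setup are elided; the full patch is below):

	devmem = kzalloc(sizeof(*devmem), GFP_KERNEL);
	if (!devmem)
		return false;

	res = request_free_mem_region(&iomem_resource, DEVMEM_CHUNK_SIZE,
				      "hmm_dmirror");
	if (IS_ERR(res))
		goto err_devmem;

	mutex_lock(&mdevice->devmem_lock);
	/* ... grow mdevice->devmem_chunks[] (goto err_release on failure),
	 *     then memremap_pages() the chunk ... */
	mutex_unlock(&mdevice->devmem_lock);
	return true;

err_release:
	mutex_unlock(&mdevice->devmem_lock);
	release_mem_region(devmem->pagemap.range.start,
			   range_len(&devmem->pagemap.range));
err_devmem:
	kfree(devmem);
	return false;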

Link: https://lkml.kernel.org/r/20200926121402.GA7467@kadam
Fixes: 1f9c4bb986d9 ("mm/memremap_pages: convert to 'struct range'")
Signed-off-by: Dan Carpenter <dan.carpenter@oracle.com>
Reviewed-by: Ralph Campbell <rcampbell@nvidia.com>
Cc: Markus Elfring <Markus.Elfring@web.de>
Cc: Dan Williams <dan.j.williams@intel.com>
Cc: Jerome Glisse <jglisse@redhat.com>
Cc: Jason Gunthorpe <jgg@ziepe.ca>
Cc: Julia Lawall <Julia.Lawall@lip6.fr>
Cc: Wei Yongjun <weiyongjun1@huawei.com>
Cc: Vishal Verma <vishal.l.verma@intel.com>
Cc: Joao Martins <joao.m.martins@oracle.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
---

 lib/test_hmm.c |   42 +++++++++++++++++++++---------------------
 1 file changed, 21 insertions(+), 21 deletions(-)

--- a/lib/test_hmm.c~mm-memremap_pages-convert-to-struct-range-fix
+++ a/lib/test_hmm.c
@@ -460,6 +460,21 @@ static bool dmirror_allocate_chunk(struc
 	unsigned long pfn_last;
 	void *ptr;
 
+	devmem = kzalloc(sizeof(*devmem), GFP_KERNEL);
+	if (!devmem)
+		return -ENOMEM;
+
+	res = request_free_mem_region(&iomem_resource, DEVMEM_CHUNK_SIZE,
+				      "hmm_dmirror");
+	if (IS_ERR(res))
+		goto err_devmem;
+
+	devmem->pagemap.type = MEMORY_DEVICE_PRIVATE;
+	devmem->pagemap.range.start = res->start;
+	devmem->pagemap.range.end = res->end;
+	devmem->pagemap.ops = &dmirror_devmem_ops;
+	devmem->pagemap.owner = mdevice;
+
 	mutex_lock(&mdevice->devmem_lock);
 
 	if (mdevice->devmem_count == mdevice->devmem_capacity) {
@@ -472,29 +487,14 @@ static bool dmirror_allocate_chunk(struc
 				sizeof(new_chunks[0]) * new_capacity,
 				GFP_KERNEL);
 		if (!new_chunks)
-			goto err;
+			goto err_release;
 		mdevice->devmem_capacity = new_capacity;
 		mdevice->devmem_chunks = new_chunks;
 	}
 
-	res = request_free_mem_region(&iomem_resource, DEVMEM_CHUNK_SIZE,
-					"hmm_dmirror");
-	if (IS_ERR(res))
-		goto err;
-
-	devmem = kzalloc(sizeof(*devmem), GFP_KERNEL);
-	if (!devmem)
-		goto err_release;
-
-	devmem->pagemap.type = MEMORY_DEVICE_PRIVATE;
-	devmem->pagemap.range.start = res->start;
-	devmem->pagemap.range.end = res->end;
-	devmem->pagemap.ops = &dmirror_devmem_ops;
-	devmem->pagemap.owner = mdevice;
-
 	ptr = memremap_pages(&devmem->pagemap, numa_node_id());
 	if (IS_ERR(ptr))
-		goto err_free;
+		goto err_release;
 
 	devmem->mdevice = mdevice;
 	pfn_first = devmem->pagemap.range.start >> PAGE_SHIFT;
@@ -525,12 +525,12 @@ static bool dmirror_allocate_chunk(struc
 
 	return true;
 
-err_free:
-	kfree(devmem);
 err_release:
-	release_mem_region(devmem->pagemap.range.start, range_len(&devmem->pagemap.range));
-err:
 	mutex_unlock(&mdevice->devmem_lock);
+	release_mem_region(devmem->pagemap.range.start, range_len(&devmem->pagemap.range));
+err_devmem:
+	kfree(devmem);
+
 	return false;
 }
 
_

Patches currently in -mm which might be from dan.carpenter@oracle.com are

mm-memremap_pages-convert-to-struct-range-fix.patch



* + mm-memremap_pages-convert-to-struct-range-fix.patch added to -mm tree
@ 2020-09-12 18:39 akpm
From: akpm @ 2020-09-12 18:39 UTC
  To: mm-commits, vgoyal, roger.pau, sfr


The patch titled
     Subject: merge fix up for "mm/memremap_pages: convert to 'struct range'"
has been added to the -mm tree.  Its filename is
     mm-memremap_pages-convert-to-struct-range-fix.patch

This patch should soon appear at
    https://ozlabs.org/~akpm/mmots/broken-out/mm-memremap_pages-convert-to-struct-range-fix.patch
and later at
    https://ozlabs.org/~akpm/mmotm/broken-out/mm-memremap_pages-convert-to-struct-range-fix.patch

------------------------------------------------------
From: Stephen Rothwell <sfr@canb.auug.org.au>
Subject: merge fix up for "mm/memremap_pages: convert to 'struct range'"

Link: https://lkml.kernel.org/r/20200908200950.1368e71b@canb.auug.org.au
Signed-off-by: Stephen Rothwell <sfr@canb.auug.org.au>
Cc: Vivek Goyal <vgoyal@redhat.com>
Cc: Roger Pau Monné <roger.pau@citrix.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
---

 drivers/xen/unpopulated-alloc.c |   15 +++++++++------
 fs/fuse/virtio_fs.c             |    3 +--
 2 files changed, 10 insertions(+), 8 deletions(-)

--- a/drivers/xen/unpopulated-alloc.c~mm-memremap_pages-convert-to-struct-range-fix
+++ a/drivers/xen/unpopulated-alloc.c
@@ -18,6 +18,7 @@ static unsigned int list_count;
 static int fill_list(unsigned int nr_pages)
 {
 	struct dev_pagemap *pgmap;
+	struct resource res;
 	void *vaddr;
 	unsigned int i, alloc_pages = round_up(nr_pages, PAGES_PER_SECTION);
 	int ret;
@@ -27,10 +28,10 @@ static int fill_list(unsigned int nr_pag
 		return -ENOMEM;
 
 	pgmap->type = MEMORY_DEVICE_GENERIC;
-	pgmap->res.name = "Xen scratch";
-	pgmap->res.flags = IORESOURCE_MEM | IORESOURCE_BUSY;
+	res.name = "Xen scratch";
+	res.flags = IORESOURCE_MEM | IORESOURCE_BUSY;
 
-	ret = allocate_resource(&iomem_resource, &pgmap->res,
+	ret = allocate_resource(&iomem_resource, &res,
 				alloc_pages * PAGE_SIZE, 0, -1,
 				PAGES_PER_SECTION * PAGE_SIZE, NULL, NULL);
 	if (ret < 0) {
@@ -38,6 +39,8 @@ static int fill_list(unsigned int nr_pag
 		kfree(pgmap);
 		return ret;
 	}
+	pgmap->range.start = res.start;
+	pgmap->range.end = res.end;
 
 #ifdef CONFIG_XEN_HAVE_PVMMU
         /*
@@ -50,12 +53,12 @@ static int fill_list(unsigned int nr_pag
          * conflict with any devices.
          */
 	if (!xen_feature(XENFEAT_auto_translated_physmap)) {
-		xen_pfn_t pfn = PFN_DOWN(pgmap->res.start);
+		xen_pfn_t pfn = PFN_DOWN(res.start);
 
 		for (i = 0; i < alloc_pages; i++) {
 			if (!set_phys_to_machine(pfn + i, INVALID_P2M_ENTRY)) {
 				pr_warn("set_phys_to_machine() failed, no memory added\n");
-				release_resource(&pgmap->res);
+				release_resource(&res);
 				kfree(pgmap);
 				return -ENOMEM;
 			}
@@ -66,7 +69,7 @@ static int fill_list(unsigned int nr_pag
 	vaddr = memremap_pages(pgmap, NUMA_NO_NODE);
 	if (IS_ERR(vaddr)) {
 		pr_err("Cannot remap memory range\n");
-		release_resource(&pgmap->res);
+		release_resource(&res);
 		kfree(pgmap);
 		return PTR_ERR(vaddr);
 	}
--- a/fs/fuse/virtio_fs.c~mm-memremap_pages-convert-to-struct-range-fix
+++ a/fs/fuse/virtio_fs.c
@@ -835,8 +835,7 @@ static int virtio_fs_setup_dax(struct vi
 	 * initialize a struct resource from scratch (only the start
 	 * and end fields will be used).
 	 */
-	pgmap->res = (struct resource){
-		.name = "virtio-fs dax window",
+	pgmap->range = (struct range){
 		.start = (phys_addr_t) cache_reg.addr,
 		.end = (phys_addr_t) cache_reg.addr + cache_reg.len - 1,
 	};
_
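
Both hunks follow the same pattern: after the conversion, struct dev_pagemap
no longer embeds a struct resource, so callers reserve (and release) the
backing resource themselves and hand memremap_pages() only the physical
start/end via pgmap->range.  In outline (condensed from the Xen hunk above;
the allocate_resource() arguments are unchanged and abbreviated here):

	/* Before: the resource was embedded in the pagemap. */
	pgmap->res.name  = "Xen scratch";
	pgmap->res.flags = IORESOURCE_MEM | IORESOURCE_BUSY;
	ret = allocate_resource(&iomem_resource, &pgmap->res, ...);

	/* After: the caller owns a local resource and the pagemap only
	 * carries the range that was reserved. */
	res.name  = "Xen scratch";
	res.flags = IORESOURCE_MEM | IORESOURCE_BUSY;
	ret = allocate_resource(&iomem_resource, &res, ...);
	pgmap->range.start = res.start;
	pgmap->range.end   = res.end;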

Patches currently in -mm which might be from sfr@canb.auug.org.au are

mm-memremap_pages-convert-to-struct-range-fix.patch
mm-madvise-introduce-process_madvise-syscall-an-external-memory-hinting-api-fix-fix-fix.patch

