linux-mm.kvack.org archive mirror
* [PATCH v3 0/3] a few cleanup and bugfixes about shmem
@ 2022-06-06  3:45 Chen Wandun
  2022-06-06  3:45 ` [PATCH v3 1/3] mm/shmem: check return value of shmem_init_inodecache Chen Wandun
                   ` (2 more replies)
  0 siblings, 3 replies; 6+ messages in thread
From: Chen Wandun @ 2022-06-06  3:45 UTC (permalink / raw)
  To: hughd, akpm, linux-mm, linux-kernel, willy, david, wangkefeng.wang
  Cc: chenwandun

v2 ==> v3:
patch 1: have shmem_init_inodecache return an errno on failure instead
of NULL.

v1 ==> v2:
combine patch 2 and patch 3 into a single patch.

Chen Wandun (3):
  mm/shmem: check return value of shmem_init_inodecache
  mm/shmem: return error code directly for invalid addr
  mm/shmem: rework calculation of inflated_addr in
    shmem_get_unmapped_area

 mm/shmem.c | 26 ++++++++++++++++++--------
 1 file changed, 18 insertions(+), 8 deletions(-)

-- 
2.25.1




* [PATCH v3 1/3] mm/shmem: check return value of shmem_init_inodecache
  2022-06-06  3:45 [PATCH v3 0/3] a few cleanup and bugfixes about shmem Chen Wandun
@ 2022-06-06  3:45 ` Chen Wandun
  2022-06-06  4:19   ` Hugh Dickins
  2022-06-06  9:39   ` Muchun Song
  2022-06-06  3:45 ` [PATCH v3 2/3] mm/shmem: return error code directly for invalid addr Chen Wandun
  2022-06-06  3:45 ` [PATCH v3 3/3] mm/shmem: rework calculation of inflated_addr in shmem_get_unmapped_area Chen Wandun
  2 siblings, 2 replies; 6+ messages in thread
From: Chen Wandun @ 2022-06-06  3:45 UTC (permalink / raw)
  To: hughd, akpm, linux-mm, linux-kernel, willy, david, wangkefeng.wang
  Cc: chenwandun

A NULL pointer dereference would result if shmem_init_inodecache() failed,
so check the return value of shmem_init_inodecache().

Signed-off-by: Chen Wandun <chenwandun@huawei.com>
---
 mm/shmem.c | 11 +++++++++--
 1 file changed, 9 insertions(+), 2 deletions(-)

diff --git a/mm/shmem.c b/mm/shmem.c
index 12d45a03f7fc..7419ab219b97 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -3775,11 +3775,16 @@ static void shmem_init_inode(void *foo)
 	inode_init_once(&info->vfs_inode);
 }
 
-static void shmem_init_inodecache(void)
+static int shmem_init_inodecache(void)
 {
 	shmem_inode_cachep = kmem_cache_create("shmem_inode_cache",
 				sizeof(struct shmem_inode_info),
 				0, SLAB_PANIC|SLAB_ACCOUNT, shmem_init_inode);
+
+	if (!shmem_inode_cachep)
+		return -ENOMEM;
+
+	return 0;
 }
 
 static void shmem_destroy_inodecache(void)
@@ -3923,7 +3928,9 @@ void __init shmem_init(void)
 {
 	int error;
 
-	shmem_init_inodecache();
+	error = shmem_init_inodecache();
+	if (error)
+		goto out2;
 
 	error = register_filesystem(&shmem_fs_type);
 	if (error) {
-- 
2.25.1
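
For context, the crash the commit message worries about would come from a
later inode allocation hitting a NULL cache pointer. A minimal sketch of
that failure mode, assuming the cache really were left NULL (the names
below are hypothetical, not the actual shmem allocation path):

	static struct kmem_cache *example_cachep;	/* assume creation failed: NULL */

	static void *example_alloc(void)
	{
		/*
		 * kmem_cache_alloc() dereferences its cache argument, so a
		 * NULL example_cachep would oops on the first allocation:
		 * this is the NULL pointer access the patch guards against.
		 */
		return kmem_cache_alloc(example_cachep, GFP_KERNEL);
	}

As the replies below point out, though, SLAB_PANIC means this situation
cannot actually be reached.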




* [PATCH v3 2/3] mm/shmem: return error code directly for invalid addr
  2022-06-06  3:45 [PATCH v3 0/3] a few cleanup and bugfixes about shmem Chen Wandun
  2022-06-06  3:45 ` [PATCH v3 1/3] mm/shmem: check return value of shmem_init_inodecache Chen Wandun
@ 2022-06-06  3:45 ` Chen Wandun
  2022-06-06  3:45 ` [PATCH v3 3/3] mm/shmem: rework calculation of inflated_addr in shmem_get_unmapped_area Chen Wandun
  2 siblings, 0 replies; 6+ messages in thread
From: Chen Wandun @ 2022-06-06  3:45 UTC (permalink / raw)
  To: hughd, akpm, linux-mm, linux-kernel, willy, david, wangkefeng.wang
  Cc: chenwandun

Return an error code directly when addr is not PAGE_SIZE aligned or is
beyond TASK_SIZE; there is then no need to check these cases in the caller.

Signed-off-by: Chen Wandun <chenwandun@huawei.com>
---
 mm/shmem.c | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/mm/shmem.c b/mm/shmem.c
index 7419ab219b97..48b7172f81d6 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -2142,10 +2142,10 @@ unsigned long shmem_get_unmapped_area(struct file *file,
 		return addr;
 	if (IS_ERR_VALUE(addr))
 		return addr;
-	if (addr & ~PAGE_MASK)
-		return addr;
+	if (offset_in_page(addr))
+		return -EINVAL;
 	if (addr > TASK_SIZE - len)
-		return addr;
+		return -ENOMEM;
 
 	if (shmem_huge == SHMEM_HUGE_DENY)
 		return addr;
@@ -2196,7 +2196,7 @@ unsigned long shmem_get_unmapped_area(struct file *file,
 	inflated_addr = get_area(NULL, uaddr, inflated_len, 0, flags);
 	if (IS_ERR_VALUE(inflated_addr))
 		return addr;
-	if (inflated_addr & ~PAGE_MASK)
+	if (offset_in_page(inflated_addr))
 		return addr;
 
 	inflated_offset = inflated_addr & (HPAGE_PMD_SIZE-1);
-- 
2.25.1
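
For reference, offset_in_page() used above is the standard helper for the
open-coded mask test it replaces; a rough sketch of its definition (see
include/linux/mm.h for the authoritative one):

	/* byte offset of an address within its page */
	#define offset_in_page(p)	((unsigned long)(p) & ~PAGE_MASK)

So offset_in_page(addr) and (addr & ~PAGE_MASK) test the same condition;
the substantive change here is returning -EINVAL/-ENOMEM instead of
handing the bogus address back to the caller.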




* [PATCH v3 3/3] mm/shmem: rework calculation of inflated_addr in shmem_get_unmapped_area
  2022-06-06  3:45 [PATCH v3 0/3] a few cleanup and bugfixes about shmem Chen Wandun
  2022-06-06  3:45 ` [PATCH v3 1/3] mm/shmem: check return value of shmem_init_inodecache Chen Wandun
  2022-06-06  3:45 ` [PATCH v3 2/3] mm/shmem: return error code directly for invalid addr Chen Wandun
@ 2022-06-06  3:45 ` Chen Wandun
  2 siblings, 0 replies; 6+ messages in thread
From: Chen Wandun @ 2022-06-06  3:45 UTC (permalink / raw)
  To: hughd, akpm, linux-mm, linux-kernel, willy, david, wangkefeng.wang
  Cc: chenwandun

In shmem_get_unmapped_area(), inflated_offset and offset are unsigned long,
so the subtraction underflows (wraps around) when offset is below
inflated_offset. That is correct but a little confusing; rework the
calculation to make it explicit. No functional change.

Signed-off-by: Chen Wandun <chenwandun@huawei.com>
---
 mm/shmem.c | 7 +++++--
 1 file changed, 5 insertions(+), 2 deletions(-)

diff --git a/mm/shmem.c b/mm/shmem.c
index 48b7172f81d6..ac277b11bdfa 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -2200,9 +2200,12 @@ unsigned long shmem_get_unmapped_area(struct file *file,
 		return addr;
 
 	inflated_offset = inflated_addr & (HPAGE_PMD_SIZE-1);
-	inflated_addr += offset - inflated_offset;
-	if (inflated_offset > offset)
+	if (offset > inflated_offset)
+		inflated_addr += offset - inflated_offset;
+	else if (offset < inflated_offset) {
+		inflated_addr -= inflated_offset - offset;
 		inflated_addr += HPAGE_PMD_SIZE;
+	}
 
 	if (inflated_addr > TASK_SIZE - len)
 		return addr;
-- 
2.25.1
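
To see that the old and new forms land on the same address, a worked
example (assuming a 2MB HPAGE_PMD_SIZE, i.e. 0x200000, with
offset = 0x1000 and inflated_offset = 0x3000):

	old:  inflated_addr += offset - inflated_offset;
	          /* 0x1000 - 0x3000 wraps around: inflated_addr -= 0x2000 (mod 2^64) */
	      inflated_addr += HPAGE_PMD_SIZE;
	          /* inflated_offset > offset, so += 0x200000; net change +0x1fe000 */

	new:  inflated_addr -= inflated_offset - offset;   /* -= 0x2000 */
	      inflated_addr += HPAGE_PMD_SIZE;             /* += 0x200000 */
	          /* net change +0x1fe000: same result, no wraparound needed */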




* Re: [PATCH v3 1/3] mm/shmem: check return value of shmem_init_inodecache
  2022-06-06  3:45 ` [PATCH v3 1/3] mm/shmem: check return value of shmem_init_inodecache Chen Wandun
@ 2022-06-06  4:19   ` Hugh Dickins
  2022-06-06  9:39   ` Muchun Song
  1 sibling, 0 replies; 6+ messages in thread
From: Hugh Dickins @ 2022-06-06  4:19 UTC (permalink / raw)
  To: Chen Wandun
  Cc: hughd, akpm, linux-mm, linux-kernel, willy, david, wangkefeng.wang

On Mon, 6 Jun 2022, Chen Wandun wrote:

> A NULL pointer dereference would result if shmem_init_inodecache() failed,
> so check the return value of shmem_init_inodecache().
> 
> Signed-off-by: Chen Wandun <chenwandun@huawei.com>

Thank you, but NAK.

It's a pity that you've been put to the trouble of sending a v3, sorry
about that: but if SLAB_PANIC no longer works, or panic() has taken to
returning, then that's what needs fixing, not shmem_init_inodecache().

Was this one supposed to be the bugfix?  And I'm afraid I don't care
for your "cleanups" in 2/3 and 3/3 either: a matter of taste, and our
tastes differ.

I'd rather not spend the time on these: maybe look for somewhere else
to change around than mm/shmem.c?  Or better, please help us all by
using your time to review the functional patches being posted.

Thanks,
Hugh

> ---
>  mm/shmem.c | 11 +++++++++--
>  1 file changed, 9 insertions(+), 2 deletions(-)
> 
> diff --git a/mm/shmem.c b/mm/shmem.c
> index 12d45a03f7fc..7419ab219b97 100644
> --- a/mm/shmem.c
> +++ b/mm/shmem.c
> @@ -3775,11 +3775,16 @@ static void shmem_init_inode(void *foo)
>  	inode_init_once(&info->vfs_inode);
>  }
>  
> -static void shmem_init_inodecache(void)
> +static int shmem_init_inodecache(void)
>  {
>  	shmem_inode_cachep = kmem_cache_create("shmem_inode_cache",
>  				sizeof(struct shmem_inode_info),
>  				0, SLAB_PANIC|SLAB_ACCOUNT, shmem_init_inode);
> +
> +	if (!shmem_inode_cachep)
> +		return -ENOMEM;
> +
> +	return 0;
>  }
>  
>  static void shmem_destroy_inodecache(void)
> @@ -3923,7 +3928,9 @@ void __init shmem_init(void)
>  {
>  	int error;
>  
> -	shmem_init_inodecache();
> +	error = shmem_init_inodecache();
> +	if (error)
> +		goto out2;
>  
>  	error = register_filesystem(&shmem_fs_type);
>  	if (error) {
> -- 
> 2.25.1
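
For readers following the SLAB_PANIC point above: when that flag is passed,
a failed cache creation never returns NULL to the caller; the slab allocator
panics internally first. A minimal sketch of why the added check is
unreachable, assuming current SLAB_PANIC semantics (the cache and struct
names below are made up):

	/*
	 * With SLAB_PANIC, kmem_cache_create() panics inside the slab
	 * allocator if creation fails, so control never reaches the
	 * NULL check below -- which is why the check is dead code.
	 */
	cachep = kmem_cache_create("example_cache", sizeof(struct example),
				   0, SLAB_PANIC | SLAB_ACCOUNT, NULL);
	if (!cachep)		/* unreachable while SLAB_PANIC is set */
		return -ENOMEM;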



* Re: [PATCH v3 1/3] mm/shmem: check return value of shmem_init_inodecache
  2022-06-06  3:45 ` [PATCH v3 1/3] mm/shmem: check return value of shmem_init_inodecache Chen Wandun
  2022-06-06  4:19   ` Hugh Dickins
@ 2022-06-06  9:39   ` Muchun Song
  1 sibling, 0 replies; 6+ messages in thread
From: Muchun Song @ 2022-06-06  9:39 UTC (permalink / raw)
  To: Chen Wandun
  Cc: hughd, akpm, linux-mm, linux-kernel, willy, david, wangkefeng.wang

On Mon, Jun 06, 2022 at 11:45:28AM +0800, Chen Wandun wrote:
> A NULL pointer dereference would result if shmem_init_inodecache() failed,
> so check the return value of shmem_init_inodecache().
> 
> Signed-off-by: Chen Wandun <chenwandun@huawei.com>
> ---
>  mm/shmem.c | 11 +++++++++--
>  1 file changed, 9 insertions(+), 2 deletions(-)
> 
> diff --git a/mm/shmem.c b/mm/shmem.c
> index 12d45a03f7fc..7419ab219b97 100644
> --- a/mm/shmem.c
> +++ b/mm/shmem.c
> @@ -3775,11 +3775,16 @@ static void shmem_init_inode(void *foo)
>  	inode_init_once(&info->vfs_inode);
>  }
>  
> -static void shmem_init_inodecache(void)
> +static int shmem_init_inodecache(void)
>  {
>  	shmem_inode_cachep = kmem_cache_create("shmem_inode_cache",
>  				sizeof(struct shmem_inode_info),
>  				0, SLAB_PANIC|SLAB_ACCOUNT, shmem_init_inode);

NACK, we cannot dereference a NULL pointer since SLAB_PANIC is passed
to kmem_cache_create().

> +
> +	if (!shmem_inode_cachep)
> +		return -ENOMEM;
> +
> +	return 0;
>  }
>  
>  static void shmem_destroy_inodecache(void)
> @@ -3923,7 +3928,9 @@ void __init shmem_init(void)
>  {
>  	int error;
>  
> -	shmem_init_inodecache();
> +	error = shmem_init_inodecache();
> +	if (error)
> +		goto out2;
>  
>  	error = register_filesystem(&shmem_fs_type);
>  	if (error) {
> -- 
> 2.25.1
> 
> 


