* [PATCH v2] ceph: correctly release memory from capsnap
@ 2021-08-17 12:35 xiubli
  2021-08-17 14:03 ` Jeff Layton
  0 siblings, 1 reply; 3+ messages in thread
From: xiubli @ 2021-08-17 12:35 UTC (permalink / raw)
  To: jlayton; +Cc: idryomov, pdonnell, ceph-devel, Xiubo Li

From: Xiubo Li <xiubli@redhat.com>

When force umounting, the client will try to remove all the session caps.
If any capsnap is in the flushing list, the remove-session-caps callback
will try to release the capsnap->flush_cap memory back to the
"ceph_cap_flush_cachep" slab cache, while it was actually allocated from
the kmalloc-256 slab cache.

At the same time, switch to list_del_init() so that, in case the force
umount has already removed the entry from the lists and a later
handle_cap_flushsnap_ack() comes in, the second list_del_init() won't
crash the kernel.

URL: https://tracker.ceph.com/issues/52283
Signed-off-by: Xiubo Li <xiubli@redhat.com>
---

- Only to resolve the crash issue.
- s/list_del/list_del_init/


 fs/ceph/caps.c       | 18 ++++++++++++++----
 fs/ceph/mds_client.c |  7 ++++---
 2 files changed, 18 insertions(+), 7 deletions(-)

diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c
index 990258cbd836..4ee0ef87130a 100644
--- a/fs/ceph/caps.c
+++ b/fs/ceph/caps.c
@@ -1667,7 +1667,16 @@ int __ceph_mark_dirty_caps(struct ceph_inode_info *ci, int mask,
 
 struct ceph_cap_flush *ceph_alloc_cap_flush(void)
 {
-	return kmem_cache_alloc(ceph_cap_flush_cachep, GFP_KERNEL);
+	struct ceph_cap_flush *cf;
+
+	cf = kmem_cache_alloc(ceph_cap_flush_cachep, GFP_KERNEL);
+	/*
+	 * caps == 0 always means for the capsnap
+	 * caps > 0 means dirty caps being flushed
+	 * caps == -1 means preallocated, not used yet
+	 */
+	cf->caps = -1;
+	return cf;
 }
 
 void ceph_free_cap_flush(struct ceph_cap_flush *cf)
@@ -1704,14 +1713,14 @@ static bool __finish_cap_flush(struct ceph_mds_client *mdsc,
 			prev->wake = true;
 			wake = false;
 		}
-		list_del(&cf->g_list);
+		list_del_init(&cf->g_list);
 	} else if (ci) {
 		if (wake && cf->i_list.prev != &ci->i_cap_flush_list) {
 			prev = list_prev_entry(cf, i_list);
 			prev->wake = true;
 			wake = false;
 		}
-		list_del(&cf->i_list);
+		list_del_init(&cf->i_list);
 	} else {
 		BUG_ON(1);
 	}
@@ -3398,7 +3407,8 @@ static void handle_cap_flush_ack(struct inode *inode, u64 flush_tid,
 		cf = list_first_entry(&to_remove,
 				      struct ceph_cap_flush, i_list);
 		list_del(&cf->i_list);
-		ceph_free_cap_flush(cf);
+		if (cf->caps)
+			ceph_free_cap_flush(cf);
 	}
 
 	if (wake_ci)
diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c
index 2c3d762c7973..dc30f56115fa 100644
--- a/fs/ceph/mds_client.c
+++ b/fs/ceph/mds_client.c
@@ -1226,7 +1226,7 @@ static int remove_session_caps_cb(struct inode *inode, struct ceph_cap *cap,
 		spin_lock(&mdsc->cap_dirty_lock);
 
 		list_for_each_entry(cf, &to_remove, i_list)
-			list_del(&cf->g_list);
+			list_del_init(&cf->g_list);
 
 		if (!list_empty(&ci->i_dirty_item)) {
 			pr_warn_ratelimited(
@@ -1266,8 +1266,9 @@ static int remove_session_caps_cb(struct inode *inode, struct ceph_cap *cap,
 		struct ceph_cap_flush *cf;
 		cf = list_first_entry(&to_remove,
 				      struct ceph_cap_flush, i_list);
-		list_del(&cf->i_list);
-		ceph_free_cap_flush(cf);
+		list_del_init(&cf->i_list);
+		if (cf->caps)
+			ceph_free_cap_flush(cf);
 	}
 
 	wake_up_all(&ci->i_cap_wq);
-- 
2.27.0
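
To make the allocation mismatch described in the commit message concrete,
here is a minimal userspace sketch, assuming a simplified model of the
structures involved: the names capsnap, cap_flush and the caps sentinel
are borrowed from the patch for illustration, while alloc_cap_flush(),
free_cap_flush() and release() are hypothetical stand-ins rather than the
real ceph helpers. It shows why a cap_flush that lives inside a larger
capsnap allocation must be freed together with the capsnap instead of
being handed back to the dedicated cap_flush cache, and how a sentinel
set at allocation time tells the two cases apart.

/*
 * Minimal sketch (not the actual ceph code) of the bug the patch
 * describes: a cap_flush embedded in a larger, kmalloc'd capsnap must
 * not be returned to the dedicated cap_flush cache. The caps field is
 * the sentinel, mirroring the cf->caps checks in the diff above.
 */
#include <stdlib.h>

struct cap_flush {
	int caps;	/* 0: embedded in a capsnap; >0: dirty caps; -1: preallocated */
};

struct capsnap {
	struct cap_flush cap_flush;	/* embedded: freed together with the capsnap */
	/* ... other snapshot state elided ... */
};

/* stand-in for kmem_cache_alloc(ceph_cap_flush_cachep, GFP_KERNEL) */
static struct cap_flush *alloc_cap_flush(void)
{
	struct cap_flush *cf = malloc(sizeof(*cf));

	if (cf)
		cf->caps = -1;	/* preallocated, not used yet */
	return cf;
}

/* stand-in for ceph_free_cap_flush(): only valid for cache-allocated entries */
static void free_cap_flush(struct cap_flush *cf)
{
	free(cf);
}

static void release(struct cap_flush *cf)
{
	/*
	 * The guard from the patch: an embedded capsnap cap_flush has
	 * caps == 0 and is released when the capsnap itself goes away,
	 * so it must never be passed to free_cap_flush().
	 */
	if (cf->caps)
		free_cap_flush(cf);
}

int main(void)
{
	struct capsnap *snap = calloc(1, sizeof(*snap));	/* caps == 0, like kzalloc */
	struct cap_flush *cf = alloc_cap_flush();		/* caps == -1 */

	if (!snap || !cf)
		return 1;

	release(&snap->cap_flush);	/* no-op: embedded, freed below with snap */
	release(cf);			/* freed back to its own allocator */
	free(snap);
	return 0;
}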


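
The other half of the change, s/list_del/list_del_init/, guards against
the race spelled out in the commit message: the force umount path may
already have unhooked the entry, and a later handle_cap_flushsnap_ack()
must be able to remove it again without crashing. Below is a simplified
userspace model of the two list helpers, not the real <linux/list.h>
(whose list_del() poisons the link pointers), just to show why repeating
list_del_init() is harmless while repeating list_del() would dereference
poison values.

/*
 * Simplified model of the kernel list helpers: list_del() leaves the
 * node's pointers poisoned, so removing it twice dereferences poison
 * and crashes; list_del_init() re-points the node at itself, so a
 * second removal is a no-op.
 */
#include <stdio.h>

struct list_head {
	struct list_head *next, *prev;
};

#define POISON1 ((struct list_head *)0x100)
#define POISON2 ((struct list_head *)0x122)

void __list_del(struct list_head *prev, struct list_head *next)
{
	next->prev = prev;
	prev->next = next;
}

void list_del(struct list_head *entry)
{
	__list_del(entry->prev, entry->next);
	entry->next = POISON1;	/* a second list_del() would dereference these */
	entry->prev = POISON2;
}

void list_del_init(struct list_head *entry)
{
	__list_del(entry->prev, entry->next);
	entry->next = entry;	/* self-linked: deleting again is harmless */
	entry->prev = entry;
}

int main(void)
{
	struct list_head head = { &head, &head };
	struct list_head node = { &head, &head };

	head.next = head.prev = &node;	/* list: head <-> node */

	list_del_init(&node);	/* e.g. the force umount path */
	list_del_init(&node);	/* e.g. a late flushsnap ack: safe, node points at itself */

	/* With plain list_del() the second call would touch POISON1/POISON2,
	 * which is the crash the patch avoids. */
	printf("second list_del_init() completed safely\n");
	return 0;
}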

* Re: [PATCH v2] ceph: correctly release memory from capsnap
  2021-08-17 12:35 [PATCH v2] ceph: correctly release memory from capsnap xiubli
@ 2021-08-17 14:03 ` Jeff Layton
  2021-08-18  1:28   ` Xiubo Li
  0 siblings, 1 reply; 3+ messages in thread
From: Jeff Layton @ 2021-08-17 14:03 UTC (permalink / raw)
  To: xiubli; +Cc: idryomov, pdonnell, ceph-devel

On Tue, 2021-08-17 at 20:35 +0800, xiubli@redhat.com wrote:
> From: Xiubo Li <xiubli@redhat.com>
> 
> When force umounting, the client will try to remove all the session caps.
> If any capsnap is in the flushing list, the remove-session-caps callback
> will try to release the capsnap->flush_cap memory back to the
> "ceph_cap_flush_cachep" slab cache, while it was actually allocated from
> the kmalloc-256 slab cache.
> 
> At the same time, switch to list_del_init() so that, in case the force
> umount has already removed the entry from the lists and a later
> handle_cap_flushsnap_ack() comes in, the second list_del_init() won't
> crash the kernel.
> 
> URL: https://tracker.ceph.com/issues/52283
> Signed-off-by: Xiubo Li <xiubli@redhat.com>
> ---
> 
> - Only to resolve the crash issue.
> - s/list_del/list_del_init/
> 
> 
>  fs/ceph/caps.c       | 18 ++++++++++++++----
>  fs/ceph/mds_client.c |  7 ++++---
>  2 files changed, 18 insertions(+), 7 deletions(-)
> 
> diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c
> index 990258cbd836..4ee0ef87130a 100644
> --- a/fs/ceph/caps.c
> +++ b/fs/ceph/caps.c
> @@ -1667,7 +1667,16 @@ int __ceph_mark_dirty_caps(struct ceph_inode_info *ci, int mask,
>  
>  struct ceph_cap_flush *ceph_alloc_cap_flush(void)
>  {
> -	return kmem_cache_alloc(ceph_cap_flush_cachep, GFP_KERNEL);
> +	struct ceph_cap_flush *cf;
> +
> +	cf = kmem_cache_alloc(ceph_cap_flush_cachep, GFP_KERNEL);
> +	/*
> +	 * caps == 0 always means for the capsnap
> +	 * caps > 0 means dirty caps being flushed
> +	 * caps == -1 means preallocated, not used yet
> +	 */
> +	cf->caps = -1;
> +	return cf;
>  }
>  
>  void ceph_free_cap_flush(struct ceph_cap_flush *cf)
> @@ -1704,14 +1713,14 @@ static bool __finish_cap_flush(struct ceph_mds_client *mdsc,
>  			prev->wake = true;
>  			wake = false;
>  		}
> -		list_del(&cf->g_list);
> +		list_del_init(&cf->g_list);
>  	} else if (ci) {
>  		if (wake && cf->i_list.prev != &ci->i_cap_flush_list) {
>  			prev = list_prev_entry(cf, i_list);
>  			prev->wake = true;
>  			wake = false;
>  		}
> -		list_del(&cf->i_list);
> +		list_del_init(&cf->i_list);
>  	} else {
>  		BUG_ON(1);
>  	}
> @@ -3398,7 +3407,8 @@ static void handle_cap_flush_ack(struct inode *inode, u64 flush_tid,
>  		cf = list_first_entry(&to_remove,
>  				      struct ceph_cap_flush, i_list);
>  		list_del(&cf->i_list);
> -		ceph_free_cap_flush(cf);
> +		if (cf->caps)
> +			ceph_free_cap_flush(cf);
>  	}
>  
>  	if (wake_ci)
> diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c
> index 2c3d762c7973..dc30f56115fa 100644
> --- a/fs/ceph/mds_client.c
> +++ b/fs/ceph/mds_client.c
> @@ -1226,7 +1226,7 @@ static int remove_session_caps_cb(struct inode *inode, struct ceph_cap *cap,
>  		spin_lock(&mdsc->cap_dirty_lock);
>  
>  		list_for_each_entry(cf, &to_remove, i_list)
> -			list_del(&cf->g_list);
> +			list_del_init(&cf->g_list);
>  
>  		if (!list_empty(&ci->i_dirty_item)) {
>  			pr_warn_ratelimited(
> @@ -1266,8 +1266,9 @@ static int remove_session_caps_cb(struct inode *inode, struct ceph_cap *cap,
>  		struct ceph_cap_flush *cf;
>  		cf = list_first_entry(&to_remove,
>  				      struct ceph_cap_flush, i_list);
> -		list_del(&cf->i_list);
> -		ceph_free_cap_flush(cf);
> +		list_del_init(&cf->i_list);
> +		if (cf->caps)
> +			ceph_free_cap_flush(cf);
>  	}
>  
>  	wake_up_all(&ci->i_cap_wq);

This patch doesn't seem to apply to the current testing branch. Is it
against an older tree?

-- 
Jeff Layton <jlayton@kernel.org>



* Re: [PATCH v2] ceph: correctly release memory from capsnap
  2021-08-17 14:03 ` Jeff Layton
@ 2021-08-18  1:28   ` Xiubo Li
  0 siblings, 0 replies; 3+ messages in thread
From: Xiubo Li @ 2021-08-18  1:28 UTC (permalink / raw)
  To: Jeff Layton; +Cc: idryomov, pdonnell, ceph-devel


On 8/17/21 10:03 PM, Jeff Layton wrote:
> On Tue, 2021-08-17 at 20:35 +0800, xiubli@redhat.com wrote:
>> From: Xiubo Li <xiubli@redhat.com>
>>
>> When force umounting, the client will try to remove all the session caps.
>> If any capsnap is in the flushing list, the remove-session-caps callback
>> will try to release the capsnap->flush_cap memory back to the
>> "ceph_cap_flush_cachep" slab cache, while it was actually allocated from
>> the kmalloc-256 slab cache.
>>
>> At the same time, switch to list_del_init() so that, in case the force
>> umount has already removed the entry from the lists and a later
>> handle_cap_flushsnap_ack() comes in, the second list_del_init() won't
>> crash the kernel.
>>
>> URL: https://tracker.ceph.com/issues/52283
>> Signed-off-by: Xiubo Li <xiubli@redhat.com>
>> ---
>>
>> - Only to resolve the crash issue.
>> - s/list_del/list_del_init/
>>
>>
>>   fs/ceph/caps.c       | 18 ++++++++++++++----
>>   fs/ceph/mds_client.c |  7 ++++---
>>   2 files changed, 18 insertions(+), 7 deletions(-)
>>
>> diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c
>> index 990258cbd836..4ee0ef87130a 100644
>> --- a/fs/ceph/caps.c
>> +++ b/fs/ceph/caps.c
>> @@ -1667,7 +1667,16 @@ int __ceph_mark_dirty_caps(struct ceph_inode_info *ci, int mask,
>>   
>>   struct ceph_cap_flush *ceph_alloc_cap_flush(void)
>>   {
>> -	return kmem_cache_alloc(ceph_cap_flush_cachep, GFP_KERNEL);
>> +	struct ceph_cap_flush *cf;
>> +
>> +	cf = kmem_cache_alloc(ceph_cap_flush_cachep, GFP_KERNEL);
>> +	/*
>> +	 * caps == 0 always means for the capsnap
>> +	 * caps > 0 means dirty caps being flushed
>> +	 * caps == -1 means preallocated, not used yet
>> +	 */
>> +	cf->caps = -1;
>> +	return cf;
>>   }
>>   
>>   void ceph_free_cap_flush(struct ceph_cap_flush *cf)
>> @@ -1704,14 +1713,14 @@ static bool __finish_cap_flush(struct ceph_mds_client *mdsc,
>>   			prev->wake = true;
>>   			wake = false;
>>   		}
>> -		list_del(&cf->g_list);
>> +		list_del_init(&cf->g_list);
>>   	} else if (ci) {
>>   		if (wake && cf->i_list.prev != &ci->i_cap_flush_list) {
>>   			prev = list_prev_entry(cf, i_list);
>>   			prev->wake = true;
>>   			wake = false;
>>   		}
>> -		list_del(&cf->i_list);
>> +		list_del_init(&cf->i_list);
>>   	} else {
>>   		BUG_ON(1);
>>   	}
>> @@ -3398,7 +3407,8 @@ static void handle_cap_flush_ack(struct inode *inode, u64 flush_tid,
>>   		cf = list_first_entry(&to_remove,
>>   				      struct ceph_cap_flush, i_list);
>>   		list_del(&cf->i_list);
>> -		ceph_free_cap_flush(cf);
>> +		if (cf->caps)
>> +			ceph_free_cap_flush(cf);
>>   	}
>>   
>>   	if (wake_ci)
>> diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c
>> index 2c3d762c7973..dc30f56115fa 100644
>> --- a/fs/ceph/mds_client.c
>> +++ b/fs/ceph/mds_client.c
>> @@ -1226,7 +1226,7 @@ static int remove_session_caps_cb(struct inode *inode, struct ceph_cap *cap,
>>   		spin_lock(&mdsc->cap_dirty_lock);
>>   
>>   		list_for_each_entry(cf, &to_remove, i_list)
>> -			list_del(&cf->g_list);
>> +			list_del_init(&cf->g_list);
>>   
>>   		if (!list_empty(&ci->i_dirty_item)) {
>>   			pr_warn_ratelimited(
>> @@ -1266,8 +1266,9 @@ static int remove_session_caps_cb(struct inode *inode, struct ceph_cap *cap,
>>   		struct ceph_cap_flush *cf;
>>   		cf = list_first_entry(&to_remove,
>>   				      struct ceph_cap_flush, i_list);
>> -		list_del(&cf->i_list);
>> -		ceph_free_cap_flush(cf);
>> +		list_del_init(&cf->i_list);
>> +		if (cf->caps)
>> +			ceph_free_cap_flush(cf);
>>   	}
>>   
>>   	wake_up_all(&ci->i_cap_wq);
> This patch doesn't seem to apply to the current testing branch. Is it
> against an older tree?

I was trying to test the old tags and forgot to switch it back.

I have sent out the V3 to fix it.

Thanks



