* [PATCH] ceph: add dirs/files' opened/opening metric support
@ 2020-08-18 11:53 xiubli
  2020-08-18 20:05 ` Jeff Layton
  0 siblings, 1 reply; 8+ messages in thread
From: xiubli @ 2020-08-18 11:53 UTC (permalink / raw)
  To: jlayton; +Cc: idryomov, zyan, pdonnell, ceph-devel, Xiubo Li

From: Xiubo Li <xiubli@redhat.com>

This adds the opening/opened dirs/files metrics to the client's debugfs output.

item            total
---------------------------
dirs opening    1
dirs opened     0
files opening   2
files opened    23
pinned caps     5442
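
The counters above are printed at the top of the client's existing
"metrics" file in debugfs, before the latency table, so they can be
read with something roughly like (fsid and client id are placeholders):

  # cat /sys/kernel/debug/ceph/<fsid>.client<id>/metrics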

URL: https://tracker.ceph.com/issues/47005
Signed-off-by: Xiubo Li <xiubli@redhat.com>
---
 fs/ceph/debugfs.c |  9 +++++++++
 fs/ceph/file.c    | 22 ++++++++++++++++++++--
 fs/ceph/metric.c  |  5 +++++
 fs/ceph/metric.h  |  5 +++++
 4 files changed, 39 insertions(+), 2 deletions(-)

diff --git a/fs/ceph/debugfs.c b/fs/ceph/debugfs.c
index 97539b497e4c..20043382d825 100644
--- a/fs/ceph/debugfs.c
+++ b/fs/ceph/debugfs.c
@@ -148,6 +148,15 @@ static int metric_show(struct seq_file *s, void *p)
 	int nr_caps = 0;
 	s64 total, sum, avg, min, max, sq;
 
+	seq_printf(s, "item            total\n");
+	seq_printf(s, "---------------------------\n");
+	seq_printf(s, "dirs opening    %lld\n", atomic64_read(&m->dirs_opening));
+	seq_printf(s, "dirs opened     %lld\n", atomic64_read(&m->dirs_opened));
+	seq_printf(s, "files opening   %lld\n", atomic64_read(&m->files_opening));
+	seq_printf(s, "files opened    %lld\n", atomic64_read(&m->files_opened));
+	seq_printf(s, "pinned caps     %lld\n", atomic64_read(&m->total_caps));
+
+	seq_printf(s, "\n");
 	seq_printf(s, "item          total       avg_lat(us)     min_lat(us)     max_lat(us)     stdev(us)\n");
 	seq_printf(s, "-----------------------------------------------------------------------------------\n");
 
diff --git a/fs/ceph/file.c b/fs/ceph/file.c
index 04ab99c0223a..5fa28a620932 100644
--- a/fs/ceph/file.c
+++ b/fs/ceph/file.c
@@ -205,6 +205,8 @@ static int ceph_init_file_info(struct inode *inode, struct file *file,
 					int fmode, bool isdir)
 {
 	struct ceph_inode_info *ci = ceph_inode(inode);
+	struct ceph_fs_client *fsc = ceph_sb_to_client(inode->i_sb);
+	struct ceph_mds_client *mdsc = fsc->mdsc;
 	struct ceph_file_info *fi;
 
 	dout("%s %p %p 0%o (%s)\n", __func__, inode, file,
@@ -212,20 +214,25 @@ static int ceph_init_file_info(struct inode *inode, struct file *file,
 	BUG_ON(inode->i_fop->release != ceph_release);
 
 	if (isdir) {
-		struct ceph_dir_file_info *dfi =
-			kmem_cache_zalloc(ceph_dir_file_cachep, GFP_KERNEL);
+		struct ceph_dir_file_info *dfi;
+
+		atomic64_dec(&mdsc->metric.dirs_opening);
+		dfi = kmem_cache_zalloc(ceph_dir_file_cachep, GFP_KERNEL);
 		if (!dfi)
 			return -ENOMEM;
 
+		atomic64_inc(&mdsc->metric.dirs_opened);
 		file->private_data = dfi;
 		fi = &dfi->file_info;
 		dfi->next_offset = 2;
 		dfi->readdir_cache_idx = -1;
 	} else {
+		atomic64_dec(&mdsc->metric.files_opening);
 		fi = kmem_cache_zalloc(ceph_file_cachep, GFP_KERNEL);
 		if (!fi)
 			return -ENOMEM;
 
+		atomic64_inc(&mdsc->metric.files_opened);
 		file->private_data = fi;
 	}
 
@@ -371,6 +378,11 @@ int ceph_open(struct inode *inode, struct file *file)
 		return ceph_init_file(inode, file, fmode);
 	}
 
+	if (S_ISDIR(inode->i_mode))
+		atomic64_inc(&mdsc->metric.dirs_opening);
+	else
+		atomic64_inc(&mdsc->metric.files_opening);
+
 	/*
 	 * No need to block if we have caps on the auth MDS (for
 	 * write) or any MDS (for read).  Update wanted set
@@ -408,6 +420,8 @@ int ceph_open(struct inode *inode, struct file *file)
 	req = prepare_open_request(inode->i_sb, flags, 0);
 	if (IS_ERR(req)) {
 		err = PTR_ERR(req);
+		atomic64_dec(&mdsc->metric.dirs_opening);
+		atomic64_dec(&mdsc->metric.files_opening);
 		goto out;
 	}
 	req->r_inode = inode;
@@ -783,12 +797,15 @@ int ceph_atomic_open(struct inode *dir, struct dentry *dentry,
 int ceph_release(struct inode *inode, struct file *file)
 {
 	struct ceph_inode_info *ci = ceph_inode(inode);
+	struct ceph_fs_client *fsc = ceph_sb_to_client(inode->i_sb);
+	struct ceph_mds_client *mdsc = fsc->mdsc;
 
 	if (S_ISDIR(inode->i_mode)) {
 		struct ceph_dir_file_info *dfi = file->private_data;
 		dout("release inode %p dir file %p\n", inode, file);
 		WARN_ON(!list_empty(&dfi->file_info.rw_contexts));
 
+		atomic64_dec(&mdsc->metric.dirs_opened);
 		ceph_put_fmode(ci, dfi->file_info.fmode, 1);
 
 		if (dfi->last_readdir)
@@ -801,6 +818,7 @@ int ceph_release(struct inode *inode, struct file *file)
 		dout("release inode %p regular file %p\n", inode, file);
 		WARN_ON(!list_empty(&fi->rw_contexts));
 
+		atomic64_dec(&mdsc->metric.files_opened);
 		ceph_put_fmode(ci, fi->fmode, 1);
 
 		kmem_cache_free(ceph_file_cachep, fi);
diff --git a/fs/ceph/metric.c b/fs/ceph/metric.c
index 2466b261fba2..bf49941e02bd 100644
--- a/fs/ceph/metric.c
+++ b/fs/ceph/metric.c
@@ -192,6 +192,11 @@ int ceph_metric_init(struct ceph_client_metric *m)
 	m->total_metadatas = 0;
 	m->metadata_latency_sum = 0;
 
+        atomic64_set(&m->dirs_opening, 0);
+        atomic64_set(&m->dirs_opened, 0);
+        atomic64_set(&m->files_opening, 0);
+        atomic64_set(&m->files_opened, 0);
+
 	m->session = NULL;
 	INIT_DELAYED_WORK(&m->delayed_work, metric_delayed_work);
 
diff --git a/fs/ceph/metric.h b/fs/ceph/metric.h
index 1d0959d669d7..621d31d7b1e3 100644
--- a/fs/ceph/metric.h
+++ b/fs/ceph/metric.h
@@ -115,6 +115,11 @@ struct ceph_client_metric {
 	ktime_t metadata_latency_min;
 	ktime_t metadata_latency_max;
 
+        atomic64_t dirs_opening;
+        atomic64_t dirs_opened;
+        atomic64_t files_opening;
+        atomic64_t files_opened;
+
 	struct ceph_mds_session *session;
 	struct delayed_work delayed_work;  /* delayed work */
 };
-- 
2.18.4


* Re: [PATCH] ceph: add dirs/files' opened/opening metric support
  2020-08-18 11:53 [PATCH] ceph: add dirs/files' opened/opening metric support xiubli
@ 2020-08-18 20:05 ` Jeff Layton
  2020-08-18 20:10   ` Patrick Donnelly
  2020-08-19  1:52   ` Xiubo Li
  0 siblings, 2 replies; 8+ messages in thread
From: Jeff Layton @ 2020-08-18 20:05 UTC (permalink / raw)
  To: xiubli; +Cc: idryomov, zyan, pdonnell, ceph-devel

On Tue, 2020-08-18 at 07:53 -0400, xiubli@redhat.com wrote:
> From: Xiubo Li <xiubli@redhat.com>
> 
> This adds the opening/opened dirs/files metrics to the client's debugfs output.
> 
> item            total
> ---------------------------
> dirs opening    1
> dirs opened     0
> files opening   2
> files opened    23
> pinned caps     5442
> 

Not much explanation for this patch or in the tracker below. I think
this warrants a good description of the problems you intend to help
solve with this.

I assume you're looking at this to help track down "client not
responding to cap recall" messages? 

> URL: https://tracker.ceph.com/issues/47005
> Signed-off-by: Xiubo Li <xiubli@redhat.com>
> ---
>  fs/ceph/debugfs.c |  9 +++++++++
>  fs/ceph/file.c    | 22 ++++++++++++++++++++--
>  fs/ceph/metric.c  |  5 +++++
>  fs/ceph/metric.h  |  5 +++++
>  4 files changed, 39 insertions(+), 2 deletions(-)
> 
> diff --git a/fs/ceph/debugfs.c b/fs/ceph/debugfs.c
> index 97539b497e4c..20043382d825 100644
> --- a/fs/ceph/debugfs.c
> +++ b/fs/ceph/debugfs.c
> @@ -148,6 +148,15 @@ static int metric_show(struct seq_file *s, void *p)
>  	int nr_caps = 0;
>  	s64 total, sum, avg, min, max, sq;
>  
> +	seq_printf(s, "item            total\n");
> +	seq_printf(s, "---------------------------\n");
> +	seq_printf(s, "dirs opening    %lld\n", atomic64_read(&m->dirs_opening));
> +	seq_printf(s, "dirs opened     %lld\n", atomic64_read(&m->dirs_opened));
> +	seq_printf(s, "files opening   %lld\n", atomic64_read(&m->files_opening));
> +	seq_printf(s, "files opened    %lld\n", atomic64_read(&m->files_opened));
> +	seq_printf(s, "pinned caps     %lld\n", atomic64_read(&m->total_caps));
> +
> +	seq_printf(s, "\n");
>  	seq_printf(s, "item          total       avg_lat(us)     min_lat(us)     max_lat(us)     stdev(us)\n");
>  	seq_printf(s, "-----------------------------------------------------------------------------------\n");
>  
> diff --git a/fs/ceph/file.c b/fs/ceph/file.c
> index 04ab99c0223a..5fa28a620932 100644
> --- a/fs/ceph/file.c
> +++ b/fs/ceph/file.c
> @@ -205,6 +205,8 @@ static int ceph_init_file_info(struct inode *inode, struct file *file,
>  					int fmode, bool isdir)
>  {
>  	struct ceph_inode_info *ci = ceph_inode(inode);
> +	struct ceph_fs_client *fsc = ceph_sb_to_client(inode->i_sb);
> +	struct ceph_mds_client *mdsc = fsc->mdsc;
>  	struct ceph_file_info *fi;
>  
>  	dout("%s %p %p 0%o (%s)\n", __func__, inode, file,
> @@ -212,20 +214,25 @@ static int ceph_init_file_info(struct inode *inode, struct file *file,
>  	BUG_ON(inode->i_fop->release != ceph_release);
>  
>  	if (isdir) {
> -		struct ceph_dir_file_info *dfi =
> -			kmem_cache_zalloc(ceph_dir_file_cachep, GFP_KERNEL);
> +		struct ceph_dir_file_info *dfi;
> +
> +		atomic64_dec(&mdsc->metric.dirs_opening);
> +		dfi = kmem_cache_zalloc(ceph_dir_file_cachep, GFP_KERNEL);
>  		if (!dfi)
>  			return -ENOMEM;
>  
> +		atomic64_inc(&mdsc->metric.dirs_opened);
>  		file->private_data = dfi;
>  		fi = &dfi->file_info;
>  		dfi->next_offset = 2;
>  		dfi->readdir_cache_idx = -1;
>  	} else {
> +		atomic64_dec(&mdsc->metric.files_opening);
>  		fi = kmem_cache_zalloc(ceph_file_cachep, GFP_KERNEL);
>  		if (!fi)
>  			return -ENOMEM;
>  
> +		atomic64_inc(&mdsc->metric.files_opened);
>  		file->private_data = fi;
>  	}
>  

Bear in mind that if the same file has been opened several times, then
you'll get an increment for each.

Would it potentially be more useful to report the number of inodes that
have open file descriptions associated with them? It's hard for me to
know as I'm not clear on the intended use-case for this.

> @@ -371,6 +378,11 @@ int ceph_open(struct inode *inode, struct file *file)
>  		return ceph_init_file(inode, file, fmode);
>  	}
>  
> +	if (S_ISDIR(inode->i_mode))
> +		atomic64_inc(&mdsc->metric.dirs_opening);
> +	else
> +		atomic64_inc(&mdsc->metric.files_opening);
> +
>  	/*
>  	 * No need to block if we have caps on the auth MDS (for
>  	 * write) or any MDS (for read).  Update wanted set
> @@ -408,6 +420,8 @@ int ceph_open(struct inode *inode, struct file *file)
>  	req = prepare_open_request(inode->i_sb, flags, 0);
>  	if (IS_ERR(req)) {
>  		err = PTR_ERR(req);
> +		atomic64_dec(&mdsc->metric.dirs_opening);
> +		atomic64_dec(&mdsc->metric.files_opening);
>  		goto out;
>  	}
>  	req->r_inode = inode;
> @@ -783,12 +797,15 @@ int ceph_atomic_open(struct inode *dir, struct dentry *dentry,
>  int ceph_release(struct inode *inode, struct file *file)
>  {
>  	struct ceph_inode_info *ci = ceph_inode(inode);
> +	struct ceph_fs_client *fsc = ceph_sb_to_client(inode->i_sb);
> +	struct ceph_mds_client *mdsc = fsc->mdsc;
>  
>  	if (S_ISDIR(inode->i_mode)) {
>  		struct ceph_dir_file_info *dfi = file->private_data;
>  		dout("release inode %p dir file %p\n", inode, file);
>  		WARN_ON(!list_empty(&dfi->file_info.rw_contexts));
>  
> +		atomic64_dec(&mdsc->metric.dirs_opened);
>  		ceph_put_fmode(ci, dfi->file_info.fmode, 1);
>  
>  		if (dfi->last_readdir)
> @@ -801,6 +818,7 @@ int ceph_release(struct inode *inode, struct file *file)
>  		dout("release inode %p regular file %p\n", inode, file);
>  		WARN_ON(!list_empty(&fi->rw_contexts));
>  
> +		atomic64_dec(&mdsc->metric.files_opened);
>  		ceph_put_fmode(ci, fi->fmode, 1);
>  
>  		kmem_cache_free(ceph_file_cachep, fi);
> diff --git a/fs/ceph/metric.c b/fs/ceph/metric.c
> index 2466b261fba2..bf49941e02bd 100644
> --- a/fs/ceph/metric.c
> +++ b/fs/ceph/metric.c
> @@ -192,6 +192,11 @@ int ceph_metric_init(struct ceph_client_metric *m)
>  	m->total_metadatas = 0;
>  	m->metadata_latency_sum = 0;
>  
> +        atomic64_set(&m->dirs_opening, 0);
> +        atomic64_set(&m->dirs_opened, 0);
> +        atomic64_set(&m->files_opening, 0);
> +        atomic64_set(&m->files_opened, 0);
> +
>  	m->session = NULL;
>  	INIT_DELAYED_WORK(&m->delayed_work, metric_delayed_work);
>  
> diff --git a/fs/ceph/metric.h b/fs/ceph/metric.h
> index 1d0959d669d7..621d31d7b1e3 100644
> --- a/fs/ceph/metric.h
> +++ b/fs/ceph/metric.h
> @@ -115,6 +115,11 @@ struct ceph_client_metric {
>  	ktime_t metadata_latency_min;
>  	ktime_t metadata_latency_max;
>  
> +        atomic64_t dirs_opening;
> +        atomic64_t dirs_opened;
> +        atomic64_t files_opening;
> +        atomic64_t files_opened;
> +
>	struct ceph_mds_session *session;
>  	struct delayed_work delayed_work;  /* delayed work */
>  };

-- 
Jeff Layton <jlayton@kernel.org>


* Re: [PATCH] ceph: add dirs/files' opened/opening metric support
  2020-08-18 20:05 ` Jeff Layton
@ 2020-08-18 20:10   ` Patrick Donnelly
  2020-08-18 20:40     ` Jeff Layton
  2020-08-19  2:01     ` Xiubo Li
  2020-08-19  1:52   ` Xiubo Li
  1 sibling, 2 replies; 8+ messages in thread
From: Patrick Donnelly @ 2020-08-18 20:10 UTC (permalink / raw)
  To: Jeff Layton; +Cc: Xiubo Li, Ilya Dryomov, Zheng Yan, Ceph Development

On Tue, Aug 18, 2020 at 1:05 PM Jeff Layton <jlayton@kernel.org> wrote:
> Bear in mind that if the same file has been opened several times, then
> you'll get an increment for each.

Having an open file count (even for the same inode) and a count of
inodes opened sounds useful to me. The latter would require some kind
of refcounting for each inode? Maybe that's too expensive though.

> Would it potentially be more useful to report the number of inodes that
> have open file descriptions associated with them? It's hard for me to
> know as I'm not clear on the intended use-case for this.

Use-case is more information available via `fs top`.

-- 
Patrick Donnelly, Ph.D.
He / Him / His
Principal Software Engineer
Red Hat Sunnyvale, CA
GPG: 19F28A586F808C2402351B93C3301A3E258DD79D


* Re: [PATCH] ceph: add dirs/files' opened/opening metric support
  2020-08-18 20:10   ` Patrick Donnelly
@ 2020-08-18 20:40     ` Jeff Layton
  2020-08-19  2:01     ` Xiubo Li
  1 sibling, 0 replies; 8+ messages in thread
From: Jeff Layton @ 2020-08-18 20:40 UTC (permalink / raw)
  To: Patrick Donnelly; +Cc: Xiubo Li, Ilya Dryomov, Zheng Yan, Ceph Development

On Tue, 2020-08-18 at 13:10 -0700, Patrick Donnelly wrote:
> On Tue, Aug 18, 2020 at 1:05 PM Jeff Layton <jlayton@kernel.org> wrote:
> > Bear in mind that if the same file has been opened several times, then
> > you'll get an increment for each.
> 
> Having an open file count (even for the same inode) and a count of
> inodes opened sounds useful to me. The latter would require some kind
> of refcounting for each inode? Maybe that's too expensive though.
> 

It might be useful. It really depends on what you want to do with this
info.

The MDS is generally interested in inodes and their caps, and it usually
knows how many opens we have, except in the case where we have
sufficient caps and can make the attempt w/o having to talk to it. Is
knowing an official number of actual open file descriptions helpful?
Why?

> > Would it potentially be more useful to report the number of inodes that
> > have open file descriptions associated with them? It's hard for me to
> > know as I'm not clear on the intended use-case for this.
> 
> Use-case is more information available via `fs top`.
> 

Which is, again, vague. What do you expect to do with this info? Why is
it useful? Articulating that up front will help us determine whether
we're measuring what we need to measure.
-- 
Jeff Layton <jlayton@kernel.org>


* Re: [PATCH] ceph: add dirs/files' opened/opening metric support
  2020-08-18 20:05 ` Jeff Layton
  2020-08-18 20:10   ` Patrick Donnelly
@ 2020-08-19  1:52   ` Xiubo Li
  1 sibling, 0 replies; 8+ messages in thread
From: Xiubo Li @ 2020-08-19  1:52 UTC (permalink / raw)
  To: Jeff Layton; +Cc: idryomov, zyan, pdonnell, ceph-devel

On 2020/8/19 4:05, Jeff Layton wrote:
> On Tue, 2020-08-18 at 07:53 -0400, xiubli@redhat.com wrote:
>> From: Xiubo Li <xiubli@redhat.com>
>>
>> This adds the opening/opened dirs/files metrics to the client's debugfs output.
>>
>> item            total
>> ---------------------------
>> dirs opening    1
>> dirs opened     0
>> files opening   2
>> files opened    23
>> pinned caps     5442
>>
> Not much explanation for this patch or in the tracker below. I think
> this warrants a good description of the problems you intend to help
> solve with this.
>
> I assume you're looking at this to help track down "client not
> responding to cap recall" messages?
>
>> URL: https://tracker.ceph.com/issues/47005
>> Signed-off-by: Xiubo Li <xiubli@redhat.com>
>> ---
>>   fs/ceph/debugfs.c |  9 +++++++++
>>   fs/ceph/file.c    | 22 ++++++++++++++++++++--
>>   fs/ceph/metric.c  |  5 +++++
>>   fs/ceph/metric.h  |  5 +++++
>>   4 files changed, 39 insertions(+), 2 deletions(-)
>>
>> diff --git a/fs/ceph/debugfs.c b/fs/ceph/debugfs.c
>> index 97539b497e4c..20043382d825 100644
>> --- a/fs/ceph/debugfs.c
>> +++ b/fs/ceph/debugfs.c
>> @@ -148,6 +148,15 @@ static int metric_show(struct seq_file *s, void *p)
>>   	int nr_caps = 0;
>>   	s64 total, sum, avg, min, max, sq;
>>   
>> +	seq_printf(s, "item            total\n");
>> +	seq_printf(s, "---------------------------\n");
>> +	seq_printf(s, "dirs opening    %lld\n", atomic64_read(&m->dirs_opening));
>> +	seq_printf(s, "dirs opened     %lld\n", atomic64_read(&m->dirs_opened));
>> +	seq_printf(s, "files opening   %lld\n", atomic64_read(&m->files_opening));
>> +	seq_printf(s, "files opened    %lld\n", atomic64_read(&m->files_opened));
>> +	seq_printf(s, "pinned caps     %lld\n", atomic64_read(&m->total_caps));
>> +
>> +	seq_printf(s, "\n");
>>   	seq_printf(s, "item          total       avg_lat(us)     min_lat(us)     max_lat(us)     stdev(us)\n");
>>   	seq_printf(s, "-----------------------------------------------------------------------------------\n");
>>   
>> diff --git a/fs/ceph/file.c b/fs/ceph/file.c
>> index 04ab99c0223a..5fa28a620932 100644
>> --- a/fs/ceph/file.c
>> +++ b/fs/ceph/file.c
>> @@ -205,6 +205,8 @@ static int ceph_init_file_info(struct inode *inode, struct file *file,
>>   					int fmode, bool isdir)
>>   {
>>   	struct ceph_inode_info *ci = ceph_inode(inode);
>> +	struct ceph_fs_client *fsc = ceph_sb_to_client(inode->i_sb);
>> +	struct ceph_mds_client *mdsc = fsc->mdsc;
>>   	struct ceph_file_info *fi;
>>   
>>   	dout("%s %p %p 0%o (%s)\n", __func__, inode, file,
>> @@ -212,20 +214,25 @@ static int ceph_init_file_info(struct inode *inode, struct file *file,
>>   	BUG_ON(inode->i_fop->release != ceph_release);
>>   
>>   	if (isdir) {
>> -		struct ceph_dir_file_info *dfi =
>> -			kmem_cache_zalloc(ceph_dir_file_cachep, GFP_KERNEL);
>> +		struct ceph_dir_file_info *dfi;
>> +
>> +		atomic64_dec(&mdsc->metric.dirs_opening);
>> +		dfi = kmem_cache_zalloc(ceph_dir_file_cachep, GFP_KERNEL);
>>   		if (!dfi)
>>   			return -ENOMEM;
>>   
>> +		atomic64_inc(&mdsc->metric.dirs_opened);
>>   		file->private_data = dfi;
>>   		fi = &dfi->file_info;
>>   		dfi->next_offset = 2;
>>   		dfi->readdir_cache_idx = -1;
>>   	} else {
>> +		atomic64_dec(&mdsc->metric.files_opening);
>>   		fi = kmem_cache_zalloc(ceph_file_cachep, GFP_KERNEL);
>>   		if (!fi)
>>   			return -ENOMEM;
>>   
>> +		atomic64_inc(&mdsc->metric.files_opened);
>>   		file->private_data = fi;
>>   	}
>>   
> Bear in mind that if the same file has been opened several times, then
> you'll get an increment for each.

This is what this patch wants to do: just count the total numbers of
opening/opened files/dirs.

> Would it potentially be more useful to report the number of inodes that
> have open file descriptions associated with them?

Yeah, this sounds useful too.


> It's hard for me to
> know as I'm not clear on the intended use-case for this.

IMO, it would be very useful to know how many inodes are pinned, how
many of them are actually in use, and the total number of users per
client. Some inodes may only be pinned but never used, and that view
could give us some ideas for improving the code. And yes, for the
second point we would need to know how many inodes have open files.

Thanks

BRs

>> @@ -371,6 +378,11 @@ int ceph_open(struct inode *inode, struct file *file)
>>   		return ceph_init_file(inode, file, fmode);
>>   	}
>>   
>> +	if (S_ISDIR(inode->i_mode))
>> +		atomic64_inc(&mdsc->metric.dirs_opening);
>> +	else
>> +		atomic64_inc(&mdsc->metric.files_opening);
>> +
>>   	/*
>>   	 * No need to block if we have caps on the auth MDS (for
>>   	 * write) or any MDS (for read).  Update wanted set
>> @@ -408,6 +420,8 @@ int ceph_open(struct inode *inode, struct file *file)
>>   	req = prepare_open_request(inode->i_sb, flags, 0);
>>   	if (IS_ERR(req)) {
>>   		err = PTR_ERR(req);
>> +		atomic64_dec(&mdsc->metric.dirs_opening);
>> +		atomic64_dec(&mdsc->metric.files_opening);
>>   		goto out;
>>   	}
>>   	req->r_inode = inode;
>> @@ -783,12 +797,15 @@ int ceph_atomic_open(struct inode *dir, struct dentry *dentry,
>>   int ceph_release(struct inode *inode, struct file *file)
>>   {
>>   	struct ceph_inode_info *ci = ceph_inode(inode);
>> +	struct ceph_fs_client *fsc = ceph_sb_to_client(inode->i_sb);
>> +	struct ceph_mds_client *mdsc = fsc->mdsc;
>>   
>>   	if (S_ISDIR(inode->i_mode)) {
>>   		struct ceph_dir_file_info *dfi = file->private_data;
>>   		dout("release inode %p dir file %p\n", inode, file);
>>   		WARN_ON(!list_empty(&dfi->file_info.rw_contexts));
>>   
>> +		atomic64_dec(&mdsc->metric.dirs_opened);
>>   		ceph_put_fmode(ci, dfi->file_info.fmode, 1);
>>   
>>   		if (dfi->last_readdir)
>> @@ -801,6 +818,7 @@ int ceph_release(struct inode *inode, struct file *file)
>>   		dout("release inode %p regular file %p\n", inode, file);
>>   		WARN_ON(!list_empty(&fi->rw_contexts));
>>   
>> +		atomic64_dec(&mdsc->metric.files_opened);
>>   		ceph_put_fmode(ci, fi->fmode, 1);
>>   
>>   		kmem_cache_free(ceph_file_cachep, fi);
>> diff --git a/fs/ceph/metric.c b/fs/ceph/metric.c
>> index 2466b261fba2..bf49941e02bd 100644
>> --- a/fs/ceph/metric.c
>> +++ b/fs/ceph/metric.c
>> @@ -192,6 +192,11 @@ int ceph_metric_init(struct ceph_client_metric *m)
>>   	m->total_metadatas = 0;
>>   	m->metadata_latency_sum = 0;
>>   
>> +        atomic64_set(&m->dirs_opening, 0);
>> +        atomic64_set(&m->dirs_opened, 0);
>> +        atomic64_set(&m->files_opening, 0);
>> +        atomic64_set(&m->files_opened, 0);
>> +
>>   	m->session = NULL;
>>   	INIT_DELAYED_WORK(&m->delayed_work, metric_delayed_work);
>>   
>> diff --git a/fs/ceph/metric.h b/fs/ceph/metric.h
>> index 1d0959d669d7..621d31d7b1e3 100644
>> --- a/fs/ceph/metric.h
>> +++ b/fs/ceph/metric.h
>> @@ -115,6 +115,11 @@ struct ceph_client_metric {
>>   	ktime_t metadata_latency_min;
>>   	ktime_t metadata_latency_max;
>>   
>> +        atomic64_t dirs_opening;
>> +        atomic64_t dirs_opened;
>> +        atomic64_t files_opening;
>> +        atomic64_t files_opened;
>> +
>> 	struct ceph_mds_session *session;
>>   	struct delayed_work delayed_work;  /* delayed work */
>>   };



* Re: [PATCH] ceph: add dirs/files' opened/opening metric support
  2020-08-18 20:10   ` Patrick Donnelly
  2020-08-18 20:40     ` Jeff Layton
@ 2020-08-19  2:01     ` Xiubo Li
  2020-08-19 13:41       ` Jeff Layton
  1 sibling, 1 reply; 8+ messages in thread
From: Xiubo Li @ 2020-08-19  2:01 UTC (permalink / raw)
  To: Patrick Donnelly, Jeff Layton; +Cc: Ilya Dryomov, Zheng Yan, Ceph Development

On 2020/8/19 4:10, Patrick Donnelly wrote:
> On Tue, Aug 18, 2020 at 1:05 PM Jeff Layton <jlayton@kernel.org> wrote:
>> Bear in mind that if the same file has been opened several times, then
>> you'll get an increment for each.
> Having an open file count (even for the same inode) and a count of
> inodes opened sounds useful to me. The latter would require some kind
> of refcounting for each inode? Maybe that's too expensive though.

For the second, yes, we would need one percpu refcount. It could be
incremented in ceph_get_fmode() the first time any entry of
ci->i_nr_by_mode[] becomes non-zero, and decremented in
ceph_put_fmode() once all the entries are zero again. IMO that should
be okay and won't cost too much.

Thanks

BRs

>
>> Would it potentially be more useful to report the number of inodes that
>> have open file descriptions associated with them? It's hard for me to
>> know as I'm not clear on the intended use-case for this.
> Use-case is more information available via `fs top`.
>


* Re: [PATCH] ceph: add dirs/files' opened/opening metric support
  2020-08-19  2:01     ` Xiubo Li
@ 2020-08-19 13:41       ` Jeff Layton
  2020-08-19 13:44         ` Xiubo Li
  0 siblings, 1 reply; 8+ messages in thread
From: Jeff Layton @ 2020-08-19 13:41 UTC (permalink / raw)
  To: Xiubo Li, Patrick Donnelly; +Cc: Ilya Dryomov, Zheng Yan, Ceph Development

On Wed, 2020-08-19 at 10:01 +0800, Xiubo Li wrote:
> On 2020/8/19 4:10, Patrick Donnelly wrote:
> > On Tue, Aug 18, 2020 at 1:05 PM Jeff Layton <jlayton@kernel.org> wrote:
> > > Bear in mind that if the same file has been opened several times, then
> > > you'll get an increment for each.
> > Having an open file count (even for the same inode) and a count of
> > inodes opened sounds useful to me. The latter would require some kind
> > of refcounting for each inode? Maybe that's too expensive though.
> 
> For the second, yes, we would need one percpu refcount. It could be
> incremented in ceph_get_fmode() the first time any entry of
> ci->i_nr_by_mode[] becomes non-zero, and decremented in
> ceph_put_fmode() once all the entries are zero again. IMO that should
> be okay and won't cost too much.
> 
> Thanks
> 
> BRs
> 

Sure.

To be clear, I'm not _really_ disputing the usefulness of these stats,
but I think if we're going to measure stuff like this, the changelog
needs to justify it in some way.

We may find in 2-3 years that some of these are not as useful as we
first thought, and if we don't have any of the original justification in
the changelog for it, it's harder to determine whether removing them is
ok.

> > > Would it potentially be more useful to report the number of inodes that
> > > have open file descriptions associated with them? It's hard for me to
> > > know as I'm not clear on the intended use-case for this.
> > Use-case is more information available via `fs top`.
> > 

-- 
Jeff Layton <jlayton@kernel.org>


* Re: [PATCH] ceph: add dirs/files' opened/opening metric support
  2020-08-19 13:41       ` Jeff Layton
@ 2020-08-19 13:44         ` Xiubo Li
  0 siblings, 0 replies; 8+ messages in thread
From: Xiubo Li @ 2020-08-19 13:44 UTC (permalink / raw)
  To: Jeff Layton, Patrick Donnelly; +Cc: Ilya Dryomov, Zheng Yan, Ceph Development

On 2020/8/19 21:41, Jeff Layton wrote:
> On Wed, 2020-08-19 at 10:01 +0800, Xiubo Li wrote:
>> On 2020/8/19 4:10, Patrick Donnelly wrote:
>>> On Tue, Aug 18, 2020 at 1:05 PM Jeff Layton <jlayton@kernel.org> wrote:
>>>> Bear in mind that if the same file has been opened several times, then
>>>> you'll get an increment for each.
>>> Having an open file count (even for the same inode) and a count of
>>> inodes opened sounds useful to me. The latter would require some kind
>>> of refcounting for each inode? Maybe that's too expensive though.
>> For the second, yes, we would need one percpu refcount. It could be
>> incremented in ceph_get_fmode() the first time any entry of
>> ci->i_nr_by_mode[] becomes non-zero, and decremented in
>> ceph_put_fmode() once all the entries are zero again. IMO that should
>> be okay and won't cost too much.
>>
>> Thanks
>>
>> BRs
>>
> Sure.
>
> To be clear, I'm not _really_ disputing the usefulness of these stats,
> but I think if we're going to measure stuff like this, the changelog
> needs to justify it in some way.
>
> We may find in 2-3 years that some of these are not as useful as we
> first thought, and if we don't have any of the original justification in
> the changelog for it, it's harder to determine whether removing them is
> ok.

This makes sense to me. I will add more details to the tracker and the
commit message later.

Thanks.

>>>> Would it potentially be more useful to report the number of inodes that
>>>> have open file descriptions associated with them? It's hard for me to
>>>> know as I'm not clear on the intended use-case for this.
>>> Use-case is more information available via `fs top`.
>>>

