* [PATCH v2] libceph: add osd op counter metric support
@ 2020-11-10 11:01 xiubli
2020-11-10 12:42 ` Jeff Layton
0 siblings, 1 reply; 3+ messages in thread
From: xiubli @ 2020-11-10 11:01 UTC (permalink / raw)
To: jlayton, idryomov; +Cc: zyan, pdonnell, ceph-devel, Xiubo Li
From: Xiubo Li <xiubli@redhat.com>
The logic is the same as that of osdc/Objecter.cc in userspace ceph.
URL: https://tracker.ceph.com/issues/48053
Signed-off-by: Xiubo Li <xiubli@redhat.com>
---
V2:
- remove other unused counter metrics
include/linux/ceph/osd_client.h | 9 ++++++
net/ceph/debugfs.c | 13 ++++++++
net/ceph/osd_client.c | 56 +++++++++++++++++++++++++++++++++
3 files changed, 78 insertions(+)
diff --git a/include/linux/ceph/osd_client.h b/include/linux/ceph/osd_client.h
index 83fa08a06507..24301513b186 100644
--- a/include/linux/ceph/osd_client.h
+++ b/include/linux/ceph/osd_client.h
@@ -339,6 +339,13 @@ struct ceph_osd_backoff {
struct ceph_hobject_id *end;
};
+struct ceph_osd_metric {
+ struct percpu_counter op_ops;
+ struct percpu_counter op_rmw;
+ struct percpu_counter op_r;
+ struct percpu_counter op_w;
+};
+
#define CEPH_LINGER_ID_START 0xffff000000000000ULL
struct ceph_osd_client {
@@ -371,6 +378,8 @@ struct ceph_osd_client {
struct ceph_msgpool msgpool_op;
struct ceph_msgpool msgpool_op_reply;
+ struct ceph_osd_metric metric;
+
struct workqueue_struct *notify_wq;
struct workqueue_struct *completion_wq;
};
diff --git a/net/ceph/debugfs.c b/net/ceph/debugfs.c
index 2110439f8a24..af90019386ab 100644
--- a/net/ceph/debugfs.c
+++ b/net/ceph/debugfs.c
@@ -339,6 +339,16 @@ static void dump_backoffs(struct seq_file *s, struct ceph_osd *osd)
mutex_unlock(&osd->lock);
}
+static void dump_op_metric(struct seq_file *s, struct ceph_osd_client *osdc)
+{
+ struct ceph_osd_metric *m = &osdc->metric;
+
+ seq_printf(s, " op_ops: %lld\n", percpu_counter_sum(&m->op_ops));
+ seq_printf(s, " op_rmw: %lld\n", percpu_counter_sum(&m->op_rmw));
+ seq_printf(s, " op_r: %lld\n", percpu_counter_sum(&m->op_r));
+ seq_printf(s, " op_w: %lld\n", percpu_counter_sum(&m->op_w));
+}
+
static int osdc_show(struct seq_file *s, void *pp)
{
struct ceph_client *client = s->private;
@@ -372,6 +382,9 @@ static int osdc_show(struct seq_file *s, void *pp)
}
up_read(&osdc->lock);
+
+ seq_puts(s, "OP METRIC:\n");
+ dump_op_metric(s, osdc);
return 0;
}
diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c
index 7901ab6c79fd..66774b2bc584 100644
--- a/net/ceph/osd_client.c
+++ b/net/ceph/osd_client.c
@@ -2424,6 +2424,21 @@ static void __submit_request(struct ceph_osd_request *req, bool wrlocked)
goto again;
}
+static void osd_acount_op_metric(struct ceph_osd_request *req)
+{
+ struct ceph_osd_metric *m = &req->r_osdc->metric;
+
+ percpu_counter_inc(&m->op_ops);
+
+ if ((req->r_flags & (CEPH_OSD_FLAG_READ | CEPH_OSD_FLAG_READ))
+ == (CEPH_OSD_FLAG_READ | CEPH_OSD_FLAG_READ))
+ percpu_counter_inc(&m->op_rmw);
+ if (req->r_flags & CEPH_OSD_FLAG_READ)
+ percpu_counter_inc(&m->op_r);
+ else if (req->r_flags & CEPH_OSD_FLAG_WRITE)
+ percpu_counter_inc(&m->op_w);
+}
+
static void account_request(struct ceph_osd_request *req)
{
WARN_ON(req->r_flags & (CEPH_OSD_FLAG_ACK | CEPH_OSD_FLAG_ONDISK));
@@ -2434,6 +2449,8 @@ static void account_request(struct ceph_osd_request *req)
req->r_start_stamp = jiffies;
req->r_start_latency = ktime_get();
+
+ osd_acount_op_metric(req);
}
static void submit_request(struct ceph_osd_request *req, bool wrlocked)
@@ -5205,6 +5222,39 @@ void ceph_osdc_reopen_osds(struct ceph_osd_client *osdc)
up_write(&osdc->lock);
}
+static void ceph_metric_destroy(struct ceph_osd_metric *m)
+{
+ percpu_counter_destroy(&m->op_ops);
+ percpu_counter_destroy(&m->op_rmw);
+ percpu_counter_destroy(&m->op_r);
+ percpu_counter_destroy(&m->op_w);
+}
+
+static int ceph_metric_init(struct ceph_osd_metric *m)
+{
+ int ret;
+
+ memset(m, 0, sizeof(*m));
+
+ ret = percpu_counter_init(&m->op_ops, 0, GFP_NOIO);
+ if (ret)
+ return ret;
+ ret = percpu_counter_init(&m->op_rmw, 0, GFP_NOIO);
+ if (ret)
+ goto err;
+ ret = percpu_counter_init(&m->op_r, 0, GFP_NOIO);
+ if (ret)
+ goto err;
+ ret = percpu_counter_init(&m->op_w, 0, GFP_NOIO);
+ if (ret)
+ goto err;
+ return 0;
+
+err:
+ ceph_metric_destroy(m);
+ return ret;
+}
+
/*
* init, shutdown
*/
@@ -5257,6 +5307,9 @@ int ceph_osdc_init(struct ceph_osd_client *osdc, struct ceph_client *client)
if (!osdc->completion_wq)
goto out_notify_wq;
+ if (ceph_metric_init(&osdc->metric) < 0)
+ goto out_completion_wq;
+
schedule_delayed_work(&osdc->timeout_work,
osdc->client->options->osd_keepalive_timeout);
schedule_delayed_work(&osdc->osds_timeout_work,
@@ -5264,6 +5317,8 @@ int ceph_osdc_init(struct ceph_osd_client *osdc, struct ceph_client *client)
return 0;
+out_completion_wq:
+ destroy_workqueue(osdc->completion_wq);
out_notify_wq:
destroy_workqueue(osdc->notify_wq);
out_msgpool_reply:
@@ -5302,6 +5357,7 @@ void ceph_osdc_stop(struct ceph_osd_client *osdc)
WARN_ON(atomic_read(&osdc->num_requests));
WARN_ON(atomic_read(&osdc->num_homeless));
+ ceph_metric_destroy(&osdc->metric);
ceph_osdmap_destroy(osdc->osdmap);
mempool_destroy(osdc->req_mempool);
ceph_msgpool_destroy(&osdc->msgpool_op);
--
2.27.0
^ permalink raw reply related [flat|nested] 3+ messages in thread
* Re: [PATCH v2] libceph: add osd op counter metric support
2020-11-10 11:01 [PATCH v2] libceph: add osd op counter metric support xiubli
@ 2020-11-10 12:42 ` Jeff Layton
2020-11-10 12:53 ` Xiubo Li
0 siblings, 1 reply; 3+ messages in thread
From: Jeff Layton @ 2020-11-10 12:42 UTC (permalink / raw)
To: xiubli, idryomov; +Cc: zyan, pdonnell, ceph-devel
On Tue, 2020-11-10 at 19:01 +0800, xiubli@redhat.com wrote:
> From: Xiubo Li <xiubli@redhat.com>
>
> The logic is the same with osdc/Objecter.cc in ceph in user space.
>
> URL: https://tracker.ceph.com/issues/48053
> Signed-off-by: Xiubo Li <xiubli@redhat.com>
> ---
>
> V2:
> - remove other not used counter metrics
>
> include/linux/ceph/osd_client.h | 9 ++++++
> net/ceph/debugfs.c | 13 ++++++++
> net/ceph/osd_client.c | 56 +++++++++++++++++++++++++++++++++
> 3 files changed, 78 insertions(+)
>
> diff --git a/include/linux/ceph/osd_client.h b/include/linux/ceph/osd_client.h
> index 83fa08a06507..24301513b186 100644
> --- a/include/linux/ceph/osd_client.h
> +++ b/include/linux/ceph/osd_client.h
> @@ -339,6 +339,13 @@ struct ceph_osd_backoff {
> struct ceph_hobject_id *end;
> };
>
>
>
>
> +struct ceph_osd_metric {
> + struct percpu_counter op_ops;
> + struct percpu_counter op_rmw;
> + struct percpu_counter op_r;
> + struct percpu_counter op_w;
> +};
> +
> #define CEPH_LINGER_ID_START 0xffff000000000000ULL
>
>
>
>
> struct ceph_osd_client {
> @@ -371,6 +378,8 @@ struct ceph_osd_client {
> struct ceph_msgpool msgpool_op;
> struct ceph_msgpool msgpool_op_reply;
>
>
>
>
> + struct ceph_osd_metric metric;
> +
> struct workqueue_struct *notify_wq;
> struct workqueue_struct *completion_wq;
> };
> diff --git a/net/ceph/debugfs.c b/net/ceph/debugfs.c
> index 2110439f8a24..af90019386ab 100644
> --- a/net/ceph/debugfs.c
> +++ b/net/ceph/debugfs.c
> @@ -339,6 +339,16 @@ static void dump_backoffs(struct seq_file *s, struct ceph_osd *osd)
> mutex_unlock(&osd->lock);
> }
>
>
>
>
> +static void dump_op_metric(struct seq_file *s, struct ceph_osd_client *osdc)
> +{
> + struct ceph_osd_metric *m = &osdc->metric;
> +
> + seq_printf(s, " op_ops: %lld\n", percpu_counter_sum(&m->op_ops));
> + seq_printf(s, " op_rmw: %lld\n", percpu_counter_sum(&m->op_rmw));
> + seq_printf(s, " op_r: %lld\n", percpu_counter_sum(&m->op_r));
> + seq_printf(s, " op_w: %lld\n", percpu_counter_sum(&m->op_w));
> +}
> +
> static int osdc_show(struct seq_file *s, void *pp)
> {
> struct ceph_client *client = s->private;
> @@ -372,6 +382,9 @@ static int osdc_show(struct seq_file *s, void *pp)
> }
>
>
>
>
> up_read(&osdc->lock);
> +
> + seq_puts(s, "OP METRIC:\n");
> + dump_op_metric(s, osdc);
> return 0;
> }
>
>
>
>
> diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c
> index 7901ab6c79fd..66774b2bc584 100644
> --- a/net/ceph/osd_client.c
> +++ b/net/ceph/osd_client.c
> @@ -2424,6 +2424,21 @@ static void __submit_request(struct ceph_osd_request *req, bool wrlocked)
> goto again;
> }
>
>
>
>
> +static void osd_acount_op_metric(struct ceph_osd_request *req)
> +{
> + struct ceph_osd_metric *m = &req->r_osdc->metric;
> +
> + percpu_counter_inc(&m->op_ops);
> +
> + if ((req->r_flags & (CEPH_OSD_FLAG_READ | CEPH_OSD_FLAG_READ))
> + == (CEPH_OSD_FLAG_READ | CEPH_OSD_FLAG_READ))
> + percpu_counter_inc(&m->op_rmw);
What's the point of or'ing the same flag together, and how is this
different from the read one below? Was it supposed to be or'ed with
CEPH_OSD_FLAG_WRITE ?
> + if (req->r_flags & CEPH_OSD_FLAG_READ)
> + percpu_counter_inc(&m->op_r);
> + else if (req->r_flags & CEPH_OSD_FLAG_WRITE)
> + percpu_counter_inc(&m->op_w);
> +}
> +
> static void account_request(struct ceph_osd_request *req)
> {
> WARN_ON(req->r_flags & (CEPH_OSD_FLAG_ACK | CEPH_OSD_FLAG_ONDISK));
> @@ -2434,6 +2449,8 @@ static void account_request(struct ceph_osd_request *req)
>
>
>
>
> req->r_start_stamp = jiffies;
> req->r_start_latency = ktime_get();
> +
> + osd_acount_op_metric(req);
> }
>
>
>
>
> static void submit_request(struct ceph_osd_request *req, bool wrlocked)
> @@ -5205,6 +5222,39 @@ void ceph_osdc_reopen_osds(struct ceph_osd_client *osdc)
> up_write(&osdc->lock);
> }
>
>
>
>
> +static void ceph_metric_destroy(struct ceph_osd_metric *m)
> +{
> + percpu_counter_destroy(&m->op_ops);
> + percpu_counter_destroy(&m->op_rmw);
> + percpu_counter_destroy(&m->op_r);
> + percpu_counter_destroy(&m->op_w);
> +}
> +
> +static int ceph_metric_init(struct ceph_osd_metric *m)
> +{
> + int ret;
> +
> + memset(m, 0, sizeof(*m));
> +
> + ret = percpu_counter_init(&m->op_ops, 0, GFP_NOIO);
> + if (ret)
> + return ret;
> + ret = percpu_counter_init(&m->op_rmw, 0, GFP_NOIO);
> + if (ret)
> + goto err;
> + ret = percpu_counter_init(&m->op_r, 0, GFP_NOIO);
> + if (ret)
> + goto err;
> + ret = percpu_counter_init(&m->op_w, 0, GFP_NOIO);
> + if (ret)
> + goto err;
> + return 0;
> +
> +err:
> + ceph_metric_destroy(m);
> + return ret;
> +}
> +
> /*
> * init, shutdown
> */
> @@ -5257,6 +5307,9 @@ int ceph_osdc_init(struct ceph_osd_client *osdc, struct ceph_client *client)
> if (!osdc->completion_wq)
> goto out_notify_wq;
>
>
>
>
> + if (ceph_metric_init(&osdc->metric) < 0)
> + goto out_completion_wq;
> +
> schedule_delayed_work(&osdc->timeout_work,
> osdc->client->options->osd_keepalive_timeout);
> schedule_delayed_work(&osdc->osds_timeout_work,
> @@ -5264,6 +5317,8 @@ int ceph_osdc_init(struct ceph_osd_client *osdc, struct ceph_client *client)
>
>
>
>
> return 0;
>
>
>
>
> +out_completion_wq:
> + destroy_workqueue(osdc->completion_wq);
> out_notify_wq:
> destroy_workqueue(osdc->notify_wq);
> out_msgpool_reply:
> @@ -5302,6 +5357,7 @@ void ceph_osdc_stop(struct ceph_osd_client *osdc)
> WARN_ON(atomic_read(&osdc->num_requests));
> WARN_ON(atomic_read(&osdc->num_homeless));
>
>
>
>
> + ceph_metric_destroy(&osdc->metric);
> ceph_osdmap_destroy(osdc->osdmap);
> mempool_destroy(osdc->req_mempool);
> ceph_msgpool_destroy(&osdc->msgpool_op);
--
Jeff Layton <jlayton@kernel.org>
^ permalink raw reply [flat|nested] 3+ messages in thread
* Re: [PATCH v2] libceph: add osd op counter metric support
2020-11-10 12:42 ` Jeff Layton
@ 2020-11-10 12:53 ` Xiubo Li
0 siblings, 0 replies; 3+ messages in thread
From: Xiubo Li @ 2020-11-10 12:53 UTC (permalink / raw)
To: Jeff Layton, idryomov; +Cc: zyan, pdonnell, ceph-devel
On 2020/11/10 20:42, Jeff Layton wrote:
> On Tue, 2020-11-10 at 19:01 +0800, xiubli@redhat.com wrote:
>> From: Xiubo Li <xiubli@redhat.com>
>>
>> The logic is the same with osdc/Objecter.cc in ceph in user space.
>>
>> URL: https://tracker.ceph.com/issues/48053
>> Signed-off-by: Xiubo Li <xiubli@redhat.com>
>> ---
>>
>> V2:
>> - remove other not used counter metrics
>>
>> include/linux/ceph/osd_client.h | 9 ++++++
>> net/ceph/debugfs.c | 13 ++++++++
>> net/ceph/osd_client.c | 56 +++++++++++++++++++++++++++++++++
>> 3 files changed, 78 insertions(+)
>>
>> diff --git a/include/linux/ceph/osd_client.h b/include/linux/ceph/osd_client.h
>> index 83fa08a06507..24301513b186 100644
>> --- a/include/linux/ceph/osd_client.h
>> +++ b/include/linux/ceph/osd_client.h
>> @@ -339,6 +339,13 @@ struct ceph_osd_backoff {
>> struct ceph_hobject_id *end;
>> };
>>
>>
>>
>>
>> +struct ceph_osd_metric {
>> + struct percpu_counter op_ops;
>> + struct percpu_counter op_rmw;
>> + struct percpu_counter op_r;
>> + struct percpu_counter op_w;
>> +};
>> +
>> #define CEPH_LINGER_ID_START 0xffff000000000000ULL
>>
>>
>>
>>
>> struct ceph_osd_client {
>> @@ -371,6 +378,8 @@ struct ceph_osd_client {
>> struct ceph_msgpool msgpool_op;
>> struct ceph_msgpool msgpool_op_reply;
>>
>>
>>
>>
>> + struct ceph_osd_metric metric;
>> +
>> struct workqueue_struct *notify_wq;
>> struct workqueue_struct *completion_wq;
>> };
>> diff --git a/net/ceph/debugfs.c b/net/ceph/debugfs.c
>> index 2110439f8a24..af90019386ab 100644
>> --- a/net/ceph/debugfs.c
>> +++ b/net/ceph/debugfs.c
>> @@ -339,6 +339,16 @@ static void dump_backoffs(struct seq_file *s, struct ceph_osd *osd)
>> mutex_unlock(&osd->lock);
>> }
>>
>>
>>
>>
>> +static void dump_op_metric(struct seq_file *s, struct ceph_osd_client *osdc)
>> +{
>> + struct ceph_osd_metric *m = &osdc->metric;
>> +
>> + seq_printf(s, " op_ops: %lld\n", percpu_counter_sum(&m->op_ops));
>> + seq_printf(s, " op_rmw: %lld\n", percpu_counter_sum(&m->op_rmw));
>> + seq_printf(s, " op_r: %lld\n", percpu_counter_sum(&m->op_r));
>> + seq_printf(s, " op_w: %lld\n", percpu_counter_sum(&m->op_w));
>> +}
>> +
>> static int osdc_show(struct seq_file *s, void *pp)
>> {
>> struct ceph_client *client = s->private;
>> @@ -372,6 +382,9 @@ static int osdc_show(struct seq_file *s, void *pp)
>> }
>>
>>
>>
>>
>> up_read(&osdc->lock);
>> +
>> + seq_puts(s, "OP METRIC:\n");
>> + dump_op_metric(s, osdc);
>> return 0;
>> }
>>
>>
>>
>>
>> diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c
>> index 7901ab6c79fd..66774b2bc584 100644
>> --- a/net/ceph/osd_client.c
>> +++ b/net/ceph/osd_client.c
>> @@ -2424,6 +2424,21 @@ static void __submit_request(struct ceph_osd_request *req, bool wrlocked)
>> goto again;
>> }
>>
>>
>>
>>
>> +static void osd_acount_op_metric(struct ceph_osd_request *req)
>> +{
>> + struct ceph_osd_metric *m = &req->r_osdc->metric;
>> +
>> + percpu_counter_inc(&m->op_ops);
>> +
>> + if ((req->r_flags & (CEPH_OSD_FLAG_READ | CEPH_OSD_FLAG_READ))
>> + == (CEPH_OSD_FLAG_READ | CEPH_OSD_FLAG_READ))
>> + percpu_counter_inc(&m->op_rmw);
> What's the point of or'ing the same flag together, and how is this
> different from the read one below? Was it supposed to be or'ed with
> CEPH_OSD_FLAG_WRITE ?
Yeah, it should be READ | WRITE instead.
Will fix it.
Thanks
>
>> + if (req->r_flags & CEPH_OSD_FLAG_READ)
>> + percpu_counter_inc(&m->op_r);
>> + else if (req->r_flags & CEPH_OSD_FLAG_WRITE)
>> + percpu_counter_inc(&m->op_w);
>> +}
>> +
>> static void account_request(struct ceph_osd_request *req)
>> {
>> WARN_ON(req->r_flags & (CEPH_OSD_FLAG_ACK | CEPH_OSD_FLAG_ONDISK));
>> @@ -2434,6 +2449,8 @@ static void account_request(struct ceph_osd_request *req)
>>
>>
>>
>>
>> req->r_start_stamp = jiffies;
>> req->r_start_latency = ktime_get();
>> +
>> + osd_acount_op_metric(req);
>> }
>>
>>
>>
>>
>> static void submit_request(struct ceph_osd_request *req, bool wrlocked)
>> @@ -5205,6 +5222,39 @@ void ceph_osdc_reopen_osds(struct ceph_osd_client *osdc)
>> up_write(&osdc->lock);
>> }
>>
>>
>>
>>
>> +static void ceph_metric_destroy(struct ceph_osd_metric *m)
>> +{
>> + percpu_counter_destroy(&m->op_ops);
>> + percpu_counter_destroy(&m->op_rmw);
>> + percpu_counter_destroy(&m->op_r);
>> + percpu_counter_destroy(&m->op_w);
>> +}
>> +
>> +static int ceph_metric_init(struct ceph_osd_metric *m)
>> +{
>> + int ret;
>> +
>> + memset(m, 0, sizeof(*m));
>> +
>> + ret = percpu_counter_init(&m->op_ops, 0, GFP_NOIO);
>> + if (ret)
>> + return ret;
>> + ret = percpu_counter_init(&m->op_rmw, 0, GFP_NOIO);
>> + if (ret)
>> + goto err;
>> + ret = percpu_counter_init(&m->op_r, 0, GFP_NOIO);
>> + if (ret)
>> + goto err;
>> + ret = percpu_counter_init(&m->op_w, 0, GFP_NOIO);
>> + if (ret)
>> + goto err;
>> + return 0;
>> +
>> +err:
>> + ceph_metric_destroy(m);
>> + return ret;
>> +}
>> +
>> /*
>> * init, shutdown
>> */
>> @@ -5257,6 +5307,9 @@ int ceph_osdc_init(struct ceph_osd_client *osdc, struct ceph_client *client)
>> if (!osdc->completion_wq)
>> goto out_notify_wq;
>>
>>
>>
>>
>> + if (ceph_metric_init(&osdc->metric) < 0)
>> + goto out_completion_wq;
>> +
>> schedule_delayed_work(&osdc->timeout_work,
>> osdc->client->options->osd_keepalive_timeout);
>> schedule_delayed_work(&osdc->osds_timeout_work,
>> @@ -5264,6 +5317,8 @@ int ceph_osdc_init(struct ceph_osd_client *osdc, struct ceph_client *client)
>>
>>
>>
>>
>> return 0;
>>
>>
>>
>>
>> +out_completion_wq:
>> + destroy_workqueue(osdc->completion_wq);
>> out_notify_wq:
>> destroy_workqueue(osdc->notify_wq);
>> out_msgpool_reply:
>> @@ -5302,6 +5357,7 @@ void ceph_osdc_stop(struct ceph_osd_client *osdc)
>> WARN_ON(atomic_read(&osdc->num_requests));
>> WARN_ON(atomic_read(&osdc->num_homeless));
>>
>>
>>
>>
>> + ceph_metric_destroy(&osdc->metric);
>> ceph_osdmap_destroy(osdc->osdmap);
>> mempool_destroy(osdc->req_mempool);
>> ceph_msgpool_destroy(&osdc->msgpool_op);
^ permalink raw reply [flat|nested] 3+ messages in thread
end of thread, other threads:[~2020-11-10 12:53 UTC | newest]
Thread overview: 3+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2020-11-10 11:01 [PATCH v2] libceph: add osd op counter metric support xiubli
2020-11-10 12:42 ` Jeff Layton
2020-11-10 12:53 ` Xiubo Li
This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.