From: Vaibhav Jain <vaibhav@linux.ibm.com>
To: linuxppc-dev@lists.ozlabs.org
Cc: Vaibhav Jain <vaibhav@linux.ibm.com>,
	Michael Ellerman <ellerman@au1.ibm.com>,
	Oliver O'Halloran <oohall@gmail.com>,
	Alastair D'Silva <alastair@au1.ibm.com>,
	"Aneesh Kumar K . V" <aneesh.kumar@linux.ibm.com>
Subject: [PATCH 3/8] powerpc/papr_scm: Fetch dimm performance stats from PHYP
Date: Thu, 20 Feb 2020 15:28:00 +0530
Message-ID: <20200220095805.197229-4-vaibhav@linux.ibm.com>
In-Reply-To: <20200220095805.197229-1-vaibhav@linux.ibm.com>

Implement support for fetching dimm performance metrics via the
H_SCM_PERFORMANCE_STATS hcall documented in Ref[1]. The hcall returns
a structure, defined here as the newly introduced 'struct
papr_scm_perf_stats', consisting of a header followed by key-value
pairs of performance attributes. The 'key' part is an 8-byte char
array naming the attribute, encoded as a __be64 integer. This makes
the hcall's output buffer format self-describing and easy to
interpret.
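
The layout, as a sketch (the authoritative definition of these
structs is introduced in an earlier patch of this series; field names
below are taken from the code in this patch, types are inferred):

	/* Sketch only; see 'papr_scm.h' from patch 1 for the real layout */
	struct papr_scm_perf_stat {
		__be64 statistic_id;	/* 8-byte ASCII attribute name */
		__be64 statistic_value;
	};

	struct papr_scm_perf_stats {
		u8 eye_catcher[8];	/* PAPR_SCM_PERF_STATS_EYECATCHER */
		__be32 stats_version;	/* set to 0x1 by this patch */
		__be32 num_statistics;	/* stat entries after the header */
		struct papr_scm_perf_stat scm_statistics[];
	};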

This patch implements the functionality to fetch these performance
stats and report them via an nvdimm sysfs attribute named
'papr_perf_stats'.
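
Reading the attribute produces one line per statistic in the
'<name> => <value>' format shown below (device path, statistic names
and values are illustrative only):

	$ cat /sys/bus/nd/devices/nmem0/papr_perf_stats
	CtlResCt => 0x000000000000000E
	PonSecs  => 0x0000000000093A80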

A new function drc_pmem_query_stats() is implemented that issues the
H_SCM_PERFORMANCE_STATS hcall, requesting PHYP to store performance
stats in a pre-allocated 'struct papr_scm_perf_stats' buffer. During
nvdimm initialization in papr_scm_nvdimm_init() this function is
first called with an empty buffer to discover the maximum buffer size
needed for the H_SCM_PERFORMANCE_STATS hcall. The size retrieved is
stored in the newly introduced 'papr_scm_priv.len_stat_buffer' member
for later use.
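
As a sketch, the two call shapes (mirroring the diff below) are:

	/* Probe: no buffer; PHYP returns the needed size via R4/'out' */
	rc = drc_pmem_query_stats(p, NULL, 0, &stat_size);

	/* Fetch: pass a stats buffer of the size learnt above */
	rc = drc_pmem_query_stats(p, retbuffer, p->len_stat_buffer, NULL);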

[1]: commit 58b278f568f0 ("powerpc: Provide initial documentation for
PAPR hcalls")

Signed-off-by: Vaibhav Jain <vaibhav@linux.ibm.com>
---
 arch/powerpc/platforms/pseries/papr_scm.c | 107 ++++++++++++++++++++++
 1 file changed, 107 insertions(+)

diff --git a/arch/powerpc/platforms/pseries/papr_scm.c b/arch/powerpc/platforms/pseries/papr_scm.c
index aaf2e4ab1f75..28143a681aa2 100644
--- a/arch/powerpc/platforms/pseries/papr_scm.c
+++ b/arch/powerpc/platforms/pseries/papr_scm.c
@@ -47,6 +47,9 @@ struct papr_scm_priv {
 	/* Health information for the dimm */
 	__be64 health_bitmap;
 	__be64 health_bitmap_valid;
+
+	/* length of the stat buffer as expected by phyp */
+	size_t len_stat_buffer;
 };
 
 static int drc_pmem_bind(struct papr_scm_priv *p)
@@ -152,6 +155,50 @@ static int drc_pmem_query_n_bind(struct papr_scm_priv *p)
 	return drc_pmem_bind(p);
 }
 
+static int drc_pmem_query_stats(struct papr_scm_priv *p,
+				struct papr_scm_perf_stats *stats,
+				size_t size, uint64_t *out)
+{
+	unsigned long ret[PLPAR_HCALL_BUFSIZE];
+	int64_t rc;
+
+	/* If no stats buffer was provided then ignore 'size' */
+	if (!stats)
+		size = 0;
+
+	/*
+	 * Issue the hcall asking PHYP for the stats; if R4 was requested,
+	 * return its (register, hence already CPU-endian) value via 'out'.
+	 */
+	rc = plpar_hcall(H_SCM_PERFORMANCE_STATS, ret, p->drc_index,
+			 __pa(stats), size);
+	if (out)
+		*out = ret[0];
+
+	switch (rc) {
+	case H_SUCCESS:
+		/* Distinguish between a stats fetch and a size query */
+		if (size != 0)
+			dev_dbg(&p->pdev->dev,
+				"Fetched %d performance stats\n",
+				be32_to_cpu(stats->num_statistics));
+		else
+			dev_dbg(&p->pdev->dev,
+				"Performance stats size %lu\n",
+				ret[0]);
+		return 0;
+	case H_PARTIAL:
+		dev_err(&p->pdev->dev,
+			"Unknown performance stats, Err:0x%016lX\n",
+			ret[0]);
+		return -ENOENT;
+	default:
+		dev_err(&p->pdev->dev,
+			"Failed to query performance stats, Err:%lld\n", rc);
+		return -ENXIO;
+	}
+}
+
 static int drc_pmem_query_health(struct papr_scm_priv *p)
 {
 	unsigned long ret[PLPAR_HCALL_BUFSIZE];
@@ -341,6 +388,53 @@ static inline int papr_scm_node(int node)
 	return min_node;
 }
 
+static ssize_t papr_perf_stats_show(struct device *dev,
+				struct device_attribute *attr, char *buf)
+{
+	struct nvdimm *dimm = to_nvdimm(dev);
+	struct papr_scm_priv *p = nvdimm_provider_data(dimm);
+	struct papr_scm_perf_stats *retbuffer;
+	struct papr_scm_perf_stat *stat;
+	uint64_t val;
+	int rc, i;
+
+	if (!p->len_stat_buffer)
+		return -ENOENT;
+
+	/* Allocate the buffer that PHYP will fill with the stats */
+	retbuffer = kzalloc(p->len_stat_buffer, GFP_KERNEL);
+	if (!retbuffer)
+		return -ENOMEM;
+
+	/* Setup the buffer */
+	memcpy(retbuffer->eye_catcher, PAPR_SCM_PERF_STATS_EYECATCHER,
+	       sizeof(retbuffer->eye_catcher));
+	retbuffer->stats_version = cpu_to_be32(0x1);
+	retbuffer->num_statistics = 0;
+
+	rc = drc_pmem_query_stats(p, retbuffer, p->len_stat_buffer, NULL);
+	if (rc)
+		goto out;
+
+	/*
+	 * Go through the returned output buffer and print stats and values.
+	 * Since statistic_id is an 8-byte ASCII name stored in natural byte
+	 * order, print its bytes directly with the string format specifier
+	 * rather than byte-swapping it first.
+	 */
+	for (i = 0, stat = retbuffer->scm_statistics;
+	    i < be32_to_cpu(retbuffer->num_statistics); ++i, ++stat) {
+		val = be64_to_cpu(stat->statistic_value);
+		rc += sprintf(buf + rc, "%.8s => 0x%016llX\n",
+			      (char *) &stat->statistic_id, val);
+	}
+out:
+	kfree(retbuffer);
+	return rc;
+
+}
+DEVICE_ATTR_RO(papr_perf_stats);
+
 static ssize_t papr_flags_show(struct device *dev,
 				struct device_attribute *attr, char *buf)
 {
@@ -390,6 +484,7 @@ DEVICE_ATTR_RO(papr_flags);
 /* papr_scm specific dimm attributes */
 static struct attribute *papr_scm_nd_attributes[] = {
 	&dev_attr_papr_flags.attr,
+	&dev_attr_papr_perf_stats.attr,
 	NULL,
 };
 
@@ -409,6 +504,7 @@ static int papr_scm_nvdimm_init(struct papr_scm_priv *p)
 	struct nd_region_desc ndr_desc;
 	unsigned long dimm_flags;
 	int target_nid, online_nid;
+	uint64_t stat_size;
 
 	p->bus_desc.ndctl = papr_scm_ndctl;
 	p->bus_desc.module = THIS_MODULE;
@@ -470,6 +566,17 @@ static int papr_scm_nvdimm_init(struct papr_scm_priv *p)
 		dev_info(dev, "Region registered with target node %d and online node %d",
 			 target_nid, online_nid);
 
+	/* Try retrieving the stats buffer size and see if it's supported */
+	if (!drc_pmem_query_stats(p, NULL, 0, &stat_size)) {
+		p->len_stat_buffer = (size_t)stat_size;
+		dev_dbg(&p->pdev->dev, "Max dimm perf stats size %zu bytes\n",
+			p->len_stat_buffer);
+	} else {
+		p->len_stat_buffer = 0;
+		dev_dbg(&p->pdev->dev, "Unable to retrieve performance stats\n");
+		dev_info(&p->pdev->dev, "Limited dimm info available\n");
+	}
+
 	return 0;
 
 err:	nvdimm_bus_unregister(p->bus);
-- 
2.24.1

