linux-edac.vger.kernel.org archive mirror
 help / color / mirror / Atom feed
From: Shiju Jose <shiju.jose@huawei.com>
To: <linux-edac@vger.kernel.org>, <linux-acpi@vger.kernel.org>,
	<linux-kernel@vger.kernel.org>, <bp@alien8.de>,
	<tony.luck@intel.com>, <rjw@rjwysocki.net>, <james.morse@arm.com>,
	<lenb@kernel.org>
Cc: <linuxarm@huawei.com>, <shiju.jose@huawei.com>
Subject: [RFC PATCH 5/7] RAS/CEC: Add support for errors count check on short time period
Date: Fri, 2 Oct 2020 13:22:33 +0100	[thread overview]
Message-ID: <20201002122235.1280-6-shiju.jose@huawei.com> (raw)
In-Reply-To: <20201002122235.1280-1-shiju.jose@huawei.com>

Some types of elements, for example a CPU core, should be isolated
when corrected errors are reported too often. This is used for
early fault prediction and would help to prevent serious faults
by taking corrective actions.
Modify the CEC to support checking the error count over a short
time period. Implementation details are added in the file.

Signed-off-by: Shiju Jose <shiju.jose@huawei.com>
---
 drivers/ras/cec.c | 125 ++++++++++++++++++++++++++++++++++++++++------
 1 file changed, 109 insertions(+), 16 deletions(-)

diff --git a/drivers/ras/cec.c b/drivers/ras/cec.c
index f869e7a270b8..ca52917d514c 100644
--- a/drivers/ras/cec.c
+++ b/drivers/ras/cec.c
@@ -119,6 +119,23 @@ static struct ce_array {
 					 * shift for element id.
 					 */
 
+	struct delayed_work work;	/*
+					 * delayed work.
+					 */
+
+	bool short_period;		/* Indicates threshold check for the error count
+					 * over short time period.
+					 */
+
+	u8 time_slot;			/*
+					 * time slot's number within the decay interval.
+					 */
+
+	union {
+		struct mutex	mutex;
+		spinlock_t	spin_lock;
+	};
+
 	union {
 		struct {
 			__u32	disabled : 1,	/* cmdline disabled */
@@ -128,7 +145,6 @@ static struct ce_array {
 	};
 } ce_arr;
 
-static DEFINE_MUTEX(ce_mutex);
 static u64 dfs_pfn;
 
 /* Amount of errors after which we offline */
@@ -138,9 +154,35 @@ static u64 action_threshold = COUNT_MASK;
 #define CEC_DECAY_DEFAULT_INTERVAL	24 * 60 * 60	/* 24 hrs */
 #define CEC_DECAY_MIN_INTERVAL		 1 * 60 * 60	/* 1h */
 #define CEC_DECAY_MAX_INTERVAL	   30 *	24 * 60 * 60	/* one month */
-static struct delayed_work cec_work;
 static u64 decay_interval = CEC_DECAY_DEFAULT_INTERVAL;
 
+/* Definitions for elements (for example a CPU) for which the
+ * error count over a short time period is checked against a threshold.
+ *
+ * An element such as a CPU core may need to be isolated when a large
+ * number of correctable errors is reported on that element too often,
+ * i.e. when the CE count exceeds the threshold value in a short time period.
+ *
+ * The decay interval is divided into a number of time slots. The CE collector
+ * calculates the average error count at the end of each decay interval. Then
+ * the average count is subtracted from the total count in each of the
+ * following time slots. The work function for the decay interval is set for
+ * the reduced time period = decay interval / number of time slots. When a new
+ * CE count for a CPU is added, the element is offlined when the sum of
+ * the most recent CE counts exceeds the CE threshold value.
+ */
+
+/*
+ * u64: [ 63 ELEM ID 23 | ELEM_STATUS_BIT 22 | 21 AVG_COUNT_BITS 12 | 11 DECAY_BITS 10 | 9 COUNT_BITS 0]
+ */
+
+/* Number of time slots in the decay interval */
+#define RAS_CEC_NUM_TIME_SLOTS	10
+
+#define AVG_COUNT_SHIFT	(DECAY_BITS + COUNT_BITS)
+#define ELEM_STATUS_BIT	BIT(22)	/* Indicates an element offlined by CEC */
+#define ELEM_ID_SHIFT	(1 + AVG_COUNT_SHIFT + COUNT_BITS)
+
 /*
  * Decrement decay value. We're using DECAY_BITS bits to denote decay of an
  * element in the array. On insertion and any access, it gets reset to max.
@@ -177,11 +219,62 @@ static void cec_mod_work(struct delayed_work *dwork, unsigned long interval)
 
 static void cec_work_fn(struct work_struct *work)
 {
-	mutex_lock(&ce_mutex);
-	do_spring_cleaning(&ce_arr);
-	mutex_unlock(&ce_mutex);
+	struct ce_array *ca;
+	unsigned long flags;
+	u64 avg_count;
+	int i, time_slots = 1;
+	struct delayed_work *d_work = container_of(work, struct delayed_work, work);
+
+	if (!d_work)
+		return;
+
+	ca = container_of(d_work, struct ce_array, work);
+	if (!ca->array || ca->disabled)
+		return;
 
-	cec_mod_work(&cec_work, decay_interval);
+	if (!ca->short_period) {
+		mutex_lock(&ca->mutex);
+		do_spring_cleaning(ca);
+		mutex_unlock(&ca->mutex);
+	} else {
+		time_slots = RAS_CEC_NUM_TIME_SLOTS;
+		spin_lock_irqsave(&ca->spin_lock, flags);
+		ca->time_slot = (ca->time_slot + 1) % RAS_CEC_NUM_TIME_SLOTS;
+
+		for (i = 0; i < ca->n; i++) {
+			if (ca->array[i] & ELEM_STATUS_BIT)
+				continue;
+
+			/* Clear the old error count approximately by subtracting
+			 * the average count from the total error count.
+			 */
+			avg_count = (ca->array[i] >> AVG_COUNT_SHIFT) & COUNT_MASK;
+			ca->array[i] -= avg_count;
+		}
+
+		if (ca->time_slot) {
+			spin_unlock_irqrestore(&ca->spin_lock, flags);
+			goto exit;
+		}
+
+		for (i = 0; i < ca->n; i++) {
+			if (ca->array[i] & ELEM_STATUS_BIT)
+				continue;
+
+			/* calculate average error count for the completed time period */
+			avg_count = COUNT(ca->array[i]) / RAS_CEC_NUM_TIME_SLOTS;
+			ca->array[i] -= (COUNT(ca->array[i]) % RAS_CEC_NUM_TIME_SLOTS);
+			/* store average error count */
+			ca->array[i] &= ~(COUNT_MASK << AVG_COUNT_SHIFT);
+			ca->array[i] |= (avg_count << AVG_COUNT_SHIFT);
+		}
+
+		do_spring_cleaning(ca);
+		spin_unlock_irqrestore(&ca->spin_lock, flags);
+	}
+
+exit:
+	cec_mod_work(&ca->work, decay_interval/time_slots);
 }
 
 /*
@@ -279,9 +372,9 @@ static u64 __maybe_unused del_lru_elem(void)
 	if (!ca->n)
 		return 0;
 
-	mutex_lock(&ce_mutex);
+	mutex_lock(&ca->mutex);
 	pfn = del_lru_elem_unlocked(ca);
-	mutex_unlock(&ce_mutex);
+	mutex_unlock(&ca->mutex);
 
 	return pfn;
 }
@@ -328,7 +421,7 @@ static int cec_add_elem(u64 pfn)
 	if (!ce_arr.array || ce_arr.disabled)
 		return -ENODEV;
 
-	mutex_lock(&ce_mutex);
+	mutex_lock(&ca->mutex);
 
 	ca->ces_entered++;
 
@@ -386,7 +479,7 @@ static int cec_add_elem(u64 pfn)
 	WARN_ON_ONCE(sanity_check(ca));
 
 unlock:
-	mutex_unlock(&ce_mutex);
+	mutex_unlock(&ca->mutex);
 
 	return ret;
 }
@@ -420,7 +513,7 @@ static int decay_interval_set(void *data, u64 val)
 	*(u64 *)data   = val;
 	decay_interval = val;
 
-	cec_mod_work(&cec_work, decay_interval);
+	cec_mod_work(&ce_arr.work, decay_interval);
 
 	return 0;
 }
@@ -446,7 +539,7 @@ static int array_dump(struct seq_file *m, void *v)
 	struct ce_array *ca = &ce_arr;
 	int i;
 
-	mutex_lock(&ce_mutex);
+	mutex_lock(&ca->mutex);
 
 	seq_printf(m, "{ n: %d\n", ca->n);
 	for (i = 0; i < ca->n; i++) {
@@ -468,7 +561,7 @@ static int array_dump(struct seq_file *m, void *v)
 
 	seq_printf(m, "Action threshold: %lld\n", action_threshold);
 
-	mutex_unlock(&ce_mutex);
+	mutex_unlock(&ca->mutex);
 
 	return 0;
 }
@@ -583,9 +676,9 @@ static void __init cec_init(void)
 
 #if defined(CONFIG_X86_MCE)
 	ce_arr.id_shift = PAGE_SHIFT;
-	INIT_DELAYED_WORK(&cec_work, cec_work_fn);
-	schedule_delayed_work(&cec_work, CEC_DECAY_DEFAULT_INTERVAL);
-
+	mutex_init(&ce_arr.mutex);
+	INIT_DELAYED_WORK(&ce_arr.work, cec_work_fn);
+	schedule_delayed_work(&ce_arr.work, CEC_DECAY_DEFAULT_INTERVAL);
 	mce_register_decode_chain(&cec_nb);
 #endif
 
-- 
2.17.1



  parent reply	other threads:[~2020-10-02 12:30 UTC|newest]

Thread overview: 15+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2020-10-02 12:22 [RFC PATCH 0/7] RAS/CEC: Extend CEC for errors count check on short time period Shiju Jose
2020-10-02 12:22 ` [RFC PATCH 1/7] RAS/CEC: Replace the macro PFN with ELEM_NO Shiju Jose
2020-10-02 12:22 ` [RFC PATCH 2/7] RAS/CEC: Replace pfns_poisoned with elems_poisoned Shiju Jose
2020-10-02 12:22 ` [RFC PATCH 3/7] RAS/CEC: Move X86 MCE specific code under CONFIG_X86_MCE Shiju Jose
2020-10-02 12:22 ` [RFC PATCH 4/7] RAS/CEC: Modify cec_mod_work() for common use Shiju Jose
2020-10-02 12:22 ` Shiju Jose [this message]
2020-10-02 12:22 ` [RFC PATCH 6/7] RAS/CEC: Add CPU Correctable Error Collector to isolate an erroneous CPU core Shiju Jose
2020-10-02 12:22 ` [RFC PATCH 7/7] ACPI / APEI: Add reporting ARM64 CPU correctable errors to the CEC Shiju Jose
2020-10-02 12:43 ` [RFC PATCH 0/7] RAS/CEC: Extend CEC for errors count check on short time period Borislav Petkov
2020-10-02 15:38   ` Shiju Jose
2020-10-02 17:33     ` James Morse
2020-10-02 18:02       ` Borislav Petkov
2020-10-06 16:13       ` Shiju Jose
2020-10-07 16:45         ` James Morse
2020-10-02 16:04   ` Luck, Tony

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=20201002122235.1280-6-shiju.jose@huawei.com \
    --to=shiju.jose@huawei.com \
    --cc=bp@alien8.de \
    --cc=james.morse@arm.com \
    --cc=lenb@kernel.org \
    --cc=linux-acpi@vger.kernel.org \
    --cc=linux-edac@vger.kernel.org \
    --cc=linux-kernel@vger.kernel.org \
    --cc=linuxarm@huawei.com \
    --cc=rjw@rjwysocki.net \
    --cc=tony.luck@intel.com \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).