All of lore.kernel.org
 help / color / mirror / Atom feed
From: Linda Knippers <linda.knippers@hpe.com>
To: Vishal Verma <vishal.l.verma@intel.com>
Cc: linux-kernel@vger.kernel.org, Tony Luck <tony.luck@intel.com>,
	linux-acpi@vger.kernel.org, "Rafael J. Wysocki" <rafael.j.wysocki@intel.com>,
	linux-nvdimm@lists.01.org
Subject: Re: [PATCH v2 3/3] nfit: do an ARS scrub on hitting a latent media error
Date: Thu, 21 Jul 2016 17:25:18 -0400	[thread overview]
Message-ID: <55fdd656-deb7-56a7-cd4f-2f17599b4c2e@hpe.com> (raw)
In-Reply-To: <20160721211039.GH12960@omniknight.lm.intel.com>



On 7/21/2016 5:10 PM, Vishal Verma wrote:
> On 07/21, Linda Knippers wrote:
>>
>>
>> On 7/20/2016 9:50 PM, Vishal Verma wrote:
>>> When a latent (unknown to 'badblocks') error is encountered, it will
>>> trigger a machine check exception. On a system with machine check
>>> recovery, this will only SIGBUS the process(es) which had the bad page
>>> mapped (as opposed to a kernel panic on platforms without machine
>>> check recovery features). In the former case, we want to trigger a full
>>> rescan of that nvdimm bus. This will allow any additional, new errors
>>> to be captured in the block devices' badblocks lists, and offending
>>> operations on them can be trapped early, avoiding machine checks.
>>
>> Do we really need to rescan all SPA ranges?  If the problem is with
>> an NVDIMM, wouldn't the blast radius be the device or its interleave set,
>> which would be part of the same SPA range?
>>
>> I don't know what the overhead associated with a scan is which is why I'm asking.
> 
> You're right that we don't _need_ to scan all ranges, and that the scrub
> can be long-running, but we just take this 'event' as an opportunity to
> basically refresh everything. Since it is asynchronous, we're not
> holding anything up.

We're not holding up anything in the kernel but I assume it's
not a zero-overhead operation.  The memory controller may be doing something
or the platform firmware could be doing something which could introduce
latency spikes.  It's the kind of thing that really annoys some customers
but maybe following an MCE no one cares about that.

-- ljk
> 
>>
>> -- ljk
>>
>>>
>>> This is done by registering a callback function with the
>>> x86_mce_decoder_chain and calling the new ars_rescan functionality with
>>> the address in the mce notification.
>>>
>>> Cc: Dan Williams <dan.j.williams@intel.com>
>>> Cc: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
>>> Cc: Tony Luck <tony.luck@intel.com>
>>> Cc: <linux-acpi@vger.kernel.org>
>>> Cc: <linux-nvdimm@lists.01.org>
>>> Signed-off-by: Vishal Verma <vishal.l.verma@intel.com>
>>> ---
>>>  drivers/acpi/nfit.c | 89 +++++++++++++++++++++++++++++++++++++++++++++++++++++
>>>  drivers/acpi/nfit.h |  1 +
>>>  2 files changed, 90 insertions(+)
>>>
>>> diff --git a/drivers/acpi/nfit.c b/drivers/acpi/nfit.c
>>> index 4e65255..c9f1ee4 100644
>>> --- a/drivers/acpi/nfit.c
>>> +++ b/drivers/acpi/nfit.c
>>> @@ -12,6 +12,7 @@
>>>   */
>>>  #include <linux/list_sort.h>
>>>  #include <linux/libnvdimm.h>
>>> +#include <linux/notifier.h>
>>>  #include <linux/module.h>
>>>  #include <linux/mutex.h>
>>>  #include <linux/ndctl.h>
>>> @@ -24,6 +25,7 @@
>>>  #include <linux/io.h>
>>>  #include <linux/nd.h>
>>>  #include <asm/cacheflush.h>
>>> +#include <asm/mce.h>
>>>  #include "nfit.h"
>>>  
>>>  /*
>>> @@ -51,6 +53,9 @@ module_param(disable_vendor_specific, bool, S_IRUGO);
>>>  MODULE_PARM_DESC(disable_vendor_specific,
>>>  		"Limit commands to the publicly specified set\n");
>>>  
>>> +static LIST_HEAD(acpi_descs);
>>> +static DEFINE_MUTEX(acpi_desc_lock);
>>> +
>>>  static struct workqueue_struct *nfit_wq;
>>>  
>>>  struct nfit_table_prev {
>>> @@ -2416,9 +2421,11 @@ static int acpi_nfit_check_deletions(struct acpi_nfit_desc *acpi_desc,
>>>  
>>>  int acpi_nfit_init(struct acpi_nfit_desc *acpi_desc, acpi_size sz)
>>>  {
>>> +	struct acpi_nfit_desc *acpi_desc_entry;
>>>  	struct device *dev = acpi_desc->dev;
>>>  	struct nfit_table_prev prev;
>>>  	const void *end;
>>> +	int found = 0;
>>>  	u8 *data;
>>>  	int rc;
>>>  
>>> @@ -2473,6 +2480,19 @@ int acpi_nfit_init(struct acpi_nfit_desc *acpi_desc, acpi_size sz)
>>>  
>>>  	rc = acpi_nfit_register_regions(acpi_desc);
>>>  
>>> +	/*
>>> +	 * We may get here due to an update of the nfit via _FIT.
>>> +	 * Check if the acpi_desc we're (re)initializing is already
>>> +	 * present in the list, and if so, don't re-add it
>>> +	 */
>>> +	mutex_lock(&acpi_desc_lock);
>>> +	list_for_each_entry(acpi_desc_entry, &acpi_descs, list)
>>> +		if (acpi_desc_entry == acpi_desc)
>>> +			found = 1;
>>> +	if (found == 0)
>>> +		list_add_tail(&acpi_desc->list, &acpi_descs);
>>> +	mutex_unlock(&acpi_desc_lock);
>>> +
>>>   out_unlock:
>>>  	mutex_unlock(&acpi_desc->init_mutex);
>>>  	return rc;
>>> @@ -2555,6 +2575,65 @@ static int acpi_nfit_ars_rescan(struct acpi_nfit_desc *acpi_desc)
>>>  	return 0;
>>>  }
>>>  
>>> +static int nfit_handle_mce(struct notifier_block *nb, unsigned long val,
>>> +			void *data)
>>> +{
>>> +	struct mce *mce = (struct mce *)data;
>>> +	struct acpi_nfit_desc *acpi_desc;
>>> +	struct nfit_spa *nfit_spa;
>>> +
>>> +	/* We only care about memory errors */
>>> +	if (!(mce->status & MCACOD))
>>> +		return NOTIFY_DONE;
>>> +
>>> +	/*
>>> +	 * mce->addr contains the physical addr accessed that caused the
>>> +	 * machine check. We need to walk through the list of NFITs, and see
>>> +	 * if any of them matches that address, and only then start a scrub.
>>> +	 */
>>> +	mutex_lock(&acpi_desc_lock);
>>> +	list_for_each_entry(acpi_desc, &acpi_descs, list) {
>>> +		struct device *dev = acpi_desc->dev;
>>> +		int found_match = 0;
>>> +
>>> +		list_for_each_entry(nfit_spa, &acpi_desc->spas, list) {
>>> +			struct acpi_nfit_system_address *spa = nfit_spa->spa;
>>> +
>>> +			if (nfit_spa_type(spa) != NFIT_SPA_PM)
>>> +				continue;
>>> +			/* find the spa that covers the mce addr */
>>> +			if (spa->address > mce->addr)
>>> +				continue;
>>> +			if ((spa->address + spa->length - 1) < mce->addr)
>>> +				continue;
>>> +			found_match = 1;
>>> +			dev_dbg(dev, "%s: addr in SPA %d (0x%llx, 0x%llx)\n",
>>> +				__func__, spa->range_index, spa->address,
>>> +				spa->length);
>>> +			/*
>>> +			 * We can break at the first match because we're going
>>> +			 * to rescan all the SPA ranges. There shouldn't be any
>>> +			 * aliasing anyway.
>>> +			 */
>>> +			break;
>>> +		}
>>> +
>>> +		/*
>>> +		 * We can ignore an -EBUSY here because if an ARS is already
>>> +		 * in progress, just let that be the last authoritative one
>>> +		 */
>>> +		if (found_match)
>>> +			acpi_nfit_ars_rescan(acpi_desc);
>>> +	}
>>> +
>>> +	mutex_unlock(&acpi_desc_lock);
>>> +	return NOTIFY_DONE;
>>> +}
>>> +
>>> +static struct notifier_block nfit_mce_dec = {
>>> +	.notifier_call	= nfit_handle_mce,
>>> +};
>>> +
>>>  void acpi_nfit_desc_init(struct acpi_nfit_desc *acpi_desc, struct device *dev)
>>>  {
>>>  	struct nvdimm_bus_descriptor *nd_desc;
>>> @@ -2676,6 +2755,9 @@ static int acpi_nfit_remove(struct acpi_device *adev)
>>>  	flush_workqueue(nfit_wq);
>>>  	sysfs_put(acpi_desc->scrub_count_state);
>>>  	nvdimm_bus_unregister(acpi_desc->nvdimm_bus);
>>> +	mutex_lock(&acpi_desc_lock);
>>> +	list_del(&acpi_desc->list);
>>> +	mutex_unlock(&acpi_desc_lock);
>>>  	return 0;
>>>  }
>>>  
>>> @@ -2782,13 +2864,20 @@ static __init int nfit_init(void)
>>>  	if (!nfit_wq)
>>>  		return -ENOMEM;
>>>  
>>> +	INIT_LIST_HEAD(&acpi_descs);
>>> +	mce_register_decode_chain(&nfit_mce_dec);
>>> +
>>>  	return acpi_bus_register_driver(&acpi_nfit_driver);
>>>  }
>>>  
>>>  static __exit void nfit_exit(void)
>>>  {
>>> +	mce_unregister_decode_chain(&nfit_mce_dec);
>>>  	acpi_bus_unregister_driver(&acpi_nfit_driver);
>>>  	destroy_workqueue(nfit_wq);
>>> +	mutex_lock(&acpi_desc_lock);
>>> +	WARN_ON(!list_empty(&acpi_descs));
>>> +	mutex_unlock(&acpi_desc_lock);
>>>  }
>>>  
>>>  module_init(nfit_init);
>>> diff --git a/drivers/acpi/nfit.h b/drivers/acpi/nfit.h
>>> index 954d2aa..d2aec6f 100644
>>> --- a/drivers/acpi/nfit.h
>>> +++ b/drivers/acpi/nfit.h
>>> @@ -146,6 +146,7 @@ struct acpi_nfit_desc {
>>>  	struct nd_cmd_ars_status *ars_status;
>>>  	size_t ars_status_size;
>>>  	struct work_struct work;
>>> +	struct list_head list;
>>>  	struct kernfs_node *scrub_count_state;
>>>  	unsigned int scrub_count;
>>>  	unsigned int cancel:1;
>>>
_______________________________________________
Linux-nvdimm mailing list
Linux-nvdimm@lists.01.org
https://lists.01.org/mailman/listinfo/linux-nvdimm

WARNING: multiple messages have this Message-ID (diff)
From: Linda Knippers <linda.knippers-ZPxbGqLxI0U@public.gmane.org>
To: Vishal Verma <vishal.l.verma-ral2JQCrhuEAvxtiuMwx3w@public.gmane.org>
Cc: linux-kernel-u79uwXL29TY76Z2rM5mHXA@public.gmane.org,
	Tony Luck <tony.luck-ral2JQCrhuEAvxtiuMwx3w@public.gmane.org>,
	linux-acpi-u79uwXL29TY76Z2rM5mHXA@public.gmane.org,
	"Rafael J. Wysocki"
	<rafael.j.wysocki-ral2JQCrhuEAvxtiuMwx3w@public.gmane.org>,
	linux-nvdimm-hn68Rpc1hR1g9hUCZPvPmw@public.gmane.org
Subject: Re: [PATCH v2 3/3] nfit: do an ARS scrub on hitting a latent media error
Date: Thu, 21 Jul 2016 17:25:18 -0400	[thread overview]
Message-ID: <55fdd656-deb7-56a7-cd4f-2f17599b4c2e@hpe.com> (raw)
In-Reply-To: <20160721211039.GH12960-PxNA6LsHknajYZd8rzuJLNh3ngVCH38I@public.gmane.org>



On 7/21/2016 5:10 PM, Vishal Verma wrote:
> On 07/21, Linda Knippers wrote:
>>
>>
>> On 7/20/2016 9:50 PM, Vishal Verma wrote:
>>> When a latent (unknown to 'badblocks') error is encountered, it will
>>> trigger a machine check exception. On a system with machine check
>>> recovery, this will only SIGBUS the process(es) which had the bad page
>>> mapped (as opposed to a kernel panic on platforms without machine
>>> check recovery features). In the former case, we want to trigger a full
>>> rescan of that nvdimm bus. This will allow any additional, new errors
>>> to be captured in the block devices' badblocks lists, and offending
>>> operations on them can be trapped early, avoiding machine checks.
>>
>> Do we really need to rescan all SPA ranges?  If the problem is with
>> an NVDIMM, wouldn't the blast radius be the device or its interleave set,
>> which would be part of the same SPA range?
>>
>> I don't know what the overhead associated with a scan is which is why I'm asking.
> 
> You're right that we don't _need_ to scan all ranges, and that the scrub
> can be long-running, but we just take this 'event' as an opportunity to
> basically refresh everything. Since it is asynchronous, we're not
> holding anything up.

We're not holding up anything in the kernel but I assume it's
not a zero-overhead operation.  The memory controller may be doing something
or the platform firmware could be doing something which could introduce
latency spikes.  It's the kind of thing that really annoys some customers
but maybe following an MCE no one cares about that.

-- ljk
> 
>>
>> -- ljk
>>
>>>
>>> This is done by registering a callback function with the
>>> x86_mce_decoder_chain and calling the new ars_rescan functionality with
>>> the address in the mce notification.
>>>
>>> Cc: Dan Williams <dan.j.williams-ral2JQCrhuEAvxtiuMwx3w@public.gmane.org>
>>> Cc: Rafael J. Wysocki <rafael.j.wysocki-ral2JQCrhuEAvxtiuMwx3w@public.gmane.org>
>>> Cc: Tony Luck <tony.luck-ral2JQCrhuEAvxtiuMwx3w@public.gmane.org>
>>> Cc: <linux-acpi-u79uwXL29TY76Z2rM5mHXA@public.gmane.org>
>>> Cc: <linux-nvdimm-hn68Rpc1hR1g9hUCZPvPmw@public.gmane.org>
>>> Signed-off-by: Vishal Verma <vishal.l.verma-ral2JQCrhuEAvxtiuMwx3w@public.gmane.org>
>>> ---
>>>  drivers/acpi/nfit.c | 89 +++++++++++++++++++++++++++++++++++++++++++++++++++++
>>>  drivers/acpi/nfit.h |  1 +
>>>  2 files changed, 90 insertions(+)
>>>
>>> diff --git a/drivers/acpi/nfit.c b/drivers/acpi/nfit.c
>>> index 4e65255..c9f1ee4 100644
>>> --- a/drivers/acpi/nfit.c
>>> +++ b/drivers/acpi/nfit.c
>>> @@ -12,6 +12,7 @@
>>>   */
>>>  #include <linux/list_sort.h>
>>>  #include <linux/libnvdimm.h>
>>> +#include <linux/notifier.h>
>>>  #include <linux/module.h>
>>>  #include <linux/mutex.h>
>>>  #include <linux/ndctl.h>
>>> @@ -24,6 +25,7 @@
>>>  #include <linux/io.h>
>>>  #include <linux/nd.h>
>>>  #include <asm/cacheflush.h>
>>> +#include <asm/mce.h>
>>>  #include "nfit.h"
>>>  
>>>  /*
>>> @@ -51,6 +53,9 @@ module_param(disable_vendor_specific, bool, S_IRUGO);
>>>  MODULE_PARM_DESC(disable_vendor_specific,
>>>  		"Limit commands to the publicly specified set\n");
>>>  
>>> +static LIST_HEAD(acpi_descs);
>>> +static DEFINE_MUTEX(acpi_desc_lock);
>>> +
>>>  static struct workqueue_struct *nfit_wq;
>>>  
>>>  struct nfit_table_prev {
>>> @@ -2416,9 +2421,11 @@ static int acpi_nfit_check_deletions(struct acpi_nfit_desc *acpi_desc,
>>>  
>>>  int acpi_nfit_init(struct acpi_nfit_desc *acpi_desc, acpi_size sz)
>>>  {
>>> +	struct acpi_nfit_desc *acpi_desc_entry;
>>>  	struct device *dev = acpi_desc->dev;
>>>  	struct nfit_table_prev prev;
>>>  	const void *end;
>>> +	int found = 0;
>>>  	u8 *data;
>>>  	int rc;
>>>  
>>> @@ -2473,6 +2480,19 @@ int acpi_nfit_init(struct acpi_nfit_desc *acpi_desc, acpi_size sz)
>>>  
>>>  	rc = acpi_nfit_register_regions(acpi_desc);
>>>  
>>> +	/*
>>> +	 * We may get here due to an update of the nfit via _FIT.
>>> +	 * Check if the acpi_desc we're (re)initializing is already
>>> +	 * present in the list, and if so, don't re-add it
>>> +	 */
>>> +	mutex_lock(&acpi_desc_lock);
>>> +	list_for_each_entry(acpi_desc_entry, &acpi_descs, list)
>>> +		if (acpi_desc_entry == acpi_desc)
>>> +			found = 1;
>>> +	if (found == 0)
>>> +		list_add_tail(&acpi_desc->list, &acpi_descs);
>>> +	mutex_unlock(&acpi_desc_lock);
>>> +
>>>   out_unlock:
>>>  	mutex_unlock(&acpi_desc->init_mutex);
>>>  	return rc;
>>> @@ -2555,6 +2575,65 @@ static int acpi_nfit_ars_rescan(struct acpi_nfit_desc *acpi_desc)
>>>  	return 0;
>>>  }
>>>  
>>> +static int nfit_handle_mce(struct notifier_block *nb, unsigned long val,
>>> +			void *data)
>>> +{
>>> +	struct mce *mce = (struct mce *)data;
>>> +	struct acpi_nfit_desc *acpi_desc;
>>> +	struct nfit_spa *nfit_spa;
>>> +
>>> +	/* We only care about memory errors */
>>> +	if (!(mce->status & MCACOD))
>>> +		return NOTIFY_DONE;
>>> +
>>> +	/*
>>> +	 * mce->addr contains the physical addr accessed that caused the
>>> +	 * machine check. We need to walk through the list of NFITs, and see
>>> +	 * if any of them matches that address, and only then start a scrub.
>>> +	 */
>>> +	mutex_lock(&acpi_desc_lock);
>>> +	list_for_each_entry(acpi_desc, &acpi_descs, list) {
>>> +		struct device *dev = acpi_desc->dev;
>>> +		int found_match = 0;
>>> +
>>> +		list_for_each_entry(nfit_spa, &acpi_desc->spas, list) {
>>> +			struct acpi_nfit_system_address *spa = nfit_spa->spa;
>>> +
>>> +			if (nfit_spa_type(spa) != NFIT_SPA_PM)
>>> +				continue;
>>> +			/* find the spa that covers the mce addr */
>>> +			if (spa->address > mce->addr)
>>> +				continue;
>>> +			if ((spa->address + spa->length - 1) < mce->addr)
>>> +				continue;
>>> +			found_match = 1;
>>> +			dev_dbg(dev, "%s: addr in SPA %d (0x%llx, 0x%llx)\n",
>>> +				__func__, spa->range_index, spa->address,
>>> +				spa->length);
>>> +			/*
>>> +			 * We can break at the first match because we're going
>>> +			 * to rescan all the SPA ranges. There shouldn't be any
>>> +			 * aliasing anyway.
>>> +			 */
>>> +			break;
>>> +		}
>>> +
>>> +		/*
>>> +		 * We can ignore an -EBUSY here because if an ARS is already
>>> +		 * in progress, just let that be the last authoritative one
>>> +		 */
>>> +		if (found_match)
>>> +			acpi_nfit_ars_rescan(acpi_desc);
>>> +	}
>>> +
>>> +	mutex_unlock(&acpi_desc_lock);
>>> +	return NOTIFY_DONE;
>>> +}
>>> +
>>> +static struct notifier_block nfit_mce_dec = {
>>> +	.notifier_call	= nfit_handle_mce,
>>> +};
>>> +
>>>  void acpi_nfit_desc_init(struct acpi_nfit_desc *acpi_desc, struct device *dev)
>>>  {
>>>  	struct nvdimm_bus_descriptor *nd_desc;
>>> @@ -2676,6 +2755,9 @@ static int acpi_nfit_remove(struct acpi_device *adev)
>>>  	flush_workqueue(nfit_wq);
>>>  	sysfs_put(acpi_desc->scrub_count_state);
>>>  	nvdimm_bus_unregister(acpi_desc->nvdimm_bus);
>>> +	mutex_lock(&acpi_desc_lock);
>>> +	list_del(&acpi_desc->list);
>>> +	mutex_unlock(&acpi_desc_lock);
>>>  	return 0;
>>>  }
>>>  
>>> @@ -2782,13 +2864,20 @@ static __init int nfit_init(void)
>>>  	if (!nfit_wq)
>>>  		return -ENOMEM;
>>>  
>>> +	INIT_LIST_HEAD(&acpi_descs);
>>> +	mce_register_decode_chain(&nfit_mce_dec);
>>> +
>>>  	return acpi_bus_register_driver(&acpi_nfit_driver);
>>>  }
>>>  
>>>  static __exit void nfit_exit(void)
>>>  {
>>> +	mce_unregister_decode_chain(&nfit_mce_dec);
>>>  	acpi_bus_unregister_driver(&acpi_nfit_driver);
>>>  	destroy_workqueue(nfit_wq);
>>> +	mutex_lock(&acpi_desc_lock);
>>> +	WARN_ON(!list_empty(&acpi_descs));
>>> +	mutex_unlock(&acpi_desc_lock);
>>>  }
>>>  
>>>  module_init(nfit_init);
>>> diff --git a/drivers/acpi/nfit.h b/drivers/acpi/nfit.h
>>> index 954d2aa..d2aec6f 100644
>>> --- a/drivers/acpi/nfit.h
>>> +++ b/drivers/acpi/nfit.h
>>> @@ -146,6 +146,7 @@ struct acpi_nfit_desc {
>>>  	struct nd_cmd_ars_status *ars_status;
>>>  	size_t ars_status_size;
>>>  	struct work_struct work;
>>> +	struct list_head list;
>>>  	struct kernfs_node *scrub_count_state;
>>>  	unsigned int scrub_count;
>>>  	unsigned int cancel:1;
>>>

WARNING: multiple messages have this Message-ID (diff)
From: Linda Knippers <linda.knippers@hpe.com>
To: Vishal Verma <vishal.l.verma@intel.com>
Cc: <linux-nvdimm@ml01.01.org>, Tony Luck <tony.luck@intel.com>,
	"Rafael J. Wysocki" <rafael.j.wysocki@intel.com>,
	<linux-kernel@vger.kernel.org>, <linux-acpi@vger.kernel.org>
Subject: Re: [PATCH v2 3/3] nfit: do an ARS scrub on hitting a latent media error
Date: Thu, 21 Jul 2016 17:25:18 -0400	[thread overview]
Message-ID: <55fdd656-deb7-56a7-cd4f-2f17599b4c2e@hpe.com> (raw)
In-Reply-To: <20160721211039.GH12960@omniknight.lm.intel.com>



On 7/21/2016 5:10 PM, Vishal Verma wrote:
> On 07/21, Linda Knippers wrote:
>>
>>
>> On 7/20/2016 9:50 PM, Vishal Verma wrote:
>>> When a latent (unknown to 'badblocks') error is encountered, it will
>>> trigger a machine check exception. On a system with machine check
>>> recovery, this will only SIGBUS the process(es) which had the bad page
>>> mapped (as opposed to a kernel panic on platforms without machine
>>> check recovery features). In the former case, we want to trigger a full
>>> rescan of that nvdimm bus. This will allow any additional, new errors
>>> to be captured in the block devices' badblocks lists, and offending
>>> operations on them can be trapped early, avoiding machine checks.
>>
>> Do we really need to rescan all SPA ranges?  If the problem is with
>> an NVDIMM, wouldn't the blast radius be the device or its interleave set,
>> which would be part of the same SPA range?
>>
>> I don't know what the overhead associated with a scan is which is why I'm asking.
> 
> You're right that we don't _need_ to scan all ranges, and that the scrub
> can be long-running, but we just take this 'event' as an opportunity to
> basically refresh everything. Since it is asynchronous, we're not
> holding anything up.

We're not holding up anything in the kernel but I assume it's
not a zero-overhead operation.  The memory controller may be doing something
or the platform firmware could be doing something which could introduce
latency spikes.  It's the kind of thing that really annoys some customers
but maybe following an MCE no one cares about that.

-- ljk
> 
>>
>> -- ljk
>>
>>>
>>> This is done by registering a callback function with the
>>> x86_mce_decoder_chain and calling the new ars_rescan functionality with
>>> the address in the mce notification.
>>>
>>> Cc: Dan Williams <dan.j.williams@intel.com>
>>> Cc: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
>>> Cc: Tony Luck <tony.luck@intel.com>
>>> Cc: <linux-acpi@vger.kernel.org>
>>> Cc: <linux-nvdimm@lists.01.org>
>>> Signed-off-by: Vishal Verma <vishal.l.verma@intel.com>
>>> ---
>>>  drivers/acpi/nfit.c | 89 +++++++++++++++++++++++++++++++++++++++++++++++++++++
>>>  drivers/acpi/nfit.h |  1 +
>>>  2 files changed, 90 insertions(+)
>>>
>>> diff --git a/drivers/acpi/nfit.c b/drivers/acpi/nfit.c
>>> index 4e65255..c9f1ee4 100644
>>> --- a/drivers/acpi/nfit.c
>>> +++ b/drivers/acpi/nfit.c
>>> @@ -12,6 +12,7 @@
>>>   */
>>>  #include <linux/list_sort.h>
>>>  #include <linux/libnvdimm.h>
>>> +#include <linux/notifier.h>
>>>  #include <linux/module.h>
>>>  #include <linux/mutex.h>
>>>  #include <linux/ndctl.h>
>>> @@ -24,6 +25,7 @@
>>>  #include <linux/io.h>
>>>  #include <linux/nd.h>
>>>  #include <asm/cacheflush.h>
>>> +#include <asm/mce.h>
>>>  #include "nfit.h"
>>>  
>>>  /*
>>> @@ -51,6 +53,9 @@ module_param(disable_vendor_specific, bool, S_IRUGO);
>>>  MODULE_PARM_DESC(disable_vendor_specific,
>>>  		"Limit commands to the publicly specified set\n");
>>>  
>>> +static LIST_HEAD(acpi_descs);
>>> +static DEFINE_MUTEX(acpi_desc_lock);
>>> +
>>>  static struct workqueue_struct *nfit_wq;
>>>  
>>>  struct nfit_table_prev {
>>> @@ -2416,9 +2421,11 @@ static int acpi_nfit_check_deletions(struct acpi_nfit_desc *acpi_desc,
>>>  
>>>  int acpi_nfit_init(struct acpi_nfit_desc *acpi_desc, acpi_size sz)
>>>  {
>>> +	struct acpi_nfit_desc *acpi_desc_entry;
>>>  	struct device *dev = acpi_desc->dev;
>>>  	struct nfit_table_prev prev;
>>>  	const void *end;
>>> +	int found = 0;
>>>  	u8 *data;
>>>  	int rc;
>>>  
>>> @@ -2473,6 +2480,19 @@ int acpi_nfit_init(struct acpi_nfit_desc *acpi_desc, acpi_size sz)
>>>  
>>>  	rc = acpi_nfit_register_regions(acpi_desc);
>>>  
>>> +	/*
>>> +	 * We may get here due to an update of the nfit via _FIT.
>>> +	 * Check if the acpi_desc we're (re)initializing is already
>>> +	 * present in the list, and if so, don't re-add it
>>> +	 */
>>> +	mutex_lock(&acpi_desc_lock);
>>> +	list_for_each_entry(acpi_desc_entry, &acpi_descs, list)
>>> +		if (acpi_desc_entry == acpi_desc)
>>> +			found = 1;
>>> +	if (found == 0)
>>> +		list_add_tail(&acpi_desc->list, &acpi_descs);
>>> +	mutex_unlock(&acpi_desc_lock);
>>> +
>>>   out_unlock:
>>>  	mutex_unlock(&acpi_desc->init_mutex);
>>>  	return rc;
>>> @@ -2555,6 +2575,65 @@ static int acpi_nfit_ars_rescan(struct acpi_nfit_desc *acpi_desc)
>>>  	return 0;
>>>  }
>>>  
>>> +static int nfit_handle_mce(struct notifier_block *nb, unsigned long val,
>>> +			void *data)
>>> +{
>>> +	struct mce *mce = (struct mce *)data;
>>> +	struct acpi_nfit_desc *acpi_desc;
>>> +	struct nfit_spa *nfit_spa;
>>> +
>>> +	/* We only care about memory errors */
>>> +	if (!(mce->status & MCACOD))
>>> +		return NOTIFY_DONE;
>>> +
>>> +	/*
>>> +	 * mce->addr contains the physical addr accessed that caused the
>>> +	 * machine check. We need to walk through the list of NFITs, and see
>>> +	 * if any of them matches that address, and only then start a scrub.
>>> +	 */
>>> +	mutex_lock(&acpi_desc_lock);
>>> +	list_for_each_entry(acpi_desc, &acpi_descs, list) {
>>> +		struct device *dev = acpi_desc->dev;
>>> +		int found_match = 0;
>>> +
>>> +		list_for_each_entry(nfit_spa, &acpi_desc->spas, list) {
>>> +			struct acpi_nfit_system_address *spa = nfit_spa->spa;
>>> +
>>> +			if (nfit_spa_type(spa) != NFIT_SPA_PM)
>>> +				continue;
>>> +			/* find the spa that covers the mce addr */
>>> +			if (spa->address > mce->addr)
>>> +				continue;
>>> +			if ((spa->address + spa->length - 1) < mce->addr)
>>> +				continue;
>>> +			found_match = 1;
>>> +			dev_dbg(dev, "%s: addr in SPA %d (0x%llx, 0x%llx)\n",
>>> +				__func__, spa->range_index, spa->address,
>>> +				spa->length);
>>> +			/*
>>> +			 * We can break at the first match because we're going
>>> +			 * to rescan all the SPA ranges. There shouldn't be any
>>> +			 * aliasing anyway.
>>> +			 */
>>> +			break;
>>> +		}
>>> +
>>> +		/*
>>> +		 * We can ignore an -EBUSY here because if an ARS is already
>>> +		 * in progress, just let that be the last authoritative one
>>> +		 */
>>> +		if (found_match)
>>> +			acpi_nfit_ars_rescan(acpi_desc);
>>> +	}
>>> +
>>> +	mutex_unlock(&acpi_desc_lock);
>>> +	return NOTIFY_DONE;
>>> +}
>>> +
>>> +static struct notifier_block nfit_mce_dec = {
>>> +	.notifier_call	= nfit_handle_mce,
>>> +};
>>> +
>>>  void acpi_nfit_desc_init(struct acpi_nfit_desc *acpi_desc, struct device *dev)
>>>  {
>>>  	struct nvdimm_bus_descriptor *nd_desc;
>>> @@ -2676,6 +2755,9 @@ static int acpi_nfit_remove(struct acpi_device *adev)
>>>  	flush_workqueue(nfit_wq);
>>>  	sysfs_put(acpi_desc->scrub_count_state);
>>>  	nvdimm_bus_unregister(acpi_desc->nvdimm_bus);
>>> +	mutex_lock(&acpi_desc_lock);
>>> +	list_del(&acpi_desc->list);
>>> +	mutex_unlock(&acpi_desc_lock);
>>>  	return 0;
>>>  }
>>>  
>>> @@ -2782,13 +2864,20 @@ static __init int nfit_init(void)
>>>  	if (!nfit_wq)
>>>  		return -ENOMEM;
>>>  
>>> +	INIT_LIST_HEAD(&acpi_descs);
>>> +	mce_register_decode_chain(&nfit_mce_dec);
>>> +
>>>  	return acpi_bus_register_driver(&acpi_nfit_driver);
>>>  }
>>>  
>>>  static __exit void nfit_exit(void)
>>>  {
>>> +	mce_unregister_decode_chain(&nfit_mce_dec);
>>>  	acpi_bus_unregister_driver(&acpi_nfit_driver);
>>>  	destroy_workqueue(nfit_wq);
>>> +	mutex_lock(&acpi_desc_lock);
>>> +	WARN_ON(!list_empty(&acpi_descs));
>>> +	mutex_unlock(&acpi_desc_lock);
>>>  }
>>>  
>>>  module_init(nfit_init);
>>> diff --git a/drivers/acpi/nfit.h b/drivers/acpi/nfit.h
>>> index 954d2aa..d2aec6f 100644
>>> --- a/drivers/acpi/nfit.h
>>> +++ b/drivers/acpi/nfit.h
>>> @@ -146,6 +146,7 @@ struct acpi_nfit_desc {
>>>  	struct nd_cmd_ars_status *ars_status;
>>>  	size_t ars_status_size;
>>>  	struct work_struct work;
>>> +	struct list_head list;
>>>  	struct kernfs_node *scrub_count_state;
>>>  	unsigned int scrub_count;
>>>  	unsigned int cancel:1;
>>>

  reply	other threads:[~2016-07-21 21:26 UTC|newest]

Thread overview: 39+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2016-07-21  1:50 [PATCH v2 0/3] ARS rescanning triggered by latent errors or userspace Vishal Verma
2016-07-21  1:50 ` Vishal Verma
2016-07-21  1:50 ` Vishal Verma
2016-07-21  1:50 ` [PATCH v2 1/3] pmem: clarify a debug print in pmem_clear_poison Vishal Verma
2016-07-21  1:50   ` Vishal Verma
2016-07-21  1:50   ` Vishal Verma
2016-07-21  1:50 ` [PATCH v2 2/3] nfit, libnvdimm: allow an ARS scrub to be triggered on demand Vishal Verma
2016-07-21  1:50   ` Vishal Verma
2016-07-21  1:50   ` Vishal Verma
2016-07-21 15:56   ` Dan Williams
2016-07-21 15:56     ` Dan Williams
2016-07-21 15:56     ` Dan Williams
2016-07-21 18:07     ` Vishal Verma
2016-07-21 18:07       ` Vishal Verma
2016-07-21 18:07       ` Vishal Verma
2016-07-21 19:40   ` Linda Knippers
2016-07-21 19:40     ` Linda Knippers
2016-07-21 19:40     ` Linda Knippers
2016-07-21 19:46     ` Dan Williams
2016-07-21 19:46       ` Dan Williams
2016-07-21 19:46       ` Dan Williams
2016-07-21 19:55       ` Linda Knippers
2016-07-21 19:55         ` Linda Knippers
2016-07-21 19:55         ` Linda Knippers
2016-07-21 19:59         ` Dan Williams
2016-07-21 19:59           ` Dan Williams
2016-07-21 19:59           ` Dan Williams
2016-07-21  1:50 ` [PATCH v2 3/3] nfit: do an ARS scrub on hitting a latent media error Vishal Verma
2016-07-21  1:50   ` Vishal Verma
2016-07-21  1:50   ` Vishal Verma
2016-07-21 20:54   ` Linda Knippers
2016-07-21 20:54     ` Linda Knippers
2016-07-21 20:54     ` Linda Knippers
2016-07-21 21:10     ` Vishal Verma
2016-07-21 21:10       ` Vishal Verma
2016-07-21 21:10       ` Vishal Verma
2016-07-21 21:25       ` Linda Knippers [this message]
2016-07-21 21:25         ` Linda Knippers
2016-07-21 21:25         ` Linda Knippers

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=55fdd656-deb7-56a7-cd4f-2f17599b4c2e@hpe.com \
    --to=linda.knippers@hpe.com \
    --cc=linux-acpi@vger.kernel.org \
    --cc=linux-kernel@vger.kernel.org \
    --cc=tony.luck@intel.com \
    --cc=vishal.l.verma@intel.com \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.