linuxppc-dev.lists.ozlabs.org archive mirror
 help / color / mirror / Atom feed
From: "Oliver O'Halloran" <oohall@gmail.com>
To: Santosh Sivaraj <santosh@fossix.org>
Cc: Reza Arbab <arbab@linux.ibm.com>,
	"Aneesh Kumar K.V" <aneesh.kumar@linux.ibm.com>,
	Chandan Rajendra <chandan@linux.ibm.com>,
	linuxppc-dev <linuxppc-dev@lists.ozlabs.org>,
	Mahesh J Salgaonkar <mahesh@linux.ibm.com>
Subject: Re: [PATCH 3/3] papr/scm: Add bad memory ranges to nvdimm bad ranges
Date: Thu, 15 Aug 2019 23:11:21 +1000	[thread overview]
Message-ID: <CAOSf1CFqFHqCKQ7tf1CZjucdH5UnZ_DiuqEkppwZZLc0qYY-ZQ@mail.gmail.com> (raw)
In-Reply-To: <20190814082452.28013-4-santosh@fossix.org>

On Wed, Aug 14, 2019 at 6:25 PM Santosh Sivaraj <santosh@fossix.org> wrote:
>
> Subscribe to the MCE notification and add the physical address which
> generated a memory error to nvdimm bad range.
>
> Signed-off-by: Santosh Sivaraj <santosh@fossix.org>
> ---
>  arch/powerpc/platforms/pseries/papr_scm.c | 65 +++++++++++++++++++++++
>  1 file changed, 65 insertions(+)
>
> diff --git a/arch/powerpc/platforms/pseries/papr_scm.c b/arch/powerpc/platforms/pseries/papr_scm.c
> index a5ac371a3f06..4d25c98a9835 100644
> --- a/arch/powerpc/platforms/pseries/papr_scm.c
> +++ b/arch/powerpc/platforms/pseries/papr_scm.c
> @@ -12,6 +12,8 @@
>  #include <linux/libnvdimm.h>
>  #include <linux/platform_device.h>
>  #include <linux/delay.h>
> +#include <linux/nd.h>
> +#include <asm/mce.h>
>
>  #include <asm/plpar_wrappers.h>
>
> @@ -39,8 +41,12 @@ struct papr_scm_priv {
>         struct resource res;
>         struct nd_region *region;
>         struct nd_interleave_set nd_set;
> +       struct list_head list;

list is not a meaningful name. call it something more descriptive.

>  };
>
> +LIST_HEAD(papr_nd_regions);
> +DEFINE_MUTEX(papr_ndr_lock);

Should this be a mutex or a spinlock? I don't know what context the
mce notifier is called from, but if it's not sleepable then a mutex
will cause problems. Did you test this with lockdep enabled?

> +
>  static int drc_pmem_bind(struct papr_scm_priv *p)
>  {
>         unsigned long ret[PLPAR_HCALL_BUFSIZE];
> @@ -364,6 +370,10 @@ static int papr_scm_nvdimm_init(struct papr_scm_priv *p)
>                 dev_info(dev, "Region registered with target node %d and online node %d",
>                          target_nid, online_nid);
>
> +       mutex_lock(&papr_ndr_lock);
> +       list_add_tail(&p->list, &papr_nd_regions);
> +       mutex_unlock(&papr_ndr_lock);
> +

Where's the matching remove when we unbind the driver?

>         return 0;
>
>  err:   nvdimm_bus_unregister(p->bus);
> @@ -371,6 +381,60 @@ err:       nvdimm_bus_unregister(p->bus);
>         return -ENXIO;
>  }
>
> +static int handle_mce_ue(struct notifier_block *nb, unsigned long val,
> +                        void *data)
> +{
> +       struct machine_check_event *evt = data;
> +       struct papr_scm_priv *p;
> +       u64 phys_addr;
> +
> +       if (evt->error_type != MCE_ERROR_TYPE_UE)
> +               return NOTIFY_DONE;
> +
> +       if (list_empty(&papr_nd_regions))
> +               return NOTIFY_DONE;
> +
> +       phys_addr = evt->u.ue_error.physical_address +
> +               (evt->u.ue_error.effective_address & ~PAGE_MASK);

Wait what? Why is physical_address page aligned, but effective_address
not? Not a problem with this patch, but still, what the hell?

> +       if (!evt->u.ue_error.physical_address_provided ||
> +           !is_zone_device_page(pfn_to_page(phys_addr >> PAGE_SHIFT)))
> +               return NOTIFY_DONE;
> +
> +       mutex_lock(&papr_ndr_lock);
> +       list_for_each_entry(p, &papr_nd_regions, list) {
> +               struct resource res = p->res;
> +               u64 aligned_addr;
> +

> +               if (res.start > phys_addr)
> +                       continue;
> +
> +               if (res.end < phys_addr)
> +                       continue;

surely there's a helper for this

> +
> +               aligned_addr = ALIGN_DOWN(phys_addr, L1_CACHE_BYTES);
> +               pr_debug("Add memory range (0x%llx -- 0x%llx) as bad range\n",
> +                        aligned_addr, aligned_addr + L1_CACHE_BYTES);
> +
> +               if (nvdimm_bus_add_badrange(p->bus,
> +                                           aligned_addr, L1_CACHE_BYTES))
> +                       pr_warn("Failed to add bad range (0x%llx -- 0x%llx)\n",
> +                               aligned_addr, aligned_addr + L1_CACHE_BYTES);
> +
> +               nvdimm_region_notify(p->region,
> +                                    NVDIMM_REVALIDATE_POISON);
> +
> +               break;

nit: you can avoid stacking indentation levels by breaking out of the
loop as soon as you've found the region you're looking for.

> +       }
> +       mutex_unlock(&papr_ndr_lock);
> +
> +       return NOTIFY_OK;
> +}
> +
> +static struct notifier_block mce_ue_nb = {
> +       .notifier_call = handle_mce_ue
> +};
> +
>  static int papr_scm_probe(struct platform_device *pdev)
>  {
>         struct device_node *dn = pdev->dev.of_node;
> @@ -456,6 +520,7 @@ static int papr_scm_probe(struct platform_device *pdev)
>                 goto err2;
>
>         platform_set_drvdata(pdev, p);
> +       mce_register_notifier(&mce_ue_nb);

Either get rid of the global region list and have a notifier block in
each device's driver private data, or keep the global list and
register the notifier in module_init(). Re-registering the notifier
each time a separate device is probed seems very sketchy.

>         return 0;
>
> --
> 2.21.0
>

  reply	other threads:[~2019-08-15 13:14 UTC|newest]

Thread overview: 7+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2019-08-14  8:24 [PATCH 0/3] Add bad pmem bad blocks to bad range Santosh Sivaraj
2019-08-14  8:24 ` [PATCH 1/3] powerpc/mce: Add MCE notification chain Santosh Sivaraj
2019-08-14  8:24 ` [PATCH 2/3] of_pmem: Add memory ranges which took a mce to bad range Santosh Sivaraj
2019-08-14  8:24 ` [PATCH 3/3] papr/scm: Add bad memory ranges to nvdimm bad ranges Santosh Sivaraj
2019-08-15 13:11   ` Oliver O'Halloran [this message]
2019-08-15 14:00     ` Santosh Sivaraj
2019-08-20  2:30 [PATCH 0/3] Add bad pmem bad blocks to bad range Santosh Sivaraj
2019-08-20  2:30 ` [PATCH 3/3] papr/scm: Add bad memory ranges to nvdimm bad ranges Santosh Sivaraj

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=CAOSf1CFqFHqCKQ7tf1CZjucdH5UnZ_DiuqEkppwZZLc0qYY-ZQ@mail.gmail.com \
    --to=oohall@gmail.com \
    --cc=aneesh.kumar@linux.ibm.com \
    --cc=arbab@linux.ibm.com \
    --cc=chandan@linux.ibm.com \
    --cc=linuxppc-dev@lists.ozlabs.org \
    --cc=mahesh@linux.ibm.com \
    --cc=santosh@fossix.org \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).