Linux-ACPI Archive on lore.kernel.org
 help / color / Atom feed
From: Dan Williams <dan.j.williams@intel.com>
To: Joao Martins <joao.m.martins@oracle.com>
Cc: Linux ACPI <linux-acpi@vger.kernel.org>,
	"Rafael J. Wysocki" <rjw@rjwysocki.net>,
	Peter Zijlstra <peterz@infradead.org>,
	Dave Hansen <dave.hansen@linux.intel.com>,
	Ard Biesheuvel <ard.biesheuvel@linaro.org>,
	linux-nvdimm <linux-nvdimm@lists.01.org>,
	Linux Kernel Mailing List <linux-kernel@vger.kernel.org>,
	X86 ML <x86@kernel.org>
Subject: Re: [PATCH v2 4/6] ACPI: HMAT: Refactor hmat_register_target_device to hmem_register_device
Date: Wed, 25 Mar 2020 15:32:48 -0700
Message-ID: <CAPcyv4gjTmZuvqkV_r3_FuGrjK=a-CVGOnLEDZ0Fpiyg2h_Lag@mail.gmail.com> (raw)
In-Reply-To: <CAPcyv4hgExDoKZg7QQ9JRkPEY2N56EjLgLQ2Q19tu3vnUdPqgA@mail.gmail.com>

On Tue, Mar 24, 2020 at 2:04 PM Dan Williams <dan.j.williams@intel.com> wrote:
>
> On Tue, Mar 24, 2020 at 12:41 PM Joao Martins <joao.m.martins@oracle.com> wrote:
> >
> >
> > On 3/22/20 4:12 PM, Dan Williams wrote:
> > > In preparation for exposing "Soft Reserved" memory ranges without an
> > > HMAT, move the hmem device registration to its own compilation unit and
> > > make the implementation generic.
> > >
> > > The generic implementation drops usage of acpi_map_pxm_to_online_node()
> > > that was translating ACPI proximity domain values and instead relies on
> > > numa_map_to_online_node() to determine the numa node for the device.
> > >
> > > Cc: "Rafael J. Wysocki" <rjw@rjwysocki.net>
> > > Link: https://lore.kernel.org/r/158318761484.2216124.2049322072599482736.stgit@dwillia2-desk3.amr.corp.intel.com
> > > Signed-off-by: Dan Williams <dan.j.williams@intel.com>
> > > ---
> > >  drivers/acpi/numa/hmat.c  |   68 ++++-----------------------------------------
> > >  drivers/dax/Kconfig       |    4 +++
> > >  drivers/dax/Makefile      |    3 +-
> > >  drivers/dax/hmem.c        |   56 -------------------------------------
> > >  drivers/dax/hmem/Makefile |    5 +++
> > >  drivers/dax/hmem/device.c |   64 ++++++++++++++++++++++++++++++++++++++++++
> > >  drivers/dax/hmem/hmem.c   |   56 +++++++++++++++++++++++++++++++++++++
> > >  include/linux/dax.h       |    8 +++++
> > >  8 files changed, 144 insertions(+), 120 deletions(-)
> > >  delete mode 100644 drivers/dax/hmem.c
> > >  create mode 100644 drivers/dax/hmem/Makefile
> > >  create mode 100644 drivers/dax/hmem/device.c
> > >  create mode 100644 drivers/dax/hmem/hmem.c
> > >
> > > diff --git a/drivers/acpi/numa/hmat.c b/drivers/acpi/numa/hmat.c
> > > index a12e36a12618..134bcb40b2af 100644
> > > --- a/drivers/acpi/numa/hmat.c
> > > +++ b/drivers/acpi/numa/hmat.c
> > > @@ -24,6 +24,7 @@
> > >  #include <linux/mutex.h>
> > >  #include <linux/node.h>
> > >  #include <linux/sysfs.h>
> > > +#include <linux/dax.h>
> > >
> > >  static u8 hmat_revision;
> > >  static int hmat_disable __initdata;
> > > @@ -640,66 +641,6 @@ static void hmat_register_target_perf(struct memory_target *target)
> > >       node_set_perf_attrs(mem_nid, &target->hmem_attrs, 0);
> > >  }
> > >
> > > -static void hmat_register_target_device(struct memory_target *target,
> >           ^^^^ int ?
> >
> > > -             struct resource *r)
> > > -{
> > > -     /* define a clean / non-busy resource for the platform device */
> > > -     struct resource res = {
> > > -             .start = r->start,
> > > -             .end = r->end,
> > > -             .flags = IORESOURCE_MEM,
> > > -     };
> > > -     struct platform_device *pdev;
> > > -     struct memregion_info info;
> > > -     int rc, id;
> > > -
> > > -     rc = region_intersects(res.start, resource_size(&res), IORESOURCE_MEM,
> > > -                     IORES_DESC_SOFT_RESERVED);
> > > -     if (rc != REGION_INTERSECTS)
> > > -             return;
> >                 ^ return -ENXIO;
> >
> > > -
> > > -     id = memregion_alloc(GFP_KERNEL);
> > > -     if (id < 0) {
> > > -             pr_err("memregion allocation failure for %pr\n", &res);
> > > -             return;
> >                 ^ return -ENOMEM;
> >
> > > -     }
> > > -
> > > -     pdev = platform_device_alloc("hmem", id);
> > > -     if (!pdev) {
> >
> >                 rc = -ENOMEM;
> >
> > > -             pr_err("hmem device allocation failure for %pr\n", &res);
> > > -             goto out_pdev;
> > > -     }
> > > -
> > > -     pdev->dev.numa_node = acpi_map_pxm_to_online_node(target->memory_pxm);
> > > -     info = (struct memregion_info) {
> > > -             .target_node = acpi_map_pxm_to_node(target->memory_pxm),
> > > -     };
> > > -     rc = platform_device_add_data(pdev, &info, sizeof(info));
> > > -     if (rc < 0) {
> > > -             pr_err("hmem memregion_info allocation failure for %pr\n", &res);
> > > -             goto out_pdev;
> > > -     }
> > > -
> > > -     rc = platform_device_add_resources(pdev, &res, 1);
> > > -     if (rc < 0) {
> > > -             pr_err("hmem resource allocation failure for %pr\n", &res);
> > > -             goto out_resource;
> > > -     }
> > > -
> > > -     rc = platform_device_add(pdev);
> > > -     if (rc < 0) {
> > > -             dev_err(&pdev->dev, "device add failed for %pr\n", &res);
> > > -             goto out_resource;
> > > -     }
> > > -
> > > -     return;
> >         ^^^^^^ return 0;
> > > -
> > > -out_resource:
> > > -     put_device(&pdev->dev);
> > > -out_pdev:
> > > -     memregion_free(id);
> >
> >         return rc;
> >
> > > -}
> > > -
> > >  static void hmat_register_target_devices(struct memory_target *target)
> > >  {
> > >       struct resource *res;
> > > @@ -711,8 +652,11 @@ static void hmat_register_target_devices(struct memory_target *target)
> > >       if (!IS_ENABLED(CONFIG_DEV_DAX_HMEM))
> > >               return;
> > >
> > > -     for (res = target->memregions.child; res; res = res->sibling)
> > > -             hmat_register_target_device(target, res);
> > > +     for (res = target->memregions.child; res; res = res->sibling) {
> > > +             int target_nid = acpi_map_pxm_to_node(target->memory_pxm);
> > > +
> > > +             hmem_register_device(target_nid, res);
> > > +     }
> > >  }
> > >
> >
> > If it makes sense to propagate error to hmem_register_device (as noted above),
> > then here perhaps a pr_err() if hmem registration fails mainly for reporting
> > purposes?
>
> Yeah, I guess it makes sense to at least log that something went wrong
> if someone wonders where their memory device went. I'll add that
> rework as a follow-on.

Now that I look again hmat_register_target_device() already has
multiple pr_err() calls indicating that something went wrong. The error code
would not go anywhere useful.

  reply index

Thread overview: 16+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2020-03-22 16:12 [PATCH v2 0/6] Manual definition of Soft Reserved memory devices Dan Williams
2020-03-22 16:12 ` [PATCH v2 1/6] x86/numa: Cleanup configuration dependent command-line options Dan Williams
2020-03-22 16:12 ` [PATCH v2 2/6] x86/numa: Add 'nohmat' option Dan Williams
2020-03-22 16:12 ` [PATCH v2 3/6] efi/fake_mem: Arrange for a resource entry per efi_fake_mem instance Dan Williams
2020-03-22 16:12 ` [PATCH v2 4/6] ACPI: HMAT: Refactor hmat_register_target_device to hmem_register_device Dan Williams
2020-03-24 19:40   ` Joao Martins
2020-03-24 21:04     ` Dan Williams
2020-03-25 22:32       ` Dan Williams [this message]
2020-03-22 16:12 ` [PATCH v2 5/6] resource: Report parent to walk_iomem_res_desc() callback Dan Williams
2020-03-22 16:12 ` [PATCH v2 6/6] ACPI: HMAT: Attach a device for each soft-reserved range Dan Williams
2020-03-24 19:41   ` Joao Martins
2020-03-24 21:06     ` Dan Williams
2020-03-24 21:30       ` Joao Martins
2020-03-25 11:10   ` Will Deacon
2020-03-25 17:10     ` Dan Williams
2020-03-25 10:02 ` [PATCH v2 0/6] Manual definition of Soft Reserved memory devices Rafael J. Wysocki

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to='CAPcyv4gjTmZuvqkV_r3_FuGrjK=a-CVGOnLEDZ0Fpiyg2h_Lag@mail.gmail.com' \
    --to=dan.j.williams@intel.com \
    --cc=ard.biesheuvel@linaro.org \
    --cc=dave.hansen@linux.intel.com \
    --cc=joao.m.martins@oracle.com \
    --cc=linux-acpi@vger.kernel.org \
    --cc=linux-kernel@vger.kernel.org \
    --cc=linux-nvdimm@lists.01.org \
    --cc=peterz@infradead.org \
    --cc=rjw@rjwysocki.net \
    --cc=x86@kernel.org \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link

Linux-ACPI Archive on lore.kernel.org

Archives are clonable:
	git clone --mirror https://lore.kernel.org/linux-acpi/0 linux-acpi/git/0.git

	# If you have public-inbox 1.1+ installed, you may
	# initialize and index your mirror using the following commands:
	public-inbox-init -V2 linux-acpi linux-acpi/ https://lore.kernel.org/linux-acpi \
		linux-acpi@vger.kernel.org
	public-inbox-index linux-acpi

Example config snippet for mirrors

Newsgroup available over NNTP:
	nntp://nntp.lore.kernel.org/org.kernel.vger.linux-acpi


AGPL code for this site: git clone https://public-inbox.org/public-inbox.git