* [PATCH] pstore/ram: Clarify resource reservation labels
@ 2018-10-18  0:29 Kees Cook
  2018-10-18  0:49 ` Dan Williams
  0 siblings, 1 reply; 15+ messages in thread
From: Kees Cook @ 2018-10-18  0:29 UTC (permalink / raw)
  To: linux-kernel
  Cc: Anton Vorontsov, Colin Cross, Tony Luck, Dan Williams, Joel Fernandes

When ramoops reserved a memory region in the kernel, it had an unhelpful
label of "persistent_memory". When reading /proc/iomem, it would be
repeated many times, did not hint that it was ramoops in particular,
and didn't clarify very much about what each was used for:

400000000-407ffffff : Persistent Memory (legacy)
  400000000-400000fff : persistent_memory
  400001000-400001fff : persistent_memory
...
  4000ff000-4000fffff : persistent_memory

Instead, this adds meaningful labels for how the various regions are
being used:

400000000-407ffffff : Persistent Memory (legacy)
  400000000-400000fff : ramoops:dump(0/252)
  400001000-400001fff : ramoops:dump(1/252)
...
  4000fc000-4000fcfff : ramoops:dump(252/252)
  4000fd000-4000fdfff : ramoops:console
  4000fe000-4000fe3ff : ramoops:ftrace(0/3)
  4000fe400-4000fe7ff : ramoops:ftrace(1/3)
  4000fe800-4000febff : ramoops:ftrace(2/3)
  4000fec00-4000fefff : ramoops:ftrace(3/3)
  4000ff000-4000fffff : ramoops:pmsg

Signed-off-by: Kees Cook <keescook@chromium.org>
---
 fs/pstore/ram.c            | 16 +++++++++++++---
 fs/pstore/ram_core.c       | 11 +++++++----
 include/linux/pstore_ram.h |  3 ++-
 3 files changed, 22 insertions(+), 8 deletions(-)

diff --git a/fs/pstore/ram.c b/fs/pstore/ram.c
index 6ea9cd801044..ffcff6516e89 100644
--- a/fs/pstore/ram.c
+++ b/fs/pstore/ram.c
@@ -587,9 +587,16 @@ static int ramoops_init_przs(const char *name,
 		goto fail;
 
 	for (i = 0; i < *cnt; i++) {
+		char *label;
+
+		if (*cnt == 1)
+			label = kasprintf(GFP_KERNEL, "ramoops:%s", name);
+		else
+			label = kasprintf(GFP_KERNEL, "ramoops:%s(%d/%d)",
+					  name, i, *cnt - 1);
 		prz_ar[i] = persistent_ram_new(*paddr, zone_sz, sig,
-						  &cxt->ecc_info,
-						  cxt->memtype, flags);
+					       &cxt->ecc_info,
+					       cxt->memtype, flags, label);
 		if (IS_ERR(prz_ar[i])) {
 			err = PTR_ERR(prz_ar[i]);
 			dev_err(dev, "failed to request %s mem region (0x%zx@0x%llx): %d\n",
@@ -619,6 +626,8 @@ static int ramoops_init_prz(const char *name,
 			    struct persistent_ram_zone **prz,
 			    phys_addr_t *paddr, size_t sz, u32 sig)
 {
+	char *label;
+
 	if (!sz)
 		return 0;
 
@@ -629,8 +638,9 @@ static int ramoops_init_prz(const char *name,
 		return -ENOMEM;
 	}
 
+	label = kasprintf(GFP_KERNEL, "ramoops:%s", name);
 	*prz = persistent_ram_new(*paddr, sz, sig, &cxt->ecc_info,
-				  cxt->memtype, 0);
+				  cxt->memtype, 0, label);
 	if (IS_ERR(*prz)) {
 		int err = PTR_ERR(*prz);
 
diff --git a/fs/pstore/ram_core.c b/fs/pstore/ram_core.c
index 0792595ebcfb..12e21f789194 100644
--- a/fs/pstore/ram_core.c
+++ b/fs/pstore/ram_core.c
@@ -438,11 +438,11 @@ static void *persistent_ram_vmap(phys_addr_t start, size_t size,
 }
 
 static void *persistent_ram_iomap(phys_addr_t start, size_t size,
-		unsigned int memtype)
+		unsigned int memtype, char *label)
 {
 	void *va;
 
-	if (!request_mem_region(start, size, "persistent_ram")) {
+	if (!request_mem_region(start, size, label ?: "ramoops")) {
 		pr_err("request mem region (0x%llx@0x%llx) failed\n",
 			(unsigned long long)size, (unsigned long long)start);
 		return NULL;
@@ -470,7 +470,8 @@ static int persistent_ram_buffer_map(phys_addr_t start, phys_addr_t size,
 	if (pfn_valid(start >> PAGE_SHIFT))
 		prz->vaddr = persistent_ram_vmap(start, size, memtype);
 	else
-		prz->vaddr = persistent_ram_iomap(start, size, memtype);
+		prz->vaddr = persistent_ram_iomap(start, size, memtype,
+						  prz->label);
 
 	if (!prz->vaddr) {
 		pr_err("%s: Failed to map 0x%llx pages at 0x%llx\n", __func__,
@@ -541,12 +542,13 @@ void persistent_ram_free(struct persistent_ram_zone *prz)
 	prz->ecc_info.par = NULL;
 
 	persistent_ram_free_old(prz);
+	kfree(prz->label);
 	kfree(prz);
 }
 
 struct persistent_ram_zone *persistent_ram_new(phys_addr_t start, size_t size,
 			u32 sig, struct persistent_ram_ecc_info *ecc_info,
-			unsigned int memtype, u32 flags)
+			unsigned int memtype, u32 flags, char *label)
 {
 	struct persistent_ram_zone *prz;
 	int ret = -ENOMEM;
@@ -560,6 +562,7 @@ struct persistent_ram_zone *persistent_ram_new(phys_addr_t start, size_t size,
 	/* Initialize general buffer state. */
 	raw_spin_lock_init(&prz->buffer_lock);
 	prz->flags = flags;
+	prz->label = label;
 
 	ret = persistent_ram_buffer_map(start, size, prz, memtype);
 	if (ret)
diff --git a/include/linux/pstore_ram.h b/include/linux/pstore_ram.h
index e6d226464838..602d64725222 100644
--- a/include/linux/pstore_ram.h
+++ b/include/linux/pstore_ram.h
@@ -46,6 +46,7 @@ struct persistent_ram_zone {
 	phys_addr_t paddr;
 	size_t size;
 	void *vaddr;
+	char *label;
 	struct persistent_ram_buffer *buffer;
 	size_t buffer_size;
 	u32 flags;
@@ -65,7 +66,7 @@ struct persistent_ram_zone {
 
 struct persistent_ram_zone *persistent_ram_new(phys_addr_t start, size_t size,
 			u32 sig, struct persistent_ram_ecc_info *ecc_info,
-			unsigned int memtype, u32 flags);
+			unsigned int memtype, u32 flags, char *label);
 void persistent_ram_free(struct persistent_ram_zone *prz);
 void persistent_ram_zap(struct persistent_ram_zone *prz);
 
-- 
2.17.1


-- 
Kees Cook
Pixel Security


* Re: [PATCH] pstore/ram: Clarify resource reservation labels
  2018-10-18  0:29 [PATCH] pstore/ram: Clarify resource reservation labels Kees Cook
@ 2018-10-18  0:49 ` Dan Williams
  2018-10-18  7:14   ` Kees Cook
  0 siblings, 1 reply; 15+ messages in thread
From: Dan Williams @ 2018-10-18  0:49 UTC (permalink / raw)
  To: Kees Cook
  Cc: Linux Kernel Mailing List, Anton Vorontsov, Colin Cross, Luck,
	Tony, joel

On Wed, Oct 17, 2018 at 5:29 PM Kees Cook <keescook@chromium.org> wrote:
>
> When ramoops reserved a memory region in the kernel, it had an unhelpful
> label of "persistent_memory". When reading /proc/iomem, the label was
> repeated many times, gave no hint that ramoops was responsible, and
> did not clarify what each region was used for:
>
> 400000000-407ffffff : Persistent Memory (legacy)
>   400000000-400000fff : persistent_memory
>   400001000-400001fff : persistent_memory
> ...
>   4000ff000-4000fffff : persistent_memory
>
> Instead, this adds meaningful labels for how the various regions are
> being used:
>
> 400000000-407ffffff : Persistent Memory (legacy)
>   400000000-400000fff : ramoops:dump(0/252)
>   400001000-400001fff : ramoops:dump(1/252)
> ...
>   4000fc000-4000fcfff : ramoops:dump(252/252)
>   4000fd000-4000fdfff : ramoops:console
>   4000fe000-4000fe3ff : ramoops:ftrace(0/3)
>   4000fe400-4000fe7ff : ramoops:ftrace(1/3)
>   4000fe800-4000febff : ramoops:ftrace(2/3)
>   4000fec00-4000fefff : ramoops:ftrace(3/3)
>   4000ff000-4000fffff : ramoops:pmsg

Hopefully ramoops is doing request_region() before trying to do
anything with its ranges, because it's going to collide with the pmem
driver doing a request_region(). If we want to have pstore use pmem as
a backing store that's a new drivers/nvdimm/ namespace personality
driver to turn around and register a persistent memory range with
pstore rather than the pmem block-device driver.
>
> Signed-off-by: Kees Cook <keescook@chromium.org>
> ---
>  fs/pstore/ram.c            | 16 +++++++++++++---
>  fs/pstore/ram_core.c       | 11 +++++++----
>  include/linux/pstore_ram.h |  3 ++-
>  3 files changed, 22 insertions(+), 8 deletions(-)
>
> diff --git a/fs/pstore/ram.c b/fs/pstore/ram.c
> index 6ea9cd801044..ffcff6516e89 100644
> --- a/fs/pstore/ram.c
> +++ b/fs/pstore/ram.c
> @@ -587,9 +587,16 @@ static int ramoops_init_przs(const char *name,
>                 goto fail;
>
>         for (i = 0; i < *cnt; i++) {
> +               char *label;
> +
> +               if (*cnt == 1)
> +                       label = kasprintf(GFP_KERNEL, "ramoops:%s", name);
> +               else
> +                       label = kasprintf(GFP_KERNEL, "ramoops:%s(%d/%d)",
> +                                         name, i, *cnt - 1);
>                 prz_ar[i] = persistent_ram_new(*paddr, zone_sz, sig,
> -                                                 &cxt->ecc_info,
> -                                                 cxt->memtype, flags);
> +                                              &cxt->ecc_info,
> +                                              cxt->memtype, flags, label);
>                 if (IS_ERR(prz_ar[i])) {
>                         err = PTR_ERR(prz_ar[i]);
>                         dev_err(dev, "failed to request %s mem region (0x%zx@0x%llx): %d\n",
> @@ -619,6 +626,8 @@ static int ramoops_init_prz(const char *name,
>                             struct persistent_ram_zone **prz,
>                             phys_addr_t *paddr, size_t sz, u32 sig)
>  {
> +       char *label;
> +
>         if (!sz)
>                 return 0;
>
> @@ -629,8 +638,9 @@ static int ramoops_init_prz(const char *name,
>                 return -ENOMEM;
>         }
>
> +       label = kasprintf(GFP_KERNEL, "ramoops:%s", name);
>         *prz = persistent_ram_new(*paddr, sz, sig, &cxt->ecc_info,
> -                                 cxt->memtype, 0);
> +                                 cxt->memtype, 0, label);
>         if (IS_ERR(*prz)) {
>                 int err = PTR_ERR(*prz);
>
> diff --git a/fs/pstore/ram_core.c b/fs/pstore/ram_core.c
> index 0792595ebcfb..12e21f789194 100644
> --- a/fs/pstore/ram_core.c
> +++ b/fs/pstore/ram_core.c
> @@ -438,11 +438,11 @@ static void *persistent_ram_vmap(phys_addr_t start, size_t size,
>  }
>
>  static void *persistent_ram_iomap(phys_addr_t start, size_t size,
> -               unsigned int memtype)
> +               unsigned int memtype, char *label)
>  {
>         void *va;
>
> -       if (!request_mem_region(start, size, "persistent_ram")) {
> +       if (!request_mem_region(start, size, label ?: "ramoops")) {
>                 pr_err("request mem region (0x%llx@0x%llx) failed\n",
>                         (unsigned long long)size, (unsigned long long)start);
>                 return NULL;
> @@ -470,7 +470,8 @@ static int persistent_ram_buffer_map(phys_addr_t start, phys_addr_t size,
>         if (pfn_valid(start >> PAGE_SHIFT))
>                 prz->vaddr = persistent_ram_vmap(start, size, memtype);
>         else
> -               prz->vaddr = persistent_ram_iomap(start, size, memtype);
> +               prz->vaddr = persistent_ram_iomap(start, size, memtype,
> +                                                 prz->label);
>
>         if (!prz->vaddr) {
>                 pr_err("%s: Failed to map 0x%llx pages at 0x%llx\n", __func__,
> @@ -541,12 +542,13 @@ void persistent_ram_free(struct persistent_ram_zone *prz)
>         prz->ecc_info.par = NULL;
>
>         persistent_ram_free_old(prz);
> +       kfree(prz->label);
>         kfree(prz);
>  }
>
>  struct persistent_ram_zone *persistent_ram_new(phys_addr_t start, size_t size,
>                         u32 sig, struct persistent_ram_ecc_info *ecc_info,
> -                       unsigned int memtype, u32 flags)
> +                       unsigned int memtype, u32 flags, char *label)
>  {
>         struct persistent_ram_zone *prz;
>         int ret = -ENOMEM;
> @@ -560,6 +562,7 @@ struct persistent_ram_zone *persistent_ram_new(phys_addr_t start, size_t size,
>         /* Initialize general buffer state. */
>         raw_spin_lock_init(&prz->buffer_lock);
>         prz->flags = flags;
> +       prz->label = label;
>
>         ret = persistent_ram_buffer_map(start, size, prz, memtype);
>         if (ret)
> diff --git a/include/linux/pstore_ram.h b/include/linux/pstore_ram.h
> index e6d226464838..602d64725222 100644
> --- a/include/linux/pstore_ram.h
> +++ b/include/linux/pstore_ram.h
> @@ -46,6 +46,7 @@ struct persistent_ram_zone {
>         phys_addr_t paddr;
>         size_t size;
>         void *vaddr;
> +       char *label;
>         struct persistent_ram_buffer *buffer;
>         size_t buffer_size;
>         u32 flags;
> @@ -65,7 +66,7 @@ struct persistent_ram_zone {
>
>  struct persistent_ram_zone *persistent_ram_new(phys_addr_t start, size_t size,
>                         u32 sig, struct persistent_ram_ecc_info *ecc_info,
> -                       unsigned int memtype, u32 flags);
> +                       unsigned int memtype, u32 flags, char *label);
>  void persistent_ram_free(struct persistent_ram_zone *prz);
>  void persistent_ram_zap(struct persistent_ram_zone *prz);
>
> --
> 2.17.1
>
>
> --
> Kees Cook
> Pixel Security


* Re: [PATCH] pstore/ram: Clarify resource reservation labels
  2018-10-18  0:49 ` Dan Williams
@ 2018-10-18  7:14   ` Kees Cook
  2018-10-18 15:33     ` Dan Williams
  0 siblings, 1 reply; 15+ messages in thread
From: Kees Cook @ 2018-10-18  7:14 UTC (permalink / raw)
  To: Dan Williams
  Cc: Linux Kernel Mailing List, Anton Vorontsov, Colin Cross, Luck,
	Tony, Joel Fernandes

On Wed, Oct 17, 2018 at 5:49 PM, Dan Williams <dan.j.williams@intel.com> wrote:
> On Wed, Oct 17, 2018 at 5:29 PM Kees Cook <keescook@chromium.org> wrote:
>>
>> When ramoops reserved a memory region in the kernel, it had an unhelpful
>> label of "persistent_memory". When reading /proc/iomem, the label was
>> repeated many times, gave no hint that ramoops was responsible, and
>> did not clarify what each region was used for:
>>
>> 400000000-407ffffff : Persistent Memory (legacy)
>>   400000000-400000fff : persistent_memory
>>   400001000-400001fff : persistent_memory
>> ...
>>   4000ff000-4000fffff : persistent_memory
>>
>> Instead, this adds meaningful labels for how the various regions are
>> being used:
>>
>> 400000000-407ffffff : Persistent Memory (legacy)
>>   400000000-400000fff : ramoops:dump(0/252)
>>   400001000-400001fff : ramoops:dump(1/252)
>> ...
>>   4000fc000-4000fcfff : ramoops:dump(252/252)
>>   4000fd000-4000fdfff : ramoops:console
>>   4000fe000-4000fe3ff : ramoops:ftrace(0/3)
>>   4000fe400-4000fe7ff : ramoops:ftrace(1/3)
>>   4000fe800-4000febff : ramoops:ftrace(2/3)
>>   4000fec00-4000fefff : ramoops:ftrace(3/3)
>>   4000ff000-4000fffff : ramoops:pmsg
>
> Hopefully ramoops is doing request_region() before trying to do
> anything with its ranges, because it's going to collide with the pmem
> driver doing a request_region(). If we want to have pstore use pmem as
> a backing store that's a new drivers/nvdimm/ namespace personality
> driver to turn around and register a persistent memory range with
> pstore rather than the pmem block-device driver.

Yup: it's using request_mem_region() (that's where the labels above
are assigned).

As for nvdimm specifically, yes, I'd love to get pstore hooked up
correctly to nvdimm. How do the namespaces work? Right now pstore
depends on one of platform driver data, device tree specification, or
manual module parameters.
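
For reference, the platform driver data route is just a "ramoops"
platform device carrying a ramoops_platform_data. A minimal sketch
(the address and sizes below are placeholders, not recommendations):

        #include <linux/platform_device.h>
        #include <linux/pstore_ram.h>
        #include <linux/sizes.h>

        static struct ramoops_platform_data ramoops_data = {
                .mem_address    = 0x8f000000,   /* placeholder address */
                .mem_size       = SZ_1M,        /* placeholder size */
                .mem_type       = 0,            /* 0 => write-combined mapping */
                .record_size    = SZ_16K,       /* per-dump record slot */
                .dump_oops      = 1,
        };

        /* Registered from platform init code with
         * platform_device_register(&ramoops_dev).
         */
        static struct platform_device ramoops_dev = {
                .name   = "ramoops",
                .dev    = {
                        .platform_data = &ramoops_data,
                },
        };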

-Kees

-- 
Kees Cook
Pixel Security


* Re: [PATCH] pstore/ram: Clarify resource reservation labels
  2018-10-18  7:14   ` Kees Cook
@ 2018-10-18 15:33     ` Dan Williams
  2018-10-18 20:31       ` Kees Cook
  0 siblings, 1 reply; 15+ messages in thread
From: Dan Williams @ 2018-10-18 15:33 UTC (permalink / raw)
  To: Kees Cook
  Cc: Linux Kernel Mailing List, Anton Vorontsov, Colin Cross, Luck,
	Tony, joel, zwisler

[ add Ross ]

On Thu, Oct 18, 2018 at 12:15 AM Kees Cook <keescook@chromium.org> wrote:
>
> On Wed, Oct 17, 2018 at 5:49 PM, Dan Williams <dan.j.williams@intel.com> wrote:
> > On Wed, Oct 17, 2018 at 5:29 PM Kees Cook <keescook@chromium.org> wrote:
> >>
> >> When ramoops reserved a memory region in the kernel, it had an unhelpful
> >> label of "persistent_memory". When reading /proc/iomem, the label was
> >> repeated many times, gave no hint that ramoops was responsible, and
> >> did not clarify what each region was used for:
> >>
> >> 400000000-407ffffff : Persistent Memory (legacy)
> >>   400000000-400000fff : persistent_memory
> >>   400001000-400001fff : persistent_memory
> >> ...
> >>   4000ff000-4000fffff : persistent_memory
> >>
> >> Instead, this adds meaningful labels for how the various regions are
> >> being used:
> >>
> >> 400000000-407ffffff : Persistent Memory (legacy)
> >>   400000000-400000fff : ramoops:dump(0/252)
> >>   400001000-400001fff : ramoops:dump(1/252)
> >> ...
> >>   4000fc000-4000fcfff : ramoops:dump(252/252)
> >>   4000fd000-4000fdfff : ramoops:console
> >>   4000fe000-4000fe3ff : ramoops:ftrace(0/3)
> >>   4000fe400-4000fe7ff : ramoops:ftrace(1/3)
> >>   4000fe800-4000febff : ramoops:ftrace(2/3)
> >>   4000fec00-4000fefff : ramoops:ftrace(3/3)
> >>   4000ff000-4000fffff : ramoops:pmsg
> >
> > Hopefully ramoops is doing request_region() before trying to do
> > anything with its ranges, because it's going to collide with the pmem
> > driver doing a request_region(). If we want to have pstore use pmem as
> > a backing store that's a new drivers/nvdimm/ namespace personality
> > driver to turn around and register a persistent memory range with
> > pstore rather than the pmem block-device driver.
>
> Yup: it's using request_mem_region() (that's where the labels above
> are assigned).
>
> As for nvdimm specifically, yes, I'd love to get pstore hooked up
> correctly to nvdimm. How do the namespaces work? Right now pstore
> depends on one of platform driver data, device tree specification, or
> manual module parameters.

From the userspace side we have the ndctl utility to wrap
personalities on top of namespaces. So for example, I envision we
would be able to do:

    ndctl create-namespace --mode=pstore --size=128M

...and create a small namespace that will register with the pstore sub-system.

On the kernel side this would involve registering a 'pstore_dev' child
/ seed device under each region device. The 'seed-device' sysfs scheme
is described in our documentation [1]. The short summary is that ndctl
finds a seed device, assigns a namespace to it, and then binding that
device to a driver causes it to be initialized by the kernel.

[1]: https://www.kernel.org/doc/Documentation/nvdimm/nvdimm.txt
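
To make that concrete, the manual flow that ndctl would automate could
look like the following, by analogy with the existing btt_seed /
dax_seed attributes (the pstore-specific names are hypothetical, since
none of this exists yet):

    # each region would advertise a seed device for the new personality
    cat /sys/bus/nd/devices/region0/pstore_seed   # hypothetical attribute
    namespace0.0

    # after sizing the namespace, bind it to the (equally hypothetical)
    # nd_pstore driver to initialize it
    echo namespace0.0 > /sys/bus/nd/drivers/nd_pstore/bind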


* Re: [PATCH] pstore/ram: Clarify resource reservation labels
  2018-10-18 15:33     ` Dan Williams
@ 2018-10-18 20:31       ` Kees Cook
  2018-10-18 20:58         ` Ross Zwisler
  2018-10-18 21:35         ` Dan Williams
  0 siblings, 2 replies; 15+ messages in thread
From: Kees Cook @ 2018-10-18 20:31 UTC (permalink / raw)
  To: Dan Williams
  Cc: Linux Kernel Mailing List, Anton Vorontsov, Colin Cross, Luck,
	Tony, Joel Fernandes, zwisler

On Thu, Oct 18, 2018 at 8:33 AM, Dan Williams <dan.j.williams@intel.com> wrote:
> [ add Ross ]

Hi Ross! :)

> On Thu, Oct 18, 2018 at 12:15 AM Kees Cook <keescook@chromium.org> wrote:
>> As for nvdimm specifically, yes, I'd love to get pstore hooked up
>> correctly to nvdimm. How do the namespaces work? Right now pstore
>> depends on one of platform driver data, device tree specification, or
>> manual module parameters.
>
> From the userspace side we have the ndctl utility to wrap
> personalities on top of namespaces. So for example, I envision we
> would be able to do:
>
>     ndctl create-namespace --mode=pstore --size=128M
>
> ...and create a small namespace that will register with the pstore sub-system.
>
> On the kernel side this would involve registering a 'pstore_dev' child
> / seed device under each region device. The 'seed-device' sysfs scheme
> is described in our documentation [1]. The short summary is that ndctl
> finds a seed device, assigns a namespace to it, and then binding that
> device to a driver causes it to be initialized by the kernel.
>
> [1]: https://www.kernel.org/doc/Documentation/nvdimm/nvdimm.txt

Interesting!

Really, this would be a way to configure "ramoops" (the persistent RAM
backend to pstore), rather than pstore itself (pstore is just the
framework). From reading the ndctl man page it sounds like there isn't
a way to store configuration information beyond just size?

ramoops will auto-configure itself and fill available space using its
default parameters, but it might be nice to have a way to store that
somewhere (traditionally it's part of device tree or platform data).
ramoops could grow a "header", but normally the regions are very small
so I've avoided that.

I'm not sure I understand the right way to glue ramoops_probe() to the
"seed-device" stuff. (It needs to be probed VERY early to catch early
crashes -- ramoops uses postcore_initcall() normally.)

Thanks for the pointers!

-Kees

-- 
Kees Cook
Pixel Security


* Re: [PATCH] pstore/ram: Clarify resource reservation labels
  2018-10-18 20:31       ` Kees Cook
@ 2018-10-18 20:58         ` Ross Zwisler
  2018-10-18 21:20           ` Kees Cook
  2018-10-18 21:35         ` Dan Williams
  1 sibling, 1 reply; 15+ messages in thread
From: Ross Zwisler @ 2018-10-18 20:58 UTC (permalink / raw)
  To: keescook; +Cc: dan.j.williams, linux-kernel, anton, ccross, tony.luck, joel

On Thu, Oct 18, 2018 at 2:31 PM Kees Cook <keescook@chromium.org> wrote:
>
> On Thu, Oct 18, 2018 at 8:33 AM, Dan Williams <dan.j.williams@intel.com> wrote:
> > [ add Ross ]
>
> Hi Ross! :)
>
> > On Thu, Oct 18, 2018 at 12:15 AM Kees Cook <keescook@chromium.org> wrote:
> >> As for nvdimm specifically, yes, I'd love to get pstore hooked up
> >> correctly to nvdimm. How do the namespaces work? Right now pstore
> >> depends on one of platform driver data, device tree specification, or
> >> manual module parameters.
> >
> > From the userspace side we have the ndctl utility to wrap
> > personalities on top of namespaces. So for example, I envision we
> > would be able to do:
> >
> >     ndctl create-namespace --mode=pstore --size=128M
> >
> > ...and create a small namespace that will register with the pstore sub-system.
> >
> > On the kernel side this would involve registering a 'pstore_dev' child
> > / seed device under each region device. The 'seed-device' sysfs scheme
> > is described in our documentation [1]. The short summary is that ndctl
> > finds a seed device, assigns a namespace to it, and then binding that
> > device to a driver causes it to be initialized by the kernel.
> >
> > [1]: https://www.kernel.org/doc/Documentation/nvdimm/nvdimm.txt
>
> Interesting!
>
> Really, this would be a way to configure "ramoops" (the persistent RAM
> backend to pstore), rather than pstore itself (pstore is just the
> framework). From reading the ndctl man page it sounds like there isn't
> a way to store configuration information beyond just size?

Ramoops needs a start (mem_address), size (mem_size), and mapping type
(mem_type), right?  I think we get the first two for free from the
namespace itself, so really we'd just be looking for a way to
switch between cacheable/noncached memory?

> ramoops will auto-configure itself and fill available space using its
> default parameters, but it might be nice to have a way to store that
> somewhere (traditionally it's part of device tree or platform data).
> ramoops could grow a "header", but normally the regions are very small
> so I've avoided that.

Several of the other modes (BTT and DAX) have space for additional
metadata in their namespaces.  If we just need a single bit, though,
maybe we can grab that out of the "flags" field of the namespace
label.

http://pmem.io/documents/NVDIMM_Namespace_Spec.pdf, section 2.2.3.

Dan, is this workable or is there a better option?  Is it a useful
feature to have other types of namespaces be able to control their
caching attributes in this way?

> I'm not sure I understand the right way to glue ramoops_probe() to the
> "seed-device" stuff. (It needs to be probed VERY early to catch early
> crashes -- ramoops uses postcore_initcall() normally.)


* Re: [PATCH] pstore/ram: Clarify resource reservation labels
  2018-10-18 20:58         ` Ross Zwisler
@ 2018-10-18 21:20           ` Kees Cook
  0 siblings, 0 replies; 15+ messages in thread
From: Kees Cook @ 2018-10-18 21:20 UTC (permalink / raw)
  To: Ross Zwisler
  Cc: Dan Williams, LKML, Anton Vorontsov, Colin Cross, Tony Luck,
	Joel Fernandes

On Thu, Oct 18, 2018 at 1:58 PM, Ross Zwisler <zwisler@google.com> wrote:
> On Thu, Oct 18, 2018 at 2:31 PM Kees Cook <keescook@chromium.org> wrote:
>>
>> On Thu, Oct 18, 2018 at 8:33 AM, Dan Williams <dan.j.williams@intel.com> wrote:
>> > [ add Ross ]
>>
>> Hi Ross! :)
>>
>> > On Thu, Oct 18, 2018 at 12:15 AM Kees Cook <keescook@chromium.org> wrote:
>> >> As for nvdimm specifically, yes, I'd love to get pstore hooked up
>> >> correctly to nvdimm. How do the namespaces work? Right now pstore
>> >> depends on one of platform driver data, device tree specification, or
>> >> manual module parameters.
>> >
>> > From the userspace side we have the ndctl utility to wrap
>> > personalities on top of namespaces. So for example, I envision we
>> > would be able to do:
>> >
>> >     ndctl create-namespace --mode=pstore --size=128M
>> >
>> > ...and create a small namespace that will register with the pstore sub-system.
>> >
>> > On the kernel side this would involve registering a 'pstore_dev' child
>> > / seed device under each region device. The 'seed-device' sysfs scheme
>> > is described in our documentation [1]. The short summary is that ndctl
>> > finds a seed device, assigns a namespace to it, and then binding that
>> > device to a driver causes it to be initialized by the kernel.
>> >
>> > [1]: https://www.kernel.org/doc/Documentation/nvdimm/nvdimm.txt
>>
>> Interesting!
>>
>> Really, this would be a way to configure "ramoops" (the persistent RAM
>> backend to pstore), rather than pstore itself (pstore is just the
>> framework). From reading the ndctl man page it sounds like there isn't
>> a way to store configuration information beyond just size?
>
> Ramoops needs a start (mem_address), size (mem_size), and mapping type
> (mem_type), right?  I think we get the first two for free from the
> namespace itself, so really we'd just be looking for a way to
> switch between cacheable/noncached memory?

Start and size would be a good starting point, yes. That would let
automatic layout happen, which could be improved in the future.

mem_type just chooses between ioremap() and ioremap_wc() after the
request_mem_region():

        if (!request_mem_region(start, size, label ?: "ramoops")) {
                pr_err("request mem region (0x%llx@0x%llx) failed\n",
                        (unsigned long long)size, (unsigned long long)start);
                return NULL;
        }

        if (memtype)
                va = ioremap(start, size);
        else
                va = ioremap_wc(start, size);

Is this feature "knowable" during probe time? Traditionally all these
details had to be stored separately, but if the nvdimm core knows the
right answer, it could just pass the correct memtype during the
ramoops probe.

> Several of the other modes (BTT and DAX) have space for additional
> metadata in their namespaces.  If we just need a single bit, though,
> maybe we can grab that out of the "flags" field of the namespace
> label.

This feels a bit like a hack? If I want something better than command
line args for ramoops sub-region sizing, I'll probably build a
"header" region starting at mem_address with a magic number, version,
etc.

Thanks for your help!

-Kees

-- 
Kees Cook
Pixel Security


* Re: [PATCH] pstore/ram: Clarify resource reservation labels
  2018-10-18 20:31       ` Kees Cook
  2018-10-18 20:58         ` Ross Zwisler
@ 2018-10-18 21:35         ` Dan Williams
  2018-10-18 22:18           ` Kees Cook
  1 sibling, 1 reply; 15+ messages in thread
From: Dan Williams @ 2018-10-18 21:35 UTC (permalink / raw)
  To: Kees Cook
  Cc: Linux Kernel Mailing List, Anton Vorontsov, Colin Cross, Luck,
	Tony, joel, zwisler

On Thu, Oct 18, 2018 at 1:31 PM Kees Cook <keescook@chromium.org> wrote:
>
> On Thu, Oct 18, 2018 at 8:33 AM, Dan Williams <dan.j.williams@intel.com> wrote:
> > [ add Ross ]
>
> Hi Ross! :)
>
> > On Thu, Oct 18, 2018 at 12:15 AM Kees Cook <keescook@chromium.org> wrote:
> >> As for nvdimm specifically, yes, I'd love to get pstore hooked up
> >> correctly to nvdimm. How do the namespaces work? Right now pstore
> >> depends on one of platform driver data, device tree specification, or
> >> manual module parameters.
> >
> > From the userspace side we have the ndctl utility to wrap
> > personalities on top of namespaces. So for example, I envision we
> > would be able to do:
> >
> >     ndctl create-namespace --mode=pstore --size=128M
> >
> > ...and create a small namespace that will register with the pstore sub-system.
> >
> > On the kernel side this would involve registering a 'pstore_dev' child
> > / seed device under each region device. The 'seed-device' sysfs scheme
> > is described in our documentation [1]. The short summary is that ndctl
> > finds a seed device, assigns a namespace to it, and then binding that
> > device to a driver causes it to be initialized by the kernel.
> >
> > [1]: https://www.kernel.org/doc/Documentation/nvdimm/nvdimm.txt
>
> Interesting!
>
> Really, this would be a way to configure "ramoops" (the persistent RAM
> backend to pstore), rather than pstore itself (pstore is just the
> framework). From reading the ndctl man page it sounds like there isn't
> a way to store configuration information beyond just size?
>
> ramoops will auto-configure itself and fill available space using its
> default parameters, but it might be nice to have a way to store that
> somewhere (traditionally it's part of device tree or platform data).
> ramoops could grow a "header", but normally the regions are very small
> so I've avoided that.
>
> I'm not sure I understand the right way to glue ramoops_probe() to the
> "seed-device" stuff. (It needs to be probed VERY early to catch early
> crashes -- ramoops uses postcore_initcall() normally.)

Irk, yeah, that's early. On some configurations we can't delineate
namespaces until after ACPI has come up. Ideally the address range
would be reserved and communicated in the memory-map from the BIOS.

In EFI terms I think early ramoops is only suitable for
EfiACPIMemoryNVS, but we could certainly support a late-arriving
ramoops for EfiPersistentMemory with this proposed namespace scheme.

I cringe at users picking addresses because someone is going to enable
ramoops on top of their persistent memory namespace and wonder why
their filesystem got clobbered. Should attempts to specify an explicit
ramoops range that intersects EfiPersistentMemory fail by default? The
memmap=ss!nn parameter has burned us many times with users picking the
wrong address, so I'd be inclined to hide this ramoops sharp edge from
them.


* Re: [PATCH] pstore/ram: Clarify resource reservation labels
  2018-10-18 21:35         ` Dan Williams
@ 2018-10-18 22:18           ` Kees Cook
  2018-10-18 22:23             ` Dan Williams
  0 siblings, 1 reply; 15+ messages in thread
From: Kees Cook @ 2018-10-18 22:18 UTC (permalink / raw)
  To: Dan Williams
  Cc: Linux Kernel Mailing List, Anton Vorontsov, Colin Cross, Luck,
	Tony, Joel Fernandes, Ross Zwisler

On Thu, Oct 18, 2018 at 2:35 PM, Dan Williams <dan.j.williams@intel.com> wrote:
> On Thu, Oct 18, 2018 at 1:31 PM Kees Cook <keescook@chromium.org> wrote:
>>
>> On Thu, Oct 18, 2018 at 8:33 AM, Dan Williams <dan.j.williams@intel.com> wrote:
>> > [ add Ross ]
>>
>> Hi Ross! :)
>>
>> > On Thu, Oct 18, 2018 at 12:15 AM Kees Cook <keescook@chromium.org> wrote:
>> >> As for nvdimm specifically, yes, I'd love to get pstore hooked up
>> >> correctly to nvdimm. How do the namespaces work? Right now pstore
>> >> depends on one of platform driver data, device tree specification, or
>> >> manual module parameters.
>> >
>> > From the userspace side we have the ndctl utility to wrap
>> > personalities on top of namespaces. So for example, I envision we
>> > would be able to do:
>> >
>> >     ndctl create-namespace --mode=pstore --size=128M
>> >
>> > ...and create a small namespace that will register with the pstore sub-system.
>> >
>> > On the kernel side this would involve registering a 'pstore_dev' child
>> > / seed device under each region device. The 'seed-device' sysfs scheme
>> > is described in our documentation [1]. The short summary is that ndctl
>> > finds a seed device, assigns a namespace to it, and then binding that
>> > device to a driver causes it to be initialized by the kernel.
>> >
>> > [1]: https://www.kernel.org/doc/Documentation/nvdimm/nvdimm.txt
>>
>> Interesting!
>>
>> Really, this would be a way to configure "ramoops" (the persistent RAM
>> backend to pstore), rather than pstore itself (pstore is just the
>> framework). From reading the ndctl man page it sounds like there isn't
>> a way to store configuration information beyond just size?
>>
>> ramoops will auto-configure itself and fill available space using its
>> default parameters, but it might be nice to have a way to store that
>> somewhere (traditionally it's part of device tree or platform data).
>> ramoops could grow a "header", but normally the regions are very small
>> so I've avoided that.
>>
>> I'm not sure I understand the right way to glue ramoops_probe() to the
>> "seed-device" stuff. (It needs to be probed VERY early to catch early
>> crashes -- ramoops uses postcore_initcall() normally.)
>
> Irk, yeah, that's early. On some configurations we can't delineate
> namespaces until after ACPI has come up. Ideally the address range
> would be reserved and communicated in the memory-map from the BIOS.

Yeah, I'm wondering if I should introduce a mode for ramoops where it
walks the memory regions looking for persistent ram areas, and uses
the first available. Something like "ramoops.mem_address=first
ramoops.mem_size=NNN"

> I cringe at users picking addresses because someone is going to enable
> ramoops on top of their persistent memory namespace and wonder why
> their filesystem got clobbered. Should attempts to specify an explicit
> ramoops range that intersects EfiPersistentMemory fail by default? The
> memmap=ss!nn parameter has burned us many times with users picking the
> wrong address, so I'd be inclined to hide this ramoops sharp edge from
> them.

Yeah, this is what I'm trying to solve. I'd like ramoops to find the
address itself, but it has to do it really early, so if I can't have
nvdimm handle it directly, will having regions already allocated with
request_mem_region() "get along" with the rest of nvdimm?

-Kees

-- 
Kees Cook
Pixel Security


* Re: [PATCH] pstore/ram: Clarify resource reservation labels
  2018-10-18 22:18           ` Kees Cook
@ 2018-10-18 22:23             ` Dan Williams
  2018-10-18 22:26               ` Kees Cook
  0 siblings, 1 reply; 15+ messages in thread
From: Dan Williams @ 2018-10-18 22:23 UTC (permalink / raw)
  To: Kees Cook
  Cc: Linux Kernel Mailing List, Anton Vorontsov, Colin Cross, Luck,
	Tony, joel, zwisler

On Thu, Oct 18, 2018 at 3:19 PM Kees Cook <keescook@chromium.org> wrote:
>
> On Thu, Oct 18, 2018 at 2:35 PM, Dan Williams <dan.j.williams@intel.com> wrote:
> > On Thu, Oct 18, 2018 at 1:31 PM Kees Cook <keescook@chromium.org> wrote:
[..]
> > I cringe at users picking addresses because someone is going to enable
> > ramoops on top of their persistent memory namespace and wonder why
> > their filesystem got clobbered. Should attempts to specify an explicit
> > ramoops range that intersects EfiPersistentMemory fail by default? The
> > memmap=ss!nn parameter has burned us many times with users picking the
> > wrong address, so I'd be inclined to hide this ramoops sharp edge from
> > them.
>
> Yeah, this is what I'm trying to solve. I'd like ramoops to find the
> address itself, but it has to do it really early, so if I can't have
> nvdimm handle it directly, will having regions already allocated with
> request_mem_region() "get along" with the rest of nvdimm?

If the filesystem existed on the namespace before the user specified
the ramoops command line then ramoops will clobber the filesystem and
the user will only find out when mount later fails. All the kernel
will say is:

    dev_warn(dev, "could not reserve region %pR\n", res);

...from the pmem driver, and then the only way to figure out who the
conflict is with is to look at /proc/iomem, but the damage is already
likely done by that point.


* Re: [PATCH] pstore/ram: Clarify resource reservation labels
  2018-10-18 22:23             ` Dan Williams
@ 2018-10-18 22:26               ` Kees Cook
  2018-10-18 22:33                 ` Dan Williams
  2018-10-18 22:43                 ` Luck, Tony
  0 siblings, 2 replies; 15+ messages in thread
From: Kees Cook @ 2018-10-18 22:26 UTC (permalink / raw)
  To: Dan Williams
  Cc: Linux Kernel Mailing List, Anton Vorontsov, Colin Cross, Luck,
	Tony, Joel Fernandes, Ross Zwisler

On Thu, Oct 18, 2018 at 3:23 PM, Dan Williams <dan.j.williams@intel.com> wrote:
> On Thu, Oct 18, 2018 at 3:19 PM Kees Cook <keescook@chromium.org> wrote:
>>
>> On Thu, Oct 18, 2018 at 2:35 PM, Dan Williams <dan.j.williams@intel.com> wrote:
>> > On Thu, Oct 18, 2018 at 1:31 PM Kees Cook <keescook@chromium.org> wrote:
> [..]
>> > I cringe at users picking addresses because someone is going to enable
>> > ramoops on top of their persistent memory namespace and wonder why
>> > their filesystem got clobbered. Should attempts to specify an explicit
>> > ramoops range that intersects EfiPersistentMemory fail by default? The
>> > memmap=ss!nn parameter has burned us many times with users picking the
>> > wrong address, so I'd be inclined to hide this ramoops sharp edge from
>> > them.
>>
>> Yeah, this is what I'm trying to solve. I'd like ramoops to find the
>> address itself, but it has to do it really early, so if I can't have
>> nvdimm handle it directly, will having regions already allocated with
>> request_mem_region() "get along" with the rest of nvdimm?
>
> If the filesystem existed on the namespace before the user specified
> the ramoops command line then ramoops will clobber the filesystem and
> the user will only find out when mount later fails. All the kernel
> will say is:
>
>     dev_warn(dev, "could not reserve region %pR\n", res);
>
> ...from the pmem driver, and then the only way to figure out who the
> conflict is with is to look at /proc/iomem, but the damage is already
> likely done by that point.

Yeah, bleh. Okay, well, let's just skip this for now, since ramoops
doesn't do _anything_ with pmem now. No need to go crazy right from
the start. Instead, let's make it work "normally", and if someone
needs it for very early boot, they can manually enter the mem_address.

How should I attach a ramoops_probe() call to pmem?

-Kees

-- 
Kees Cook
Pixel Security


* Re: [PATCH] pstore/ram: Clarify resource reservation labels
  2018-10-18 22:26               ` Kees Cook
@ 2018-10-18 22:33                 ` Dan Williams
  2018-10-18 22:58                   ` Kees Cook
  2018-10-18 22:43                 ` Luck, Tony
  1 sibling, 1 reply; 15+ messages in thread
From: Dan Williams @ 2018-10-18 22:33 UTC (permalink / raw)
  To: Kees Cook
  Cc: Linux Kernel Mailing List, Anton Vorontsov, Colin Cross, Luck,
	Tony, joel, zwisler

On Thu, Oct 18, 2018 at 3:26 PM Kees Cook <keescook@chromium.org> wrote:
>
> On Thu, Oct 18, 2018 at 3:23 PM, Dan Williams <dan.j.williams@intel.com> wrote:
> > On Thu, Oct 18, 2018 at 3:19 PM Kees Cook <keescook@chromium.org> wrote:
> >>
> >> On Thu, Oct 18, 2018 at 2:35 PM, Dan Williams <dan.j.williams@intel.com> wrote:
> >> > On Thu, Oct 18, 2018 at 1:31 PM Kees Cook <keescook@chromium.org> wrote:
> > [..]
> >> > I cringe at users picking addresses because someone is going to enable
> >> > ramoops on top of their persistent memory namespace and wonder why
> >> > their filesystem got clobbered. Should attempts to specify an explicit
> >> > ramoops range that intersects EfiPersistentMemory fail by default? The
> >> > memmap=ss!nn parameter has burned us many times with users picking the
> >> > wrong address, so I'd be inclined to hide this ramoops sharp edge from
> >> > them.
> >>
> >> Yeah, this is what I'm trying to solve. I'd like ramoops to find the
> >> address itself, but it has to do it really early, so if I can't have
> >> nvdimm handle it directly, will having regions already allocated with
> >> request_mem_region() "get along" with the rest of nvdimm?
> >
> > If the filesystem existed on the namespace before the user specified
> > the ramoops command line then ramoops will clobber the filesystem and
> > the user will only find out when mount later fails. All the kernel
> > will say is:
> >
> >     dev_warn(dev, "could not reserve region %pR\n", res);
> >
> > ...from the pmem driver, and then the only way to figure out who the
> > conflict is with is to look at /proc/iomem, but the damage is already
> > likely done by that point.
>
> Yeah, bleh. Okay, well, let's just skip this for now, since ramoops
> doesn't do _anything_ with pmem now. No need to go crazy right from
> the start. Instead, let's make it work "normally", and if someone
> needs it for very early boot, they can manually enter the mem_address.
>
> How should I attach a ramoops_probe() call to pmem?

To me this looks like it would be an nvdimm glue driver whose entire
job is to attach to the namespace, fill out some
ramoops_platform_data, and then register a "ramoops" platform_device
for the ramoops driver to find.
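
Roughly, as a sketch (the nd_pstore naming and the direct use of the
namespace resource are illustrative, not a settled API):

        /* Glue driver: derive ramoops_platform_data from the namespace
         * geometry and register a "ramoops" platform device for the
         * ramoops platform driver to bind against. Error handling and
         * teardown omitted for brevity.
         */
        static int nd_pstore_probe(struct device *dev)
        {
                struct nd_namespace_io *nsio = to_nd_namespace_io(dev);
                struct ramoops_platform_data pdata = {
                        .mem_address    = nsio->res.start,
                        .mem_size       = resource_size(&nsio->res),
                        .dump_oops      = 1,
                };
                struct platform_device *pdev;

                pdev = platform_device_register_data(dev, "ramoops", -1,
                                                     &pdata, sizeof(pdata));
                return PTR_ERR_OR_ZERO(pdev);
        }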


* RE: [PATCH] pstore/ram: Clarify resource reservation labels
  2018-10-18 22:26               ` Kees Cook
  2018-10-18 22:33                 ` Dan Williams
@ 2018-10-18 22:43                 ` Luck, Tony
  2018-10-18 22:49                   ` Dan Williams
  1 sibling, 1 reply; 15+ messages in thread
From: Luck, Tony @ 2018-10-18 22:43 UTC (permalink / raw)
  To: Kees Cook, Williams, Dan J
  Cc: Linux Kernel Mailing List, Anton Vorontsov, Colin Cross,
	Joel Fernandes, Ross Zwisler

> If the filesystem existed on the namespace before the user specified
> the ramoops command line then ramoops will clobber the filesystem and
> the user will only find out when mount later fails. All the kernel
> will say is:
>
>     dev_warn(dev, "could not reserve region %pR\n", res);
>
> ...from the pmem driver, and then the only way to figure out who the
> conflict is with is to look at /proc/iomem, but the damage is already
> likely done by that point.

When you set up a ramoops region in pmem, write a magic header block
at the start of the area.  Then when pstore/ramoops goes to find the
region, it checks for the magic header.  Not there? Don't write to the
region. Problem solved.
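
Something like this, say (header layout and magic value made up for
illustration):

        #define RAMOOPS_PMEM_MAGIC      0x704d4f52      /* made-up value */

        struct ramoops_pmem_header {
                u32     magic;
                u32     version;
                u64     region_size;
        };

        /* Refuse to touch the region unless setup already stamped it. */
        static bool ramoops_header_valid(const struct ramoops_pmem_header *hdr,
                                         size_t region_size)
        {
                return hdr->magic == RAMOOPS_PMEM_MAGIC &&
                       hdr->version == 1 &&
                       hdr->region_size == region_size;
        }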

-Tony


* Re: [PATCH] pstore/ram: Clarify resource reservation labels
  2018-10-18 22:43                 ` Luck, Tony
@ 2018-10-18 22:49                   ` Dan Williams
  0 siblings, 0 replies; 15+ messages in thread
From: Dan Williams @ 2018-10-18 22:49 UTC (permalink / raw)
  To: Luck, Tony
  Cc: Kees Cook, Linux Kernel Mailing List, Anton Vorontsov,
	Colin Cross, joel, zwisler

On Thu, Oct 18, 2018 at 3:43 PM Luck, Tony <tony.luck@intel.com> wrote:
>
> > If the filesystem existed on the namespace before the user specified
> > the ramoops command line then ramoops will clobber the filesystem and
> > the user will only find out when mount later fails. All the kernel
> > will say is:
> >
> >     dev_warn(dev, "could not reserve region %pR\n", res);
> >
> > ...from the pmem driver, and then the only way to figure out who the
> > conflict is with is to look at /proc/iomem, but the damage is already
> > likely done by that point.
>
> When you set up a ramoops region in pmem, write a magic header block
> at the start of the area.  Then when pstore/ramoops goes to find the
> region, it checks for the magic header.  Not there? Don't write to the
> region. Problem solved.

That's effectively what this registration proposal will do, with the
caveat that the user never gets to write that magic header without
going through the nvdimm infrastructure to set it up and assert that
nothing is there already.


* Re: [PATCH] pstore/ram: Clarify resource reservation labels
  2018-10-18 22:33                 ` Dan Williams
@ 2018-10-18 22:58                   ` Kees Cook
  0 siblings, 0 replies; 15+ messages in thread
From: Kees Cook @ 2018-10-18 22:58 UTC (permalink / raw)
  To: Dan Williams
  Cc: Linux Kernel Mailing List, Anton Vorontsov, Colin Cross, Luck,
	Tony, Joel Fernandes, Ross Zwisler

On Thu, Oct 18, 2018 at 3:33 PM, Dan Williams <dan.j.williams@intel.com> wrote:
> On Thu, Oct 18, 2018 at 3:26 PM Kees Cook <keescook@chromium.org> wrote:
>>
>> On Thu, Oct 18, 2018 at 3:23 PM, Dan Williams <dan.j.williams@intel.com> wrote:
>> > On Thu, Oct 18, 2018 at 3:19 PM Kees Cook <keescook@chromium.org> wrote:
>> >>
>> >> On Thu, Oct 18, 2018 at 2:35 PM, Dan Williams <dan.j.williams@intel.com> wrote:
>> >> > On Thu, Oct 18, 2018 at 1:31 PM Kees Cook <keescook@chromium.org> wrote:
>> > [..]
>> >> > I cringe at users picking addresses because someone is going to enable
>> >> > ramoops on top of their persistent memory namespace and wonder why
>> >> > their filesystem got clobbered. Should attempts to specify an explicit
>> >> > ramoops range that intersects EfiPersistentMemory fail by default? The
>> >> > memmap=ss!nn parameter has burned us many times with users picking the
>> >> > wrong address, so I'd be inclined to hide this ramoops sharp edge from
>> >> > them.
>> >>
>> >> Yeah, this is what I'm trying to solve. I'd like ramoops to find the
>> >> address itself, but it has to do it really early, so if I can't have
>> >> nvdimm handle it directly, will having regions already allocated with
>> >> request_mem_region() "get along" with the rest of nvdimm?
>> >
>> > If the filesystem existed on the namespace before the user specified
>> > the ramoops command line then ramoops will clobber the filesystem and
>> > the user will only find out when mount later fails. All the kernel
>> > will say is:
>> >
>> >     dev_warn(dev, "could not reserve region %pR\n", res);
>> >
>> > ...from the pmem driver, and then the only way to figure out who the
>> > conflict is with is to look at /proc/iomem, but the damage is already
>> > likely done by that point.
>>
>> Yeah, bleh. Okay, well, let's just skip this for now, since ramoops
>> doesn't do _anything_ with pmem now. No need to go crazy right from
>> the start. Instead, let's make it work "normally", and if someone
>> needs it for very early boot, they can manually enter the mem_address.
>>
>> How should I attach a ramoops_probe() call to pmem?
>
> To me this looks like it would be an nvdimm glue driver whose entire
> job is to attach to the namespace, fill out some
> ramoops_platform_data, and then register a "ramoops" platform_device
> for the ramoops driver to find.

That sounds right, yes. I'm happy to help review/test/etc.

-Kees

-- 
Kees Cook
Pixel Security


