linux-nvdimm.lists.01.org archive mirror
* [PATCH v3] libnvdimm/nsio: differentiate between probe mapping and runtime mapping
@ 2019-10-17  7:33 Aneesh Kumar K.V
  2019-10-24  2:06 ` Dan Williams
  0 siblings, 1 reply; 7+ messages in thread
From: Aneesh Kumar K.V @ 2019-10-17  7:33 UTC (permalink / raw)
  To: dan.j.williams; +Cc: linux-nvdimm, Aneesh Kumar K.V

nvdimm core currently maps the full namespace to an ioremap range
while probing the namespace mode. This can result in probe failures
on architectures that have limited ioremap space.

For example, with a large btt namespace that consumes most of I/O remap
range, depending on the sequence of namespace initialization, the user can find
a pfn namespace initialization failure due to unavailable I/O remap space
which nvdimm core uses for temporary mapping.

nvdimm core can avoid this failure by only mapping the reserver block area to
check for pfn superblock type and map the full namespace resource only before
using the namespace.

Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>
---
Changes from v2:
* update changelog

Changes from V1:
* update changelog
* update patch based on review feedback.

 drivers/dax/pmem/core.c   |  2 +-
 drivers/nvdimm/claim.c    |  7 +++----
 drivers/nvdimm/nd.h       |  4 ++--
 drivers/nvdimm/pfn.h      |  6 ++++++
 drivers/nvdimm/pfn_devs.c |  5 -----
 drivers/nvdimm/pmem.c     | 15 ++++++++++++---
 6 files changed, 24 insertions(+), 15 deletions(-)

diff --git a/drivers/dax/pmem/core.c b/drivers/dax/pmem/core.c
index 6eb6dfdf19bf..f174dbfbe1c4 100644
--- a/drivers/dax/pmem/core.c
+++ b/drivers/dax/pmem/core.c
@@ -28,7 +28,7 @@ struct dev_dax *__dax_pmem_probe(struct device *dev, enum dev_dax_subsys subsys)
 	nsio = to_nd_namespace_io(&ndns->dev);
 
 	/* parse the 'pfn' info block via ->rw_bytes */
-	rc = devm_nsio_enable(dev, nsio);
+	rc = devm_nsio_enable(dev, nsio, info_block_reserve());
 	if (rc)
 		return ERR_PTR(rc);
 	rc = nvdimm_setup_pfn(nd_pfn, &pgmap);
diff --git a/drivers/nvdimm/claim.c b/drivers/nvdimm/claim.c
index 2985ca949912..d89d2c039e25 100644
--- a/drivers/nvdimm/claim.c
+++ b/drivers/nvdimm/claim.c
@@ -300,12 +300,12 @@ static int nsio_rw_bytes(struct nd_namespace_common *ndns,
 	return rc;
 }
 
-int devm_nsio_enable(struct device *dev, struct nd_namespace_io *nsio)
+int devm_nsio_enable(struct device *dev, struct nd_namespace_io *nsio, unsigned long size)
 {
 	struct resource *res = &nsio->res;
 	struct nd_namespace_common *ndns = &nsio->common;
 
-	nsio->size = resource_size(res);
+	nsio->size = size;
 	if (!devm_request_mem_region(dev, res->start, resource_size(res),
 				dev_name(&ndns->dev))) {
 		dev_warn(dev, "could not reserve region %pR\n", res);
@@ -318,8 +318,7 @@ int devm_nsio_enable(struct device *dev, struct nd_namespace_io *nsio)
 	nvdimm_badblocks_populate(to_nd_region(ndns->dev.parent), &nsio->bb,
 			&nsio->res);
 
-	nsio->addr = devm_memremap(dev, res->start, resource_size(res),
-			ARCH_MEMREMAP_PMEM);
+	nsio->addr = devm_memremap(dev, res->start, size, ARCH_MEMREMAP_PMEM);
 
 	return PTR_ERR_OR_ZERO(nsio->addr);
 }
diff --git a/drivers/nvdimm/nd.h b/drivers/nvdimm/nd.h
index ee5c04070ef9..93d3c760c0f3 100644
--- a/drivers/nvdimm/nd.h
+++ b/drivers/nvdimm/nd.h
@@ -376,7 +376,7 @@ void nvdimm_badblocks_populate(struct nd_region *nd_region,
 #define MAX_STRUCT_PAGE_SIZE 64
 
 int nvdimm_setup_pfn(struct nd_pfn *nd_pfn, struct dev_pagemap *pgmap);
-int devm_nsio_enable(struct device *dev, struct nd_namespace_io *nsio);
+int devm_nsio_enable(struct device *dev, struct nd_namespace_io *nsio, unsigned long size);
 void devm_nsio_disable(struct device *dev, struct nd_namespace_io *nsio);
 #else
 static inline int nvdimm_setup_pfn(struct nd_pfn *nd_pfn,
@@ -385,7 +385,7 @@ static inline int nvdimm_setup_pfn(struct nd_pfn *nd_pfn,
 	return -ENXIO;
 }
 static inline int devm_nsio_enable(struct device *dev,
-		struct nd_namespace_io *nsio)
+		struct nd_namespace_io *nsio, unsigned long size)
 {
 	return -ENXIO;
 }
diff --git a/drivers/nvdimm/pfn.h b/drivers/nvdimm/pfn.h
index acb19517f678..f4856c87d01c 100644
--- a/drivers/nvdimm/pfn.h
+++ b/drivers/nvdimm/pfn.h
@@ -36,4 +36,10 @@ struct nd_pfn_sb {
 	__le64 checksum;
 };
 
+static inline u32 info_block_reserve(void)
+{
+	return ALIGN(SZ_8K, PAGE_SIZE);
+}
+
+
 #endif /* __NVDIMM_PFN_H */
diff --git a/drivers/nvdimm/pfn_devs.c b/drivers/nvdimm/pfn_devs.c
index 60d81fae06ee..e49aa9a0fd04 100644
--- a/drivers/nvdimm/pfn_devs.c
+++ b/drivers/nvdimm/pfn_devs.c
@@ -635,11 +635,6 @@ int nd_pfn_probe(struct device *dev, struct nd_namespace_common *ndns)
 }
 EXPORT_SYMBOL(nd_pfn_probe);
 
-static u32 info_block_reserve(void)
-{
-	return ALIGN(SZ_8K, PAGE_SIZE);
-}
-
 /*
  * We hotplug memory at sub-section granularity, pad the reserved area
  * from the previous section base to the namespace base address.
diff --git a/drivers/nvdimm/pmem.c b/drivers/nvdimm/pmem.c
index f9f76f6ba07b..3c188ffeff11 100644
--- a/drivers/nvdimm/pmem.c
+++ b/drivers/nvdimm/pmem.c
@@ -491,17 +491,26 @@ static int pmem_attach_disk(struct device *dev,
 static int nd_pmem_probe(struct device *dev)
 {
 	int ret;
+	struct nd_namespace_io *nsio;
 	struct nd_namespace_common *ndns;
 
 	ndns = nvdimm_namespace_common_probe(dev);
 	if (IS_ERR(ndns))
 		return PTR_ERR(ndns);
 
-	if (devm_nsio_enable(dev, to_nd_namespace_io(&ndns->dev)))
-		return -ENXIO;
+	nsio = to_nd_namespace_io(&ndns->dev);
 
-	if (is_nd_btt(dev))
+	if (is_nd_btt(dev)) {
+		/*
+		 * Map with resource size
+		 */
+		if (devm_nsio_enable(dev, nsio, resource_size(&nsio->res)))
+			return -ENXIO;
 		return nvdimm_namespace_attach_btt(ndns);
+	}
+
+	if (devm_nsio_enable(dev, nsio, info_block_reserve()))
+		return -ENXIO;
 
 	if (is_nd_pfn(dev))
 		return pmem_attach_disk(dev, ndns);
-- 
2.21.0

^ permalink raw reply related	[flat|nested] 7+ messages in thread

* Re: [PATCH v3] libnvdimm/nsio: differentiate between probe mapping and runtime mapping
  2019-10-17  7:33 [PATCH v3] libnvdimm/nsio: differentiate between probe mapping and runtime mapping Aneesh Kumar K.V
@ 2019-10-24  2:06 ` Dan Williams
  2019-10-24  9:07   ` Aneesh Kumar K.V
  0 siblings, 1 reply; 7+ messages in thread
From: Dan Williams @ 2019-10-24  2:06 UTC (permalink / raw)
  To: Aneesh Kumar K.V; +Cc: linux-nvdimm

On Thu, Oct 17, 2019 at 12:33 AM Aneesh Kumar K.V
<aneesh.kumar@linux.ibm.com> wrote:
>
> nvdimm core currently maps the full namespace to an ioremap range
> while probing the namespace mode. This can result in probe failures
> on architectures that have limited ioremap space.
>
> For example, with a large btt namespace that consumes most of I/O remap
> range, depending on the sequence of namespace initialization, the user can find
> a pfn namespace initialization failure due to unavailable I/O remap space
> which nvdimm core uses for temporary mapping.
>
> nvdimm core can avoid this failure by only mapping the reserver block area to

s/reserver/reserved/

> check for pfn superblock type and map the full namespace resource only before
> using the namespace.

Are you going to follow up with the BTT patch that uses this new facility?

>
> Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>
> ---
> Changes from v2:
> * update changelog
>
> Changes from V1:
> * update changelog
> * update patch based on review feedback.
>
>  drivers/dax/pmem/core.c   |  2 +-
>  drivers/nvdimm/claim.c    |  7 +++----
>  drivers/nvdimm/nd.h       |  4 ++--
>  drivers/nvdimm/pfn.h      |  6 ++++++
>  drivers/nvdimm/pfn_devs.c |  5 -----
>  drivers/nvdimm/pmem.c     | 15 ++++++++++++---
>  6 files changed, 24 insertions(+), 15 deletions(-)
>
> diff --git a/drivers/dax/pmem/core.c b/drivers/dax/pmem/core.c
> index 6eb6dfdf19bf..f174dbfbe1c4 100644
> --- a/drivers/dax/pmem/core.c
> +++ b/drivers/dax/pmem/core.c
> @@ -28,7 +28,7 @@ struct dev_dax *__dax_pmem_probe(struct device *dev, enum dev_dax_subsys subsys)
>         nsio = to_nd_namespace_io(&ndns->dev);
>
>         /* parse the 'pfn' info block via ->rw_bytes */
> -       rc = devm_nsio_enable(dev, nsio);
> +       rc = devm_nsio_enable(dev, nsio, info_block_reserve());
>         if (rc)
>                 return ERR_PTR(rc);
>         rc = nvdimm_setup_pfn(nd_pfn, &pgmap);
> diff --git a/drivers/nvdimm/claim.c b/drivers/nvdimm/claim.c
> index 2985ca949912..d89d2c039e25 100644
> --- a/drivers/nvdimm/claim.c
> +++ b/drivers/nvdimm/claim.c
> @@ -300,12 +300,12 @@ static int nsio_rw_bytes(struct nd_namespace_common *ndns,
>         return rc;
>  }
>
> -int devm_nsio_enable(struct device *dev, struct nd_namespace_io *nsio)
> +int devm_nsio_enable(struct device *dev, struct nd_namespace_io *nsio, unsigned long size)
>  {
>         struct resource *res = &nsio->res;
>         struct nd_namespace_common *ndns = &nsio->common;
>
> -       nsio->size = resource_size(res);
> +       nsio->size = size;

This needs a:

if (nsio->size)
   devm_memunmap(dev, nsio->addr);


>         if (!devm_request_mem_region(dev, res->start, resource_size(res),
>                                 dev_name(&ndns->dev))) {

Also don't repeat the devm_request_mem_region() if one was already done.

Another thing to check is whether nsio->size gets cleared when the
namespace is disabled; if not, that will need to be added in the
shutdown path.
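
For illustration only, the kind of guard being suggested could sit at
the top of devm_nsio_enable(), folded in place of the existing
devm_request_mem_region() block, roughly like this (untested sketch
using the function's existing locals, not part of the posted patch):

	if (nsio->size) {
		/* a previous probe-time enable already mapped part of this */
		devm_memunmap(dev, nsio->addr);
	} else if (!devm_request_mem_region(dev, res->start,
				resource_size(res), dev_name(&ndns->dev))) {
		dev_warn(dev, "could not reserve region %pR\n", res);
		return -EBUSY;
	}
	nsio->size = size;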


>                 dev_warn(dev, "could not reserve region %pR\n", res);
> @@ -318,8 +318,7 @@ int devm_nsio_enable(struct device *dev, struct nd_namespace_io *nsio)
>         nvdimm_badblocks_populate(to_nd_region(ndns->dev.parent), &nsio->bb,
>                         &nsio->res);
>
> -       nsio->addr = devm_memremap(dev, res->start, resource_size(res),
> -                       ARCH_MEMREMAP_PMEM);
> +       nsio->addr = devm_memremap(dev, res->start, size, ARCH_MEMREMAP_PMEM);
>
>         return PTR_ERR_OR_ZERO(nsio->addr);
>  }
> diff --git a/drivers/nvdimm/nd.h b/drivers/nvdimm/nd.h
> index ee5c04070ef9..93d3c760c0f3 100644
> --- a/drivers/nvdimm/nd.h
> +++ b/drivers/nvdimm/nd.h
> @@ -376,7 +376,7 @@ void nvdimm_badblocks_populate(struct nd_region *nd_region,
>  #define MAX_STRUCT_PAGE_SIZE 64
>
>  int nvdimm_setup_pfn(struct nd_pfn *nd_pfn, struct dev_pagemap *pgmap);
> -int devm_nsio_enable(struct device *dev, struct nd_namespace_io *nsio);
> +int devm_nsio_enable(struct device *dev, struct nd_namespace_io *nsio, unsigned long size);
>  void devm_nsio_disable(struct device *dev, struct nd_namespace_io *nsio);
>  #else
>  static inline int nvdimm_setup_pfn(struct nd_pfn *nd_pfn,
> @@ -385,7 +385,7 @@ static inline int nvdimm_setup_pfn(struct nd_pfn *nd_pfn,
>         return -ENXIO;
>  }
>  static inline int devm_nsio_enable(struct device *dev,
> -               struct nd_namespace_io *nsio)
> +               struct nd_namespace_io *nsio, unsigned long size)
>  {
>         return -ENXIO;
>  }
> diff --git a/drivers/nvdimm/pfn.h b/drivers/nvdimm/pfn.h
> index acb19517f678..f4856c87d01c 100644
> --- a/drivers/nvdimm/pfn.h
> +++ b/drivers/nvdimm/pfn.h
> @@ -36,4 +36,10 @@ struct nd_pfn_sb {
>         __le64 checksum;
>  };
>
> +static inline u32 info_block_reserve(void)
> +{
> +       return ALIGN(SZ_8K, PAGE_SIZE);
> +}
> +
> +
>  #endif /* __NVDIMM_PFN_H */
> diff --git a/drivers/nvdimm/pfn_devs.c b/drivers/nvdimm/pfn_devs.c
> index 60d81fae06ee..e49aa9a0fd04 100644
> --- a/drivers/nvdimm/pfn_devs.c
> +++ b/drivers/nvdimm/pfn_devs.c
> @@ -635,11 +635,6 @@ int nd_pfn_probe(struct device *dev, struct nd_namespace_common *ndns)
>  }
>  EXPORT_SYMBOL(nd_pfn_probe);
>
> -static u32 info_block_reserve(void)
> -{
> -       return ALIGN(SZ_8K, PAGE_SIZE);
> -}
> -
>  /*
>   * We hotplug memory at sub-section granularity, pad the reserved area
>   * from the previous section base to the namespace base address.
> diff --git a/drivers/nvdimm/pmem.c b/drivers/nvdimm/pmem.c
> index f9f76f6ba07b..3c188ffeff11 100644
> --- a/drivers/nvdimm/pmem.c
> +++ b/drivers/nvdimm/pmem.c
> @@ -491,17 +491,26 @@ static int pmem_attach_disk(struct device *dev,
>  static int nd_pmem_probe(struct device *dev)
>  {
>         int ret;
> +       struct nd_namespace_io *nsio;
>         struct nd_namespace_common *ndns;
>
>         ndns = nvdimm_namespace_common_probe(dev);
>         if (IS_ERR(ndns))
>                 return PTR_ERR(ndns);
>
> -       if (devm_nsio_enable(dev, to_nd_namespace_io(&ndns->dev)))
> -               return -ENXIO;
> +       nsio = to_nd_namespace_io(&ndns->dev);
>
> -       if (is_nd_btt(dev))
> +       if (is_nd_btt(dev)) {
> +               /*
> +                * Map with resource size
> +                */
> +               if (devm_nsio_enable(dev, nsio, resource_size(&nsio->res)))
> +                       return -ENXIO;
>                 return nvdimm_namespace_attach_btt(ndns);
> +       }
> +
> +       if (devm_nsio_enable(dev, nsio, info_block_reserve()))
> +               return -ENXIO;
>
>         if (is_nd_pfn(dev))
>                 return pmem_attach_disk(dev, ndns);
> --
> 2.21.0
>

^ permalink raw reply	[flat|nested] 7+ messages in thread

* Re: [PATCH v3] libnvdimm/nsio: differentiate between probe mapping and runtime mapping
  2019-10-24  2:06 ` Dan Williams
@ 2019-10-24  9:07   ` Aneesh Kumar K.V
  2019-10-30 23:33     ` Dan Williams
  0 siblings, 1 reply; 7+ messages in thread
From: Aneesh Kumar K.V @ 2019-10-24  9:07 UTC (permalink / raw)
  To: Dan Williams; +Cc: linux-nvdimm

On 10/24/19 7:36 AM, Dan Williams wrote:
> On Thu, Oct 17, 2019 at 12:33 AM Aneesh Kumar K.V
> <aneesh.kumar@linux.ibm.com> wrote:
>>
>> nvdimm core currently maps the full namespace to an ioremap range
>> while probing the namespace mode. This can result in probe failures
>> on architectures that have limited ioremap space.
>>
>> For example, with a large btt namespace that consumes most of I/O remap
>> range, depending on the sequence of namespace initialization, the user can find
>> a pfn namespace initialization failure due to unavailable I/O remap space
>> which nvdimm core uses for temporary mapping.
>>
>> nvdimm core can avoid this failure by only mapping the reserver block area to
> 
> s/reserver/reserved/

Will fix


> 
>> check for pfn superblock type and map the full namespace resource only before
>> using the namespace.
> 
> Are you going to follow up with the BTT patch that uses this new facility?
> 

I am not yet sure about that. The ioremap/vmap area is the way we get a 
kernel mapping without struct page backing. What we are suggesting here 
is the ability to have a direct-mapped mapping without struct page. I 
need to look closely at what that implies.

>>
>> Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>
>> ---
>> Changes from v2:
>> * update changelog
>>
>> Changes from V1:
>> * update changelog
>> * update patch based on review feedback.
>>
>>   drivers/dax/pmem/core.c   |  2 +-
>>   drivers/nvdimm/claim.c    |  7 +++----
>>   drivers/nvdimm/nd.h       |  4 ++--
>>   drivers/nvdimm/pfn.h      |  6 ++++++
>>   drivers/nvdimm/pfn_devs.c |  5 -----
>>   drivers/nvdimm/pmem.c     | 15 ++++++++++++---
>>   6 files changed, 24 insertions(+), 15 deletions(-)
>>
>> diff --git a/drivers/dax/pmem/core.c b/drivers/dax/pmem/core.c
>> index 6eb6dfdf19bf..f174dbfbe1c4 100644
>> --- a/drivers/dax/pmem/core.c
>> +++ b/drivers/dax/pmem/core.c
>> @@ -28,7 +28,7 @@ struct dev_dax *__dax_pmem_probe(struct device *dev, enum dev_dax_subsys subsys)
>>          nsio = to_nd_namespace_io(&ndns->dev);
>>
>>          /* parse the 'pfn' info block via ->rw_bytes */
>> -       rc = devm_nsio_enable(dev, nsio);
>> +       rc = devm_nsio_enable(dev, nsio, info_block_reserve());
>>          if (rc)
>>                  return ERR_PTR(rc);
>>          rc = nvdimm_setup_pfn(nd_pfn, &pgmap);
>> diff --git a/drivers/nvdimm/claim.c b/drivers/nvdimm/claim.c
>> index 2985ca949912..d89d2c039e25 100644
>> --- a/drivers/nvdimm/claim.c
>> +++ b/drivers/nvdimm/claim.c
>> @@ -300,12 +300,12 @@ static int nsio_rw_bytes(struct nd_namespace_common *ndns,
>>          return rc;
>>   }
>>
>> -int devm_nsio_enable(struct device *dev, struct nd_namespace_io *nsio)
>> +int devm_nsio_enable(struct device *dev, struct nd_namespace_io *nsio, unsigned long size)
>>   {
>>          struct resource *res = &nsio->res;
>>          struct nd_namespace_common *ndns = &nsio->common;
>>
>> -       nsio->size = resource_size(res);
>> +       nsio->size = size;
> 
> This needs a:
> 
> if (nsio->size)
>     devm_memunmap(dev, nsio->addr);


Why? We should not be calling devm_nsio_enable twice now.

> 
> 
>>          if (!devm_request_mem_region(dev, res->start, resource_size(res),
>>                                  dev_name(&ndns->dev))) {
> 
> Also don't repeat the devm_request_mem_region() if one was already done.
> 
> Another thing to check is if nsio->size gets cleared when the
> namespace is disabled, if not that well need to be added in the
> shutdown path.
> 


Not sure I understand that. So with this patch, when we probe we always 
use the info_block_reserve() size irrespective of the device. This probe 
results in us registering a btt/pfn/dax device, and we return -ENXIO 
after the probe. That return value causes us to destroy the I/O remap 
mapping we did with the info_block_reserve() size; the nsio data 
structure is also freed.

nd_pmem_probe is then called again with the btt device type, and in that 
case we do devm_memremap again:

if (btt)
     remap the full namespace size.
else
    remap the info_block_reserve_size.

This info block reserve mapping is unmapped in pmem_attach_disk().

Anything I am missing in the above flow?
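
To spell the same sequence out as a rough sketch (the two helpers below 
are hypothetical and only exist to show where each mapping is created; 
the real code is a single nd_pmem_probe()):

/* 1st pass: raw pmem namespace, small probe-time mapping */
static int probe_pass_raw(struct device *dev, struct nd_namespace_common *ndns)
{
	struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev);

	/* just enough mapping to read the info blocks via ->rw_bytes */
	if (devm_nsio_enable(dev, nsio, info_block_reserve()))
		return -ENXIO;
	nd_btt_probe(dev, ndns);	/* registers the btt claim device */
	return -ENXIO;			/* devm tears the small mapping down */
}

/* 2nd pass: the btt claim device, full runtime mapping */
static int probe_pass_btt(struct device *dev, struct nd_namespace_common *ndns)
{
	struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev);

	if (devm_nsio_enable(dev, nsio, resource_size(&nsio->res)))
		return -ENXIO;
	return nvdimm_namespace_attach_btt(ndns);
}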



> 
>>                  dev_warn(dev, "could not reserve region %pR\n", res);
>> @@ -318,8 +318,7 @@ int devm_nsio_enable(struct device *dev, struct nd_namespace_io *nsio)
>>          nvdimm_badblocks_populate(to_nd_region(ndns->dev.parent), &nsio->bb,
>>                          &nsio->res);
>>
>> -       nsio->addr = devm_memremap(dev, res->start, resource_size(res),
>> -                       ARCH_MEMREMAP_PMEM);
>> +       nsio->addr = devm_memremap(dev, res->start, size, ARCH_MEMREMAP_PMEM);
>>
>>          return PTR_ERR_OR_ZERO(nsio->addr);
>>   }
>> diff --git a/drivers/nvdimm/nd.h b/drivers/nvdimm/nd.h
>> index ee5c04070ef9..93d3c760c0f3 100644
>> --- a/drivers/nvdimm/nd.h
>> +++ b/drivers/nvdimm/nd.h
>> @@ -376,7 +376,7 @@ void nvdimm_badblocks_populate(struct nd_region *nd_region,
>>   #define MAX_STRUCT_PAGE_SIZE 64
>>
>>   int nvdimm_setup_pfn(struct nd_pfn *nd_pfn, struct dev_pagemap *pgmap);
>> -int devm_nsio_enable(struct device *dev, struct nd_namespace_io *nsio);
>> +int devm_nsio_enable(struct device *dev, struct nd_namespace_io *nsio, unsigned long size);
>>   void devm_nsio_disable(struct device *dev, struct nd_namespace_io *nsio);
>>   #else
>>   static inline int nvdimm_setup_pfn(struct nd_pfn *nd_pfn,
>> @@ -385,7 +385,7 @@ static inline int nvdimm_setup_pfn(struct nd_pfn *nd_pfn,
>>          return -ENXIO;
>>   }
>>   static inline int devm_nsio_enable(struct device *dev,
>> -               struct nd_namespace_io *nsio)
>> +               struct nd_namespace_io *nsio, unsigned long size)
>>   {
>>          return -ENXIO;
>>   }
>> diff --git a/drivers/nvdimm/pfn.h b/drivers/nvdimm/pfn.h
>> index acb19517f678..f4856c87d01c 100644
>> --- a/drivers/nvdimm/pfn.h
>> +++ b/drivers/nvdimm/pfn.h
>> @@ -36,4 +36,10 @@ struct nd_pfn_sb {
>>          __le64 checksum;
>>   };
>>
>> +static inline u32 info_block_reserve(void)
>> +{
>> +       return ALIGN(SZ_8K, PAGE_SIZE);
>> +}
>> +
>> +
>>   #endif /* __NVDIMM_PFN_H */
>> diff --git a/drivers/nvdimm/pfn_devs.c b/drivers/nvdimm/pfn_devs.c
>> index 60d81fae06ee..e49aa9a0fd04 100644
>> --- a/drivers/nvdimm/pfn_devs.c
>> +++ b/drivers/nvdimm/pfn_devs.c
>> @@ -635,11 +635,6 @@ int nd_pfn_probe(struct device *dev, struct nd_namespace_common *ndns)
>>   }
>>   EXPORT_SYMBOL(nd_pfn_probe);
>>
>> -static u32 info_block_reserve(void)
>> -{
>> -       return ALIGN(SZ_8K, PAGE_SIZE);
>> -}
>> -
>>   /*
>>    * We hotplug memory at sub-section granularity, pad the reserved area
>>    * from the previous section base to the namespace base address.
>> diff --git a/drivers/nvdimm/pmem.c b/drivers/nvdimm/pmem.c
>> index f9f76f6ba07b..3c188ffeff11 100644
>> --- a/drivers/nvdimm/pmem.c
>> +++ b/drivers/nvdimm/pmem.c
>> @@ -491,17 +491,26 @@ static int pmem_attach_disk(struct device *dev,
>>   static int nd_pmem_probe(struct device *dev)
>>   {
>>          int ret;
>> +       struct nd_namespace_io *nsio;
>>          struct nd_namespace_common *ndns;
>>
>>          ndns = nvdimm_namespace_common_probe(dev);
>>          if (IS_ERR(ndns))
>>                  return PTR_ERR(ndns);
>>
>> -       if (devm_nsio_enable(dev, to_nd_namespace_io(&ndns->dev)))
>> -               return -ENXIO;
>> +       nsio = to_nd_namespace_io(&ndns->dev);
>>
>> -       if (is_nd_btt(dev))
>> +       if (is_nd_btt(dev)) {
>> +               /*
>> +                * Map with resource size
>> +                */
>> +               if (devm_nsio_enable(dev, nsio, resource_size(&nsio->res)))
>> +                       return -ENXIO;
>>                  return nvdimm_namespace_attach_btt(ndns);
>> +       }
>> +
>> +       if (devm_nsio_enable(dev, nsio, info_block_reserve()))
>> +               return -ENXIO;
>>
>>          if (is_nd_pfn(dev))
>>                  return pmem_attach_disk(dev, ndns);
>> --
>> 2.21.0
>>

^ permalink raw reply	[flat|nested] 7+ messages in thread

* Re: [PATCH v3] libnvdimm/nsio: differentiate between probe mapping and runtime mapping
  2019-10-24  9:07   ` Aneesh Kumar K.V
@ 2019-10-30 23:33     ` Dan Williams
  2019-10-31  3:55       ` Dan Williams
  0 siblings, 1 reply; 7+ messages in thread
From: Dan Williams @ 2019-10-30 23:33 UTC (permalink / raw)
  To: Aneesh Kumar K.V; +Cc: linux-nvdimm

On Thu, Oct 24, 2019 at 2:07 AM Aneesh Kumar K.V
<aneesh.kumar@linux.ibm.com> wrote:
>
> On 10/24/19 7:36 AM, Dan Williams wrote:
> > On Thu, Oct 17, 2019 at 12:33 AM Aneesh Kumar K.V
> > <aneesh.kumar@linux.ibm.com> wrote:
> >>
> >> nvdimm core currently maps the full namespace to an ioremap range
> >> while probing the namespace mode. This can result in probe failures
> >> on architectures that have limited ioremap space.
> >>
> >> For example, with a large btt namespace that consumes most of I/O remap
> >> range, depending on the sequence of namespace initialization, the user can find
> >> a pfn namespace initialization failure due to unavailable I/O remap space
> >> which nvdimm core uses for temporary mapping.
> >>
> >> nvdimm core can avoid this failure by only mapping the reserver block area to
> >
> > s/reserver/reserved/
>
> Will fix
>
>
> >
> >> check for pfn superblock type and map the full namespace resource only before
> >> using the namespace.
> >
> > Are you going to follow up with the BTT patch that uses this new facility?
> >
>
> I am not yet sure about that. ioremap/vmap area is the way we get a
> kernel mapping without struct page backing. What we are suggesting hee
> is the ability to have a direct mapped mapping without struct page. I
> need to look at closely about what that imply.
>
> >>
> >> Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>
> >> ---
> >> Changes from v2:
> >> * update changelog
> >>
> >> Changes from V1:
> >> * update changelog
> >> * update patch based on review feedback.
> >>
> >>   drivers/dax/pmem/core.c   |  2 +-
> >>   drivers/nvdimm/claim.c    |  7 +++----
> >>   drivers/nvdimm/nd.h       |  4 ++--
> >>   drivers/nvdimm/pfn.h      |  6 ++++++
> >>   drivers/nvdimm/pfn_devs.c |  5 -----
> >>   drivers/nvdimm/pmem.c     | 15 ++++++++++++---
> >>   6 files changed, 24 insertions(+), 15 deletions(-)
> >>
> >> diff --git a/drivers/dax/pmem/core.c b/drivers/dax/pmem/core.c
> >> index 6eb6dfdf19bf..f174dbfbe1c4 100644
> >> --- a/drivers/dax/pmem/core.c
> >> +++ b/drivers/dax/pmem/core.c
> >> @@ -28,7 +28,7 @@ struct dev_dax *__dax_pmem_probe(struct device *dev, enum dev_dax_subsys subsys)
> >>          nsio = to_nd_namespace_io(&ndns->dev);
> >>
> >>          /* parse the 'pfn' info block via ->rw_bytes */
> >> -       rc = devm_nsio_enable(dev, nsio);
> >> +       rc = devm_nsio_enable(dev, nsio, info_block_reserve());
> >>          if (rc)
> >>                  return ERR_PTR(rc);
> >>          rc = nvdimm_setup_pfn(nd_pfn, &pgmap);
> >> diff --git a/drivers/nvdimm/claim.c b/drivers/nvdimm/claim.c
> >> index 2985ca949912..d89d2c039e25 100644
> >> --- a/drivers/nvdimm/claim.c
> >> +++ b/drivers/nvdimm/claim.c
> >> @@ -300,12 +300,12 @@ static int nsio_rw_bytes(struct nd_namespace_common *ndns,
> >>          return rc;
> >>   }
> >>
> >> -int devm_nsio_enable(struct device *dev, struct nd_namespace_io *nsio)
> >> +int devm_nsio_enable(struct device *dev, struct nd_namespace_io *nsio, unsigned long size)
> >>   {
> >>          struct resource *res = &nsio->res;
> >>          struct nd_namespace_common *ndns = &nsio->common;
> >>
> >> -       nsio->size = resource_size(res);
> >> +       nsio->size = size;
> >
> > This needs a:
> >
> > if (nsio->size)
> >     devm_memunmap(dev, nsio->addr);
>
>
> Why ? we should not be calling devm_nsio_enable twice now.
>
> >
> >
> >>          if (!devm_request_mem_region(dev, res->start, resource_size(res),
> >>                                  dev_name(&ndns->dev))) {
> >
> > Also don't repeat the devm_request_mem_region() if one was already done.
> >
> > Another thing to check is if nsio->size gets cleared when the
> > namespace is disabled, if not that well need to be added in the
> > shutdown path.
> >
>
>
> Not sure I understand that. So with this patch when we probe we always
> use info_block_reserve() size irrespective of the device. This probe
> will result in us adding a btt/pfn/dax device and we return -ENXIO after
> this probe. This return value will cause us to destroy the I/O remap
> mmapping we did with info_block_reserve() size. Also the nsio data
> structure is also freed.
>
> nd_pmem_probe is then again called with btt device type and in that case
> we do  devm_memremap again.
>
> if (btt)
>      remap the full namespace size.
> else
>     remap the info_block_reserve_size.
>
>
> This infor block reserve mapping is unmapped in pmem_attach_disk()
>
>
> Anything i am missing in the above flow?

Oh no, you're right, this does make the implementation only call
devm_nsio_enable() once. However, now that I look I want to suggest
some small reorganizations of where devm_nsio_enable() is called. I'll
take a stab at folding some changes on top of your patch.

^ permalink raw reply	[flat|nested] 7+ messages in thread

* Re: [PATCH v3] libnvdimm/nsio: differentiate between probe mapping and runtime mapping
  2019-10-30 23:33     ` Dan Williams
@ 2019-10-31  3:55       ` Dan Williams
  2019-10-31  4:10         ` Aneesh Kumar K.V
  0 siblings, 1 reply; 7+ messages in thread
From: Dan Williams @ 2019-10-31  3:55 UTC (permalink / raw)
  To: Aneesh Kumar K.V; +Cc: linux-nvdimm

On Wed, Oct 30, 2019 at 4:33 PM Dan Williams <dan.j.williams@intel.com> wrote:
>
> On Thu, Oct 24, 2019 at 2:07 AM Aneesh Kumar K.V
> <aneesh.kumar@linux.ibm.com> wrote:
> >
> > On 10/24/19 7:36 AM, Dan Williams wrote:
> > > On Thu, Oct 17, 2019 at 12:33 AM Aneesh Kumar K.V
> > > <aneesh.kumar@linux.ibm.com> wrote:
> > >>
> > >> nvdimm core currently maps the full namespace to an ioremap range
> > >> while probing the namespace mode. This can result in probe failures
> > >> on architectures that have limited ioremap space.
> > >>
> > >> For example, with a large btt namespace that consumes most of I/O remap
> > >> range, depending on the sequence of namespace initialization, the user can find
> > >> a pfn namespace initialization failure due to unavailable I/O remap space
> > >> which nvdimm core uses for temporary mapping.
> > >>
> > >> nvdimm core can avoid this failure by only mapping the reserver block area to
> > >
> > > s/reserver/reserved/
> >
> > Will fix
> >
> >
> > >
> > >> check for pfn superblock type and map the full namespace resource only before
> > >> using the namespace.
> > >
> > > Are you going to follow up with the BTT patch that uses this new facility?
> > >
> >
> > I am not yet sure about that. ioremap/vmap area is the way we get a
> > kernel mapping without struct page backing. What we are suggesting hee
> > is the ability to have a direct mapped mapping without struct page. I
> > need to look at closely about what that imply.
> >
> > >>
> > >> Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>
> > >> ---
> > >> Changes from v2:
> > >> * update changelog
> > >>
> > >> Changes from V1:
> > >> * update changelog
> > >> * update patch based on review feedback.
> > >>
> > >>   drivers/dax/pmem/core.c   |  2 +-
> > >>   drivers/nvdimm/claim.c    |  7 +++----
> > >>   drivers/nvdimm/nd.h       |  4 ++--
> > >>   drivers/nvdimm/pfn.h      |  6 ++++++
> > >>   drivers/nvdimm/pfn_devs.c |  5 -----
> > >>   drivers/nvdimm/pmem.c     | 15 ++++++++++++---
> > >>   6 files changed, 24 insertions(+), 15 deletions(-)
> > >>
> > >> diff --git a/drivers/dax/pmem/core.c b/drivers/dax/pmem/core.c
> > >> index 6eb6dfdf19bf..f174dbfbe1c4 100644
> > >> --- a/drivers/dax/pmem/core.c
> > >> +++ b/drivers/dax/pmem/core.c
> > >> @@ -28,7 +28,7 @@ struct dev_dax *__dax_pmem_probe(struct device *dev, enum dev_dax_subsys subsys)
> > >>          nsio = to_nd_namespace_io(&ndns->dev);
> > >>
> > >>          /* parse the 'pfn' info block via ->rw_bytes */
> > >> -       rc = devm_nsio_enable(dev, nsio);
> > >> +       rc = devm_nsio_enable(dev, nsio, info_block_reserve());
> > >>          if (rc)
> > >>                  return ERR_PTR(rc);
> > >>          rc = nvdimm_setup_pfn(nd_pfn, &pgmap);
> > >> diff --git a/drivers/nvdimm/claim.c b/drivers/nvdimm/claim.c
> > >> index 2985ca949912..d89d2c039e25 100644
> > >> --- a/drivers/nvdimm/claim.c
> > >> +++ b/drivers/nvdimm/claim.c
> > >> @@ -300,12 +300,12 @@ static int nsio_rw_bytes(struct nd_namespace_common *ndns,
> > >>          return rc;
> > >>   }
> > >>
> > >> -int devm_nsio_enable(struct device *dev, struct nd_namespace_io *nsio)
> > >> +int devm_nsio_enable(struct device *dev, struct nd_namespace_io *nsio, unsigned long size)
> > >>   {
> > >>          struct resource *res = &nsio->res;
> > >>          struct nd_namespace_common *ndns = &nsio->common;
> > >>
> > >> -       nsio->size = resource_size(res);
> > >> +       nsio->size = size;
> > >
> > > This needs a:
> > >
> > > if (nsio->size)
> > >     devm_memunmap(dev, nsio->addr);
> >
> >
> > Why ? we should not be calling devm_nsio_enable twice now.
> >
> > >
> > >
> > >>          if (!devm_request_mem_region(dev, res->start, resource_size(res),
> > >>                                  dev_name(&ndns->dev))) {
> > >
> > > Also don't repeat the devm_request_mem_region() if one was already done.
> > >
> > > Another thing to check is if nsio->size gets cleared when the
> > > namespace is disabled, if not that well need to be added in the
> > > shutdown path.
> > >
> >
> >
> > Not sure I understand that. So with this patch when we probe we always
> > use info_block_reserve() size irrespective of the device. This probe
> > will result in us adding a btt/pfn/dax device and we return -ENXIO after
> > this probe. This return value will cause us to destroy the I/O remap
> > mmapping we did with info_block_reserve() size. Also the nsio data
> > structure is also freed.
> >
> > nd_pmem_probe is then again called with btt device type and in that case
> > we do  devm_memremap again.
> >
> > if (btt)
> >      remap the full namespace size.
> > else
> >     remap the info_block_reserve_size.
> >
> >
> > This infor block reserve mapping is unmapped in pmem_attach_disk()
> >
> >
> > Anything i am missing in the above flow?
>
> Oh no, you're right, this does make the implementation only call
> devm_nsio_enable() once. However, now that I look I want to suggest
> some small reorganizations of where devm_nsio_enable() is called. I'll
> take a stab at folding some changes on top of your patch.

This change is causing the "pfn-meta-errors.sh" test to fail. It is
due to the fact that info_block_reserve() is not a sufficient
reservation for errors in the page metadata area.

^ permalink raw reply	[flat|nested] 7+ messages in thread

* Re: [PATCH v3] libnvdimm/nsio: differentiate between probe mapping and runtime mapping
  2019-10-31  3:55       ` Dan Williams
@ 2019-10-31  4:10         ` Aneesh Kumar K.V
  2019-10-31  4:19           ` Dan Williams
  0 siblings, 1 reply; 7+ messages in thread
From: Aneesh Kumar K.V @ 2019-10-31  4:10 UTC (permalink / raw)
  To: Dan Williams; +Cc: linux-nvdimm

On 10/31/19 9:25 AM, Dan Williams wrote:
> On Wed, Oct 30, 2019 at 4:33 PM Dan Williams <dan.j.williams@intel.com> wrote:
>>
>> On Thu, Oct 24, 2019 at 2:07 AM Aneesh Kumar K.V
>> <aneesh.kumar@linux.ibm.com> wrote:
>>>
>>> On 10/24/19 7:36 AM, Dan Williams wrote:
>>>> On Thu, Oct 17, 2019 at 12:33 AM Aneesh Kumar K.V
>>>> <aneesh.kumar@linux.ibm.com> wrote:
>>>>>
>>>>> nvdimm core currently maps the full namespace to an ioremap range
>>>>> while probing the namespace mode. This can result in probe failures
>>>>> on architectures that have limited ioremap space.
>>>>>
>>>>> For example, with a large btt namespace that consumes most of I/O remap
>>>>> range, depending on the sequence of namespace initialization, the user can find
>>>>> a pfn namespace initialization failure due to unavailable I/O remap space
>>>>> which nvdimm core uses for temporary mapping.
>>>>>
>>>>> nvdimm core can avoid this failure by only mapping the reserver block area to
>>>>
>>>> s/reserver/reserved/
>>>
>>> Will fix
>>>
>>>
>>>>
>>>>> check for pfn superblock type and map the full namespace resource only before
>>>>> using the namespace.
>>>>
>>>> Are you going to follow up with the BTT patch that uses this new facility?
>>>>
>>>
>>> I am not yet sure about that. ioremap/vmap area is the way we get a
>>> kernel mapping without struct page backing. What we are suggesting hee
>>> is the ability to have a direct mapped mapping without struct page. I
>>> need to look at closely about what that imply.
>>>
>>>>>
>>>>> Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>
>>>>> ---
>>>>> Changes from v2:
>>>>> * update changelog
>>>>>
>>>>> Changes from V1:
>>>>> * update changelog
>>>>> * update patch based on review feedback.
>>>>>
>>>>>    drivers/dax/pmem/core.c   |  2 +-
>>>>>    drivers/nvdimm/claim.c    |  7 +++----
>>>>>    drivers/nvdimm/nd.h       |  4 ++--
>>>>>    drivers/nvdimm/pfn.h      |  6 ++++++
>>>>>    drivers/nvdimm/pfn_devs.c |  5 -----
>>>>>    drivers/nvdimm/pmem.c     | 15 ++++++++++++---
>>>>>    6 files changed, 24 insertions(+), 15 deletions(-)
>>>>>
>>>>> diff --git a/drivers/dax/pmem/core.c b/drivers/dax/pmem/core.c
>>>>> index 6eb6dfdf19bf..f174dbfbe1c4 100644
>>>>> --- a/drivers/dax/pmem/core.c
>>>>> +++ b/drivers/dax/pmem/core.c
>>>>> @@ -28,7 +28,7 @@ struct dev_dax *__dax_pmem_probe(struct device *dev, enum dev_dax_subsys subsys)
>>>>>           nsio = to_nd_namespace_io(&ndns->dev);
>>>>>
>>>>>           /* parse the 'pfn' info block via ->rw_bytes */
>>>>> -       rc = devm_nsio_enable(dev, nsio);
>>>>> +       rc = devm_nsio_enable(dev, nsio, info_block_reserve());
>>>>>           if (rc)
>>>>>                   return ERR_PTR(rc);
>>>>>           rc = nvdimm_setup_pfn(nd_pfn, &pgmap);
>>>>> diff --git a/drivers/nvdimm/claim.c b/drivers/nvdimm/claim.c
>>>>> index 2985ca949912..d89d2c039e25 100644
>>>>> --- a/drivers/nvdimm/claim.c
>>>>> +++ b/drivers/nvdimm/claim.c
>>>>> @@ -300,12 +300,12 @@ static int nsio_rw_bytes(struct nd_namespace_common *ndns,
>>>>>           return rc;
>>>>>    }
>>>>>
>>>>> -int devm_nsio_enable(struct device *dev, struct nd_namespace_io *nsio)
>>>>> +int devm_nsio_enable(struct device *dev, struct nd_namespace_io *nsio, unsigned long size)
>>>>>    {
>>>>>           struct resource *res = &nsio->res;
>>>>>           struct nd_namespace_common *ndns = &nsio->common;
>>>>>
>>>>> -       nsio->size = resource_size(res);
>>>>> +       nsio->size = size;
>>>>
>>>> This needs a:
>>>>
>>>> if (nsio->size)
>>>>      devm_memunmap(dev, nsio->addr);
>>>
>>>
>>> Why ? we should not be calling devm_nsio_enable twice now.
>>>
>>>>
>>>>
>>>>>           if (!devm_request_mem_region(dev, res->start, resource_size(res),
>>>>>                                   dev_name(&ndns->dev))) {
>>>>
>>>> Also don't repeat the devm_request_mem_region() if one was already done.
>>>>
>>>> Another thing to check is if nsio->size gets cleared when the
>>>> namespace is disabled, if not that well need to be added in the
>>>> shutdown path.
>>>>
>>>
>>>
>>> Not sure I understand that. So with this patch when we probe we always
>>> use info_block_reserve() size irrespective of the device. This probe
>>> will result in us adding a btt/pfn/dax device and we return -ENXIO after
>>> this probe. This return value will cause us to destroy the I/O remap
>>> mmapping we did with info_block_reserve() size. Also the nsio data
>>> structure is also freed.
>>>
>>> nd_pmem_probe is then again called with btt device type and in that case
>>> we do  devm_memremap again.
>>>
>>> if (btt)
>>>       remap the full namespace size.
>>> else
>>>      remap the info_block_reserve_size.
>>>
>>>
>>> This infor block reserve mapping is unmapped in pmem_attach_disk()
>>>
>>>
>>> Anything i am missing in the above flow?
>>
>> Oh no, you're right, this does make the implementation only call
>> devm_nsio_enable() once. However, now that I look I want to suggest
>> some small reorganizations of where devm_nsio_enable() is called. I'll
>> take a stab at folding some changes on top of your patch.
> 
> This change is causing the "pfn-meta-errors.sh" test to fail. It is
> due to the fact the info_block_reserve() is not a sufficient
> reservation for errors in the page metadata area.
> 

Can you share the call stack? We clear bad blocks on write, don't we? 
And we only read while probing?

We are still working on enabling `ndctl test` to run on POWER, hence we 
were not able to capture these issues. Will try to get that resolved soon.

-aneesh

^ permalink raw reply	[flat|nested] 7+ messages in thread

* Re: [PATCH v3] libnvdimm/nsio: differentiate between probe mapping and runtime mapping
  2019-10-31  4:10         ` Aneesh Kumar K.V
@ 2019-10-31  4:19           ` Dan Williams
  0 siblings, 0 replies; 7+ messages in thread
From: Dan Williams @ 2019-10-31  4:19 UTC (permalink / raw)
  To: Aneesh Kumar K.V; +Cc: linux-nvdimm

[-- Attachment #1: Type: text/plain, Size: 1177 bytes --]

On Wed, Oct 30, 2019 at 9:10 PM Aneesh Kumar K.V
<aneesh.kumar@linux.ibm.com> wrote:
[..]
> >
>
> Can you share the call stack? we clear bad blocks on write isn't? and we
> do only read while probing?
>
> We are still working on enabling `ndctl test` to run on POWER. Hence was
> not able capture these issues. Will try to get that resolved soon.

Here's the splat. I've also attached a reworked version of the patch
that has code organization fixups. It might be the case that it needs
to fully move to a model where each nd_*_probe() routine does its own
management of devm_namespace_enable() based on the sizes it needs to
complete the probe process.

nd_pmem namespace3.0: request out of range
WARNING: CPU: 16 PID: 1551 at
tools/testing/nvdimm/../../../drivers/nvdimm/claim.c:264
nsio_rw_bytes+0x14c/0x260 [libnvdimm]
RIP: 0010:nsio_rw_bytes+0x14c/0x260 [libnvdimm]
Call Trace:
 nd_pfn_validate+0x3d9/0x560 [libnvdimm]
 nd_pfn_probe+0xc0/0x150 [libnvdimm]
 ? nd_btt_probe+0x1e/0x270 [libnvdimm]
 nd_pmem_probe+0x63/0xc0 [nd_pmem]
 nvdimm_bus_probe+0x91/0x310 [libnvdimm]

That nd_pfn_validate+0x3d9 corresponds to the nvdimm_write_bytes() in
nd_pfn_clear_memmap_errors().
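
In other words, nd_pfn_clear_memmap_errors() writes into the struct-page
metadata area, which lies beyond the 8K info-block reserve that the probe
now maps, so the bounds check in nsio_rw_bytes() fires. Paraphrasing that
check from memory (sketch, not a quote of the tree):

	/* nsio_rw_bytes(): reject I/O beyond the currently mapped window */
	if (unlikely(offset + size > nsio->size)) {
		dev_WARN_ONCE(&ndns->dev, 1, "request out of range\n");
		return -EFAULT;
	}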

[-- Attachment #2: namespace-enable.patch --]
[-- Type: text/x-patch, Size: 12131 bytes --]

libnvdimm/namespace: Differentiate between probe mapping and runtime mapping

From: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>

The nvdimm core currently maps the full namespace to an ioremap range
while probing the namespace mode. This can result in probe failures on
architectures that have limited ioremap space.

For example, with a large btt namespace that consumes most of I/O remap
range, depending on the sequence of namespace initialization, the user
can find a pfn namespace initialization failure due to unavailable I/O
remap space which nvdimm core uses for temporary mapping.

nvdimm core can avoid this failure by only mapping the reserved info
block area to check for pfn superblock type and map the full namespace
resource only before using the namespace.

Given that personalities like BTT can be layered on top of any namespace
type create a generic form of devm_nsio_enable (devm_namespace_enable)
and use it inside the per-personality attach routines. Now
devm_namespace_enable() is always paired with disable unless the mapping
is going to be used for long term runtime access.

Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>
Link: https://lore.kernel.org/r/20191017073308.32645-1-aneesh.kumar@linux.ibm.com
[djbw: reworks to move devm_namespace_{en,dis}able into *attach helpers]
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
---
 drivers/dax/pmem/core.c         |    6 +++---
 drivers/nvdimm/btt.c            |   10 ++++++++--
 drivers/nvdimm/claim.c          |   14 ++++++--------
 drivers/nvdimm/namespace_devs.c |   17 +++++++++++++++++
 drivers/nvdimm/nd-core.h        |   16 ++++++++++++++++
 drivers/nvdimm/nd.h             |   22 +++++++++-------------
 drivers/nvdimm/pfn_devs.c       |    9 ++-------
 drivers/nvdimm/pmem.c           |   17 +++++++++++++----
 8 files changed, 74 insertions(+), 37 deletions(-)

diff --git a/drivers/dax/pmem/core.c b/drivers/dax/pmem/core.c
index 6eb6dfdf19bf..2bedf8414fff 100644
--- a/drivers/dax/pmem/core.c
+++ b/drivers/dax/pmem/core.c
@@ -25,20 +25,20 @@ struct dev_dax *__dax_pmem_probe(struct device *dev, enum dev_dax_subsys subsys)
 	ndns = nvdimm_namespace_common_probe(dev);
 	if (IS_ERR(ndns))
 		return ERR_CAST(ndns);
-	nsio = to_nd_namespace_io(&ndns->dev);
 
 	/* parse the 'pfn' info block via ->rw_bytes */
-	rc = devm_nsio_enable(dev, nsio);
+	rc = devm_namespace_enable(dev, ndns, nd_info_block_reserve());
 	if (rc)
 		return ERR_PTR(rc);
 	rc = nvdimm_setup_pfn(nd_pfn, &pgmap);
 	if (rc)
 		return ERR_PTR(rc);
-	devm_nsio_disable(dev, nsio);
+	devm_namespace_disable(dev, ndns);
 
 	/* reserve the metadata area, device-dax will reserve the data */
 	pfn_sb = nd_pfn->pfn_sb;
 	offset = le64_to_cpu(pfn_sb->dataoff);
+	nsio = to_nd_namespace_io(&ndns->dev);
 	if (!devm_request_mem_region(dev, nsio->res.start, offset,
 				dev_name(&ndns->dev))) {
 		dev_warn(dev, "could not reserve metadata\n");
diff --git a/drivers/nvdimm/btt.c b/drivers/nvdimm/btt.c
index 3e9f45aec8d1..8cb890a987b0 100644
--- a/drivers/nvdimm/btt.c
+++ b/drivers/nvdimm/btt.c
@@ -1674,7 +1674,8 @@ int nvdimm_namespace_attach_btt(struct nd_namespace_common *ndns)
 	struct nd_region *nd_region;
 	struct btt_sb *btt_sb;
 	struct btt *btt;
-	size_t rawsize;
+	size_t size, rawsize;
+	int rc;
 
 	if (!nd_btt->uuid || !nd_btt->ndns || !nd_btt->lbasize) {
 		dev_dbg(&nd_btt->dev, "incomplete btt configuration\n");
@@ -1685,6 +1686,11 @@ int nvdimm_namespace_attach_btt(struct nd_namespace_common *ndns)
 	if (!btt_sb)
 		return -ENOMEM;
 
+	size = nvdimm_namespace_capacity(ndns);
+	rc = devm_namespace_enable(&nd_btt->dev, ndns, size);
+	if (rc)
+		return rc;
+
 	/*
 	 * If this returns < 0, that is ok as it just means there wasn't
 	 * an existing BTT, and we're creating a new one. We still need to
@@ -1693,7 +1699,7 @@ int nvdimm_namespace_attach_btt(struct nd_namespace_common *ndns)
 	 */
 	nd_btt_version(nd_btt, ndns, btt_sb);
 
-	rawsize = nvdimm_namespace_capacity(ndns) - nd_btt->initial_offset;
+	rawsize = size - nd_btt->initial_offset;
 	if (rawsize < ARENA_MIN_SIZE) {
 		dev_dbg(&nd_btt->dev, "%s must be at least %ld bytes\n",
 				dev_name(&ndns->dev),
diff --git a/drivers/nvdimm/claim.c b/drivers/nvdimm/claim.c
index 2985ca949912..45964acba944 100644
--- a/drivers/nvdimm/claim.c
+++ b/drivers/nvdimm/claim.c
@@ -300,13 +300,14 @@ static int nsio_rw_bytes(struct nd_namespace_common *ndns,
 	return rc;
 }
 
-int devm_nsio_enable(struct device *dev, struct nd_namespace_io *nsio)
+int devm_nsio_enable(struct device *dev, struct nd_namespace_io *nsio,
+		resource_size_t size)
 {
 	struct resource *res = &nsio->res;
 	struct nd_namespace_common *ndns = &nsio->common;
 
-	nsio->size = resource_size(res);
-	if (!devm_request_mem_region(dev, res->start, resource_size(res),
+	nsio->size = size;
+	if (!devm_request_mem_region(dev, res->start, size,
 				dev_name(&ndns->dev))) {
 		dev_warn(dev, "could not reserve region %pR\n", res);
 		return -EBUSY;
@@ -318,12 +319,10 @@ int devm_nsio_enable(struct device *dev, struct nd_namespace_io *nsio)
 	nvdimm_badblocks_populate(to_nd_region(ndns->dev.parent), &nsio->bb,
 			&nsio->res);
 
-	nsio->addr = devm_memremap(dev, res->start, resource_size(res),
-			ARCH_MEMREMAP_PMEM);
+	nsio->addr = devm_memremap(dev, res->start, size, ARCH_MEMREMAP_PMEM);
 
 	return PTR_ERR_OR_ZERO(nsio->addr);
 }
-EXPORT_SYMBOL_GPL(devm_nsio_enable);
 
 void devm_nsio_disable(struct device *dev, struct nd_namespace_io *nsio)
 {
@@ -331,6 +330,5 @@ void devm_nsio_disable(struct device *dev, struct nd_namespace_io *nsio)
 
 	devm_memunmap(dev, nsio->addr);
 	devm_exit_badblocks(dev, &nsio->bb);
-	devm_release_mem_region(dev, res->start, resource_size(res));
+	devm_release_mem_region(dev, res->start, nsio->size);
 }
-EXPORT_SYMBOL_GPL(devm_nsio_disable);
diff --git a/drivers/nvdimm/namespace_devs.c b/drivers/nvdimm/namespace_devs.c
index 85b553094d19..22fab80a96fc 100644
--- a/drivers/nvdimm/namespace_devs.c
+++ b/drivers/nvdimm/namespace_devs.c
@@ -1763,6 +1763,23 @@ struct nd_namespace_common *nvdimm_namespace_common_probe(struct device *dev)
 }
 EXPORT_SYMBOL(nvdimm_namespace_common_probe);
 
+int devm_namespace_enable(struct device *dev, struct nd_namespace_common *ndns,
+		resource_size_t size)
+{
+	if (is_namespace_blk(&ndns->dev))
+		return 0;
+	return devm_nsio_enable(dev, to_nd_namespace_io(&ndns->dev), size);
+}
+EXPORT_SYMBOL_GPL(devm_namespace_enable);
+
+void devm_namespace_disable(struct device *dev, struct nd_namespace_common *ndns)
+{
+	if (is_namespace_blk(&ndns->dev))
+		return;
+	devm_nsio_disable(dev, to_nd_namespace_io(&ndns->dev));
+}
+EXPORT_SYMBOL_GPL(devm_namespace_disable);
+
 static struct device **create_namespace_io(struct nd_region *nd_region)
 {
 	struct nd_namespace_io *nsio;
diff --git a/drivers/nvdimm/nd-core.h b/drivers/nvdimm/nd-core.h
index 25fa121104d0..25ae33da987d 100644
--- a/drivers/nvdimm/nd-core.h
+++ b/drivers/nvdimm/nd-core.h
@@ -171,6 +171,22 @@ ssize_t nd_namespace_store(struct device *dev,
 struct nd_pfn *to_nd_pfn_safe(struct device *dev);
 bool is_nvdimm_bus(struct device *dev);
 
+#if IS_ENABLED(CONFIG_ND_CLAIM)
+int devm_nsio_enable(struct device *dev, struct nd_namespace_io *nsio,
+		resource_size_t size);
+void devm_nsio_disable(struct device *dev, struct nd_namespace_io *nsio);
+#else
+static inline int devm_nsio_enable(struct device *dev,
+		struct nd_namespace_io *nsio, resource_size_t size)
+{
+	return -ENXIO;
+}
+
+void devm_nsio_disable(struct device *dev, struct nd_namespace_io *nsio)
+{
+}
+#endif
+
 #ifdef CONFIG_PROVE_LOCKING
 extern struct class *nd_class;
 
diff --git a/drivers/nvdimm/nd.h b/drivers/nvdimm/nd.h
index 577800242110..386f19507b13 100644
--- a/drivers/nvdimm/nd.h
+++ b/drivers/nvdimm/nd.h
@@ -212,6 +212,11 @@ struct nd_dax {
 	struct nd_pfn nd_pfn;
 };
 
+static inline u32 nd_info_block_reserve(void)
+{
+	return ALIGN(SZ_8K, PAGE_SIZE);
+}
+
 enum nd_async_mode {
 	ND_SYNC,
 	ND_ASYNC,
@@ -373,29 +378,20 @@ const char *nvdimm_namespace_disk_name(struct nd_namespace_common *ndns,
 unsigned int pmem_sector_size(struct nd_namespace_common *ndns);
 void nvdimm_badblocks_populate(struct nd_region *nd_region,
 		struct badblocks *bb, const struct resource *res);
+int devm_namespace_enable(struct device *dev, struct nd_namespace_common *ndns,
+		resource_size_t size);
+void devm_namespace_disable(struct device *dev,
+		struct nd_namespace_common *ndns);
 #if IS_ENABLED(CONFIG_ND_CLAIM)
-
 /* max struct page size independent of kernel config */
 #define MAX_STRUCT_PAGE_SIZE 64
-
 int nvdimm_setup_pfn(struct nd_pfn *nd_pfn, struct dev_pagemap *pgmap);
-int devm_nsio_enable(struct device *dev, struct nd_namespace_io *nsio);
-void devm_nsio_disable(struct device *dev, struct nd_namespace_io *nsio);
 #else
 static inline int nvdimm_setup_pfn(struct nd_pfn *nd_pfn,
 				   struct dev_pagemap *pgmap)
 {
 	return -ENXIO;
 }
-static inline int devm_nsio_enable(struct device *dev,
-		struct nd_namespace_io *nsio)
-{
-	return -ENXIO;
-}
-static inline void devm_nsio_disable(struct device *dev,
-		struct nd_namespace_io *nsio)
-{
-}
 #endif
 int nd_blk_region_init(struct nd_region *nd_region);
 int nd_region_activate(struct nd_region *nd_region);
diff --git a/drivers/nvdimm/pfn_devs.c b/drivers/nvdimm/pfn_devs.c
index 9f9cb51301e7..927465b35caa 100644
--- a/drivers/nvdimm/pfn_devs.c
+++ b/drivers/nvdimm/pfn_devs.c
@@ -627,11 +627,6 @@ int nd_pfn_probe(struct device *dev, struct nd_namespace_common *ndns)
 }
 EXPORT_SYMBOL(nd_pfn_probe);
 
-static u32 info_block_reserve(void)
-{
-	return ALIGN(SZ_8K, PAGE_SIZE);
-}
-
 /*
  * We hotplug memory at sub-section granularity, pad the reserved area
  * from the previous section base to the namespace base address.
@@ -645,7 +640,7 @@ static unsigned long init_altmap_base(resource_size_t base)
 
 static unsigned long init_altmap_reserve(resource_size_t base)
 {
-	unsigned long reserve = info_block_reserve() >> PAGE_SHIFT;
+	unsigned long reserve = nd_info_block_reserve() >> PAGE_SHIFT;
 	unsigned long base_pfn = PHYS_PFN(base);
 
 	reserve += base_pfn - SUBSECTION_ALIGN_DOWN(base_pfn);
@@ -660,7 +655,7 @@ static int __nvdimm_setup_pfn(struct nd_pfn *nd_pfn, struct dev_pagemap *pgmap)
 	u64 offset = le64_to_cpu(pfn_sb->dataoff);
 	u32 start_pad = __le32_to_cpu(pfn_sb->start_pad);
 	u32 end_trunc = __le32_to_cpu(pfn_sb->end_trunc);
-	u32 reserve = info_block_reserve();
+	u32 reserve = nd_info_block_reserve();
 	struct nd_namespace_common *ndns = nd_pfn->ndns;
 	struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev);
 	resource_size_t base = nsio->res.start + start_pad;
diff --git a/drivers/nvdimm/pmem.c b/drivers/nvdimm/pmem.c
index f9f76f6ba07b..7a6f4501dcda 100644
--- a/drivers/nvdimm/pmem.c
+++ b/drivers/nvdimm/pmem.c
@@ -372,6 +372,10 @@ static int pmem_attach_disk(struct device *dev,
 	if (!pmem)
 		return -ENOMEM;
 
+	rc = devm_namespace_enable(dev, ndns, nd_info_block_reserve());
+	if (rc)
+		return rc;
+
 	/* while nsio_rw_bytes is active, parse a pfn info block if present */
 	if (is_nd_pfn(dev)) {
 		nd_pfn = to_nd_pfn(dev);
@@ -381,7 +385,7 @@ static int pmem_attach_disk(struct device *dev,
 	}
 
 	/* we're attaching a block device, disable raw namespace access */
-	devm_nsio_disable(dev, nsio);
+	devm_namespace_disable(dev, ndns);
 
 	dev_set_drvdata(dev, pmem);
 	pmem->phys_addr = res->start;
@@ -497,15 +501,16 @@ static int nd_pmem_probe(struct device *dev)
 	if (IS_ERR(ndns))
 		return PTR_ERR(ndns);
 
-	if (devm_nsio_enable(dev, to_nd_namespace_io(&ndns->dev)))
-		return -ENXIO;
-
 	if (is_nd_btt(dev))
 		return nvdimm_namespace_attach_btt(ndns);
 
 	if (is_nd_pfn(dev))
 		return pmem_attach_disk(dev, ndns);
 
+	ret = devm_namespace_enable(dev, ndns, nd_info_block_reserve());
+	if (ret)
+		return ret;
+
 	ret = nd_btt_probe(dev, ndns);
 	if (ret == 0)
 		return -ENXIO;
@@ -532,6 +537,10 @@ static int nd_pmem_probe(struct device *dev)
 		return -ENXIO;
 	else if (ret == -EOPNOTSUPP)
 		return ret;
+
+	/* probe complete, attach handles namespace enabling */
+	devm_namespace_disable(dev, ndns);
+
 	return pmem_attach_disk(dev, ndns);
 }
 


^ permalink raw reply related	[flat|nested] 7+ messages in thread

end of thread

Thread overview: 7+ messages
2019-10-17  7:33 [PATCH v3] libnvdimm/nsio: differentiate between probe mapping and runtime mapping Aneesh Kumar K.V
2019-10-24  2:06 ` Dan Williams
2019-10-24  9:07   ` Aneesh Kumar K.V
2019-10-30 23:33     ` Dan Williams
2019-10-31  3:55       ` Dan Williams
2019-10-31  4:10         ` Aneesh Kumar K.V
2019-10-31  4:19           ` Dan Williams
