From: "Aneesh Kumar K.V" <aneesh.kumar@linux.ibm.com> To: Dan Williams <dan.j.williams@intel.com>, linux-nvdimm@lists.01.org Cc: peterz@infradead.org, dave.hansen@linux.intel.com, linux-kernel@vger.kernel.org, linux-mm@kvack.org Subject: Re: [PATCH 02/16] libnvdimm: Move region attribute group definition Date: Tue, 12 Nov 2019 16:59:19 +0530 [thread overview] Message-ID: <87blthtjsg.fsf@linux.ibm.com> (raw) In-Reply-To: <157309900624.1582359.6929998072035982264.stgit@dwillia2-desk3.amr.corp.intel.com> Dan Williams <dan.j.williams@intel.com> writes: > In preparation for moving region attributes from device attribute groups > to the region device-type, reorder the declaration so that it can be > referenced by the device-type definition without forward declarations. > No functional changes are intended to result from this change. > Reviewed-by: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com> > Cc: Ira Weiny <ira.weiny@intel.com> > Cc: Vishal Verma <vishal.l.verma@intel.com> > Signed-off-by: Dan Williams <dan.j.williams@intel.com> > --- > drivers/nvdimm/region_devs.c | 208 +++++++++++++++++++++--------------------- > 1 file changed, 104 insertions(+), 104 deletions(-) > > diff --git a/drivers/nvdimm/region_devs.c b/drivers/nvdimm/region_devs.c > index ef423ba1a711..e89f2eb3678c 100644 > --- a/drivers/nvdimm/region_devs.c > +++ b/drivers/nvdimm/region_devs.c > @@ -140,36 +140,6 @@ static void nd_region_release(struct device *dev) > kfree(nd_region); > } > > -static struct device_type nd_blk_device_type = { > - .name = "nd_blk", > - .release = nd_region_release, > -}; > - > -static struct device_type nd_pmem_device_type = { > - .name = "nd_pmem", > - .release = nd_region_release, > -}; > - > -static struct device_type nd_volatile_device_type = { > - .name = "nd_volatile", > - .release = nd_region_release, > -}; > - > -bool is_nd_pmem(struct device *dev) > -{ > - return dev ? 
dev->type == &nd_pmem_device_type : false; > -} > - > -bool is_nd_blk(struct device *dev) > -{ > - return dev ? dev->type == &nd_blk_device_type : false; > -} > - > -bool is_nd_volatile(struct device *dev) > -{ > - return dev ? dev->type == &nd_volatile_device_type : false; > -} > - > struct nd_region *to_nd_region(struct device *dev) > { > struct nd_region *nd_region = container_of(dev, struct nd_region, dev); > @@ -674,80 +644,6 @@ static umode_t region_visible(struct kobject *kobj, struct attribute *a, int n) > return 0; > } > > -struct attribute_group nd_region_attribute_group = { > - .attrs = nd_region_attributes, > - .is_visible = region_visible, > -}; > -EXPORT_SYMBOL_GPL(nd_region_attribute_group); > - > -u64 nd_region_interleave_set_cookie(struct nd_region *nd_region, > - struct nd_namespace_index *nsindex) > -{ > - struct nd_interleave_set *nd_set = nd_region->nd_set; > - > - if (!nd_set) > - return 0; > - > - if (nsindex && __le16_to_cpu(nsindex->major) == 1 > - && __le16_to_cpu(nsindex->minor) == 1) > - return nd_set->cookie1; > - return nd_set->cookie2; > -} > - > -u64 nd_region_interleave_set_altcookie(struct nd_region *nd_region) > -{ > - struct nd_interleave_set *nd_set = nd_region->nd_set; > - > - if (nd_set) > - return nd_set->altcookie; > - return 0; > -} > - > -void nd_mapping_free_labels(struct nd_mapping *nd_mapping) > -{ > - struct nd_label_ent *label_ent, *e; > - > - lockdep_assert_held(&nd_mapping->lock); > - list_for_each_entry_safe(label_ent, e, &nd_mapping->labels, list) { > - list_del(&label_ent->list); > - kfree(label_ent); > - } > -} > - > -/* > - * When a namespace is activated create new seeds for the next > - * namespace, or namespace-personality to be configured. 
> - */ > -void nd_region_advance_seeds(struct nd_region *nd_region, struct device *dev) > -{ > - nvdimm_bus_lock(dev); > - if (nd_region->ns_seed == dev) { > - nd_region_create_ns_seed(nd_region); > - } else if (is_nd_btt(dev)) { > - struct nd_btt *nd_btt = to_nd_btt(dev); > - > - if (nd_region->btt_seed == dev) > - nd_region_create_btt_seed(nd_region); > - if (nd_region->ns_seed == &nd_btt->ndns->dev) > - nd_region_create_ns_seed(nd_region); > - } else if (is_nd_pfn(dev)) { > - struct nd_pfn *nd_pfn = to_nd_pfn(dev); > - > - if (nd_region->pfn_seed == dev) > - nd_region_create_pfn_seed(nd_region); > - if (nd_region->ns_seed == &nd_pfn->ndns->dev) > - nd_region_create_ns_seed(nd_region); > - } else if (is_nd_dax(dev)) { > - struct nd_dax *nd_dax = to_nd_dax(dev); > - > - if (nd_region->dax_seed == dev) > - nd_region_create_dax_seed(nd_region); > - if (nd_region->ns_seed == &nd_dax->nd_pfn.ndns->dev) > - nd_region_create_ns_seed(nd_region); > - } > - nvdimm_bus_unlock(dev); > -} > - > static ssize_t mappingN(struct device *dev, char *buf, int n) > { > struct nd_region *nd_region = to_nd_region(dev); > @@ -861,6 +757,110 @@ struct attribute_group nd_mapping_attribute_group = { > }; > EXPORT_SYMBOL_GPL(nd_mapping_attribute_group); > > +struct attribute_group nd_region_attribute_group = { > + .attrs = nd_region_attributes, > + .is_visible = region_visible, > +}; > +EXPORT_SYMBOL_GPL(nd_region_attribute_group); > + > +static struct device_type nd_blk_device_type = { > + .name = "nd_blk", > + .release = nd_region_release, > +}; > + > +static struct device_type nd_pmem_device_type = { > + .name = "nd_pmem", > + .release = nd_region_release, > +}; > + > +static struct device_type nd_volatile_device_type = { > + .name = "nd_volatile", > + .release = nd_region_release, > +}; > + > +bool is_nd_pmem(struct device *dev) > +{ > + return dev ? dev->type == &nd_pmem_device_type : false; > +} > + > +bool is_nd_blk(struct device *dev) > +{ > + return dev ? 
dev->type == &nd_blk_device_type : false; > +} > + > +bool is_nd_volatile(struct device *dev) > +{ > + return dev ? dev->type == &nd_volatile_device_type : false; > +} > + > +u64 nd_region_interleave_set_cookie(struct nd_region *nd_region, > + struct nd_namespace_index *nsindex) > +{ > + struct nd_interleave_set *nd_set = nd_region->nd_set; > + > + if (!nd_set) > + return 0; > + > + if (nsindex && __le16_to_cpu(nsindex->major) == 1 > + && __le16_to_cpu(nsindex->minor) == 1) > + return nd_set->cookie1; > + return nd_set->cookie2; > +} > + > +u64 nd_region_interleave_set_altcookie(struct nd_region *nd_region) > +{ > + struct nd_interleave_set *nd_set = nd_region->nd_set; > + > + if (nd_set) > + return nd_set->altcookie; > + return 0; > +} > + > +void nd_mapping_free_labels(struct nd_mapping *nd_mapping) > +{ > + struct nd_label_ent *label_ent, *e; > + > + lockdep_assert_held(&nd_mapping->lock); > + list_for_each_entry_safe(label_ent, e, &nd_mapping->labels, list) { > + list_del(&label_ent->list); > + kfree(label_ent); > + } > +} > + > +/* > + * When a namespace is activated create new seeds for the next > + * namespace, or namespace-personality to be configured. 
> + */ > +void nd_region_advance_seeds(struct nd_region *nd_region, struct device *dev) > +{ > + nvdimm_bus_lock(dev); > + if (nd_region->ns_seed == dev) { > + nd_region_create_ns_seed(nd_region); > + } else if (is_nd_btt(dev)) { > + struct nd_btt *nd_btt = to_nd_btt(dev); > + > + if (nd_region->btt_seed == dev) > + nd_region_create_btt_seed(nd_region); > + if (nd_region->ns_seed == &nd_btt->ndns->dev) > + nd_region_create_ns_seed(nd_region); > + } else if (is_nd_pfn(dev)) { > + struct nd_pfn *nd_pfn = to_nd_pfn(dev); > + > + if (nd_region->pfn_seed == dev) > + nd_region_create_pfn_seed(nd_region); > + if (nd_region->ns_seed == &nd_pfn->ndns->dev) > + nd_region_create_ns_seed(nd_region); > + } else if (is_nd_dax(dev)) { > + struct nd_dax *nd_dax = to_nd_dax(dev); > + > + if (nd_region->dax_seed == dev) > + nd_region_create_dax_seed(nd_region); > + if (nd_region->ns_seed == &nd_dax->nd_pfn.ndns->dev) > + nd_region_create_ns_seed(nd_region); > + } > + nvdimm_bus_unlock(dev); > +} > + > int nd_blk_region_init(struct nd_region *nd_region) > { > struct device *dev = &nd_region->dev; > _______________________________________________ > Linux-nvdimm mailing list -- linux-nvdimm@lists.01.org > To unsubscribe send an email to linux-nvdimm-leave@lists.01.org _______________________________________________ Linux-nvdimm mailing list -- linux-nvdimm@lists.01.org To unsubscribe send an email to linux-nvdimm-leave@lists.01.org
WARNING: multiple messages have this Message-ID (diff)
From: "Aneesh Kumar K.V" <aneesh.kumar@linux.ibm.com> To: Dan Williams <dan.j.williams@intel.com>, linux-nvdimm@lists.01.org Cc: peterz@infradead.org, dave.hansen@linux.intel.com, linux-kernel@vger.kernel.org, linux-mm@kvack.org Subject: Re: [PATCH 02/16] libnvdimm: Move region attribute group definition Date: Tue, 12 Nov 2019 16:59:19 +0530 [thread overview] Message-ID: <87blthtjsg.fsf@linux.ibm.com> (raw) In-Reply-To: <157309900624.1582359.6929998072035982264.stgit@dwillia2-desk3.amr.corp.intel.com> Dan Williams <dan.j.williams@intel.com> writes: > In preparation for moving region attributes from device attribute groups > to the region device-type, reorder the declaration so that it can be > referenced by the device-type definition without forward declarations. > No functional changes are intended to result from this change. > Reviewed-by: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com> > Cc: Ira Weiny <ira.weiny@intel.com> > Cc: Vishal Verma <vishal.l.verma@intel.com> > Signed-off-by: Dan Williams <dan.j.williams@intel.com> > --- > drivers/nvdimm/region_devs.c | 208 +++++++++++++++++++++--------------------- > 1 file changed, 104 insertions(+), 104 deletions(-) > > diff --git a/drivers/nvdimm/region_devs.c b/drivers/nvdimm/region_devs.c > index ef423ba1a711..e89f2eb3678c 100644 > --- a/drivers/nvdimm/region_devs.c > +++ b/drivers/nvdimm/region_devs.c > @@ -140,36 +140,6 @@ static void nd_region_release(struct device *dev) > kfree(nd_region); > } > > -static struct device_type nd_blk_device_type = { > - .name = "nd_blk", > - .release = nd_region_release, > -}; > - > -static struct device_type nd_pmem_device_type = { > - .name = "nd_pmem", > - .release = nd_region_release, > -}; > - > -static struct device_type nd_volatile_device_type = { > - .name = "nd_volatile", > - .release = nd_region_release, > -}; > - > -bool is_nd_pmem(struct device *dev) > -{ > - return dev ? 
dev->type == &nd_pmem_device_type : false; > -} > - > -bool is_nd_blk(struct device *dev) > -{ > - return dev ? dev->type == &nd_blk_device_type : false; > -} > - > -bool is_nd_volatile(struct device *dev) > -{ > - return dev ? dev->type == &nd_volatile_device_type : false; > -} > - > struct nd_region *to_nd_region(struct device *dev) > { > struct nd_region *nd_region = container_of(dev, struct nd_region, dev); > @@ -674,80 +644,6 @@ static umode_t region_visible(struct kobject *kobj, struct attribute *a, int n) > return 0; > } > > -struct attribute_group nd_region_attribute_group = { > - .attrs = nd_region_attributes, > - .is_visible = region_visible, > -}; > -EXPORT_SYMBOL_GPL(nd_region_attribute_group); > - > -u64 nd_region_interleave_set_cookie(struct nd_region *nd_region, > - struct nd_namespace_index *nsindex) > -{ > - struct nd_interleave_set *nd_set = nd_region->nd_set; > - > - if (!nd_set) > - return 0; > - > - if (nsindex && __le16_to_cpu(nsindex->major) == 1 > - && __le16_to_cpu(nsindex->minor) == 1) > - return nd_set->cookie1; > - return nd_set->cookie2; > -} > - > -u64 nd_region_interleave_set_altcookie(struct nd_region *nd_region) > -{ > - struct nd_interleave_set *nd_set = nd_region->nd_set; > - > - if (nd_set) > - return nd_set->altcookie; > - return 0; > -} > - > -void nd_mapping_free_labels(struct nd_mapping *nd_mapping) > -{ > - struct nd_label_ent *label_ent, *e; > - > - lockdep_assert_held(&nd_mapping->lock); > - list_for_each_entry_safe(label_ent, e, &nd_mapping->labels, list) { > - list_del(&label_ent->list); > - kfree(label_ent); > - } > -} > - > -/* > - * When a namespace is activated create new seeds for the next > - * namespace, or namespace-personality to be configured. 
> - */ > -void nd_region_advance_seeds(struct nd_region *nd_region, struct device *dev) > -{ > - nvdimm_bus_lock(dev); > - if (nd_region->ns_seed == dev) { > - nd_region_create_ns_seed(nd_region); > - } else if (is_nd_btt(dev)) { > - struct nd_btt *nd_btt = to_nd_btt(dev); > - > - if (nd_region->btt_seed == dev) > - nd_region_create_btt_seed(nd_region); > - if (nd_region->ns_seed == &nd_btt->ndns->dev) > - nd_region_create_ns_seed(nd_region); > - } else if (is_nd_pfn(dev)) { > - struct nd_pfn *nd_pfn = to_nd_pfn(dev); > - > - if (nd_region->pfn_seed == dev) > - nd_region_create_pfn_seed(nd_region); > - if (nd_region->ns_seed == &nd_pfn->ndns->dev) > - nd_region_create_ns_seed(nd_region); > - } else if (is_nd_dax(dev)) { > - struct nd_dax *nd_dax = to_nd_dax(dev); > - > - if (nd_region->dax_seed == dev) > - nd_region_create_dax_seed(nd_region); > - if (nd_region->ns_seed == &nd_dax->nd_pfn.ndns->dev) > - nd_region_create_ns_seed(nd_region); > - } > - nvdimm_bus_unlock(dev); > -} > - > static ssize_t mappingN(struct device *dev, char *buf, int n) > { > struct nd_region *nd_region = to_nd_region(dev); > @@ -861,6 +757,110 @@ struct attribute_group nd_mapping_attribute_group = { > }; > EXPORT_SYMBOL_GPL(nd_mapping_attribute_group); > > +struct attribute_group nd_region_attribute_group = { > + .attrs = nd_region_attributes, > + .is_visible = region_visible, > +}; > +EXPORT_SYMBOL_GPL(nd_region_attribute_group); > + > +static struct device_type nd_blk_device_type = { > + .name = "nd_blk", > + .release = nd_region_release, > +}; > + > +static struct device_type nd_pmem_device_type = { > + .name = "nd_pmem", > + .release = nd_region_release, > +}; > + > +static struct device_type nd_volatile_device_type = { > + .name = "nd_volatile", > + .release = nd_region_release, > +}; > + > +bool is_nd_pmem(struct device *dev) > +{ > + return dev ? dev->type == &nd_pmem_device_type : false; > +} > + > +bool is_nd_blk(struct device *dev) > +{ > + return dev ? 
dev->type == &nd_blk_device_type : false; > +} > + > +bool is_nd_volatile(struct device *dev) > +{ > + return dev ? dev->type == &nd_volatile_device_type : false; > +} > + > +u64 nd_region_interleave_set_cookie(struct nd_region *nd_region, > + struct nd_namespace_index *nsindex) > +{ > + struct nd_interleave_set *nd_set = nd_region->nd_set; > + > + if (!nd_set) > + return 0; > + > + if (nsindex && __le16_to_cpu(nsindex->major) == 1 > + && __le16_to_cpu(nsindex->minor) == 1) > + return nd_set->cookie1; > + return nd_set->cookie2; > +} > + > +u64 nd_region_interleave_set_altcookie(struct nd_region *nd_region) > +{ > + struct nd_interleave_set *nd_set = nd_region->nd_set; > + > + if (nd_set) > + return nd_set->altcookie; > + return 0; > +} > + > +void nd_mapping_free_labels(struct nd_mapping *nd_mapping) > +{ > + struct nd_label_ent *label_ent, *e; > + > + lockdep_assert_held(&nd_mapping->lock); > + list_for_each_entry_safe(label_ent, e, &nd_mapping->labels, list) { > + list_del(&label_ent->list); > + kfree(label_ent); > + } > +} > + > +/* > + * When a namespace is activated create new seeds for the next > + * namespace, or namespace-personality to be configured. 
> + */ > +void nd_region_advance_seeds(struct nd_region *nd_region, struct device *dev) > +{ > + nvdimm_bus_lock(dev); > + if (nd_region->ns_seed == dev) { > + nd_region_create_ns_seed(nd_region); > + } else if (is_nd_btt(dev)) { > + struct nd_btt *nd_btt = to_nd_btt(dev); > + > + if (nd_region->btt_seed == dev) > + nd_region_create_btt_seed(nd_region); > + if (nd_region->ns_seed == &nd_btt->ndns->dev) > + nd_region_create_ns_seed(nd_region); > + } else if (is_nd_pfn(dev)) { > + struct nd_pfn *nd_pfn = to_nd_pfn(dev); > + > + if (nd_region->pfn_seed == dev) > + nd_region_create_pfn_seed(nd_region); > + if (nd_region->ns_seed == &nd_pfn->ndns->dev) > + nd_region_create_ns_seed(nd_region); > + } else if (is_nd_dax(dev)) { > + struct nd_dax *nd_dax = to_nd_dax(dev); > + > + if (nd_region->dax_seed == dev) > + nd_region_create_dax_seed(nd_region); > + if (nd_region->ns_seed == &nd_dax->nd_pfn.ndns->dev) > + nd_region_create_ns_seed(nd_region); > + } > + nvdimm_bus_unlock(dev); > +} > + > int nd_blk_region_init(struct nd_region *nd_region) > { > struct device *dev = &nd_region->dev; > _______________________________________________ > Linux-nvdimm mailing list -- linux-nvdimm@lists.01.org > To unsubscribe send an email to linux-nvdimm-leave@lists.01.org
next prev parent reply other threads:[~2019-11-12 11:29 UTC|newest] Thread overview: 79+ messages / expand[flat|nested] mbox.gz Atom feed top 2019-11-07 3:56 [PATCH 00/16] Memory Hierarchy: Enable target node lookups for reserved memory Dan Williams 2019-11-07 3:56 ` Dan Williams 2019-11-07 3:56 ` [PATCH 01/16] libnvdimm: Move attribute groups to device type Dan Williams 2019-11-07 3:56 ` Dan Williams 2019-11-12 11:28 ` Aneesh Kumar K.V 2019-11-12 11:28 ` Aneesh Kumar K.V 2019-11-07 3:56 ` [PATCH 02/16] libnvdimm: Move region attribute group definition Dan Williams 2019-11-07 3:56 ` Dan Williams 2019-11-12 11:29 ` Aneesh Kumar K.V [this message] 2019-11-12 11:29 ` Aneesh Kumar K.V 2019-11-07 3:56 ` [PATCH 03/16] libnvdimm: Move nd_device_attribute_group to device_type Dan Williams 2019-11-07 3:56 ` Dan Williams 2019-11-12 11:30 ` Aneesh Kumar K.V 2019-11-12 11:30 ` Aneesh Kumar K.V 2019-11-07 3:56 ` [PATCH 04/16] libnvdimm: Move nd_numa_attribute_group " Dan Williams 2019-11-07 3:56 ` Dan Williams 2019-11-12 9:22 ` Aneesh Kumar K.V 2019-11-12 9:22 ` Aneesh Kumar K.V 2019-11-13 1:26 ` Dan Williams 2019-11-13 1:26 ` Dan Williams 2019-11-13 1:26 ` Dan Williams 2019-11-13 6:02 ` Aneesh Kumar K.V 2019-11-13 6:02 ` Aneesh Kumar K.V 2019-11-13 6:14 ` Dan Williams 2019-11-13 6:14 ` Dan Williams 2019-11-13 6:14 ` Dan Williams 2019-11-07 3:57 ` [PATCH 05/16] libnvdimm: Move nd_region_attribute_group " Dan Williams 2019-11-07 3:57 ` Dan Williams 2019-11-12 11:45 ` Aneesh Kumar K.V 2019-11-12 11:45 ` Aneesh Kumar K.V 2019-11-07 3:57 ` [PATCH 06/16] libnvdimm: Move nd_mapping_attribute_group " Dan Williams 2019-11-07 3:57 ` Dan Williams 2019-11-12 11:45 ` Aneesh Kumar K.V 2019-11-12 11:45 ` Aneesh Kumar K.V 2019-11-07 3:57 ` [PATCH 07/16] libnvdimm: Move nvdimm_attribute_group " Dan Williams 2019-11-07 3:57 ` Dan Williams 2019-11-12 11:48 ` Aneesh Kumar K.V 2019-11-12 11:48 ` Aneesh Kumar K.V 2019-11-07 3:57 ` [PATCH 08/16] libnvdimm: Move nvdimm_bus_attribute_group " Dan 
Williams 2019-11-07 3:57 ` Dan Williams 2019-11-12 11:48 ` Aneesh Kumar K.V 2019-11-12 11:48 ` Aneesh Kumar K.V 2019-11-07 3:57 ` [PATCH 09/16] dax: Create a dax device_type Dan Williams 2019-11-07 3:57 ` Dan Williams 2019-11-12 11:49 ` Aneesh Kumar K.V 2019-11-12 11:49 ` Aneesh Kumar K.V 2019-11-07 3:57 ` [PATCH 10/16] dax: Simplify root read-only definition for the 'resource' attribute Dan Williams 2019-11-07 3:57 ` Dan Williams 2019-11-12 11:49 ` Aneesh Kumar K.V 2019-11-12 11:49 ` Aneesh Kumar K.V 2019-11-07 3:57 ` [PATCH 11/16] libnvdimm: " Dan Williams 2019-11-07 3:57 ` Dan Williams 2019-11-12 11:50 ` Aneesh Kumar K.V 2019-11-12 11:50 ` Aneesh Kumar K.V 2019-11-07 3:57 ` [PATCH 12/16] dax: Add numa_node to the default device-dax attributes Dan Williams 2019-11-07 3:57 ` Dan Williams 2019-11-12 11:50 ` Aneesh Kumar K.V 2019-11-12 11:50 ` Aneesh Kumar K.V 2019-11-07 3:57 ` [PATCH 13/16] acpi/mm: Up-level "map to online node" functionality Dan Williams 2019-11-07 3:57 ` Dan Williams 2019-11-11 11:30 ` Aneesh Kumar K.V 2019-11-11 11:30 ` Aneesh Kumar K.V 2019-11-11 23:38 ` Dan Williams 2019-11-11 23:38 ` Dan Williams 2019-11-11 23:38 ` Dan Williams 2019-11-07 3:57 ` [PATCH 14/16] x86/numa: Provide a range-to-target_node lookup facility Dan Williams 2019-11-07 3:57 ` Dan Williams 2019-11-07 3:57 ` [PATCH 15/16] libnvdimm/e820: Drop the wrapper around memory_add_physaddr_to_nid Dan Williams 2019-11-07 3:57 ` Dan Williams 2019-11-07 3:58 ` [PATCH 16/16] libnvdimm/e820: Retrieve and populate correct 'target_node' info Dan Williams 2019-11-07 3:58 ` Dan Williams 2019-11-09 5:02 ` kbuild test robot 2019-11-09 5:02 ` kbuild test robot 2019-11-09 5:02 ` kbuild test robot 2019-11-12 11:42 ` [PATCH 00/16] Memory Hierarchy: Enable target node lookups for reserved memory Aneesh Kumar K.V 2019-11-12 11:42 ` Aneesh Kumar K.V 2019-11-12 19:37 ` Dan Williams 2019-11-12 19:37 ` Dan Williams 2019-11-12 19:37 ` Dan Williams
Reply instructions: You may reply publicly to this message via plain-text email using any one of the following methods: * Save the following mbox file, import it into your mail client, and reply-to-all from there: mbox Avoid top-posting and favor interleaved quoting: https://en.wikipedia.org/wiki/Posting_style#Interleaved_style * Reply using the --to, --cc, and --in-reply-to switches of git-send-email(1): git send-email \ --in-reply-to=87blthtjsg.fsf@linux.ibm.com \ --to=aneesh.kumar@linux.ibm.com \ --cc=dan.j.williams@intel.com \ --cc=dave.hansen@linux.intel.com \ --cc=linux-kernel@vger.kernel.org \ --cc=linux-mm@kvack.org \ --cc=linux-nvdimm@lists.01.org \ --cc=peterz@infradead.org \ /path/to/YOUR_REPLY https://kernel.org/pub/software/scm/git/docs/git-send-email.html * If your mail client supports setting the In-Reply-To header via mailto: links, try the mailto: link. Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is an external index of several public inboxes, see mirroring instructions on how to clone and mirror all data and code used by this external index.