From: Eric DeVolder <eric.devolder@oracle.com>
To: linux-kernel@vger.kernel.org, david@redhat.com,
osalvador@suse.de, corbet@lwn.net, tglx@linutronix.de,
mingo@redhat.com, bp@alien8.de, dave.hansen@linux.intel.com,
x86@kernel.org, bhe@redhat.com, ebiederm@xmission.com,
kexec@lists.infradead.org
Cc: hpa@zytor.com, gregkh@linuxfoundation.org, rafael@kernel.org,
vgoyal@redhat.com, dyoung@redhat.com, lf32.dev@gmail.com,
akpm@linux-foundation.org, naveen.n.rao@linux.vnet.ibm.com,
zohar@linux.ibm.com, bhelgaas@google.com, vbabka@suse.cz,
tiwai@suse.de, seanjc@google.com, linux@weissschuh.net,
vschneid@redhat.com, linux-mm@kvack.org,
linux-doc@vger.kernel.org, sourabhjain@linux.ibm.com,
konrad.wilk@oracle.com, boris.ostrovsky@oracle.com,
eric.devolder@oracle.com
Subject: [PATCH v25 02/10] drivers/base: refactor memory.c to use .is_visible()
Date: Thu, 29 Jun 2023 15:21:11 -0400 [thread overview]
Message-ID: <20230629192119.6613-3-eric.devolder@oracle.com> (raw)
In-Reply-To: <20230629192119.6613-1-eric.devolder@oracle.com>
Greg Kroah-Hartman requested that this file use the .is_visible()
method instead of #ifdefs for the attributes in memory.c.
static struct attribute *memory_memblk_attrs[] = {
&dev_attr_phys_index.attr,
&dev_attr_state.attr,
&dev_attr_phys_device.attr,
&dev_attr_removable.attr,
#ifdef CONFIG_MEMORY_HOTREMOVE
&dev_attr_valid_zones.attr,
#endif
NULL
};
and
static struct attribute *memory_root_attrs[] = {
#ifdef CONFIG_ARCH_MEMORY_PROBE
&dev_attr_probe.attr,
#endif
#ifdef CONFIG_MEMORY_FAILURE
&dev_attr_soft_offline_page.attr,
&dev_attr_hard_offline_page.attr,
#endif
&dev_attr_block_size_bytes.attr,
&dev_attr_auto_online_blocks.attr,
NULL
};
To that end:
- the .is_visible() method is implemented, and IS_ENABLED(), rather
than #ifdef, is used to determine the visibility of the attribute.
- the DEVICE_ATTR_xx() attributes are moved outside of #ifdefs, so that
those structs are always present for the memory_memblk_attrs[] and
memory_root_attrs[].
- the function bodies of the callback functions are now wrapped with
  IS_ENABLED(), as the callback functions must exist now that the
  attributes are always compiled-in (though not necessarily visible).
No functionality change intended.
Signed-off-by: Eric DeVolder <eric.devolder@oracle.com>
---
drivers/base/memory.c | 229 ++++++++++++++++++++++++++----------------
1 file changed, 140 insertions(+), 89 deletions(-)
diff --git a/drivers/base/memory.c b/drivers/base/memory.c
index b456ac213610..7294112fe646 100644
--- a/drivers/base/memory.c
+++ b/drivers/base/memory.c
@@ -391,62 +391,66 @@ static ssize_t phys_device_show(struct device *dev,
arch_get_memory_phys_device(start_pfn));
}
-#ifdef CONFIG_MEMORY_HOTREMOVE
static int print_allowed_zone(char *buf, int len, int nid,
struct memory_group *group,
unsigned long start_pfn, unsigned long nr_pages,
int online_type, struct zone *default_zone)
{
- struct zone *zone;
+ if (IS_ENABLED(CONFIG_MEMORY_HOTREMOVE)) {
+ struct zone *zone;
- zone = zone_for_pfn_range(online_type, nid, group, start_pfn, nr_pages);
- if (zone == default_zone)
- return 0;
+ zone = zone_for_pfn_range(online_type, nid, group, start_pfn, nr_pages);
+ if (zone == default_zone)
+ return 0;
- return sysfs_emit_at(buf, len, " %s", zone->name);
+ return sysfs_emit_at(buf, len, " %s", zone->name);
+ }
+ return 0;
}
static ssize_t valid_zones_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct memory_block *mem = to_memory_block(dev);
- unsigned long start_pfn = section_nr_to_pfn(mem->start_section_nr);
- unsigned long nr_pages = PAGES_PER_SECTION * sections_per_block;
- struct memory_group *group = mem->group;
- struct zone *default_zone;
- int nid = mem->nid;
- int len = 0;
+ if (IS_ENABLED(CONFIG_MEMORY_HOTREMOVE)) {
+ struct memory_block *mem = to_memory_block(dev);
+ unsigned long start_pfn = section_nr_to_pfn(mem->start_section_nr);
+ unsigned long nr_pages = PAGES_PER_SECTION * sections_per_block;
+ struct memory_group *group = mem->group;
+ struct zone *default_zone;
+ int nid = mem->nid;
+ int len = 0;
- /*
- * Check the existing zone. Make sure that we do that only on the
- * online nodes otherwise the page_zone is not reliable
- */
- if (mem->state == MEM_ONLINE) {
/*
- * If !mem->zone, the memory block spans multiple zones and
- * cannot get offlined.
- */
- default_zone = mem->zone;
- if (!default_zone)
- return sysfs_emit(buf, "%s\n", "none");
- len += sysfs_emit_at(buf, len, "%s", default_zone->name);
- goto out;
- }
+ * Check the existing zone. Make sure that we do that only on the
+ * online nodes otherwise the page_zone is not reliable
+ */
+ if (mem->state == MEM_ONLINE) {
+ /*
+ * If !mem->zone, the memory block spans multiple zones and
+ * cannot get offlined.
+ */
+ default_zone = mem->zone;
+ if (!default_zone)
+ return sysfs_emit(buf, "%s\n", "none");
+ len += sysfs_emit_at(buf, len, "%s", default_zone->name);
+ goto out;
+ }
- default_zone = zone_for_pfn_range(MMOP_ONLINE, nid, group,
- start_pfn, nr_pages);
+ default_zone = zone_for_pfn_range(MMOP_ONLINE, nid, group,
+ start_pfn, nr_pages);
- len += sysfs_emit_at(buf, len, "%s", default_zone->name);
- len += print_allowed_zone(buf, len, nid, group, start_pfn, nr_pages,
- MMOP_ONLINE_KERNEL, default_zone);
- len += print_allowed_zone(buf, len, nid, group, start_pfn, nr_pages,
- MMOP_ONLINE_MOVABLE, default_zone);
+ len += sysfs_emit_at(buf, len, "%s", default_zone->name);
+ len += print_allowed_zone(buf, len, nid, group, start_pfn, nr_pages,
+ MMOP_ONLINE_KERNEL, default_zone);
+ len += print_allowed_zone(buf, len, nid, group, start_pfn, nr_pages,
+ MMOP_ONLINE_MOVABLE, default_zone);
out:
- len += sysfs_emit_at(buf, len, "\n");
- return len;
+ len += sysfs_emit_at(buf, len, "\n");
+ return len;
+ }
+ return 0;
}
static DEVICE_ATTR_RO(valid_zones);
-#endif
static DEVICE_ATTR_RO(phys_index);
static DEVICE_ATTR_RW(state);
@@ -496,43 +500,43 @@ static DEVICE_ATTR_RW(auto_online_blocks);
* as well as ppc64 will do all of their discovery in userspace
* and will require this interface.
*/
-#ifdef CONFIG_ARCH_MEMORY_PROBE
static ssize_t probe_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
- u64 phys_addr;
- int nid, ret;
- unsigned long pages_per_block = PAGES_PER_SECTION * sections_per_block;
+ if (IS_ENABLED(CONFIG_ARCH_MEMORY_PROBE)) {
+ u64 phys_addr;
+ int nid, ret;
+ unsigned long pages_per_block = PAGES_PER_SECTION * sections_per_block;
- ret = kstrtoull(buf, 0, &phys_addr);
- if (ret)
- return ret;
+ ret = kstrtoull(buf, 0, &phys_addr);
+ if (ret)
+ return ret;
- if (phys_addr & ((pages_per_block << PAGE_SHIFT) - 1))
- return -EINVAL;
+ if (phys_addr & ((pages_per_block << PAGE_SHIFT) - 1))
+ return -EINVAL;
- ret = lock_device_hotplug_sysfs();
- if (ret)
- return ret;
+ ret = lock_device_hotplug_sysfs();
+ if (ret)
+ return ret;
- nid = memory_add_physaddr_to_nid(phys_addr);
- ret = __add_memory(nid, phys_addr,
- MIN_MEMORY_BLOCK_SIZE * sections_per_block,
- MHP_NONE);
+ nid = memory_add_physaddr_to_nid(phys_addr);
+ ret = __add_memory(nid, phys_addr,
+ MIN_MEMORY_BLOCK_SIZE * sections_per_block,
+ MHP_NONE);
- if (ret)
- goto out;
+ if (ret)
+ goto out;
- ret = count;
+ ret = count;
out:
- unlock_device_hotplug();
- return ret;
+ unlock_device_hotplug();
+ return ret;
+ }
+ return 0;
}
static DEVICE_ATTR_WO(probe);
-#endif
-#ifdef CONFIG_MEMORY_FAILURE
/*
* Support for offlining pages of memory
*/
@@ -542,15 +546,19 @@ static ssize_t soft_offline_page_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
- int ret;
- u64 pfn;
- if (!capable(CAP_SYS_ADMIN))
- return -EPERM;
- if (kstrtoull(buf, 0, &pfn) < 0)
- return -EINVAL;
- pfn >>= PAGE_SHIFT;
- ret = soft_offline_page(pfn, 0);
- return ret == 0 ? count : ret;
+ if (IS_ENABLED(CONFIG_MEMORY_FAILURE)) {
+ int ret;
+ u64 pfn;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+ if (kstrtoull(buf, 0, &pfn) < 0)
+ return -EINVAL;
+ pfn >>= PAGE_SHIFT;
+ ret = soft_offline_page(pfn, 0);
+ return ret == 0 ? count : ret;
+ }
+ return 0;
}
/* Forcibly offline a page, including killing processes. */
@@ -558,22 +566,25 @@ static ssize_t hard_offline_page_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
- int ret;
- u64 pfn;
- if (!capable(CAP_SYS_ADMIN))
- return -EPERM;
- if (kstrtoull(buf, 0, &pfn) < 0)
- return -EINVAL;
- pfn >>= PAGE_SHIFT;
- ret = memory_failure(pfn, MF_SW_SIMULATED);
- if (ret == -EOPNOTSUPP)
- ret = 0;
- return ret ? ret : count;
+ if (IS_ENABLED(CONFIG_MEMORY_FAILURE)) {
+ int ret;
+ u64 pfn;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+ if (kstrtoull(buf, 0, &pfn) < 0)
+ return -EINVAL;
+ pfn >>= PAGE_SHIFT;
+ ret = memory_failure(pfn, MF_SW_SIMULATED);
+ if (ret == -EOPNOTSUPP)
+ ret = 0;
+ return ret ? ret : count;
+ }
+ return 0;
}
static DEVICE_ATTR_WO(soft_offline_page);
static DEVICE_ATTR_WO(hard_offline_page);
-#endif
/* See phys_device_show(). */
int __weak arch_get_memory_phys_device(unsigned long start_pfn)
@@ -611,14 +622,35 @@ static struct attribute *memory_memblk_attrs[] = {
&dev_attr_state.attr,
&dev_attr_phys_device.attr,
&dev_attr_removable.attr,
-#ifdef CONFIG_MEMORY_HOTREMOVE
&dev_attr_valid_zones.attr,
-#endif
NULL
};
+static umode_t
+memory_memblk_attr_is_visible(struct kobject *kobj,
+ struct attribute *attr, int unused)
+{
+ umode_t mode = attr->mode;
+
+ if (attr == &dev_attr_phys_index.attr)
+ return mode;
+ if (attr == &dev_attr_state.attr)
+ return mode;
+ if (attr == &dev_attr_phys_device.attr)
+ return mode;
+ if (attr == &dev_attr_removable.attr)
+ return mode;
+ if (IS_ENABLED(CONFIG_MEMORY_HOTREMOVE)) {
+ if (attr == &dev_attr_valid_zones.attr)
+ return mode;
+ }
+
+ return 0;
+}
+
static const struct attribute_group memory_memblk_attr_group = {
.attrs = memory_memblk_attrs,
+ .is_visible = memory_memblk_attr_is_visible,
};
static const struct attribute_group *memory_memblk_attr_groups[] = {
@@ -878,22 +910,41 @@ void remove_memory_block_devices(unsigned long start, unsigned long size)
}
static struct attribute *memory_root_attrs[] = {
-#ifdef CONFIG_ARCH_MEMORY_PROBE
&dev_attr_probe.attr,
-#endif
-
-#ifdef CONFIG_MEMORY_FAILURE
&dev_attr_soft_offline_page.attr,
&dev_attr_hard_offline_page.attr,
-#endif
-
&dev_attr_block_size_bytes.attr,
&dev_attr_auto_online_blocks.attr,
NULL
};
+static umode_t
+memory_root_attr_is_visible(struct kobject *kobj,
+ struct attribute *attr, int unused)
+{
+ umode_t mode = attr->mode;
+
+ if (IS_ENABLED(CONFIG_ARCH_MEMORY_PROBE)) {
+ if (attr == &dev_attr_probe.attr)
+ return mode;
+ }
+ if (IS_ENABLED(CONFIG_MEMORY_FAILURE)) {
+ if (attr == &dev_attr_soft_offline_page.attr)
+ return mode;
+ if (attr == &dev_attr_hard_offline_page.attr)
+ return mode;
+ }
+ if (attr == &dev_attr_block_size_bytes.attr)
+ return mode;
+ if (attr == &dev_attr_auto_online_blocks.attr)
+ return mode;
+
+ return 0;
+}
+
static const struct attribute_group memory_root_attr_group = {
.attrs = memory_root_attrs,
+ .is_visible = memory_root_attr_is_visible,
};
static const struct attribute_group *memory_root_attr_groups[] = {
--
2.31.1
next prev parent reply other threads:[~2023-06-29 19:21 UTC|newest]
Thread overview: 21+ messages / expand[flat|nested] mbox.gz Atom feed top
2023-06-29 19:21 [PATCH v25 00/10] crash: Kernel handling of CPU and memory hot un/plug Eric DeVolder
2023-06-29 19:21 ` [PATCH v25 01/10] drivers/base: refactor cpu.c to use .is_visible() Eric DeVolder
2023-07-03 13:05 ` Greg KH
2023-07-03 16:53 ` Eric DeVolder
2023-07-21 16:32 ` Eric DeVolder
2023-08-03 18:20 ` Eric DeVolder
2023-08-03 18:36 ` Greg KH
2023-06-29 19:21 ` Eric DeVolder [this message]
2023-06-29 19:21 ` [PATCH v25 03/10] crash: move a few code bits to setup support of crash hotplug Eric DeVolder
2023-06-29 19:21 ` [PATCH v25 04/10] crash: add generic infrastructure for crash hotplug support Eric DeVolder
2023-06-29 19:21 ` [PATCH v25 05/10] kexec: exclude elfcorehdr from the segment digest Eric DeVolder
2023-06-29 19:21 ` [PATCH v25 06/10] crash: memory and CPU hotplug sysfs attributes Eric DeVolder
2023-06-29 20:59 ` Randy Dunlap
2023-06-29 22:31 ` Eric DeVolder
2023-06-29 23:20 ` Randy Dunlap
2023-07-03 13:07 ` Greg KH
2023-07-03 16:57 ` Eric DeVolder
2023-06-29 19:21 ` [PATCH v25 07/10] x86/crash: add x86 crash hotplug support Eric DeVolder
2023-06-29 19:21 ` [PATCH v25 08/10] crash: hotplug support for kexec_load() Eric DeVolder
2023-06-29 19:21 ` [PATCH v25 09/10] crash: change crash_prepare_elf64_headers() to for_each_possible_cpu() Eric DeVolder
2023-06-29 19:21 ` [PATCH v25 10/10] x86/crash: optimize CPU changes Eric DeVolder
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20230629192119.6613-3-eric.devolder@oracle.com \
--to=eric.devolder@oracle.com \
--cc=akpm@linux-foundation.org \
--cc=bhe@redhat.com \
--cc=bhelgaas@google.com \
--cc=boris.ostrovsky@oracle.com \
--cc=bp@alien8.de \
--cc=corbet@lwn.net \
--cc=dave.hansen@linux.intel.com \
--cc=david@redhat.com \
--cc=dyoung@redhat.com \
--cc=ebiederm@xmission.com \
--cc=gregkh@linuxfoundation.org \
--cc=hpa@zytor.com \
--cc=kexec@lists.infradead.org \
--cc=konrad.wilk@oracle.com \
--cc=lf32.dev@gmail.com \
--cc=linux-doc@vger.kernel.org \
--cc=linux-kernel@vger.kernel.org \
--cc=linux-mm@kvack.org \
--cc=linux@weissschuh.net \
--cc=mingo@redhat.com \
--cc=naveen.n.rao@linux.vnet.ibm.com \
--cc=osalvador@suse.de \
--cc=rafael@kernel.org \
--cc=seanjc@google.com \
--cc=sourabhjain@linux.ibm.com \
--cc=tglx@linutronix.de \
--cc=tiwai@suse.de \
--cc=vbabka@suse.cz \
--cc=vgoyal@redhat.com \
--cc=vschneid@redhat.com \
--cc=x86@kernel.org \
--cc=zohar@linux.ibm.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).