From: Marcelo Tosatti <mtosatti@redhat.com>
To: Christoph Lameter <cl@linux.com>
Cc: Aaron Tomlin <atomlin@atomlin.com>,
Frederic Weisbecker <frederic@kernel.org>,
Andrew Morton <akpm@linux-foundation.org>,
linux-kernel@vger.kernel.org, linux-mm@kvack.org,
Russell King <linux@armlinux.org.uk>,
Huacai Chen <chenhuacai@kernel.org>,
Heiko Carstens <hca@linux.ibm.com>,
x86@kernel.org, Vlastimil Babka <vbabka@suse.cz>,
Michal Hocko <mhocko@suse.com>,
Marcelo Tosatti <mtosatti@redhat.com>
Subject: [PATCH v7 09/13] vmstat: switch per-cpu vmstat counters to 32-bits
Date: Mon, 20 Mar 2023 15:03:41 -0300 [thread overview]
Message-ID: <20230320180745.758267946@redhat.com> (raw)
In-Reply-To: <20230320180332.102837832@redhat.com>
Some architectures only provide xchg/cmpxchg in 32/64-bit quantities.
Since the next patch is about to use xchg on per-CPU vmstat counters,
switch them to s32.
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
---
Index: linux-vmstat-remote/include/linux/mmzone.h
===================================================================
--- linux-vmstat-remote.orig/include/linux/mmzone.h
+++ linux-vmstat-remote/include/linux/mmzone.h
@@ -689,8 +689,8 @@ struct per_cpu_pages {
struct per_cpu_zonestat {
#ifdef CONFIG_SMP
- s8 vm_stat_diff[NR_VM_ZONE_STAT_ITEMS];
- s8 stat_threshold;
+ s32 vm_stat_diff[NR_VM_ZONE_STAT_ITEMS];
+ s32 stat_threshold;
#endif
#ifdef CONFIG_NUMA
/*
@@ -703,8 +703,8 @@ struct per_cpu_zonestat {
};
struct per_cpu_nodestat {
- s8 stat_threshold;
- s8 vm_node_stat_diff[NR_VM_NODE_STAT_ITEMS];
+ s32 stat_threshold;
+ s32 vm_node_stat_diff[NR_VM_NODE_STAT_ITEMS];
};
#endif /* !__GENERATING_BOUNDS.H */
Index: linux-vmstat-remote/mm/vmstat.c
===================================================================
--- linux-vmstat-remote.orig/mm/vmstat.c
+++ linux-vmstat-remote/mm/vmstat.c
@@ -351,7 +351,7 @@ static inline void mod_zone_state(struct
long delta, int overstep_mode)
{
struct per_cpu_zonestat __percpu *pcp = zone->per_cpu_zonestats;
- s8 __percpu *p = pcp->vm_stat_diff + item;
+ s32 __percpu *p = pcp->vm_stat_diff + item;
long o, n, t, z;
do {
@@ -428,7 +428,7 @@ static inline void mod_node_state(struct
int delta, int overstep_mode)
{
struct per_cpu_nodestat __percpu *pcp = pgdat->per_cpu_nodestats;
- s8 __percpu *p = pcp->vm_node_stat_diff + item;
+ s32 __percpu *p = pcp->vm_node_stat_diff + item;
long o, n, t, z;
if (vmstat_item_in_bytes(item)) {
@@ -525,7 +525,7 @@ void __mod_zone_page_state(struct zone *
long delta)
{
struct per_cpu_zonestat __percpu *pcp = zone->per_cpu_zonestats;
- s8 __percpu *p = pcp->vm_stat_diff + item;
+ s32 __percpu *p = pcp->vm_stat_diff + item;
long x;
long t;
@@ -556,7 +556,7 @@ void __mod_node_page_state(struct pglist
long delta)
{
struct per_cpu_nodestat __percpu *pcp = pgdat->per_cpu_nodestats;
- s8 __percpu *p = pcp->vm_node_stat_diff + item;
+ s32 __percpu *p = pcp->vm_node_stat_diff + item;
long x;
long t;
@@ -614,8 +614,8 @@ EXPORT_SYMBOL(__mod_node_page_state);
void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
{
struct per_cpu_zonestat __percpu *pcp = zone->per_cpu_zonestats;
- s8 __percpu *p = pcp->vm_stat_diff + item;
- s8 v, t;
+ s32 __percpu *p = pcp->vm_stat_diff + item;
+ s32 v, t;
/* See __mod_node_page_state */
preempt_disable_nested();
@@ -623,7 +623,7 @@ void __inc_zone_state(struct zone *zone,
v = __this_cpu_inc_return(*p);
t = __this_cpu_read(pcp->stat_threshold);
if (unlikely(v > t)) {
- s8 overstep = t >> 1;
+ s32 overstep = t >> 1;
zone_page_state_add(v + overstep, zone, item);
__this_cpu_write(*p, -overstep);
@@ -635,8 +635,8 @@ void __inc_zone_state(struct zone *zone,
void __inc_node_state(struct pglist_data *pgdat, enum node_stat_item item)
{
struct per_cpu_nodestat __percpu *pcp = pgdat->per_cpu_nodestats;
- s8 __percpu *p = pcp->vm_node_stat_diff + item;
- s8 v, t;
+ s32 __percpu *p = pcp->vm_node_stat_diff + item;
+ s32 v, t;
VM_WARN_ON_ONCE(vmstat_item_in_bytes(item));
@@ -646,7 +646,7 @@ void __inc_node_state(struct pglist_data
v = __this_cpu_inc_return(*p);
t = __this_cpu_read(pcp->stat_threshold);
if (unlikely(v > t)) {
- s8 overstep = t >> 1;
+ s32 overstep = t >> 1;
node_page_state_add(v + overstep, pgdat, item);
__this_cpu_write(*p, -overstep);
@@ -670,8 +670,8 @@ EXPORT_SYMBOL(__inc_node_page_state);
void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
{
struct per_cpu_zonestat __percpu *pcp = zone->per_cpu_zonestats;
- s8 __percpu *p = pcp->vm_stat_diff + item;
- s8 v, t;
+ s32 __percpu *p = pcp->vm_stat_diff + item;
+ s32 v, t;
/* See __mod_node_page_state */
preempt_disable_nested();
@@ -679,7 +679,7 @@ void __dec_zone_state(struct zone *zone,
v = __this_cpu_dec_return(*p);
t = __this_cpu_read(pcp->stat_threshold);
if (unlikely(v < - t)) {
- s8 overstep = t >> 1;
+ s32 overstep = t >> 1;
zone_page_state_add(v - overstep, zone, item);
__this_cpu_write(*p, overstep);
@@ -691,8 +691,8 @@ void __dec_zone_state(struct zone *zone,
void __dec_node_state(struct pglist_data *pgdat, enum node_stat_item item)
{
struct per_cpu_nodestat __percpu *pcp = pgdat->per_cpu_nodestats;
- s8 __percpu *p = pcp->vm_node_stat_diff + item;
- s8 v, t;
+ s32 __percpu *p = pcp->vm_node_stat_diff + item;
+ s32 v, t;
VM_WARN_ON_ONCE(vmstat_item_in_bytes(item));
@@ -702,7 +702,7 @@ void __dec_node_state(struct pglist_data
v = __this_cpu_dec_return(*p);
t = __this_cpu_read(pcp->stat_threshold);
if (unlikely(v < - t)) {
- s8 overstep = t >> 1;
+ s32 overstep = t >> 1;
node_page_state_add(v - overstep, pgdat, item);
__this_cpu_write(*p, overstep);
next prev parent reply other threads:[~2023-03-20 18:21 UTC|newest]
Thread overview: 56+ messages / expand[flat|nested] mbox.gz Atom feed top
2023-03-20 18:03 [PATCH v7 00/13] fold per-CPU vmstats remotely Marcelo Tosatti
2023-03-20 18:03 ` [PATCH v7 01/13] vmstat: allow_direct_reclaim should use zone_page_state_snapshot Marcelo Tosatti
2023-03-20 18:21 ` Michal Hocko
2023-03-20 18:32 ` Marcelo Tosatti
2023-03-22 10:03 ` Michal Hocko
2023-03-20 18:03 ` [PATCH v7 02/13] this_cpu_cmpxchg: ARM64: switch this_cpu_cmpxchg to locked, add _local function Marcelo Tosatti
2023-03-20 18:03 ` [PATCH v7 03/13] this_cpu_cmpxchg: loongarch: " Marcelo Tosatti
2023-03-20 18:03 ` [PATCH v7 04/13] this_cpu_cmpxchg: S390: " Marcelo Tosatti
2023-03-20 18:03 ` [PATCH v7 05/13] this_cpu_cmpxchg: x86: " Marcelo Tosatti
2023-03-20 18:03 ` [PATCH v7 06/13] add this_cpu_cmpxchg_local and asm-generic definitions Marcelo Tosatti
2023-03-20 18:03 ` [PATCH v7 07/13] convert this_cpu_cmpxchg users to this_cpu_cmpxchg_local Marcelo Tosatti
2023-03-20 18:03 ` [PATCH v7 08/13] mm/vmstat: switch counter modification to cmpxchg Marcelo Tosatti
2023-03-20 18:03 ` Marcelo Tosatti [this message]
2023-03-20 18:03 ` [PATCH v7 10/13] mm/vmstat: use xchg in cpu_vm_stats_fold Marcelo Tosatti
2023-03-20 18:03 ` [PATCH v7 11/13] mm/vmstat: switch vmstat shepherd to flush per-CPU counters remotely Marcelo Tosatti
2023-03-20 18:03 ` [PATCH v7 12/13] mm/vmstat: refresh stats remotely instead of via work item Marcelo Tosatti
2023-03-20 18:03 ` [PATCH v7 13/13] vmstat: add pcp remote node draining via cpu_vm_stats_fold Marcelo Tosatti
2023-03-20 20:43 ` Tim Chen
2023-03-22 1:20 ` Marcelo Tosatti
2023-03-20 18:25 ` [PATCH v7 00/13] fold per-CPU vmstats remotely Michal Hocko
2023-03-20 19:07 ` Marcelo Tosatti
2023-03-22 10:13 ` Michal Hocko
2023-03-22 11:23 ` Marcelo Tosatti
2023-03-22 13:35 ` Michal Hocko
2023-03-22 14:20 ` Marcelo Tosatti
2023-03-23 7:51 ` Michal Hocko
2023-03-23 10:52 ` Marcelo Tosatti
2023-03-23 10:59 ` Marcelo Tosatti
2023-03-23 12:17 ` Michal Hocko
2023-03-23 13:30 ` Marcelo Tosatti
2023-03-23 13:32 ` Marcelo Tosatti
2023-04-18 22:02 ` Andrew Morton
2023-04-19 11:14 ` Marcelo Tosatti
2023-04-19 11:15 ` Marcelo Tosatti
2023-04-19 13:44 ` Andrew Theurer
2023-04-20 7:55 ` Michal Hocko
2023-04-23 1:25 ` Marcelo Tosatti
2023-04-19 11:29 ` Marcelo Tosatti
2023-04-19 11:59 ` Marcelo Tosatti
2023-04-19 12:24 ` Frederic Weisbecker
2023-04-19 13:48 ` Marcelo Tosatti
2023-04-19 14:35 ` Michal Hocko
2023-04-19 16:35 ` Marcelo Tosatti
2023-04-20 8:40 ` Michal Hocko
2023-04-23 1:10 ` Marcelo Tosatti
2023-04-20 13:45 ` Marcelo Tosatti
2023-04-26 14:34 ` Marcelo Tosatti
2023-04-27 8:31 ` Michal Hocko
2023-04-27 14:59 ` Marcelo Tosatti
2023-04-26 15:04 ` Vlastimil Babka
2023-04-26 16:10 ` Marcelo Tosatti
2023-04-27 8:39 ` Michal Hocko
2023-04-27 16:25 ` Marcelo Tosatti
2023-04-19 16:47 ` Vlastimil Babka
2023-04-19 19:15 ` Marcelo Tosatti
2023-05-03 13:51 ` Marcelo Tosatti
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20230320180745.758267946@redhat.com \
--to=mtosatti@redhat.com \
--cc=akpm@linux-foundation.org \
--cc=atomlin@atomlin.com \
--cc=chenhuacai@kernel.org \
--cc=cl@linux.com \
--cc=frederic@kernel.org \
--cc=hca@linux.ibm.com \
--cc=linux-kernel@vger.kernel.org \
--cc=linux-mm@kvack.org \
--cc=linux@armlinux.org.uk \
--cc=mhocko@suse.com \
--cc=vbabka@suse.cz \
--cc=x86@kernel.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).