From: "Uladzislau Rezki (Sony)" <urezki@gmail.com>
To: LKML <linux-kernel@vger.kernel.org>, RCU <rcu@vger.kernel.org>,
	"Paul E . McKenney" <paulmck@kernel.org>
Cc: Michal Hocko <mhocko@suse.com>,
	Andrew Morton <akpm@linux-foundation.org>,
	Daniel Axtens <dja@axtens.net>,
	Frederic Weisbecker <frederic@kernel.org>,
	Neeraj Upadhyay <neeraju@codeaurora.org>,
	Joel Fernandes <joel@joelfernandes.org>,
	Peter Zijlstra <peterz@infradead.org>,
	Thomas Gleixner <tglx@linutronix.de>,
	"Theodore Y . Ts'o" <tytso@mit.edu>,
	Sebastian Andrzej Siewior <bigeasy@linutronix.de>,
	Uladzislau Rezki <urezki@gmail.com>,
	Oleksiy Avramchenko <oleksiy.avramchenko@sonymobile.com>
Subject: [PATCH 3/6] kvfree_rcu: introduce "flags" variable
Date: Wed, 14 Apr 2021 14:12:23 +0200
Message-ID: <20210414121226.2650-3-urezki@gmail.com>
In-Reply-To: <20210414121226.2650-1-urezki@gmail.com>

The kfree_rcu_cpu structure contains a few extra control
variables that behave as plain booleans. Instead, pack them
into a single "flags" variable and define a bit for each
individual boolean state.

This reduces the size of the per-cpu kfree_rcu_cpu structure.
The flags variable is accessed using atomic bit operations.
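
As an illustration, below is a minimal, self-contained sketch of
the same technique (not part of the patch; the KFC_* names and
kfc_* structures are hypothetical), built on the kernel's atomic
bitop helpers from <linux/bitops.h>:

	#include <linux/atomic.h>
	#include <linux/bitops.h>

	/* Before: each boolean state occupies its own member. */
	struct kfc_before {
		bool initialized;
		bool monitor_todo;
		atomic_t work_in_progress;
	};

	/* After: one word, with a bit number defined per state. */
	#define KFC_INITIALIZED		0
	#define KFC_MONITOR_TODO	1
	#define KFC_WORK_RUN		2

	struct kfc_after {
		unsigned long flags;
	};

	static void kfc_example(struct kfc_after *p)
	{
		/* Atomically set, test and clear individual bits. */
		set_bit(KFC_INITIALIZED, &p->flags);

		/*
		 * test_and_set_bit() returns the old bit value, so
		 * only one concurrent caller observes 0 and wins the
		 * race, replacing the atomic_xchg() based pattern.
		 */
		if (!test_and_set_bit(KFC_WORK_RUN, &p->flags))
			clear_bit(KFC_MONITOR_TODO, &p->flags);
	}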

Signed-off-by: Uladzislau Rezki (Sony) <urezki@gmail.com>
---
 kernel/rcu/tree.c | 61 ++++++++++++++++++++++++++++-------------------
 1 file changed, 36 insertions(+), 25 deletions(-)

diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index 1b0289fa1cdd..31ee820c3d9e 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -3139,6 +3139,24 @@ struct kfree_rcu_cpu_work {
 	struct kfree_rcu_cpu *krcp;
 };
 
+// The per-cpu kfree_rcu_cpu structure has been initialized.
+// This bit is set only once, at boot time.
+#define KRC_INITIALIZED		0
+
+// Indicates that the page_cache_work has been initialized
+// and is about to be queued for execution. The bit is
+// cleared when the worker function exits.
+#define KRC_CACHE_WORK_RUN	1
+
+// The page shrinker can ask to free extra pages so that
+// they become available for other needs in the system.
+// This usually happens under low-memory conditions, in
+// which case hold off on filling the page cache for a bit.
+#define KRC_DELAY_CACHE_FILL	2
+
+// Tracks whether a "monitor_work" delayed work is pending.
+#define KRC_MONITOR_TODO	3
+
 /**
  * struct kfree_rcu_cpu - batch up kfree_rcu() requests for RCU grace period
  * @head: List of kfree_rcu() objects not yet waiting for a grace period
@@ -3146,17 +3164,14 @@ struct kfree_rcu_cpu_work {
  * @krw_arr: Array of batches of kfree_rcu() objects waiting for a grace period
  * @lock: Synchronize access to this structure
  * @monitor_work: Promote @head to @head_free after KFREE_DRAIN_JIFFIES
- * @monitor_todo: Tracks whether a @monitor_work delayed work is pending
- * @initialized: The @rcu_work fields have been initialized
  * @count: Number of objects for which GP not started
+ * @flags: KRC_* state bits, accessed with atomic bit operations
  * @bkvcache:
  *	A simple cache list that contains objects for reuse purpose.
  *	In order to save some per-cpu space the list is singular.
  *	Even though it is lockless an access has to be protected by the
  *	per-cpu lock.
  * @page_cache_work: A work to refill the cache when it is empty
- * @backoff_page_cache_fill: Delay a cache filling
- * @work_in_progress: Indicates that page_cache_work is running
  * @hrtimer: A hrtimer for scheduling a page_cache_work
  * @nr_bkv_objs: number of allocated objects at @bkvcache.
  *
@@ -3171,13 +3186,10 @@ struct kfree_rcu_cpu {
 	struct kfree_rcu_cpu_work krw_arr[KFREE_N_BATCHES];
 	raw_spinlock_t lock;
 	struct delayed_work monitor_work;
-	bool monitor_todo;
-	bool initialized;
 	int count;
 
+	unsigned long flags;
 	struct delayed_work page_cache_work;
-	atomic_t backoff_page_cache_fill;
-	atomic_t work_in_progress;
 	struct hrtimer hrtimer;
 
 	struct llist_head bkvcache;
@@ -3415,7 +3427,8 @@ static inline void kfree_rcu_drain_unlock(struct kfree_rcu_cpu *krcp,
 					  unsigned long flags)
 {
 	// Attempt to start a new batch.
-	krcp->monitor_todo = false;
+	clear_bit(KRC_MONITOR_TODO, &krcp->flags);
+
 	if (queue_kfree_rcu_work(krcp)) {
 		// Success! Our job is done here.
 		raw_spin_unlock_irqrestore(&krcp->lock, flags);
@@ -3423,7 +3436,7 @@ static inline void kfree_rcu_drain_unlock(struct kfree_rcu_cpu *krcp,
 	}
 
 	// Previous RCU batch still in progress, try again later.
-	krcp->monitor_todo = true;
+	set_bit(KRC_MONITOR_TODO, &krcp->flags);
 	schedule_delayed_work(&krcp->monitor_work, KFREE_DRAIN_JIFFIES);
 	raw_spin_unlock_irqrestore(&krcp->lock, flags);
 }
@@ -3439,7 +3452,7 @@ static void kfree_rcu_monitor(struct work_struct *work)
 						 monitor_work.work);
 
 	raw_spin_lock_irqsave(&krcp->lock, flags);
-	if (krcp->monitor_todo)
+	if (test_bit(KRC_MONITOR_TODO, &krcp->flags))
 		kfree_rcu_drain_unlock(krcp, flags);
 	else
 		raw_spin_unlock_irqrestore(&krcp->lock, flags);
@@ -3466,7 +3479,7 @@ static void fill_page_cache_func(struct work_struct *work)
 	bool pushed;
 	int i;
 
-	nr_pages = atomic_read(&krcp->backoff_page_cache_fill) ?
+	nr_pages = test_bit(KRC_DELAY_CACHE_FILL, &krcp->flags) ?
 		1 : rcu_min_cached_objs;
 
 	for (i = 0; i < nr_pages; i++) {
@@ -3485,16 +3498,16 @@ static void fill_page_cache_func(struct work_struct *work)
 		}
 	}
 
-	atomic_set(&krcp->work_in_progress, 0);
-	atomic_set(&krcp->backoff_page_cache_fill, 0);
+	clear_bit(KRC_CACHE_WORK_RUN, &krcp->flags);
+	clear_bit(KRC_DELAY_CACHE_FILL, &krcp->flags);
 }
 
 static void
 run_page_cache_worker(struct kfree_rcu_cpu *krcp)
 {
 	if (rcu_scheduler_active == RCU_SCHEDULER_RUNNING &&
-			!atomic_xchg(&krcp->work_in_progress, 1)) {
-		if (atomic_read(&krcp->backoff_page_cache_fill)) {
+			!test_and_set_bit(KRC_CACHE_WORK_RUN, &krcp->flags)) {
+		if (test_bit(KRC_DELAY_CACHE_FILL, &krcp->flags)) {
 			queue_delayed_work(system_wq,
 				&krcp->page_cache_work,
 					msecs_to_jiffies(rcu_delay_page_cache_fill_msec));
@@ -3520,7 +3533,7 @@ add_ptr_to_bulk_krc_lock(struct kfree_rcu_cpu **krcp,
 	int idx;
 
 	*krcp = krc_this_cpu_lock(flags);
-	if (unlikely(!(*krcp)->initialized))
+	if (unlikely(!test_bit(KRC_INITIALIZED, &(*krcp)->flags)))
 		return false;
 
 	idx = !!is_vmalloc_addr(ptr);
@@ -3628,10 +3641,8 @@ void kvfree_call_rcu(struct rcu_head *head, rcu_callback_t func)
 
 	// Set timer to drain after KFREE_DRAIN_JIFFIES.
 	if (rcu_scheduler_active == RCU_SCHEDULER_RUNNING &&
-	    !krcp->monitor_todo) {
-		krcp->monitor_todo = true;
+			!test_and_set_bit(KRC_MONITOR_TODO, &krcp->flags))
 		schedule_delayed_work(&krcp->monitor_work, KFREE_DRAIN_JIFFIES);
-	}
 
 unlock_return:
 	krc_this_cpu_unlock(krcp, flags);
@@ -3661,7 +3672,7 @@ kfree_rcu_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
 
 		count += READ_ONCE(krcp->count);
 		count += READ_ONCE(krcp->nr_bkv_objs);
-		atomic_set(&krcp->backoff_page_cache_fill, 1);
+		set_bit(KRC_DELAY_CACHE_FILL, &krcp->flags);
 	}
 
 	return count;
@@ -3681,7 +3692,7 @@ kfree_rcu_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
 		count += drain_page_cache(krcp);
 
 		raw_spin_lock_irqsave(&krcp->lock, flags);
-		if (krcp->monitor_todo)
+		if (test_bit(KRC_MONITOR_TODO, &krcp->flags))
 			kfree_rcu_drain_unlock(krcp, flags);
 		else
 			raw_spin_unlock_irqrestore(&krcp->lock, flags);
@@ -3712,11 +3723,11 @@ void __init kfree_rcu_scheduler_running(void)
 		struct kfree_rcu_cpu *krcp = per_cpu_ptr(&krc, cpu);
 
 		raw_spin_lock_irqsave(&krcp->lock, flags);
-		if (!krcp->head || krcp->monitor_todo) {
+		if (!krcp->head || test_and_set_bit(KRC_MONITOR_TODO, &krcp->flags)) {
 			raw_spin_unlock_irqrestore(&krcp->lock, flags);
 			continue;
 		}
-		krcp->monitor_todo = true;
+
 		schedule_delayed_work_on(cpu, &krcp->monitor_work,
 					 KFREE_DRAIN_JIFFIES);
 		raw_spin_unlock_irqrestore(&krcp->lock, flags);
@@ -4655,7 +4666,7 @@ static void __init kfree_rcu_batch_init(void)
 
 		INIT_DELAYED_WORK(&krcp->monitor_work, kfree_rcu_monitor);
 		INIT_DELAYED_WORK(&krcp->page_cache_work, fill_page_cache_func);
-		krcp->initialized = true;
+		set_bit(KRC_INITIALIZED, &krcp->flags);
 	}
 	if (register_shrinker(&kfree_rcu_shrinker))
 		pr_err("Failed to register kfree_rcu() shrinker!\n");
-- 
2.20.1

