From: Steven Rostedt <rostedt@goodmis.org>
To: linux-kernel@vger.kernel.org,
	linux-rt-users <linux-rt-users@vger.kernel.org>
Cc: Thomas Gleixner <tglx@linutronix.de>,
	Carsten Emde <C.Emde@osadl.org>,
	Sebastian Andrzej Siewior <bigeasy@linutronix.de>,
	John Kacur <jkacur@redhat.com>, Julia Cartwright <julia@ni.com>,
	Daniel Wagner <wagi@monom.org>, Tom Zanussi <zanussi@kernel.org>,
	stable-rt@vger.kernel.org, Liu Haitao <haitao.liu@windriver.com>,
	Yongxin Liu <yongxin.liu@windriver.com>
Subject: [PATCH RT 21/30] kmemleak: Change the lock of kmemleak_object to raw_spinlock_t
Date: Thu, 23 Jan 2020 15:39:51 -0500
Message-ID: <20200123203945.314725398@goodmis.org>
In-Reply-To: <20200123203930.646725253@goodmis.org>

4.19.94-rt39-rc2 stable review patch.
If anyone has any objections, please let me know.

------------------

From: Liu Haitao <haitao.liu@windriver.com>

[ Upstream commit 217847f57119b5fdd377bfa3d344613ddb98d9fc ]

The commit ("kmemleak: Turn kmemleak_lock to raw spinlock on RT")
changed the kmemleak_lock to raw spinlock. However the
kmemleak_object->lock is held after the kmemleak_lock is held in
scan_block().

Make the object->lock a raw_spinlock_t.
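
For reference, a simplified sketch of the nesting that motivates the
change, condensed from the scan_block() hunks below (the pointer
iteration and the excess-reference handling are elided):

	static void scan_block(void *_start, void *_end,
			       struct kmemleak_object *scanned)
	{
		unsigned long flags;

		/* already a raw_spinlock_t since the commit quoted above */
		raw_spin_lock_irqsave(&kmemleak_lock, flags);

		/* ... for each object referenced from [_start, _end): */

		/*
		 * object->lock nests inside kmemleak_lock. On RT a
		 * sleeping spinlock_t cannot be acquired here, so it
		 * has to become a raw_spinlock_t as well.
		 */
		raw_spin_lock_nested(&object->lock, SINGLE_DEPTH_NESTING);
		update_refs(object);
		raw_spin_unlock(&object->lock);

		raw_spin_unlock_irqrestore(&kmemleak_lock, flags);
	}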

Cc: stable-rt@vger.kernel.org
Link: https://lkml.kernel.org/r/20190927082230.34152-1-yongxin.liu@windriver.com
Signed-off-by: Liu Haitao <haitao.liu@windriver.com>
Signed-off-by: Yongxin Liu <yongxin.liu@windriver.com>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Signed-off-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
---
 mm/kmemleak.c | 72 +++++++++++++++++++++++++--------------------------
 1 file changed, 36 insertions(+), 36 deletions(-)

diff --git a/mm/kmemleak.c b/mm/kmemleak.c
index 92ce99b15f2b..e5f5eeed338d 100644
--- a/mm/kmemleak.c
+++ b/mm/kmemleak.c
@@ -147,7 +147,7 @@ struct kmemleak_scan_area {
  * (use_count) and freed using the RCU mechanism.
  */
 struct kmemleak_object {
-	spinlock_t lock;
+	raw_spinlock_t lock;
 	unsigned int flags;		/* object status flags */
 	struct list_head object_list;
 	struct list_head gray_list;
@@ -561,7 +561,7 @@ static struct kmemleak_object *create_object(unsigned long ptr, size_t size,
 	INIT_LIST_HEAD(&object->object_list);
 	INIT_LIST_HEAD(&object->gray_list);
 	INIT_HLIST_HEAD(&object->area_list);
-	spin_lock_init(&object->lock);
+	raw_spin_lock_init(&object->lock);
 	atomic_set(&object->use_count, 1);
 	object->flags = OBJECT_ALLOCATED;
 	object->pointer = ptr;
@@ -642,9 +642,9 @@ static void __delete_object(struct kmemleak_object *object)
 	 * Locking here also ensures that the corresponding memory block
 	 * cannot be freed when it is being scanned.
 	 */
-	spin_lock_irqsave(&object->lock, flags);
+	raw_spin_lock_irqsave(&object->lock, flags);
 	object->flags &= ~OBJECT_ALLOCATED;
-	spin_unlock_irqrestore(&object->lock, flags);
+	raw_spin_unlock_irqrestore(&object->lock, flags);
 	put_object(object);
 }
 
@@ -716,9 +716,9 @@ static void paint_it(struct kmemleak_object *object, int color)
 {
 	unsigned long flags;
 
-	spin_lock_irqsave(&object->lock, flags);
+	raw_spin_lock_irqsave(&object->lock, flags);
 	__paint_it(object, color);
-	spin_unlock_irqrestore(&object->lock, flags);
+	raw_spin_unlock_irqrestore(&object->lock, flags);
 }
 
 static void paint_ptr(unsigned long ptr, int color)
@@ -778,7 +778,7 @@ static void add_scan_area(unsigned long ptr, size_t size, gfp_t gfp)
 		goto out;
 	}
 
-	spin_lock_irqsave(&object->lock, flags);
+	raw_spin_lock_irqsave(&object->lock, flags);
 	if (size == SIZE_MAX) {
 		size = object->pointer + object->size - ptr;
 	} else if (ptr + size > object->pointer + object->size) {
@@ -794,7 +794,7 @@ static void add_scan_area(unsigned long ptr, size_t size, gfp_t gfp)
 
 	hlist_add_head(&area->node, &object->area_list);
 out_unlock:
-	spin_unlock_irqrestore(&object->lock, flags);
+	raw_spin_unlock_irqrestore(&object->lock, flags);
 out:
 	put_object(object);
 }
@@ -817,9 +817,9 @@ static void object_set_excess_ref(unsigned long ptr, unsigned long excess_ref)
 		return;
 	}
 
-	spin_lock_irqsave(&object->lock, flags);
+	raw_spin_lock_irqsave(&object->lock, flags);
 	object->excess_ref = excess_ref;
-	spin_unlock_irqrestore(&object->lock, flags);
+	raw_spin_unlock_irqrestore(&object->lock, flags);
 	put_object(object);
 }
 
@@ -839,9 +839,9 @@ static void object_no_scan(unsigned long ptr)
 		return;
 	}
 
-	spin_lock_irqsave(&object->lock, flags);
+	raw_spin_lock_irqsave(&object->lock, flags);
 	object->flags |= OBJECT_NO_SCAN;
-	spin_unlock_irqrestore(&object->lock, flags);
+	raw_spin_unlock_irqrestore(&object->lock, flags);
 	put_object(object);
 }
 
@@ -902,11 +902,11 @@ static void early_alloc(struct early_log *log)
 			       log->min_count, GFP_ATOMIC);
 	if (!object)
 		goto out;
-	spin_lock_irqsave(&object->lock, flags);
+	raw_spin_lock_irqsave(&object->lock, flags);
 	for (i = 0; i < log->trace_len; i++)
 		object->trace[i] = log->trace[i];
 	object->trace_len = log->trace_len;
-	spin_unlock_irqrestore(&object->lock, flags);
+	raw_spin_unlock_irqrestore(&object->lock, flags);
 out:
 	rcu_read_unlock();
 }
@@ -1096,9 +1096,9 @@ void __ref kmemleak_update_trace(const void *ptr)
 		return;
 	}
 
-	spin_lock_irqsave(&object->lock, flags);
+	raw_spin_lock_irqsave(&object->lock, flags);
 	object->trace_len = __save_stack_trace(object->trace);
-	spin_unlock_irqrestore(&object->lock, flags);
+	raw_spin_unlock_irqrestore(&object->lock, flags);
 
 	put_object(object);
 }
@@ -1344,7 +1344,7 @@ static void scan_block(void *_start, void *_end,
 		 * previously acquired in scan_object(). These locks are
 		 * enclosed by scan_mutex.
 		 */
-		spin_lock_nested(&object->lock, SINGLE_DEPTH_NESTING);
+		raw_spin_lock_nested(&object->lock, SINGLE_DEPTH_NESTING);
 		/* only pass surplus references (object already gray) */
 		if (color_gray(object)) {
 			excess_ref = object->excess_ref;
@@ -1353,7 +1353,7 @@ static void scan_block(void *_start, void *_end,
 			excess_ref = 0;
 			update_refs(object);
 		}
-		spin_unlock(&object->lock);
+		raw_spin_unlock(&object->lock);
 
 		if (excess_ref) {
 			object = lookup_object(excess_ref, 0);
@@ -1362,9 +1362,9 @@ static void scan_block(void *_start, void *_end,
 			if (object == scanned)
 				/* circular reference, ignore */
 				continue;
-			spin_lock_nested(&object->lock, SINGLE_DEPTH_NESTING);
+			raw_spin_lock_nested(&object->lock, SINGLE_DEPTH_NESTING);
 			update_refs(object);
-			spin_unlock(&object->lock);
+			raw_spin_unlock(&object->lock);
 		}
 	}
 	raw_spin_unlock_irqrestore(&kmemleak_lock, flags);
@@ -1400,7 +1400,7 @@ static void scan_object(struct kmemleak_object *object)
 	 * Once the object->lock is acquired, the corresponding memory block
 	 * cannot be freed (the same lock is acquired in delete_object).
 	 */
-	spin_lock_irqsave(&object->lock, flags);
+	raw_spin_lock_irqsave(&object->lock, flags);
 	if (object->flags & OBJECT_NO_SCAN)
 		goto out;
 	if (!(object->flags & OBJECT_ALLOCATED))
@@ -1419,9 +1419,9 @@ static void scan_object(struct kmemleak_object *object)
 			if (start >= end)
 				break;
 
-			spin_unlock_irqrestore(&object->lock, flags);
+			raw_spin_unlock_irqrestore(&object->lock, flags);
 			cond_resched();
-			spin_lock_irqsave(&object->lock, flags);
+			raw_spin_lock_irqsave(&object->lock, flags);
 		} while (object->flags & OBJECT_ALLOCATED);
 	} else
 		hlist_for_each_entry(area, &object->area_list, node)
@@ -1429,7 +1429,7 @@ static void scan_object(struct kmemleak_object *object)
 				   (void *)(area->start + area->size),
 				   object);
 out:
-	spin_unlock_irqrestore(&object->lock, flags);
+	raw_spin_unlock_irqrestore(&object->lock, flags);
 }
 
 /*
@@ -1482,7 +1482,7 @@ static void kmemleak_scan(void)
 	/* prepare the kmemleak_object's */
 	rcu_read_lock();
 	list_for_each_entry_rcu(object, &object_list, object_list) {
-		spin_lock_irqsave(&object->lock, flags);
+		raw_spin_lock_irqsave(&object->lock, flags);
 #ifdef DEBUG
 		/*
 		 * With a few exceptions there should be a maximum of
@@ -1499,7 +1499,7 @@ static void kmemleak_scan(void)
 		if (color_gray(object) && get_object(object))
 			list_add_tail(&object->gray_list, &gray_list);
 
-		spin_unlock_irqrestore(&object->lock, flags);
+		raw_spin_unlock_irqrestore(&object->lock, flags);
 	}
 	rcu_read_unlock();
 
@@ -1564,14 +1564,14 @@ static void kmemleak_scan(void)
 	 */
 	rcu_read_lock();
 	list_for_each_entry_rcu(object, &object_list, object_list) {
-		spin_lock_irqsave(&object->lock, flags);
+		raw_spin_lock_irqsave(&object->lock, flags);
 		if (color_white(object) && (object->flags & OBJECT_ALLOCATED)
 		    && update_checksum(object) && get_object(object)) {
 			/* color it gray temporarily */
 			object->count = object->min_count;
 			list_add_tail(&object->gray_list, &gray_list);
 		}
-		spin_unlock_irqrestore(&object->lock, flags);
+		raw_spin_unlock_irqrestore(&object->lock, flags);
 	}
 	rcu_read_unlock();
 
@@ -1591,13 +1591,13 @@ static void kmemleak_scan(void)
 	 */
 	rcu_read_lock();
 	list_for_each_entry_rcu(object, &object_list, object_list) {
-		spin_lock_irqsave(&object->lock, flags);
+		raw_spin_lock_irqsave(&object->lock, flags);
 		if (unreferenced_object(object) &&
 		    !(object->flags & OBJECT_REPORTED)) {
 			object->flags |= OBJECT_REPORTED;
 			new_leaks++;
 		}
-		spin_unlock_irqrestore(&object->lock, flags);
+		raw_spin_unlock_irqrestore(&object->lock, flags);
 	}
 	rcu_read_unlock();
 
@@ -1749,10 +1749,10 @@ static int kmemleak_seq_show(struct seq_file *seq, void *v)
 	struct kmemleak_object *object = v;
 	unsigned long flags;
 
-	spin_lock_irqsave(&object->lock, flags);
+	raw_spin_lock_irqsave(&object->lock, flags);
 	if ((object->flags & OBJECT_REPORTED) && unreferenced_object(object))
 		print_unreferenced(seq, object);
-	spin_unlock_irqrestore(&object->lock, flags);
+	raw_spin_unlock_irqrestore(&object->lock, flags);
 	return 0;
 }
 
@@ -1782,9 +1782,9 @@ static int dump_str_object_info(const char *str)
 		return -EINVAL;
 	}
 
-	spin_lock_irqsave(&object->lock, flags);
+	raw_spin_lock_irqsave(&object->lock, flags);
 	dump_object_info(object);
-	spin_unlock_irqrestore(&object->lock, flags);
+	raw_spin_unlock_irqrestore(&object->lock, flags);
 
 	put_object(object);
 	return 0;
@@ -1803,11 +1803,11 @@ static void kmemleak_clear(void)
 
 	rcu_read_lock();
 	list_for_each_entry_rcu(object, &object_list, object_list) {
-		spin_lock_irqsave(&object->lock, flags);
+		raw_spin_lock_irqsave(&object->lock, flags);
 		if ((object->flags & OBJECT_REPORTED) &&
 		    unreferenced_object(object))
 			__paint_it(object, KMEMLEAK_GREY);
-		spin_unlock_irqrestore(&object->lock, flags);
+		raw_spin_unlock_irqrestore(&object->lock, flags);
 	}
 	rcu_read_unlock();
 
-- 
2.24.1


