* [ANNOUNCE] 4.6.1-rt3
@ 2016-06-03 10:44 Sebastian Andrzej Siewior
  2016-06-05  6:11 ` [patch] mm/memcontrol.c::mem_cgroup_migrate() - replace another local_irq_disable() w. local_lock_irq() Mike Galbraith
                   ` (2 more replies)
  0 siblings, 3 replies; 7+ messages in thread
From: Sebastian Andrzej Siewior @ 2016-06-03 10:44 UTC
  To: Thomas Gleixner; +Cc: LKML, linux-rt-users, Steven Rostedt

Dear RT folks!

I'm pleased to announce the v4.6.1-rt3 patch set. 

Changes since v4.6.1-rt2:
  - On return from interrupt on ARM we could schedule with a lazy
    preempt count > 0 under some circumstances. It isn't toxic, but it
    shouldn't happen. Noticed by Thomas Gleixner.

  - The way the preempt counter is accessed on non-x86 architectures
    allowed the compiler to reorder the code slightly: the preempt
    counter was decremented, the need-resched bit was checked, and only
    then was the counter written back. An interrupt between the last
    two steps leads to a missed preemption point and thus high
    latencies. Patch by Peter Zijlstra; a user-space sketch of the
    hazard follows this list.

  - The preemption counter recorded by event trace points (such as
    raw_syscall_entry) was off by one, because each trace point itself
    increments the counter. This has been corrected.

  - It is now ensured that there is no attempt to print from IRQ or
    NMI context. On certain events, such as the hard-lockup detector
    firing, we would otherwise attempt to grab sleeping locks.

  - Allow lru_add_drain_all() to perform its work remotely. Patch by
    Luiz Capitulino and Rik van Riel.
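
A minimal user-space sketch of the reordering hazard described in the
second item above (illustration only, not kernel code; the actual fix in
the asm-generic/preempt.h hunk below simply makes the counter accesses
volatile via READ_ONCE() and a volatile pointer):

#include <stdio.h>

static int preempt_count = 1;
static int need_resched = 1;

/*
 * Racy variant: with plain accesses the compiler may keep the
 * decremented value in a register, test it, and only later store it
 * back. An interrupt arriving between the test and the store sees a
 * stale, non-zero count and skips its preemption point.
 */
static int should_resched_racy(void)
{
        preempt_count--;
        return preempt_count == 0 && need_resched;
}

/*
 * Fixed variant: the volatile accesses force the store to memory
 * before the test, so an interrupt always observes the final count.
 */
static int should_resched_fixed(void)
{
        *(volatile int *)&preempt_count -= 1;
        return *(volatile int *)&preempt_count == 0 && need_resched;
}

int main(void)
{
        printf("racy: %d\n", should_resched_racy());
        preempt_count = 1;
        printf("fixed: %d\n", should_resched_fixed());
        return 0;
}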

Known issues
	- CPU hotplug got a little better but can deadlock.

The delta patch against 4.6.1-rt2 is appended below and can be found here:
 
     https://cdn.kernel.org/pub/linux/kernel/projects/rt/4.6/incr/patch-4.6.1-rt2-rt3.patch.xz

You can get this release via the git tree at:

    git://git.kernel.org/pub/scm/linux/kernel/git/rt/linux-rt-devel.git v4.6.1-rt3

The RT patch against 4.6.1 can be found here:

    https://cdn.kernel.org/pub/linux/kernel/projects/rt/4.6/patch-4.6.1-rt3.patch.xz

The split quilt queue is available at:

    https://cdn.kernel.org/pub/linux/kernel/projects/rt/4.6/patches-4.6.1-rt3.tar.xz

Sebastian

diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
index 188027584dd1..3125de9e9783 100644
--- a/arch/arm/kernel/entry-armv.S
+++ b/arch/arm/kernel/entry-armv.S
@@ -244,7 +244,11 @@ ENDPROC(__irq_svc)
 	bne	1b
 	tst	r0, #_TIF_NEED_RESCHED_LAZY
 	reteq	r8				@ go again
-	b	1b
+	ldr	r0, [tsk, #TI_PREEMPT_LAZY]	@ get preempt lazy count
+	teq	r0, #0				@ if preempt lazy count != 0
+	beq	1b
+	ret	r8				@ go again
+
 #endif
 
 __und_fault:
diff --git a/include/asm-generic/preempt.h b/include/asm-generic/preempt.h
index 5d8ffa3e6f8c..c1cde3577551 100644
--- a/include/asm-generic/preempt.h
+++ b/include/asm-generic/preempt.h
@@ -7,10 +7,10 @@
 
 static __always_inline int preempt_count(void)
 {
-	return current_thread_info()->preempt_count;
+	return READ_ONCE(current_thread_info()->preempt_count);
 }
 
-static __always_inline int *preempt_count_ptr(void)
+static __always_inline volatile int *preempt_count_ptr(void)
 {
 	return &current_thread_info()->preempt_count;
 }
diff --git a/include/linux/locallock.h b/include/linux/locallock.h
index 493e801e0c9b..845c77f1a5ca 100644
--- a/include/linux/locallock.h
+++ b/include/linux/locallock.h
@@ -66,6 +66,9 @@ static inline void __local_lock(struct local_irq_lock *lv)
 #define local_lock(lvar)					\
 	do { __local_lock(&get_local_var(lvar)); } while (0)
 
+#define local_lock_on(lvar, cpu)				\
+	do { __local_lock(&per_cpu(lvar, cpu)); } while (0)
+
 static inline int __local_trylock(struct local_irq_lock *lv)
 {
 	if (lv->owner != current && spin_trylock_local(&lv->lock)) {
@@ -104,6 +107,9 @@ static inline void __local_unlock(struct local_irq_lock *lv)
 		put_local_var(lvar);				\
 	} while (0)
 
+#define local_unlock_on(lvar, cpu)                       \
+	do { __local_unlock(&per_cpu(lvar, cpu)); } while (0)
+
 static inline void __local_lock_irq(struct local_irq_lock *lv)
 {
 	spin_lock_irqsave(&lv->lock, lv->flags);
diff --git a/include/linux/tracepoint.h b/include/linux/tracepoint.h
index be586c632a0c..12cb3bb40c1c 100644
--- a/include/linux/tracepoint.h
+++ b/include/linux/tracepoint.h
@@ -33,6 +33,19 @@ struct trace_enum_map {
 
 #define TRACEPOINT_DEFAULT_PRIO	10
 
+/*
+ * The preempt count recorded in trace_event_raw_event_# is off by one due to
+ * rcu_read_lock_sched_notrace() in __DO_TRACE. This is corrected here.
+ */
+static inline int event_preempt_count(void)
+{
+#ifdef CONFIG_PREEMPT
+	return preempt_count() - 1;
+#else
+	return 0;
+#endif
+}
+
 extern int
 tracepoint_probe_register(struct tracepoint *tp, void *probe, void *data);
 extern int
diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c
index 66971005cc12..fde5e54f1096 100644
--- a/kernel/locking/rtmutex.c
+++ b/kernel/locking/rtmutex.c
@@ -2059,7 +2059,7 @@ EXPORT_SYMBOL_GPL(rt_mutex_timed_lock);
 int __sched rt_mutex_trylock(struct rt_mutex *lock)
 {
 #ifdef CONFIG_PREEMPT_RT_FULL
-	if (WARN_ON(in_irq() || in_nmi()))
+	if (WARN_ON_ONCE(in_irq() || in_nmi()))
 #else
 	if (WARN_ON(in_irq() || in_nmi() || in_serving_softirq()))
 #endif
diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c
index 723bcab97524..ba5e3381a8cc 100644
--- a/kernel/printk/printk.c
+++ b/kernel/printk/printk.c
@@ -1528,6 +1528,11 @@ static void call_console_drivers(int level,
 	if (!console_drivers)
 		return;
 
+	if (IS_ENABLED(CONFIG_PREEMPT_RT_BASE)) {
+		if (in_irq() || in_nmi())
+			return;
+	}
+
 	migrate_disable();
 	for_each_console(con) {
 		if (exclusive_console && con != exclusive_console)
@@ -2460,6 +2465,11 @@ void console_unblank(void)
 {
 	struct console *c;
 
+	if (IS_ENABLED(CONFIG_PREEMPT_RT_BASE)) {
+		if (in_irq() || in_nmi())
+			return;
+	}
+
 	/*
 	 * console_unblank can no longer be called in interrupt context unless
 	 * oops_in_progress is set to 1..
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
index 52c4fffaddcd..90b40cf6ec98 100644
--- a/kernel/trace/trace_events.c
+++ b/kernel/trace/trace_events.c
@@ -245,7 +245,7 @@ void *trace_event_buffer_reserve(struct trace_event_buffer *fbuffer,
 		return NULL;
 
 	local_save_flags(fbuffer->flags);
-	fbuffer->pc = preempt_count();
+	fbuffer->pc = event_preempt_count();
 	fbuffer->trace_file = trace_file;
 
 	fbuffer->event =
diff --git a/localversion-rt b/localversion-rt
index c3054d08a112..1445cd65885c 100644
--- a/localversion-rt
+++ b/localversion-rt
@@ -1 +1 @@
--rt2
+-rt3
diff --git a/mm/swap.c b/mm/swap.c
index 892747266c7e..d3558eb2f685 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -596,9 +596,15 @@ void lru_add_drain_cpu(int cpu)
 		unsigned long flags;
 
 		/* No harm done if a racing interrupt already did this */
+#ifdef CONFIG_PREEMPT_RT_BASE
+		local_lock_irqsave_on(rotate_lock, flags, cpu);
+		pagevec_move_tail(pvec);
+		local_unlock_irqrestore_on(rotate_lock, flags, cpu);
+#else
 		local_lock_irqsave(rotate_lock, flags);
 		pagevec_move_tail(pvec);
 		local_unlock_irqrestore(rotate_lock, flags);
+#endif
 	}
 
 	pvec = &per_cpu(lru_deactivate_file_pvecs, cpu);
@@ -666,12 +672,32 @@ void lru_add_drain(void)
 	local_unlock_cpu(swapvec_lock);
 }
 
+
+#ifdef CONFIG_PREEMPT_RT_BASE
+static inline void remote_lru_add_drain(int cpu, struct cpumask *has_work)
+{
+	local_lock_on(swapvec_lock, cpu);
+	lru_add_drain_cpu(cpu);
+	local_unlock_on(swapvec_lock, cpu);
+}
+
+#else
+
 static void lru_add_drain_per_cpu(struct work_struct *dummy)
 {
 	lru_add_drain();
 }
 
 static DEFINE_PER_CPU(struct work_struct, lru_add_drain_work);
+static inline void remote_lru_add_drain(int cpu, struct cpumask *has_work)
+{
+	struct work_struct *work = &per_cpu(lru_add_drain_work, cpu);
+
+	INIT_WORK(work, lru_add_drain_per_cpu);
+	schedule_work_on(cpu, work);
+	cpumask_set_cpu(cpu, has_work);
+}
+#endif
 
 void lru_add_drain_all(void)
 {
@@ -684,21 +710,18 @@ void lru_add_drain_all(void)
 	cpumask_clear(&has_work);
 
 	for_each_online_cpu(cpu) {
-		struct work_struct *work = &per_cpu(lru_add_drain_work, cpu);
-
 		if (pagevec_count(&per_cpu(lru_add_pvec, cpu)) ||
 		    pagevec_count(&per_cpu(lru_rotate_pvecs, cpu)) ||
 		    pagevec_count(&per_cpu(lru_deactivate_file_pvecs, cpu)) ||
 		    pagevec_count(&per_cpu(lru_deactivate_pvecs, cpu)) ||
-		    need_activate_page_drain(cpu)) {
-			INIT_WORK(work, lru_add_drain_per_cpu);
-			schedule_work_on(cpu, work);
-			cpumask_set_cpu(cpu, &has_work);
-		}
+		    need_activate_page_drain(cpu))
+			remote_lru_add_drain(cpu, &has_work);
 	}
 
+#ifndef CONFIG_PREEMPT_RT_BASE
 	for_each_cpu(cpu, &has_work)
 		flush_work(&per_cpu(lru_add_drain_work, cpu));
+#endif
 
 	put_online_cpus();
 	mutex_unlock(&lock);


* [patch] mm/memcontrol.c::mem_cgroup_migrate() - replace another local_irq_disable() w. local_lock_irq()
  2016-06-03 10:44 [ANNOUNCE] 4.6.1-rt3 Sebastian Andrzej Siewior
@ 2016-06-05  6:11 ` Mike Galbraith
  2016-06-06  8:52   ` Sebastian Andrzej Siewior
  2016-06-05  6:16 ` [patch rfc] work-simple: Rename work-simple.[ch] to swork.[ch] for consistency Mike Galbraith
  2016-06-07  4:19 ` [patch rfc] locking/rwsem: Add down_write_killable/killable_nested() Mike Galbraith
  2 siblings, 1 reply; 7+ messages in thread
From: Mike Galbraith @ 2016-06-05  6:11 UTC
  To: Sebastian Andrzej Siewior, Thomas Gleixner
  Cc: LKML, linux-rt-users, Steven Rostedt

v4.6 grew a local_irq_disable() in mm/memcontrol.c::mem_cgroup_migrate().
Convert it to use the existing local lock (event_lock) like the others.

Signed-off-by: Mike Galbraith <umgwanakikbuti@gmail.com>
---
 mm/memcontrol.c |    4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -5557,10 +5557,10 @@ void mem_cgroup_migrate(struct page *old
 
 	commit_charge(newpage, memcg, false);
 
-	local_irq_disable();
+	local_lock_irq(event_lock);
 	mem_cgroup_charge_statistics(memcg, newpage, compound, nr_pages);
 	memcg_check_events(memcg, newpage);
-	local_irq_enable();
+	local_unlock_irq(event_lock);
 }
 
 DEFINE_STATIC_KEY_FALSE(memcg_sockets_enabled_key);
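
A note on the pattern (my understanding of the rationale): on
PREEMPT_RT, spinlocks are sleeping locks, so code inside a
local_irq_disable() section must not take them. The event_lock local
lock keeps the section preemptible on RT while mapping back to plain
local_irq_disable()/local_irq_enable() on !RT builds, e.g.:

        local_lock_irq(event_lock);     /* !RT: local_irq_disable() */
        /* ... per-cpu memcg statistics and event checks ... */
        local_unlock_irq(event_lock);   /* !RT: local_irq_enable() */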


* [patch rfc] work-simple: Rename work-simple.[ch] to swork.[ch] for consistency
  2016-06-03 10:44 [ANNOUNCE] 4.6.1-rt3 Sebastian Andrzej Siewior
  2016-06-05  6:11 ` [patch] mm/memcontrol.c::mem_cgroup_migrate() - replace another local_irq_disable() w. local_lock_irq() Mike Galbraith
@ 2016-06-05  6:16 ` Mike Galbraith
  2016-06-06  8:52   ` Sebastian Andrzej Siewior
  2016-06-07  4:19 ` [patch rfc] locking/rwsem: Add down_write_killable/killable_nested() Mike Galbraith
  2 siblings, 1 reply; 7+ messages in thread
From: Mike Galbraith @ 2016-06-05  6:16 UTC
  To: Sebastian Andrzej Siewior, Thomas Gleixner
  Cc: LKML, linux-rt-users, Steven Rostedt


The internal bits are already swork_blah, rename source to match. 

Signed-off-by: Mike Galbraith <umgwanakikbuti@gmail.com>
---
 arch/x86/kernel/cpu/mcheck/mce.c       |    2 
 drivers/thermal/x86_pkg_temp_thermal.c |    2 
 fs/aio.c                               |    2 
 include/linux/cgroup-defs.h            |    2 
 include/linux/swork.h                  |   24 ++++
 include/linux/work-simple.h            |   24 ----
 kernel/sched/Makefile                  |    2 
 kernel/sched/swork.c                   |  173 +++++++++++++++++++++++++++++++++
 kernel/sched/work-simple.c             |  173 ---------------------------------
 9 files changed, 202 insertions(+), 202 deletions(-)

--- a/arch/x86/kernel/cpu/mcheck/mce.c
+++ b/arch/x86/kernel/cpu/mcheck/mce.c
@@ -42,7 +42,7 @@
 #include <linux/irq_work.h>
 #include <linux/export.h>
 #include <linux/jiffies.h>
-#include <linux/work-simple.h>
+#include <linux/swork.h>
 
 #include <asm/processor.h>
 #include <asm/traps.h>
--- a/drivers/thermal/x86_pkg_temp_thermal.c
+++ b/drivers/thermal/x86_pkg_temp_thermal.c
@@ -29,7 +29,7 @@
 #include <linux/pm.h>
 #include <linux/thermal.h>
 #include <linux/debugfs.h>
-#include <linux/work-simple.h>
+#include <linux/swork.h>
 #include <asm/cpu_device_id.h>
 #include <asm/mce.h>
 
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -40,7 +40,7 @@
 #include <linux/ramfs.h>
 #include <linux/percpu-refcount.h>
 #include <linux/mount.h>
-#include <linux/work-simple.h>
+#include <linux/swork.h>
 
 #include <asm/kmap_types.h>
 #include <asm/uaccess.h>
--- a/include/linux/cgroup-defs.h
+++ b/include/linux/cgroup-defs.h
@@ -16,7 +16,7 @@
 #include <linux/percpu-refcount.h>
 #include <linux/percpu-rwsem.h>
 #include <linux/workqueue.h>
-#include <linux/work-simple.h>
+#include <linux/swork.h>
 
 #ifdef CONFIG_CGROUPS
 
--- /dev/null
+++ b/include/linux/swork.h
@@ -0,0 +1,24 @@
+#ifndef _LINUX_SWORK_H
+#define _LINUX_SWORK_H
+
+#include <linux/list.h>
+
+struct swork_event {
+	struct list_head item;
+	unsigned long flags;
+	void (*func)(struct swork_event *);
+};
+
+static inline void INIT_SWORK(struct swork_event *event,
+			      void (*func)(struct swork_event *))
+{
+	event->flags = 0;
+	event->func = func;
+}
+
+bool swork_queue(struct swork_event *sev);
+
+int swork_get(void);
+void swork_put(void);
+
+#endif /* _LINUX_SWORK_H */
--- a/include/linux/work-simple.h
+++ /dev/null
@@ -1,24 +0,0 @@
-#ifndef _LINUX_SWORK_H
-#define _LINUX_SWORK_H
-
-#include <linux/list.h>
-
-struct swork_event {
-	struct list_head item;
-	unsigned long flags;
-	void (*func)(struct swork_event *);
-};
-
-static inline void INIT_SWORK(struct swork_event *event,
-			      void (*func)(struct swork_event *))
-{
-	event->flags = 0;
-	event->func = func;
-}
-
-bool swork_queue(struct swork_event *sev);
-
-int swork_get(void);
-void swork_put(void);
-
-#endif /* _LINUX_SWORK_H */
--- a/kernel/sched/Makefile
+++ b/kernel/sched/Makefile
@@ -17,7 +17,7 @@ endif
 
 obj-y += core.o loadavg.o clock.o cputime.o
 obj-y += idle_task.o fair.o rt.o deadline.o stop_task.o
-obj-y += wait.o swait.o work-simple.o completion.o idle.o
+obj-y += wait.o swait.o swork.o completion.o idle.o
 obj-$(CONFIG_SMP) += cpupri.o cpudeadline.o
 obj-$(CONFIG_SCHED_AUTOGROUP) += auto_group.o
 obj-$(CONFIG_SCHEDSTATS) += stats.o
--- /dev/null
+++ b/kernel/sched/swork.c
@@ -0,0 +1,173 @@
+/*
+ * Copyright (C) 2014 BMW Car IT GmbH, Daniel Wagner daniel.wagner@bmw-carit.de
+ *
+ * Provides a framework for enqueuing callbacks from irq context
+ * PREEMPT_RT_FULL safe. The callbacks are executed in kthread context.
+ */
+
+#include <linux/swait.h>
+#include <linux/swork.h>
+#include <linux/kthread.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/export.h>
+
+#define SWORK_EVENT_PENDING     (1 << 0)
+
+static DEFINE_MUTEX(worker_mutex);
+static struct sworker *glob_worker;
+
+struct sworker {
+	struct list_head events;
+	struct swait_queue_head wq;
+
+	raw_spinlock_t lock;
+
+	struct task_struct *task;
+	int refs;
+};
+
+static bool swork_readable(struct sworker *worker)
+{
+	bool r;
+
+	if (kthread_should_stop())
+		return true;
+
+	raw_spin_lock_irq(&worker->lock);
+	r = !list_empty(&worker->events);
+	raw_spin_unlock_irq(&worker->lock);
+
+	return r;
+}
+
+static int swork_kthread(void *arg)
+{
+	struct sworker *worker = arg;
+
+	for (;;) {
+		swait_event_interruptible(worker->wq,
+					swork_readable(worker));
+		if (kthread_should_stop())
+			break;
+
+		raw_spin_lock_irq(&worker->lock);
+		while (!list_empty(&worker->events)) {
+			struct swork_event *sev;
+
+			sev = list_first_entry(&worker->events,
+					struct swork_event, item);
+			list_del(&sev->item);
+			raw_spin_unlock_irq(&worker->lock);
+
+			WARN_ON_ONCE(!test_and_clear_bit(SWORK_EVENT_PENDING,
+							 &sev->flags));
+			sev->func(sev);
+			raw_spin_lock_irq(&worker->lock);
+		}
+		raw_spin_unlock_irq(&worker->lock);
+	}
+	return 0;
+}
+
+static struct sworker *swork_create(void)
+{
+	struct sworker *worker;
+
+	worker = kzalloc(sizeof(*worker), GFP_KERNEL);
+	if (!worker)
+		return ERR_PTR(-ENOMEM);
+
+	INIT_LIST_HEAD(&worker->events);
+	raw_spin_lock_init(&worker->lock);
+	init_swait_queue_head(&worker->wq);
+
+	worker->task = kthread_run(swork_kthread, worker, "kswork");
+	if (IS_ERR(worker->task)) {
+		kfree(worker);
+		return ERR_PTR(-ENOMEM);
+	}
+
+	return worker;
+}
+
+static void swork_destroy(struct sworker *worker)
+{
+	kthread_stop(worker->task);
+
+	WARN_ON(!list_empty(&worker->events));
+	kfree(worker);
+}
+
+/**
+ * swork_queue - queue swork
+ *
+ * Returns %false if @work was already on a queue, %true otherwise.
+ *
+ * The work is queued and processed on a random CPU
+ */
+bool swork_queue(struct swork_event *sev)
+{
+	unsigned long flags;
+
+	if (test_and_set_bit(SWORK_EVENT_PENDING, &sev->flags))
+		return false;
+
+	raw_spin_lock_irqsave(&glob_worker->lock, flags);
+	list_add_tail(&sev->item, &glob_worker->events);
+	raw_spin_unlock_irqrestore(&glob_worker->lock, flags);
+
+	swake_up(&glob_worker->wq);
+	return true;
+}
+EXPORT_SYMBOL_GPL(swork_queue);
+
+/**
+ * swork_get - get an instance of the sworker
+ *
+ * Returns a negative error code if initialization of the worker failed,
+ * %0 otherwise.
+ *
+ */
+int swork_get(void)
+{
+	struct sworker *worker;
+
+	mutex_lock(&worker_mutex);
+	if (!glob_worker) {
+		worker = swork_create();
+		if (IS_ERR(worker)) {
+			mutex_unlock(&worker_mutex);
+			return -ENOMEM;
+		}
+
+		glob_worker = worker;
+	}
+
+	glob_worker->refs++;
+	mutex_unlock(&worker_mutex);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(swork_get);
+
+/**
+ * swork_put - puts an instance of the sworker
+ *
+ * Will destroy the sworker thread. This function must not be called until all
+ * queued events have been completed.
+ */
+void swork_put(void)
+{
+	mutex_lock(&worker_mutex);
+
+	glob_worker->refs--;
+	if (glob_worker->refs > 0)
+		goto out;
+
+	swork_destroy(glob_worker);
+	glob_worker = NULL;
+out:
+	mutex_unlock(&worker_mutex);
+}
+EXPORT_SYMBOL_GPL(swork_put);
--- a/kernel/sched/work-simple.c
+++ /dev/null
@@ -1,173 +0,0 @@
-/*
- * Copyright (C) 2014 BMW Car IT GmbH, Daniel Wagner daniel.wagner@bmw-carit.de
- *
- * Provides a framework for enqueuing callbacks from irq context
- * PREEMPT_RT_FULL safe. The callbacks are executed in kthread context.
- */
-
-#include <linux/swait.h>
-#include <linux/work-simple.h>
-#include <linux/kthread.h>
-#include <linux/slab.h>
-#include <linux/spinlock.h>
-#include <linux/export.h>
-
-#define SWORK_EVENT_PENDING     (1 << 0)
-
-static DEFINE_MUTEX(worker_mutex);
-static struct sworker *glob_worker;
-
-struct sworker {
-	struct list_head events;
-	struct swait_queue_head wq;
-
-	raw_spinlock_t lock;
-
-	struct task_struct *task;
-	int refs;
-};
-
-static bool swork_readable(struct sworker *worker)
-{
-	bool r;
-
-	if (kthread_should_stop())
-		return true;
-
-	raw_spin_lock_irq(&worker->lock);
-	r = !list_empty(&worker->events);
-	raw_spin_unlock_irq(&worker->lock);
-
-	return r;
-}
-
-static int swork_kthread(void *arg)
-{
-	struct sworker *worker = arg;
-
-	for (;;) {
-		swait_event_interruptible(worker->wq,
-					swork_readable(worker));
-		if (kthread_should_stop())
-			break;
-
-		raw_spin_lock_irq(&worker->lock);
-		while (!list_empty(&worker->events)) {
-			struct swork_event *sev;
-
-			sev = list_first_entry(&worker->events,
-					struct swork_event, item);
-			list_del(&sev->item);
-			raw_spin_unlock_irq(&worker->lock);
-
-			WARN_ON_ONCE(!test_and_clear_bit(SWORK_EVENT_PENDING,
-							 &sev->flags));
-			sev->func(sev);
-			raw_spin_lock_irq(&worker->lock);
-		}
-		raw_spin_unlock_irq(&worker->lock);
-	}
-	return 0;
-}
-
-static struct sworker *swork_create(void)
-{
-	struct sworker *worker;
-
-	worker = kzalloc(sizeof(*worker), GFP_KERNEL);
-	if (!worker)
-		return ERR_PTR(-ENOMEM);
-
-	INIT_LIST_HEAD(&worker->events);
-	raw_spin_lock_init(&worker->lock);
-	init_swait_queue_head(&worker->wq);
-
-	worker->task = kthread_run(swork_kthread, worker, "kswork");
-	if (IS_ERR(worker->task)) {
-		kfree(worker);
-		return ERR_PTR(-ENOMEM);
-	}
-
-	return worker;
-}
-
-static void swork_destroy(struct sworker *worker)
-{
-	kthread_stop(worker->task);
-
-	WARN_ON(!list_empty(&worker->events));
-	kfree(worker);
-}
-
-/**
- * swork_queue - queue swork
- *
- * Returns %false if @work was already on a queue, %true otherwise.
- *
- * The work is queued and processed on a random CPU
- */
-bool swork_queue(struct swork_event *sev)
-{
-	unsigned long flags;
-
-	if (test_and_set_bit(SWORK_EVENT_PENDING, &sev->flags))
-		return false;
-
-	raw_spin_lock_irqsave(&glob_worker->lock, flags);
-	list_add_tail(&sev->item, &glob_worker->events);
-	raw_spin_unlock_irqrestore(&glob_worker->lock, flags);
-
-	swake_up(&glob_worker->wq);
-	return true;
-}
-EXPORT_SYMBOL_GPL(swork_queue);
-
-/**
- * swork_get - get an instance of the sworker
- *
- * Returns an negative error code if the initialization if the worker did not
- * work, %0 otherwise.
- *
- */
-int swork_get(void)
-{
-	struct sworker *worker;
-
-	mutex_lock(&worker_mutex);
-	if (!glob_worker) {
-		worker = swork_create();
-		if (IS_ERR(worker)) {
-			mutex_unlock(&worker_mutex);
-			return -ENOMEM;
-		}
-
-		glob_worker = worker;
-	}
-
-	glob_worker->refs++;
-	mutex_unlock(&worker_mutex);
-
-	return 0;
-}
-EXPORT_SYMBOL_GPL(swork_get);
-
-/**
- * swork_put - puts an instance of the sworker
- *
- * Will destroy the sworker thread. This function must not be called until all
- * queued events have been completed.
- */
-void swork_put(void)
-{
-	mutex_lock(&worker_mutex);
-
-	glob_worker->refs--;
-	if (glob_worker->refs > 0)
-		goto out;
-
-	swork_destroy(glob_worker);
-	glob_worker = NULL;
-out:
-	mutex_unlock(&worker_mutex);
-}
-EXPORT_SYMBOL_GPL(swork_put);
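
For reference, a minimal hypothetical consumer of the swork API declared
in the header above (the frob_* names and the IRQ wiring are invented
for illustration):

#include <linux/interrupt.h>
#include <linux/swork.h>

static struct swork_event frob_event;

/* Runs in the kswork kthread, so it may sleep and take rtmutexes. */
static void frob_func(struct swork_event *sev)
{
        /* ... process the deferred event ... */
}

static int frob_init(void)
{
        int ret;

        ret = swork_get();      /* take a reference on the worker */
        if (ret)
                return ret;
        INIT_SWORK(&frob_event, frob_func);
        return 0;
}

static irqreturn_t frob_interrupt(int irq, void *dev_id)
{
        /* swork_queue() returns false if the event was already pending. */
        swork_queue(&frob_event);
        return IRQ_HANDLED;
}

static void frob_exit(void)
{
        /* All queued events must have completed before this. */
        swork_put();
}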


* Re: [patch] mm/memcontrol.c::mem_cgroup_migrate() - replace another local_irq_disable() w. local_lock_irq()
  2016-06-05  6:11 ` [patch] mm/memcontrol.c::mem_cgroup_migrate() - replace another local_irq_disable() w. local_lock_irq() Mike Galbraith
@ 2016-06-06  8:52   ` Sebastian Andrzej Siewior
  0 siblings, 0 replies; 7+ messages in thread
From: Sebastian Andrzej Siewior @ 2016-06-06  8:52 UTC
  To: Mike Galbraith, Thomas Gleixner; +Cc: LKML, linux-rt-users, Steven Rostedt

On 06/05/2016 08:11 AM, Mike Galbraith wrote:
> v4.6 grew a local_irq_disable() in mm/memcontrol.c::mem_cgroup_migrate().
> Convert it to use the existing local lock (event_lock) like the others.
> 
> Signed-off-by: Mike Galbraith <umgwanakikbuti@gmail.com>

Applied.

Sebastian


* Re: [patch rfc] work-simple: Rename work-simple.[ch] to swork.[ch] for consistency
  2016-06-05  6:16 ` [patch rfc] work-simple: Rename work-simple.[ch] to swork.[ch] for consistency Mike Galbraith
@ 2016-06-06  8:52   ` Sebastian Andrzej Siewior
  0 siblings, 0 replies; 7+ messages in thread
From: Sebastian Andrzej Siewior @ 2016-06-06  8:52 UTC
  To: Mike Galbraith, Thomas Gleixner; +Cc: LKML, linux-rt-users, Steven Rostedt

On 06/05/2016 08:16 AM, Mike Galbraith wrote:
> 
> The internal bits are already swork_blah, rename source to match. 
> 
> Signed-off-by: Mike Galbraith <umgwanakikbuti@gmail.com>

I don't mind the rename to keep things consistent. I merged the rename
into the original patch.

Sebastian


* [patch rfc] locking/rwsem: Add down_write_killable/killable_nested()
  2016-06-03 10:44 [ANNOUNCE] 4.6.1-rt3 Sebastian Andrzej Siewior
  2016-06-05  6:11 ` [patch] mm/memcontrol.c::mem_cgroup_migrate() - replace another local_irq_disable() w. local_lock_irq() Mike Galbraith
  2016-06-05  6:16 ` [patch rfc] work-simple: Rename work-simple.[ch] to swork.[ch] for consistency Mike Galbraith
@ 2016-06-07  4:19 ` Mike Galbraith
  2016-06-09 13:06   ` Sebastian Andrzej Siewior
  2 siblings, 1 reply; 7+ messages in thread
From: Mike Galbraith @ 2016-06-07  4:19 UTC
  To: Sebastian Andrzej Siewior, Thomas Gleixner
  Cc: LKML, linux-rt-users, Steven Rostedt

v4.7 added down_write_killable/killable_nested(), add them to -rt.

Signed-off-by: Mike Galbraith <mgalbraith@suse.de>
---
 include/linux/rwsem_rt.h |   12 ++++++++++++
 kernel/locking/rt.c      |   24 ++++++++++++++++++++++++
 2 files changed, 36 insertions(+)

--- a/include/linux/rwsem_rt.h
+++ b/include/linux/rwsem_rt.h
@@ -52,8 +52,10 @@ do {							\
 } while (0)
 
 extern void rt_down_write(struct rw_semaphore *rwsem);
+extern int __must_check rt_down_write_killable(struct rw_semaphore *rwsem);
 extern void rt_down_read_nested(struct rw_semaphore *rwsem, int subclass);
 extern void rt_down_write_nested(struct rw_semaphore *rwsem, int subclass);
+extern int __must_check rt_down_write_killable_nested(struct rw_semaphore *rwsem, int subclass);
 extern void rt_down_write_nested_lock(struct rw_semaphore *rwsem,
 				      struct lockdep_map *nest);
 extern void rt__down_read(struct rw_semaphore *rwsem);
@@ -100,6 +102,11 @@ static inline void down_write(struct rw_
 	rt_down_write(sem);
 }
 
+static inline int down_write_killable(struct rw_semaphore *sem)
+{
+	return rt_down_write_killable(sem);
+}
+
 static inline int down_write_trylock(struct rw_semaphore *sem)
 {
 	return rt_down_write_trylock(sem);
@@ -134,6 +141,11 @@ static inline void down_write_nested(str
 {
 	rt_down_write_nested(sem, subclass);
 }
+
+static inline int down_write_killable_nested(struct rw_semaphore *sem, int subclass)
+{
+	return rt_down_write_killable_nested(sem, subclass);
+}
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
 static inline void down_write_nest_lock(struct rw_semaphore *sem,
 		struct rw_semaphore *nest_lock)
--- a/kernel/locking/rt.c
+++ b/kernel/locking/rt.c
@@ -358,6 +358,18 @@ void  rt_down_write(struct rw_semaphore
 }
 EXPORT_SYMBOL(rt_down_write);
 
+int  rt_down_write_killable(struct rw_semaphore *rwsem)
+{
+	int ret;
+
+	rwsem_acquire(&rwsem->dep_map, 0, 0, _RET_IP_);
+	ret = rt_mutex_lock_killable(&rwsem->lock);
+	if (ret)
+		rwsem_release(&rwsem->dep_map, 1, _RET_IP_);
+	return ret;
+}
+EXPORT_SYMBOL(rt_down_write_killable);
+
 void  rt_down_write_nested(struct rw_semaphore *rwsem, int subclass)
 {
 	rwsem_acquire(&rwsem->dep_map, subclass, 0, _RET_IP_);
@@ -365,6 +377,18 @@ void  rt_down_write_nested(struct rw_sem
 }
 EXPORT_SYMBOL(rt_down_write_nested);
 
+int  rt_down_write_killable_nested(struct rw_semaphore *rwsem, int subclass)
+{
+	int ret;
+
+	rwsem_acquire(&rwsem->dep_map, subclass, 0, _RET_IP_);
+	ret = rt_mutex_lock_killable(&rwsem->lock);
+	if (ret)
+		rwsem_release(&rwsem->dep_map, 1, _RET_IP_);
+	return ret;
+}
+EXPORT_SYMBOL(rt_down_write_killable_nested);
+
 void rt_down_write_nested_lock(struct rw_semaphore *rwsem,
 			       struct lockdep_map *nest)
 {
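
As a usage note: mainline down_write_killable() returns 0 on success and
-EINTR if a fatal signal arrives while waiting, and the rt_mutex-based
versions above preserve that. A minimal hypothetical caller (the frob
name is invented):

static int frob_modify(struct rw_semaphore *sem)
{
        if (down_write_killable(sem))
                return -EINTR;  /* fatal signal while waiting */

        /* ... modify state protected by sem ... */

        up_write(sem);
        return 0;
}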


* Re: [patch rfc] locking/rwsem: Add down_write_killable/killable_nested()
  2016-06-07  4:19 ` [patch rfc] locking/rwsem: Add down_write_killable/killable_nested() Mike Galbraith
@ 2016-06-09 13:06   ` Sebastian Andrzej Siewior
  0 siblings, 0 replies; 7+ messages in thread
From: Sebastian Andrzej Siewior @ 2016-06-09 13:06 UTC
  To: Mike Galbraith; +Cc: Thomas Gleixner, LKML, linux-rt-users, Steven Rostedt

* Mike Galbraith | 2016-06-07 06:19:19 [+0200]:

>v4.7 added down_write_killable/killable_nested(), add them to -rt.

looks good, I will postpone this until v4.8-RT

>Signed-off-by: Mike Galbraith <mgalbraith@suse.de>

Sebastian
