[ANNOUNCE] 4.14.175-rt81

Message ID: fcc3709169c47351b6e6fd9bf5fa0162db5b4bf1.camel@kernel.org

Commit Message

Tom Zanussi April 17, 2020, 7:52 p.m. UTC
Hello RT Folks!

I'm pleased to announce the 4.14.175-rt81 stable release.

You can get this release via the git tree at:

  git://git.kernel.org/pub/scm/linux/kernel/git/rt/linux-stable-rt.git

  branch: v4.14-rt
  Head SHA1: aad6ed737a078d30926be3205fa67b03018a6ded

Or to build 4.14.175-rt81 directly, the following patches should be applied:

  https://www.kernel.org/pub/linux/kernel/v4.x/linux-4.14.tar.xz

  https://www.kernel.org/pub/linux/kernel/v4.x/patch-4.14.175.xz

  https://www.kernel.org/pub/linux/kernel/projects/rt/4.14/patch-4.14.175-rt81.patch.xz


You can also build from 4.14.175-rt80 by applying the incremental patch:

  https://www.kernel.org/pub/linux/kernel/projects/rt/4.14/incr/patch-4.14.175-rt80-rt81.patch.xz

Enjoy!

   Tom

Changes from v4.14.175-rt80:
---

Steven Rostedt (VMware) (1):
      irq_work: Fix checking of IRQ_WORK_LAZY flag set on non PREEMPT_RT

Tom Zanussi (1):
      Linux 4.14.175-rt81
---
 kernel/irq_work.c | 12 +++++++++---
 localversion-rt   |  2 +-
 2 files changed, 10 insertions(+), 4 deletions(-)
---
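For context, the irq_work change in this release consolidates the queueing decision into a single use_lazy_list() helper, so that work flagged IRQ_WORK_LAZY always lands on the lazy list; the old irq_work_queue_on() path only checked the PREEMPT_RT_FULL/IRQ_WORK_HARD_IRQ case, which is what the fix addresses. The standalone userspace sketch below models just that decision logic; the flag bit values and the MODEL_PREEMPT_RT_FULL toggle are illustrative stand-ins, not the kernel definitions:

  #include <stdbool.h>
  #include <stdio.h>

  /* Illustrative stand-ins; the real bit values live in the kernel's
   * irq_work flag definitions and the RT patch series. */
  #define IRQ_WORK_LAZY      (1 << 2)
  #define IRQ_WORK_HARD_IRQ  (1 << 3)

  /* Set to 1 to model a CONFIG_PREEMPT_RT_FULL=y kernel. */
  #define MODEL_PREEMPT_RT_FULL 0

  struct irq_work {
          unsigned long flags;
  };

  /* Mirrors the helper added by the patch: the lazy list is chosen when
   * the work is explicitly LAZY, or on RT when it is not marked HARD_IRQ. */
  static bool use_lazy_list(const struct irq_work *work)
  {
          return (MODEL_PREEMPT_RT_FULL && !(work->flags & IRQ_WORK_HARD_IRQ))
                  || (work->flags & IRQ_WORK_LAZY);
  }

  int main(void)
  {
          struct irq_work lazy  = { .flags = IRQ_WORK_LAZY };
          struct irq_work hard  = { .flags = IRQ_WORK_HARD_IRQ };
          struct irq_work plain = { .flags = 0 };

          /* The case the fix addresses: LAZY work must pick the lazy list
           * even when MODEL_PREEMPT_RT_FULL is 0. */
          printf("lazy  -> %s\n", use_lazy_list(&lazy)  ? "lazy_list" : "raised_list");
          printf("hard  -> %s\n", use_lazy_list(&hard)  ? "lazy_list" : "raised_list");
          printf("plain -> %s\n", use_lazy_list(&plain) ? "lazy_list" : "raised_list");
          return 0;
  }

Built with any C compiler, the model shows LAZY work selecting the lazy list whether or not the RT toggle is set, while plain work only does so on RT and HARD_IRQ work never does.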

Patch

diff --git a/kernel/irq_work.c b/kernel/irq_work.c
index 2899ba0d23d1..838b56cef5fe 100644
--- a/kernel/irq_work.c
+++ b/kernel/irq_work.c
@@ -57,6 +57,12 @@ void __weak arch_irq_work_raise(void)
 	 */
 }
 
+static inline bool use_lazy_list(struct irq_work *work)
+{
+	return (IS_ENABLED(CONFIG_PREEMPT_RT_FULL) && !(work->flags & IRQ_WORK_HARD_IRQ))
+		|| (work->flags & IRQ_WORK_LAZY);
+}
+
 #ifdef CONFIG_SMP
 /*
  * Enqueue the irq_work @work on @cpu unless it's already pending
@@ -78,7 +84,7 @@ bool irq_work_queue_on(struct irq_work *work, int cpu)
 	if (!irq_work_claim(work))
 		return false;
 
-	if (IS_ENABLED(CONFIG_PREEMPT_RT_FULL) && !(work->flags & IRQ_WORK_HARD_IRQ))
+	if (use_lazy_list(work))
 		list = &per_cpu(lazy_list, cpu);
 	else
 		list = &per_cpu(raised_list, cpu);
@@ -95,7 +101,7 @@ EXPORT_SYMBOL_GPL(irq_work_queue_on);
 bool irq_work_queue(struct irq_work *work)
 {
 	struct llist_head *list;
-	bool lazy_work, realtime = IS_ENABLED(CONFIG_PREEMPT_RT_FULL);
+	bool lazy_work;
 
 	/* Only queue if not already pending */
 	if (!irq_work_claim(work))
@@ -106,7 +112,7 @@ bool irq_work_queue(struct irq_work *work)
 
 	lazy_work = work->flags & IRQ_WORK_LAZY;
 
-	if (lazy_work || (realtime && !(work->flags & IRQ_WORK_HARD_IRQ)))
+	if (use_lazy_list(work))
 		list = this_cpu_ptr(&lazy_list);
 	else
 		list = this_cpu_ptr(&raised_list);
diff --git a/localversion-rt b/localversion-rt
index 5ba2c2091cf9..8269ec129c0c 100644
--- a/localversion-rt
+++ b/localversion-rt
@@ -1 +1 @@
--rt80
+-rt81