From mboxrd@z Thu Jan 1 00:00:00 1970
Return-Path:
Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand
	id S1755874AbbBPSDk (ORCPT );
	Mon, 16 Feb 2015 13:03:40 -0500
Received: from www.linutronix.de ([62.245.132.108]:39054 "EHLO
	Galois.linutronix.de" rhost-flags-OK-OK-OK-OK) by vger.kernel.org
	with ESMTP id S1753436AbbBPSDj convert rfc822-to-8bit (ORCPT );
	Mon, 16 Feb 2015 13:03:39 -0500
Date: Mon, 16 Feb 2015 19:03:37 +0100
From: Sebastian Andrzej Siewior
To: Daniel Wagner
Cc: linux-rt-users@vger.kernel.org, linux-kernel@vger.kernel.org
Message-ID: <20150216180337.GI21649@linutronix.de>
MIME-Version: 1.0
Content-Type: text/plain; charset=utf-8
Content-Disposition: inline
Content-Transfer-Encoding: 8BIT
User-Agent: Mutt/1.5.21 (2010-09-15)
Sender: linux-kernel-owner@vger.kernel.org
List-ID:
X-Mailing-List: linux-kernel@vger.kernel.org

rostedt@goodmis.org, paul.gortmaker@windriver.com
Bcc:
Subject: [PATCH RT] work-simple: Simple work queue implementation
Reply-To:

This is the swork patch, which has been in -RT since v3.18. Two users so far…
A minimal usage sketch, not part of the patch, is appended after the diff.

From: Daniel Wagner

Provides a framework for enqueuing callbacks from irq context in a
PREEMPT_RT_FULL-safe way. The callbacks are executed in kthread context.

Based on wait-simple.

Signed-off-by: Daniel Wagner
Cc: Sebastian Andrzej Siewior
---
 include/linux/work-simple.h |   24 ++++++
 kernel/sched/Makefile       |    2
 kernel/sched/work-simple.c  |  176 ++++++++++++++++++++++++++++++++++++++++++++
 3 files changed, 201 insertions(+), 1 deletion(-)
 create mode 100644 include/linux/work-simple.h
 create mode 100644 kernel/sched/work-simple.c

--- /dev/null
+++ b/include/linux/work-simple.h
@@ -0,0 +1,24 @@
+#ifndef _LINUX_SWORK_H
+#define _LINUX_SWORK_H
+
+#include <linux/list.h>
+
+struct swork_event {
+	struct list_head item;
+	unsigned long flags;
+	void (*func)(struct swork_event *);
+};
+
+static inline void INIT_SWORK(struct swork_event *event,
+			      void (*func)(struct swork_event *))
+{
+	event->flags = 0;
+	event->func = func;
+}
+
+bool swork_queue(struct swork_event *sev);
+
+int swork_get(void);
+void swork_put(void);
+
+#endif /* _LINUX_SWORK_H */
--- a/kernel/sched/Makefile
+++ b/kernel/sched/Makefile
@@ -13,7 +13,7 @@ endif
 
 obj-y += core.o proc.o clock.o cputime.o
 obj-y += idle_task.o fair.o rt.o deadline.o stop_task.o
-obj-y += wait.o wait-simple.o completion.o idle.o
+obj-y += wait.o wait-simple.o work-simple.o completion.o idle.o
 obj-$(CONFIG_SMP) += cpupri.o cpudeadline.o
 obj-$(CONFIG_SCHED_AUTOGROUP) += auto_group.o
 obj-$(CONFIG_SCHEDSTATS) += stats.o
--- /dev/null
+++ b/kernel/sched/work-simple.c
@@ -0,0 +1,176 @@
+/*
+ * Copyright (C) 2014 BMW Car IT GmbH, Daniel Wagner daniel.wagner@bmw-carit.de
+ *
+ * Provides a framework for enqueuing callbacks from irq context in a
+ * PREEMPT_RT_FULL-safe way. The callbacks are executed in kthread context.
+ */
+
+#include <linux/wait-simple.h>
+#include <linux/work-simple.h>
+#include <linux/kthread.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+
+#define SWORK_EVENT_PENDING	(1 << 0)
+
+static DEFINE_MUTEX(worker_mutex);
+static struct sworker *glob_worker;
+
+struct sworker {
+	struct list_head events;
+	struct swait_head wq;
+
+	raw_spinlock_t lock;
+
+	struct task_struct *task;
+	int refs;
+};
+
+static bool swork_readable(struct sworker *worker)
+{
+	bool r;
+
+	if (kthread_should_stop())
+		return true;
+
+	raw_spin_lock(&worker->lock);
+	r = !list_empty(&worker->events);
+	raw_spin_unlock(&worker->lock);
+
+	return r;
+}
+
+static int swork_kthread(void *arg)
+{
+	struct sworker *worker = arg;
+
+	pr_info("swork_kthread enter\n");
+
+	for (;;) {
+		swait_event_interruptible(worker->wq,
+					  swork_readable(worker));
+		if (kthread_should_stop())
+			break;
+
+		raw_spin_lock(&worker->lock);
+		while (!list_empty(&worker->events)) {
+			struct swork_event *sev;
+
+			sev = list_first_entry(&worker->events,
+					       struct swork_event, item);
+			list_del(&sev->item);
+			raw_spin_unlock(&worker->lock);
+
+			WARN_ON_ONCE(!test_and_clear_bit(SWORK_EVENT_PENDING,
+							 &sev->flags));
+			sev->func(sev);
+			raw_spin_lock(&worker->lock);
+		}
+		raw_spin_unlock(&worker->lock);
+	}
+
+	pr_info("swork_kthread exit\n");
+	return 0;
+}
+
+static struct sworker *swork_create(void)
+{
+	struct sworker *worker;
+
+	worker = kzalloc(sizeof(*worker), GFP_KERNEL);
+	if (!worker)
+		return ERR_PTR(-ENOMEM);
+
+	INIT_LIST_HEAD(&worker->events);
+	raw_spin_lock_init(&worker->lock);
+	init_swait_head(&worker->wq);
+
+	worker->task = kthread_run(swork_kthread, worker, "kswork");
+	if (IS_ERR(worker->task)) {
+		kfree(worker);
+		return ERR_PTR(-ENOMEM);
+	}
+
+	return worker;
+}
+
+static void swork_destroy(struct sworker *worker)
+{
+	kthread_stop(worker->task);
+
+	WARN_ON(!list_empty(&worker->events));
+	kfree(worker);
+}
+
+/**
+ * swork_queue - queue swork
+ *
+ * Returns %false if @sev was already on a queue, %true otherwise.
+ *
+ * The work is queued and processed on a random CPU.
+ */
+bool swork_queue(struct swork_event *sev)
+{
+	if (test_and_set_bit(SWORK_EVENT_PENDING, &sev->flags))
+		return false;
+
+	WARN_ON(irqs_disabled());
+
+	raw_spin_lock(&glob_worker->lock);
+	list_add_tail(&sev->item, &glob_worker->events);
+	raw_spin_unlock(&glob_worker->lock);
+
+	swait_wake(&glob_worker->wq);
+	return true;
+}
+EXPORT_SYMBOL_GPL(swork_queue);
+
+/**
+ * swork_get - get an instance of the sworker
+ *
+ * Returns a negative error code if the initialization of the worker
+ * failed, %0 otherwise.
+ */
+int swork_get(void)
+{
+	struct sworker *worker;
+
+	mutex_lock(&worker_mutex);
+	if (!glob_worker) {
+		worker = swork_create();
+		if (IS_ERR(worker)) {
+			mutex_unlock(&worker_mutex);
+			return -ENOMEM;
+		}
+
+		glob_worker = worker;
+	}
+
+	glob_worker->refs++;
+	mutex_unlock(&worker_mutex);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(swork_get);
+
+/**
+ * swork_put - puts an instance of the sworker
+ *
+ * Drops a reference; the sworker thread is destroyed when the last
+ * reference is put. This function must not be called until all queued
+ * events have been completed.
+ */
+void swork_put(void)
+{
+	mutex_lock(&worker_mutex);
+
+	glob_worker->refs--;
+	if (glob_worker->refs > 0)
+		goto out;
+
+	swork_destroy(glob_worker);
+	glob_worker = NULL;
+out:
+	mutex_unlock(&worker_mutex);
+}
+EXPORT_SYMBOL_GPL(swork_put);
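
For reference, a minimal usage sketch, not part of the patch: a
hypothetical driver (my_dev, my_dev_work, my_irq_handler and my_dev_init
are made-up names) takes a reference on the worker at init time, then
defers work from its (threaded) interrupt handler into kthread context:

	#include <linux/interrupt.h>
	#include <linux/kernel.h>
	#include <linux/work-simple.h>

	struct my_dev {
		struct swork_event sev;
		/* ... device state ... */
	};

	static void my_dev_work(struct swork_event *sev)
	{
		struct my_dev *mydev = container_of(sev, struct my_dev, sev);

		/* Runs in the "kswork" kthread; sleeping is allowed here. */
		(void)mydev;
	}

	static irqreturn_t my_irq_handler(int irq, void *data)
	{
		struct my_dev *mydev = data;

		/*
		 * Safe from (threaded) irq context on PREEMPT_RT_FULL;
		 * returns false if the event is already queued.
		 */
		swork_queue(&mydev->sev);
		return IRQ_HANDLED;
	}

	static int my_dev_init(struct my_dev *mydev)
	{
		int ret;

		/* The first swork_get() creates the global "kswork" thread. */
		ret = swork_get();
		if (ret)
			return ret;

		INIT_SWORK(&mydev->sev, my_dev_work);
		return 0;
	}

	static void my_dev_exit(struct my_dev *mydev)
	{
		/* The last swork_put() stops and frees the "kswork" thread. */
		swork_put();
	}

Note the design: a single global, reference-counted kthread is shared by
all users, so callbacks from different users are serialized and may run
on any CPU; per-event concurrency is limited to the PENDING bit, which
coalesces repeated swork_queue() calls until the callback has run.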