* [PATCH 1/5] cobalt/events: add auto-clear feature
@ 2022-04-06 15:56 Philippe Gerum
  2022-04-06 15:56 ` [PATCH 2/5] cobalt/events: add single-waiter delivery mode Philippe Gerum
                   ` (4 more replies)
  0 siblings, 5 replies; 15+ messages in thread
From: Philippe Gerum @ 2022-04-06 15:56 UTC (permalink / raw)
  To: xenomai

From: Philippe Gerum <rpm@xenomai.org>

The current implementation does not atomically consume+clear the event
set to be received by the waiter(s), which makes it useless for
anything but a plain one-time latch due to the race window this opens
with a consume[A]->signal[B]->clear[A] sequence.

To address this issue, let's provide the auto-clear feature with
__cobalt_event_wait().

This change affects the ABI by adding the auto-clear mode as an opt-in
feature, enabled by passing COBALT_EVENT_AUTOCLEAR to
cobalt_event_init().
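
For illustration, a minimal usage sketch (hypothetical caller code, it only
mirrors the smokey test added below):

    cobalt_event_t event;
    unsigned int bits;

    /* Opt in to atomic consume+clear on successful waits. */
    cobalt_event_init(&event, 0, COBALT_EVENT_FIFO|COBALT_EVENT_AUTOCLEAR);

    /* Waiter side: the bits returned in 'bits' are cleared from the
     * group atomically, closing the consume->signal->clear race. */
    cobalt_event_wait(&event, 0x1, &bits, COBALT_EVENT_ANY, NULL);

    /* Sender side. */
    cobalt_event_post(&event, 0x1);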

Signed-off-by: Philippe Gerum <rpm@xenomai.org>
---
 configure.ac                        |   1 +
 include/cobalt/uapi/event.h         |   7 +-
 kernel/cobalt/posix/event.c         |  17 +++-
 testsuite/smokey/Makefile.am        |   2 +
 testsuite/smokey/events/Makefile.am |   8 ++
 testsuite/smokey/events/events.c    | 123 ++++++++++++++++++++++++++++
 6 files changed, 152 insertions(+), 6 deletions(-)
 create mode 100644 testsuite/smokey/events/Makefile.am
 create mode 100644 testsuite/smokey/events/events.c

diff --git a/configure.ac b/configure.ac
index 5611b5b8db..15d3e46e9a 100644
--- a/configure.ac
+++ b/configure.ac
@@ -1002,6 +1002,7 @@ AC_CONFIG_FILES([ \
 	testsuite/smokey/gdb/Makefile \
 	testsuite/smokey/y2038/Makefile \
 	testsuite/smokey/can/Makefile
+	testsuite/smokey/events/Makefile \
 	testsuite/clocktest/Makefile \
 	testsuite/xeno-test/Makefile \
 	utils/Makefile \
diff --git a/include/cobalt/uapi/event.h b/include/cobalt/uapi/event.h
index 8710e8e25f..14ddbcf567 100644
--- a/include/cobalt/uapi/event.h
+++ b/include/cobalt/uapi/event.h
@@ -30,9 +30,10 @@ struct cobalt_event_state {
 struct cobalt_event;
 
 /* Creation flags. */
-#define COBALT_EVENT_FIFO    0x0
-#define COBALT_EVENT_PRIO    0x1
-#define COBALT_EVENT_SHARED  0x2
+#define COBALT_EVENT_FIFO       0x0
+#define COBALT_EVENT_PRIO       0x1
+#define COBALT_EVENT_SHARED     0x2
+#define COBALT_EVENT_AUTOCLEAR  0x4
 
 /* Wait mode. */
 #define COBALT_EVENT_ALL  0x0
diff --git a/kernel/cobalt/posix/event.c b/kernel/cobalt/posix/event.c
index 052c686050..0a1236ad73 100644
--- a/kernel/cobalt/posix/event.c
+++ b/kernel/cobalt/posix/event.c
@@ -146,7 +146,7 @@ int __cobalt_event_wait(struct cobalt_event_shadow __user *u_event,
 	if (bits == 0) {
 		/*
 		 * Special case: we don't wait for any event, we only
-		 * return the current flag group value.
+		 * return the pending ones without consuming them.
 		 */
 		rbits = state->value;
 		goto out;
@@ -155,8 +155,11 @@ int __cobalt_event_wait(struct cobalt_event_shadow __user *u_event,
 	state->flags |= COBALT_EVENT_PENDED;
 	rbits = state->value & bits;
 	testval = mode & COBALT_EVENT_ANY ? rbits : bits;
-	if (rbits && rbits == testval)
+	if (rbits && rbits == testval) {
+		if (event->flags & COBALT_EVENT_AUTOCLEAR)
+			state->value &= ~rbits;
 		goto done;
+	}
 
 	if (timeout == XN_NONBLOCK) {
 		ret = -EWOULDBLOCK;
@@ -239,7 +242,7 @@ COBALT_SYSCALL(event_wait64, primary,
 COBALT_SYSCALL(event_sync, current,
 	       (struct cobalt_event_shadow __user *u_event))
 {
-	unsigned int bits, waitval, testval;
+	unsigned int bits, waitval, testval, consumed = 0;
 	struct xnthread_wait_context *wc;
 	struct cobalt_event_state *state;
 	struct event_wait_context *ewc;
@@ -275,10 +278,18 @@ COBALT_SYSCALL(event_sync, current,
 		if (waitval && waitval == testval) {
 			state->nwaiters--;
 			ewc->value = waitval;
+			consumed |= waitval;
 			xnsynch_wakeup_this_sleeper(&event->synch, p);
 		}
 	}
 
+	/*
+	 * If some flags were consumed and auto-clear is enabled,
+	 * clear the former.
+	 */
+	if (consumed && (event->flags & COBALT_EVENT_AUTOCLEAR))
+		state->value &= ~consumed;
+
 	xnsched_run();
 out:
 	xnlock_put_irqrestore(&nklock, s);
diff --git a/testsuite/smokey/Makefile.am b/testsuite/smokey/Makefile.am
index 4a9773f586..3f0521282b 100644
--- a/testsuite/smokey/Makefile.am
+++ b/testsuite/smokey/Makefile.am
@@ -14,6 +14,7 @@ COBALT_SUBDIRS = 	\
 	bufp		\
 	can		\
 	cpu-affinity	\
+	events		\
 	fpu-stress	\
 	gdb		\
 	iddp		\
@@ -53,6 +54,7 @@ DIST_SUBDIRS = 		\
 	can		\
 	cpu-affinity	\
 	dlopen		\
+	events		\
 	fpu-stress	\
 	gdb		\
 	iddp		\
diff --git a/testsuite/smokey/events/Makefile.am b/testsuite/smokey/events/Makefile.am
new file mode 100644
index 0000000000..37b9f92d7f
--- /dev/null
+++ b/testsuite/smokey/events/Makefile.am
@@ -0,0 +1,8 @@
+
+noinst_LIBRARIES = libevents.a
+
+libevents_a_SOURCES = events.c
+
+libevents_a_CPPFLAGS = 		\
+	@XENO_USER_CFLAGS@	\
+	-I$(top_srcdir)/include
diff --git a/testsuite/smokey/events/events.c b/testsuite/smokey/events/events.c
new file mode 100644
index 0000000000..79711795ac
--- /dev/null
+++ b/testsuite/smokey/events/events.c
@@ -0,0 +1,123 @@
+#include <stdlib.h>
+#include <unistd.h>
+#include <smokey/smokey.h>
+#include <cobalt/time.h>
+#include <cobalt/semaphore.h>
+#include <cobalt/pthread.h>
+#include <cobalt/sys/cobalt.h>
+#include <boilerplate/time.h>
+
+smokey_test_plugin(events,
+		   SMOKEY_NOARGS,
+		   "Check event flag group services"
+);
+
+#define LOW_PRIO	1
+#define HIGH_PRIO	2
+
+struct test_context {
+	cobalt_event_t event;
+	sem_t start;
+	sem_t sem;
+};
+
+static void *event_receiver(void *arg)
+{
+	struct test_context *p = arg;
+	struct timespec now, timeout;
+	unsigned int bits;
+	int ret;
+
+	sem_wait(&p->start);
+	clock_gettime(CLOCK_MONOTONIC, &now);
+	timespec_adds(&timeout, &now, 400000000); /* 400ms */
+
+	/* Sender is quiet: expect timeout. */
+	if (!__F(ret, cobalt_event_wait(&p->event, -1U, &bits,
+				COBALT_EVENT_ANY, &timeout)) ||
+		!__Tassert(ret == -ETIMEDOUT))
+		goto fail;
+
+	sem_post(&p->sem);
+
+	/* Sender should have sent 0x12121212. */
+	clock_gettime(CLOCK_MONOTONIC, &now);
+	timespec_adds(&timeout, &now, 400000000); /* 400ms */
+	if (!__T(ret, cobalt_event_wait(&p->event, -1U, &bits,
+				COBALT_EVENT_ANY, &timeout)))
+		goto fail;
+
+	if (!__Tassert(bits == 0x12121212))
+		goto fail;
+
+	sem_post(&p->sem);
+
+	/* Sender should send 0x41414141 in a moment. */
+	if (!__T(ret, cobalt_event_wait(&p->event, 0x41414141, &bits,
+				COBALT_EVENT_ALL, NULL)))
+		goto fail;
+
+	if (!__Tassert(bits == 0x41414141))
+		goto fail;
+
+	sem_post(&p->sem);
+
+	return NULL;
+fail:
+	exit(-ret);
+}
+
+static int run_events(struct smokey_test *t, int argc, char *const argv[])
+{
+	struct sched_param param;
+	struct test_context c;
+	pthread_attr_t attr;
+	void *status = NULL;
+	pthread_t receiver;
+	int ret;
+
+	param.sched_priority = HIGH_PRIO;
+	if (!__Tassert(pthread_setschedparam(pthread_self(),
+			SCHED_FIFO, &param) == 0))
+		return -EPERM;
+
+	sem_init(&c.sem, 0, 0);
+	sem_init(&c.start, 0, 0);
+
+	if (!__T(ret, cobalt_event_init(&c.event, 0,
+		COBALT_EVENT_FIFO|COBALT_EVENT_AUTOCLEAR)))
+		return ret;
+
+	param.sched_priority = LOW_PRIO;
+	pthread_attr_init(&attr);
+	pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_JOINABLE);
+	pthread_attr_setinheritsched(&attr, PTHREAD_EXPLICIT_SCHED);
+	pthread_attr_setschedpolicy(&attr, SCHED_FIFO);
+	pthread_attr_setschedparam(&attr, &param);
+
+	if (!__T(ret, pthread_create(&receiver, &attr, event_receiver, &c)))
+		return ret;
+
+	sem_post(&c.start);
+	sem_wait(&c.sem);
+
+	if (!__T(ret, cobalt_event_post(&c.event, 0x12121212)))
+		return ret;
+
+	sem_wait(&c.sem);
+	usleep(1000);
+
+	if (!__T(ret, cobalt_event_post(&c.event, 0x41414141)))
+		return ret;
+
+	sem_wait(&c.sem);
+
+	pthread_join(receiver, &status);
+	if (!__Tassert(status == NULL))
+		return -EPROTO;
+
+	if (!__T(ret, cobalt_event_destroy(&c.event)))
+		return ret;
+
+	return 0;
+}
-- 
2.34.1




* [PATCH 2/5] cobalt/events: add single-waiter delivery mode
  2022-04-06 15:56 [PATCH 1/5] cobalt/events: add auto-clear feature Philippe Gerum
@ 2022-04-06 15:56 ` Philippe Gerum
  2022-04-07  9:49   ` Jan Kiszka
  2022-04-06 15:56 ` [PATCH 3/5] copperplate, testsuite: convert to cobalt_event_broadcast() Philippe Gerum
                   ` (3 subsequent siblings)
  4 siblings, 1 reply; 15+ messages in thread
From: Philippe Gerum @ 2022-04-06 15:56 UTC (permalink / raw)
  To: xenomai

From: Philippe Gerum <rpm@xenomai.org>

Allow for sending a set of event bits to the heading waiter only,
instead of broadcasting them to all waiters.

This change affects the ABI, since we add a set of operation flags to
the cobalt_event_sync service, to pass the new COBALT_EVENT_BCAST
flag. It also affects the internal API as follows:

- the new cobalt_event_broadcast() call behaves like
  cobalt_event_post() formerly did, which is broadcasting the event to
  all waiters atomically.

- the new cobalt_event_signal() call implements the single-waiter
  delivery mode we introduce.

- the former cobalt_event_post() is now a wrapper to
  cobalt_event_broadcast(), marked with a deprecation flag.
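
Summing up the resulting user-facing calls (illustrative sketch only, not
part of the diff):

    cobalt_event_signal(&event, 0x1);    /* wake the first satisfied waiter only */
    cobalt_event_broadcast(&event, 0x1); /* wake every satisfied waiter */
    cobalt_event_post(&event, 0x1);      /* legacy alias for broadcast */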

Signed-off-by: Philippe Gerum <rpm@xenomai.org>
---
 include/cobalt/sys/cobalt.h | 12 +++++++++++-
 include/cobalt/uapi/event.h | 10 +++++-----
 kernel/cobalt/posix/event.c | 37 +++++++++++++++----------------------
 kernel/cobalt/posix/event.h |  3 ++-
 lib/cobalt/internal.c       | 20 ++++++++++++--------
 5 files changed, 45 insertions(+), 37 deletions(-)

diff --git a/include/cobalt/sys/cobalt.h b/include/cobalt/sys/cobalt.h
index 46096e8801..1035a29d52 100644
--- a/include/cobalt/sys/cobalt.h
+++ b/include/cobalt/sys/cobalt.h
@@ -106,9 +106,19 @@ int cobalt_event_init(cobalt_event_t *event,
 		      unsigned int value,
 		      int flags);
 
-int cobalt_event_post(cobalt_event_t *event,
+int cobalt_event_signal(cobalt_event_t *event,
 		      unsigned int bits);
 
+int cobalt_event_broadcast(cobalt_event_t *event,
+			unsigned int bits);
+
+/* Backward compatibility with 3.2.x and earlier. */
+static inline int cobalt_event_post(cobalt_event_t *event,
+				unsigned int bits)
+{
+	return cobalt_event_broadcast(event, bits);
+}
+
 int cobalt_event_wait(cobalt_event_t *event,
 		      unsigned int bits,
 		      unsigned int *bits_r,
diff --git a/include/cobalt/uapi/event.h b/include/cobalt/uapi/event.h
index 14ddbcf567..52b71d4e3b 100644
--- a/include/cobalt/uapi/event.h
+++ b/include/cobalt/uapi/event.h
@@ -22,9 +22,6 @@
 
 struct cobalt_event_state {
 	__u32 value;
-	__u32 flags;
-#define COBALT_EVENT_PENDED  0x1
-	__u32 nwaiters;
 };
 
 struct cobalt_event;
@@ -36,8 +33,11 @@ struct cobalt_event;
 #define COBALT_EVENT_AUTOCLEAR  0x4
 
 /* Wait mode. */
-#define COBALT_EVENT_ALL  0x0
-#define COBALT_EVENT_ANY  0x1
+#define COBALT_EVENT_ALL    0x0
+#define COBALT_EVENT_ANY    0x1
+
+/* Sync mode. */
+#define COBALT_EVENT_BCAST  0x1
 
 struct cobalt_event_shadow {
 	__u32 state_offset;
diff --git a/kernel/cobalt/posix/event.c b/kernel/cobalt/posix/event.c
index 0a1236ad73..4433e733fd 100644
--- a/kernel/cobalt/posix/event.c
+++ b/kernel/cobalt/posix/event.c
@@ -85,8 +85,6 @@ COBALT_SYSCALL(event_init, current,
 	synflags = (flags & COBALT_EVENT_PRIO) ? XNSYNCH_PRIO : XNSYNCH_FIFO;
 	xnsynch_init(&event->synch, synflags, NULL);
 	state->value = value;
-	state->flags = 0;
-	state->nwaiters = 0;
 	stateoff = cobalt_umm_offset(umm, state);
 	XENO_BUG_ON(COBALT, stateoff != (__u32)stateoff);
 
@@ -152,37 +150,31 @@ int __cobalt_event_wait(struct cobalt_event_shadow __user *u_event,
 		goto out;
 	}
 
-	state->flags |= COBALT_EVENT_PENDED;
 	rbits = state->value & bits;
 	testval = mode & COBALT_EVENT_ANY ? rbits : bits;
 	if (rbits && rbits == testval) {
 		if (event->flags & COBALT_EVENT_AUTOCLEAR)
 			state->value &= ~rbits;
-		goto done;
+		goto out;
 	}
 
 	if (timeout == XN_NONBLOCK) {
 		ret = -EWOULDBLOCK;
-		goto done;
+		goto out;
 	}
 
 	ewc.value = bits;
 	ewc.mode = mode;
 	xnthread_prepare_wait(&ewc.wc);
-	state->nwaiters++;
 	info = xnsynch_sleep_on(&event->synch, timeout, tmode);
 	if (info & XNRMID) {
 		ret = -EIDRM;
 		goto out;
 	}
-	if (info & (XNBREAK|XNTIMEO)) {
-		state->nwaiters--;
+	if (info & (XNBREAK|XNTIMEO))
 		ret = (info & XNBREAK) ? -EINTR : -ETIMEDOUT;
-	} else
+	else
 		rbits = ewc.value;
-done:
-	if (!xnsynch_pended_p(&event->synch))
-		state->flags &= ~COBALT_EVENT_PENDED;
 out:
 	xnlock_put_irqrestore(&nklock, s);
 
@@ -240,9 +232,10 @@ COBALT_SYSCALL(event_wait64, primary,
 }
 
 COBALT_SYSCALL(event_sync, current,
-	       (struct cobalt_event_shadow __user *u_event))
+	(struct cobalt_event_shadow __user *u_event,
+		unsigned int bits, int flags))
 {
-	unsigned int bits, waitval, testval, consumed = 0;
+	unsigned int waitval, testval, consumed = 0;
 	struct xnthread_wait_context *wc;
 	struct cobalt_event_state *state;
 	struct event_wait_context *ewc;
@@ -262,24 +255,24 @@ COBALT_SYSCALL(event_sync, current,
 		goto out;
 	}
 
-	/*
-	 * Userland has already updated the bitmask, our job is to
-	 * wake up any thread which could be satisfied by its current
-	 * value.
-	 */
 	state = event->state;
-	bits = state->value;
+	state->value |= bits;
 
+	/*
+	 * Wake up one or more threads satisfied by state->value,
+	 * depending on the flags.
+	 */
 	xnsynch_for_each_sleeper_safe(p, tmp, &event->synch) {
 		wc = xnthread_get_wait_context(p);
 		ewc = container_of(wc, struct event_wait_context, wc);
-		waitval = ewc->value & bits;
+		waitval = ewc->value & state->value;
 		testval = ewc->mode & COBALT_EVENT_ANY ? waitval : ewc->value;
 		if (waitval && waitval == testval) {
-			state->nwaiters--;
 			ewc->value = waitval;
 			consumed |= waitval;
 			xnsynch_wakeup_this_sleeper(&event->synch, p);
+			if (!(flags & COBALT_EVENT_BCAST))
+				break;
 		}
 	}
 
diff --git a/kernel/cobalt/posix/event.h b/kernel/cobalt/posix/event.h
index 919774c9af..85b8031b22 100644
--- a/kernel/cobalt/posix/event.h
+++ b/kernel/cobalt/posix/event.h
@@ -66,7 +66,8 @@ COBALT_SYSCALL_DECL(event_wait64,
 		     const struct __kernel_timespec __user *u_ts));
 
 COBALT_SYSCALL_DECL(event_sync,
-		    (struct cobalt_event_shadow __user *u_evtsh));
+		(struct cobalt_event_shadow __user *u_evtsh,
+		unsigned int bits, int flags));
 
 COBALT_SYSCALL_DECL(event_destroy,
 		    (struct cobalt_event_shadow __user *u_evtsh));
diff --git a/lib/cobalt/internal.c b/lib/cobalt/internal.c
index bf1e940b71..b142b3b551 100644
--- a/lib/cobalt/internal.c
+++ b/lib/cobalt/internal.c
@@ -460,19 +460,23 @@ int cobalt_event_destroy(cobalt_event_t *event)
 	return XENOMAI_SYSCALL1(sc_cobalt_event_destroy, event);
 }
 
-int cobalt_event_post(cobalt_event_t *event, unsigned int bits)
+static int do_sync_events(cobalt_event_t *event,
+			unsigned int bits, int flags)
 {
-	struct cobalt_event_state *state = get_event_state(event);
-
 	if (bits == 0)
 		return 0;
 
-	__sync_or_and_fetch(&state->value, bits); /* full barrier. */
+	return XENOMAI_SYSCALL3(sc_cobalt_event_sync, event, bits, flags);
+}
 
-	if ((state->flags & COBALT_EVENT_PENDED) == 0)
-		return 0;
+int cobalt_event_signal(cobalt_event_t *event, unsigned int bits)
+{
+	return do_sync_events(event, bits, 0);
+}
 
-	return XENOMAI_SYSCALL1(sc_cobalt_event_sync, event);
+int cobalt_event_broadcast(cobalt_event_t *event, unsigned int bits)
+{
+	return do_sync_events(event, bits, COBALT_EVENT_BCAST);
 }
 
 int cobalt_event_wait(cobalt_event_t *event,
@@ -516,7 +520,7 @@ int cobalt_sem_inquire(sem_t *sem, struct cobalt_sem_info *info,
 		       pid_t *waitlist, size_t waitsz)
 {
 	struct cobalt_sem_shadow *_sem = &((union cobalt_sem_union *)sem)->shadow_sem;
-	
+
 	return XENOMAI_SYSCALL4(sc_cobalt_sem_inquire, _sem,
 				info, waitlist, waitsz);
 }
-- 
2.34.1




* [PATCH 3/5] copperplate, testsuite: convert to cobalt_event_broadcast()
  2022-04-06 15:56 [PATCH 1/5] cobalt/events: add auto-clear feature Philippe Gerum
  2022-04-06 15:56 ` [PATCH 2/5] cobalt/events: add single-waiter delivery mode Philippe Gerum
@ 2022-04-06 15:56 ` Philippe Gerum
  2022-04-06 15:56 ` [PATCH 4/5] lib/cobalt: deprecate cobalt_event_post() Philippe Gerum
                   ` (2 subsequent siblings)
  4 siblings, 0 replies; 15+ messages in thread
From: Philippe Gerum @ 2022-04-06 15:56 UTC (permalink / raw)
  To: xenomai

From: Philippe Gerum <rpm@xenomai.org>

cobalt_event_post() is about to be deprecated, rename to
cobalt_event_broadcast().

Signed-off-by: Philippe Gerum <rpm@xenomai.org>
---
 lib/copperplate/eventobj.c       | 2 +-
 testsuite/smokey/events/events.c | 4 ++--
 2 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/lib/copperplate/eventobj.c b/lib/copperplate/eventobj.c
index dce5bd242f..fcc30b2429 100644
--- a/lib/copperplate/eventobj.c
+++ b/lib/copperplate/eventobj.c
@@ -84,7 +84,7 @@ int eventobj_post(struct eventobj *evobj, unsigned int bits)
 {
 	int ret;
 
-	ret = cobalt_event_post(&evobj->core.event, bits);
+	ret = cobalt_event_broadcast(&evobj->core.event, bits);
 	if (ret)
 		return ret;
 
diff --git a/testsuite/smokey/events/events.c b/testsuite/smokey/events/events.c
index 79711795ac..ee1de05baf 100644
--- a/testsuite/smokey/events/events.c
+++ b/testsuite/smokey/events/events.c
@@ -101,13 +101,13 @@ static int run_events(struct smokey_test *t, int argc, char *const argv[])
 	sem_post(&c.start);
 	sem_wait(&c.sem);
 
-	if (!__T(ret, cobalt_event_post(&c.event, 0x12121212)))
+	if (!__T(ret, cobalt_event_broadcast(&c.event, 0x12121212)))
 		return ret;
 
 	sem_wait(&c.sem);
 	usleep(1000);
 
-	if (!__T(ret, cobalt_event_post(&c.event, 0x41414141)))
+	if (!__T(ret, cobalt_event_broadcast(&c.event, 0x41414141)))
 		return ret;
 
 	sem_wait(&c.sem);
-- 
2.34.1




* [PATCH 4/5] lib/cobalt: deprecate cobalt_event_post()
  2022-04-06 15:56 [PATCH 1/5] cobalt/events: add auto-clear feature Philippe Gerum
  2022-04-06 15:56 ` [PATCH 2/5] cobalt/events: add single-waiter delivery mode Philippe Gerum
  2022-04-06 15:56 ` [PATCH 3/5] copperplate, testsuite: convert to cobalt_event_broadcast() Philippe Gerum
@ 2022-04-06 15:56 ` Philippe Gerum
  2022-04-06 15:56 ` [PATCH 5/5] drivers: ipc: enable non-blocking write from regular threads Philippe Gerum
  2022-04-07  9:47 ` [PATCH 1/5] cobalt/events: add auto-clear feature Jan Kiszka
  4 siblings, 0 replies; 15+ messages in thread
From: Philippe Gerum @ 2022-04-06 15:56 UTC (permalink / raw)
  To: xenomai

From: Philippe Gerum <rpm@xenomai.org>

Signed-off-by: Philippe Gerum <rpm@xenomai.org>
---
 include/cobalt/sys/cobalt.h | 1 +
 1 file changed, 1 insertion(+)

diff --git a/include/cobalt/sys/cobalt.h b/include/cobalt/sys/cobalt.h
index 1035a29d52..5dde69da8a 100644
--- a/include/cobalt/sys/cobalt.h
+++ b/include/cobalt/sys/cobalt.h
@@ -113,6 +113,7 @@ int cobalt_event_broadcast(cobalt_event_t *event,
 			unsigned int bits);
 
 /* Backward compatibility with 3.2.x and earlier. */
+__deprecated
 static inline int cobalt_event_post(cobalt_event_t *event,
 				unsigned int bits)
 {
-- 
2.34.1




* [PATCH 5/5] drivers: ipc: enable non-blocking write from regular threads
  2022-04-06 15:56 [PATCH 1/5] cobalt/events: add auto-clear feature Philippe Gerum
                   ` (2 preceding siblings ...)
  2022-04-06 15:56 ` [PATCH 4/5] lib/cobalt: deprecate cobalt_event_post() Philippe Gerum
@ 2022-04-06 15:56 ` Philippe Gerum
  2022-04-07  9:51   ` Jan Kiszka
  2022-04-13 11:21   ` Jan Kiszka
  2022-04-07  9:47 ` [PATCH 1/5] cobalt/events: add auto-clear feature Jan Kiszka
  4 siblings, 2 replies; 15+ messages in thread
From: Philippe Gerum @ 2022-04-06 15:56 UTC (permalink / raw)
  To: xenomai

From: Philippe Gerum <rpm@xenomai.org>

Regular threads should be allowed to write to RTIPC sockets provided
MSG_DONTWAIT is implicitly set for such a request. This would match
the existing behavior with other synchronization objects, such as
semaphores and events, avoiding unnecessary restrictions on usage.
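
For instance, a regular thread (or a Cobalt thread relaxed to secondary
mode) could then issue a plain write() on an already connected RTIPC
socket; 'sockfd', 'buf' and 'len' below are hypothetical, the point is
only the implicit non-blocking semantics:

    ssize_t n = write(sockfd, buf, len); /* behaves as if MSG_DONTWAIT was set */
    if (n < 0 && errno == EWOULDBLOCK)
	    ; /* cannot complete immediately, retry later (e.g. after select()) */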

Signed-off-by: Philippe Gerum <rpm@xenomai.org>
---
 kernel/drivers/ipc/bufp.c  | 8 ++++++--
 kernel/drivers/ipc/iddp.c  | 8 ++++++--
 kernel/drivers/ipc/rtipc.c | 4 ++--
 kernel/drivers/ipc/xddp.c  | 6 +++++-
 4 files changed, 19 insertions(+), 7 deletions(-)

diff --git a/kernel/drivers/ipc/bufp.c b/kernel/drivers/ipc/bufp.c
index fd533dba27..565409dd6f 100644
--- a/kernel/drivers/ipc/bufp.c
+++ b/kernel/drivers/ipc/bufp.c
@@ -655,11 +655,15 @@ static ssize_t bufp_write(struct rtdm_fd *fd,
 	struct rtipc_private *priv = rtdm_fd_to_private(fd);
 	struct iovec iov = { .iov_base = (void *)buf, .iov_len = len };
 	struct bufp_socket *sk = priv->state;
+	int flags = 0;
 
 	if (sk->peer.sipc_port < 0)
 		return -EDESTADDRREQ;
 
-	return __bufp_sendmsg(fd, &iov, 1, 0, &sk->peer);
+	if (is_secondary_domain())
+		flags = MSG_DONTWAIT;
+
+	return __bufp_sendmsg(fd, &iov, 1, flags, &sk->peer);
 }
 
 static int __bufp_bind_socket(struct rtipc_private *priv,
@@ -682,7 +686,7 @@ static int __bufp_bind_socket(struct rtipc_private *priv,
 	    __test_and_set_bit(_BUFP_BINDING, &sk->status))
 		ret = -EADDRINUSE;
 	cobalt_atomic_leave(s);
-	
+
 	if (ret)
 		return ret;
 
diff --git a/kernel/drivers/ipc/iddp.c b/kernel/drivers/ipc/iddp.c
index a553902326..05d0193394 100644
--- a/kernel/drivers/ipc/iddp.c
+++ b/kernel/drivers/ipc/iddp.c
@@ -255,7 +255,7 @@ static ssize_t __iddp_recvmsg(struct rtdm_fd *fd,
 	}
 
 	/* We want to pick one buffer from the queue. */
-	
+
 	for (;;) {
 		ret = rtdm_sem_timeddown(&sk->insem, timeout, toseq);
 		if (unlikely(ret)) {
@@ -522,11 +522,15 @@ static ssize_t iddp_write(struct rtdm_fd *fd,
 	struct rtipc_private *priv = rtdm_fd_to_private(fd);
 	struct iovec iov = { .iov_base = (void *)buf, .iov_len = len };
 	struct iddp_socket *sk = priv->state;
+	int flags = 0;
 
 	if (sk->peer.sipc_port < 0)
 		return -EDESTADDRREQ;
 
-	return __iddp_sendmsg(fd, &iov, 1, 0, &sk->peer);
+	if (is_secondary_domain())
+		flags = MSG_DONTWAIT;
+
+	return __iddp_sendmsg(fd, &iov, 1, flags, &sk->peer);
 }
 
 static int __iddp_bind_socket(struct rtdm_fd *fd,
diff --git a/kernel/drivers/ipc/rtipc.c b/kernel/drivers/ipc/rtipc.c
index 859bdab2f2..211b496ec5 100644
--- a/kernel/drivers/ipc/rtipc.c
+++ b/kernel/drivers/ipc/rtipc.c
@@ -428,7 +428,7 @@ static int rtipc_select(struct rtdm_fd *fd, struct xnselector *selector,
 	struct xnselect *block;
 	spl_t s;
 	int ret;
-	
+
 	if (type != XNSELECT_READ && type != XNSELECT_WRITE)
 		return -EINVAL;
 
@@ -480,7 +480,7 @@ static struct rtdm_driver rtipc_driver = {
 		.read_rt	=	rtipc_read,
 		.read_nrt	=	NULL,
 		.write_rt	=	rtipc_write,
-		.write_nrt	=	NULL,
+		.write_nrt	=	rtipc_write, /* MSG_DONTWAIT. */
 		.select		=	rtipc_select,
 	},
 };
diff --git a/kernel/drivers/ipc/xddp.c b/kernel/drivers/ipc/xddp.c
index ae5b720c0c..2ca0da5fd4 100644
--- a/kernel/drivers/ipc/xddp.c
+++ b/kernel/drivers/ipc/xddp.c
@@ -657,11 +657,15 @@ static ssize_t xddp_write(struct rtdm_fd *fd,
 	struct rtipc_private *priv = rtdm_fd_to_private(fd);
 	struct iovec iov = { .iov_base = (void *)buf, .iov_len = len };
 	struct xddp_socket *sk = priv->state;
+	int flags = 0;
 
 	if (sk->peer.sipc_port < 0)
 		return -EDESTADDRREQ;
 
-	return __xddp_sendmsg(fd, &iov, 1, 0, &sk->peer);
+	if (is_secondary_domain())
+		flags = MSG_DONTWAIT;
+
+	return __xddp_sendmsg(fd, &iov, 1, flags, &sk->peer);
 }
 
 static int __xddp_bind_socket(struct rtipc_private *priv,
-- 
2.34.1




* Re: [PATCH 1/5] cobalt/events: add auto-clear feature
  2022-04-06 15:56 [PATCH 1/5] cobalt/events: add auto-clear feature Philippe Gerum
                   ` (3 preceding siblings ...)
  2022-04-06 15:56 ` [PATCH 5/5] drivers: ipc: enable non-blocking write from regular threads Philippe Gerum
@ 2022-04-07  9:47 ` Jan Kiszka
  2022-04-07 11:16   ` Philippe Gerum
  4 siblings, 1 reply; 15+ messages in thread
From: Jan Kiszka @ 2022-04-07  9:47 UTC (permalink / raw)
  To: Philippe Gerum, xenomai

On 06.04.22 17:56, Philippe Gerum via Xenomai wrote:
> From: Philippe Gerum <rpm@xenomai.org>
> 
> The current implementation does not atomically consume+clear the event
> set to be received by the waiter(s), which makes it useless for
> anything but a plain one-time latch due to the race window this opens
> with a consume[A]->signal[B]->clear[A] sequence.
> 
> To address this issue, let's provide the auto-clear feature with
> __cobalt_event_wait().
> 
> This change affects the ABI by adding the auto-clear mode as an opt-in
> feature, enabled by passing COBALT_EVENT_AUTOCLEAR to
> cobalt_event_init().
> 

Makes sense, but shouldn't autoclear be rather the default then? Which
users are affected? None in-tree so far?

Jan

> Signed-off-by: Philippe Gerum <rpm@xenomai.org>
> ---
>  configure.ac                        |   1 +
>  include/cobalt/uapi/event.h         |   7 +-
>  kernel/cobalt/posix/event.c         |  17 +++-
>  testsuite/smokey/Makefile.am        |   2 +
>  testsuite/smokey/events/Makefile.am |   8 ++
>  testsuite/smokey/events/events.c    | 123 ++++++++++++++++++++++++++++
>  6 files changed, 152 insertions(+), 6 deletions(-)
>  create mode 100644 testsuite/smokey/events/Makefile.am
>  create mode 100644 testsuite/smokey/events/events.c
> 
> diff --git a/configure.ac b/configure.ac
> index 5611b5b8db..15d3e46e9a 100644
> --- a/configure.ac
> +++ b/configure.ac
> @@ -1002,6 +1002,7 @@ AC_CONFIG_FILES([ \
>  	testsuite/smokey/gdb/Makefile \
>  	testsuite/smokey/y2038/Makefile \
>  	testsuite/smokey/can/Makefile
> +	testsuite/smokey/events/Makefile \
>  	testsuite/clocktest/Makefile \
>  	testsuite/xeno-test/Makefile \
>  	utils/Makefile \
> diff --git a/include/cobalt/uapi/event.h b/include/cobalt/uapi/event.h
> index 8710e8e25f..14ddbcf567 100644
> --- a/include/cobalt/uapi/event.h
> +++ b/include/cobalt/uapi/event.h
> @@ -30,9 +30,10 @@ struct cobalt_event_state {
>  struct cobalt_event;
>  
>  /* Creation flags. */
> -#define COBALT_EVENT_FIFO    0x0
> -#define COBALT_EVENT_PRIO    0x1
> -#define COBALT_EVENT_SHARED  0x2
> +#define COBALT_EVENT_FIFO       0x0
> +#define COBALT_EVENT_PRIO       0x1
> +#define COBALT_EVENT_SHARED     0x2
> +#define COBALT_EVENT_AUTOCLEAR  0x4
>  
>  /* Wait mode. */
>  #define COBALT_EVENT_ALL  0x0
> diff --git a/kernel/cobalt/posix/event.c b/kernel/cobalt/posix/event.c
> index 052c686050..0a1236ad73 100644
> --- a/kernel/cobalt/posix/event.c
> +++ b/kernel/cobalt/posix/event.c
> @@ -146,7 +146,7 @@ int __cobalt_event_wait(struct cobalt_event_shadow __user *u_event,
>  	if (bits == 0) {
>  		/*
>  		 * Special case: we don't wait for any event, we only
> -		 * return the current flag group value.
> +		 * return the pending ones without consuming them.
>  		 */
>  		rbits = state->value;
>  		goto out;
> @@ -155,8 +155,11 @@ int __cobalt_event_wait(struct cobalt_event_shadow __user *u_event,
>  	state->flags |= COBALT_EVENT_PENDED;
>  	rbits = state->value & bits;
>  	testval = mode & COBALT_EVENT_ANY ? rbits : bits;
> -	if (rbits && rbits == testval)
> +	if (rbits && rbits == testval) {
> +		if (event->flags & COBALT_EVENT_AUTOCLEAR)
> +			state->value &= ~rbits;
>  		goto done;
> +	}
>  
>  	if (timeout == XN_NONBLOCK) {
>  		ret = -EWOULDBLOCK;
> @@ -239,7 +242,7 @@ COBALT_SYSCALL(event_wait64, primary,
>  COBALT_SYSCALL(event_sync, current,
>  	       (struct cobalt_event_shadow __user *u_event))
>  {
> -	unsigned int bits, waitval, testval;
> +	unsigned int bits, waitval, testval, consumed = 0;
>  	struct xnthread_wait_context *wc;
>  	struct cobalt_event_state *state;
>  	struct event_wait_context *ewc;
> @@ -275,10 +278,18 @@ COBALT_SYSCALL(event_sync, current,
>  		if (waitval && waitval == testval) {
>  			state->nwaiters--;
>  			ewc->value = waitval;
> +			consumed |= waitval;
>  			xnsynch_wakeup_this_sleeper(&event->synch, p);
>  		}
>  	}
>  
> +	/*
> +	 * If some flags were consumed and auto-clear is enabled,
> +	 * clear the former.
> +	 */
> +	if (consumed && (event->flags & COBALT_EVENT_AUTOCLEAR))
> +		state->value &= ~consumed;
> +
>  	xnsched_run();
>  out:
>  	xnlock_put_irqrestore(&nklock, s);
> diff --git a/testsuite/smokey/Makefile.am b/testsuite/smokey/Makefile.am
> index 4a9773f586..3f0521282b 100644
> --- a/testsuite/smokey/Makefile.am
> +++ b/testsuite/smokey/Makefile.am
> @@ -14,6 +14,7 @@ COBALT_SUBDIRS = 	\
>  	bufp		\
>  	can		\
>  	cpu-affinity	\
> +	events		\
>  	fpu-stress	\
>  	gdb		\
>  	iddp		\
> @@ -53,6 +54,7 @@ DIST_SUBDIRS = 		\
>  	can		\
>  	cpu-affinity	\
>  	dlopen		\
> +	events		\
>  	fpu-stress	\
>  	gdb		\
>  	iddp		\
> diff --git a/testsuite/smokey/events/Makefile.am b/testsuite/smokey/events/Makefile.am
> new file mode 100644
> index 0000000000..37b9f92d7f
> --- /dev/null
> +++ b/testsuite/smokey/events/Makefile.am
> @@ -0,0 +1,8 @@
> +
> +noinst_LIBRARIES = libevents.a
> +
> +libevents_a_SOURCES = events.c
> +
> +libevents_a_CPPFLAGS = 		\
> +	@XENO_USER_CFLAGS@	\
> +	-I$(top_srcdir)/include
> diff --git a/testsuite/smokey/events/events.c b/testsuite/smokey/events/events.c
> new file mode 100644
> index 0000000000..79711795ac
> --- /dev/null
> +++ b/testsuite/smokey/events/events.c
> @@ -0,0 +1,123 @@
> +#include <stdlib.h>
> +#include <unistd.h>
> +#include <smokey/smokey.h>
> +#include <cobalt/time.h>
> +#include <cobalt/semaphore.h>
> +#include <cobalt/pthread.h>
> +#include <cobalt/sys/cobalt.h>
> +#include <boilerplate/time.h>
> +
> +smokey_test_plugin(events,
> +		   SMOKEY_NOARGS,
> +		   "Check event flag group services"
> +);
> +
> +#define LOW_PRIO	1
> +#define HIGH_PRIO	2
> +
> +struct test_context {
> +	cobalt_event_t event;
> +	sem_t start;
> +	sem_t sem;
> +};
> +
> +static void *event_receiver(void *arg)
> +{
> +	struct test_context *p = arg;
> +	struct timespec now, timeout;
> +	unsigned int bits;
> +	int ret;
> +
> +	sem_wait(&p->start);
> +	clock_gettime(CLOCK_MONOTONIC, &now);
> +	timespec_adds(&timeout, &now, 400000000); /* 400ms */
> +
> +	/* Sender is quiet: expect timeout. */
> +	if (!__F(ret, cobalt_event_wait(&p->event, -1U, &bits,
> +				COBALT_EVENT_ANY, &timeout)) ||
> +		!__Tassert(ret == -ETIMEDOUT))
> +		goto fail;
> +
> +	sem_post(&p->sem);
> +
> +	/* Sender should have sent 0x12121212. */
> +	clock_gettime(CLOCK_MONOTONIC, &now);
> +	timespec_adds(&timeout, &now, 400000000); /* 400ms */
> +	if (!__T(ret, cobalt_event_wait(&p->event, -1U, &bits,
> +				COBALT_EVENT_ANY, &timeout)))
> +		goto fail;
> +
> +	if (!__Tassert(bits == 0x12121212))
> +		goto fail;
> +
> +	sem_post(&p->sem);
> +
> +	/* Sender should send 0x41414141 in a moment. */
> +	if (!__T(ret, cobalt_event_wait(&p->event, 0x41414141, &bits,
> +				COBALT_EVENT_ALL, NULL)))
> +		goto fail;
> +
> +	if (!__Tassert(bits == 0x41414141))
> +		goto fail;
> +
> +	sem_post(&p->sem);
> +
> +	return NULL;
> +fail:
> +	exit(-ret);
> +}
> +
> +static int run_events(struct smokey_test *t, int argc, char *const argv[])
> +{
> +	struct sched_param param;
> +	struct test_context c;
> +	pthread_attr_t attr;
> +	void *status = NULL;
> +	pthread_t receiver;
> +	int ret;
> +
> +	param.sched_priority = HIGH_PRIO;
> +	if (!__Tassert(pthread_setschedparam(pthread_self(),
> +			SCHED_FIFO, &param) == 0))
> +		return -EPERM;
> +
> +	sem_init(&c.sem, 0, 0);
> +	sem_init(&c.start, 0, 0);
> +
> +	if (!__T(ret, cobalt_event_init(&c.event, 0,
> +		COBALT_EVENT_FIFO|COBALT_EVENT_AUTOCLEAR)))
> +		return ret;
> +
> +	param.sched_priority = LOW_PRIO;
> +	pthread_attr_init(&attr);
> +	pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_JOINABLE);
> +	pthread_attr_setinheritsched(&attr, PTHREAD_EXPLICIT_SCHED);
> +	pthread_attr_setschedpolicy(&attr, SCHED_FIFO);
> +	pthread_attr_setschedparam(&attr, &param);
> +
> +	if (!__T(ret, pthread_create(&receiver, &attr, event_receiver, &c)))
> +		return ret;
> +
> +	sem_post(&c.start);
> +	sem_wait(&c.sem);
> +
> +	if (!__T(ret, cobalt_event_post(&c.event, 0x12121212)))
> +		return ret;
> +
> +	sem_wait(&c.sem);
> +	usleep(1000);
> +
> +	if (!__T(ret, cobalt_event_post(&c.event, 0x41414141)))
> +		return ret;
> +
> +	sem_wait(&c.sem);
> +
> +	pthread_join(receiver, &status);
> +	if (!__Tassert(status == NULL))
> +		return -EPROTO;
> +
> +	if (!__T(ret, cobalt_event_destroy(&c.event)))
> +		return ret;
> +
> +	return 0;
> +}

-- 
Siemens AG, Technology
Competence Center Embedded Linux



* Re: [PATCH 2/5] cobalt/events: add single-waiter delivery mode
  2022-04-06 15:56 ` [PATCH 2/5] cobalt/events: add single-waiter delivery mode Philippe Gerum
@ 2022-04-07  9:49   ` Jan Kiszka
  2022-04-07 11:26     ` Philippe Gerum
  0 siblings, 1 reply; 15+ messages in thread
From: Jan Kiszka @ 2022-04-07  9:49 UTC (permalink / raw)
  To: Philippe Gerum, xenomai

On 06.04.22 17:56, Philippe Gerum via Xenomai wrote:
> From: Philippe Gerum <rpm@xenomai.org>
> 
> Allow for sending a set of event bits to the heading waiter only,
> instead of broadcasting them to all waiters.
> 
> This change affects the ABI, since we add a set of operation flags to
> the cobalt_event_sync service, to pass the new COBALT_EVENT_BCAST
> flag. It also affects the internal API as follows:
> 
> - the new cobalt_event_broadcast() call behaves like
>   cobalt_event_post() formerly did, which is broadcasting the event to
>   all waiters atomically.
> 
> - the new cobalt_event_signal() call implements the single-waiter
>   delivery mode we introduce.
> 
> - the former cobalt_event_post() is now a wrapper to
>   cobalt_event_broadcast(), marked with a deprecation flag.

Strictly speaking, deprecation will come with patch 4 only.

Can we have some test cases for the new features as well? Who will use them?

Jan

> 
> Signed-off-by: Philippe Gerum <rpm@xenomai.org>
> ---
>  include/cobalt/sys/cobalt.h | 12 +++++++++++-
>  include/cobalt/uapi/event.h | 10 +++++-----
>  kernel/cobalt/posix/event.c | 37 +++++++++++++++----------------------
>  kernel/cobalt/posix/event.h |  3 ++-
>  lib/cobalt/internal.c       | 20 ++++++++++++--------
>  5 files changed, 45 insertions(+), 37 deletions(-)
> 
> diff --git a/include/cobalt/sys/cobalt.h b/include/cobalt/sys/cobalt.h
> index 46096e8801..1035a29d52 100644
> --- a/include/cobalt/sys/cobalt.h
> +++ b/include/cobalt/sys/cobalt.h
> @@ -106,9 +106,19 @@ int cobalt_event_init(cobalt_event_t *event,
>  		      unsigned int value,
>  		      int flags);
>  
> -int cobalt_event_post(cobalt_event_t *event,
> +int cobalt_event_signal(cobalt_event_t *event,
>  		      unsigned int bits);
>  
> +int cobalt_event_broadcast(cobalt_event_t *event,
> +			unsigned int bits);
> +
> +/* Backward compatibility with 3.2.x and earlier. */
> +static inline int cobalt_event_post(cobalt_event_t *event,
> +				unsigned int bits)
> +{
> +	return cobalt_event_broadcast(event, bits);
> +}
> +
>  int cobalt_event_wait(cobalt_event_t *event,
>  		      unsigned int bits,
>  		      unsigned int *bits_r,
> diff --git a/include/cobalt/uapi/event.h b/include/cobalt/uapi/event.h
> index 14ddbcf567..52b71d4e3b 100644
> --- a/include/cobalt/uapi/event.h
> +++ b/include/cobalt/uapi/event.h
> @@ -22,9 +22,6 @@
>  
>  struct cobalt_event_state {
>  	__u32 value;
> -	__u32 flags;
> -#define COBALT_EVENT_PENDED  0x1
> -	__u32 nwaiters;
>  };
>  
>  struct cobalt_event;
> @@ -36,8 +33,11 @@ struct cobalt_event;
>  #define COBALT_EVENT_AUTOCLEAR  0x4
>  
>  /* Wait mode. */
> -#define COBALT_EVENT_ALL  0x0
> -#define COBALT_EVENT_ANY  0x1
> +#define COBALT_EVENT_ALL    0x0
> +#define COBALT_EVENT_ANY    0x1
> +
> +/* Sync mode. */
> +#define COBALT_EVENT_BCAST  0x1
>  
>  struct cobalt_event_shadow {
>  	__u32 state_offset;
> diff --git a/kernel/cobalt/posix/event.c b/kernel/cobalt/posix/event.c
> index 0a1236ad73..4433e733fd 100644
> --- a/kernel/cobalt/posix/event.c
> +++ b/kernel/cobalt/posix/event.c
> @@ -85,8 +85,6 @@ COBALT_SYSCALL(event_init, current,
>  	synflags = (flags & COBALT_EVENT_PRIO) ? XNSYNCH_PRIO : XNSYNCH_FIFO;
>  	xnsynch_init(&event->synch, synflags, NULL);
>  	state->value = value;
> -	state->flags = 0;
> -	state->nwaiters = 0;
>  	stateoff = cobalt_umm_offset(umm, state);
>  	XENO_BUG_ON(COBALT, stateoff != (__u32)stateoff);
>  
> @@ -152,37 +150,31 @@ int __cobalt_event_wait(struct cobalt_event_shadow __user *u_event,
>  		goto out;
>  	}
>  
> -	state->flags |= COBALT_EVENT_PENDED;
>  	rbits = state->value & bits;
>  	testval = mode & COBALT_EVENT_ANY ? rbits : bits;
>  	if (rbits && rbits == testval) {
>  		if (event->flags & COBALT_EVENT_AUTOCLEAR)
>  			state->value &= ~rbits;
> -		goto done;
> +		goto out;
>  	}
>  
>  	if (timeout == XN_NONBLOCK) {
>  		ret = -EWOULDBLOCK;
> -		goto done;
> +		goto out;
>  	}
>  
>  	ewc.value = bits;
>  	ewc.mode = mode;
>  	xnthread_prepare_wait(&ewc.wc);
> -	state->nwaiters++;
>  	info = xnsynch_sleep_on(&event->synch, timeout, tmode);
>  	if (info & XNRMID) {
>  		ret = -EIDRM;
>  		goto out;
>  	}
> -	if (info & (XNBREAK|XNTIMEO)) {
> -		state->nwaiters--;
> +	if (info & (XNBREAK|XNTIMEO))
>  		ret = (info & XNBREAK) ? -EINTR : -ETIMEDOUT;
> -	} else
> +	else
>  		rbits = ewc.value;
> -done:
> -	if (!xnsynch_pended_p(&event->synch))
> -		state->flags &= ~COBALT_EVENT_PENDED;
>  out:
>  	xnlock_put_irqrestore(&nklock, s);
>  
> @@ -240,9 +232,10 @@ COBALT_SYSCALL(event_wait64, primary,
>  }
>  
>  COBALT_SYSCALL(event_sync, current,
> -	       (struct cobalt_event_shadow __user *u_event))
> +	(struct cobalt_event_shadow __user *u_event,
> +		unsigned int bits, int flags))
>  {
> -	unsigned int bits, waitval, testval, consumed = 0;
> +	unsigned int waitval, testval, consumed = 0;
>  	struct xnthread_wait_context *wc;
>  	struct cobalt_event_state *state;
>  	struct event_wait_context *ewc;
> @@ -262,24 +255,24 @@ COBALT_SYSCALL(event_sync, current,
>  		goto out;
>  	}
>  
> -	/*
> -	 * Userland has already updated the bitmask, our job is to
> -	 * wake up any thread which could be satisfied by its current
> -	 * value.
> -	 */
>  	state = event->state;
> -	bits = state->value;
> +	state->value |= bits;
>  
> +	/*
> +	 * Wake up one or more threads satisfied by state->value,
> +	 * depending on the flags.
> +	 */
>  	xnsynch_for_each_sleeper_safe(p, tmp, &event->synch) {
>  		wc = xnthread_get_wait_context(p);
>  		ewc = container_of(wc, struct event_wait_context, wc);
> -		waitval = ewc->value & bits;
> +		waitval = ewc->value & state->value;
>  		testval = ewc->mode & COBALT_EVENT_ANY ? waitval : ewc->value;
>  		if (waitval && waitval == testval) {
> -			state->nwaiters--;
>  			ewc->value = waitval;
>  			consumed |= waitval;
>  			xnsynch_wakeup_this_sleeper(&event->synch, p);
> +			if (!(flags & COBALT_EVENT_BCAST))
> +				break;
>  		}
>  	}
>  
> diff --git a/kernel/cobalt/posix/event.h b/kernel/cobalt/posix/event.h
> index 919774c9af..85b8031b22 100644
> --- a/kernel/cobalt/posix/event.h
> +++ b/kernel/cobalt/posix/event.h
> @@ -66,7 +66,8 @@ COBALT_SYSCALL_DECL(event_wait64,
>  		     const struct __kernel_timespec __user *u_ts));
>  
>  COBALT_SYSCALL_DECL(event_sync,
> -		    (struct cobalt_event_shadow __user *u_evtsh));
> +		(struct cobalt_event_shadow __user *u_evtsh,
> +		unsigned int bits, int flags));
>  
>  COBALT_SYSCALL_DECL(event_destroy,
>  		    (struct cobalt_event_shadow __user *u_evtsh));
> diff --git a/lib/cobalt/internal.c b/lib/cobalt/internal.c
> index bf1e940b71..b142b3b551 100644
> --- a/lib/cobalt/internal.c
> +++ b/lib/cobalt/internal.c
> @@ -460,19 +460,23 @@ int cobalt_event_destroy(cobalt_event_t *event)
>  	return XENOMAI_SYSCALL1(sc_cobalt_event_destroy, event);
>  }
>  
> -int cobalt_event_post(cobalt_event_t *event, unsigned int bits)
> +static int do_sync_events(cobalt_event_t *event,
> +			unsigned int bits, int flags)
>  {
> -	struct cobalt_event_state *state = get_event_state(event);
> -
>  	if (bits == 0)
>  		return 0;
>  
> -	__sync_or_and_fetch(&state->value, bits); /* full barrier. */
> +	return XENOMAI_SYSCALL3(sc_cobalt_event_sync, event, bits, flags);
> +}
>  
> -	if ((state->flags & COBALT_EVENT_PENDED) == 0)
> -		return 0;
> +int cobalt_event_signal(cobalt_event_t *event, unsigned int bits)
> +{
> +	return do_sync_events(event, bits, 0);
> +}
>  
> -	return XENOMAI_SYSCALL1(sc_cobalt_event_sync, event);
> +int cobalt_event_broadcast(cobalt_event_t *event, unsigned int bits)
> +{
> +	return do_sync_events(event, bits, COBALT_EVENT_BCAST);
>  }
>  
>  int cobalt_event_wait(cobalt_event_t *event,
> @@ -516,7 +520,7 @@ int cobalt_sem_inquire(sem_t *sem, struct cobalt_sem_info *info,
>  		       pid_t *waitlist, size_t waitsz)
>  {
>  	struct cobalt_sem_shadow *_sem = &((union cobalt_sem_union *)sem)->shadow_sem;
> -	
> +
>  	return XENOMAI_SYSCALL4(sc_cobalt_sem_inquire, _sem,
>  				info, waitlist, waitsz);
>  }

-- 
Siemens AG, Technology
Competence Center Embedded Linux



* Re: [PATCH 5/5] drivers: ipc: enable non-blocking write from regular threads
  2022-04-06 15:56 ` [PATCH 5/5] drivers: ipc: enable non-blocking write from regular threads Philippe Gerum
@ 2022-04-07  9:51   ` Jan Kiszka
  2022-04-07 11:37     ` Philippe Gerum
  2022-04-13 11:21   ` Jan Kiszka
  1 sibling, 1 reply; 15+ messages in thread
From: Jan Kiszka @ 2022-04-07  9:51 UTC (permalink / raw)
  To: Philippe Gerum, xenomai

On 06.04.22 17:56, Philippe Gerum via Xenomai wrote:
> From: Philippe Gerum <rpm@xenomai.org>
> 
> Regular threads should be allowed to write to RTIPC sockets provided
> MSG_DONTWAIT is implicitly set for such a request. This would match
> the existing behavior with other synchronization objects, such as
> semaphores and events, avoiding unnecessary restrictions on usage.
> 
> Signed-off-by: Philippe Gerum <rpm@xenomai.org>
> ---
>  kernel/drivers/ipc/bufp.c  | 8 ++++++--
>  kernel/drivers/ipc/iddp.c  | 8 ++++++--
>  kernel/drivers/ipc/rtipc.c | 4 ++--
>  kernel/drivers/ipc/xddp.c  | 6 +++++-
>  4 files changed, 19 insertions(+), 7 deletions(-)
> 
> diff --git a/kernel/drivers/ipc/bufp.c b/kernel/drivers/ipc/bufp.c
> index fd533dba27..565409dd6f 100644
> --- a/kernel/drivers/ipc/bufp.c
> +++ b/kernel/drivers/ipc/bufp.c
> @@ -655,11 +655,15 @@ static ssize_t bufp_write(struct rtdm_fd *fd,
>  	struct rtipc_private *priv = rtdm_fd_to_private(fd);
>  	struct iovec iov = { .iov_base = (void *)buf, .iov_len = len };
>  	struct bufp_socket *sk = priv->state;
> +	int flags = 0;
>  
>  	if (sk->peer.sipc_port < 0)
>  		return -EDESTADDRREQ;
>  
> -	return __bufp_sendmsg(fd, &iov, 1, 0, &sk->peer);
> +	if (is_secondary_domain())
> +		flags = MSG_DONTWAIT;
> +
> +	return __bufp_sendmsg(fd, &iov, 1, flags, &sk->peer);
>  }
>  
>  static int __bufp_bind_socket(struct rtipc_private *priv,
> @@ -682,7 +686,7 @@ static int __bufp_bind_socket(struct rtipc_private *priv,
>  	    __test_and_set_bit(_BUFP_BINDING, &sk->status))
>  		ret = -EADDRINUSE;
>  	cobalt_atomic_leave(s);
> -	
> +
>  	if (ret)
>  		return ret;
>  
> diff --git a/kernel/drivers/ipc/iddp.c b/kernel/drivers/ipc/iddp.c
> index a553902326..05d0193394 100644
> --- a/kernel/drivers/ipc/iddp.c
> +++ b/kernel/drivers/ipc/iddp.c
> @@ -255,7 +255,7 @@ static ssize_t __iddp_recvmsg(struct rtdm_fd *fd,
>  	}
>  
>  	/* We want to pick one buffer from the queue. */
> -	
> +
>  	for (;;) {
>  		ret = rtdm_sem_timeddown(&sk->insem, timeout, toseq);
>  		if (unlikely(ret)) {
> @@ -522,11 +522,15 @@ static ssize_t iddp_write(struct rtdm_fd *fd,
>  	struct rtipc_private *priv = rtdm_fd_to_private(fd);
>  	struct iovec iov = { .iov_base = (void *)buf, .iov_len = len };
>  	struct iddp_socket *sk = priv->state;
> +	int flags = 0;
>  
>  	if (sk->peer.sipc_port < 0)
>  		return -EDESTADDRREQ;
>  
> -	return __iddp_sendmsg(fd, &iov, 1, 0, &sk->peer);
> +	if (is_secondary_domain())
> +		flags = MSG_DONTWAIT;
> +
> +	return __iddp_sendmsg(fd, &iov, 1, flags, &sk->peer);
>  }
>  
>  static int __iddp_bind_socket(struct rtdm_fd *fd,
> diff --git a/kernel/drivers/ipc/rtipc.c b/kernel/drivers/ipc/rtipc.c
> index 859bdab2f2..211b496ec5 100644
> --- a/kernel/drivers/ipc/rtipc.c
> +++ b/kernel/drivers/ipc/rtipc.c
> @@ -428,7 +428,7 @@ static int rtipc_select(struct rtdm_fd *fd, struct xnselector *selector,
>  	struct xnselect *block;
>  	spl_t s;
>  	int ret;
> -	
> +
>  	if (type != XNSELECT_READ && type != XNSELECT_WRITE)
>  		return -EINVAL;
>  
> @@ -480,7 +480,7 @@ static struct rtdm_driver rtipc_driver = {
>  		.read_rt	=	rtipc_read,
>  		.read_nrt	=	NULL,
>  		.write_rt	=	rtipc_write,
> -		.write_nrt	=	NULL,
> +		.write_nrt	=	rtipc_write, /* MSG_DONTWAIT. */
>  		.select		=	rtipc_select,
>  	},
>  };
> diff --git a/kernel/drivers/ipc/xddp.c b/kernel/drivers/ipc/xddp.c
> index ae5b720c0c..2ca0da5fd4 100644
> --- a/kernel/drivers/ipc/xddp.c
> +++ b/kernel/drivers/ipc/xddp.c
> @@ -657,11 +657,15 @@ static ssize_t xddp_write(struct rtdm_fd *fd,
>  	struct rtipc_private *priv = rtdm_fd_to_private(fd);
>  	struct iovec iov = { .iov_base = (void *)buf, .iov_len = len };
>  	struct xddp_socket *sk = priv->state;
> +	int flags = 0;
>  
>  	if (sk->peer.sipc_port < 0)
>  		return -EDESTADDRREQ;
>  
> -	return __xddp_sendmsg(fd, &iov, 1, 0, &sk->peer);
> +	if (is_secondary_domain())
> +		flags = MSG_DONTWAIT;
> +
> +	return __xddp_sendmsg(fd, &iov, 1, flags, &sk->peer);
>  }
>  
>  static int __xddp_bind_socket(struct rtipc_private *priv,

Does this patch have any dependency on 1-4, or was it just bundled with
them by chance?

Jan

-- 
Siemens AG, Technology
Competence Center Embedded Linux



* Re: [PATCH 1/5] cobalt/events: add auto-clear feature
  2022-04-07  9:47 ` [PATCH 1/5] cobalt/events: add auto-clear feature Jan Kiszka
@ 2022-04-07 11:16   ` Philippe Gerum
  2022-04-07 16:39     ` Jan Kiszka
  0 siblings, 1 reply; 15+ messages in thread
From: Philippe Gerum @ 2022-04-07 11:16 UTC (permalink / raw)
  To: Jan Kiszka; +Cc: xenomai


Jan Kiszka <jan.kiszka@siemens.com> writes:

> On 06.04.22 17:56, Philippe Gerum via Xenomai wrote:
>> From: Philippe Gerum <rpm@xenomai.org>
>> 
>> The current implementation does not atomically consume+clear the event
>> set to be received by the waiter(s), which makes it useless for
>> anything but a plain one-time latch due to the race window this opens
>> with a consume[A]->signal[B]->clear[A] sequence.
>> 
>> To address this issue, let's provide the auto-clear feature with
>> __cobalt_event_wait().
>> 
>> This change affects the ABI by adding the auto-clear mode as an opt-in
>> feature, enabled by passing COBALT_EVENT_AUTOCLEAR to
>> cobalt_event_init().
>> 
>
> Makes sense, but shouldn't autoclear be rather the default then? Which
> users are affected? None in-tree so far?
>

There is one user in copperplate:
https://source.denx.de/Xenomai/xenomai/-/blob/28158391258eea52650856bef5d3ed6ebaaf813b/lib/copperplate/eventobj.c#L87

which indirectly affects rt_event_signal() from the alchemy API:
https://source.denx.de/Xenomai/xenomai/-/blob/28158391258eea52650856bef5d3ed6ebaaf813b/lib/alchemy/event.c#L453

Nobody raised the issue so far with alchemy, which is why I refrained
from turning the autoclear mode on by default so far. This is debatable,
since no documentation explains the limitation on usage caused by not
having the autoclear mode set.

-- 
Philippe.



* Re: [PATCH 2/5] cobalt/events: add single-waiter delivery mode
  2022-04-07  9:49   ` Jan Kiszka
@ 2022-04-07 11:26     ` Philippe Gerum
  0 siblings, 0 replies; 15+ messages in thread
From: Philippe Gerum @ 2022-04-07 11:26 UTC (permalink / raw)
  To: Jan Kiszka; +Cc: xenomai


Jan Kiszka <jan.kiszka@siemens.com> writes:

> On 06.04.22 17:56, Philippe Gerum via Xenomai wrote:
>> From: Philippe Gerum <rpm@xenomai.org>
>> 
>> Allow for sending a set of event bits to the heading waiter only,
>> instead of broadcasting them to all waiters.
>> 
>> This change affects the ABI, since we add a set of operation flags to
>> the cobalt_event_sync service, to pass the new COBALT_EVENT_BCAST
>> flag. It also affects the internal API as follows:
>> 
>> - the new cobalt_event_broadcast() call behaves like
>>   cobalt_event_post() formerly did, which is broadcasting the event to
>>   all waiters atomically.
>> 
>> - the new cobalt_event_signal() call implements the single-waiter
>>   delivery mode we introduce.
>> 
>> - the former cobalt_event_post() is now a wrapper to
>>   cobalt_event_broadcast(), marked with a deprecation flag.
>
> Strictly speaking, deprecation will come with patch 4 only.
>
> Can we have some test cases for the new features as well? Who will use them?
>

Patch #1 comes with a new test for the autoclear thingy; I'll add a test
case for the single wakeup as well.

Alchemy will use them. This interface is useful to anyone who needs the
complete event flag group semantics without incurring the (significant)
extra cost of a mutex+condvar combo. Typically, although the broadcast
mode is a no-brainer, implementing the single-wakeup pattern is not
trivial with the latter.
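
As a rough sketch of what this enables (illustrative only, WORK_BIT being
an arbitrary event bit):

    /* Several workers block on the same bit... */
    cobalt_event_wait(&evt, WORK_BIT, &bits, COBALT_EVENT_ALL, NULL);

    /* ...then the producer may wake exactly one of them, */
    cobalt_event_signal(&evt, WORK_BIT);
    /* or all of them at once. */
    cobalt_event_broadcast(&evt, WORK_BIT);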

-- 
Philippe.



* Re: [PATCH 5/5] drivers: ipc: enable non-blocking write from regular threads
  2022-04-07  9:51   ` Jan Kiszka
@ 2022-04-07 11:37     ` Philippe Gerum
  0 siblings, 0 replies; 15+ messages in thread
From: Philippe Gerum @ 2022-04-07 11:37 UTC (permalink / raw)
  To: Jan Kiszka; +Cc: xenomai


Jan Kiszka <jan.kiszka@siemens.com> writes:

> On 06.04.22 17:56, Philippe Gerum via Xenomai wrote:
>> From: Philippe Gerum <rpm@xenomai.org>
>> 
>> Regular threads should be allowed to write to RTIPC sockets provided
>> MSG_DONTWAIT is implicitly set for such a request. This would match
>> the existing behavior with other synchronization objects, such as
>> semaphores and events, avoiding unnecessary restrictions on usage.
>> 
>> Signed-off-by: Philippe Gerum <rpm@xenomai.org>
>> ---
>>  kernel/drivers/ipc/bufp.c  | 8 ++++++--
>>  kernel/drivers/ipc/iddp.c  | 8 ++++++--
>>  kernel/drivers/ipc/rtipc.c | 4 ++--
>>  kernel/drivers/ipc/xddp.c  | 6 +++++-
>>  4 files changed, 19 insertions(+), 7 deletions(-)
>> 
>> diff --git a/kernel/drivers/ipc/bufp.c b/kernel/drivers/ipc/bufp.c
>> index fd533dba27..565409dd6f 100644
>> --- a/kernel/drivers/ipc/bufp.c
>> +++ b/kernel/drivers/ipc/bufp.c
>> @@ -655,11 +655,15 @@ static ssize_t bufp_write(struct rtdm_fd *fd,
>>  	struct rtipc_private *priv = rtdm_fd_to_private(fd);
>>  	struct iovec iov = { .iov_base = (void *)buf, .iov_len = len };
>>  	struct bufp_socket *sk = priv->state;
>> +	int flags = 0;
>>  
>>  	if (sk->peer.sipc_port < 0)
>>  		return -EDESTADDRREQ;
>>  
>> -	return __bufp_sendmsg(fd, &iov, 1, 0, &sk->peer);
>> +	if (is_secondary_domain())
>> +		flags = MSG_DONTWAIT;
>> +
>> +	return __bufp_sendmsg(fd, &iov, 1, flags, &sk->peer);
>>  }
>>  
>>  static int __bufp_bind_socket(struct rtipc_private *priv,
>> @@ -682,7 +686,7 @@ static int __bufp_bind_socket(struct rtipc_private *priv,
>>  	    __test_and_set_bit(_BUFP_BINDING, &sk->status))
>>  		ret = -EADDRINUSE;
>>  	cobalt_atomic_leave(s);
>> -	
>> +
>>  	if (ret)
>>  		return ret;
>>  
>> diff --git a/kernel/drivers/ipc/iddp.c b/kernel/drivers/ipc/iddp.c
>> index a553902326..05d0193394 100644
>> --- a/kernel/drivers/ipc/iddp.c
>> +++ b/kernel/drivers/ipc/iddp.c
>> @@ -255,7 +255,7 @@ static ssize_t __iddp_recvmsg(struct rtdm_fd *fd,
>>  	}
>>  
>>  	/* We want to pick one buffer from the queue. */
>> -	
>> +
>>  	for (;;) {
>>  		ret = rtdm_sem_timeddown(&sk->insem, timeout, toseq);
>>  		if (unlikely(ret)) {
>> @@ -522,11 +522,15 @@ static ssize_t iddp_write(struct rtdm_fd *fd,
>>  	struct rtipc_private *priv = rtdm_fd_to_private(fd);
>>  	struct iovec iov = { .iov_base = (void *)buf, .iov_len = len };
>>  	struct iddp_socket *sk = priv->state;
>> +	int flags = 0;
>>  
>>  	if (sk->peer.sipc_port < 0)
>>  		return -EDESTADDRREQ;
>>  
>> -	return __iddp_sendmsg(fd, &iov, 1, 0, &sk->peer);
>> +	if (is_secondary_domain())
>> +		flags = MSG_DONTWAIT;
>> +
>> +	return __iddp_sendmsg(fd, &iov, 1, flags, &sk->peer);
>>  }
>>  
>>  static int __iddp_bind_socket(struct rtdm_fd *fd,
>> diff --git a/kernel/drivers/ipc/rtipc.c b/kernel/drivers/ipc/rtipc.c
>> index 859bdab2f2..211b496ec5 100644
>> --- a/kernel/drivers/ipc/rtipc.c
>> +++ b/kernel/drivers/ipc/rtipc.c
>> @@ -428,7 +428,7 @@ static int rtipc_select(struct rtdm_fd *fd, struct xnselector *selector,
>>  	struct xnselect *block;
>>  	spl_t s;
>>  	int ret;
>> -	
>> +
>>  	if (type != XNSELECT_READ && type != XNSELECT_WRITE)
>>  		return -EINVAL;
>>  
>> @@ -480,7 +480,7 @@ static struct rtdm_driver rtipc_driver = {
>>  		.read_rt	=	rtipc_read,
>>  		.read_nrt	=	NULL,
>>  		.write_rt	=	rtipc_write,
>> -		.write_nrt	=	NULL,
>> +		.write_nrt	=	rtipc_write, /* MSG_DONTWAIT. */
>>  		.select		=	rtipc_select,
>>  	},
>>  };
>> diff --git a/kernel/drivers/ipc/xddp.c b/kernel/drivers/ipc/xddp.c
>> index ae5b720c0c..2ca0da5fd4 100644
>> --- a/kernel/drivers/ipc/xddp.c
>> +++ b/kernel/drivers/ipc/xddp.c
>> @@ -657,11 +657,15 @@ static ssize_t xddp_write(struct rtdm_fd *fd,
>>  	struct rtipc_private *priv = rtdm_fd_to_private(fd);
>>  	struct iovec iov = { .iov_base = (void *)buf, .iov_len = len };
>>  	struct xddp_socket *sk = priv->state;
>> +	int flags = 0;
>>  
>>  	if (sk->peer.sipc_port < 0)
>>  		return -EDESTADDRREQ;
>>  
>> -	return __xddp_sendmsg(fd, &iov, 1, 0, &sk->peer);
>> +	if (is_secondary_domain())
>> +		flags = MSG_DONTWAIT;
>> +
>> +	return __xddp_sendmsg(fd, &iov, 1, flags, &sk->peer);
>>  }
>>  
>>  static int __xddp_bind_socket(struct rtipc_private *priv,
>
> Does this patch have any dependency on 1-4, or was it just bundled with
> them by chance?
>

Not related. I'm upstreaming my patch queue as a whole, along with stuff
which has been in use for a few months here.

-- 
Philippe.



* Re: [PATCH 1/5] cobalt/events: add auto-clear feature
  2022-04-07 11:16   ` Philippe Gerum
@ 2022-04-07 16:39     ` Jan Kiszka
  2022-04-07 17:09       ` Philippe Gerum
  0 siblings, 1 reply; 15+ messages in thread
From: Jan Kiszka @ 2022-04-07 16:39 UTC (permalink / raw)
  To: Philippe Gerum; +Cc: xenomai

On 07.04.22 13:16, Philippe Gerum wrote:
> 
> Jan Kiszka <jan.kiszka@siemens.com> writes:
> 
>> On 06.04.22 17:56, Philippe Gerum via Xenomai wrote:
>>> From: Philippe Gerum <rpm@xenomai.org>
>>>
>>> The current implementation does not atomically consume+clear the event
>>> set to be received by the waiter(s), which makes it useless for
>>> anything but a plain one-time latch due to the race window this opens
>>> with a consume[A]->signal[B]->clear[A] sequence.
>>>
>>> To address this issue, let's provide the auto-clear feature with
>>> __cobalt_event_wait().
>>>
>>> This change affects the ABI by adding the auto-clear mode as an opt-in
>>> feature, enabled by passing COBALT_EVENT_AUTOCLEAR to
>>> cobalt_event_init().
>>>
>>
>> Makes sense, but shouldn't autoclear be rather the default then? Which
>> users are affected? None in-tree so far?
>>
> 
> There is one user in copperplate:
> https://source.denx.de/Xenomai/xenomai/-/blob/28158391258eea52650856bef5d3ed6ebaaf813b/lib/copperplate/eventobj.c#L87
> 
> which indirectly affects rt_event_signal() from the alchemy API:
> https://source.denx.de/Xenomai/xenomai/-/blob/28158391258eea52650856bef5d3ed6ebaaf813b/lib/alchemy/event.c#L453
> 
> Nobody raised the issue so far with alchemy, which is why I refrained
> from turning the autoclear mode on by default so far. This is debatable,
> since no documentation explains the limitation on usage caused by not
> having the autoclear mode set.
> 

So, the pattern via Alchemy would be

Thread A		Thread B

rt_event_wait()
			rt_event_signal()
rt_event_clear()

That would force users to perform a state check via a side-channel after
clearing the event to avoid starting to wait if the condition was met
again.

OK, but how could users request the new mode in rt_event_create? There
is not even an EV_AUTOCLEAR flag for it. Do you have more patches pending?

Jan

-- 
Siemens AG, Technology
Competence Center Embedded Linux



* Re: [PATCH 1/5] cobalt/events: add auto-clear feature
  2022-04-07 16:39     ` Jan Kiszka
@ 2022-04-07 17:09       ` Philippe Gerum
  2022-04-13 11:20         ` Jan Kiszka
  0 siblings, 1 reply; 15+ messages in thread
From: Philippe Gerum @ 2022-04-07 17:09 UTC (permalink / raw)
  To: Jan Kiszka; +Cc: xenomai


Jan Kiszka <jan.kiszka@siemens.com> writes:

> On 07.04.22 13:16, Philippe Gerum wrote:
>> 
>> Jan Kiszka <jan.kiszka@siemens.com> writes:
>> 
>>> On 06.04.22 17:56, Philippe Gerum via Xenomai wrote:
>>>> From: Philippe Gerum <rpm@xenomai.org>
>>>>
>>>> The current implementation does not atomically consume+clear the event
>>>> set to be received by the waiter(s), which makes it useless for
>>>> anything but a plain one-time latch due to the race window this opens
>>>> with a consume[A]->signal[B]->clear[A] sequence.
>>>>
>>>> To address this issue, let's provide the auto-clear feature with
>>>> __cobalt_event_wait().
>>>>
>>>> This change affects the ABI by adding the auto-clear mode as an opt-in
>>>> feature, enabled by passing COBALT_EVENT_AUTOCLEAR to
>>>> cobalt_event_init().
>>>>
>>>
>>> Makes sense, but shouldn't autoclear be rather the default then? Which
>>> users are affected? None in-tree so far?
>>>
>> 
>> There is one user in copperplate:
>> https://source.denx.de/Xenomai/xenomai/-/blob/28158391258eea52650856bef5d3ed6ebaaf813b/lib/copperplate/eventobj.c#L87
>> 
>> which indirectly affects rt_event_signal() from the alchemy API:
>> https://source.denx.de/Xenomai/xenomai/-/blob/28158391258eea52650856bef5d3ed6ebaaf813b/lib/alchemy/event.c#L453
>> 
>> Nobody raised the issue so far with alchemy, which is why I refrained
>> from turning the autoclear mode on by default so far. This is debatable,
>> since no documentation explains the limitation on usage caused by not
>> having the autoclear mode set.
>> 
>
> So, the pattern via Alchemy would be
>
> Thread A		Thread B
>
> rt_event_wait()
> 			rt_event_signal()
> rt_event_clear()
>
> That would force users to perform a state check via a side-channel after
> clearing the event, to avoid going back to waiting if the condition was
> met again.
>
> OK, but how could users request the new mode in rt_event_create()? There
> is not even an EV_AUTOCLEAR flag for it. Do you have more patches pending?
>

The alternatives I see are:

- assume that some people might be relying on the current - fragile, to
  say the least - behavior, which means that we should add a flag to
  rt_event_create() so that everybody else can enable the auto-clear
  mode.

- consider the current behavior broken beyond repair, and force the
  auto-clear mode on for all alchemy events, at the expense of requiring
  the folks who have been using a side-channel to paper over the current
  misdesign to fix their code the right way, relying on the auto-clear
  behavior.

IMHO, everyone would be better off with #2, because it would just work
in all cases. The side-channel would simply become useless but innocuous
noise if present.
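
As a rough sketch of what #2 would mean for applications (DATA_READY and
evt are made-up names; the wait is assumed to consume the returned bits
atomically under the hood, with no alchemy-level flag involved):

#include <alchemy/event.h>

#define DATA_READY 0x1	/* illustrative event bit */

static RT_EVENT evt;

static void consumer(void)
{
	unsigned long mask;

	for (;;) {
		/*
		 * With auto-clear forced on, the bits returned in mask
		 * are consumed atomically by the wait itself: no
		 * separate rt_event_clear(), no side-channel re-check.
		 */
		rt_event_wait(&evt, DATA_READY, &mask, EV_ANY, TM_INFINITE);
		/* act on mask ... */
	}
}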

Which way we should go is debatable, which is why I have not issued any
patch changing the alchemy interface yet.

-- 
Philippe.



* Re: [PATCH 1/5] cobalt/events: add auto-clear feature
  2022-04-07 17:09       ` Philippe Gerum
@ 2022-04-13 11:20         ` Jan Kiszka
  0 siblings, 0 replies; 15+ messages in thread
From: Jan Kiszka @ 2022-04-13 11:20 UTC (permalink / raw)
  To: Philippe Gerum; +Cc: xenomai

On 07.04.22 19:09, Philippe Gerum wrote:
> 
> Jan Kiszka <jan.kiszka@siemens.com> writes:
> 
>> On 07.04.22 13:16, Philippe Gerum wrote:
>>>
>>> Jan Kiszka <jan.kiszka@siemens.com> writes:
>>>
>>>> On 06.04.22 17:56, Philippe Gerum via Xenomai wrote:
>>>>> From: Philippe Gerum <rpm@xenomai.org>
>>>>>
>>>>> The current implementation does not atomically consume+clear the event
>>>>> set to be received by the waiter(s), which makes it useless for
>>>>> anything but a plain one-time latch due to the race window this opens
>>>>> with a consume[A]->signal[B]->clear[A] sequence.
>>>>>
>>>>> To address this issue, let's provide the auto-clear feature with
>>>>> __cobalt_event_wait().
>>>>>
>>>>> This change affects the ABI by adding the auto-clear mode as an opt-in
>>>>> feature, enabled by passing COBALT_EVENT_AUTOCLEAR to
>>>>> cobalt_event_init().
>>>>>
>>>>
>>>> Makes sense, but shouldn't autoclear be rather the default then? Which
>>>> users are affected? None in-tree so far?
>>>>
>>>
>>> There is one user in copperplate:
>>> https://source.denx.de/Xenomai/xenomai/-/blob/28158391258eea52650856bef5d3ed6ebaaf813b/lib/copperplate/eventobj.c#L87
>>>
>>> which indirectly affects rt_event_signal() from the alchemy API:
>>> https://source.denx.de/Xenomai/xenomai/-/blob/28158391258eea52650856bef5d3ed6ebaaf813b/lib/alchemy/event.c#L453
>>>
>>> Nobody raised the issue so far with alchemy, which is why I refrained
>>> from turning the autoclear mode on by default so far. This is debatable,
>>> since no documentation explains the limitation on usage caused by not
>>> having the autoclear mode set.
>>>
>>
>> So, the pattern via Alchemy would be
>>
>> Thread A		Thread B
>>
>> rt_event_wait()
>> 			rt_event_signal()
>> rt_event_clear()
>>
>> That would force users to perform a state check via a side-channel after
>> clearing the event, to avoid going back to waiting if the condition was
>> met again.
>>
>> OK, but how could users request the new mode in rt_event_create()? There
>> is not even an EV_AUTOCLEAR flag for it. Do you have more patches pending?
>>
> 
> The alternatives I see are:
> 
> - assume that some people might be relying on the current - fragile, to
>   say the least - behavior, which means that we should add a flag to
>   rt_event_create() so that everybody else can enable the auto-clear
>   mode.
> 
> - consider the current behavior broken beyond repair, and force the
>   auto-clear mode on for all alchemy events, at the expense of requiring
>   the folks who have been using a side-channel to paper over the current
>   misdesign to fix their code the right way, relying on the auto-clear
>   behavior.
> 
> IMHO, everyone would be better off with #2, because it would just work
> in all cases. The side-channel would simply become useless but innocuous
> noise if present.
> 
> Which way we should go is debatable, which is why I have not issued any
> patch changing the alchemy interface yet.
> 

Let's go for option #2 then and include the alchemy changes as well.
Will only target the next major release anyway.

Jan

-- 
Siemens AG, Technology
Competence Center Embedded Linux



* Re: [PATCH 5/5] drivers: ipc: enable non-blocking write from regular threads
  2022-04-06 15:56 ` [PATCH 5/5] drivers: ipc: enable non-blocking write from regular threads Philippe Gerum
  2022-04-07  9:51   ` Jan Kiszka
@ 2022-04-13 11:21   ` Jan Kiszka
  1 sibling, 0 replies; 15+ messages in thread
From: Jan Kiszka @ 2022-04-13 11:21 UTC (permalink / raw)
  To: Philippe Gerum, xenomai

On 06.04.22 17:56, Philippe Gerum via Xenomai wrote:
> From: Philippe Gerum <rpm@xenomai.org>
> 
> Regular threads should be allowed to write to RTIPC sockets provided
> MSG_DONTWAIT is implicitly set for such a request. This would match
> the existing behavior with other synchronization objects, such as
> semaphores and events, avoiding unnecessary restrictions on usage.
> 
> Signed-off-by: Philippe Gerum <rpm@xenomai.org>
> ---
>  kernel/drivers/ipc/bufp.c  | 8 ++++++--
>  kernel/drivers/ipc/iddp.c  | 8 ++++++--
>  kernel/drivers/ipc/rtipc.c | 4 ++--
>  kernel/drivers/ipc/xddp.c  | 6 +++++-
>  4 files changed, 19 insertions(+), 7 deletions(-)
> 
> diff --git a/kernel/drivers/ipc/bufp.c b/kernel/drivers/ipc/bufp.c
> index fd533dba27..565409dd6f 100644
> --- a/kernel/drivers/ipc/bufp.c
> +++ b/kernel/drivers/ipc/bufp.c
> @@ -655,11 +655,15 @@ static ssize_t bufp_write(struct rtdm_fd *fd,
>  	struct rtipc_private *priv = rtdm_fd_to_private(fd);
>  	struct iovec iov = { .iov_base = (void *)buf, .iov_len = len };
>  	struct bufp_socket *sk = priv->state;
> +	int flags = 0;
>  
>  	if (sk->peer.sipc_port < 0)
>  		return -EDESTADDRREQ;
>  
> -	return __bufp_sendmsg(fd, &iov, 1, 0, &sk->peer);
> +	if (is_secondary_domain())
> +		flags = MSG_DONTWAIT;
> +
> +	return __bufp_sendmsg(fd, &iov, 1, flags, &sk->peer);
>  }
>  
>  static int __bufp_bind_socket(struct rtipc_private *priv,
> @@ -682,7 +686,7 @@ static int __bufp_bind_socket(struct rtipc_private *priv,
>  	    __test_and_set_bit(_BUFP_BINDING, &sk->status))
>  		ret = -EADDRINUSE;
>  	cobalt_atomic_leave(s);
> -	
> +
>  	if (ret)
>  		return ret;
>  
> diff --git a/kernel/drivers/ipc/iddp.c b/kernel/drivers/ipc/iddp.c
> index a553902326..05d0193394 100644
> --- a/kernel/drivers/ipc/iddp.c
> +++ b/kernel/drivers/ipc/iddp.c
> @@ -255,7 +255,7 @@ static ssize_t __iddp_recvmsg(struct rtdm_fd *fd,
>  	}
>  
>  	/* We want to pick one buffer from the queue. */
> -	
> +
>  	for (;;) {
>  		ret = rtdm_sem_timeddown(&sk->insem, timeout, toseq);
>  		if (unlikely(ret)) {
> @@ -522,11 +522,15 @@ static ssize_t iddp_write(struct rtdm_fd *fd,
>  	struct rtipc_private *priv = rtdm_fd_to_private(fd);
>  	struct iovec iov = { .iov_base = (void *)buf, .iov_len = len };
>  	struct iddp_socket *sk = priv->state;
> +	int flags = 0;
>  
>  	if (sk->peer.sipc_port < 0)
>  		return -EDESTADDRREQ;
>  
> -	return __iddp_sendmsg(fd, &iov, 1, 0, &sk->peer);
> +	if (is_secondary_domain())
> +		flags = MSG_DONTWAIT;
> +
> +	return __iddp_sendmsg(fd, &iov, 1, flags, &sk->peer);
>  }
>  
>  static int __iddp_bind_socket(struct rtdm_fd *fd,
> diff --git a/kernel/drivers/ipc/rtipc.c b/kernel/drivers/ipc/rtipc.c
> index 859bdab2f2..211b496ec5 100644
> --- a/kernel/drivers/ipc/rtipc.c
> +++ b/kernel/drivers/ipc/rtipc.c
> @@ -428,7 +428,7 @@ static int rtipc_select(struct rtdm_fd *fd, struct xnselector *selector,
>  	struct xnselect *block;
>  	spl_t s;
>  	int ret;
> -	
> +
>  	if (type != XNSELECT_READ && type != XNSELECT_WRITE)
>  		return -EINVAL;
>  
> @@ -480,7 +480,7 @@ static struct rtdm_driver rtipc_driver = {
>  		.read_rt	=	rtipc_read,
>  		.read_nrt	=	NULL,
>  		.write_rt	=	rtipc_write,
> -		.write_nrt	=	NULL,
> +		.write_nrt	=	rtipc_write, /* MSG_DONTWAIT. */
>  		.select		=	rtipc_select,
>  	},
>  };
> diff --git a/kernel/drivers/ipc/xddp.c b/kernel/drivers/ipc/xddp.c
> index ae5b720c0c..2ca0da5fd4 100644
> --- a/kernel/drivers/ipc/xddp.c
> +++ b/kernel/drivers/ipc/xddp.c
> @@ -657,11 +657,15 @@ static ssize_t xddp_write(struct rtdm_fd *fd,
>  	struct rtipc_private *priv = rtdm_fd_to_private(fd);
>  	struct iovec iov = { .iov_base = (void *)buf, .iov_len = len };
>  	struct xddp_socket *sk = priv->state;
> +	int flags = 0;
>  
>  	if (sk->peer.sipc_port < 0)
>  		return -EDESTADDRREQ;
>  
> -	return __xddp_sendmsg(fd, &iov, 1, 0, &sk->peer);
> +	if (is_secondary_domain())
> +		flags = MSG_DONTWAIT;
> +
> +	return __xddp_sendmsg(fd, &iov, 1, flags, &sk->peer);
>  }
>  
>  static int __xddp_bind_socket(struct rtipc_private *priv,

Picked this one up for next already, as it is logically unrelated to the
rest of the series.
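
For anyone wondering about the user-visible effect, a rough sketch of a
write issued from the regular Linux domain (secondary mode) follows; the
port number, function name and error handling are illustrative only:

#include <sys/socket.h>
#include <unistd.h>
#include <string.h>
#include <errno.h>
#include <stdio.h>
#include <rtdm/ipc.h>

#define MY_XDDP_PORT 0	/* illustrative port number */

static int send_from_regular_context(const void *msg, size_t len)
{
	struct sockaddr_ipc addr;
	ssize_t ret;
	int s;

	s = socket(AF_RTIPC, SOCK_DGRAM, IPCPROTO_XDDP);
	if (s < 0)
		return -errno;

	memset(&addr, 0, sizeof(addr));
	addr.sipc_family = AF_RTIPC;
	addr.sipc_port = MY_XDDP_PORT;
	if (connect(s, (struct sockaddr *)&addr, sizeof(addr)) < 0) {
		close(s);
		return -errno;
	}

	/*
	 * With the patch applied, this write is accepted from the
	 * regular Linux domain and handled as if MSG_DONTWAIT were
	 * set, so it returns -1 with errno set to EWOULDBLOCK/EAGAIN
	 * instead of blocking when the buffer is full.
	 */
	ret = write(s, msg, len);
	if (ret < 0 && errno == EWOULDBLOCK)
		fprintf(stderr, "xddp write would block, retry later\n");

	close(s);
	return ret < 0 ? -errno : 0;
}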

Thanks,
Jan

-- 
Siemens AG, Technology
Competence Center Embedded Linux



end of thread, other threads:[~2022-04-13 11:21 UTC | newest]

Thread overview: 15+ messages (download: mbox.gz / follow: Atom feed)
2022-04-06 15:56 [PATCH 1/5] cobalt/events: add auto-clear feature Philippe Gerum
2022-04-06 15:56 ` [PATCH 2/5] cobalt/events: add single-waiter delivery mode Philippe Gerum
2022-04-07  9:49   ` Jan Kiszka
2022-04-07 11:26     ` Philippe Gerum
2022-04-06 15:56 ` [PATCH 3/5] copperplate, testsuite: convert to cobalt_event_broadcast() Philippe Gerum
2022-04-06 15:56 ` [PATCH 4/5] lib/cobalt: deprecate cobalt_event_post() Philippe Gerum
2022-04-06 15:56 ` [PATCH 5/5] drivers: ipc: enable non-blocking write from regular threads Philippe Gerum
2022-04-07  9:51   ` Jan Kiszka
2022-04-07 11:37     ` Philippe Gerum
2022-04-13 11:21   ` Jan Kiszka
2022-04-07  9:47 ` [PATCH 1/5] cobalt/events: add auto-clear feature Jan Kiszka
2022-04-07 11:16   ` Philippe Gerum
2022-04-07 16:39     ` Jan Kiszka
2022-04-07 17:09       ` Philippe Gerum
2022-04-13 11:20         ` Jan Kiszka
