* [PATCH] cobalt/thread: get rid of dynamic allocation for xnthread_signal
@ 2021-06-08  7:04 Hongzhan Chen
  2021-06-08  8:27 ` Jan Kiszka
  0 siblings, 1 reply; 9+ messages in thread
From: Hongzhan Chen @ 2021-06-08  7:04 UTC (permalink / raw)
  To: xenomai

Add one or more "signal slots" per possible cause of in-band signal
into struct xnthread to get rid of dynamic allocation.

For SIGDEBUG, all causes except SIGDEBUG_WATCHDOG, i.e.
SIGDEBUG_MIGRATE_SIGNAL, SIGDEBUG_MIGRATE_SYSCALL,
SIGDEBUG_MIGRATE_FAULT, SIGDEBUG_MIGRATE_PRIOINV, SIGDEBUG_NOMLOCK,
SIGDEBUG_RESCNT_IMBALANCE, SIGDEBUG_LOCK_BREAK and SIGDEBUG_MUTEX_SLEEP,
are synchronous and mutually exclusive due to the oob->in-band transition.
SIGDEBUG_WATCHDOG, however, is triggered asynchronously by the oob timer.

All SIGSHADOW_* events need their own slot: SIGSHADOW_ACTION_HARDEN and
SIGSHADOW_ACTION_HOME can be raised by remote threads asynchronously to
the target thread. SIGSHADOW_ACTION_BACKTRACE comes in addition to
SIGDEBUG_MIGRATE_*. For the latter reason, SIGSHADOW_ACTION_BACKTRACE
cannot pile up though.

Including SIGTERM, we need a total of 6 slots.

Signed-off-by: Hongzhan Chen <hongzhan.chen@intel.com>

diff --git a/include/cobalt/kernel/thread.h b/include/cobalt/kernel/thread.h
index bc83cc556..29795f79c 100644
--- a/include/cobalt/kernel/thread.h
+++ b/include/cobalt/kernel/thread.h
@@ -42,6 +42,7 @@
  */
 #define XNTHREAD_BLOCK_BITS   (XNSUSP|XNPEND|XNDELAY|XNDORMANT|XNRELAX|XNHELD|XNDBGSTOP)
 #define XNTHREAD_MODE_BITS    (XNRRB|XNWARN|XNTRAPLB)
+#define XNTHREAD_SIGNALS_NUM  6
 
 struct xnthread;
 struct xnsched;
@@ -51,6 +52,13 @@ struct xnsched_tpslot;
 struct xnthread_personality;
 struct completion;
 
+struct lostage_signal {
+	struct pipeline_inband_work inband_work; /* Must be first. */
+	struct task_struct *task;
+	int signo, sigval;
+	struct lostage_signal *self; /* Revisit: I-pipe requirement */
+};
+
 struct xnthread_init_attr {
 	struct xnthread_personality *personality;
 	cpumask_t affinity;
@@ -205,6 +213,29 @@ struct xnthread {
 	const char *exe_path;	/* Executable path */
 	u32 proghash;		/* Hash value for exe_path */
 #endif
+	/*
+	 * Each slot handle different cause of signal
+	 * slot 0:
+	 * 	SIGDEBUG_MIGRATE_SIGNAL
+	 * 	SIGDEBUG_MIGRATE_SYSCALL
+	 * 	SIGDEBUG_MIGRATE_FAULT
+	 * 	SIGDEBUG_MIGRATE_PRIOINV
+	 * 	SIGDEBUG_NOMLOCK
+	 * 	SIGDEBUG_RESCNT_IMBALANCE
+	 * 	SIGDEBUG_LOCK_BREAK
+	 * 	SIGDEBUG_MUTEX_SLEEP
+	 * slot 1:
+	 * 	SIGDEBUG_WATCHDOG
+	 * slot 2:
+	 * 	SIGSHADOW_ACTION_HARDEN
+	 * slot 3:
+	 * 	SIGSHADOW_ACTION_BACKTRACE
+	 * slot 4:
+	 * 	SIGSHADOW_ACTION_HOME
+	 * slot 5:
+	 * 	SIGTERM
+	 */
+	struct lostage_signal sigarray[XNTHREAD_SIGNALS_NUM];
 };
 
 static inline int xnthread_get_state(const struct xnthread *thread)
diff --git a/kernel/cobalt/thread.c b/kernel/cobalt/thread.c
index 9c7753ac4..ebb798b58 100644
--- a/kernel/cobalt/thread.c
+++ b/kernel/cobalt/thread.c
@@ -2065,13 +2065,6 @@ void xnthread_relax(int notify, int reason)
 }
 EXPORT_SYMBOL_GPL(xnthread_relax);
 
-struct lostage_signal {
-	struct pipeline_inband_work inband_work; /* Must be first. */
-	struct task_struct *task;
-	int signo, sigval;
-	struct lostage_signal *self; /* Revisit: I-pipe requirement */
-};
-
 static inline void do_kthread_signal(struct task_struct *p,
 				     struct xnthread *thread,
 				     struct lostage_signal *rq)
@@ -2111,7 +2104,7 @@ static void lostage_task_signal(struct pipeline_inband_work *inband_work)
 	} else
 		send_sig(signo, p, 1);
 out:
-	xnfree(rq->self);
+	memset(rq->self, 0, sizeof(*rq));
 }
 
 static int force_wakeup(struct xnthread *thread) /* nklock locked, irqs off */
@@ -2273,14 +2266,56 @@ void xnthread_demote(struct xnthread *thread)
 }
 EXPORT_SYMBOL_GPL(xnthread_demote);
 
+int get_slot_index_from_sig(int sig, int arg)
+{
+	int action;
+
+	switch (sig) {
+	case SIGDEBUG:
+		switch (arg) {
+		case SIGDEBUG_WATCHDOG:
+			return 1;
+		case SIGDEBUG_MIGRATE_SIGNAL:
+		case SIGDEBUG_MIGRATE_SYSCALL:
+		case SIGDEBUG_MIGRATE_FAULT:
+		case SIGDEBUG_MIGRATE_PRIOINV:
+		case SIGDEBUG_NOMLOCK:
+		case SIGDEBUG_RESCNT_IMBALANCE:
+		case SIGDEBUG_LOCK_BREAK:
+		case SIGDEBUG_MUTEX_SLEEP:
+			return 0;
+		}
+		break;
+	case SIGSHADOW:
+		action = sigshadow_action(arg);
+		switch (action) {
+		case SIGSHADOW_ACTION_HARDEN:
+			return 2;
+		case SIGSHADOW_ACTION_BACKTRACE:
+			return 3;
+		case SIGSHADOW_ACTION_HOME:
+			return 4;
+		}
+		break;
+	case SIGTERM:
+		return 5;
+	}
+
+	return -1;
+}
+
 void xnthread_signal(struct xnthread *thread, int sig, int arg)
 {
 	struct lostage_signal *sigwork;
+	int slot;
+
+	slot = get_slot_index_from_sig(sig, arg);
 
-	sigwork = xnmalloc(sizeof(*sigwork));
-	if (WARN_ON(sigwork == NULL))
+	if (slot < 0 || slot >= XNTHREAD_SIGNALS_NUM)
 		return;
 
+	sigwork = &thread->sigarray[slot];
+
 	sigwork->inband_work = (struct pipeline_inband_work)
 			PIPELINE_INBAND_WORK_INITIALIZER(*sigwork,
 					lostage_task_signal);
-- 
2.17.1




* Re: [PATCH] cobalt/thread: get rid of dynamic allocation for xnthread_signal
  2021-06-08  7:04 [PATCH] cobalt/thread: get rid of dynamic allocation for xnthread_signal Hongzhan Chen
@ 2021-06-08  8:27 ` Jan Kiszka
  2021-06-08  9:13   ` Chen, Hongzhan
  2021-06-09  5:10   ` Chen, Hongzhan
  0 siblings, 2 replies; 9+ messages in thread
From: Jan Kiszka @ 2021-06-08  8:27 UTC (permalink / raw)
  To: Hongzhan Chen, xenomai

On 08.06.21 09:04, Hongzhan Chen via Xenomai wrote:
> Add one or more "signal slots" per possible cause of in-band signal
> into struct xnthread to get rid of dynamic allocation.
> 
> For SIGDEBUG, except SIGDEBUG_WATCHDOG all other SIGDEBUG_* cause
> including SIGDEBUG_MIGRATE_SIGNAL, SIGDEBUG_MIGRATE_SYSCALL,
> SIGDEBUG_MIGRATE_FAULT, SIGDEBUG_MIGRATE_PRIOINV, SIGDEBUG_NOMLOCK,
> SIGDEBUG_RESCNT_IMBALANCE, SIGDEBUG_LOCK_BREAK, SIGDEBUG_MUTEX_SLEEP
> are synchronous and mutually exclusive due to the oob->in-band transition.
> But SIGDEBUG_WATCHDOG is triggered asynchronously by the oob timer.
> 
> All SIGSHADOW_* events need their own slot: SIGSHADOW_ACTION_HARDEN and
> SIGSHADOW_ACTION_HOME can be raised by remote threads asynchronously to
> the target thread. SIGSHADOW_ACTION_BACKTRACE comes in addition to
> SIGDEBUG_MIGRATE_*. For the latter reason, SIGSHADOW_ACTION_BACKTRACE
> cannot pile up though.
> 
> Including SIGTERM, we have totally 6 slots.
> 

Please rebase on top of next, rather than wip/dovetail. It will replace
the related patch in wip/dovetail.

> Signed-off-by: Hongzhan Chen <hongzhan.chen@intel.com>
> 
> diff --git a/include/cobalt/kernel/thread.h b/include/cobalt/kernel/thread.h
> index bc83cc556..29795f79c 100644
> --- a/include/cobalt/kernel/thread.h
> +++ b/include/cobalt/kernel/thread.h
> @@ -42,6 +42,7 @@
>   */
>  #define XNTHREAD_BLOCK_BITS   (XNSUSP|XNPEND|XNDELAY|XNDORMANT|XNRELAX|XNHELD|XNDBGSTOP)
>  #define XNTHREAD_MODE_BITS    (XNRRB|XNWARN|XNTRAPLB)
> +#define XNTHREAD_SIGNALS_NUM  6
>  
>  struct xnthread;
>  struct xnsched;
> @@ -51,6 +52,13 @@ struct xnsched_tpslot;
>  struct xnthread_personality;
>  struct completion;
>  
> +struct lostage_signal {
> +	struct pipeline_inband_work inband_work; /* Must be first. */
> +	struct task_struct *task;
> +	int signo, sigval;
> +	struct lostage_signal *self; /* Revisit: I-pipe requirement */
> +};
> +
>  struct xnthread_init_attr {
>  	struct xnthread_personality *personality;
>  	cpumask_t affinity;
> @@ -205,6 +213,29 @@ struct xnthread {
>  	const char *exe_path;	/* Executable path */
>  	u32 proghash;		/* Hash value for exe_path */
>  #endif
> +	/*
> +	 * Each slot handle different cause of signal
> +	 * slot 0:
> +	 * 	SIGDEBUG_MIGRATE_SIGNAL
> +	 * 	SIGDEBUG_MIGRATE_SYSCALL
> +	 * 	SIGDEBUG_MIGRATE_FAULT
> +	 * 	SIGDEBUG_MIGRATE_PRIOINV
> +	 * 	SIGDEBUG_NOMLOCK
> +	 * 	SIGDEBUG_RESCNT_IMBALANCE
> +	 * 	SIGDEBUG_LOCK_BREAK
> +	 * 	SIGDEBUG_MUTEX_SLEEP
> +	 * slot 1:
> +	 * 	SIGDEBUG_WATCHDOG

I'm still a bit undecided if we should serialize watchdog events with
other SIGDEBUG reasons, but that could also be done on top later on as
it appears to me now as slightly more complicated.

> +	 * slot 2:
> +	 * 	SIGSHADOW_ACTION_HARDEN
> +	 * slot 3:
> +	 * 	SIGSHADOW_ACTION_BACKTRACE
> +	 * slot 4:
> +	 * 	SIGSHADOW_ACTION_HOME
> +	 * slot 5:
> +	 * 	SIGTERM
> +	 */

Could we also define names for the slots?
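
For illustration only (the slot names below are invented here, not taken
from the patch), the mapping could be expressed as an enum, so that
get_slot_index_from_sig() returns named constants instead of magic numbers:

enum xnthread_signal_slot {
	XNTHREAD_SIGNAL_SIGDEBUG,	/* synchronous SIGDEBUG_* causes */
	XNTHREAD_SIGNAL_WATCHDOG,	/* SIGDEBUG_WATCHDOG */
	XNTHREAD_SIGNAL_HARDEN,		/* SIGSHADOW_ACTION_HARDEN */
	XNTHREAD_SIGNAL_BACKTRACE,	/* SIGSHADOW_ACTION_BACKTRACE */
	XNTHREAD_SIGNAL_HOME,		/* SIGSHADOW_ACTION_HOME */
	XNTHREAD_SIGNAL_SIGTERM,	/* SIGTERM */
	XNTHREAD_SIGNALS_NUM,
};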

> +	struct lostage_signal sigarray[XNTHREAD_SIGNALS_NUM];
>  };
>  
>  static inline int xnthread_get_state(const struct xnthread *thread)
> diff --git a/kernel/cobalt/thread.c b/kernel/cobalt/thread.c
> index 9c7753ac4..ebb798b58 100644
> --- a/kernel/cobalt/thread.c
> +++ b/kernel/cobalt/thread.c
> @@ -2065,13 +2065,6 @@ void xnthread_relax(int notify, int reason)
>  }
>  EXPORT_SYMBOL_GPL(xnthread_relax);
>  
> -struct lostage_signal {
> -	struct pipeline_inband_work inband_work; /* Must be first. */
> -	struct task_struct *task;
> -	int signo, sigval;
> -	struct lostage_signal *self; /* Revisit: I-pipe requirement */
> -};
> -
>  static inline void do_kthread_signal(struct task_struct *p,
>  				     struct xnthread *thread,
>  				     struct lostage_signal *rq)
> @@ -2111,7 +2104,7 @@ static void lostage_task_signal(struct pipeline_inband_work *inband_work)
>  	} else
>  		send_sig(signo, p, 1);
>  out:
> -	xnfree(rq->self);
> +	memset(rq->self, 0, sizeof(*rq));
>  }
>  
>  static int force_wakeup(struct xnthread *thread) /* nklock locked, irqs off */
> @@ -2273,14 +2266,56 @@ void xnthread_demote(struct xnthread *thread)
>  }
>  EXPORT_SYMBOL_GPL(xnthread_demote);
>  
> +int get_slot_index_from_sig(int sig, int arg)
> +{
> +	int action;
> +
> +	switch (sig) {
> +	case SIGDEBUG:
> +		switch (arg) {
> +		case SIGDEBUG_WATCHDOG:
> +			return 1;
> +		case SIGDEBUG_MIGRATE_SIGNAL:
> +		case SIGDEBUG_MIGRATE_SYSCALL:
> +		case SIGDEBUG_MIGRATE_FAULT:
> +		case SIGDEBUG_MIGRATE_PRIOINV:
> +		case SIGDEBUG_NOMLOCK:
> +		case SIGDEBUG_RESCNT_IMBALANCE:
> +		case SIGDEBUG_LOCK_BREAK:
> +		case SIGDEBUG_MUTEX_SLEEP:
> +			return 0;
> +		}
> +		break;
> +	case SIGSHADOW:
> +		action = sigshadow_action(arg);
> +		switch (action) {
> +		case SIGSHADOW_ACTION_HARDEN:
> +			return 2;
> +		case SIGSHADOW_ACTION_BACKTRACE:
> +			return 3;
> +		case SIGSHADOW_ACTION_HOME:
> +			return 4;
> +		}
> +		break;
> +	case SIGTERM:
> +		return 5;
> +	}
> +
> +	return -1;
> +}
> +
>  void xnthread_signal(struct xnthread *thread, int sig, int arg)
>  {
>  	struct lostage_signal *sigwork;
> +	int slot;
> +
> +	slot = get_slot_index_from_sig(sig, arg);
>  
> -	sigwork = xnmalloc(sizeof(*sigwork));
> -	if (WARN_ON(sigwork == NULL))
> +	if (slot < 0 || slot >= XNTHREAD_SIGNALS_NUM)
>  		return;

Should keep the warning on errors. And >= XNTHREAD_SIGNALS_NUM cannot
happen (over-defensive programming).

>  
> +	sigwork = &thread->sigarray[slot];
> +

Does this automatically handle the case of duplicate submissions on the
same slot?

>  	sigwork->inband_work = (struct pipeline_inband_work)
>  			PIPELINE_INBAND_WORK_INITIALIZER(*sigwork,
>  					lostage_task_signal);
> 

Jan

-- 
Siemens AG, T RDA IOT
Corporate Competence Center Embedded Linux



* RE: [PATCH] cobalt/thread: get rid of dynamic allocation for xnthread_signal
  2021-06-08  8:27 ` Jan Kiszka
@ 2021-06-08  9:13   ` Chen, Hongzhan
  2021-06-08 10:03     ` Jan Kiszka
  2021-06-09  5:10   ` Chen, Hongzhan
  1 sibling, 1 reply; 9+ messages in thread
From: Chen, Hongzhan @ 2021-06-08  9:13 UTC (permalink / raw)
  To: Jan Kiszka, xenomai

>On 08.06.21 09:04, Hongzhan Chen via Xenomai wrote:
>> Add one or more "signal slots" per possible cause of in-band signal
>> into struct xnthread to get rid of dynamic allocation.
>> 
>> For SIGDEBUG, except SIGDEBUG_WATCHDOG all other SIGDEBUG_* cause
>> including SIGDEBUG_MIGRATE_SIGNAL, SIGDEBUG_MIGRATE_SYSCALL,
>> SIGDEBUG_MIGRATE_FAULT, SIGDEBUG_MIGRATE_PRIOINV, SIGDEBUG_NOMLOCK,
>> SIGDEBUG_RESCNT_IMBALANCE, SIGDEBUG_LOCK_BREAK, SIGDEBUG_MUTEX_SLEEP
>> are synchronous and mutually exclusive due to the oob->in-band transition.
>> But SIGDEBUG_WATCHDOG is triggered asynchronously by the oob timer.
>> 
>> All SIGSHADOW_* events need their own slot: SIGSHADOW_ACTION_HARDEN and
>> SIGSHADOW_ACTION_HOME can be raised by remote threads asynchronously to
>> the target thread. SIGSHADOW_ACTION_BACKTRACE comes in addition to
>> SIGDEBUG_MIGRATE_*. For the latter reason, SIGSHADOW_ACTION_BACKTRACE
>> cannot pile up though.
>> 
>> Including SIGTERM, we have totally 6 slots.
>> 
>
>Please rebase on top of next, rather than wip/dovetail. It will replace
>the related patch in wip/dovetail.
>
>> Signed-off-by: Hongzhan Chen <hongzhan.chen@intel.com>
>> 
>> diff --git a/include/cobalt/kernel/thread.h b/include/cobalt/kernel/thread.h
>> index bc83cc556..29795f79c 100644
>> --- a/include/cobalt/kernel/thread.h
>> +++ b/include/cobalt/kernel/thread.h
>> @@ -42,6 +42,7 @@
>>   */
>>  #define XNTHREAD_BLOCK_BITS   (XNSUSP|XNPEND|XNDELAY|XNDORMANT|XNRELAX|XNHELD|XNDBGSTOP)
>>  #define XNTHREAD_MODE_BITS    (XNRRB|XNWARN|XNTRAPLB)
>> +#define XNTHREAD_SIGNALS_NUM  6
>>  
>>  struct xnthread;
>>  struct xnsched;
>> @@ -51,6 +52,13 @@ struct xnsched_tpslot;
>>  struct xnthread_personality;
>>  struct completion;
>>  
>> +struct lostage_signal {
>> +	struct pipeline_inband_work inband_work; /* Must be first. */
>> +	struct task_struct *task;
>> +	int signo, sigval;
>> +	struct lostage_signal *self; /* Revisit: I-pipe requirement */
>> +};
>> +
>>  struct xnthread_init_attr {
>>  	struct xnthread_personality *personality;
>>  	cpumask_t affinity;
>> @@ -205,6 +213,29 @@ struct xnthread {
>>  	const char *exe_path;	/* Executable path */
>>  	u32 proghash;		/* Hash value for exe_path */
>>  #endif
>> +	/*
>> +	 * Each slot handle different cause of signal
>> +	 * slot 0:
>> +	 * 	SIGDEBUG_MIGRATE_SIGNAL
>> +	 * 	SIGDEBUG_MIGRATE_SYSCALL
>> +	 * 	SIGDEBUG_MIGRATE_FAULT
>> +	 * 	SIGDEBUG_MIGRATE_PRIOINV
>> +	 * 	SIGDEBUG_NOMLOCK
>> +	 * 	SIGDEBUG_RESCNT_IMBALANCE
>> +	 * 	SIGDEBUG_LOCK_BREAK
>> +	 * 	SIGDEBUG_MUTEX_SLEEP
>> +	 * slot 1:
>> +	 * 	SIGDEBUG_WATCHDOG
>
>I'm still a bit undecided if we should serialize watchdog events with
>other SIGDEBUG reasons, but that could also be done on top later on as
>it appears to me now as slightly more complicated.
>
>> +	 * slot 2:
>> +	 * 	SIGSHADOW_ACTION_HARDEN
>> +	 * slot 3:
>> +	 * 	SIGSHADOW_ACTION_BACKTRACE
>> +	 * slot 4:
>> +	 * 	SIGSHADOW_ACTION_HOME
>> +	 * slot 5:
>> +	 * 	SIGTERM
>> +	 */
>
>Could we also define names for the slots?
>
>> +	struct lostage_signal sigarray[XNTHREAD_SIGNALS_NUM];
>>  };
>>  
>>  static inline int xnthread_get_state(const struct xnthread *thread)
>> diff --git a/kernel/cobalt/thread.c b/kernel/cobalt/thread.c
>> index 9c7753ac4..ebb798b58 100644
>> --- a/kernel/cobalt/thread.c
>> +++ b/kernel/cobalt/thread.c
>> @@ -2065,13 +2065,6 @@ void xnthread_relax(int notify, int reason)
>>  }
>>  EXPORT_SYMBOL_GPL(xnthread_relax);
>>  
>> -struct lostage_signal {
>> -	struct pipeline_inband_work inband_work; /* Must be first. */
>> -	struct task_struct *task;
>> -	int signo, sigval;
>> -	struct lostage_signal *self; /* Revisit: I-pipe requirement */
>> -};
>> -
>>  static inline void do_kthread_signal(struct task_struct *p,
>>  				     struct xnthread *thread,
>>  				     struct lostage_signal *rq)
>> @@ -2111,7 +2104,7 @@ static void lostage_task_signal(struct pipeline_inband_work *inband_work)
>>  	} else
>>  		send_sig(signo, p, 1);
>>  out:
>> -	xnfree(rq->self);
>> +	memset(rq->self, 0, sizeof(*rq));
>>  }
>>  
>>  static int force_wakeup(struct xnthread *thread) /* nklock locked, irqs off */
>> @@ -2273,14 +2266,56 @@ void xnthread_demote(struct xnthread *thread)
>>  }
>>  EXPORT_SYMBOL_GPL(xnthread_demote);
>>  
>> +int get_slot_index_from_sig(int sig, int arg)
>> +{
>> +	int action;
>> +
>> +	switch (sig) {
>> +	case SIGDEBUG:
>> +		switch (arg) {
>> +		case SIGDEBUG_WATCHDOG:
>> +			return 1;
>> +		case SIGDEBUG_MIGRATE_SIGNAL:
>> +		case SIGDEBUG_MIGRATE_SYSCALL:
>> +		case SIGDEBUG_MIGRATE_FAULT:
>> +		case SIGDEBUG_MIGRATE_PRIOINV:
>> +		case SIGDEBUG_NOMLOCK:
>> +		case SIGDEBUG_RESCNT_IMBALANCE:
>> +		case SIGDEBUG_LOCK_BREAK:
>> +		case SIGDEBUG_MUTEX_SLEEP:
>> +			return 0;
>> +		}
>> +		break;
>> +	case SIGSHADOW:
>> +		action = sigshadow_action(arg);
>> +		switch (action) {
>> +		case SIGSHADOW_ACTION_HARDEN:
>> +			return 2;
>> +		case SIGSHADOW_ACTION_BACKTRACE:
>> +			return 3;
>> +		case SIGSHADOW_ACTION_HOME:
>> +			return 4;
>> +		}
>> +		break;
>> +	case SIGTERM:
>> +		return 5;
>> +	}
>> +
>> +	return -1;
>> +}
>> +
>>  void xnthread_signal(struct xnthread *thread, int sig, int arg)
>>  {
>>  	struct lostage_signal *sigwork;
>> +	int slot;
>> +
>> +	slot = get_slot_index_from_sig(sig, arg);
>>  
>> -	sigwork = xnmalloc(sizeof(*sigwork));
>> -	if (WARN_ON(sigwork == NULL))
>> +	if (slot < 0 || slot >= XNTHREAD_SIGNALS_NUM)
>>  		return;
>
>Should keep the warning on errors. And >= XNTHREAD_SIGNALS_NUM cannot
>happen (over-defensive programming).
>
>>  
>> +	sigwork = &thread->sigarray[slot];
>> +
>
>Does this automatically handle the case of duplicate submissions on the
>same slot?

No, it cannot. If there are duplicate submissions on the same slot, we would
have to create a queue per slot to handle them. But according to Philippe's
statement that "SIGSHADOW (SIGWINCH) and SIGDEBUG (SIGXCPU) are standard
non-queueable signals so we should only need a single slot per signal cause",
they should be non-queueable signals. Please correct me if I am wrong.

Regards

Hongzhan Chen
>
>>  	sigwork->inband_work = (struct pipeline_inband_work)
>>  			PIPELINE_INBAND_WORK_INITIALIZER(*sigwork,
>>  					lostage_task_signal);
>> 
>
>Jan
>
>-- 
>Siemens AG, T RDA IOT
>Corporate Competence Center Embedded Linux



* Re: [PATCH] cobalt/thread: get rid of dynamic allocation for xnthread_signal
  2021-06-08  9:13   ` Chen, Hongzhan
@ 2021-06-08 10:03     ` Jan Kiszka
  2021-06-08 13:52       ` Chen, Hongzhan
  0 siblings, 1 reply; 9+ messages in thread
From: Jan Kiszka @ 2021-06-08 10:03 UTC (permalink / raw)
  To: Chen, Hongzhan, xenomai

On 08.06.21 11:13, Chen, Hongzhan wrote:
>> On 08.06.21 09:04, Hongzhan Chen via Xenomai wrote:
>>> Add one or more "signal slots" per possible cause of in-band signal
>>> into struct xnthread to get rid of dynamic allocation.
>>>
>>> For SIGDEBUG, except SIGDEBUG_WATCHDOG all other SIGDEBUG_* cause
>>> including SIGDEBUG_MIGRATE_SIGNAL, SIGDEBUG_MIGRATE_SYSCALL,
>>> SIGDEBUG_MIGRATE_FAULT, SIGDEBUG_MIGRATE_PRIOINV, SIGDEBUG_NOMLOCK,
>>> SIGDEBUG_RESCNT_IMBALANCE, SIGDEBUG_LOCK_BREAK, SIGDEBUG_MUTEX_SLEEP
>>> are synchronous and mutually exclusive due to the oob->in-band transition.
>>> But SIGDEBUG_WATCHDOG is triggered asynchronously by the oob timer.
>>>
>>> All SIGSHADOW_* events need their own slot: SIGSHADOW_ACTION_HARDEN and
>>> SIGSHADOW_ACTION_HOME can be raised by remote threads asynchronously to
>>> the target thread. SIGSHADOW_ACTION_BACKTRACE comes in addition to
>>> SIGDEBUG_MIGRATE_*. For the latter reason, SIGSHADOW_ACTION_BACKTRACE
>>> cannot pile up though.
>>>
>>> Including SIGTERM, we have totally 6 slots.
>>>
>>
>> Please rebase on top of next, rather than wip/dovetail. It will replace
>> the related patch in wip/dovetail.
>>
>>> Signed-off-by: Hongzhan Chen <hongzhan.chen@intel.com>
>>>
>>> diff --git a/include/cobalt/kernel/thread.h b/include/cobalt/kernel/thread.h
>>> index bc83cc556..29795f79c 100644
>>> --- a/include/cobalt/kernel/thread.h
>>> +++ b/include/cobalt/kernel/thread.h
>>> @@ -42,6 +42,7 @@
>>>   */
>>>  #define XNTHREAD_BLOCK_BITS   (XNSUSP|XNPEND|XNDELAY|XNDORMANT|XNRELAX|XNHELD|XNDBGSTOP)
>>>  #define XNTHREAD_MODE_BITS    (XNRRB|XNWARN|XNTRAPLB)
>>> +#define XNTHREAD_SIGNALS_NUM  6
>>>  
>>>  struct xnthread;
>>>  struct xnsched;
>>> @@ -51,6 +52,13 @@ struct xnsched_tpslot;
>>>  struct xnthread_personality;
>>>  struct completion;
>>>  
>>> +struct lostage_signal {
>>> +	struct pipeline_inband_work inband_work; /* Must be first. */
>>> +	struct task_struct *task;
>>> +	int signo, sigval;
>>> +	struct lostage_signal *self; /* Revisit: I-pipe requirement */
>>> +};
>>> +
>>>  struct xnthread_init_attr {
>>>  	struct xnthread_personality *personality;
>>>  	cpumask_t affinity;
>>> @@ -205,6 +213,29 @@ struct xnthread {
>>>  	const char *exe_path;	/* Executable path */
>>>  	u32 proghash;		/* Hash value for exe_path */
>>>  #endif
>>> +	/*
>>> +	 * Each slot handle different cause of signal
>>> +	 * slot 0:
>>> +	 * 	SIGDEBUG_MIGRATE_SIGNAL
>>> +	 * 	SIGDEBUG_MIGRATE_SYSCALL
>>> +	 * 	SIGDEBUG_MIGRATE_FAULT
>>> +	 * 	SIGDEBUG_MIGRATE_PRIOINV
>>> +	 * 	SIGDEBUG_NOMLOCK
>>> +	 * 	SIGDEBUG_RESCNT_IMBALANCE
>>> +	 * 	SIGDEBUG_LOCK_BREAK
>>> +	 * 	SIGDEBUG_MUTEX_SLEEP
>>> +	 * slot 1:
>>> +	 * 	SIGDEBUG_WATCHDOG
>>
>> I'm still a bit undecided if we should serialize watchdog events with
>> other SIGDEBUG reasons, but that could also be done on top later on as
>> it appears to me now as slightly more complicated.
>>
>>> +	 * slot 2:
>>> +	 * 	SIGSHADOW_ACTION_HARDEN
>>> +	 * slot 3:
>>> +	 * 	SIGSHADOW_ACTION_BACKTRACE
>>> +	 * slot 4:
>>> +	 * 	SIGSHADOW_ACTION_HOME
>>> +	 * slot 5:
>>> +	 * 	SIGTERM
>>> +	 */
>>
>> Could we also define names for the slots?
>>
>>> +	struct lostage_signal sigarray[XNTHREAD_SIGNALS_NUM];
>>>  };
>>>  
>>>  static inline int xnthread_get_state(const struct xnthread *thread)
>>> diff --git a/kernel/cobalt/thread.c b/kernel/cobalt/thread.c
>>> index 9c7753ac4..ebb798b58 100644
>>> --- a/kernel/cobalt/thread.c
>>> +++ b/kernel/cobalt/thread.c
>>> @@ -2065,13 +2065,6 @@ void xnthread_relax(int notify, int reason)
>>>  }
>>>  EXPORT_SYMBOL_GPL(xnthread_relax);
>>>  
>>> -struct lostage_signal {
>>> -	struct pipeline_inband_work inband_work; /* Must be first. */
>>> -	struct task_struct *task;
>>> -	int signo, sigval;
>>> -	struct lostage_signal *self; /* Revisit: I-pipe requirement */
>>> -};
>>> -
>>>  static inline void do_kthread_signal(struct task_struct *p,
>>>  				     struct xnthread *thread,
>>>  				     struct lostage_signal *rq)
>>> @@ -2111,7 +2104,7 @@ static void lostage_task_signal(struct pipeline_inband_work *inband_work)
>>>  	} else
>>>  		send_sig(signo, p, 1);
>>>  out:
>>> -	xnfree(rq->self);
>>> +	memset(rq->self, 0, sizeof(*rq));
>>>  }
>>>  
>>>  static int force_wakeup(struct xnthread *thread) /* nklock locked, irqs off */
>>> @@ -2273,14 +2266,56 @@ void xnthread_demote(struct xnthread *thread)
>>>  }
>>>  EXPORT_SYMBOL_GPL(xnthread_demote);
>>>  
>>> +int get_slot_index_from_sig(int sig, int arg)
>>> +{
>>> +	int action;
>>> +
>>> +	switch (sig) {
>>> +	case SIGDEBUG:
>>> +		switch (arg) {
>>> +		case SIGDEBUG_WATCHDOG:
>>> +			return 1;
>>> +		case SIGDEBUG_MIGRATE_SIGNAL:
>>> +		case SIGDEBUG_MIGRATE_SYSCALL:
>>> +		case SIGDEBUG_MIGRATE_FAULT:
>>> +		case SIGDEBUG_MIGRATE_PRIOINV:
>>> +		case SIGDEBUG_NOMLOCK:
>>> +		case SIGDEBUG_RESCNT_IMBALANCE:
>>> +		case SIGDEBUG_LOCK_BREAK:
>>> +		case SIGDEBUG_MUTEX_SLEEP:
>>> +			return 0;
>>> +		}
>>> +		break;
>>> +	case SIGSHADOW:
>>> +		action = sigshadow_action(arg);
>>> +		switch (action) {
>>> +		case SIGSHADOW_ACTION_HARDEN:
>>> +			return 2;
>>> +		case SIGSHADOW_ACTION_BACKTRACE:
>>> +			return 3;
>>> +		case SIGSHADOW_ACTION_HOME:
>>> +			return 4;
>>> +		}
>>> +		break;
>>> +	case SIGTERM:
>>> +		return 5;
>>> +	}
>>> +
>>> +	return -1;
>>> +}
>>> +
>>>  void xnthread_signal(struct xnthread *thread, int sig, int arg)
>>>  {
>>>  	struct lostage_signal *sigwork;
>>> +	int slot;
>>> +
>>> +	slot = get_slot_index_from_sig(sig, arg);
>>>  
>>> -	sigwork = xnmalloc(sizeof(*sigwork));
>>> -	if (WARN_ON(sigwork == NULL))
>>> +	if (slot < 0 || slot >= XNTHREAD_SIGNALS_NUM)
>>>  		return;
>>
>> Should keep the warning on errors. And >= XNTHREAD_SIGNALS_NUM cannot
>> happen (over-defensive programming).
>>
>>>  
>>> +	sigwork = &thread->sigarray[slot];
>>> +
>>
>> Does this automatically handle the case of duplicate submissions on the
>> same slot?
> 
> No, it can not.  If there is duplicate submissions on the same slot , we have to 
> create a queue to handle them for each slot. But according to " SIGSHADOW(SIGWINCH) and SIGDEBUG(SIGXCPU) are
>  standard non-queueable signals so we should only need a single slot per signal cause." described by Philippe,
> they should be non-queueable signals? Please correct me if  I am wrong.

I'm not concerned about queuing, rather that we might crash or somehow
invalidate an already submitted event. That's why we should think this case
through, even if it turns out to be impossible in some cases, though maybe
not in all of them.
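
As a rough, untested sketch (assuming a zeroed signo still marks a free
slot, like after the memset in lostage_task_signal()), the submission
path could at least refuse to overwrite a slot that is still in flight:

	sigwork = &thread->sigarray[slot];

	/*
	 * A previous submission for this cause has not been consumed
	 * by the in-band handler yet; do not reinitialize the work
	 * descriptor under its feet.
	 */
	if (sigwork->signo)
		return;

Whether such a plain check is enough, or whether it has to be done
atomically, is exactly what I would like to see thought through.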

Jan

-- 
Siemens AG, T RDA IOT
Corporate Competence Center Embedded Linux



* RE: [PATCH] cobalt/thread: get rid of dynamic allocation for xnthread_signal
  2021-06-08 10:03     ` Jan Kiszka
@ 2021-06-08 13:52       ` Chen, Hongzhan
  2021-06-09  1:46         ` Chen, Hongzhan
  0 siblings, 1 reply; 9+ messages in thread
From: Chen, Hongzhan @ 2021-06-08 13:52 UTC (permalink / raw)
  To: Jan Kiszka, xenomai



>-----Original Message-----
>From: Jan Kiszka <jan.kiszka@siemens.com> 
>Sent: Tuesday, June 8, 2021 6:03 PM
>To: Chen, Hongzhan <hongzhan.chen@intel.com>; xenomai@xenomai.org
>Cc: Philippe Gerum <rpm@xenomai.org>
>Subject: Re: [PATCH] cobalt/thread: get rid of dynamic allocation for xnthread_signal
>
>On 08.06.21 11:13, Chen, Hongzhan wrote:
>>> On 08.06.21 09:04, Hongzhan Chen via Xenomai wrote:
>>>> Add one or more "signal slots" per possible cause of in-band signal
>>>> into struct xnthread to get rid of dynamic allocation.
>>>>
>>>> For SIGDEBUG, except SIGDEBUG_WATCHDOG all other SIGDEBUG_* cause
>>>> including SIGDEBUG_MIGRATE_SIGNAL, SIGDEBUG_MIGRATE_SYSCALL,
>>>> SIGDEBUG_MIGRATE_FAULT, SIGDEBUG_MIGRATE_PRIOINV, SIGDEBUG_NOMLOCK,
>>>> SIGDEBUG_RESCNT_IMBALANCE, SIGDEBUG_LOCK_BREAK, SIGDEBUG_MUTEX_SLEEP
>>>> are synchronous and mutually exclusive due to the oob->in-band transition.
>>>> But SIGDEBUG_WATCHDOG is triggered asynchronously by the oob timer.
>>>>
>>>> All SIGSHADOW_* events need their own slot: SIGSHADOW_ACTION_HARDEN and
>>>> SIGSHADOW_ACTION_HOME can be raised by remote threads asynchronously to
>>>> the target thread. SIGSHADOW_ACTION_BACKTRACE comes in addition to
>>>> SIGDEBUG_MIGRATE_*. For the latter reason, SIGSHADOW_ACTION_BACKTRACE
>>>> cannot pile up though.
>>>>
>>>> Including SIGTERM, we have totally 6 slots.
>>>>
>>>
>>> Please rebase on top of next, rather than wip/dovetail. It will replace
>>> the related patch in wip/dovetail.
>>>
>>>> Signed-off-by: Hongzhan Chen <hongzhan.chen@intel.com>
>>>>
>>>> diff --git a/include/cobalt/kernel/thread.h b/include/cobalt/kernel/thread.h
>>>> index bc83cc556..29795f79c 100644
>>>> --- a/include/cobalt/kernel/thread.h
>>>> +++ b/include/cobalt/kernel/thread.h
>>>> @@ -42,6 +42,7 @@
>>>>   */
>>>>  #define XNTHREAD_BLOCK_BITS   (XNSUSP|XNPEND|XNDELAY|XNDORMANT|XNRELAX|XNHELD|XNDBGSTOP)
>>>>  #define XNTHREAD_MODE_BITS    (XNRRB|XNWARN|XNTRAPLB)
>>>> +#define XNTHREAD_SIGNALS_NUM  6
>>>>  
>>>>  struct xnthread;
>>>>  struct xnsched;
>>>> @@ -51,6 +52,13 @@ struct xnsched_tpslot;
>>>>  struct xnthread_personality;
>>>>  struct completion;
>>>>  
>>>> +struct lostage_signal {
>>>> +	struct pipeline_inband_work inband_work; /* Must be first. */
>>>> +	struct task_struct *task;
>>>> +	int signo, sigval;
>>>> +	struct lostage_signal *self; /* Revisit: I-pipe requirement */
>>>> +};
>>>> +
>>>>  struct xnthread_init_attr {
>>>>  	struct xnthread_personality *personality;
>>>>  	cpumask_t affinity;
>>>> @@ -205,6 +213,29 @@ struct xnthread {
>>>>  	const char *exe_path;	/* Executable path */
>>>>  	u32 proghash;		/* Hash value for exe_path */
>>>>  #endif
>>>> +	/*
>>>> +	 * Each slot handle different cause of signal
>>>> +	 * slot 0:
>>>> +	 * 	SIGDEBUG_MIGRATE_SIGNAL
>>>> +	 * 	SIGDEBUG_MIGRATE_SYSCALL
>>>> +	 * 	SIGDEBUG_MIGRATE_FAULT
>>>> +	 * 	SIGDEBUG_MIGRATE_PRIOINV
>>>> +	 * 	SIGDEBUG_NOMLOCK
>>>> +	 * 	SIGDEBUG_RESCNT_IMBALANCE
>>>> +	 * 	SIGDEBUG_LOCK_BREAK
>>>> +	 * 	SIGDEBUG_MUTEX_SLEEP
>>>> +	 * slot 1:
>>>> +	 * 	SIGDEBUG_WATCHDOG
>>>
>>> I'm still a bit undecided if we should serialize watchdog events with
>>> other SIGDEBUG reasons, but that could also be done on top later on as
>>> it appears to me now as slightly more complicated.
>>>
>>>> +	 * slot 2:
>>>> +	 * 	SIGSHADOW_ACTION_HARDEN
>>>> +	 * slot 3:
>>>> +	 * 	SIGSHADOW_ACTION_BACKTRACE
>>>> +	 * slot 4:
>>>> +	 * 	SIGSHADOW_ACTION_HOME
>>>> +	 * slot 5:
>>>> +	 * 	SIGTERM
>>>> +	 */
>>>
>>> Could we also define names for the slots?
>>>
>>>> +	struct lostage_signal sigarray[XNTHREAD_SIGNALS_NUM];
>>>>  };
>>>>  
>>>>  static inline int xnthread_get_state(const struct xnthread *thread)
>>>> diff --git a/kernel/cobalt/thread.c b/kernel/cobalt/thread.c
>>>> index 9c7753ac4..ebb798b58 100644
>>>> --- a/kernel/cobalt/thread.c
>>>> +++ b/kernel/cobalt/thread.c
>>>> @@ -2065,13 +2065,6 @@ void xnthread_relax(int notify, int reason)
>>>>  }
>>>>  EXPORT_SYMBOL_GPL(xnthread_relax);
>>>>  
>>>> -struct lostage_signal {
>>>> -	struct pipeline_inband_work inband_work; /* Must be first. */
>>>> -	struct task_struct *task;
>>>> -	int signo, sigval;
>>>> -	struct lostage_signal *self; /* Revisit: I-pipe requirement */
>>>> -};
>>>> -
>>>>  static inline void do_kthread_signal(struct task_struct *p,
>>>>  				     struct xnthread *thread,
>>>>  				     struct lostage_signal *rq)
>>>> @@ -2111,7 +2104,7 @@ static void lostage_task_signal(struct pipeline_inband_work *inband_work)
>>>>  	} else
>>>>  		send_sig(signo, p, 1);
>>>>  out:
>>>> -	xnfree(rq->self);
>>>> +	memset(rq->self, 0, sizeof(*rq));
>>>>  }
>>>>  
>>>>  static int force_wakeup(struct xnthread *thread) /* nklock locked, irqs off */
>>>> @@ -2273,14 +2266,56 @@ void xnthread_demote(struct xnthread *thread)
>>>>  }
>>>>  EXPORT_SYMBOL_GPL(xnthread_demote);
>>>>  
>>>> +int get_slot_index_from_sig(int sig, int arg)
>>>> +{
>>>> +	int action;
>>>> +
>>>> +	switch (sig) {
>>>> +	case SIGDEBUG:
>>>> +		switch (arg) {
>>>> +		case SIGDEBUG_WATCHDOG:
>>>> +			return 1;
>>>> +		case SIGDEBUG_MIGRATE_SIGNAL:
>>>> +		case SIGDEBUG_MIGRATE_SYSCALL:
>>>> +		case SIGDEBUG_MIGRATE_FAULT:
>>>> +		case SIGDEBUG_MIGRATE_PRIOINV:
>>>> +		case SIGDEBUG_NOMLOCK:
>>>> +		case SIGDEBUG_RESCNT_IMBALANCE:
>>>> +		case SIGDEBUG_LOCK_BREAK:
>>>> +		case SIGDEBUG_MUTEX_SLEEP:
>>>> +			return 0;
>>>> +		}
>>>> +		break;
>>>> +	case SIGSHADOW:
>>>> +		action = sigshadow_action(arg);
>>>> +		switch (action) {
>>>> +		case SIGSHADOW_ACTION_HARDEN:
>>>> +			return 2;
>>>> +		case SIGSHADOW_ACTION_BACKTRACE:
>>>> +			return 3;
>>>> +		case SIGSHADOW_ACTION_HOME:
>>>> +			return 4;
>>>> +		}
>>>> +		break;
>>>> +	case SIGTERM:
>>>> +		return 5;
>>>> +	}
>>>> +
>>>> +	return -1;
>>>> +}
>>>> +
>>>>  void xnthread_signal(struct xnthread *thread, int sig, int arg)
>>>>  {
>>>>  	struct lostage_signal *sigwork;
>>>> +	int slot;
>>>> +
>>>> +	slot = get_slot_index_from_sig(sig, arg);
>>>>  
>>>> -	sigwork = xnmalloc(sizeof(*sigwork));
>>>> -	if (WARN_ON(sigwork == NULL))
>>>> +	if (slot < 0 || slot >= XNTHREAD_SIGNALS_NUM)
>>>>  		return;
>>>
>>> Should keep the warning on errors. And >= XNTHREAD_SIGNALS_NUM cannot
>>> happen (over-defensive programming).
>>>
>>>>  
>>>> +	sigwork = &thread->sigarray[slot];
>>>> +
>>>
>>> Does this automatically handle the case of duplicate submissions on the
>>> same slot?
>> 
>> No, it can not.  If there is duplicate submissions on the same slot , we have to 
>> create a queue to handle them for each slot. But according to " SIGSHADOW(SIGWINCH) and SIGDEBUG(SIGXCPU) are
>>  standard non-queueable signals so we should only need a single slot per signal cause." described by Philippe,
>> they should be non-queueable signals? Please correct me if  I am wrong.
>
>I'm not concerned about queuing, rather about that we might crash or
>somehow invalidate the already submitted event. That's why we should
>think this case through even if impossible for some cases but maybe not all.

xnthread_signal() is called inside a critical section protected by the big
nklock by some callers, such as xnthread_suspend(), xnthread_cancel() and
handle_setaffinity_event(), but other callers do not hold the big lock.
If we add further protection inside xnthread_signal(), it would be redundant
for the callers that already hold the big lock.
What do you suggest?

Regards

Hongzhan Chen  
>
>Jan
>
>-- 
>Siemens AG, T RDA IOT
>Corporate Competence Center Embedded Linux


* RE: [PATCH] cobalt/thread: get rid of dynamic allocation for xnthread_signal
  2021-06-08 13:52       ` Chen, Hongzhan
@ 2021-06-09  1:46         ` Chen, Hongzhan
  2021-06-09  5:40           ` Jan Kiszka
  0 siblings, 1 reply; 9+ messages in thread
From: Chen, Hongzhan @ 2021-06-09  1:46 UTC (permalink / raw)
  To: Jan Kiszka; +Cc: xenomai



>>-----Original Message-----
>>From: Jan Kiszka <jan.kiszka@siemens.com> 
>>Sent: Tuesday, June 8, 2021 6:03 PM
>>To: Chen, Hongzhan <hongzhan.chen@intel.com>; xenomai@xenomai.org
>>Cc: Philippe Gerum <rpm@xenomai.org>
>>Subject: Re: [PATCH] cobalt/thread: get rid of dynamic allocation for xnthread_signal
>>
>>On 08.06.21 11:13, Chen, Hongzhan wrote:
>>>> On 08.06.21 09:04, Hongzhan Chen via Xenomai wrote:
>>>>> Add one or more "signal slots" per possible cause of in-band signal
>>>>> into struct xnthread to get rid of dynamic allocation.
>>>>>
>>>>> For SIGDEBUG, except SIGDEBUG_WATCHDOG all other SIGDEBUG_* cause
>>>>> including SIGDEBUG_MIGRATE_SIGNAL, SIGDEBUG_MIGRATE_SYSCALL,
>>>>> SIGDEBUG_MIGRATE_FAULT, SIGDEBUG_MIGRATE_PRIOINV, SIGDEBUG_NOMLOCK,
>>>>> SIGDEBUG_RESCNT_IMBALANCE, SIGDEBUG_LOCK_BREAK, SIGDEBUG_MUTEX_SLEEP
>>>>> are synchronous and mutually exclusive due to the oob->in-band transition.
>>>>> But SIGDEBUG_WATCHDOG is triggered asynchronously by the oob timer.
>>>>>
>>>>> All SIGSHADOW_* events need their own slot: SIGSHADOW_ACTION_HARDEN and
>>>>> SIGSHADOW_ACTION_HOME can be raised by remote threads asynchronously to
>>>>> the target thread. SIGSHADOW_ACTION_BACKTRACE comes in addition to
>>>>> SIGDEBUG_MIGRATE_*. For the latter reason, SIGSHADOW_ACTION_BACKTRACE
>>>>> cannot pile up though.
>>>>>
>>>>> Including SIGTERM, we have totally 6 slots.
>>>>>
>>>>
>>>> Please rebase on top of next, rather than wip/dovetail. It will replace
>>>> the related patch in wip/dovetail.
>>>>
>>>>> Signed-off-by: Hongzhan Chen <hongzhan.chen@intel.com>
>>>>>
>>>>> diff --git a/include/cobalt/kernel/thread.h b/include/cobalt/kernel/thread.h
>>>>> index bc83cc556..29795f79c 100644
>>>>> --- a/include/cobalt/kernel/thread.h
>>>>> +++ b/include/cobalt/kernel/thread.h
>>>>> @@ -42,6 +42,7 @@
>>>>>   */
>>>>>  #define XNTHREAD_BLOCK_BITS   (XNSUSP|XNPEND|XNDELAY|XNDORMANT|XNRELAX|XNHELD|XNDBGSTOP)
>>>>>  #define XNTHREAD_MODE_BITS    (XNRRB|XNWARN|XNTRAPLB)
>>>>> +#define XNTHREAD_SIGNALS_NUM  6
>>>>>  
>>>>>  struct xnthread;
>>>>>  struct xnsched;
>>>>> @@ -51,6 +52,13 @@ struct xnsched_tpslot;
>>>>>  struct xnthread_personality;
>>>>>  struct completion;
>>>>>  
>>>>> +struct lostage_signal {
>>>>> +	struct pipeline_inband_work inband_work; /* Must be first. */
>>>>> +	struct task_struct *task;
>>>>> +	int signo, sigval;
>>>>> +	struct lostage_signal *self; /* Revisit: I-pipe requirement */
>>>>> +};
>>>>> +
>>>>>  struct xnthread_init_attr {
>>>>>  	struct xnthread_personality *personality;
>>>>>  	cpumask_t affinity;
>>>>> @@ -205,6 +213,29 @@ struct xnthread {
>>>>>  	const char *exe_path;	/* Executable path */
>>>>>  	u32 proghash;		/* Hash value for exe_path */
>>>>>  #endif
>>>>> +	/*
>>>>> +	 * Each slot handle different cause of signal
>>>>> +	 * slot 0:
>>>>> +	 * 	SIGDEBUG_MIGRATE_SIGNAL
>>>>> +	 * 	SIGDEBUG_MIGRATE_SYSCALL
>>>>> +	 * 	SIGDEBUG_MIGRATE_FAULT
>>>>> +	 * 	SIGDEBUG_MIGRATE_PRIOINV
>>>>> +	 * 	SIGDEBUG_NOMLOCK
>>>>> +	 * 	SIGDEBUG_RESCNT_IMBALANCE
>>>>> +	 * 	SIGDEBUG_LOCK_BREAK
>>>>> +	 * 	SIGDEBUG_MUTEX_SLEEP
>>>>> +	 * slot 1:
>>>>> +	 * 	SIGDEBUG_WATCHDOG
>>>>
>>>> I'm still a bit undecided if we should serialize watchdog events with
>>>> other SIGDEBUG reasons, but that could also be done on top later on as
>>>> it appears to me now as slightly more complicated.
>>>>
>>>>> +	 * slot 2:
>>>>> +	 * 	SIGSHADOW_ACTION_HARDEN
>>>>> +	 * slot 3:
>>>>> +	 * 	SIGSHADOW_ACTION_BACKTRACE
>>>>> +	 * slot 4:
>>>>> +	 * 	SIGSHADOW_ACTION_HOME
>>>>> +	 * slot 5:
>>>>> +	 * 	SIGTERM
>>>>> +	 */
>>>>
>>>> Could we also define names for the slots?
>>>>
>>>>> +	struct lostage_signal sigarray[XNTHREAD_SIGNALS_NUM];
>>>>>  };
>>>>>  
>>>>>  static inline int xnthread_get_state(const struct xnthread *thread)
>>>>> diff --git a/kernel/cobalt/thread.c b/kernel/cobalt/thread.c
>>>>> index 9c7753ac4..ebb798b58 100644
>>>>> --- a/kernel/cobalt/thread.c
>>>>> +++ b/kernel/cobalt/thread.c
>>>>> @@ -2065,13 +2065,6 @@ void xnthread_relax(int notify, int reason)
>>>>>  }
>>>>>  EXPORT_SYMBOL_GPL(xnthread_relax);
>>>>>  
>>>>> -struct lostage_signal {
>>>>> -	struct pipeline_inband_work inband_work; /* Must be first. */
>>>>> -	struct task_struct *task;
>>>>> -	int signo, sigval;
>>>>> -	struct lostage_signal *self; /* Revisit: I-pipe requirement */
>>>>> -};
>>>>> -
>>>>>  static inline void do_kthread_signal(struct task_struct *p,
>>>>>  				     struct xnthread *thread,
>>>>>  				     struct lostage_signal *rq)
>>>>> @@ -2111,7 +2104,7 @@ static void lostage_task_signal(struct pipeline_inband_work *inband_work)
>>>>>  	} else
>>>>>  		send_sig(signo, p, 1);
>>>>>  out:
>>>>> -	xnfree(rq->self);
>>>>> +	memset(rq->self, 0, sizeof(*rq));
>>>>>  }
>>>>>  
>>>>>  static int force_wakeup(struct xnthread *thread) /* nklock locked, irqs off */
>>>>> @@ -2273,14 +2266,56 @@ void xnthread_demote(struct xnthread *thread)
>>>>>  }
>>>>>  EXPORT_SYMBOL_GPL(xnthread_demote);
>>>>>  
>>>>> +int get_slot_index_from_sig(int sig, int arg)
>>>>> +{
>>>>> +	int action;
>>>>> +
>>>>> +	switch (sig) {
>>>>> +	case SIGDEBUG:
>>>>> +		switch (arg) {
>>>>> +		case SIGDEBUG_WATCHDOG:
>>>>> +			return 1;
>>>>> +		case SIGDEBUG_MIGRATE_SIGNAL:
>>>>> +		case SIGDEBUG_MIGRATE_SYSCALL:
>>>>> +		case SIGDEBUG_MIGRATE_FAULT:
>>>>> +		case SIGDEBUG_MIGRATE_PRIOINV:
>>>>> +		case SIGDEBUG_NOMLOCK:
>>>>> +		case SIGDEBUG_RESCNT_IMBALANCE:
>>>>> +		case SIGDEBUG_LOCK_BREAK:
>>>>> +		case SIGDEBUG_MUTEX_SLEEP:
>>>>> +			return 0;
>>>>> +		}
>>>>> +		break;
>>>>> +	case SIGSHADOW:
>>>>> +		action = sigshadow_action(arg);
>>>>> +		switch (action) {
>>>>> +		case SIGSHADOW_ACTION_HARDEN:
>>>>> +			return 2;
>>>>> +		case SIGSHADOW_ACTION_BACKTRACE:
>>>>> +			return 3;
>>>>> +		case SIGSHADOW_ACTION_HOME:
>>>>> +			return 4;
>>>>> +		}
>>>>> +		break;
>>>>> +	case SIGTERM:
>>>>> +		return 5;
>>>>> +	}
>>>>> +
>>>>> +	return -1;
>>>>> +}
>>>>> +
>>>>>  void xnthread_signal(struct xnthread *thread, int sig, int arg)
>>>>>  {
>>>>>  	struct lostage_signal *sigwork;
>>>>> +	int slot;
>>>>> +
>>>>> +	slot = get_slot_index_from_sig(sig, arg);
>>>>>  
>>>>> -	sigwork = xnmalloc(sizeof(*sigwork));
>>>>> -	if (WARN_ON(sigwork == NULL))
>>>>> +	if (slot < 0 || slot >= XNTHREAD_SIGNALS_NUM)
>>>>>  		return;
>>>>
>>>> Should keep the warning on errors. And >= XNTHREAD_SIGNALS_NUM cannot
>>>> happen (over-defensive programming).
>>>>
>>>>>  
>>>>> +	sigwork = &thread->sigarray[slot];
>>>>> +
>>>>
>>>> Does this automatically handle the case of duplicate submissions on the
>>>> same slot?
>>> 
>>> No, it can not.  If there is duplicate submissions on the same slot , we have to 
>>> create a queue to handle them for each slot. But according to " SIGSHADOW(SIGWINCH) and SIGDEBUG(SIGXCPU) are
>>>  standard non-queueable signals so we should only need a single slot per signal cause." described by Philippe,
>>> they should be non-queueable signals? Please correct me if  I am wrong.
>>
>>I'm not concerned about queuing, rather about that we might crash or
>>somehow invalidate the already submitted event. That's why we should
>>think this case through even if impossible for some cases but maybe not all.

According to my tests, the issue of invalidating an already submitted event
really does exist in the sigdebug test of the testsuite with the current
implementation. Let me debug and understand this in more detail before
implementing a new solution. Thanks for your comments.

Regards

Hongzhan Chen

>
>The xnthread_signal is included in critical section protected with bigblock nklock by some callers 
>such as xnthread_suspend and xnthread_cancel and handle_setaffinity_event  but some callers do not use the biglock. 
>If we use further protection in xnthread_signal , it would be over-protected for those callers which already use the biglock.
>What is your suggestions?
>
>Regards
>
>Hongzhan Chen  
>>
>>Jan
>>
>>-- 
>>Siemens AG, T RDA IOT
>>Corporate Competence Center Embedded Linux


* RE: [PATCH] cobalt/thread: get rid of dynamic allocation for xnthread_signal
  2021-06-08  8:27 ` Jan Kiszka
  2021-06-08  9:13   ` Chen, Hongzhan
@ 2021-06-09  5:10   ` Chen, Hongzhan
  2021-06-09  5:36     ` Jan Kiszka
  1 sibling, 1 reply; 9+ messages in thread
From: Chen, Hongzhan @ 2021-06-09  5:10 UTC (permalink / raw)
  To: Jan Kiszka, xenomai


>
>-----Original Message-----
>From: Jan Kiszka <jan.kiszka@siemens.com> 
>Sent: Tuesday, June 8, 2021 4:28 PM
>To: Chen, Hongzhan <hongzhan.chen@intel.com>; xenomai@xenomai.org
>Subject: Re: [PATCH] cobalt/thread: get rid of dynamic allocation for xnthread_signal
>
>On 08.06.21 09:04, Hongzhan Chen via Xenomai wrote:
>> Add one or more "signal slots" per possible cause of in-band signal
>> into struct xnthread to get rid of dynamic allocation.
>> 
>> For SIGDEBUG, except SIGDEBUG_WATCHDOG all other SIGDEBUG_* cause
>> including SIGDEBUG_MIGRATE_SIGNAL, SIGDEBUG_MIGRATE_SYSCALL,
>> SIGDEBUG_MIGRATE_FAULT, SIGDEBUG_MIGRATE_PRIOINV, SIGDEBUG_NOMLOCK,
>> SIGDEBUG_RESCNT_IMBALANCE, SIGDEBUG_LOCK_BREAK, SIGDEBUG_MUTEX_SLEEP
>> are synchronous and mutually exclusive due to the oob->in-band transition.
>> But SIGDEBUG_WATCHDOG is triggered asynchronously by the oob timer.
>> 
>> All SIGSHADOW_* events need their own slot: SIGSHADOW_ACTION_HARDEN and
>> SIGSHADOW_ACTION_HOME can be raised by remote threads asynchronously to
>> the target thread. SIGSHADOW_ACTION_BACKTRACE comes in addition to
>> SIGDEBUG_MIGRATE_*. For the latter reason, SIGSHADOW_ACTION_BACKTRACE
>> cannot pile up though.
>> 
>> Including SIGTERM, we have totally 6 slots.
>> 
>
>Please rebase on top of next, rather than wip/dovetail. It will replace
>the related patch in wip/dovetail.

Actually, the patch is based on https://lab.xenomai.org/xenomai-rpm.git/log/?h=for-upstream/dovetail.
Several patches that the current patch depends on have not been merged into
next yet. Do we need to merge into for-upstream/dovetail first, or should I
create a new patch against next to work on?
Please suggest.

Regards

Hongzhan Chen

>
>> Signed-off-by: Hongzhan Chen <hongzhan.chen@intel.com>
>> 
>> diff --git a/include/cobalt/kernel/thread.h b/include/cobalt/kernel/thread.h
>> index bc83cc556..29795f79c 100644
>> --- a/include/cobalt/kernel/thread.h
>> +++ b/include/cobalt/kernel/thread.h
>> @@ -42,6 +42,7 @@
>>   */
>>  #define XNTHREAD_BLOCK_BITS   (XNSUSP|XNPEND|XNDELAY|XNDORMANT|XNRELAX|XNHELD|XNDBGSTOP)
>>  #define XNTHREAD_MODE_BITS    (XNRRB|XNWARN|XNTRAPLB)
>> +#define XNTHREAD_SIGNALS_NUM  6
>>  
>>  struct xnthread;
>>  struct xnsched;
>> @@ -51,6 +52,13 @@ struct xnsched_tpslot;
>>  struct xnthread_personality;
>>  struct completion;
>>  
>> +struct lostage_signal {
>> +	struct pipeline_inband_work inband_work; /* Must be first. */
>> +	struct task_struct *task;
>> +	int signo, sigval;
>> +	struct lostage_signal *self; /* Revisit: I-pipe requirement */
>> +};
>> +
>>  struct xnthread_init_attr {
>>  	struct xnthread_personality *personality;
>>  	cpumask_t affinity;
>> @@ -205,6 +213,29 @@ struct xnthread {
>>  	const char *exe_path;	/* Executable path */
>>  	u32 proghash;		/* Hash value for exe_path */
>>  #endif
>> +	/*
>> +	 * Each slot handle different cause of signal
>> +	 * slot 0:
>> +	 * 	SIGDEBUG_MIGRATE_SIGNAL
>> +	 * 	SIGDEBUG_MIGRATE_SYSCALL
>> +	 * 	SIGDEBUG_MIGRATE_FAULT
>> +	 * 	SIGDEBUG_MIGRATE_PRIOINV
>> +	 * 	SIGDEBUG_NOMLOCK
>> +	 * 	SIGDEBUG_RESCNT_IMBALANCE
>> +	 * 	SIGDEBUG_LOCK_BREAK
>> +	 * 	SIGDEBUG_MUTEX_SLEEP
>> +	 * slot 1:
>> +	 * 	SIGDEBUG_WATCHDOG
>
>I'm still a bit undecided if we should serialize watchdog events with
>other SIGDEBUG reasons, but that could also be done on top later on as
>it appears to me now as slightly more complicated.
>
>> +	 * slot 2:
>> +	 * 	SIGSHADOW_ACTION_HARDEN
>> +	 * slot 3:
>> +	 * 	SIGSHADOW_ACTION_BACKTRACE
>> +	 * slot 4:
>> +	 * 	SIGSHADOW_ACTION_HOME
>> +	 * slot 5:
>> +	 * 	SIGTERM
>> +	 */
>
>Could we also define names for the slots?
>
>> +	struct lostage_signal sigarray[XNTHREAD_SIGNALS_NUM];
>>  };
>>  
>>  static inline int xnthread_get_state(const struct xnthread *thread)
>> diff --git a/kernel/cobalt/thread.c b/kernel/cobalt/thread.c
>> index 9c7753ac4..ebb798b58 100644
>> --- a/kernel/cobalt/thread.c
>> +++ b/kernel/cobalt/thread.c
>> @@ -2065,13 +2065,6 @@ void xnthread_relax(int notify, int reason)
>>  }
>>  EXPORT_SYMBOL_GPL(xnthread_relax);
>>  
>> -struct lostage_signal {
>> -	struct pipeline_inband_work inband_work; /* Must be first. */
>> -	struct task_struct *task;
>> -	int signo, sigval;
>> -	struct lostage_signal *self; /* Revisit: I-pipe requirement */
>> -};
>> -
>>  static inline void do_kthread_signal(struct task_struct *p,
>>  				     struct xnthread *thread,
>>  				     struct lostage_signal *rq)
>> @@ -2111,7 +2104,7 @@ static void lostage_task_signal(struct pipeline_inband_work *inband_work)
>>  	} else
>>  		send_sig(signo, p, 1);
>>  out:
>> -	xnfree(rq->self);
>> +	memset(rq->self, 0, sizeof(*rq));
>>  }
>>  
>>  static int force_wakeup(struct xnthread *thread) /* nklock locked, irqs off */
>> @@ -2273,14 +2266,56 @@ void xnthread_demote(struct xnthread *thread)
>>  }
>>  EXPORT_SYMBOL_GPL(xnthread_demote);
>>  
>> +int get_slot_index_from_sig(int sig, int arg)
>> +{
>> +	int action;
>> +
>> +	switch (sig) {
>> +	case SIGDEBUG:
>> +		switch (arg) {
>> +		case SIGDEBUG_WATCHDOG:
>> +			return 1;
>> +		case SIGDEBUG_MIGRATE_SIGNAL:
>> +		case SIGDEBUG_MIGRATE_SYSCALL:
>> +		case SIGDEBUG_MIGRATE_FAULT:
>> +		case SIGDEBUG_MIGRATE_PRIOINV:
>> +		case SIGDEBUG_NOMLOCK:
>> +		case SIGDEBUG_RESCNT_IMBALANCE:
>> +		case SIGDEBUG_LOCK_BREAK:
>> +		case SIGDEBUG_MUTEX_SLEEP:
>> +			return 0;
>> +		}
>> +		break;
>> +	case SIGSHADOW:
>> +		action = sigshadow_action(arg);
>> +		switch (action) {
>> +		case SIGSHADOW_ACTION_HARDEN:
>> +			return 2;
>> +		case SIGSHADOW_ACTION_BACKTRACE:
>> +			return 3;
>> +		case SIGSHADOW_ACTION_HOME:
>> +			return 4;
>> +		}
>> +		break;
>> +	case SIGTERM:
>> +		return 5;
>> +	}
>> +
>> +	return -1;
>> +}
>> +
>>  void xnthread_signal(struct xnthread *thread, int sig, int arg)
>>  {
>>  	struct lostage_signal *sigwork;
>> +	int slot;
>> +
>> +	slot = get_slot_index_from_sig(sig, arg);
>>  
>> -	sigwork = xnmalloc(sizeof(*sigwork));
>> -	if (WARN_ON(sigwork == NULL))
>> +	if (slot < 0 || slot >= XNTHREAD_SIGNALS_NUM)
>>  		return;
>
>Should keep the warning on errors. And >= XNTHREAD_SIGNALS_NUM cannot
>happen (over-defensive programming).
>
>>  
>> +	sigwork = &thread->sigarray[slot];
>> +
>
>Does this automatically handle the case of duplicate submissions on the
>same slot?
>
>>  	sigwork->inband_work = (struct pipeline_inband_work)
>>  			PIPELINE_INBAND_WORK_INITIALIZER(*sigwork,
>>  					lostage_task_signal);
>> 
>
>Jan
>
>-- 
>Siemens AG, T RDA IOT
>Corporate Competence Center Embedded Linux


* Re: [PATCH] cobalt/thread: get rid of dynamic allocation for xnthread_signal
  2021-06-09  5:10   ` Chen, Hongzhan
@ 2021-06-09  5:36     ` Jan Kiszka
  0 siblings, 0 replies; 9+ messages in thread
From: Jan Kiszka @ 2021-06-09  5:36 UTC (permalink / raw)
  To: Chen, Hongzhan, xenomai

On 09.06.21 07:10, Chen, Hongzhan wrote:
> 
>>
>> -----Original Message-----
>> From: Jan Kiszka <jan.kiszka@siemens.com> 
>> Sent: Tuesday, June 8, 2021 4:28 PM
>> To: Chen, Hongzhan <hongzhan.chen@intel.com>; xenomai@xenomai.org
>> Subject: Re: [PATCH] cobalt/thread: get rid of dynamic allocation for xnthread_signal
>>
>> On 08.06.21 09:04, Hongzhan Chen via Xenomai wrote:
>>> Add one or more "signal slots" per possible cause of in-band signal
>>> into struct xnthread to get rid of dynamic allocation.
>>>
>>> For SIGDEBUG, except SIGDEBUG_WATCHDOG all other SIGDEBUG_* cause
>>> including SIGDEBUG_MIGRATE_SIGNAL, SIGDEBUG_MIGRATE_SYSCALL,
>>> SIGDEBUG_MIGRATE_FAULT, SIGDEBUG_MIGRATE_PRIOINV, SIGDEBUG_NOMLOCK,
>>> SIGDEBUG_RESCNT_IMBALANCE, SIGDEBUG_LOCK_BREAK, SIGDEBUG_MUTEX_SLEEP
>>> are synchronous and mutually exclusive due to the oob->in-band transition.
>>> But SIGDEBUG_WATCHDOG is triggered asynchronously by the oob timer.
>>>
>>> All SIGSHADOW_* events need their own slot: SIGSHADOW_ACTION_HARDEN and
>>> SIGSHADOW_ACTION_HOME can be raised by remote threads asynchronously to
>>> the target thread. SIGSHADOW_ACTION_BACKTRACE comes in addition to
>>> SIGDEBUG_MIGRATE_*. For the latter reason, SIGSHADOW_ACTION_BACKTRACE
>>> cannot pile up though.
>>>
>>> Including SIGTERM, we have totally 6 slots.
>>>
>>
>> Please rebase on top of next, rather than wip/dovetail. It will replace
>> the related patch in wip/dovetail.
> 
> Actually , the patch is based on https://lab.xenomai.org/xenomai-rpm.git/log/?h=for-upstream/dovetail.
> There is still several patches that current patch is depending on have not been merged into next.
> Do we need to merge into for-upstream/dovetail at first or I should create new patch for next that I should work on?
> Please suggest.

Base it on next. This patch has no dependencies on wip/dovetail. It
replaces a patch from the bottom of that branch. And it can be tested
on top of, e.g., 5.4.y-ipipe as well.

Jan

-- 
Siemens AG, T RDA IOT
Corporate Competence Center Embedded Linux



* Re: [PATCH] cobalt/thread: get rid of dynamic allocation for xnthread_signal
  2021-06-09  1:46         ` Chen, Hongzhan
@ 2021-06-09  5:40           ` Jan Kiszka
  0 siblings, 0 replies; 9+ messages in thread
From: Jan Kiszka @ 2021-06-09  5:40 UTC (permalink / raw)
  To: Chen, Hongzhan; +Cc: xenomai

On 09.06.21 03:46, Chen, Hongzhan wrote:
> 
> 
>>> -----Original Message-----
>>> From: Jan Kiszka <jan.kiszka@siemens.com> 
>>> Sent: Tuesday, June 8, 2021 6:03 PM
>>> To: Chen, Hongzhan <hongzhan.chen@intel.com>; xenomai@xenomai.org
>>> Cc: Philippe Gerum <rpm@xenomai.org>
>>> Subject: Re: [PATCH] cobalt/thread: get rid of dynamic allocation for xnthread_signal
>>>
>>> On 08.06.21 11:13, Chen, Hongzhan wrote:
>>>>> On 08.06.21 09:04, Hongzhan Chen via Xenomai wrote:
>>>>>> Add one or more "signal slots" per possible cause of in-band signal
>>>>>> into struct xnthread to get rid of dynamic allocation.
>>>>>>
>>>>>> For SIGDEBUG, except SIGDEBUG_WATCHDOG all other SIGDEBUG_* cause
>>>>>> including SIGDEBUG_MIGRATE_SIGNAL, SIGDEBUG_MIGRATE_SYSCALL,
>>>>>> SIGDEBUG_MIGRATE_FAULT, SIGDEBUG_MIGRATE_PRIOINV, SIGDEBUG_NOMLOCK,
>>>>>> SIGDEBUG_RESCNT_IMBALANCE, SIGDEBUG_LOCK_BREAK, SIGDEBUG_MUTEX_SLEEP
>>>>>> are synchronous and mutually exclusive due to the oob->in-band transition.
>>>>>> But SIGDEBUG_WATCHDOG is triggered asynchronously by the oob timer.
>>>>>>
>>>>>> All SIGSHADOW_* events need their own slot: SIGSHADOW_ACTION_HARDEN and
>>>>>> SIGSHADOW_ACTION_HOME can be raised by remote threads asynchronously to
>>>>>> the target thread. SIGSHADOW_ACTION_BACKTRACE comes in addition to
>>>>>> SIGDEBUG_MIGRATE_*. For the latter reason, SIGSHADOW_ACTION_BACKTRACE
>>>>>> cannot pile up though.
>>>>>>
>>>>>> Including SIGTERM, we have totally 6 slots.
>>>>>>
>>>>>
>>>>> Please rebase on top of next, rather than wip/dovetail. It will replace
>>>>> the related patch in wip/dovetail.
>>>>>
>>>>>> Signed-off-by: Hongzhan Chen <hongzhan.chen@intel.com>
>>>>>>
>>>>>> diff --git a/include/cobalt/kernel/thread.h b/include/cobalt/kernel/thread.h
>>>>>> index bc83cc556..29795f79c 100644
>>>>>> --- a/include/cobalt/kernel/thread.h
>>>>>> +++ b/include/cobalt/kernel/thread.h
>>>>>> @@ -42,6 +42,7 @@
>>>>>>   */
>>>>>>  #define XNTHREAD_BLOCK_BITS   (XNSUSP|XNPEND|XNDELAY|XNDORMANT|XNRELAX|XNHELD|XNDBGSTOP)
>>>>>>  #define XNTHREAD_MODE_BITS    (XNRRB|XNWARN|XNTRAPLB)
>>>>>> +#define XNTHREAD_SIGNALS_NUM  6
>>>>>>  
>>>>>>  struct xnthread;
>>>>>>  struct xnsched;
>>>>>> @@ -51,6 +52,13 @@ struct xnsched_tpslot;
>>>>>>  struct xnthread_personality;
>>>>>>  struct completion;
>>>>>>  
>>>>>> +struct lostage_signal {
>>>>>> +	struct pipeline_inband_work inband_work; /* Must be first. */
>>>>>> +	struct task_struct *task;
>>>>>> +	int signo, sigval;
>>>>>> +	struct lostage_signal *self; /* Revisit: I-pipe requirement */
>>>>>> +};
>>>>>> +
>>>>>>  struct xnthread_init_attr {
>>>>>>  	struct xnthread_personality *personality;
>>>>>>  	cpumask_t affinity;
>>>>>> @@ -205,6 +213,29 @@ struct xnthread {
>>>>>>  	const char *exe_path;	/* Executable path */
>>>>>>  	u32 proghash;		/* Hash value for exe_path */
>>>>>>  #endif
>>>>>> +	/*
>>>>>> +	 * Each slot handle different cause of signal
>>>>>> +	 * slot 0:
>>>>>> +	 * 	SIGDEBUG_MIGRATE_SIGNAL
>>>>>> +	 * 	SIGDEBUG_MIGRATE_SYSCALL
>>>>>> +	 * 	SIGDEBUG_MIGRATE_FAULT
>>>>>> +	 * 	SIGDEBUG_MIGRATE_PRIOINV
>>>>>> +	 * 	SIGDEBUG_NOMLOCK
>>>>>> +	 * 	SIGDEBUG_RESCNT_IMBALANCE
>>>>>> +	 * 	SIGDEBUG_LOCK_BREAK
>>>>>> +	 * 	SIGDEBUG_MUTEX_SLEEP
>>>>>> +	 * slot 1:
>>>>>> +	 * 	SIGDEBUG_WATCHDOG
>>>>>
>>>>> I'm still a bit undecided if we should serialize watchdog events with
>>>>> other SIGDEBUG reasons, but that could also be done on top later on as
>>>>> it appears to me now as slightly more complicated.
>>>>>
>>>>>> +	 * slot 2:
>>>>>> +	 * 	SIGSHADOW_ACTION_HARDEN
>>>>>> +	 * slot 3:
>>>>>> +	 * 	SIGSHADOW_ACTION_BACKTRACE
>>>>>> +	 * slot 4:
>>>>>> +	 * 	SIGSHADOW_ACTION_HOME
>>>>>> +	 * slot 5:
>>>>>> +	 * 	SIGTERM
>>>>>> +	 */
>>>>>
>>>>> Could we also define names for the slots?
>>>>>
>>>>>> +	struct lostage_signal sigarray[XNTHREAD_SIGNALS_NUM];
>>>>>>  };
>>>>>>  
>>>>>>  static inline int xnthread_get_state(const struct xnthread *thread)
>>>>>> diff --git a/kernel/cobalt/thread.c b/kernel/cobalt/thread.c
>>>>>> index 9c7753ac4..ebb798b58 100644
>>>>>> --- a/kernel/cobalt/thread.c
>>>>>> +++ b/kernel/cobalt/thread.c
>>>>>> @@ -2065,13 +2065,6 @@ void xnthread_relax(int notify, int reason)
>>>>>>  }
>>>>>>  EXPORT_SYMBOL_GPL(xnthread_relax);
>>>>>>  
>>>>>> -struct lostage_signal {
>>>>>> -	struct pipeline_inband_work inband_work; /* Must be first. */
>>>>>> -	struct task_struct *task;
>>>>>> -	int signo, sigval;
>>>>>> -	struct lostage_signal *self; /* Revisit: I-pipe requirement */
>>>>>> -};
>>>>>> -
>>>>>>  static inline void do_kthread_signal(struct task_struct *p,
>>>>>>  				     struct xnthread *thread,
>>>>>>  				     struct lostage_signal *rq)
>>>>>> @@ -2111,7 +2104,7 @@ static void lostage_task_signal(struct pipeline_inband_work *inband_work)
>>>>>>  	} else
>>>>>>  		send_sig(signo, p, 1);
>>>>>>  out:
>>>>>> -	xnfree(rq->self);
>>>>>> +	memset(rq->self, 0, sizeof(*rq));
>>>>>>  }
>>>>>>  
>>>>>>  static int force_wakeup(struct xnthread *thread) /* nklock locked, irqs off */
>>>>>> @@ -2273,14 +2266,56 @@ void xnthread_demote(struct xnthread *thread)
>>>>>>  }
>>>>>>  EXPORT_SYMBOL_GPL(xnthread_demote);
>>>>>>  
>>>>>> +int get_slot_index_from_sig(int sig, int arg)
>>>>>> +{
>>>>>> +	int action;
>>>>>> +
>>>>>> +	switch (sig) {
>>>>>> +	case SIGDEBUG:
>>>>>> +		switch (arg) {
>>>>>> +		case SIGDEBUG_WATCHDOG:
>>>>>> +			return 1;
>>>>>> +		case SIGDEBUG_MIGRATE_SIGNAL:
>>>>>> +		case SIGDEBUG_MIGRATE_SYSCALL:
>>>>>> +		case SIGDEBUG_MIGRATE_FAULT:
>>>>>> +		case SIGDEBUG_MIGRATE_PRIOINV:
>>>>>> +		case SIGDEBUG_NOMLOCK:
>>>>>> +		case SIGDEBUG_RESCNT_IMBALANCE:
>>>>>> +		case SIGDEBUG_LOCK_BREAK:
>>>>>> +		case SIGDEBUG_MUTEX_SLEEP:
>>>>>> +			return 0;
>>>>>> +		}
>>>>>> +		break;
>>>>>> +	case SIGSHADOW:
>>>>>> +		action = sigshadow_action(arg);
>>>>>> +		switch (action) {
>>>>>> +		case SIGSHADOW_ACTION_HARDEN:
>>>>>> +			return 2;
>>>>>> +		case SIGSHADOW_ACTION_BACKTRACE:
>>>>>> +			return 3;
>>>>>> +		case SIGSHADOW_ACTION_HOME:
>>>>>> +			return 4;
>>>>>> +		}
>>>>>> +		break;
>>>>>> +	case SIGTERM:
>>>>>> +		return 5;
>>>>>> +	}
>>>>>> +
>>>>>> +	return -1;
>>>>>> +}
>>>>>> +
>>>>>>  void xnthread_signal(struct xnthread *thread, int sig, int arg)
>>>>>>  {
>>>>>>  	struct lostage_signal *sigwork;
>>>>>> +	int slot;
>>>>>> +
>>>>>> +	slot = get_slot_index_from_sig(sig, arg);
>>>>>>  
>>>>>> -	sigwork = xnmalloc(sizeof(*sigwork));
>>>>>> -	if (WARN_ON(sigwork == NULL))
>>>>>> +	if (slot < 0 || slot >= XNTHREAD_SIGNALS_NUM)
>>>>>>  		return;
>>>>>
>>>>> Should keep the warning on errors. And >= XNTHREAD_SIGNALS_NUM cannot
>>>>> happen (over-defensive programming).
>>>>>
>>>>>>  
>>>>>> +	sigwork = &thread->sigarray[slot];
>>>>>> +
>>>>>
>>>>> Does this automatically handle the case of duplicate submissions on the
>>>>> same slot?
>>>>
>>>> No, it can not.  If there is duplicate submissions on the same slot , we have to 
>>>> create a queue to handle them for each slot. But according to " SIGSHADOW(SIGWINCH) and SIGDEBUG(SIGXCPU) are
>>>>  standard non-queueable signals so we should only need a single slot per signal cause." described by Philippe,
>>>> they should be non-queueable signals? Please correct me if  I am wrong.
>>>
>>> I'm not concerned about queuing, rather about that we might crash or
>>> somehow invalidate the already submitted event. That's why we should
>>> think this case through even if impossible for some cases but maybe not all.
> 
> According to my test, issue invalidating the already submitted event really exist in sigdebug test item of testsuite with current
> Implementation.  Let me debug and understand more detailed before implement new solution. Thanks for your comments.
> 

Basically, I would like to ensure that

xnthread_signal(thread_a, sig_b, arg_c);
xnthread_signal(thread_a, sig_b, arg_d);

results in either receiving sig_b+arg_c once, or sig_b+arg_c followed by
sig_b+arg_d soon after (because the two submissions raced with the
reception). But it should never trigger sig_b+arg_d alone (first-come,
first-serve), corrupt the message or even crash the core.
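
A minimal sketch of what I mean, with the usual caveats (untested, and
the cmpxchg-based claim is only an assumption of mine, not something the
patch does today): let the 0 -> signo transition mark the slot busy, and
only clear it once the handler is done with the message:

	/* submission side */
	if (cmpxchg(&sigwork->signo, 0, sig) != 0)
		return;	/* earlier event still pending, keep it */
	sigwork->sigval = arg;
	/* ...fill in the task pointer and post the in-band work as before... */

	/* in-band handler, once the signal has actually been sent */
	smp_store_release(&rq->signo, 0);

That way a racing second submission can at worst be dropped, but it can
never corrupt an event that is still being delivered.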

Jan

-- 
Siemens AG, T RDA IOT
Corporate Competence Center Embedded Linux


