* [Xenomai-core] [PATCH 2/2] /proc output - add CPU usage stats
@ 2006-07-07 21:00 Jan Kiszka
  2006-07-08  8:49 ` Philippe Gerum
  0 siblings, 1 reply; 6+ messages in thread
From: Jan Kiszka @ 2006-07-07 21:00 UTC (permalink / raw)
  To: xenomai-core


Yet another (hopefully the final) version of my attempt to add simple per-thread CPU
usage statistics. This variant now prints the usage in percent directly via
/proc/xenomai/stat.

As Philippe suggested reorganising the /proc section for statistics, I decided to make
this (probably intermediate) version independent of any user-space tools. You can simply
run "cat /proc/xenomai/stat" as often as you like to update the stats. Example:

root@domain.hid:/root# cat /proc/xenomai/stat
CPU  PID    MSW        CSW        PF    STAT       %CPU  NAME
  0  0      0          5836670    0     01400080   78.8  ROOT
  0  0      0          1          0     00000082    0.0  timsPipeReceiver
  0  1029   1          1          0     00c00180    0.0  irqloop
  0  1030   1          7843       0     00c00082    4.2  irqloop
  0  1039   16         140        0     00c00082    0.1  display-1038
  0  0      0          166598     0     00000084   16.9  timerbench

Each output marks the beginning of the next measuring interval, so when running

    while true; do cat /proc/xenomai/stat; sleep 1; clear; done

you effectively get the CPU usage of the last second, continuously updated.
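
For the record, the %CPU value is derived from two per-thread tick counters maintained by
the patch: the runtime accumulated since the last reading (exec_time) and the length of
that measuring interval (exec_period). The following is a minimal standalone sketch of
the fixed-point math used in stat_seq_show() below; cpu_usage_permille() and the toy
numbers are made up for illustration:

    #include <stdint.h>
    #include <stdio.h>

    /* Usage in tenths of a percent from two 64-bit tick counts, mirroring
     * the scaling done in stat_seq_show(). */
    static unsigned int cpu_usage_permille(uint64_t exec_time, uint64_t exec_period)
    {
            if (!exec_period)
                    return 0;

            /* Scale both values down until the period fits into 32 bits so
             * the division stays cheap on 32-bit targets. */
            while (exec_period > 0xFFFFFFFFULL) {
                    exec_time >>= 16;
                    exec_period >>= 16;
            }

            /* Rounded division: exec_time / exec_period, scaled by 1000. */
            return (unsigned int)((exec_time * 1000ULL + (exec_period >> 1)) /
                                  exec_period);
    }

    int main(void)
    {
            unsigned int usage = cpu_usage_permille(788, 1000);

            printf("%3u.%u\n", usage / 10, usage % 10); /* prints " 78.8" */
            return 0;
    }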

Note that the results are not always accurate when threads are added, removed, or
migrated. If the numbers look weird, just re-run and things should normalise. This is
because the measuring now only takes place between context switches on each CPU and no
longer explicitly on output. This approach appears least intrusive and SMP-safe to me.
Nevertheless, I'm lacking SMP hardware here, so real testing is still required.
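
For reference, the accounting boils down to two small pieces: charging the outgoing
thread at every context switch, and snapshotting/resetting the counters on every /proc
read. Below is a simplified, self-contained userland model of the scheme in the patch;
the struct layout and helper names are stand-ins for xnsched_t/xnthread_t,
xnpod_acc_exec_time() and the stat_seq_open() hunk:

    #include <stdint.h>

    typedef uint64_t ticks_t;

    /* Stand-ins for the state added by the patch:
     * xnthread_t.stat.{exec_time,exec_start} and xnsched_t.last_csw. */
    struct thread_stat { ticks_t exec_time, exec_start; };
    struct sched_cpu   { ticks_t last_csw; };

    /* Dummy monotonic tick source standing in for xnarch_get_cpu_tsc(). */
    static ticks_t read_tsc(void)
    {
            static ticks_t t;
            return t += 1000;
    }

    /* Called on every context switch: charge the outgoing thread with the
     * ticks elapsed since the previous switch on this CPU. */
    static void acc_exec_time(struct sched_cpu *sched, struct thread_stat *out)
    {
            ticks_t now = read_tsc();

            out->exec_time += now - sched->last_csw;
            sched->last_csw = now;
    }

    /* Called for each thread on every read of /proc/xenomai/stat: snapshot
     * the accumulated runtime and the interval length, then start a new
     * measuring interval. %CPU is exec_time / exec_period. */
    static void snapshot(struct sched_cpu *sched, struct thread_stat *t,
                         ticks_t *exec_time, ticks_t *exec_period)
    {
            *exec_time = t->exec_time;
            *exec_period = sched->last_csw - t->exec_start;

            t->exec_time = 0;
            t->exec_start = sched->last_csw;
    }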

Jan


---
 include/nucleus/pod.h    |   38 ++++++++++++++++++++++++++++++++++++++
 include/nucleus/thread.h |    2 ++
 ksrc/nucleus/module.c    |   40 +++++++++++++++++++++++++++++++++++-----
 ksrc/nucleus/pod.c       |    8 ++++++++
 ksrc/nucleus/thread.c    |    2 ++
 5 files changed, 85 insertions(+), 5 deletions(-)

Index: include/nucleus/thread.h
===================================================================
--- include/nucleus/thread.h.orig
+++ include/nucleus/thread.h
@@ -152,6 +152,8 @@ typedef struct xnthread {
 	unsigned long csw;	/* Context switches (includes
 				   secondary -> primary switches) */
 	unsigned long pf;	/* Number of page faults */
+	xnticks_t exec_time;	/* Accumulated execution time (tsc) */
+	xnticks_t exec_start;	/* Start of execution time accumulation (tsc) */
     } stat;
 #endif /* CONFIG_XENO_OPT_STATS */
 
Index: include/nucleus/pod.h
===================================================================
--- include/nucleus/pod.h.orig
+++ include/nucleus/pod.h
@@ -145,6 +145,10 @@ typedef struct xnsched {
 
     xnthread_t rootcb;          /*!< Root thread control block. */
 
+#ifdef CONFIG_XENO_OPT_STATS
+    xnticks_t last_csw;         /*!< Last context switch (ticks). */
+#endif /* CONFIG_XENO_OPT_STATS */
+
 } xnsched_t;
 
 #ifdef CONFIG_SMP
@@ -545,6 +549,40 @@ static inline void xnpod_delete_self (vo
     xnpod_delete_thread(xnpod_current_thread());
 }
 
+#ifdef CONFIG_XENO_OPT_STATS
+static inline void xnpod_acc_exec_time(xnsched_t *sched, xnthread_t *thread)
+{
+    xnticks_t now = xnarch_get_cpu_tsc();
+
+    thread->stat.exec_time += now - sched->last_csw;
+    sched->last_csw = now;
+}
+
+static inline void xnpod_reset_exec_stats(xnthread_t *thread)
+{
+    thread->stat.exec_time = 0;
+    thread->stat.exec_start = xnarch_get_cpu_tsc();
+}
+
+static inline void xnpod_update_csw_date(xnsched_t *sched)
+{
+    sched->last_csw = xnarch_get_cpu_tsc();
+}
+
+#else /* !CONFIG_XENO_OPT_STATS */
+static inline void xnpod_acc_exec_time(xnsched_t *sched, xnthread_t *thread)
+{
+}
+
+static inline void xnpod_reset_exec_stats(xnthread_t *thread)
+{
+}
+
+static inline void xnpod_update_csw_date(xnsched_t *sched)
+{
+}
+#endif /* CONFIG_XENO_OPT_STATS */
+
 #ifdef __cplusplus
 }
 #endif
Index: ksrc/nucleus/pod.c
===================================================================
--- ksrc/nucleus/pod.c.orig
+++ ksrc/nucleus/pod.c
@@ -669,6 +669,9 @@ static inline void xnpod_switch_zombie(x
 
 	xnthread_cleanup_tcb(threadout);
 
+	/* no need to update stats of dying thread */
+	xnpod_update_csw_date(sched);
+
 	xnarch_finalize_and_switch(xnthread_archtcb(threadout),
 				   xnthread_archtcb(threadin));
 
@@ -1889,6 +1892,9 @@ int xnpod_migrate_thread(int cpu)
 
 	xnpod_schedule();
 
+	/* Reset execution time stats due to unsync'ed TSCs */
+	xnpod_reset_exec_stats(thread);
+
       unlock_and_exit:
 
 	xnlock_put_irqrestore(&nklock, s);
@@ -2433,6 +2439,7 @@ void xnpod_schedule(void)
 		xnarch_enter_root(xnthread_archtcb(threadin));
 	}
 
+	xnpod_acc_exec_time(sched, threadout);
 	xnthread_inc_csw(threadin);
 
 	xnarch_switch_to(xnthread_archtcb(threadout),
@@ -2604,6 +2611,7 @@ void xnpod_schedule_runnable(xnthread_t 
 		nkpod->schedhook(runthread, XNREADY);
 #endif /* __XENO_SIM__ */
 
+	xnpod_acc_exec_time(sched, runthread);
 	xnthread_inc_csw(threadin);
 
 	xnarch_switch_to(xnthread_archtcb(runthread),
Index: ksrc/nucleus/module.c
===================================================================
--- ksrc/nucleus/module.c.orig
+++ ksrc/nucleus/module.c
@@ -269,6 +269,8 @@ struct stat_seq_iterator {
 		unsigned long ssw;
 		unsigned long csw;
 		unsigned long pf;
+		xnticks_t exec_time;
+		xnticks_t exec_period;
 	} stat_info[1];
 };
 
@@ -309,13 +311,27 @@ static void stat_seq_stop(struct seq_fil
 static int stat_seq_show(struct seq_file *seq, void *v)
 {
 	if (v == SEQ_START_TOKEN)
-		seq_printf(seq, "%-3s  %-6s %-10s %-10s %-4s  %-8s  %s\n",
-			   "CPU", "PID", "MSW", "CSW", "PF", "STAT", "NAME");
+		seq_printf(seq, "%-3s  %-6s %-10s %-10s %-4s  %-8s  %5s"
+			   "  %s\n",
+			   "CPU", "PID", "MSW", "CSW", "PF", "STAT", "%CPU",
+			   "NAME");
 	else {
 		struct stat_seq_info *p = (struct stat_seq_info *)v;
-		seq_printf(seq, "%3u  %-6d %-10lu %-10lu %-4lu  %.8lx  %s\n",
+		int usage = 0;
+
+		if (p->exec_period) {
+			while (p->exec_period > 0xFFFFFFFF) {
+				p->exec_time >>= 16;
+				p->exec_period >>= 16;
+			}
+			usage = xnarch_ulldiv(
+				p->exec_time * 1000LL + (p->exec_period >> 1),
+				p->exec_period, NULL);
+		}
+		seq_printf(seq, "%3u  %-6d %-10lu %-10lu %-4lu  %.8lx  %3u.%u"
+			   "  %s\n",
 			   p->cpu, p->pid, p->ssw, p->csw, p->pf, p->status,
-			   p->name);
+			   usage / 10, usage % 10, p->name);
 	}
 
 	return 0;
@@ -370,6 +386,8 @@ static int stat_seq_open(struct inode *i
 
 	while (holder) {
 		xnthread_t *thread;
+		xnsched_t *sched;
+		xnticks_t period;
 		int n;
 
 		xnlock_get_irqsave(&nklock, s);
@@ -381,7 +399,8 @@ static int stat_seq_open(struct inode *i
 		thread = link2thread(holder, glink);
 		n = iter->nentries++;
 
-		iter->stat_info[n].cpu = xnsched_cpu(thread->sched);
+		sched = thread->sched;
+		iter->stat_info[n].cpu = xnsched_cpu(sched);
 		iter->stat_info[n].pid = xnthread_user_pid(thread);
 		memcpy(iter->stat_info[n].name, thread->name,
 		       sizeof(iter->stat_info[n].name));
@@ -390,6 +409,17 @@ static int stat_seq_open(struct inode *i
 		iter->stat_info[n].csw = thread->stat.csw;
 		iter->stat_info[n].pf = thread->stat.pf;
 
+		period = sched->last_csw - thread->stat.exec_start;
+		if (!period && thread == sched->runthread) {
+			iter->stat_info[n].exec_time = 1;
+			iter->stat_info[n].exec_period = 1;
+		} else {
+			iter->stat_info[n].exec_time = thread->stat.exec_time;
+			iter->stat_info[n].exec_period = period;
+		}
+		thread->stat.exec_time = 0;
+		thread->stat.exec_start = sched->last_csw;
+
 		holder = nextq(&nkpod->threadq, holder);
 
 		xnlock_put_irqrestore(&nklock, s);
Index: ksrc/nucleus/thread.c
===================================================================
--- ksrc/nucleus/thread.c.orig
+++ ksrc/nucleus/thread.c
@@ -90,6 +90,8 @@ int xnthread_init(xnthread_t *thread,
 	thread->stat.ssw = 0;
 	thread->stat.csw = 0;
 	thread->stat.pf = 0;
+	thread->stat.exec_time = 0;
+	thread->stat.exec_start = 0;
 #endif /* CONFIG_XENO_OPT_STATS */
 
 	/* These will be filled by xnpod_start_thread() */






* Re: [Xenomai-core] [PATCH 2/2] /proc output - add CPU usage stats
  2006-07-07 21:00 [Xenomai-core] [PATCH 2/2] /proc output - add CPU usage stats Jan Kiszka
@ 2006-07-08  8:49 ` Philippe Gerum
  2006-07-08  9:09   ` Jan Kiszka
  0 siblings, 1 reply; 6+ messages in thread
From: Philippe Gerum @ 2006-07-08  8:49 UTC (permalink / raw)
  To: Jan Kiszka; +Cc: xenomai-core

On Fri, 2006-07-07 at 23:00 +0200, Jan Kiszka wrote:
> Yet another (hopefully the final) version of my attempt to add simple per-thread CPU
> usage statistics. This variant now prints the usage in percent directly via
> /proc/xenomai/stat.

Merged, thanks.

-- 
Philippe.





* Re: [Xenomai-core] [PATCH 2/2] /proc output - add CPU usage stats
  2006-07-08  8:49 ` Philippe Gerum
@ 2006-07-08  9:09   ` Jan Kiszka
  2006-07-08  9:16     ` Philippe Gerum
  0 siblings, 1 reply; 6+ messages in thread
From: Jan Kiszka @ 2006-07-08  9:09 UTC (permalink / raw)
  To: rpm; +Cc: xenomai-core


Philippe Gerum wrote:
> On Fri, 2006-07-07 at 23:00 +0200, Jan Kiszka wrote:
>> Yet another (hopefully the final) version of my attempt to add simple per-thread CPU
>> usage statistics. This variant now prints the usage in percent directly via
>> /proc/xenomai/stat.
> 
> Merged, thanks.
> 

Tabs seem to have got lost on merge (I think my posted patches were OK).

Jan




* Re: [Xenomai-core] [PATCH 2/2] /proc output - add CPU usage stats
  2006-07-08  9:09   ` Jan Kiszka
@ 2006-07-08  9:16     ` Philippe Gerum
  2006-07-08  9:20       ` Jan Kiszka
  0 siblings, 1 reply; 6+ messages in thread
From: Philippe Gerum @ 2006-07-08  9:16 UTC (permalink / raw)
  To: Jan Kiszka; +Cc: xenomai-core

On Sat, 2006-07-08 at 11:09 +0200, Jan Kiszka wrote:
> Philippe Gerum wrote:
> > On Fri, 2006-07-07 at 23:00 +0200, Jan Kiszka wrote:
> >> Yet another (hopefully the final) version of my attempt to add simple per-thread CPU
> >> usage statistics. This variant now prints the usage in percent directly via
> >> /proc/xenomai/stat.
> > 
> > Merged, thanks.
> > 
> 
> Tabs seem to have got lost on merge (I think my posted patches were OK).

Looking at the mail archive on GNA, it seems they were not. I've planned
to rerun Lindent over the sources anyway.

> 
> Jan
> 
-- 
Philippe.





* Re: [Xenomai-core] [PATCH 2/2] /proc output - add CPU usage stats
  2006-07-08  9:16     ` Philippe Gerum
@ 2006-07-08  9:20       ` Jan Kiszka
  2006-07-08  9:28         ` Philippe Gerum
  0 siblings, 1 reply; 6+ messages in thread
From: Jan Kiszka @ 2006-07-08  9:20 UTC (permalink / raw)
  To: rpm; +Cc: xenomai-core


Philippe Gerum wrote:
> On Sat, 2006-07-08 at 11:09 +0200, Jan Kiszka wrote:
>> Philippe Gerum wrote:
>>> On Fri, 2006-07-07 at 23:00 +0200, Jan Kiszka wrote:
>>>> Yet another (hopefully the final) version of my attempt to add simple per-thread CPU
>>>> usage statistics. This variant now prints the usage in percent directly via
>>>> /proc/xenomai/stat.
>>> Merged, thanks.
>>>
>> Tabs seem to have got lost on merge (I think my posted patches were OK).
> 
> Looking at the mail archive on GNA, it seems they were not. I've planned
> to rerun Lindent over the sources anyway.

Hmm, strange. Checking the versions received via my own subscription
(PATCH 1/2 and 2/2), there are tabs at lines where there are none in
trunk now.

It's indeed a minor issue, but I wonder if I should continue to post
patches inlined or fall back to attachments again.

Jan




* Re: [Xenomai-core] [PATCH 2/2] /proc output - add CPU usage stats
  2006-07-08  9:20       ` Jan Kiszka
@ 2006-07-08  9:28         ` Philippe Gerum
  0 siblings, 0 replies; 6+ messages in thread
From: Philippe Gerum @ 2006-07-08  9:28 UTC (permalink / raw)
  To: Jan Kiszka; +Cc: xenomai-core

On Sat, 2006-07-08 at 11:20 +0200, Jan Kiszka wrote:
> Philippe Gerum wrote:
> > On Sat, 2006-07-08 at 11:09 +0200, Jan Kiszka wrote:
> >> Philippe Gerum wrote:
> >>> On Fri, 2006-07-07 at 23:00 +0200, Jan Kiszka wrote:
> >>>> Yet another (hopefully the final) version of my attempt to add simple per-thread CPU
> >>>> usage statistics. This variant now prints the usage in percent directly via
> >>>> /proc/xenomai/stat.
> >>> Merged, thanks.
> >>>
> >> Tabs seem to have got lost on merge (I think my posted patches were OK).
> > 
> > Looking at the mail archive on GNA, it seems they were not. I've planned
> > to rerun Lindent over the sources anyway.
> 
> Hmm, strange. Checking the versions received via my own subscription
> (PATCH 1/2 and 2/2), there are tabs at lines where there are none in
> trunk now.
> 
> It's indeed a minor issue, but I wonder if I should continue to post
> patches inlined or fall back to attachments again.

I don't know what went wrong, but the space/Tab issue appeared
specifically with your latest patches a week ago or so; so far, patching
with -l solved the issue, so that's not a big deal, provided a
reindentation is done afterwards to suppress unwanted spaces. I've never
had any problem using your attachments verbatim, though.

> 
> Jan
> 
-- 
Philippe.





Thread overview: 6+ messages
2006-07-07 21:00 [Xenomai-core] [PATCH 2/2] /proc output - add CPU usage stats Jan Kiszka
2006-07-08  8:49 ` Philippe Gerum
2006-07-08  9:09   ` Jan Kiszka
2006-07-08  9:16     ` Philippe Gerum
2006-07-08  9:20       ` Jan Kiszka
2006-07-08  9:28         ` Philippe Gerum
