linux-doc.vger.kernel.org archive mirror
* [PATCHv3] Introduced new tracing mode KCOV_MODE_UNIQUE.
From: Alexander Lochmann @ 2021-03-26 20:51 UTC
  Cc: Alexander Lochmann, Dmitry Vyukov, Andrey Konovalov,
	Jonathan Corbet, Randy Dunlap, Andrew Klychkov, Miguel Ojeda,
	Greg Kroah-Hartman, Andrew Morton, Jakub Kicinski,
	Aleksandr Nogikh, Wei Yongjun, Maciej Grochowski, kasan-dev,
	linux-doc, linux-kernel

It simply stores the executed PCs.
The execution order is discarded.
Each bit in the shared buffer represents every fourth
byte of the text segment.
Since a call instruction on every supported
architecture is at least four bytes, it is safe
to just store every fourth byte of the text segment.
In contrast to KCOV_MODE_TRACE_PC, the shared buffer
cannot overflow. Thus, all executed PCs are recorded.
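
For illustration, the mapping from an executed PC to a buffer bit boils down
to the following (a sketch assuming BITS_PER_LONG == 64; the names mirror
__sanitizer_cov_trace_pc below):

    idx = (ip - _stext) / 4;    /* e.g. ip = _stext + 0x1040 -> idx = 1040 */
    pos = idx % BITS_PER_LONG;  /* 1040 % 64 = 16 */
    idx /= BITS_PER_LONG;       /* 1040 / 64 = 16 */
    area[idx] |= 1UL << pos;    /* set bit 16 of word 16 */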

Signed-off-by: Alexander Lochmann <info@alexander-lochmann.de>
---
 Documentation/dev-tools/kcov.rst | 79 +++++++++++++++++++++++++
 include/linux/kcov.h             | 12 ++--
 include/uapi/linux/kcov.h        | 10 ++++
 kernel/kcov.c                    | 98 ++++++++++++++++++++++++--------
 4 files changed, 172 insertions(+), 27 deletions(-)

diff --git a/Documentation/dev-tools/kcov.rst b/Documentation/dev-tools/kcov.rst
index d2c4c27e1702..9b0df2f8474c 100644
--- a/Documentation/dev-tools/kcov.rst
+++ b/Documentation/dev-tools/kcov.rst
@@ -127,6 +127,85 @@ That is, a parent process opens /sys/kernel/debug/kcov, enables trace mode,
 mmaps coverage buffer and then forks child processes in a loop. Child processes
 only need to enable coverage (disable happens automatically on thread end).
 
+If you are only interested in the set of executed PCs, and not in their
+execution order, you can use KCOV_INIT_UNIQUE to collect exactly that:
+
+.. code-block:: c
+
+    #include <stdio.h>
+    #include <stddef.h>
+    #include <stdint.h>
+    #include <stdlib.h>
+    #include <sys/types.h>
+    #include <sys/stat.h>
+    #include <sys/ioctl.h>
+    #include <sys/mman.h>
+    #include <unistd.h>
+    #include <fcntl.h>
+
+    #define KCOV_INIT_UNIQUE                _IOR('c', 2, unsigned long)
+    #define KCOV_ENABLE			_IO('c', 100)
+    #define KCOV_DISABLE			_IO('c', 101)
+
+    #define BITS_PER_LONG 64
+    #define KCOV_TRACE_PC  0
+    #define KCOV_TRACE_CMP 1
+    #define KCOV_UNIQUE_PC 2
+    /*
+     * Determine start of text segment via 'nm vmlinux | grep _stext | cut -d " " -f1',
+     * and fill in.
+     */
+    #define STEXT_START 0xffffffff81000000
+
+
+
+    int main(int argc, char **argv)
+    {
+	int fd;
+	unsigned long *cover, i;
+	long n;
+	/* A single file descriptor allows coverage collection on a single
+	 * thread.
+	 */
+	fd = open("/sys/kernel/debug/kcov", O_RDWR);
+	if (fd == -1)
+		perror("open"), exit(1);
+	/* Set up unique PC collection; the ioctl returns the buffer size in bytes. */
+	if ((n = ioctl(fd, KCOV_INIT_UNIQUE, 0)) < 0)
+		perror("ioctl"), exit(1);
+	/* Mmap buffer shared between kernel- and user-space. */
+	cover = (unsigned long*)mmap(NULL, n,
+				     PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
+	if ((void*)cover == MAP_FAILED)
+		perror("mmap"), exit(1);
+	/* Enable coverage collection on the current thread. */
+	if (ioctl(fd, KCOV_ENABLE, KCOV_UNIQUE_PC))
+		perror("ioctl"), exit(1);
+	/* That's the target syscall. */
+	read(-1, NULL, 0);
+	/* Disable coverage collection for the current thread. After this call
+	 * coverage can be enabled for a different thread.
+	 */
+	if (ioctl(fd, KCOV_DISABLE, 0))
+		perror("ioctl"), exit(1);
+	/* Convert the byte size into the number of elements. */
+	n /= sizeof(unsigned long);
+	/* Print the executed PCs in ascending order. */
+	for (i = 0; i < n; i++) {
+		for (int j = 0; j < BITS_PER_LONG; j++) {
+			if (cover[i] & (1UL << j)) {
+				printf("0x%jx\n", (uintmax_t)(STEXT_START + (i * BITS_PER_LONG + j) * 4));
+			}
+		}
+	}
+	/* Free resources. */
+	if (munmap(cover, n * sizeof(unsigned long)))
+		perror("munmap"), exit(1);
+	if (close(fd))
+		perror("close"), exit(1);
+	return 0;
+    }
+
 Comparison operands collection
 ------------------------------
 
diff --git a/include/linux/kcov.h b/include/linux/kcov.h
index 4e3037dc1204..99c309b3a53b 100644
--- a/include/linux/kcov.h
+++ b/include/linux/kcov.h
@@ -12,17 +12,21 @@ enum kcov_mode {
 	/* Coverage collection is not enabled yet. */
 	KCOV_MODE_DISABLED = 0,
 	/* KCOV was initialized, but tracing mode hasn't been chosen yet. */
-	KCOV_MODE_INIT = 1,
+	KCOV_MODE_INIT_TRACE = 1,
+	/* KCOV was initialized for unique PC collection but not yet enabled. */
+	KCOV_MODE_INIT_UNIQUE = 2,
 	/*
 	 * Tracing coverage collection mode.
 	 * Covered PCs are collected in a per-task buffer.
 	 */
-	KCOV_MODE_TRACE_PC = 2,
+	KCOV_MODE_TRACE_PC = 4,
 	/* Collecting comparison operands mode. */
-	KCOV_MODE_TRACE_CMP = 3,
+	KCOV_MODE_TRACE_CMP = 8,
+	/* Collecting unique covered PCs. Execution order is not saved. */
+	KCOV_MODE_UNIQUE_PC = 16,
 };
 
-#define KCOV_IN_CTXSW	(1 << 30)
+#define KCOV_IN_CTXSW	(1 << 31)
 
 void kcov_task_init(struct task_struct *t);
 void kcov_task_exit(struct task_struct *t);
diff --git a/include/uapi/linux/kcov.h b/include/uapi/linux/kcov.h
index 1d0350e44ae3..5b99b6d1a1ac 100644
--- a/include/uapi/linux/kcov.h
+++ b/include/uapi/linux/kcov.h
@@ -19,6 +19,7 @@ struct kcov_remote_arg {
 #define KCOV_REMOTE_MAX_HANDLES		0x100
 
 #define KCOV_INIT_TRACE			_IOR('c', 1, unsigned long)
+#define KCOV_INIT_UNIQUE		_IOR('c', 2, unsigned long)
 #define KCOV_ENABLE			_IO('c', 100)
 #define KCOV_DISABLE			_IO('c', 101)
 #define KCOV_REMOTE_ENABLE		_IOW('c', 102, struct kcov_remote_arg)
@@ -35,6 +36,15 @@ enum {
 	KCOV_TRACE_PC = 0,
 	/* Collecting comparison operands mode. */
 	KCOV_TRACE_CMP = 1,
+	/*
+	 * Unique coverage collection mode.
+	 * Unique covered PCs are collected in a per-task buffer.
+	 * De-duplicates the collected PCs. Execution order is *not* saved.
+	 * Each bit in the buffer represents every fourth byte of the text segment.
+	 * Since a call instruction is at least four bytes on every supported
+	 * architecture, storing just every fourth byte is sufficient.
+	 */
+	KCOV_UNIQUE_PC = 2,
 };
 
 /*
diff --git a/kernel/kcov.c b/kernel/kcov.c
index 80bfe71bbe13..9d64d672a5dc 100644
--- a/kernel/kcov.c
+++ b/kernel/kcov.c
@@ -24,6 +24,7 @@
 #include <linux/refcount.h>
 #include <linux/log2.h>
 #include <asm/setup.h>
+#include <asm/sections.h>
 
 #define kcov_debug(fmt, ...) pr_debug("%s: " fmt, __func__, ##__VA_ARGS__)
 
@@ -151,10 +152,10 @@ static void kcov_remote_area_put(struct kcov_remote_area *area,
 	list_add(&area->list, &kcov_remote_areas);
 }
 
-static notrace bool check_kcov_mode(enum kcov_mode needed_mode, struct task_struct *t)
+static __always_inline notrace bool check_kcov_mode(enum kcov_mode needed_mode,
+						    struct task_struct *t,
+						    unsigned int *mode)
 {
-	unsigned int mode;
-
 	/*
 	 * We are interested in code coverage as a function of a syscall inputs,
 	 * so we ignore code executed in interrupts, unless we are in a remote
@@ -162,7 +163,7 @@ static notrace bool check_kcov_mode(enum kcov_mode needed_mode, struct task_stru
 	 */
 	if (!in_task() && !(in_serving_softirq() && t->kcov_softirq))
 		return false;
-	mode = READ_ONCE(t->kcov_mode);
+	*mode = READ_ONCE(t->kcov_mode);
 	/*
 	 * There is some code that runs in interrupts but for which
 	 * in_interrupt() returns false (e.g. preempt_schedule_irq()).
@@ -171,7 +172,7 @@ static notrace bool check_kcov_mode(enum kcov_mode needed_mode, struct task_stru
 	 * kcov_start().
 	 */
 	barrier();
-	return mode == needed_mode;
+	return ((int)(*mode & (KCOV_IN_CTXSW | needed_mode))) > 0;
 }
 
 static notrace unsigned long canonicalize_ip(unsigned long ip)
@@ -191,18 +192,27 @@ void notrace __sanitizer_cov_trace_pc(void)
 	struct task_struct *t;
 	unsigned long *area;
 	unsigned long ip = canonicalize_ip(_RET_IP_);
-	unsigned long pos;
+	unsigned long pos, idx;
+	unsigned int mode;
 
 	t = current;
-	if (!check_kcov_mode(KCOV_MODE_TRACE_PC, t))
+	if (!check_kcov_mode(KCOV_MODE_TRACE_PC | KCOV_MODE_UNIQUE_PC, t, &mode))
 		return;
 
 	area = t->kcov_area;
-	/* The first 64-bit word is the number of subsequent PCs. */
-	pos = READ_ONCE(area[0]) + 1;
-	if (likely(pos < t->kcov_size)) {
-		area[pos] = ip;
-		WRITE_ONCE(area[0], pos);
+	if (likely(mode == KCOV_MODE_TRACE_PC)) {
+		/* The first 64-bit word is the number of subsequent PCs. */
+		pos = READ_ONCE(area[0]) + 1;
+		if (likely(pos < t->kcov_size)) {
+			area[pos] = ip;
+			WRITE_ONCE(area[0], pos);
+		}
+	} else {
+		idx = (ip - canonicalize_ip((unsigned long)&_stext)) / 4;
+		pos = idx % BITS_PER_LONG;
+		idx /= BITS_PER_LONG;
+		if (likely(idx < t->kcov_size))
+			WRITE_ONCE(area[idx], READ_ONCE(area[idx]) | 1UL << pos);
 	}
 }
 EXPORT_SYMBOL(__sanitizer_cov_trace_pc);
@@ -213,9 +223,10 @@ static void notrace write_comp_data(u64 type, u64 arg1, u64 arg2, u64 ip)
 	struct task_struct *t;
 	u64 *area;
 	u64 count, start_index, end_pos, max_pos;
+	unsigned int mode;
 
 	t = current;
-	if (!check_kcov_mode(KCOV_MODE_TRACE_CMP, t))
+	if (!check_kcov_mode(KCOV_MODE_TRACE_CMP, t, &mode))
 		return;
 
 	ip = canonicalize_ip(ip);
@@ -362,7 +373,7 @@ void kcov_task_init(struct task_struct *t)
 static void kcov_reset(struct kcov *kcov)
 {
 	kcov->t = NULL;
-	kcov->mode = KCOV_MODE_INIT;
+	kcov->mode = KCOV_MODE_INIT_TRACE;
 	kcov->remote = false;
 	kcov->remote_size = 0;
 	kcov->sequence++;
@@ -468,12 +479,13 @@ static int kcov_mmap(struct file *filep, struct vm_area_struct *vma)
 
 	spin_lock_irqsave(&kcov->lock, flags);
 	size = kcov->size * sizeof(unsigned long);
-	if (kcov->mode != KCOV_MODE_INIT || vma->vm_pgoff != 0 ||
+	if (kcov->mode & ~(KCOV_MODE_INIT_TRACE | KCOV_MODE_INIT_UNIQUE) || vma->vm_pgoff != 0 ||
 	    vma->vm_end - vma->vm_start != size) {
 		res = -EINVAL;
 		goto exit;
 	}
 	if (!kcov->area) {
+		kcov_debug("mmap(): Allocating 0x%lx bytes\n", size);
 		kcov->area = area;
 		vma->vm_flags |= VM_DONTEXPAND;
 		spin_unlock_irqrestore(&kcov->lock, flags);
@@ -515,6 +527,8 @@ static int kcov_get_mode(unsigned long arg)
 {
 	if (arg == KCOV_TRACE_PC)
 		return KCOV_MODE_TRACE_PC;
+	else if (arg == KCOV_UNIQUE_PC)
+		return KCOV_MODE_UNIQUE_PC;
 	else if (arg == KCOV_TRACE_CMP)
 #ifdef CONFIG_KCOV_ENABLE_COMPARISONS
 		return KCOV_MODE_TRACE_CMP;
@@ -562,12 +576,14 @@ static int kcov_ioctl_locked(struct kcov *kcov, unsigned int cmd,
 {
 	struct task_struct *t;
 	unsigned long size, unused;
-	int mode, i;
+	int mode, i, text_size, ret = 0;
 	struct kcov_remote_arg *remote_arg;
 	struct kcov_remote *remote;
 	unsigned long flags;
 
 	switch (cmd) {
+	case KCOV_INIT_UNIQUE:
+		fallthrough;
 	case KCOV_INIT_TRACE:
 		/*
 		 * Enable kcov in trace mode and setup buffer size.
@@ -581,11 +597,42 @@ static int kcov_ioctl_locked(struct kcov *kcov, unsigned int cmd,
 		 * that must not overflow.
 		 */
 		size = arg;
-		if (size < 2 || size > INT_MAX / sizeof(unsigned long))
-			return -EINVAL;
-		kcov->size = size;
-		kcov->mode = KCOV_MODE_INIT;
-		return 0;
+		if (cmd == KCOV_INIT_UNIQUE) {
+			if (size != 0)
+				return -EINVAL;
+			text_size = (canonicalize_ip((unsigned long)&_etext)
+				     - canonicalize_ip((unsigned long)&_stext));
+			/*
+			 * A call instruction is at least four bytes on every supported
+			 * architecture, so tracking every fourth byte of text is enough.
+			 */
+			text_size = roundup(text_size, 4);
+			text_size /= 4;
+			/*
+			 * Round up size of text segment to multiple of BITS_PER_LONG.
+			 * Otherwise, we cannot track
+			 * the last (text_size % BITS_PER_LONG) addresses.
+			 */
+			text_size = roundup(text_size, BITS_PER_LONG);
+			/* Get the number of bytes needed */
+			text_size = text_size / 8;
+			/* mmap() requires size to be a multiple of PAGE_SIZE */
+			text_size = roundup(text_size, PAGE_SIZE);
+			/* Get the cover size (= number of bytes stored) */
+			ret = text_size;
+			kcov->size = text_size / sizeof(unsigned long);
+			kcov_debug("text size = 0x%lx, roundup = 0x%x, kcov->size = 0x%x\n",
+					((unsigned long)&_etext) - ((unsigned long)&_stext),
+					text_size,
+					kcov->size);
+			kcov->mode = KCOV_MODE_INIT_UNIQUE;
+		} else {
+			if (size < 2 || size > INT_MAX / sizeof(unsigned long))
+				return -EINVAL;
+			kcov->size = size;
+			kcov->mode = KCOV_MODE_INIT_TRACE;
+		}
+		return ret;
 	case KCOV_ENABLE:
 		/*
 		 * Enable coverage for the current task.
@@ -594,7 +641,7 @@ static int kcov_ioctl_locked(struct kcov *kcov, unsigned int cmd,
 		 * at task exit or voluntary by KCOV_DISABLE. After that it can
 		 * be enabled for another task.
 		 */
-		if (kcov->mode != KCOV_MODE_INIT || !kcov->area)
+		if (!kcov->area)
 			return -EINVAL;
 		t = current;
 		if (kcov->t != NULL || t->kcov != NULL)
@@ -602,6 +649,11 @@ static int kcov_ioctl_locked(struct kcov *kcov, unsigned int cmd,
 		mode = kcov_get_mode(arg);
 		if (mode < 0)
 			return mode;
+		if (kcov->mode == KCOV_MODE_INIT_TRACE && mode == KCOV_MODE_UNIQUE_PC)
+			return -EINVAL;
+		if (kcov->mode == KCOV_MODE_INIT_UNIQUE &&
+		    (mode & (KCOV_MODE_TRACE_PC | KCOV_MODE_TRACE_CMP)))
+			return -EINVAL;
 		kcov_fault_in_area(kcov);
 		kcov->mode = mode;
 		kcov_start(t, kcov, kcov->size, kcov->area, kcov->mode,
@@ -622,7 +674,7 @@ static int kcov_ioctl_locked(struct kcov *kcov, unsigned int cmd,
 		kcov_put(kcov);
 		return 0;
 	case KCOV_REMOTE_ENABLE:
-		if (kcov->mode != KCOV_MODE_INIT || !kcov->area)
+		if (kcov->mode != KCOV_MODE_INIT_TRACE || !kcov->area)
 			return -EINVAL;
 		t = current;
 		if (kcov->t != NULL || t->kcov != NULL)
-- 
2.30.2


* Re: [PATCHv3] Introduced new tracing mode KCOV_MODE_UNIQUE.
From: Greg Kroah-Hartman @ 2021-03-27 10:37 UTC
  To: Alexander Lochmann
  Cc: Dmitry Vyukov, Andrey Konovalov, Jonathan Corbet, Randy Dunlap,
	Andrew Klychkov, Miguel Ojeda, Andrew Morton, Jakub Kicinski,
	Aleksandr Nogikh, Wei Yongjun, Maciej Grochowski, kasan-dev,
	linux-doc, linux-kernel

On Fri, Mar 26, 2021 at 09:51:28PM +0100, Alexander Lochmann wrote:
> It simply stores the executed PCs.
> The execution order is discarded.
> Each bit in the shared buffer represents every fourth
> byte of the text segment.
> Since a call instruction on every supported
> architecture is at least four bytes, it is safe
> to just store every fourth byte of the text segment.
> In contrast to KCOV_MODE_TRACE_PC, the shared buffer
> cannot overflow. Thus, all executed PCs are recorded.

Odd line-wrapping :(

Anyway, this describes _what_ this does, but I have no idea _why_ we
want this at all.  What does this do that you can not do today?  Why is
this needed?  Who will use this?  What tools have been modified to work
with it to prove it works properly?

thanks,

greg k-h

* Re: [PATCHv3] Introduced new tracing mode KCOV_MODE_UNIQUE.
From: Andrey Konovalov @ 2021-03-27 14:56 UTC
  To: Alexander Lochmann
  Cc: Dmitry Vyukov, Andrey Konovalov, Jonathan Corbet, Randy Dunlap,
	Andrew Klychkov, Miguel Ojeda, Greg Kroah-Hartman, Andrew Morton,
	Jakub Kicinski, Aleksandr Nogikh, Wei Yongjun, Maciej Grochowski,
	kasan-dev, linux-doc, LKML

On Fri, Mar 26, 2021 at 9:52 PM Alexander Lochmann
<info@alexander-lochmann.de> wrote:
>

Hi Alexander,

> It simply stores the executed PCs.
> The execution order is discarded.
> Each bit in the shared buffer represents every fourth
> byte of the text segment.
> Since a call instruction on every supported
> architecture is at least four bytes, it is safe
> to just store every fourth byte of the text segment.

What about jumps?

[...]

> -#define KCOV_IN_CTXSW  (1 << 30)
> +#define KCOV_IN_CTXSW  (1 << 31)

This change needs to be mentioned and explained in the changelog.

> -static notrace bool check_kcov_mode(enum kcov_mode needed_mode, struct task_struct *t)
> +static __always_inline notrace bool check_kcov_mode(enum kcov_mode needed_mode,
> +                                                   struct task_struct *t,
> +                                                   unsigned int *mode)
>  {
> -       unsigned int mode;
> -
>         /*
>          * We are interested in code coverage as a function of a syscall inputs,
>          * so we ignore code executed in interrupts, unless we are in a remote
> @@ -162,7 +163,7 @@ static notrace bool check_kcov_mode(enum kcov_mode needed_mode, struct task_stru
>          */
>         if (!in_task() && !(in_serving_softirq() && t->kcov_softirq))
>                 return false;
> -       mode = READ_ONCE(t->kcov_mode);
> +       *mode = READ_ONCE(t->kcov_mode);
>         /*
>          * There is some code that runs in interrupts but for which
>          * in_interrupt() returns false (e.g. preempt_schedule_irq()).
> @@ -171,7 +172,7 @@ static notrace bool check_kcov_mode(enum kcov_mode needed_mode, struct task_stru
>          * kcov_start().
>          */
>         barrier();
> -       return mode == needed_mode;
> +       return ((int)(*mode & (KCOV_IN_CTXSW | needed_mode))) > 0;

This change needs to be mentioned and explained in the changelog.

[...]

>  static notrace unsigned long canonicalize_ip(unsigned long ip)
> @@ -191,18 +192,27 @@ void notrace __sanitizer_cov_trace_pc(void)
>         struct task_struct *t;
>         unsigned long *area;
>         unsigned long ip = canonicalize_ip(_RET_IP_);
> -       unsigned long pos;
> +       unsigned long pos, idx;
> +       unsigned int mode;
>
>         t = current;
> -       if (!check_kcov_mode(KCOV_MODE_TRACE_PC, t))
> +       if (!check_kcov_mode(KCOV_MODE_TRACE_PC | KCOV_MODE_UNIQUE_PC, t, &mode))
>                 return;
>
>         area = t->kcov_area;
> -       /* The first 64-bit word is the number of subsequent PCs. */
> -       pos = READ_ONCE(area[0]) + 1;
> -       if (likely(pos < t->kcov_size)) {
> -               area[pos] = ip;
> -               WRITE_ONCE(area[0], pos);
> +       if (likely(mode == KCOV_MODE_TRACE_PC)) {
> +               /* The first 64-bit word is the number of subsequent PCs. */
> +               pos = READ_ONCE(area[0]) + 1;
> +               if (likely(pos < t->kcov_size)) {
> +                       area[pos] = ip;
> +                       WRITE_ONCE(area[0], pos);
> +               }
> +       } else {
> +               idx = (ip - canonicalize_ip((unsigned long)&_stext)) / 4;
> +               pos = idx % BITS_PER_LONG;
> +               idx /= BITS_PER_LONG;
> +               if (likely(idx < t->kcov_size))
> +                       WRITE_ONCE(area[idx], READ_ONCE(area[idx]) | 1L << pos);

This is confusing: for KCOV_MODE_TRACE_PC, pos is used to index area,
while in the else branch, idx is used to index area. You should swap idx and pos.

[...]

> @@ -213,9 +223,10 @@ static void notrace write_comp_data(u64 type, u64 arg1, u64 arg2, u64 ip)
>         struct task_struct *t;
>         u64 *area;
>         u64 count, start_index, end_pos, max_pos;
> +       unsigned int mode;
>
>         t = current;
> -       if (!check_kcov_mode(KCOV_MODE_TRACE_CMP, t))
> +       if (!check_kcov_mode(KCOV_MODE_TRACE_CMP, t, &mode))
>                 return;

mode isn't used here, right? No need for it then.

> @@ -562,12 +576,14 @@ static int kcov_ioctl_locked(struct kcov *kcov, unsigned int cmd,
>  {
>         struct task_struct *t;
>         unsigned long size, unused;
> -       int mode, i;
> +       int mode, i, text_size, ret = 0;
>         struct kcov_remote_arg *remote_arg;
>         struct kcov_remote *remote;
>         unsigned long flags;
>
>         switch (cmd) {
> +       case KCOV_INIT_UNIQUE:
> +               fallthrough;
>         case KCOV_INIT_TRACE:
>                 /*
>                  * Enable kcov in trace mode and setup buffer size.
> @@ -581,11 +597,42 @@ static int kcov_ioctl_locked(struct kcov *kcov, unsigned int cmd,
>                  * that must not overflow.
>                  */
>                 size = arg;
> -               if (size < 2 || size > INT_MAX / sizeof(unsigned long))
> -                       return -EINVAL;
> -               kcov->size = size;
> -               kcov->mode = KCOV_MODE_INIT;
> -               return 0;
> +               if (cmd == KCOV_INIT_UNIQUE) {

Let's put this code under KCOV_INIT_UNIQUE in the switch. This
internal if only saves duplicating two lines of code, which isn't
worth it.

> +                       if (size != 0)
> +                               return -EINVAL;
> +                       text_size = (canonicalize_ip((unsigned long)&_etext)
> +                                    - canonicalize_ip((unsigned long)&_stext));
> +                       /**
> +                        * A call instr is at least four bytes on every supported architecture.
> +                        * Hence, just every fourth instruction can potentially be a call.
> +                        */
> +                       text_size = roundup(text_size, 4);
> +                       text_size /= 4;
> +                       /*
> +                        * Round up size of text segment to multiple of BITS_PER_LONG.
> +                        * Otherwise, we cannot track
> +                        * the last (text_size % BITS_PER_LONG) addresses.
> +                        */
> +                       text_size = roundup(text_size, BITS_PER_LONG);
> +                       /* Get the amount of bytes needed */
> +                       text_size = text_size / 8;
> +                       /* mmap() requires size to be a multiple of PAGE_SIZE */
> +                       text_size = roundup(text_size, PAGE_SIZE);
> +                       /* Get the cover size (= amount of bytes stored) */
> +                       ret = text_size;
> +                       kcov->size = text_size / sizeof(unsigned long);
> +                       kcov_debug("text size = 0x%lx, roundup = 0x%x, kcov->size = 0x%x\n",
> +                                       ((unsigned long)&_etext) - ((unsigned long)&_stext),
> +                                       text_size,
> +                                       kcov->size);
> +                       kcov->mode = KCOV_MODE_INIT_UNIQUE;
> +               } else {
> +                       if (size < 2 || size > INT_MAX / sizeof(unsigned long))
> +                               return -EINVAL;
> +                       kcov->size = size;
> +                       kcov->mode = KCOV_MODE_INIT_TRACE;
> +               }
> +               return ret;

Thanks!

* Re: [PATCHv3] Introduced new tracing mode KCOV_MODE_UNIQUE.
From: Dmitry Vyukov @ 2021-04-16  8:42 UTC
  To: Andrey Konovalov
  Cc: Alexander Lochmann, Andrey Konovalov, Jonathan Corbet,
	Randy Dunlap, Andrew Klychkov, Miguel Ojeda, Greg Kroah-Hartman,
	Andrew Morton, Jakub Kicinski, Aleksandr Nogikh, Wei Yongjun,
	Maciej Grochowski, kasan-dev, open list:DOCUMENTATION, LKML

On Sat, Mar 27, 2021 at 3:56 PM Andrey Konovalov <andreyknvl@gmail.com> wrote:
>
> On Fri, Mar 26, 2021 at 9:52 PM Alexander Lochmann
> <info@alexander-lochmann.de> wrote:
> >
>
> Hi Alexander,
>
> > It simply stores the executed PCs.
> > The execution order is discarded.
> > Each bit in the shared buffer represents every fourth
> > byte of the text segment.
> > Since a call instruction on every supported
> > architecture is at least four bytes, it is safe
> > to just store every fourth byte of the text segment.
>
> What about jumps?

KCOV adds a call to __sanitizer_cov_trace_pc() per coverage point. So besides
the instructions in the original code, we also always have this call.
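
For illustration, this is roughly what the trace-pc instrumentation turns a
basic block into (a sketch; the exact codegen varies by compiler and
architecture):

    if (cond) {
        __sanitizer_cov_trace_pc();  /* an emitted call, >= 4 bytes on all supported arches */
        do_something();
    }

Since every coverage PC is the return address of such a call, two distinct
coverage PCs are always at least four bytes apart.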


> [...]
>
> > -#define KCOV_IN_CTXSW  (1 << 30)
> > +#define KCOV_IN_CTXSW  (1 << 31)
>
> This change needs to be mentioned and explained in the changelog.
>
> > -static notrace bool check_kcov_mode(enum kcov_mode needed_mode, struct task_struct *t)
> > +static __always_inline notrace bool check_kcov_mode(enum kcov_mode needed_mode,
> > +                                                   struct task_struct *t,
> > +                                                   unsigned int *mode)
> >  {
> > -       unsigned int mode;
> > -
> >         /*
> >          * We are interested in code coverage as a function of a syscall inputs,
> >          * so we ignore code executed in interrupts, unless we are in a remote
> > @@ -162,7 +163,7 @@ static notrace bool check_kcov_mode(enum kcov_mode needed_mode, struct task_stru
> >          */
> >         if (!in_task() && !(in_serving_softirq() && t->kcov_softirq))
> >                 return false;
> > -       mode = READ_ONCE(t->kcov_mode);
> > +       *mode = READ_ONCE(t->kcov_mode);
> >         /*
> >          * There is some code that runs in interrupts but for which
> >          * in_interrupt() returns false (e.g. preempt_schedule_irq()).
> > @@ -171,7 +172,7 @@ static notrace bool check_kcov_mode(enum kcov_mode needed_mode, struct task_stru
> >          * kcov_start().
> >          */
> >         barrier();
> > -       return mode == needed_mode;
> > +       return ((int)(*mode & (KCOV_IN_CTXSW | needed_mode))) > 0;
>
> This change needs to be mentioned and explained in the changelog.
>
> [...]
>
> >  static notrace unsigned long canonicalize_ip(unsigned long ip)
> > @@ -191,18 +192,27 @@ void notrace __sanitizer_cov_trace_pc(void)
> >         struct task_struct *t;
> >         unsigned long *area;
> >         unsigned long ip = canonicalize_ip(_RET_IP_);
> > -       unsigned long pos;
> > +       unsigned long pos, idx;
> > +       unsigned int mode;
> >
> >         t = current;
> > -       if (!check_kcov_mode(KCOV_MODE_TRACE_PC, t))
> > +       if (!check_kcov_mode(KCOV_MODE_TRACE_PC | KCOV_MODE_UNIQUE_PC, t, &mode))
> >                 return;
> >
> >         area = t->kcov_area;
> > -       /* The first 64-bit word is the number of subsequent PCs. */
> > -       pos = READ_ONCE(area[0]) + 1;
> > -       if (likely(pos < t->kcov_size)) {
> > -               area[pos] = ip;
> > -               WRITE_ONCE(area[0], pos);
> > +       if (likely(mode == KCOV_MODE_TRACE_PC)) {
> > +               /* The first 64-bit word is the number of subsequent PCs. */
> > +               pos = READ_ONCE(area[0]) + 1;
> > +               if (likely(pos < t->kcov_size)) {
> > +                       area[pos] = ip;
> > +                       WRITE_ONCE(area[0], pos);
> > +               }
> > +       } else {
> > +               idx = (ip - canonicalize_ip((unsigned long)&_stext)) / 4;
> > +               pos = idx % BITS_PER_LONG;
> > +               idx /= BITS_PER_LONG;
> > +               if (likely(idx < t->kcov_size))
> > +                       WRITE_ONCE(area[idx], READ_ONCE(area[idx]) | 1L << pos);
>
> This is confusing: for KCOV_MODE_TRACE_PC, pos is used to index area,
> and for else, idx is used to index area. You should swap idx and pos.
>
> [...]
>
> > @@ -213,9 +223,10 @@ static void notrace write_comp_data(u64 type, u64 arg1, u64 arg2, u64 ip)
> >         struct task_struct *t;
> >         u64 *area;
> >         u64 count, start_index, end_pos, max_pos;
> > +       unsigned int mode;
> >
> >         t = current;
> > -       if (!check_kcov_mode(KCOV_MODE_TRACE_CMP, t))
> > +       if (!check_kcov_mode(KCOV_MODE_TRACE_CMP, t, &mode))
> >                 return;
>
> mode isn't used here, right? No need for it then.
>
> > @@ -562,12 +576,14 @@ static int kcov_ioctl_locked(struct kcov *kcov, unsigned int cmd,
> >  {
> >         struct task_struct *t;
> >         unsigned long size, unused;
> > -       int mode, i;
> > +       int mode, i, text_size, ret = 0;
> >         struct kcov_remote_arg *remote_arg;
> >         struct kcov_remote *remote;
> >         unsigned long flags;
> >
> >         switch (cmd) {
> > +       case KCOV_INIT_UNIQUE:
> > +               fallthrough;
> >         case KCOV_INIT_TRACE:
> >                 /*
> >                  * Enable kcov in trace mode and setup buffer size.
> > @@ -581,11 +597,42 @@ static int kcov_ioctl_locked(struct kcov *kcov, unsigned int cmd,
> >                  * that must not overflow.
> >                  */
> >                 size = arg;
> > -               if (size < 2 || size > INT_MAX / sizeof(unsigned long))
> > -                       return -EINVAL;
> > -               kcov->size = size;
> > -               kcov->mode = KCOV_MODE_INIT;
> > -               return 0;
> > +               if (cmd == KCOV_INIT_UNIQUE) {
>
> Let's put this code under KCOV_INIT_UNIQUE in the switch. This
> internal if only saves duplicating two lines of code, which isn't
> worth it.
>
> > +                       if (size != 0)
> > +                               return -EINVAL;
> > +                       text_size = (canonicalize_ip((unsigned long)&_etext)
> > +                                    - canonicalize_ip((unsigned long)&_stext));
> > +                       /**
> > +                        * A call instr is at least four bytes on every supported architecture.
> > +                        * Hence, just every fourth instruction can potentially be a call.
> > +                        */
> > +                       text_size = roundup(text_size, 4);
> > +                       text_size /= 4;
> > +                       /*
> > +                        * Round up size of text segment to multiple of BITS_PER_LONG.
> > +                        * Otherwise, we cannot track
> > +                        * the last (text_size % BITS_PER_LONG) addresses.
> > +                        */
> > +                       text_size = roundup(text_size, BITS_PER_LONG);
> > +                       /* Get the amount of bytes needed */
> > +                       text_size = text_size / 8;
> > +                       /* mmap() requires size to be a multiple of PAGE_SIZE */
> > +                       text_size = roundup(text_size, PAGE_SIZE);
> > +                       /* Get the cover size (= amount of bytes stored) */
> > +                       ret = text_size;
> > +                       kcov->size = text_size / sizeof(unsigned long);
> > +                       kcov_debug("text size = 0x%lx, roundup = 0x%x, kcov->size = 0x%x\n",
> > +                                       ((unsigned long)&_etext) - ((unsigned long)&_stext),
> > +                                       text_size,
> > +                                       kcov->size);
> > +                       kcov->mode = KCOV_MODE_INIT_UNIQUE;
> > +               } else {
> > +                       if (size < 2 || size > INT_MAX / sizeof(unsigned long))
> > +                               return -EINVAL;
> > +                       kcov->size = size;
> > +                       kcov->mode = KCOV_MODE_INIT_TRACE;
> > +               }
> > +               return ret;
>
> Thanks!

* Re: [PATCHv3] Introduced new tracing mode KCOV_MODE_UNIQUE.
From: Andrey Konovalov @ 2021-04-17 19:46 UTC
  To: Dmitry Vyukov, Alexander Lochmann
  Cc: Andrey Konovalov, Jonathan Corbet, Randy Dunlap, Andrew Klychkov,
	Miguel Ojeda, Greg Kroah-Hartman, Andrew Morton, Jakub Kicinski,
	Aleksandr Nogikh, Wei Yongjun, Maciej Grochowski, kasan-dev,
	open list:DOCUMENTATION, LKML

On Fri, Apr 16, 2021 at 10:42 AM Dmitry Vyukov <dvyukov@google.com> wrote:
>
> On Sat, Mar 27, 2021 at 3:56 PM Andrey Konovalov <andreyknvl@gmail.com> wrote:
> >
> > On Fri, Mar 26, 2021 at 9:52 PM Alexander Lochmann
> > <info@alexander-lochmann.de> wrote:
> > >
> >
> > Hi Alexander,
> >
> > > It simply stores the executed PCs.
> > > The execution order is discarded.
> > > Each bit in the shared buffer represents every fourth
> > > byte of the text segment.
> > > Since a call instruction on every supported
> > > architecture is at least four bytes, it is safe
> > > to just store every fourth byte of the text segment.
> >
> > What about jumps?
>
> KCOV adds call __sanitizer_cov_trace_pc per coverage point. So besides
> the instructions in the original code, we also always have this call.

Ah, I see. This should be explained in the changelog.

This means that a KCOV user will need the kernel binary to recover the
actual PCs that were covered, as the information about the lower two
bits is lost, right? This needs to be explained as well.
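
For example (a sketch, assuming BITS_PER_LONG == 64 and the decode loop from
the documentation example): a set bit j in word i of the buffer only recovers

    pc = _stext + (i * 64 + j) * 4;  /* 4-byte granularity, low two bits lost */

and mapping that back to a precise location means resolving it against the
binary, e.g. with something like addr2line -e vmlinux.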

Thanks!

* Re: [PATCHv3] Introduced new tracing mode KCOV_MODE_UNIQUE.
From: Alexander Lochmann @ 2021-09-18 20:22 UTC
  To: Andrey Konovalov
  Cc: Dmitry Vyukov, Andrey Konovalov, Jonathan Corbet, Randy Dunlap,
	Andrew Klychkov, Miguel Ojeda, Greg Kroah-Hartman, Andrew Morton,
	Jakub Kicinski, Aleksandr Nogikh, Wei Yongjun, Maciej Grochowski,
	kasan-dev, linux-doc, LKML


On 27.03.21 15:56, Andrey Konovalov wrote:
> 
>> @@ -213,9 +223,10 @@ static void notrace write_comp_data(u64 type, u64 arg1, u64 arg2, u64 ip)
>>          struct task_struct *t;
>>          u64 *area;
>>          u64 count, start_index, end_pos, max_pos;
>> +       unsigned int mode;
>>
>>          t = current;
>> -       if (!check_kcov_mode(KCOV_MODE_TRACE_CMP, t))
>> +       if (!check_kcov_mode(KCOV_MODE_TRACE_CMP, t, &mode))
>>                  return;
> 
> mode isn't used here, right? No need for it then.
> 
Right, it's not used here. However, check_kcov_mode() needs it. Dmitry
suggested passing a pointer to check_kcov_mode() and letting the optimizer
do the rest.
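
(A minimal sketch of that pattern, as I understand it: since check_kcov_mode()
is __always_inline, a caller that never reads *mode lets the compiler drop the
store entirely:)

	unsigned int mode;

	if (!check_kcov_mode(KCOV_MODE_TRACE_CMP, t, &mode))
		return;
	/* mode is never read past this point, so the inlined *mode store is dead */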
>> @@ -562,12 +576,14 @@ static int kcov_ioctl_locked(struct kcov *kcov, unsigned int cmd,
>>   {
>>          struct task_struct *t;
>>          unsigned long size, unused;
>> -       int mode, i;
>> +       int mode, i, text_size, ret = 0;
>>          struct kcov_remote_arg *remote_arg;
>>          struct kcov_remote *remote;
>>          unsigned long flags;
>>
>>          switch (cmd) {
>> +       case KCOV_INIT_UNIQUE:
>> +               fallthrough;
>>          case KCOV_INIT_TRACE:
>>                  /*
>>                   * Enable kcov in trace mode and setup buffer size.
>> @@ -581,11 +597,42 @@ static int kcov_ioctl_locked(struct kcov *kcov, unsigned int cmd,
>>                   * that must not overflow.
>>                   */
>>                  size = arg;
>> -               if (size < 2 || size > INT_MAX / sizeof(unsigned long))
>> -                       return -EINVAL;
>> -               kcov->size = size;
>> -               kcov->mode = KCOV_MODE_INIT;
>> -               return 0;
>> +               if (cmd == KCOV_INIT_UNIQUE) {
> 
> Let's put this code under KCOV_INIT_UNIQUE in the switch. This
> internal if only saves duplicating two lines of code, which isn't
> worth it.
So, shall I skip the fallthrough and move 'my' code upwards?

-- 
Alexander Lochmann                PGP key: 0xBC3EF6FD
Heiliger Weg 72                   phone:  +49.231.28053964
D-44141 Dortmund                  mobile: +49.151.15738323

