* [PATCH] kcov: properly handle subsequent mmap calls
@ 2021-12-20 15:21 Aleksandr Nogikh
From: Aleksandr Nogikh @ 2021-12-20 15:21 UTC (permalink / raw)
  To: kasan-dev, linux-kernel, akpm
  Cc: dvyukov, andreyknvl, elver, glider, tarasmadan, bigeasy, nogikh

Subsequent mmaps of the same kcov descriptor currently do not update the
virtual memory of the task and yet return 0 (success). This is
counter-intuitive and may lead to unexpected memory access errors.

Also, this unnecessarily limits kcov to the simplest usage scenarios.
Kcov instances are effectively tied forever to the address space that
first mmapped them, so it is e.g. impossible to reuse the same kcov
handle in forked child processes unless the memory was mmapped before
the fork. This is exactly what we tried to do in syzkaller, which is
how we inadvertently ran into this problem.

Allocate the coverage buffer during the KCOV_INIT_TRACE ioctl (i.e.
when entering KCOV_MODE_INIT) in order to untie mmap from coverage
collection. Modify kcov_mmap() so that it can be reliably called any
number of times once KCOV_INIT_TRACE has succeeded.
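
To illustrate the flow this enables, here is a minimal user space
sketch (a hypothetical example, not part of the patch; it assumes the
kcov uAPI constants from include/uapi/linux/kcov.h and the usage
documented in Documentation/dev-tools/kcov.rst):

  #include <fcntl.h>
  #include <sys/ioctl.h>
  #include <sys/mman.h>
  #include <sys/wait.h>
  #include <unistd.h>
  #include <linux/kcov.h>

  #define COVER_SIZE (64 << 10)

  int main(void)
  {
      unsigned long *cover;
      int fd = open("/sys/kernel/debug/kcov", O_RDWR);

      if (fd == -1)
          return 1;
      /* The buffer size is set up once, before any mmap. */
      if (ioctl(fd, KCOV_INIT_TRACE, COVER_SIZE))
          return 1;
      /* First mmap of the descriptor, e.g. in the parent. */
      cover = (unsigned long *)mmap(NULL,
              COVER_SIZE * sizeof(unsigned long),
              PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
      if (cover == MAP_FAILED)
          return 1;
      if (fork() == 0) {
          /*
           * Previously this second mmap returned 0 without
           * inserting any pages; with this patch the child can
           * establish its own mapping of the same buffer.
           */
          cover = (unsigned long *)mmap(NULL,
                  COVER_SIZE * sizeof(unsigned long),
                  PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
          if (cover == MAP_FAILED)
              _exit(1);
          if (ioctl(fd, KCOV_ENABLE, KCOV_TRACE_PC))
              _exit(1);
          /* ... run the code to be traced ... */
          ioctl(fd, KCOV_DISABLE, 0);
          _exit(0);
      }
      wait(NULL);
      return 0;
  }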

Refactor the ioctl processing so that the vmalloc() can be performed
before the spinlock is taken.

These changes to the user-facing interface of the tool only weaken the
preconditions, so all existing user space code should remain compatible
with the new version.

Signed-off-by: Aleksandr Nogikh <nogikh@google.com>
---
 kernel/kcov.c | 94 +++++++++++++++++++++++++++++----------------------
 1 file changed, 53 insertions(+), 41 deletions(-)

diff --git a/kernel/kcov.c b/kernel/kcov.c
index 36ca640c4f8e..49e1fa2b330f 100644
--- a/kernel/kcov.c
+++ b/kernel/kcov.c
@@ -459,37 +459,28 @@ void kcov_task_exit(struct task_struct *t)
 static int kcov_mmap(struct file *filep, struct vm_area_struct *vma)
 {
 	int res = 0;
-	void *area;
 	struct kcov *kcov = vma->vm_file->private_data;
 	unsigned long size, off;
 	struct page *page;
 	unsigned long flags;
 
-	area = vmalloc_user(vma->vm_end - vma->vm_start);
-	if (!area)
-		return -ENOMEM;
-
 	spin_lock_irqsave(&kcov->lock, flags);
 	size = kcov->size * sizeof(unsigned long);
-	if (kcov->mode != KCOV_MODE_INIT || vma->vm_pgoff != 0 ||
+	if (kcov->area == NULL || vma->vm_pgoff != 0 ||
 	    vma->vm_end - vma->vm_start != size) {
 		res = -EINVAL;
 		goto exit;
 	}
-	if (!kcov->area) {
-		kcov->area = area;
-		vma->vm_flags |= VM_DONTEXPAND;
-		spin_unlock_irqrestore(&kcov->lock, flags);
-		for (off = 0; off < size; off += PAGE_SIZE) {
-			page = vmalloc_to_page(kcov->area + off);
-			if (vm_insert_page(vma, vma->vm_start + off, page))
-				WARN_ONCE(1, "vm_insert_page() failed");
-		}
-		return 0;
+	spin_unlock_irqrestore(&kcov->lock, flags);
+	vma->vm_flags |= VM_DONTEXPAND;
+	for (off = 0; off < size; off += PAGE_SIZE) {
+		page = vmalloc_to_page(kcov->area + off);
+		if (vm_insert_page(vma, vma->vm_start + off, page))
+			WARN_ONCE(1, "vm_insert_page() failed");
 	}
+	return 0;
 exit:
 	spin_unlock_irqrestore(&kcov->lock, flags);
-	vfree(area);
 	return res;
 }
 
@@ -564,31 +555,13 @@ static int kcov_ioctl_locked(struct kcov *kcov, unsigned int cmd,
 			     unsigned long arg)
 {
 	struct task_struct *t;
-	unsigned long size, unused;
+	unsigned long unused;
 	int mode, i;
 	struct kcov_remote_arg *remote_arg;
 	struct kcov_remote *remote;
 	unsigned long flags;
 
 	switch (cmd) {
-	case KCOV_INIT_TRACE:
-		/*
-		 * Enable kcov in trace mode and setup buffer size.
-		 * Must happen before anything else.
-		 */
-		if (kcov->mode != KCOV_MODE_DISABLED)
-			return -EBUSY;
-		/*
-		 * Size must be at least 2 to hold current position and one PC.
-		 * Later we allocate size * sizeof(unsigned long) memory,
-		 * that must not overflow.
-		 */
-		size = arg;
-		if (size < 2 || size > INT_MAX / sizeof(unsigned long))
-			return -EINVAL;
-		kcov->size = size;
-		kcov->mode = KCOV_MODE_INIT;
-		return 0;
 	case KCOV_ENABLE:
 		/*
 		 * Enable coverage for the current task.
@@ -685,6 +658,49 @@ static int kcov_ioctl_locked(struct kcov *kcov, unsigned int cmd,
 	}
 }
 
+static int kcov_ioctl_unlocked(struct kcov *kcov, unsigned int cmd,
+			     unsigned long arg)
+{
+	unsigned long size, flags;
+	void *area;
+	int res;
+
+	switch (cmd) {
+	case KCOV_INIT_TRACE:
+		/*
+		 * Enable kcov in trace mode and setup buffer size.
+		 * Must happen before anything else.
+		 *
+		 *
+		 * Size must be at least 2 to hold current position and one PC.
+		 */
+		size = arg;
+		if (size < 2 || size > INT_MAX / sizeof(unsigned long))
+			return -EINVAL;
+
+		area = vmalloc_user(size * sizeof(unsigned long));
+		if (area == NULL)
+			return -ENOMEM;
+
+		spin_lock_irqsave(&kcov->lock, flags);
+		if (kcov->mode != KCOV_MODE_DISABLED) {
+			spin_unlock_irqrestore(&kcov->lock, flags);
+			vfree(area);
+			return -EBUSY;
+		}
+		kcov->area = area;
+		kcov->size = size;
+		kcov->mode = KCOV_MODE_INIT;
+		spin_unlock_irqrestore(&kcov->lock, flags);
+		return 0;
+	default:
+		spin_lock_irqsave(&kcov->lock, flags);
+		res = kcov_ioctl_locked(kcov, cmd, arg);
+		spin_unlock_irqrestore(&kcov->lock, flags);
+		return res;
+	}
+}
+
 static long kcov_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
 {
 	struct kcov *kcov;
@@ -692,7 +708,6 @@ static long kcov_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
 	struct kcov_remote_arg *remote_arg = NULL;
 	unsigned int remote_num_handles;
 	unsigned long remote_arg_size;
-	unsigned long flags;
 
 	if (cmd == KCOV_REMOTE_ENABLE) {
 		if (get_user(remote_num_handles, (unsigned __user *)(arg +
@@ -713,10 +728,7 @@ static long kcov_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
 	}
 
 	kcov = filep->private_data;
-	spin_lock_irqsave(&kcov->lock, flags);
-	res = kcov_ioctl_locked(kcov, cmd, arg);
-	spin_unlock_irqrestore(&kcov->lock, flags);
-
+	res = kcov_ioctl_unlocked(kcov, cmd, arg);
 	kfree(remote_arg);
 
 	return res;
-- 
2.34.1.173.g76aa8bc2d0-goog



* Re: [PATCH] kcov: properly handle subsequent mmap calls
From: Andrey Konovalov @ 2021-12-20 15:59 UTC (permalink / raw)
  To: Aleksandr Nogikh
  Cc: kasan-dev, LKML, Andrew Morton, Dmitry Vyukov, Marco Elver,
	Alexander Potapenko, Taras Madan, Sebastian Andrzej Siewior

On Mon, Dec 20, 2021 at 4:22 PM Aleksandr Nogikh <nogikh@google.com> wrote:
>
> Subsequent mmaps of the same kcov descriptor currently do not update the
> virtual memory of the task and yet return 0 (success). This is
> counter-intuitive and may lead to unexpected memory access errors.
>
> Also, this unnecessarily limits kcov to the simplest usage scenarios.
> Kcov instances are effectively tied forever to the address space that
> first mmapped them, so it is e.g. impossible to reuse the same kcov
> handle in forked child processes unless the memory was mmapped before
> the fork. This is exactly what we tried to do in syzkaller, which is
> how we inadvertently ran into this problem.
>
> Allocate the coverage buffer during the KCOV_INIT_TRACE ioctl (i.e.
> when entering KCOV_MODE_INIT) in order to untie mmap from coverage
> collection. Modify kcov_mmap() so that it can be reliably called any
> number of times once KCOV_INIT_TRACE has succeeded.
>
> Refactor the ioctl processing so that the vmalloc() can be performed
> before the spinlock is taken.
>
> These changes to the user-facing interface of the tool only weaken the
> preconditions, so all existing user space code should remain compatible
> with the new version.
>
> Signed-off-by: Aleksandr Nogikh <nogikh@google.com>

Hi Aleksandr,

> ---
>  kernel/kcov.c | 94 +++++++++++++++++++++++++++++----------------------
>  1 file changed, 53 insertions(+), 41 deletions(-)
>
> diff --git a/kernel/kcov.c b/kernel/kcov.c
> index 36ca640c4f8e..49e1fa2b330f 100644
> --- a/kernel/kcov.c
> +++ b/kernel/kcov.c
> @@ -459,37 +459,28 @@ void kcov_task_exit(struct task_struct *t)
>  static int kcov_mmap(struct file *filep, struct vm_area_struct *vma)
>  {
>         int res = 0;
> -       void *area;
>         struct kcov *kcov = vma->vm_file->private_data;
>         unsigned long size, off;
>         struct page *page;
>         unsigned long flags;
>
> -       area = vmalloc_user(vma->vm_end - vma->vm_start);
> -       if (!area)
> -               return -ENOMEM;
> -
>         spin_lock_irqsave(&kcov->lock, flags);
>         size = kcov->size * sizeof(unsigned long);
> -       if (kcov->mode != KCOV_MODE_INIT || vma->vm_pgoff != 0 ||
> +       if (kcov->area == NULL || vma->vm_pgoff != 0 ||
>             vma->vm_end - vma->vm_start != size) {
>                 res = -EINVAL;
>                 goto exit;
>         }
> -       if (!kcov->area) {
> -               kcov->area = area;
> -               vma->vm_flags |= VM_DONTEXPAND;
> -               spin_unlock_irqrestore(&kcov->lock, flags);
> -               for (off = 0; off < size; off += PAGE_SIZE) {
> -                       page = vmalloc_to_page(kcov->area + off);
> -                       if (vm_insert_page(vma, vma->vm_start + off, page))
> -                               WARN_ONCE(1, "vm_insert_page() failed");
> -               }
> -               return 0;
> +       spin_unlock_irqrestore(&kcov->lock, flags);
> +       vma->vm_flags |= VM_DONTEXPAND;
> +       for (off = 0; off < size; off += PAGE_SIZE) {
> +               page = vmalloc_to_page(kcov->area + off);
> +               if (vm_insert_page(vma, vma->vm_start + off, page))
> +                       WARN_ONCE(1, "vm_insert_page() failed");
>         }
> +       return 0;
>  exit:
>         spin_unlock_irqrestore(&kcov->lock, flags);
> -       vfree(area);
>         return res;
>  }
>
> @@ -564,31 +555,13 @@ static int kcov_ioctl_locked(struct kcov *kcov, unsigned int cmd,
>                              unsigned long arg)
>  {
>         struct task_struct *t;
> -       unsigned long size, unused;
> +       unsigned long unused;
>         int mode, i;
>         struct kcov_remote_arg *remote_arg;
>         struct kcov_remote *remote;
>         unsigned long flags;
>
>         switch (cmd) {
> -       case KCOV_INIT_TRACE:
> -               /*
> -                * Enable kcov in trace mode and setup buffer size.
> -                * Must happen before anything else.
> -                */
> -               if (kcov->mode != KCOV_MODE_DISABLED)
> -                       return -EBUSY;
> -               /*
> -                * Size must be at least 2 to hold current position and one PC.
> -                * Later we allocate size * sizeof(unsigned long) memory,
> -                * that must not overflow.
> -                */
> -               size = arg;
> -               if (size < 2 || size > INT_MAX / sizeof(unsigned long))
> -                       return -EINVAL;
> -               kcov->size = size;
> -               kcov->mode = KCOV_MODE_INIT;
> -               return 0;
>         case KCOV_ENABLE:
>                 /*
>                  * Enable coverage for the current task.
> @@ -685,6 +658,49 @@ static int kcov_ioctl_locked(struct kcov *kcov, unsigned int cmd,
>         }
>  }
>
> +static int kcov_ioctl_unlocked(struct kcov *kcov, unsigned int cmd,
> +                            unsigned long arg)
> +{
> +       unsigned long size, flags;
> +       void *area;
> +       int res;
> +
> +       switch (cmd) {
> +       case KCOV_INIT_TRACE:
> +               /*
> +                * Enable kcov in trace mode and setup buffer size.
> +                * Must happen before anything else.
> +                *
> +                *

Accidental extra lines?

> +                * Size must be at least 2 to hold current position and one PC.
> +                */
> +               size = arg;
> +               if (size < 2 || size > INT_MAX / sizeof(unsigned long))
> +                       return -EINVAL;
> +
> +               area = vmalloc_user(size * sizeof(unsigned long));
> +               if (area == NULL)
> +                       return -ENOMEM;
> +
> +               spin_lock_irqsave(&kcov->lock, flags);
> +               if (kcov->mode != KCOV_MODE_DISABLED) {
> +                       spin_unlock_irqrestore(&kcov->lock, flags);
> +                       vfree(area);
> +                       return -EBUSY;
> +               }
> +               kcov->area = area;
> +               kcov->size = size;
> +               kcov->mode = KCOV_MODE_INIT;
> +               spin_unlock_irqrestore(&kcov->lock, flags);
> +               return 0;
> +       default:

I would add a clarifying comment here saying something like:

/* All other commands are handled by kcov_ioctl_locked(). */

> +               spin_lock_irqsave(&kcov->lock, flags);
> +               res = kcov_ioctl_locked(kcov, cmd, arg);
> +               spin_unlock_irqrestore(&kcov->lock, flags);
> +               return res;
> +       }
> +}

Please split this change into two patches:

1. Add kcov_ioctl_unlocked() that handles the KCOV_INIT_TRACE special
case without any functional changes.
2. Make the functional changes to kcov_ioctl_unlocked() and the other
parts of the code.

Otherwise, reviewing is a bit hard.

> +
>  static long kcov_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
>  {
>         struct kcov *kcov;
> @@ -692,7 +708,6 @@ static long kcov_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
>         struct kcov_remote_arg *remote_arg = NULL;
>         unsigned int remote_num_handles;
>         unsigned long remote_arg_size;
> -       unsigned long flags;
>
>         if (cmd == KCOV_REMOTE_ENABLE) {
>                 if (get_user(remote_num_handles, (unsigned __user *)(arg +
> @@ -713,10 +728,7 @@ static long kcov_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
>         }
>
>         kcov = filep->private_data;
> -       spin_lock_irqsave(&kcov->lock, flags);
> -       res = kcov_ioctl_locked(kcov, cmd, arg);
> -       spin_unlock_irqrestore(&kcov->lock, flags);
> -
> +       res = kcov_ioctl_unlocked(kcov, cmd, arg);
>         kfree(remote_arg);
>
>         return res;
> --
> 2.34.1.173.g76aa8bc2d0-goog
>

Thanks!


* Re: [PATCH] kcov: properly handle subsequent mmap calls
From: Aleksandr Nogikh @ 2021-12-21 17:05 UTC (permalink / raw)
  To: Andrey Konovalov
  Cc: kasan-dev, LKML, Andrew Morton, Dmitry Vyukov, Marco Elver,
	Alexander Potapenko, Taras Madan, Sebastian Andrzej Siewior

Hi Andrey,

Thank you very much for commenting!

I've prepared and sent v2 of the series:
https://lkml.org/lkml/2021/12/21/737

--
Best Regards,
Aleksandr

On Mon, Dec 20, 2021 at 4:59 PM Andrey Konovalov <andreyknvl@gmail.com> wrote:
> [...]
