* [PATCH net-next V3] ptp: fix corrupted list in ptp_open
@ 2023-11-03 13:15 Edward Adam Davis
  2023-11-03 23:18 ` Richard Cochran
  2023-11-04  2:07 ` Richard Cochran
  0 siblings, 2 replies; 5+ messages in thread
From: Edward Adam Davis @ 2023-11-03 13:15 UTC (permalink / raw)
  To: jeremy
  Cc: davem, habetsm.xilinx, linux-kernel, netdev, reibax,
	richardcochran, syzbot+df3f3ef31f60781fa911

There is no lock protection when writing ptp->tsevqs in ptp_open() and
ptp_release(), which can corrupt the list. Use a mutex to serialize
access and avoid this issue.

Moreover, ptp_release() should not be used to release the queue in
ptp_read(); that call is removed as well.

Reported-and-tested-by: syzbot+df3f3ef31f60781fa911@syzkaller.appspotmail.com
Fixes: 8f5de6fb2453 ("ptp: support multiple timestamp event readers")
Signed-off-by: Edward Adam Davis <eadavis@qq.com>
---
 drivers/ptp/ptp_chardev.c | 12 ++++++++++--
 drivers/ptp/ptp_clock.c   |  3 +++
 drivers/ptp/ptp_private.h |  1 +
 3 files changed, 14 insertions(+), 2 deletions(-)

diff --git a/drivers/ptp/ptp_chardev.c b/drivers/ptp/ptp_chardev.c
index 282cd7d24077..6e9762a54b14 100644
--- a/drivers/ptp/ptp_chardev.c
+++ b/drivers/ptp/ptp_chardev.c
@@ -119,8 +119,13 @@ int ptp_open(struct posix_clock_context *pccontext, fmode_t fmode)
 	}
 	bitmap_set(queue->mask, 0, PTP_MAX_CHANNELS);
 	spin_lock_init(&queue->lock);
+	if (mutex_lock_interruptible(&ptp->tsevq_mux)) {
+		kfree(queue);
+		return -ERESTARTSYS;
+	}
 	list_add_tail(&queue->qlist, &ptp->tsevqs);
 	pccontext->private_clkdata = queue;
+	mutex_unlock(&ptp->tsevq_mux);
 
 	/* Debugfs contents */
 	sprintf(debugfsname, "0x%p", queue);
@@ -138,14 +143,19 @@ int ptp_open(struct posix_clock_context *pccontext, fmode_t fmode)
 int ptp_release(struct posix_clock_context *pccontext)
 {
 	struct timestamp_event_queue *queue = pccontext->private_clkdata;
+	struct ptp_clock *ptp =
+		container_of(pccontext->clk, struct ptp_clock, clock);
 	unsigned long flags;
 
 	if (queue) {
+		if (mutex_lock_interruptible(&ptp->tsevq_mux)) 
+			return -ERESTARTSYS;
 		debugfs_remove(queue->debugfs_instance);
 		pccontext->private_clkdata = NULL;
 		spin_lock_irqsave(&queue->lock, flags);
 		list_del(&queue->qlist);
 		spin_unlock_irqrestore(&queue->lock, flags);
+		mutex_unlock(&ptp->tsevq_mux);
 		bitmap_free(queue->mask);
 		kfree(queue);
 	}
@@ -585,7 +595,5 @@ ssize_t ptp_read(struct posix_clock_context *pccontext, uint rdflags,
 free_event:
 	kfree(event);
 exit:
-	if (result < 0)
-		ptp_release(pccontext);
 	return result;
 }
diff --git a/drivers/ptp/ptp_clock.c b/drivers/ptp/ptp_clock.c
index 3d1b0a97301c..7930db6ec18d 100644
--- a/drivers/ptp/ptp_clock.c
+++ b/drivers/ptp/ptp_clock.c
@@ -176,6 +176,7 @@ static void ptp_clock_release(struct device *dev)
 
 	ptp_cleanup_pin_groups(ptp);
 	kfree(ptp->vclock_index);
+	mutex_destroy(&ptp->tsevq_mux);
 	mutex_destroy(&ptp->pincfg_mux);
 	mutex_destroy(&ptp->n_vclocks_mux);
 	/* Delete first entry */
@@ -247,6 +248,7 @@ struct ptp_clock *ptp_clock_register(struct ptp_clock_info *info,
 	if (!queue)
 		goto no_memory_queue;
 	list_add_tail(&queue->qlist, &ptp->tsevqs);
+	mutex_init(&ptp->tsevq_mux);
 	queue->mask = bitmap_alloc(PTP_MAX_CHANNELS, GFP_KERNEL);
 	if (!queue->mask)
 		goto no_memory_bitmap;
@@ -356,6 +358,7 @@ struct ptp_clock *ptp_clock_register(struct ptp_clock_info *info,
 	if (ptp->kworker)
 		kthread_destroy_worker(ptp->kworker);
 kworker_err:
+	mutex_destroy(&ptp->tsevq_mux);
 	mutex_destroy(&ptp->pincfg_mux);
 	mutex_destroy(&ptp->n_vclocks_mux);
 	bitmap_free(queue->mask);
diff --git a/drivers/ptp/ptp_private.h b/drivers/ptp/ptp_private.h
index 52f87e394aa6..1525bd2059ba 100644
--- a/drivers/ptp/ptp_private.h
+++ b/drivers/ptp/ptp_private.h
@@ -44,6 +44,7 @@ struct ptp_clock {
 	struct pps_device *pps_source;
 	long dialed_frequency; /* remembers the frequency adjustment */
 	struct list_head tsevqs; /* timestamp fifo list */
+	struct mutex tsevq_mux; /* one process at a time reading the fifo */
 	struct mutex pincfg_mux; /* protect concurrent info->pin_config access */
 	wait_queue_head_t tsev_wq;
 	int defunct; /* tells readers to go away when clock is being removed */
-- 
2.25.1



* Re: [PATCH net-next V3] ptp: fix corrupted list in ptp_open
  2023-11-03 13:15 [PATCH net-next V3] ptp: fix corrupted list in ptp_open Edward Adam Davis
@ 2023-11-03 23:18 ` Richard Cochran
  2023-11-03 23:24   ` Richard Cochran
  2023-11-04  2:08   ` Richard Cochran
  2023-11-04  2:07 ` Richard Cochran
  1 sibling, 2 replies; 5+ messages in thread
From: Richard Cochran @ 2023-11-03 23:18 UTC (permalink / raw)
  To: Edward Adam Davis
  Cc: jeremy, davem, habetsm.xilinx, linux-kernel, netdev, reibax,
	syzbot+df3f3ef31f60781fa911

On Fri, Nov 03, 2023 at 09:15:03PM +0800, Edward Adam Davis wrote:
> There is no lock protection when writing ptp->tsevqs in ptp_open() and
> ptp_release(), which can corrupt the list. Use a mutex to serialize
> access and avoid this issue.

The problem is the bogus call to ptp_release() in ptp_read().

Just delete that.

No need for another mutex.

Thanks,
Richard


* Re: [PATCH net-next V3] ptp: fix corrupted list in ptp_open
  2023-11-03 23:18 ` Richard Cochran
@ 2023-11-03 23:24   ` Richard Cochran
  2023-11-04  2:08   ` Richard Cochran
  1 sibling, 0 replies; 5+ messages in thread
From: Richard Cochran @ 2023-11-03 23:24 UTC (permalink / raw)
  To: Edward Adam Davis
  Cc: jeremy, davem, habetsm.xilinx, linux-kernel, netdev, reibax,
	syzbot+df3f3ef31f60781fa911

On Fri, Nov 03, 2023 at 04:18:44PM -0700, Richard Cochran wrote:
> On Fri, Nov 03, 2023 at 09:15:03PM +0800, Edward Adam Davis wrote:
> > There is no lock protection when writing ptp->tsevqs in ptp_open() and
> > ptp_release(), which can corrupt the list. Use a mutex to serialize
> > access and avoid this issue.
> 
> The problem is the bogus call to ptp_release() in ptp_read().
> 
> Just delete that.

Like this...

diff --git a/drivers/ptp/ptp_chardev.c b/drivers/ptp/ptp_chardev.c
index 282cd7d24077..27c1ef493617 100644
--- a/drivers/ptp/ptp_chardev.c
+++ b/drivers/ptp/ptp_chardev.c
@@ -585,7 +585,5 @@ ssize_t ptp_read(struct posix_clock_context *pccontext, uint rdflags,
 free_event:
 	kfree(event);
 exit:
-	if (result < 0)
-		ptp_release(pccontext);
 	return result;
 }


* Re: [PATCH net-next V3] ptp: fix corrupted list in ptp_open
  2023-11-03 13:15 [PATCH net-next V3] ptp: fix corrupted list in ptp_open Edward Adam Davis
  2023-11-03 23:18 ` Richard Cochran
@ 2023-11-04  2:07 ` Richard Cochran
  1 sibling, 0 replies; 5+ messages in thread
From: Richard Cochran @ 2023-11-04  2:07 UTC (permalink / raw)
  To: Edward Adam Davis
  Cc: jeremy, davem, habetsm.xilinx, linux-kernel, netdev, reibax,
	syzbot+df3f3ef31f60781fa911

On Fri, Nov 03, 2023 at 09:15:03PM +0800, Edward Adam Davis wrote:
> There is no lock protection when writing ptp->tsevqs in ptp_open() and
> ptp_release(), which can corrupt the list. Use a mutex to serialize
> access and avoid this issue.
> 
> Moreover, ptp_release() should not be used to release the queue in
> ptp_read(); that call is removed as well.

Oh, now I see what you are fixing...

> @@ -138,14 +143,19 @@ int ptp_open(struct posix_clock_context *pccontext, fmode_t fmode)
>  int ptp_release(struct posix_clock_context *pccontext)
>  {
>  	struct timestamp_event_queue *queue = pccontext->private_clkdata;
> +	struct ptp_clock *ptp =
> +		container_of(pccontext->clk, struct ptp_clock, clock);
>  	unsigned long flags;
>  
>  	if (queue) {
> +		if (mutex_lock_interruptible(&ptp->tsevq_mux)) 
> +			return -ERESTARTSYS;

I don't think it is a good idea to return ERESTARTSYS on signal here.
The release method needs to succeed.

>  		debugfs_remove(queue->debugfs_instance);
>  		pccontext->private_clkdata = NULL;
>  		spin_lock_irqsave(&queue->lock, flags);

This spin lock is wrong.  The spin lock protects the queue, not the
list of queues.

The spin lock/unlock needs to be replaced with mutex lock/unlock.

>  		list_del(&queue->qlist);
>  		spin_unlock_irqrestore(&queue->lock, flags);
> +		mutex_unlock(&ptp->tsevq_mux);
>  		bitmap_free(queue->mask);
>  		kfree(queue);
>  	}
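
I.e., with both of those changes ptp_release() would look roughly like
this (untested sketch, keeping the tsevq_mux name from the posted patch):

int ptp_release(struct posix_clock_context *pccontext)
{
	struct timestamp_event_queue *queue = pccontext->private_clkdata;
	struct ptp_clock *ptp =
		container_of(pccontext->clk, struct ptp_clock, clock);

	if (queue) {
		debugfs_remove(queue->debugfs_instance);
		pccontext->private_clkdata = NULL;
		/*
		 * The mutex, not the queue spinlock, protects the tsevqs
		 * list; take it unconditionally since release must succeed.
		 */
		mutex_lock(&ptp->tsevq_mux);
		list_del(&queue->qlist);
		mutex_unlock(&ptp->tsevq_mux);
		bitmap_free(queue->mask);
		kfree(queue);
	}
	return 0;
}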

> diff --git a/drivers/ptp/ptp_private.h b/drivers/ptp/ptp_private.h
> index 52f87e394aa6..1525bd2059ba 100644
> --- a/drivers/ptp/ptp_private.h
> +++ b/drivers/ptp/ptp_private.h
> @@ -44,6 +44,7 @@ struct ptp_clock {
>  	struct pps_device *pps_source;
>  	long dialed_frequency; /* remembers the frequency adjustment */
>  	struct list_head tsevqs; /* timestamp fifo list */
> +	struct mutex tsevq_mux; /* one process at a time reading the fifo */

This comment is very misleading.  The mutex does not protect the
fifo.  It protects 'tsevqs' from concurrent access.
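
Something along these lines would be clearer (wording suggestion only):

	struct mutex tsevq_mux; /* protects tsevqs from concurrent access */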

Thanks,
Richard


* Re: [PATCH net-next V3] ptp: fix corrupted list in ptp_open
  2023-11-03 23:18 ` Richard Cochran
  2023-11-03 23:24   ` Richard Cochran
@ 2023-11-04  2:08   ` Richard Cochran
  1 sibling, 0 replies; 5+ messages in thread
From: Richard Cochran @ 2023-11-04  2:08 UTC (permalink / raw)
  To: Edward Adam Davis
  Cc: jeremy, davem, habetsm.xilinx, linux-kernel, netdev, reibax,
	syzbot+df3f3ef31f60781fa911

On Fri, Nov 03, 2023 at 04:18:44PM -0700, Richard Cochran wrote:
> No need for another mutex.

Actually the mutex is needed.

Thanks,
Richard


