All of lore.kernel.org
 help / color / mirror / Atom feed
From: Alexandru Ardelean <ardeleanalex@gmail.com>
To: Paul Cercueil <paul@crapouillou.net>
Cc: "Jonathan Cameron" <jic23@kernel.org>,
	"Lars-Peter Clausen" <lars@metafoo.de>,
	"Michael Hennerich" <Michael.Hennerich@analog.com>,
	"Sumit Semwal" <sumit.semwal@linaro.org>,
	"Christian König" <christian.koenig@amd.com>,
	linux-iio <linux-iio@vger.kernel.org>,
	LKML <linux-kernel@vger.kernel.org>,
	linux-media@vger.kernel.org, dri-devel@lists.freedesktop.org,
	linaro-mm-sig@lists.linaro.org
Subject: Re: [PATCH 01/15] iio: buffer-dma: Get rid of incoming/outgoing queues
Date: Tue, 16 Nov 2021 10:23:19 +0200	[thread overview]
Message-ID: <CA+U=DsrdB=LMdLZbSq1GeB0jsLAP08TZk6O=RgtN8ASf7fFzrQ@mail.gmail.com> (raw)
In-Reply-To: <20211115141925.60164-2-paul@crapouillou.net>

On Mon, Nov 15, 2021 at 4:19 PM Paul Cercueil <paul@crapouillou.net> wrote:
>
> The buffer-dma code was using two queues, incoming and outgoing, to
> manage the state of the blocks in use.
>
> While this totally works, it adds some complexity to the code,
> especially since the code only manages 2 blocks. It is much easier to
> just check each block's state manually, and keep a counter for the next
> block to dequeue.
>
> Since the new DMABUF based API wouldn't use these incoming and outgoing
> queues anyway, getting rid of them now makes the upcoming changes
> simpler.
>

Reviewed-by: Alexandru Ardelean <ardeleanalex@gmail.com>

> Signed-off-by: Paul Cercueil <paul@crapouillou.net>
> ---
>  drivers/iio/buffer/industrialio-buffer-dma.c | 68 ++++++++++----------
>  include/linux/iio/buffer-dma.h               |  7 +-
>  2 files changed, 37 insertions(+), 38 deletions(-)
>
> diff --git a/drivers/iio/buffer/industrialio-buffer-dma.c b/drivers/iio/buffer/industrialio-buffer-dma.c
> index d348af8b9705..abac88f20104 100644
> --- a/drivers/iio/buffer/industrialio-buffer-dma.c
> +++ b/drivers/iio/buffer/industrialio-buffer-dma.c
> @@ -191,16 +191,8 @@ static struct iio_dma_buffer_block *iio_dma_buffer_alloc_block(
>
>  static void _iio_dma_buffer_block_done(struct iio_dma_buffer_block *block)
>  {
> -       struct iio_dma_buffer_queue *queue = block->queue;
> -
> -       /*
> -        * The buffer has already been freed by the application, just drop the
> -        * reference.
> -        */
> -       if (block->state != IIO_BLOCK_STATE_DEAD) {
> +       if (block->state != IIO_BLOCK_STATE_DEAD)
>                 block->state = IIO_BLOCK_STATE_DONE;
> -               list_add_tail(&block->head, &queue->outgoing);
> -       }
>  }
>
>  /**
> @@ -317,11 +309,8 @@ int iio_dma_buffer_request_update(struct iio_buffer *buffer)
>          * dead. This means we can reset the lists without having to fear
>          * corruption.
>          */
> -       INIT_LIST_HEAD(&queue->outgoing);
>         spin_unlock_irq(&queue->list_lock);
>
> -       INIT_LIST_HEAD(&queue->incoming);
> -
>         for (i = 0; i < ARRAY_SIZE(queue->fileio.blocks); i++) {
>                 if (queue->fileio.blocks[i]) {
>                         block = queue->fileio.blocks[i];
> @@ -346,7 +335,6 @@ int iio_dma_buffer_request_update(struct iio_buffer *buffer)
>                 }
>
>                 block->state = IIO_BLOCK_STATE_QUEUED;
> -               list_add_tail(&block->head, &queue->incoming);
>         }
>
>  out_unlock:
> @@ -401,13 +389,18 @@ int iio_dma_buffer_enable(struct iio_buffer *buffer,
>         struct iio_dev *indio_dev)
>  {
>         struct iio_dma_buffer_queue *queue = iio_buffer_to_queue(buffer);
> -       struct iio_dma_buffer_block *block, *_block;
> +       struct iio_dma_buffer_block *block;
> +       unsigned int i;
>
>         mutex_lock(&queue->lock);
>         queue->active = true;
> -       list_for_each_entry_safe(block, _block, &queue->incoming, head) {
> -               list_del(&block->head);
> -               iio_dma_buffer_submit_block(queue, block);
> +       queue->fileio.next_dequeue = 0;
> +
> +       for (i = 0; i < ARRAY_SIZE(queue->fileio.blocks); i++) {
> +               block = queue->fileio.blocks[i];
> +
> +               if (block->state == IIO_BLOCK_STATE_QUEUED)
> +                       iio_dma_buffer_submit_block(queue, block);
>         }
>         mutex_unlock(&queue->lock);
>
> @@ -442,28 +435,33 @@ EXPORT_SYMBOL_GPL(iio_dma_buffer_disable);
>  static void iio_dma_buffer_enqueue(struct iio_dma_buffer_queue *queue,
>         struct iio_dma_buffer_block *block)
>  {
> -       if (block->state == IIO_BLOCK_STATE_DEAD) {
> +       if (block->state == IIO_BLOCK_STATE_DEAD)
>                 iio_buffer_block_put(block);
> -       } else if (queue->active) {
> +       else if (queue->active)
>                 iio_dma_buffer_submit_block(queue, block);
> -       } else {
> +       else
>                 block->state = IIO_BLOCK_STATE_QUEUED;
> -               list_add_tail(&block->head, &queue->incoming);
> -       }
>  }
>
>  static struct iio_dma_buffer_block *iio_dma_buffer_dequeue(
>         struct iio_dma_buffer_queue *queue)
>  {
>         struct iio_dma_buffer_block *block;
> +       unsigned int idx;
>
>         spin_lock_irq(&queue->list_lock);
> -       block = list_first_entry_or_null(&queue->outgoing, struct
> -               iio_dma_buffer_block, head);
> -       if (block != NULL) {
> -               list_del(&block->head);
> +
> +       idx = queue->fileio.next_dequeue;
> +       block = queue->fileio.blocks[idx];
> +
> +       if (block->state == IIO_BLOCK_STATE_DONE) {
>                 block->state = IIO_BLOCK_STATE_DEQUEUED;
> +               idx = (idx + 1) % ARRAY_SIZE(queue->fileio.blocks);
> +               queue->fileio.next_dequeue = idx;
> +       } else {
> +               block = NULL;
>         }
> +
>         spin_unlock_irq(&queue->list_lock);
>
>         return block;
> @@ -539,6 +537,7 @@ size_t iio_dma_buffer_data_available(struct iio_buffer *buf)
>         struct iio_dma_buffer_queue *queue = iio_buffer_to_queue(buf);
>         struct iio_dma_buffer_block *block;
>         size_t data_available = 0;
> +       unsigned int i;
>
>         /*
>          * For counting the available bytes we'll use the size of the block not
> @@ -552,8 +551,15 @@ size_t iio_dma_buffer_data_available(struct iio_buffer *buf)
>                 data_available += queue->fileio.active_block->size;
>
>         spin_lock_irq(&queue->list_lock);
> -       list_for_each_entry(block, &queue->outgoing, head)
> -               data_available += block->size;
> +
> +       for (i = 0; i < ARRAY_SIZE(queue->fileio.blocks); i++) {
> +               block = queue->fileio.blocks[i];
> +
> +               if (block != queue->fileio.active_block
> +                   && block->state == IIO_BLOCK_STATE_DONE)
> +                       data_available += block->size;
> +       }
> +
>         spin_unlock_irq(&queue->list_lock);
>         mutex_unlock(&queue->lock);
>
> @@ -616,9 +622,6 @@ int iio_dma_buffer_init(struct iio_dma_buffer_queue *queue,
>         queue->dev = dev;
>         queue->ops = ops;
>
> -       INIT_LIST_HEAD(&queue->incoming);
> -       INIT_LIST_HEAD(&queue->outgoing);
> -
>         mutex_init(&queue->lock);
>         spin_lock_init(&queue->list_lock);
>
> @@ -645,11 +648,8 @@ void iio_dma_buffer_exit(struct iio_dma_buffer_queue *queue)
>                         continue;
>                 queue->fileio.blocks[i]->state = IIO_BLOCK_STATE_DEAD;
>         }
> -       INIT_LIST_HEAD(&queue->outgoing);
>         spin_unlock_irq(&queue->list_lock);
>
> -       INIT_LIST_HEAD(&queue->incoming);
> -
>         for (i = 0; i < ARRAY_SIZE(queue->fileio.blocks); i++) {
>                 if (!queue->fileio.blocks[i])
>                         continue;
> diff --git a/include/linux/iio/buffer-dma.h b/include/linux/iio/buffer-dma.h
> index ff15c61bf319..d4ed5ff39d44 100644
> --- a/include/linux/iio/buffer-dma.h
> +++ b/include/linux/iio/buffer-dma.h
> @@ -78,12 +78,15 @@ struct iio_dma_buffer_block {
>   * @active_block: Block being used in read()
>   * @pos: Read offset in the active block
>   * @block_size: Size of each block
> + * @next_dequeue: index of next block that will be dequeued
>   */
>  struct iio_dma_buffer_queue_fileio {
>         struct iio_dma_buffer_block *blocks[2];
>         struct iio_dma_buffer_block *active_block;
>         size_t pos;
>         size_t block_size;
> +
> +       unsigned int next_dequeue;
>  };
>
>  /**
> @@ -97,8 +100,6 @@ struct iio_dma_buffer_queue_fileio {
>   *   atomic context as well as blocks on those lists. This is the outgoing queue
>   *   list and typically also a list of active blocks in the part that handles
>   *   the DMA controller
> - * @incoming: List of buffers on the incoming queue
> - * @outgoing: List of buffers on the outgoing queue
>   * @active: Whether the buffer is currently active
>   * @fileio: FileIO state
>   */
> @@ -109,8 +110,6 @@ struct iio_dma_buffer_queue {
>
>         struct mutex lock;
>         spinlock_t list_lock;
> -       struct list_head incoming;
> -       struct list_head outgoing;
>
>         bool active;
>
> --
> 2.33.0
>

WARNING: multiple messages have this Message-ID (diff)
From: Alexandru Ardelean <ardeleanalex@gmail.com>
To: Paul Cercueil <paul@crapouillou.net>
Cc: "Michael Hennerich" <Michael.Hennerich@analog.com>,
	linux-iio <linux-iio@vger.kernel.org>,
	LKML <linux-kernel@vger.kernel.org>,
	dri-devel@lists.freedesktop.org, linaro-mm-sig@lists.linaro.org,
	"Christian König" <christian.koenig@amd.com>,
	"Jonathan Cameron" <jic23@kernel.org>,
	linux-media@vger.kernel.org
Subject: Re: [PATCH 01/15] iio: buffer-dma: Get rid of incoming/outgoing queues
Date: Tue, 16 Nov 2021 10:23:19 +0200	[thread overview]
Message-ID: <CA+U=DsrdB=LMdLZbSq1GeB0jsLAP08TZk6O=RgtN8ASf7fFzrQ@mail.gmail.com> (raw)
In-Reply-To: <20211115141925.60164-2-paul@crapouillou.net>

On Mon, Nov 15, 2021 at 4:19 PM Paul Cercueil <paul@crapouillou.net> wrote:
>
> The buffer-dma code was using two queues, incoming and outgoing, to
> manage the state of the blocks in use.
>
> While this totally works, it adds some complexity to the code,
> especially since the code only manages 2 blocks. It is much easier to
> just check each block's state manually, and keep a counter for the next
> block to dequeue.
>
> Since the new DMABUF based API wouldn't use these incoming and outgoing
> queues anyway, getting rid of them now makes the upcoming changes
> simpler.
>

Reviewed-by: Alexandru Ardelean <ardeleanalex@gmail.com>

> Signed-off-by: Paul Cercueil <paul@crapouillou.net>
> ---
>  drivers/iio/buffer/industrialio-buffer-dma.c | 68 ++++++++++----------
>  include/linux/iio/buffer-dma.h               |  7 +-
>  2 files changed, 37 insertions(+), 38 deletions(-)
>
> diff --git a/drivers/iio/buffer/industrialio-buffer-dma.c b/drivers/iio/buffer/industrialio-buffer-dma.c
> index d348af8b9705..abac88f20104 100644
> --- a/drivers/iio/buffer/industrialio-buffer-dma.c
> +++ b/drivers/iio/buffer/industrialio-buffer-dma.c
> @@ -191,16 +191,8 @@ static struct iio_dma_buffer_block *iio_dma_buffer_alloc_block(
>
>  static void _iio_dma_buffer_block_done(struct iio_dma_buffer_block *block)
>  {
> -       struct iio_dma_buffer_queue *queue = block->queue;
> -
> -       /*
> -        * The buffer has already been freed by the application, just drop the
> -        * reference.
> -        */
> -       if (block->state != IIO_BLOCK_STATE_DEAD) {
> +       if (block->state != IIO_BLOCK_STATE_DEAD)
>                 block->state = IIO_BLOCK_STATE_DONE;
> -               list_add_tail(&block->head, &queue->outgoing);
> -       }
>  }
>
>  /**
> @@ -317,11 +309,8 @@ int iio_dma_buffer_request_update(struct iio_buffer *buffer)
>          * dead. This means we can reset the lists without having to fear
>          * corruption.
>          */
> -       INIT_LIST_HEAD(&queue->outgoing);
>         spin_unlock_irq(&queue->list_lock);
>
> -       INIT_LIST_HEAD(&queue->incoming);
> -
>         for (i = 0; i < ARRAY_SIZE(queue->fileio.blocks); i++) {
>                 if (queue->fileio.blocks[i]) {
>                         block = queue->fileio.blocks[i];
> @@ -346,7 +335,6 @@ int iio_dma_buffer_request_update(struct iio_buffer *buffer)
>                 }
>
>                 block->state = IIO_BLOCK_STATE_QUEUED;
> -               list_add_tail(&block->head, &queue->incoming);
>         }
>
>  out_unlock:
> @@ -401,13 +389,18 @@ int iio_dma_buffer_enable(struct iio_buffer *buffer,
>         struct iio_dev *indio_dev)
>  {
>         struct iio_dma_buffer_queue *queue = iio_buffer_to_queue(buffer);
> -       struct iio_dma_buffer_block *block, *_block;
> +       struct iio_dma_buffer_block *block;
> +       unsigned int i;
>
>         mutex_lock(&queue->lock);
>         queue->active = true;
> -       list_for_each_entry_safe(block, _block, &queue->incoming, head) {
> -               list_del(&block->head);
> -               iio_dma_buffer_submit_block(queue, block);
> +       queue->fileio.next_dequeue = 0;
> +
> +       for (i = 0; i < ARRAY_SIZE(queue->fileio.blocks); i++) {
> +               block = queue->fileio.blocks[i];
> +
> +               if (block->state == IIO_BLOCK_STATE_QUEUED)
> +                       iio_dma_buffer_submit_block(queue, block);
>         }
>         mutex_unlock(&queue->lock);
>
> @@ -442,28 +435,33 @@ EXPORT_SYMBOL_GPL(iio_dma_buffer_disable);
>  static void iio_dma_buffer_enqueue(struct iio_dma_buffer_queue *queue,
>         struct iio_dma_buffer_block *block)
>  {
> -       if (block->state == IIO_BLOCK_STATE_DEAD) {
> +       if (block->state == IIO_BLOCK_STATE_DEAD)
>                 iio_buffer_block_put(block);
> -       } else if (queue->active) {
> +       else if (queue->active)
>                 iio_dma_buffer_submit_block(queue, block);
> -       } else {
> +       else
>                 block->state = IIO_BLOCK_STATE_QUEUED;
> -               list_add_tail(&block->head, &queue->incoming);
> -       }
>  }
>
>  static struct iio_dma_buffer_block *iio_dma_buffer_dequeue(
>         struct iio_dma_buffer_queue *queue)
>  {
>         struct iio_dma_buffer_block *block;
> +       unsigned int idx;
>
>         spin_lock_irq(&queue->list_lock);
> -       block = list_first_entry_or_null(&queue->outgoing, struct
> -               iio_dma_buffer_block, head);
> -       if (block != NULL) {
> -               list_del(&block->head);
> +
> +       idx = queue->fileio.next_dequeue;
> +       block = queue->fileio.blocks[idx];
> +
> +       if (block->state == IIO_BLOCK_STATE_DONE) {
>                 block->state = IIO_BLOCK_STATE_DEQUEUED;
> +               idx = (idx + 1) % ARRAY_SIZE(queue->fileio.blocks);
> +               queue->fileio.next_dequeue = idx;
> +       } else {
> +               block = NULL;
>         }
> +
>         spin_unlock_irq(&queue->list_lock);
>
>         return block;
> @@ -539,6 +537,7 @@ size_t iio_dma_buffer_data_available(struct iio_buffer *buf)
>         struct iio_dma_buffer_queue *queue = iio_buffer_to_queue(buf);
>         struct iio_dma_buffer_block *block;
>         size_t data_available = 0;
> +       unsigned int i;
>
>         /*
>          * For counting the available bytes we'll use the size of the block not
> @@ -552,8 +551,15 @@ size_t iio_dma_buffer_data_available(struct iio_buffer *buf)
>                 data_available += queue->fileio.active_block->size;
>
>         spin_lock_irq(&queue->list_lock);
> -       list_for_each_entry(block, &queue->outgoing, head)
> -               data_available += block->size;
> +
> +       for (i = 0; i < ARRAY_SIZE(queue->fileio.blocks); i++) {
> +               block = queue->fileio.blocks[i];
> +
> +               if (block != queue->fileio.active_block
> +                   && block->state == IIO_BLOCK_STATE_DONE)
> +                       data_available += block->size;
> +       }
> +
>         spin_unlock_irq(&queue->list_lock);
>         mutex_unlock(&queue->lock);
>
> @@ -616,9 +622,6 @@ int iio_dma_buffer_init(struct iio_dma_buffer_queue *queue,
>         queue->dev = dev;
>         queue->ops = ops;
>
> -       INIT_LIST_HEAD(&queue->incoming);
> -       INIT_LIST_HEAD(&queue->outgoing);
> -
>         mutex_init(&queue->lock);
>         spin_lock_init(&queue->list_lock);
>
> @@ -645,11 +648,8 @@ void iio_dma_buffer_exit(struct iio_dma_buffer_queue *queue)
>                         continue;
>                 queue->fileio.blocks[i]->state = IIO_BLOCK_STATE_DEAD;
>         }
> -       INIT_LIST_HEAD(&queue->outgoing);
>         spin_unlock_irq(&queue->list_lock);
>
> -       INIT_LIST_HEAD(&queue->incoming);
> -
>         for (i = 0; i < ARRAY_SIZE(queue->fileio.blocks); i++) {
>                 if (!queue->fileio.blocks[i])
>                         continue;
> diff --git a/include/linux/iio/buffer-dma.h b/include/linux/iio/buffer-dma.h
> index ff15c61bf319..d4ed5ff39d44 100644
> --- a/include/linux/iio/buffer-dma.h
> +++ b/include/linux/iio/buffer-dma.h
> @@ -78,12 +78,15 @@ struct iio_dma_buffer_block {
>   * @active_block: Block being used in read()
>   * @pos: Read offset in the active block
>   * @block_size: Size of each block
> + * @next_dequeue: index of next block that will be dequeued
>   */
>  struct iio_dma_buffer_queue_fileio {
>         struct iio_dma_buffer_block *blocks[2];
>         struct iio_dma_buffer_block *active_block;
>         size_t pos;
>         size_t block_size;
> +
> +       unsigned int next_dequeue;
>  };
>
>  /**
> @@ -97,8 +100,6 @@ struct iio_dma_buffer_queue_fileio {
>   *   atomic context as well as blocks on those lists. This is the outgoing queue
>   *   list and typically also a list of active blocks in the part that handles
>   *   the DMA controller
> - * @incoming: List of buffers on the incoming queue
> - * @outgoing: List of buffers on the outgoing queue
>   * @active: Whether the buffer is currently active
>   * @fileio: FileIO state
>   */
> @@ -109,8 +110,6 @@ struct iio_dma_buffer_queue {
>
>         struct mutex lock;
>         spinlock_t list_lock;
> -       struct list_head incoming;
> -       struct list_head outgoing;
>
>         bool active;
>
> --
> 2.33.0
>

  reply	other threads:[~2021-11-16  8:23 UTC|newest]

Thread overview: 118+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2021-11-15 14:19 [PATCH 00/15] iio: buffer-dma: write() and new DMABUF based API Paul Cercueil
2021-11-15 14:19 ` Paul Cercueil
2021-11-15 14:19 ` [PATCH 01/15] iio: buffer-dma: Get rid of incoming/outgoing queues Paul Cercueil
2021-11-15 14:19   ` Paul Cercueil
2021-11-16  8:23   ` Alexandru Ardelean [this message]
2021-11-16  8:23     ` Alexandru Ardelean
2021-11-21 14:05   ` Jonathan Cameron
2021-11-21 14:05     ` Jonathan Cameron
2021-11-21 16:23   ` Lars-Peter Clausen
2021-11-21 16:23     ` Lars-Peter Clausen
2021-11-21 17:52     ` Paul Cercueil
2021-11-21 17:52       ` Paul Cercueil
2021-11-21 18:49       ` Lars-Peter Clausen
2021-11-21 18:49         ` Lars-Peter Clausen
2021-11-21 20:08         ` Paul Cercueil
2021-11-21 20:08           ` Paul Cercueil
2021-11-22 15:08           ` Lars-Peter Clausen
2021-11-22 15:08             ` Lars-Peter Clausen
2021-11-22 15:16             ` Paul Cercueil
2021-11-22 15:16               ` Paul Cercueil
2021-11-22 15:17               ` Lars-Peter Clausen
2021-11-22 15:17                 ` Lars-Peter Clausen
2021-11-22 15:27                 ` Paul Cercueil
2021-11-22 15:27                   ` Paul Cercueil
2021-11-15 14:19 ` [PATCH 02/15] iio: buffer-dma: Remove unused iio_buffer_block struct Paul Cercueil
2021-11-15 14:19   ` Paul Cercueil
2021-11-16  8:22   ` Alexandru Ardelean
2021-11-16  8:22     ` Alexandru Ardelean
2021-11-15 14:19 ` [PATCH 03/15] iio: buffer-dma: Use round_down() instead of rounddown() Paul Cercueil
2021-11-15 14:19   ` Paul Cercueil
2021-11-16  8:26   ` Alexandru Ardelean
2021-11-16  8:26     ` Alexandru Ardelean
2021-11-21 14:08   ` Jonathan Cameron
2021-11-21 14:08     ` Jonathan Cameron
2021-11-22 10:00     ` Paul Cercueil
2021-11-22 10:00       ` Paul Cercueil
2021-11-27 15:15       ` Jonathan Cameron
2021-11-27 15:15         ` Jonathan Cameron
2021-11-15 14:19 ` [PATCH 04/15] iio: buffer-dma: Enable buffer write support Paul Cercueil
2021-11-15 14:19   ` Paul Cercueil
2021-11-16  8:52   ` Alexandru Ardelean
2021-11-16  8:52     ` Alexandru Ardelean
2021-11-21 14:20   ` Jonathan Cameron
2021-11-21 14:20     ` Jonathan Cameron
2021-11-21 17:19     ` Paul Cercueil
2021-11-21 17:19       ` Paul Cercueil
2021-11-27 15:17       ` Jonathan Cameron
2021-11-27 15:17         ` Jonathan Cameron
2021-11-15 14:19 ` [PATCH 05/15] iio: buffer-dmaengine: Support specifying buffer direction Paul Cercueil
2021-11-15 14:19   ` Paul Cercueil
2021-11-16  8:53   ` Alexandru Ardelean
2021-11-16  8:53     ` Alexandru Ardelean
2021-11-15 14:19 ` [PATCH 06/15] iio: buffer-dmaengine: Enable write support Paul Cercueil
2021-11-15 14:19   ` Paul Cercueil
2021-11-16  8:55   ` Alexandru Ardelean
2021-11-16  8:55     ` Alexandru Ardelean
2021-11-15 14:19 ` [PATCH 07/15] iio: core: Add new DMABUF interface infrastructure Paul Cercueil
2021-11-15 14:19   ` Paul Cercueil
2021-11-21 14:31   ` Jonathan Cameron
2021-11-21 14:31     ` Jonathan Cameron
2021-11-15 14:19 ` [PATCH 08/15] iio: buffer-dma: split iio_dma_buffer_fileio_free() function Paul Cercueil
2021-11-15 14:19   ` Paul Cercueil
2021-11-16 10:59   ` Alexandru Ardelean
2021-11-16 10:59     ` Alexandru Ardelean
2021-11-21 13:49     ` Jonathan Cameron
2021-11-21 13:49       ` Jonathan Cameron
2021-11-15 14:19 ` [PATCH 09/15] iio: buffer-dma: Use DMABUFs instead of custom solution Paul Cercueil
2021-11-15 14:19   ` Paul Cercueil
2021-11-15 14:19 ` [PATCH 10/15] iio: buffer-dma: Implement new DMABUF based userspace API Paul Cercueil
2021-11-15 14:19   ` Paul Cercueil
2021-11-15 14:19 ` [PATCH 11/15] iio: buffer-dma: Boost performance using write-combine cache setting Paul Cercueil
2021-11-15 14:19   ` Paul Cercueil
2021-11-18 11:45   ` Paul Cercueil
2021-11-18 11:45     ` Paul Cercueil
2021-11-21 15:00   ` Jonathan Cameron
2021-11-21 15:00     ` Jonathan Cameron
2021-11-21 17:43     ` Paul Cercueil
2021-11-21 17:43       ` Paul Cercueil
2021-11-25 17:29       ` Paul Cercueil
2021-11-25 17:29         ` Paul Cercueil
2021-11-27 16:05         ` Jonathan Cameron
2021-11-27 16:05           ` Jonathan Cameron
2021-11-28 13:25           ` Lars-Peter Clausen
2021-11-28 13:25             ` Lars-Peter Clausen
2021-11-27 15:20       ` Jonathan Cameron
2021-11-27 15:20         ` Jonathan Cameron
2021-11-15 14:22 ` [PATCH 12/15] iio: buffer-dmaengine: Support new DMABUF based userspace API Paul Cercueil
2021-11-15 14:22   ` Paul Cercueil
2021-11-15 14:22   ` [PATCH 13/15] iio: core: Add support for cyclic buffers Paul Cercueil
2021-11-15 14:22     ` Paul Cercueil
2021-11-16  9:50     ` Alexandru Ardelean
2021-11-16  9:50       ` Alexandru Ardelean
2021-11-15 14:22   ` [PATCH 14/15] iio: buffer-dmaengine: " Paul Cercueil
2021-11-15 14:22     ` Paul Cercueil
2021-11-16  9:50     ` Alexandru Ardelean
2021-11-16  9:50       ` Alexandru Ardelean
2021-11-15 14:22   ` [PATCH 15/15] Documentation: iio: Document high-speed DMABUF based API Paul Cercueil
2021-11-15 14:22     ` Paul Cercueil
2021-11-21 15:10     ` Jonathan Cameron
2021-11-21 15:10       ` Jonathan Cameron
2021-11-21 17:46       ` Paul Cercueil
2021-11-21 17:46         ` Paul Cercueil
2021-11-15 14:37 ` [PATCH 00/15] iio: buffer-dma: write() and new " Daniel Vetter
2021-11-15 14:37   ` Daniel Vetter
2021-11-15 14:57   ` Paul Cercueil
2021-11-15 14:57     ` Paul Cercueil
2021-11-16 16:02     ` Daniel Vetter
2021-11-16 16:02       ` Daniel Vetter
2021-11-16 16:31       ` Laurent Pinchart
2021-11-16 16:31         ` Laurent Pinchart
2021-11-17  8:48         ` Christian König
2021-11-17  8:48           ` Christian König
2021-11-17 12:50       ` Paul Cercueil
2021-11-17 12:50         ` Paul Cercueil
2021-11-17 13:42         ` Hennerich, Michael
2021-11-17 13:42           ` Hennerich, Michael
2021-11-21 13:57 ` Jonathan Cameron
2021-11-21 13:57   ` Jonathan Cameron

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to='CA+U=DsrdB=LMdLZbSq1GeB0jsLAP08TZk6O=RgtN8ASf7fFzrQ@mail.gmail.com' \
    --to=ardeleanalex@gmail.com \
    --cc=Michael.Hennerich@analog.com \
    --cc=christian.koenig@amd.com \
    --cc=dri-devel@lists.freedesktop.org \
    --cc=jic23@kernel.org \
    --cc=lars@metafoo.de \
    --cc=linaro-mm-sig@lists.linaro.org \
    --cc=linux-iio@vger.kernel.org \
    --cc=linux-kernel@vger.kernel.org \
    --cc=linux-media@vger.kernel.org \
    --cc=paul@crapouillou.net \
    --cc=sumit.semwal@linaro.org \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.