From: Vinod Koul <vinod.koul@intel.com>
To: Sinan Kaya <okaya@codeaurora.org>
Cc: dmaengine@vger.kernel.org, timur@codeaurora.org,
	cov@codeaurora.org, jcm@redhat.com, agross@codeaurora.org,
	arnd@arndb.de, linux-arm-msm@vger.kernel.org,
	linux-arm-kernel@lists.infradead.org,
	linux-kernel@vger.kernel.org
Subject: Re: [PATCH V7 3/3] dma: add Qualcomm Technologies HIDMA channel driver
Date: Mon, 30 Nov 2015 14:29:48 +0530
Message-ID: <20151130085948.GC3901@localhost>
In-Reply-To: <1448245707-27499-4-git-send-email-okaya@codeaurora.org>

On Sun, Nov 22, 2015 at 09:28:25PM -0500, Sinan Kaya wrote:
> This patch adds support for hidma engine. The driver consists
> of two logical blocks. The DMA engine interface and the
> low-level interface. The hardware only supports memcpy/memset
> and this driver only support memcpy interface. HW and driver
> doesn't support slave interface.
> 
> Signed-off-by: Sinan Kaya <okaya@codeaurora.org>
> ---
>  .../devicetree/bindings/dma/qcom_hidma.txt         |  18 +
>  drivers/dma/qcom/Kconfig                           |  10 +
>  drivers/dma/qcom/Makefile                          |   2 +
>  drivers/dma/qcom/hidma.c                           | 706 ++++++++++++++++
>  drivers/dma/qcom/hidma.h                           | 157 ++++
>  drivers/dma/qcom/hidma_dbg.c                       | 217 +++++
>  drivers/dma/qcom/hidma_ll.c                        | 924 +++++++++++++++++++++
>  7 files changed, 2034 insertions(+)

This was one of the reasons for the slow review of this. I started a few
times, but the sheer size of the patches kept pushing it back.

Pls help reviewers by splitting your code; looking at this, that could have
been done.

> +#include <linux/dmaengine.h>
> +#include <linux/dma-mapping.h>
> +#include <linux/err.h>
> +#include <linux/init.h>
> +#include <linux/interrupt.h>
> +#include <linux/list.h>
> +#include <linux/module.h>
> +#include <linux/platform_device.h>
> +#include <linux/slab.h>
> +#include <linux/spinlock.h>
> +#include <linux/of_dma.h>
> +#include <linux/property.h>
> +#include <linux/delay.h>
> +#include <linux/highmem.h>
> +#include <linux/io.h>
> +#include <linux/sched.h>
> +#include <linux/wait.h>
> +#include <linux/acpi.h>
> +#include <linux/irq.h>
> +#include <linux/atomic.h>
> +#include <linux/pm_runtime.h>

Do you need so many headers...?

> +MODULE_PARM_DESC(event_channel_idx,
> +		"event channel index array for the notifications");

Can you explain this parameter in detail, pls?

> +static void hidma_callback(void *data)
> +{
> +	struct hidma_desc *mdesc = data;
> +	struct hidma_chan *mchan = to_hidma_chan(mdesc->desc.chan);
> +	struct dma_device *ddev = mchan->chan.device;
> +	struct hidma_dev *dmadev = to_hidma_dev(ddev);
> +	unsigned long irqflags;
> +	bool queued = false;
> +
> +	spin_lock_irqsave(&mchan->lock, irqflags);
> +	if (mdesc->node.next) {
> +		/* Delete from the active list, add to completed list */
> +		list_move_tail(&mdesc->node, &mchan->completed);
> +		queued = true;
> +	}
> +	spin_unlock_irqrestore(&mchan->lock, irqflags);
> +
> +	hidma_process_completed(dmadev);
> +
> +	if (queued) {
> +		pm_runtime_mark_last_busy(dmadev->ddev.dev);
> +		pm_runtime_put_autosuspend(dmadev->ddev.dev);
> +	}
> +}

Callback typically refers to the client callback invoked upon dma completion;
can you explain what you are trying to do here?
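
For contrast, the usual shape is that the driver's own irq/tasklet path
completes the cookie and then notifies the client; a minimal sketch, with
the surrounding names purely illustrative:

/* runs from the driver's completion path, not as a client callback */
dma_cookie_complete(&mdesc->desc);
if (mdesc->desc.callback)
        mdesc->desc.callback(mdesc->desc.callback_param);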

> +static int hidma_chan_init(struct hidma_dev *dmadev, u32 dma_sig)
> +{
> +	struct hidma_chan *mchan;
> +	struct dma_device *ddev;
> +
> +	mchan = devm_kzalloc(dmadev->ddev.dev, sizeof(*mchan), GFP_KERNEL);
> +	if (!mchan)
> +		return -ENOMEM;
> +
> +	ddev = &dmadev->ddev;
> +	mchan->dma_sig = dma_sig;
> +	mchan->dmadev = dmadev;
> +	mchan->chan.device = ddev;
> +	dma_cookie_init(&mchan->chan);
> +
> +	INIT_LIST_HEAD(&mchan->free);
> +	INIT_LIST_HEAD(&mchan->prepared);
> +	INIT_LIST_HEAD(&mchan->active);
> +	INIT_LIST_HEAD(&mchan->completed);
> +
> +	spin_lock_init(&mchan->lock);
> +	list_add_tail(&mchan->chan.device_node, &ddev->channels);
> +	dmadev->ddev.chancnt++;

Have you looked at the vchan implementation and moving the list handling to
that layer?
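
For reference, a rough sketch of what a vchan conversion could look like;
the lock and the list handling move into struct virt_dma_chan (the exact
layout of your structs is an assumption here):

#include "../virt-dma.h"

struct hidma_desc {
        struct virt_dma_desc vd;        /* tx descriptor + list node */
        u32 tre_ch;
};

struct hidma_chan {
        struct virt_dma_chan vc;        /* lock and lists live here */
        struct hidma_dev *dmadev;
        u32 dma_sig;
};

static void hidma_desc_free(struct virt_dma_desc *vd)
{
        kfree(container_of(vd, struct hidma_desc, vd));
}

static int hidma_chan_init(struct hidma_dev *dmadev, u32 dma_sig)
{
        struct hidma_chan *mchan;

        mchan = devm_kzalloc(dmadev->ddev.dev, sizeof(*mchan), GFP_KERNEL);
        if (!mchan)
                return -ENOMEM;

        mchan->dmadev = dmadev;
        mchan->dma_sig = dma_sig;
        mchan->vc.desc_free = hidma_desc_free;
        /* replaces the INIT_LIST_HEAD/spin_lock_init block above */
        vchan_init(&mchan->vc, &dmadev->ddev);
        return 0;
}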

> +	return 0;
> +}
> +
> +static void hidma_issue_pending(struct dma_chan *dmach)
> +{
> +	struct hidma_chan *mchan = to_hidma_chan(dmach);
> +	struct hidma_dev *dmadev = mchan->dmadev;
> +
> +	/* PM will be released in hidma_callback function. */
> +	pm_runtime_get_sync(dmadev->ddev.dev);

Oh no, this is not allowed. pm_runtime_get_sync() can sleep and
issue_pending can be invoked from atomic context.
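
One way out, as a sketch: the async pm_runtime_get() is safe in atomic
context; start the engine here if the device is already powered, and let
the runtime_resume callback do it otherwise (hidma_ll_start() is a stand-in
name, not necessarily your low-level API):

static void hidma_issue_pending(struct dma_chan *dmach)
{
        struct hidma_chan *mchan = to_hidma_chan(dmach);
        struct hidma_dev *dmadev = mchan->dmadev;

        /* async get: never sleeps, resume runs off the pm workqueue */
        if (pm_runtime_get(dmadev->ddev.dev) < 0) {
                pm_runtime_put_noidle(dmadev->ddev.dev);
                return;
        }

        if (pm_runtime_active(dmadev->ddev.dev))
                hidma_ll_start(dmadev->lldev);
        /* else: the runtime_resume callback kicks the engine */
}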

> +static enum dma_status hidma_tx_status(struct dma_chan *dmach,
> +			dma_cookie_t cookie, struct dma_tx_state *txstate)
> +{
> +	struct hidma_chan *mchan = to_hidma_chan(dmach);
> +	enum dma_status ret;
> +
> +	if (mchan->paused)
> +		ret = DMA_PAUSED;

This is not quite right. The status query is for a descriptor and NOT for
the channel. Here, if the descriptor queried is the one currently running,
it would be DMA_PAUSED; for the rest of the pending txns, it would be
DMA_IN_PROGRESS.
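
Something along these lines would do it: report DMA_PAUSED only for the
descriptor actually sitting on the paused hardware (mchan->running is an
assumed field tracking the in-flight descriptor):

static enum dma_status hidma_tx_status(struct dma_chan *dmach,
                dma_cookie_t cookie, struct dma_tx_state *txstate)
{
        struct hidma_chan *mchan = to_hidma_chan(dmach);
        enum dma_status ret;

        ret = dma_cookie_status(dmach, cookie, txstate);
        if (ret == DMA_COMPLETE)
                return ret;

        if (mchan->paused && ret == DMA_IN_PROGRESS) {
                unsigned long flags;
                dma_cookie_t run_cookie = -EINVAL;

                spin_lock_irqsave(&mchan->lock, flags);
                if (mchan->running)
                        run_cookie = mchan->running->desc.cookie;
                spin_unlock_irqrestore(&mchan->lock, flags);

                /* only the stalled txn reports as paused */
                if (run_cookie == cookie)
                        ret = DMA_PAUSED;
        }
        return ret;
}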

> +static dma_cookie_t hidma_tx_submit(struct dma_async_tx_descriptor *txd)
> +{
> +	struct hidma_chan *mchan = to_hidma_chan(txd->chan);
> +	struct hidma_dev *dmadev = mchan->dmadev;
> +	struct hidma_desc *mdesc;
> +	unsigned long irqflags;
> +	dma_cookie_t cookie;
> +
> +	if (!hidma_ll_isenabled(dmadev->lldev))
> +		return -ENODEV;

Is this not too late for this check? You should fail this in the prepare
method.
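
i.e. something like this in the memcpy prepare callback, while the client
can still fall back to a CPU copy (sketch, trimmed to the relevant part):

static struct dma_async_tx_descriptor *
hidma_prep_dma_memcpy(struct dma_chan *dmach, dma_addr_t dest,
                      dma_addr_t src, size_t len, unsigned long flags)
{
        struct hidma_chan *mchan = to_hidma_chan(dmach);
        struct hidma_dev *dmadev = mchan->dmadev;

        /* refuse new work before the client commits to it */
        if (!hidma_ll_isenabled(dmadev->lldev))
                return NULL;

        /* ... the existing descriptor setup continues here ... */
}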


> +static int hidma_alloc_chan_resources(struct dma_chan *dmach)
> +{
> +	struct hidma_chan *mchan = to_hidma_chan(dmach);
> +	struct hidma_dev *dmadev = mchan->dmadev;
> +	struct hidma_desc *mdesc, *tmp;
> +	unsigned long irqflags;
> +	LIST_HEAD(descs);
> +	unsigned int i;
> +	int rc = 0;
> +
> +	if (mchan->allocated)
> +		return 0;
> +
> +	/* Alloc descriptors for this channel */
> +	for (i = 0; i < dmadev->nr_descriptors; i++) {
> +		mdesc = kzalloc(sizeof(struct hidma_desc), GFP_KERNEL);

GFP_NOWAIT pls, this can be called from atomic context

> +		if (!mdesc) {
> +			rc = -ENOMEM;
> +			break;
> +		}
> +		dma_async_tx_descriptor_init(&mdesc->desc, dmach);
> +		mdesc->desc.flags = DMA_CTRL_ACK;

What is the purpose of setting this flag here?

> +static void hidma_free_chan_resources(struct dma_chan *dmach)
> +{
> +	struct hidma_chan *mchan = to_hidma_chan(dmach);
> +	struct hidma_dev *mdma = mchan->dmadev;
> +	struct hidma_desc *mdesc, *tmp;
> +	unsigned long irqflags;
> +	LIST_HEAD(descs);
> +
> +	if (!list_empty(&mchan->prepared) || !list_empty(&mchan->active) ||
> +		!list_empty(&mchan->completed)) {
> +		/*
> +		 * We have unfinished requests waiting.
> +		 * Terminate the request from the hardware.

That sounds like a bug.. free should be called when txns are
completed/aborted

> +		 */
> +		hidma_cleanup_pending_tre(mdma->lldev, ERR_INFO_SW,
> +					ERR_CODE_UNEXPECTED_TERMINATE);
> +
> +		/* Give enough time for completions to be called. */
> +		msleep(100);

Should we not poll/read after the delay? Also, we are still in atomic context.
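
i.e. instead of the blind msleep(100), something like this, where
hidma_ll_isidle() stands in for whatever idle indication the low-level
layer can give you:

unsigned long timeout = jiffies + msecs_to_jiffies(100);

/* bounded poll on the real channel state */
while (!hidma_ll_isidle(mdma->lldev)) {
        if (time_after(jiffies, timeout)) {
                dev_warn(mdma->ddev.dev, "channel did not go idle\n");
                break;
        }
        udelay(10);
}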

> +	}
> +
> +	spin_lock_irqsave(&mchan->lock, irqflags);
> +	/* Channel must be idle */

Sorry, I do not like that assumption.

> +	/* return all user requests */
> +	list_for_each_entry_safe(mdesc, tmp, &list, node) {
> +		struct dma_async_tx_descriptor *txd = &mdesc->desc;
> +		dma_async_tx_callback callback = mdesc->desc.callback;
> +		void *param = mdesc->desc.callback_param;
> +		enum dma_status status;
> +
> +		dma_descriptor_unmap(txd);
> +
> +		status = hidma_ll_status(dmadev->lldev, mdesc->tre_ch);
> +		/*
> +		 * The API requires that no submissions are done from a
> +		 * callback, so we don't need to drop the lock here
> +		 */

That is a correct assumption for async_tx, which is deprecated now.
In fact, the converse is true for the rest!
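
For the other clients you would splice the completed descriptors off under
the lock and invoke the callbacks with the lock dropped, roughly:

spin_lock_irqsave(&mchan->lock, irqflags);
list_splice_tail_init(&mchan->completed, &list);
spin_unlock_irqrestore(&mchan->lock, irqflags);

list_for_each_entry_safe(mdesc, tmp, &list, node) {
        dma_async_tx_callback callback = mdesc->desc.callback;
        void *param = mdesc->desc.callback_param;

        dma_descriptor_unmap(&mdesc->desc);
        /* lock not held: the client may submit again from here */
        if (callback)
                callback(param);
}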

> +static int hidma_pause(struct dma_chan *chan)
> +{
> +	struct hidma_chan *mchan;
> +	struct hidma_dev *dmadev;
> +
> +	mchan = to_hidma_chan(chan);
> +	dmadev = to_hidma_dev(mchan->chan.device);
> +	if (!mchan->paused) {
> +		pm_runtime_get_sync(dmadev->ddev.dev);
> +		if (hidma_ll_pause(dmadev->lldev))
> +			dev_warn(dmadev->ddev.dev, "channel did not stop\n");
> +		mchan->paused = true;
> +		pm_runtime_mark_last_busy(dmadev->ddev.dev);
> +		pm_runtime_put_autosuspend(dmadev->ddev.dev);
> +	}
> +	return 0;
> +}

This is interesting; we associate pause/resume with slave dma ops, not with
memcpy ops. What is the reason for adding pause/resume here?

> +static int hidma_remove(struct platform_device *pdev)
> +{
> +	struct hidma_dev *dmadev = platform_get_drvdata(pdev);
> +
> +	pm_runtime_get_sync(dmadev->ddev.dev);
> +	dma_async_device_unregister(&dmadev->ddev);
> +	hidma_debug_uninit(dmadev);
> +	hidma_ll_uninit(dmadev->lldev);
> +	hidma_free(dmadev);
> +
> +	dev_info(&pdev->dev, "HI-DMA engine removed\n");
> +	pm_runtime_put_sync_suspend(&pdev->dev);
> +	pm_runtime_disable(&pdev->dev);

and your irq is still active and can invoke your irq handler!
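
e.g. free the irq before tearing the rest down, assuming the mapped irq is
kept in dmadev->irq and the handler's dev_id is the lldev:

/* after this, no handler can run against freed state */
devm_free_irq(dmadev->ddev.dev, dmadev->irq, dmadev->lldev);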

> +
> +	return 0;
> +}
> +
> +#if IS_ENABLED(CONFIG_ACPI)
> +static const struct acpi_device_id hidma_acpi_ids[] = {
> +	{"QCOM8061"},

Qcom and ACPI, that is a very interesting combination!


-- 
~Vinod
