dmaengine.vger.kernel.org archive mirror
 help / color / mirror / Atom feed
* [PATCH] dmaengine: check device and channel list for empty
@ 2020-05-15 19:22 Dave Jiang
  2020-06-05 18:58 ` Dave Jiang
  2020-06-24  7:29 ` Vinod Koul
  0 siblings, 2 replies; 5+ messages in thread
From: Dave Jiang @ 2020-05-15 19:22 UTC (permalink / raw)
  To: vkoul; +Cc: dmaengine, swathi.kovvuri

Check the DMA device list and channel list for emptiness before iterating,
as the iteration functions assume the lists are not empty. With devices and
channels now being hot pluggable, this is a condition that needs to be
checked. Otherwise it can cause the iterator to spin forever.

Fixes: e81274cd6b52 ("dmaengine: add support to dynamic register/unregister of channels")

Reported-by: Swathi Kovvuri <swathi.kovvuri@intel.com>
Signed-off-by: Dave Jiang <dave.jiang@intel.com>
Tested-by: Swathi Kovvuri <swathi.kovvuri@intel.com>
---
 drivers/dma/dmaengine.c |  119 +++++++++++++++++++++++++++++++++++++----------
 1 file changed, 94 insertions(+), 25 deletions(-)

diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index d31076d9ef25..4d29c5f2fcfd 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -83,6 +83,9 @@ static void dmaengine_dbg_summary_show(struct seq_file *s,
 {
 	struct dma_chan *chan;
 
+	if (list_empty(&dma_dev->channels))
+		return;
+
 	list_for_each_entry(chan, &dma_dev->channels, device_node) {
 		if (chan->client_count) {
 			seq_printf(s, " %-13s| %s", dma_chan_name(chan),
@@ -102,6 +105,11 @@ static int dmaengine_summary_show(struct seq_file *s, void *data)
 	struct dma_device *dma_dev = NULL;
 
 	mutex_lock(&dma_list_mutex);
+	if (list_empty(&dma_device_list)) {
+		mutex_unlock(&dma_list_mutex);
+		return 0;
+	}
+
 	list_for_each_entry(dma_dev, &dma_device_list, global_node) {
 		seq_printf(s, "dma%d (%s): number of channels: %u\n",
 			   dma_dev->dev_id, dev_name(dma_dev->dev),
@@ -323,10 +331,15 @@ static struct dma_chan *min_chan(enum dma_transaction_type cap, int cpu)
 	struct dma_chan *min = NULL;
 	struct dma_chan *localmin = NULL;
 
+	if (list_empty(&dma_device_list))
+		return NULL;
+
 	list_for_each_entry(device, &dma_device_list, global_node) {
 		if (!dma_has_cap(cap, device->cap_mask) ||
 		    dma_has_cap(DMA_PRIVATE, device->cap_mask))
 			continue;
+		if (list_empty(&device->channels))
+			continue;
 		list_for_each_entry(chan, &device->channels, device_node) {
 			if (!chan->client_count)
 				continue;
@@ -363,6 +376,9 @@ static void dma_channel_rebalance(void)
 	int cpu;
 	int cap;
 
+	if (list_empty(&dma_device_list))
+		return;
+
 	/* undo the last distribution */
 	for_each_dma_cap_mask(cap, dma_cap_mask_all)
 		for_each_possible_cpu(cpu)
@@ -371,6 +387,8 @@ static void dma_channel_rebalance(void)
 	list_for_each_entry(device, &dma_device_list, global_node) {
 		if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
 			continue;
+		if (list_empty(&device->channels))
+			continue;
 		list_for_each_entry(chan, &device->channels, device_node)
 			chan->table_count = 0;
 	}
@@ -554,6 +572,10 @@ void dma_issue_pending_all(void)
 	struct dma_chan *chan;
 
 	rcu_read_lock();
+	if (list_empty(&dma_device_list)) {
+		rcu_read_unlock();
+		return;
+	}
 	list_for_each_entry_rcu(device, &dma_device_list, global_node) {
 		if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
 			continue;
@@ -611,6 +633,10 @@ static struct dma_chan *private_candidate(const dma_cap_mask_t *mask,
 		dev_dbg(dev->dev, "%s: wrong capabilities\n", __func__);
 		return NULL;
 	}
+
+	if (list_empty(&dev->channels))
+		return NULL;
+
 	/* devices with multiple channels need special handling as we need to
 	 * ensure that all channels are either private or public.
 	 */
@@ -747,6 +773,11 @@ struct dma_chan *__dma_request_channel(const dma_cap_mask_t *mask,
 
 	/* Find a channel */
 	mutex_lock(&dma_list_mutex);
+	if (list_empty(&dma_device_list)) {
+		mutex_unlock(&dma_list_mutex);
+		return NULL;
+	}
+
 	list_for_each_entry_safe(device, _d, &dma_device_list, global_node) {
 		/* Finds a DMA controller with matching device node */
 		if (np && device->dev->of_node && np != device->dev->of_node)
@@ -817,6 +848,11 @@ struct dma_chan *dma_request_chan(struct device *dev, const char *name)
 
 	/* Try to find the channel via the DMA filter map(s) */
 	mutex_lock(&dma_list_mutex);
+	if (list_empty(&dma_device_list)) {
+		mutex_unlock(&dma_list_mutex);
+		return NULL;
+	}
+
 	list_for_each_entry_safe(d, _d, &dma_device_list, global_node) {
 		dma_cap_mask_t mask;
 		const struct dma_slave_map *map = dma_filter_match(d, name, dev);
@@ -940,10 +976,17 @@ void dmaengine_get(void)
 	mutex_lock(&dma_list_mutex);
 	dmaengine_ref_count++;
 
+	if (list_empty(&dma_device_list)) {
+		mutex_unlock(&dma_list_mutex);
+		return;
+	}
+
 	/* try to grab channels */
 	list_for_each_entry_safe(device, _d, &dma_device_list, global_node) {
 		if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
 			continue;
+		if (list_empty(&device->channels))
+			continue;
 		list_for_each_entry(chan, &device->channels, device_node) {
 			err = dma_chan_get(chan);
 			if (err == -ENODEV) {
@@ -978,10 +1021,17 @@ void dmaengine_put(void)
 	mutex_lock(&dma_list_mutex);
 	dmaengine_ref_count--;
 	BUG_ON(dmaengine_ref_count < 0);
+	if (list_empty(&dma_device_list)) {
+		mutex_unlock(&dma_list_mutex);
+		return;
+	}
+
 	/* drop channel references */
 	list_for_each_entry_safe(device, _d, &dma_device_list, global_node) {
 		if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
 			continue;
+		if (list_empty(&device->channels))
+			continue;
 		list_for_each_entry(chan, &device->channels, device_node)
 			dma_chan_put(chan);
 	}
@@ -1130,6 +1180,39 @@ void dma_async_device_channel_unregister(struct dma_device *device,
 }
 EXPORT_SYMBOL_GPL(dma_async_device_channel_unregister);
 
+static int dma_channel_enumeration(struct dma_device *device)
+{
+	struct dma_chan *chan;
+	int rc;
+
+	if (list_empty(&device->channels))
+		return 0;
+
+	/* represent channels in sysfs. Probably want devs too */
+	list_for_each_entry(chan, &device->channels, device_node) {
+		rc = __dma_async_device_channel_register(device, chan);
+		if (rc < 0)
+			return rc;
+	}
+
+	/* take references on public channels */
+	if (dmaengine_ref_count && !dma_has_cap(DMA_PRIVATE, device->cap_mask))
+		list_for_each_entry(chan, &device->channels, device_node) {
+			/* if clients are already waiting for channels we need
+			 * to take references on their behalf
+			 */
+			if (dma_chan_get(chan) == -ENODEV) {
+				/* note we can only get here for the first
+				 * channel as the remaining channels are
+				 * guaranteed to get a reference
+				 */
+				return -ENODEV;
+			}
+		}
+
+	return 0;
+}
+
 /**
  * dma_async_device_register - registers DMA devices found
  * @device: &dma_device
@@ -1245,33 +1328,15 @@ int dma_async_device_register(struct dma_device *device)
 	if (rc != 0)
 		return rc;
 
+	mutex_lock(&dma_list_mutex);
 	mutex_init(&device->chan_mutex);
 	ida_init(&device->chan_ida);
-
-	/* represent channels in sysfs. Probably want devs too */
-	list_for_each_entry(chan, &device->channels, device_node) {
-		rc = __dma_async_device_channel_register(device, chan);
-		if (rc < 0)
-			goto err_out;
+	rc = dma_channel_enumeration(device);
+	if (rc < 0) {
+		mutex_unlock(&dma_list_mutex);
+		goto err_out;
 	}
 
-	mutex_lock(&dma_list_mutex);
-	/* take references on public channels */
-	if (dmaengine_ref_count && !dma_has_cap(DMA_PRIVATE, device->cap_mask))
-		list_for_each_entry(chan, &device->channels, device_node) {
-			/* if clients are already waiting for channels we need
-			 * to take references on their behalf
-			 */
-			if (dma_chan_get(chan) == -ENODEV) {
-				/* note we can only get here for the first
-				 * channel as the remaining channels are
-				 * guaranteed to get a reference
-				 */
-				rc = -ENODEV;
-				mutex_unlock(&dma_list_mutex);
-				goto err_out;
-			}
-		}
 	list_add_tail_rcu(&device->global_node, &dma_device_list);
 	if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
 		device->privatecnt++;	/* Always private */
@@ -1289,6 +1354,9 @@ int dma_async_device_register(struct dma_device *device)
 		return rc;
 	}
 
+	if (list_empty(&device->channels))
+		return rc;
+
 	list_for_each_entry(chan, &device->channels, device_node) {
 		if (chan->local == NULL)
 			continue;
@@ -1315,8 +1383,9 @@ void dma_async_device_unregister(struct dma_device *device)
 
 	dmaengine_debug_unregister(device);
 
-	list_for_each_entry_safe(chan, n, &device->channels, device_node)
-		__dma_async_device_channel_unregister(device, chan);
+	if (!list_empty(&device->channels))
+		list_for_each_entry_safe(chan, n, &device->channels, device_node)
+			__dma_async_device_channel_unregister(device, chan);
 
 	mutex_lock(&dma_list_mutex);
 	/*


^ permalink raw reply related	[flat|nested] 5+ messages in thread

* Re: [PATCH] dmaengine: check device and channel list for empty
  2020-05-15 19:22 [PATCH] dmaengine: check device and channel list for empty Dave Jiang
@ 2020-06-05 18:58 ` Dave Jiang
  2020-06-24  7:29 ` Vinod Koul
  1 sibling, 0 replies; 5+ messages in thread
From: Dave Jiang @ 2020-06-05 18:58 UTC (permalink / raw)
  To: vkoul; +Cc: dmaengine, swathi.kovvuri



On 5/15/2020 12:22 PM, Dave Jiang wrote:
> Check dma device list and channel list for empty before iterate as the
> iteration function assume the list to be not empty. With devices and
> channels now being hot pluggable this is a condition that needs to be
> checked. Otherwise it can cause the iterator to spin forever.
> 
> Fixes: e81274cd6b52 ("dmaengine: add support to dynamic register/unregister of channels")
> 
> Reported-by: Swathi Kovvuri <swathi.kovvuri@intel.com>
> Signed-off-by: Dave Jiang <dave.jiang@intel.com>
> Tested-by: Swathi Kovvuri <swathi.kovvuri@intel.com>

Hi Vinod. Ping on this submit.

> ---
>   drivers/dma/dmaengine.c |  119 +++++++++++++++++++++++++++++++++++++----------
>   1 file changed, 94 insertions(+), 25 deletions(-)
> 
> diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
> index d31076d9ef25..4d29c5f2fcfd 100644
> --- a/drivers/dma/dmaengine.c
> +++ b/drivers/dma/dmaengine.c
> @@ -83,6 +83,9 @@ static void dmaengine_dbg_summary_show(struct seq_file *s,
>   {
>   	struct dma_chan *chan;
>   
> +	if (list_empty(&dma_dev->channels))
> +		return;
> +
>   	list_for_each_entry(chan, &dma_dev->channels, device_node) {
>   		if (chan->client_count) {
>   			seq_printf(s, " %-13s| %s", dma_chan_name(chan),
> @@ -102,6 +105,11 @@ static int dmaengine_summary_show(struct seq_file *s, void *data)
>   	struct dma_device *dma_dev = NULL;
>   
>   	mutex_lock(&dma_list_mutex);
> +	if (list_empty(&dma_device_list)) {
> +		mutex_unlock(&dma_list_mutex);
> +		return 0;
> +	}
> +
>   	list_for_each_entry(dma_dev, &dma_device_list, global_node) {
>   		seq_printf(s, "dma%d (%s): number of channels: %u\n",
>   			   dma_dev->dev_id, dev_name(dma_dev->dev),
> @@ -323,10 +331,15 @@ static struct dma_chan *min_chan(enum dma_transaction_type cap, int cpu)
>   	struct dma_chan *min = NULL;
>   	struct dma_chan *localmin = NULL;
>   
> +	if (list_empty(&dma_device_list))
> +		return NULL;
> +
>   	list_for_each_entry(device, &dma_device_list, global_node) {
>   		if (!dma_has_cap(cap, device->cap_mask) ||
>   		    dma_has_cap(DMA_PRIVATE, device->cap_mask))
>   			continue;
> +		if (list_empty(&device->channels))
> +			continue;
>   		list_for_each_entry(chan, &device->channels, device_node) {
>   			if (!chan->client_count)
>   				continue;
> @@ -363,6 +376,9 @@ static void dma_channel_rebalance(void)
>   	int cpu;
>   	int cap;
>   
> +	if (list_empty(&dma_device_list))
> +		return;
> +
>   	/* undo the last distribution */
>   	for_each_dma_cap_mask(cap, dma_cap_mask_all)
>   		for_each_possible_cpu(cpu)
> @@ -371,6 +387,8 @@ static void dma_channel_rebalance(void)
>   	list_for_each_entry(device, &dma_device_list, global_node) {
>   		if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
>   			continue;
> +		if (list_empty(&device->channels))
> +			continue;
>   		list_for_each_entry(chan, &device->channels, device_node)
>   			chan->table_count = 0;
>   	}
> @@ -554,6 +572,10 @@ void dma_issue_pending_all(void)
>   	struct dma_chan *chan;
>   
>   	rcu_read_lock();
> +	if (list_empty(&dma_device_list)) {
> +		rcu_read_unlock();
> +		return;
> +	}
>   	list_for_each_entry_rcu(device, &dma_device_list, global_node) {
>   		if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
>   			continue;
> @@ -611,6 +633,10 @@ static struct dma_chan *private_candidate(const dma_cap_mask_t *mask,
>   		dev_dbg(dev->dev, "%s: wrong capabilities\n", __func__);
>   		return NULL;
>   	}
> +
> +	if (list_empty(&dev->channels))
> +		return NULL;
> +
>   	/* devices with multiple channels need special handling as we need to
>   	 * ensure that all channels are either private or public.
>   	 */
> @@ -747,6 +773,11 @@ struct dma_chan *__dma_request_channel(const dma_cap_mask_t *mask,
>   
>   	/* Find a channel */
>   	mutex_lock(&dma_list_mutex);
> +	if (list_empty(&dma_device_list)) {
> +		mutex_unlock(&dma_list_mutex);
> +		return NULL;
> +	}
> +
>   	list_for_each_entry_safe(device, _d, &dma_device_list, global_node) {
>   		/* Finds a DMA controller with matching device node */
>   		if (np && device->dev->of_node && np != device->dev->of_node)
> @@ -817,6 +848,11 @@ struct dma_chan *dma_request_chan(struct device *dev, const char *name)
>   
>   	/* Try to find the channel via the DMA filter map(s) */
>   	mutex_lock(&dma_list_mutex);
> +	if (list_empty(&dma_device_list)) {
> +		mutex_unlock(&dma_list_mutex);
> +		return NULL;
> +	}
> +
>   	list_for_each_entry_safe(d, _d, &dma_device_list, global_node) {
>   		dma_cap_mask_t mask;
>   		const struct dma_slave_map *map = dma_filter_match(d, name, dev);
> @@ -940,10 +976,17 @@ void dmaengine_get(void)
>   	mutex_lock(&dma_list_mutex);
>   	dmaengine_ref_count++;
>   
> +	if (list_empty(&dma_device_list)) {
> +		mutex_unlock(&dma_list_mutex);
> +		return;
> +	}
> +
>   	/* try to grab channels */
>   	list_for_each_entry_safe(device, _d, &dma_device_list, global_node) {
>   		if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
>   			continue;
> +		if (list_empty(&device->channels))
> +			continue;
>   		list_for_each_entry(chan, &device->channels, device_node) {
>   			err = dma_chan_get(chan);
>   			if (err == -ENODEV) {
> @@ -978,10 +1021,17 @@ void dmaengine_put(void)
>   	mutex_lock(&dma_list_mutex);
>   	dmaengine_ref_count--;
>   	BUG_ON(dmaengine_ref_count < 0);
> +	if (list_empty(&dma_device_list)) {
> +		mutex_unlock(&dma_list_mutex);
> +		return;
> +	}
> +
>   	/* drop channel references */
>   	list_for_each_entry_safe(device, _d, &dma_device_list, global_node) {
>   		if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
>   			continue;
> +		if (list_empty(&device->channels))
> +			continue;
>   		list_for_each_entry(chan, &device->channels, device_node)
>   			dma_chan_put(chan);
>   	}
> @@ -1130,6 +1180,39 @@ void dma_async_device_channel_unregister(struct dma_device *device,
>   }
>   EXPORT_SYMBOL_GPL(dma_async_device_channel_unregister);
>   
> +static int dma_channel_enumeration(struct dma_device *device)
> +{
> +	struct dma_chan *chan;
> +	int rc;
> +
> +	if (list_empty(&device->channels))
> +		return 0;
> +
> +	/* represent channels in sysfs. Probably want devs too */
> +	list_for_each_entry(chan, &device->channels, device_node) {
> +		rc = __dma_async_device_channel_register(device, chan);
> +		if (rc < 0)
> +			return rc;
> +	}
> +
> +	/* take references on public channels */
> +	if (dmaengine_ref_count && !dma_has_cap(DMA_PRIVATE, device->cap_mask))
> +		list_for_each_entry(chan, &device->channels, device_node) {
> +			/* if clients are already waiting for channels we need
> +			 * to take references on their behalf
> +			 */
> +			if (dma_chan_get(chan) == -ENODEV) {
> +				/* note we can only get here for the first
> +				 * channel as the remaining channels are
> +				 * guaranteed to get a reference
> +				 */
> +				return -ENODEV;
> +			}
> +		}
> +
> +	return 0;
> +}
> +
>   /**
>    * dma_async_device_register - registers DMA devices found
>    * @device: &dma_device
> @@ -1245,33 +1328,15 @@ int dma_async_device_register(struct dma_device *device)
>   	if (rc != 0)
>   		return rc;
>   
> +	mutex_lock(&dma_list_mutex);
>   	mutex_init(&device->chan_mutex);
>   	ida_init(&device->chan_ida);
> -
> -	/* represent channels in sysfs. Probably want devs too */
> -	list_for_each_entry(chan, &device->channels, device_node) {
> -		rc = __dma_async_device_channel_register(device, chan);
> -		if (rc < 0)
> -			goto err_out;
> +	rc = dma_channel_enumeration(device);
> +	if (rc < 0) {
> +		mutex_unlock(&dma_list_mutex);
> +		goto err_out;
>   	}
>   
> -	mutex_lock(&dma_list_mutex);
> -	/* take references on public channels */
> -	if (dmaengine_ref_count && !dma_has_cap(DMA_PRIVATE, device->cap_mask))
> -		list_for_each_entry(chan, &device->channels, device_node) {
> -			/* if clients are already waiting for channels we need
> -			 * to take references on their behalf
> -			 */
> -			if (dma_chan_get(chan) == -ENODEV) {
> -				/* note we can only get here for the first
> -				 * channel as the remaining channels are
> -				 * guaranteed to get a reference
> -				 */
> -				rc = -ENODEV;
> -				mutex_unlock(&dma_list_mutex);
> -				goto err_out;
> -			}
> -		}
>   	list_add_tail_rcu(&device->global_node, &dma_device_list);
>   	if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
>   		device->privatecnt++;	/* Always private */
> @@ -1289,6 +1354,9 @@ int dma_async_device_register(struct dma_device *device)
>   		return rc;
>   	}
>   
> +	if (list_empty(&device->channels))
> +		return rc;
> +
>   	list_for_each_entry(chan, &device->channels, device_node) {
>   		if (chan->local == NULL)
>   			continue;
> @@ -1315,8 +1383,9 @@ void dma_async_device_unregister(struct dma_device *device)
>   
>   	dmaengine_debug_unregister(device);
>   
> -	list_for_each_entry_safe(chan, n, &device->channels, device_node)
> -		__dma_async_device_channel_unregister(device, chan);
> +	if (!list_empty(&device->channels))
> +		list_for_each_entry_safe(chan, n, &device->channels, device_node)
> +			__dma_async_device_channel_unregister(device, chan);
>   
>   	mutex_lock(&dma_list_mutex);
>   	/*
> 

^ permalink raw reply	[flat|nested] 5+ messages in thread

* Re: [PATCH] dmaengine: check device and channel list for empty
  2020-05-15 19:22 [PATCH] dmaengine: check device and channel list for empty Dave Jiang
  2020-06-05 18:58 ` Dave Jiang
@ 2020-06-24  7:29 ` Vinod Koul
  2020-06-25 18:31   ` Dave Jiang
  1 sibling, 1 reply; 5+ messages in thread
From: Vinod Koul @ 2020-06-24  7:29 UTC (permalink / raw)
  To: Dave Jiang; +Cc: dmaengine, swathi.kovvuri

On 15-05-20, 12:22, Dave Jiang wrote:
> Check dma device list and channel list for empty before iterate as the
> iteration function assume the list to be not empty. With devices and
> channels now being hot pluggable this is a condition that needs to be
> checked. Otherwise it can cause the iterator to spin forever.

Can you rebase and resend? They don't apply on next.

> 
> Fixes: e81274cd6b52 ("dmaengine: add support to dynamic register/unregister of channels")

Pls drop this empty line
> 
> Reported-by: Swathi Kovvuri <swathi.kovvuri@intel.com>
> Signed-off-by: Dave Jiang <dave.jiang@intel.com>
> Tested-by: Swathi Kovvuri <swathi.kovvuri@intel.com>
-- 
~Vinod

^ permalink raw reply	[flat|nested] 5+ messages in thread

* Re: [PATCH] dmaengine: check device and channel list for empty
  2020-06-24  7:29 ` Vinod Koul
@ 2020-06-25 18:31   ` Dave Jiang
  2020-06-26  6:39     ` Vinod Koul
  0 siblings, 1 reply; 5+ messages in thread
From: Dave Jiang @ 2020-06-25 18:31 UTC (permalink / raw)
  To: Vinod Koul; +Cc: dmaengine, swathi.kovvuri



On 6/24/2020 12:29 AM, Vinod Koul wrote:
> On 15-05-20, 12:22, Dave Jiang wrote:
>> Check dma device list and channel list for empty before iterate as the
>> iteration function assume the list to be not empty. With devices and
>> channels now being hot pluggable this is a condition that needs to be
>> checked. Otherwise it can cause the iterator to spin forever.
> 
> Can you rebase and resend, they dont apply on next

Hi Vinod. I'm trying to figure out how to do all the patches outstanding to 
avoid conflicts for you. Some will go to your fixes branch and some will go to 
the next branch. But next doesn't have the patches in fixes. So when you merge 
next later for 5.9, you are going to hit conflict from my patches that went in 
through the fixes branch for 5.8.

> 
>>
>> Fixes: e81274cd6b52 ("dmaengine: add support to dynamic register/unregister of channels")
> 
> Pls drop this empty line
>>
>> Reported-by: Swathi Kovvuri <swathi.kovvuri@intel.com>
>> Signed-off-by: Dave Jiang <dave.jiang@intel.com>
>> Tested-by: Swathi Kovvuri <swathi.kovvuri@intel.com>

^ permalink raw reply	[flat|nested] 5+ messages in thread

* Re: [PATCH] dmaengine: check device and channel list for empty
  2020-06-25 18:31   ` Dave Jiang
@ 2020-06-26  6:39     ` Vinod Koul
  0 siblings, 0 replies; 5+ messages in thread
From: Vinod Koul @ 2020-06-26  6:39 UTC (permalink / raw)
  To: Dave Jiang; +Cc: dmaengine, swathi.kovvuri

On 25-06-20, 11:31, Dave Jiang wrote:
> 
> 
> On 6/24/2020 12:29 AM, Vinod Koul wrote:
> > On 15-05-20, 12:22, Dave Jiang wrote:
> > > Check dma device list and channel list for empty before iterate as the
> > > iteration function assume the list to be not empty. With devices and
> > > channels now being hot pluggable this is a condition that needs to be
> > > checked. Otherwise it can cause the iterator to spin forever.
> > 
> > Can you rebase and resend, they dont apply on next
> 
> Hi Vinod. I'm trying to figure out how to do all the patches outstanding to
> avoid conflicts for you. Some will go to your fixes branch and some will go
> to the next branch. But next doesn't have the patches in fixes. So when you
> merge next later for 5.9, you are going to hit conflict from my patches that
> went in through the fixes branch for 5.8.

I don't typically merge fixes, unless we have conflicts. I can merge if
there are conflicts, just let me know :-)

-- 
~Vinod

^ permalink raw reply	[flat|nested] 5+ messages in thread

end of thread, other threads:[~2020-06-26  6:39 UTC | newest]

Thread overview: 5+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2020-05-15 19:22 [PATCH] dmaengine: check device and channel list for empty Dave Jiang
2020-06-05 18:58 ` Dave Jiang
2020-06-24  7:29 ` Vinod Koul
2020-06-25 18:31   ` Dave Jiang
2020-06-26  6:39     ` Vinod Koul

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).