dmaengine.vger.kernel.org archive mirror
* [PATCH v2] dmaengine: check device and channel list for empty
@ 2020-06-26 18:09 Dave Jiang
  2020-07-02 13:36 ` Vinod Koul
  2020-07-07  6:05 ` Jiri Slaby
  0 siblings, 2 replies; 6+ messages in thread
From: Dave Jiang @ 2020-06-26 18:09 UTC (permalink / raw)
  To: vkoul; +Cc: Swathi Kovvuri, Swathi Kovvuri, dmaengine

Check the dma device list and channel list for empty before iterating, as
the iteration functions assume the lists are not empty. With devices and
channels now being hot pluggable, this is a condition that needs to be
checked. Otherwise it can cause the iterator to spin forever.

Fixes: e81274cd6b52 ("dmaengine: add support to dynamic register/unregister of channels")
Reported-by: Swathi Kovvuri <swathi.kovvuri@intel.com>
Signed-off-by: Dave Jiang <dave.jiang@intel.com>
Tested-by: Swathi Kovvuri <swathi.kovvuri@intel.com>
---

Rebased to dmaengine next tree

 drivers/dma/dmaengine.c |  119 +++++++++++++++++++++++++++++++++++++----------
 1 file changed, 94 insertions(+), 25 deletions(-)

diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index 2b06a7a8629d..0d6529eff66f 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -85,6 +85,9 @@ static void dmaengine_dbg_summary_show(struct seq_file *s,
 {
 	struct dma_chan *chan;
 
+	if (list_empty(&dma_dev->channels))
+		return;
+
 	list_for_each_entry(chan, &dma_dev->channels, device_node) {
 		if (chan->client_count) {
 			seq_printf(s, " %-13s| %s", dma_chan_name(chan),
@@ -104,6 +107,11 @@ static int dmaengine_summary_show(struct seq_file *s, void *data)
 	struct dma_device *dma_dev = NULL;
 
 	mutex_lock(&dma_list_mutex);
+	if (list_empty(&dma_device_list)) {
+		mutex_unlock(&dma_list_mutex);
+		return 0;
+	}
+
 	list_for_each_entry(dma_dev, &dma_device_list, global_node) {
 		seq_printf(s, "dma%d (%s): number of channels: %u\n",
 			   dma_dev->dev_id, dev_name(dma_dev->dev),
@@ -324,10 +332,15 @@ static struct dma_chan *min_chan(enum dma_transaction_type cap, int cpu)
 	struct dma_chan *min = NULL;
 	struct dma_chan *localmin = NULL;
 
+	if (list_empty(&dma_device_list))
+		return NULL;
+
 	list_for_each_entry(device, &dma_device_list, global_node) {
 		if (!dma_has_cap(cap, device->cap_mask) ||
 		    dma_has_cap(DMA_PRIVATE, device->cap_mask))
 			continue;
+		if (list_empty(&device->channels))
+			continue;
 		list_for_each_entry(chan, &device->channels, device_node) {
 			if (!chan->client_count)
 				continue;
@@ -365,6 +378,9 @@ static void dma_channel_rebalance(void)
 	int cpu;
 	int cap;
 
+	if (list_empty(&dma_device_list))
+		return;
+
 	/* undo the last distribution */
 	for_each_dma_cap_mask(cap, dma_cap_mask_all)
 		for_each_possible_cpu(cpu)
@@ -373,6 +389,8 @@ static void dma_channel_rebalance(void)
 	list_for_each_entry(device, &dma_device_list, global_node) {
 		if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
 			continue;
+		if (list_empty(&device->channels))
+			continue;
 		list_for_each_entry(chan, &device->channels, device_node)
 			chan->table_count = 0;
 	}
@@ -556,6 +574,10 @@ void dma_issue_pending_all(void)
 	struct dma_chan *chan;
 
 	rcu_read_lock();
+	if (list_empty(&dma_device_list)) {
+		rcu_read_unlock();
+		return;
+	}
 	list_for_each_entry_rcu(device, &dma_device_list, global_node) {
 		if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
 			continue;
@@ -613,6 +635,10 @@ static struct dma_chan *private_candidate(const dma_cap_mask_t *mask,
 		dev_dbg(dev->dev, "%s: wrong capabilities\n", __func__);
 		return NULL;
 	}
+
+	if (list_empty(&dev->channels))
+		return NULL;
+
 	/* devices with multiple channels need special handling as we need to
 	 * ensure that all channels are either private or public.
 	 */
@@ -749,6 +775,11 @@ struct dma_chan *__dma_request_channel(const dma_cap_mask_t *mask,
 
 	/* Find a channel */
 	mutex_lock(&dma_list_mutex);
+	if (list_empty(&dma_device_list)) {
+		mutex_unlock(&dma_list_mutex);
+		return NULL;
+	}
+
 	list_for_each_entry_safe(device, _d, &dma_device_list, global_node) {
 		/* Finds a DMA controller with matching device node */
 		if (np && device->dev->of_node && np != device->dev->of_node)
@@ -819,6 +850,11 @@ struct dma_chan *dma_request_chan(struct device *dev, const char *name)
 
 	/* Try to find the channel via the DMA filter map(s) */
 	mutex_lock(&dma_list_mutex);
+	if (list_empty(&dma_device_list)) {
+		mutex_unlock(&dma_list_mutex);
+		return NULL;
+	}
+
 	list_for_each_entry_safe(d, _d, &dma_device_list, global_node) {
 		dma_cap_mask_t mask;
 		const struct dma_slave_map *map = dma_filter_match(d, name, dev);
@@ -942,10 +978,17 @@ void dmaengine_get(void)
 	mutex_lock(&dma_list_mutex);
 	dmaengine_ref_count++;
 
+	if (list_empty(&dma_device_list)) {
+		mutex_unlock(&dma_list_mutex);
+		return;
+	}
+
 	/* try to grab channels */
 	list_for_each_entry_safe(device, _d, &dma_device_list, global_node) {
 		if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
 			continue;
+		if (list_empty(&device->channels))
+			continue;
 		list_for_each_entry(chan, &device->channels, device_node) {
 			err = dma_chan_get(chan);
 			if (err == -ENODEV) {
@@ -980,10 +1023,17 @@ void dmaengine_put(void)
 	mutex_lock(&dma_list_mutex);
 	dmaengine_ref_count--;
 	BUG_ON(dmaengine_ref_count < 0);
+	if (list_empty(&dma_device_list)) {
+		mutex_unlock(&dma_list_mutex);
+		return;
+	}
+
 	/* drop channel references */
 	list_for_each_entry_safe(device, _d, &dma_device_list, global_node) {
 		if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
 			continue;
+		if (list_empty(&device->channels))
+			continue;
 		list_for_each_entry(chan, &device->channels, device_node)
 			dma_chan_put(chan);
 	}
@@ -1132,6 +1182,39 @@ void dma_async_device_channel_unregister(struct dma_device *device,
 }
 EXPORT_SYMBOL_GPL(dma_async_device_channel_unregister);
 
+static int dma_channel_enumeration(struct dma_device *device)
+{
+	struct dma_chan *chan;
+	int rc;
+
+	if (list_empty(&device->channels))
+		return 0;
+
+	/* represent channels in sysfs. Probably want devs too */
+	list_for_each_entry(chan, &device->channels, device_node) {
+		rc = __dma_async_device_channel_register(device, chan);
+		if (rc < 0)
+			return rc;
+	}
+
+	/* take references on public channels */
+	if (dmaengine_ref_count && !dma_has_cap(DMA_PRIVATE, device->cap_mask))
+		list_for_each_entry(chan, &device->channels, device_node) {
+			/* if clients are already waiting for channels we need
+			 * to take references on their behalf
+			 */
+			if (dma_chan_get(chan) == -ENODEV) {
+				/* note we can only get here for the first
+				 * channel as the remaining channels are
+				 * guaranteed to get a reference
+				 */
+				return -ENODEV;
+			}
+		}
+
+	return 0;
+}
+
 /**
  * dma_async_device_register - registers DMA devices found
  * @device:	pointer to &struct dma_device
@@ -1247,33 +1330,15 @@ int dma_async_device_register(struct dma_device *device)
 	if (rc != 0)
 		return rc;
 
+	mutex_lock(&dma_list_mutex);
 	mutex_init(&device->chan_mutex);
 	ida_init(&device->chan_ida);
-
-	/* represent channels in sysfs. Probably want devs too */
-	list_for_each_entry(chan, &device->channels, device_node) {
-		rc = __dma_async_device_channel_register(device, chan);
-		if (rc < 0)
-			goto err_out;
+	rc = dma_channel_enumeration(device);
+	if (rc < 0) {
+		mutex_unlock(&dma_list_mutex);
+		goto err_out;
 	}
 
-	mutex_lock(&dma_list_mutex);
-	/* take references on public channels */
-	if (dmaengine_ref_count && !dma_has_cap(DMA_PRIVATE, device->cap_mask))
-		list_for_each_entry(chan, &device->channels, device_node) {
-			/* if clients are already waiting for channels we need
-			 * to take references on their behalf
-			 */
-			if (dma_chan_get(chan) == -ENODEV) {
-				/* note we can only get here for the first
-				 * channel as the remaining channels are
-				 * guaranteed to get a reference
-				 */
-				rc = -ENODEV;
-				mutex_unlock(&dma_list_mutex);
-				goto err_out;
-			}
-		}
 	list_add_tail_rcu(&device->global_node, &dma_device_list);
 	if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
 		device->privatecnt++;	/* Always private */
@@ -1291,6 +1356,9 @@ int dma_async_device_register(struct dma_device *device)
 		return rc;
 	}
 
+	if (list_empty(&device->channels))
+		return rc;
+
 	list_for_each_entry(chan, &device->channels, device_node) {
 		if (chan->local == NULL)
 			continue;
@@ -1317,8 +1385,9 @@ void dma_async_device_unregister(struct dma_device *device)
 
 	dmaengine_debug_unregister(device);
 
-	list_for_each_entry_safe(chan, n, &device->channels, device_node)
-		__dma_async_device_channel_unregister(device, chan);
+	if (!list_empty(&device->channels))
+		list_for_each_entry_safe(chan, n, &device->channels, device_node)
+			__dma_async_device_channel_unregister(device, chan);
 
 	mutex_lock(&dma_list_mutex);
 	/*



* Re: [PATCH v2] dmaengine: check device and channel list for empty
  2020-06-26 18:09 [PATCH v2] dmaengine: check device and channel list for empty Dave Jiang
@ 2020-07-02 13:36 ` Vinod Koul
  2020-07-07  6:05 ` Jiri Slaby
  1 sibling, 0 replies; 6+ messages in thread
From: Vinod Koul @ 2020-07-02 13:36 UTC (permalink / raw)
  To: Dave Jiang; +Cc: Swathi Kovvuri, dmaengine

On 26-06-20, 11:09, Dave Jiang wrote:
> Check the dma device list and channel list for empty before iterating, as
> the iteration functions assume the lists are not empty. With devices and
> channels now being hot pluggable, this is a condition that needs to be
> checked. Otherwise it can cause the iterator to spin forever.

Applied, thanks

-- 
~Vinod


* Re: [PATCH v2] dmaengine: check device and channel list for empty
  2020-06-26 18:09 [PATCH v2] dmaengine: check device and channel list for empty Dave Jiang
  2020-07-02 13:36 ` Vinod Koul
@ 2020-07-07  6:05 ` Jiri Slaby
  2020-07-07  8:50   ` Peter Ujfalusi
  2020-07-07 15:42   ` Dave Jiang
  1 sibling, 2 replies; 6+ messages in thread
From: Jiri Slaby @ 2020-07-07  6:05 UTC (permalink / raw)
  To: Dave Jiang, vkoul; +Cc: Swathi Kovvuri, dmaengine, Linux kernel mailing list

On 26. 06. 20, 20:09, Dave Jiang wrote:
> Check the dma device list and channel list for empty before iterating, as
> the iteration functions assume the lists are not empty. With devices and
> channels now being hot pluggable, this is a condition that needs to be
> checked. Otherwise it can cause the iterator to spin forever.

Could you be a little bit more specific about how this can spin forever? I.e.
can you attach a stacktrace of such behaviour?

As in the empty case, "&pos->member" is "head" (look into
list_for_each_entry) and the for loop should loop exactly zero times.
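
To illustrate, here is a minimal userspace sketch mirroring the shape of
include/linux/list.h (not the kernel code itself; a type parameter stands
in for the kernel's typeof(*pos)):

#include <stdio.h>
#include <stddef.h>

struct list_head { struct list_head *next, *prev; };

#define LIST_HEAD_INIT(name) { &(name), &(name) }
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))
#define list_for_each_entry(pos, head, member, type)			\
	for (pos = container_of((head)->next, type, member);		\
	     &pos->member != (head);					\
	     pos = container_of(pos->member.next, type, member))

struct dma_chan { int id; struct list_head device_node; };

int main(void)
{
	struct list_head channels = LIST_HEAD_INIT(channels);
	struct dma_chan *chan;

	/* An empty list points at itself, so "&pos->member == head" is
	 * true on the very first test and the body runs zero times. */
	list_for_each_entry(chan, &channels, device_node, struct dma_chan)
		printf("chan %d\n", chan->id);

	printf("loop body ran zero times\n");
	return 0;
}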

> Fixes: e81274cd6b52 ("dmaengine: add support to dynamic register/unregister of channels")
> Reported-by: Swathi Kovvuri <swathi.kovvuri@intel.com>
> Signed-off-by: Dave Jiang <dave.jiang@intel.com>
> Tested-by: Swathi Kovvuri <swathi.kovvuri@intel.com>
> ---
> 
> Rebased to dmaengine next tree
> 
>  drivers/dma/dmaengine.c |  119 +++++++++++++++++++++++++++++++++++++----------
>  1 file changed, 94 insertions(+), 25 deletions(-)
> 
> diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
> index 2b06a7a8629d..0d6529eff66f 100644
> --- a/drivers/dma/dmaengine.c
> +++ b/drivers/dma/dmaengine.c
> @@ -85,6 +85,9 @@ static void dmaengine_dbg_summary_show(struct seq_file *s,
>  {
>  	struct dma_chan *chan;
>  
> +	if (list_empty(&dma_dev->channels))
> +		return;
> +
>  	list_for_each_entry(chan, &dma_dev->channels, device_node) {
>  		if (chan->client_count) {
>  			seq_printf(s, " %-13s| %s", dma_chan_name(chan),
> @@ -104,6 +107,11 @@ static int dmaengine_summary_show(struct seq_file *s, void *data)
>  	struct dma_device *dma_dev = NULL;
>  
>  	mutex_lock(&dma_list_mutex);
> +	if (list_empty(&dma_device_list)) {
> +		mutex_unlock(&dma_list_mutex);
> +		return 0;
> +	}
> +
>  	list_for_each_entry(dma_dev, &dma_device_list, global_node) {
>  		seq_printf(s, "dma%d (%s): number of channels: %u\n",
>  			   dma_dev->dev_id, dev_name(dma_dev->dev),
> @@ -324,10 +332,15 @@ static struct dma_chan *min_chan(enum dma_transaction_type cap, int cpu)
>  	struct dma_chan *min = NULL;
>  	struct dma_chan *localmin = NULL;
>  
> +	if (list_empty(&dma_device_list))
> +		return NULL;
> +
>  	list_for_each_entry(device, &dma_device_list, global_node) {
>  		if (!dma_has_cap(cap, device->cap_mask) ||
>  		    dma_has_cap(DMA_PRIVATE, device->cap_mask))
>  			continue;
> +		if (list_empty(&device->channels))
> +			continue;
>  		list_for_each_entry(chan, &device->channels, device_node) {
>  			if (!chan->client_count)
>  				continue;
> @@ -365,6 +378,9 @@ static void dma_channel_rebalance(void)
>  	int cpu;
>  	int cap;
>  
> +	if (list_empty(&dma_device_list))
> +		return;
> +
>  	/* undo the last distribution */
>  	for_each_dma_cap_mask(cap, dma_cap_mask_all)
>  		for_each_possible_cpu(cpu)
> @@ -373,6 +389,8 @@ static void dma_channel_rebalance(void)
>  	list_for_each_entry(device, &dma_device_list, global_node) {
>  		if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
>  			continue;
> +		if (list_empty(&device->channels))
> +			continue;
>  		list_for_each_entry(chan, &device->channels, device_node)
>  			chan->table_count = 0;
>  	}
> @@ -556,6 +574,10 @@ void dma_issue_pending_all(void)
>  	struct dma_chan *chan;
>  
>  	rcu_read_lock();
> +	if (list_empty(&dma_device_list)) {
> +		rcu_read_unlock();
> +		return;
> +	}
>  	list_for_each_entry_rcu(device, &dma_device_list, global_node) {
>  		if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
>  			continue;
> @@ -613,6 +635,10 @@ static struct dma_chan *private_candidate(const dma_cap_mask_t *mask,
>  		dev_dbg(dev->dev, "%s: wrong capabilities\n", __func__);
>  		return NULL;
>  	}
> +
> +	if (list_empty(&dev->channels))
> +		return NULL;
> +
>  	/* devices with multiple channels need special handling as we need to
>  	 * ensure that all channels are either private or public.
>  	 */
> @@ -749,6 +775,11 @@ struct dma_chan *__dma_request_channel(const dma_cap_mask_t *mask,
>  
>  	/* Find a channel */
>  	mutex_lock(&dma_list_mutex);
> +	if (list_empty(&dma_device_list)) {
> +		mutex_unlock(&dma_list_mutex);
> +		return NULL;
> +	}
> +
>  	list_for_each_entry_safe(device, _d, &dma_device_list, global_node) {
>  		/* Finds a DMA controller with matching device node */
>  		if (np && device->dev->of_node && np != device->dev->of_node)
> @@ -819,6 +850,11 @@ struct dma_chan *dma_request_chan(struct device *dev, const char *name)
>  
>  	/* Try to find the channel via the DMA filter map(s) */
>  	mutex_lock(&dma_list_mutex);
> +	if (list_empty(&dma_device_list)) {
> +		mutex_unlock(&dma_list_mutex);
> +		return NULL;
> +	}
> +
>  	list_for_each_entry_safe(d, _d, &dma_device_list, global_node) {
>  		dma_cap_mask_t mask;
>  		const struct dma_slave_map *map = dma_filter_match(d, name, dev);
> @@ -942,10 +978,17 @@ void dmaengine_get(void)
>  	mutex_lock(&dma_list_mutex);
>  	dmaengine_ref_count++;
>  
> +	if (list_empty(&dma_device_list)) {
> +		mutex_unlock(&dma_list_mutex);
> +		return;
> +	}
> +
>  	/* try to grab channels */
>  	list_for_each_entry_safe(device, _d, &dma_device_list, global_node) {
>  		if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
>  			continue;
> +		if (list_empty(&device->channels))
> +			continue;
>  		list_for_each_entry(chan, &device->channels, device_node) {
>  			err = dma_chan_get(chan);
>  			if (err == -ENODEV) {
> @@ -980,10 +1023,17 @@ void dmaengine_put(void)
>  	mutex_lock(&dma_list_mutex);
>  	dmaengine_ref_count--;
>  	BUG_ON(dmaengine_ref_count < 0);
> +	if (list_empty(&dma_device_list)) {
> +		mutex_unlock(&dma_list_mutex);
> +		return;
> +	}
> +
>  	/* drop channel references */
>  	list_for_each_entry_safe(device, _d, &dma_device_list, global_node) {
>  		if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
>  			continue;
> +		if (list_empty(&device->channels))
> +			continue;
>  		list_for_each_entry(chan, &device->channels, device_node)
>  			dma_chan_put(chan);
>  	}
> @@ -1132,6 +1182,39 @@ void dma_async_device_channel_unregister(struct dma_device *device,
>  }
>  EXPORT_SYMBOL_GPL(dma_async_device_channel_unregister);
>  
> +static int dma_channel_enumeration(struct dma_device *device)
> +{
> +	struct dma_chan *chan;
> +	int rc;
> +
> +	if (list_empty(&device->channels))
> +		return 0;
> +
> +	/* represent channels in sysfs. Probably want devs too */
> +	list_for_each_entry(chan, &device->channels, device_node) {
> +		rc = __dma_async_device_channel_register(device, chan);
> +		if (rc < 0)
> +			return rc;
> +	}
> +
> +	/* take references on public channels */
> +	if (dmaengine_ref_count && !dma_has_cap(DMA_PRIVATE, device->cap_mask))
> +		list_for_each_entry(chan, &device->channels, device_node) {
> +			/* if clients are already waiting for channels we need
> +			 * to take references on their behalf
> +			 */
> +			if (dma_chan_get(chan) == -ENODEV) {
> +				/* note we can only get here for the first
> +				 * channel as the remaining channels are
> +				 * guaranteed to get a reference
> +				 */
> +				return -ENODEV;
> +			}
> +		}
> +
> +	return 0;
> +}
> +
>  /**
>   * dma_async_device_register - registers DMA devices found
>   * @device:	pointer to &struct dma_device
> @@ -1247,33 +1330,15 @@ int dma_async_device_register(struct dma_device *device)
>  	if (rc != 0)
>  		return rc;
>  
> +	mutex_lock(&dma_list_mutex);
>  	mutex_init(&device->chan_mutex);
>  	ida_init(&device->chan_ida);
> -
> -	/* represent channels in sysfs. Probably want devs too */
> -	list_for_each_entry(chan, &device->channels, device_node) {
> -		rc = __dma_async_device_channel_register(device, chan);
> -		if (rc < 0)
> -			goto err_out;
> +	rc = dma_channel_enumeration(device);
> +	if (rc < 0) {
> +		mutex_unlock(&dma_list_mutex);
> +		goto err_out;
>  	}
>  
> -	mutex_lock(&dma_list_mutex);
> -	/* take references on public channels */
> -	if (dmaengine_ref_count && !dma_has_cap(DMA_PRIVATE, device->cap_mask))
> -		list_for_each_entry(chan, &device->channels, device_node) {
> -			/* if clients are already waiting for channels we need
> -			 * to take references on their behalf
> -			 */
> -			if (dma_chan_get(chan) == -ENODEV) {
> -				/* note we can only get here for the first
> -				 * channel as the remaining channels are
> -				 * guaranteed to get a reference
> -				 */
> -				rc = -ENODEV;
> -				mutex_unlock(&dma_list_mutex);
> -				goto err_out;
> -			}
> -		}
>  	list_add_tail_rcu(&device->global_node, &dma_device_list);
>  	if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
>  		device->privatecnt++;	/* Always private */
> @@ -1291,6 +1356,9 @@ int dma_async_device_register(struct dma_device *device)
>  		return rc;
>  	}
>  
> +	if (list_empty(&device->channels))
> +		return rc;
> +
>  	list_for_each_entry(chan, &device->channels, device_node) {
>  		if (chan->local == NULL)
>  			continue;
> @@ -1317,8 +1385,9 @@ void dma_async_device_unregister(struct dma_device *device)
>  
>  	dmaengine_debug_unregister(device);
>  
> -	list_for_each_entry_safe(chan, n, &device->channels, device_node)
> -		__dma_async_device_channel_unregister(device, chan);
> +	if (!list_empty(&device->channels))
> +		list_for_each_entry_safe(chan, n, &device->channels, device_node)
> +			__dma_async_device_channel_unregister(device, chan);
>  
>  	mutex_lock(&dma_list_mutex);
>  	/*



-- 
js
suse labs


* Re: [PATCH v2] dmaengine: check device and channel list for empty
  2020-07-07  6:05 ` Jiri Slaby
@ 2020-07-07  8:50   ` Peter Ujfalusi
  2020-07-07 15:45     ` Dave Jiang
  2020-07-07 15:42   ` Dave Jiang
  1 sibling, 1 reply; 6+ messages in thread
From: Peter Ujfalusi @ 2020-07-07  8:50 UTC (permalink / raw)
  To: Jiri Slaby, Dave Jiang, vkoul
  Cc: Swathi Kovvuri, dmaengine, Linux kernel mailing list



On 07/07/2020 9.05, Jiri Slaby wrote:
> On 26. 06. 20, 20:09, Dave Jiang wrote:
>> Check the dma device list and channel list for empty before iterating, as
>> the iteration functions assume the lists are not empty. With devices and
>> channels now being hot pluggable, this is a condition that needs to be
>> checked. Otherwise it can cause the iterator to spin forever.
> 
> Could you be a little bit more specific about how this can spin forever? I.e.
> can you attach a stacktrace of such behaviour?
> 
> As in the empty case, "&pos->member" is "head" (look into
> list_for_each_entry) and the for loop should loop exactly zero times.

This is my understanding as well.

Isn't it more plausible that you have a race between
dma_async_device_register() / dma_async_device_unregister() /
dma_async_device_channel_register() /
dma_async_device_channel_unregister()?

It looks like there is unbalanced locking between
dma_async_device_channel_register() and
dma_async_device_channel_unregister().

The latter locks the dma_list_mutex for a short while, while the former
does not.
Both device_register/unregister lock the same dma_list_mutex at some point.
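
Schematically (userspace pthreads standing in for the kernel mutex; this
is just the locking shape implied above, not the actual dmaengine code):

#include <pthread.h>

struct list_head { struct list_head *next, *prev; };

static pthread_mutex_t dma_list_mutex = PTHREAD_MUTEX_INITIALIZER;

static void list_add_tail(struct list_head *new, struct list_head *head)
{
	new->prev = head->prev;
	new->next = head;
	head->prev->next = new;
	head->prev = new;
}

static void list_del(struct list_head *entry)
{
	entry->prev->next = entry->next;
	entry->next->prev = entry->prev;
}

/* register path: the list is updated with no lock held, so a
 * concurrent iterator can observe a half-linked node */
void channel_register(struct list_head *channels, struct list_head *node)
{
	list_add_tail(node, channels);
}

/* unregister path: the same list is updated under dma_list_mutex */
void channel_unregister(struct list_head *node)
{
	pthread_mutex_lock(&dma_list_mutex);
	list_del(node);
	pthread_mutex_unlock(&dma_list_mutex);
}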

>> Fixes: e81274cd6b52 ("dmaengine: add support to dynamic register/unregister of channels")
>> Reported-by: Swathi Kovvuri <swathi.kovvuri@intel.com>
>> Signed-off-by: Dave Jiang <dave.jiang@intel.com>
>> Tested-by: Swathi Kovvuri <swathi.kovvuri@intel.com>
>> ---
>>
>> Rebased to dmaengine next tree
>>
>>  drivers/dma/dmaengine.c |  119 +++++++++++++++++++++++++++++++++++++----------
>>  1 file changed, 94 insertions(+), 25 deletions(-)
>>
>> diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
>> index 2b06a7a8629d..0d6529eff66f 100644
>> --- a/drivers/dma/dmaengine.c
>> +++ b/drivers/dma/dmaengine.c

...

>> +static int dma_channel_enumeration(struct dma_device *device)
>> +{
>> +	struct dma_chan *chan;
>> +	int rc;
>> +
>> +	if (list_empty(&device->channels))
>> +		return 0;
>> +
>> +	/* represent channels in sysfs. Probably want devs too */
>> +	list_for_each_entry(chan, &device->channels, device_node) {
>> +		rc = __dma_async_device_channel_register(device, chan);
>> +		if (rc < 0)
>> +			return rc;
>> +	}
>> +
>> +	/* take references on public channels */
>> +	if (dmaengine_ref_count && !dma_has_cap(DMA_PRIVATE, device->cap_mask))
>> +		list_for_each_entry(chan, &device->channels, device_node) {
>> +			/* if clients are already waiting for channels we need
>> +			 * to take references on their behalf
>> +			 */
>> +			if (dma_chan_get(chan) == -ENODEV) {
>> +				/* note we can only get here for the first
>> +				 * channel as the remaining channels are
>> +				 * guaranteed to get a reference
>> +				 */
>> +				return -ENODEV;
>> +			}
>> +		}
>> +
>> +	return 0;
>> +}
>> +
>>  /**
>>   * dma_async_device_register - registers DMA devices found
>>   * @device:	pointer to &struct dma_device
>> @@ -1247,33 +1330,15 @@ int dma_async_device_register(struct dma_device *device)
>>  	if (rc != 0)
>>  		return rc;
>>  
>> +	mutex_lock(&dma_list_mutex);
>>  	mutex_init(&device->chan_mutex);
>>  	ida_init(&device->chan_ida);
>> -
>> -	/* represent channels in sysfs. Probably want devs too */
>> -	list_for_each_entry(chan, &device->channels, device_node) {
>> -		rc = __dma_async_device_channel_register(device, chan);
>> -		if (rc < 0)
>> -			goto err_out;
>>
>> +	rc = dma_channel_enumeration(device);
>> +	if (rc < 0) {
>> +		mutex_unlock(&dma_list_mutex);
>> +		goto err_out;
>>  	}

Here you effectively moved the __dma_async_device_channel_register()
under dma_list_mutex.


>>  
>> -	mutex_lock(&dma_list_mutex);
>> -	/* take references on public channels */
>> -	if (dmaengine_ref_count && !dma_has_cap(DMA_PRIVATE, device->cap_mask))
>> -		list_for_each_entry(chan, &device->channels, device_node) {
>> -			/* if clients are already waiting for channels we need
>> -			 * to take references on their behalf
>> -			 */
>> -			if (dma_chan_get(chan) == -ENODEV) {
>> -				/* note we can only get here for the first
>> -				 * channel as the remaining channels are
>> -				 * guaranteed to get a reference
>> -				 */
>> -				rc = -ENODEV;
>> -				mutex_unlock(&dma_list_mutex);
>> -				goto err_out;
>> -			}
>> -		}
>>  	list_add_tail_rcu(&device->global_node, &dma_device_list);
>>  	if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
>>  		device->privatecnt++;	/* Always private */
>> @@ -1291,6 +1356,9 @@ int dma_async_device_register(struct dma_device *device)
>>  		return rc;
>>  	}
>>  
>> +	if (list_empty(&device->channels))
>> +		return rc;
>> +
>>  	list_for_each_entry(chan, &device->channels, device_node) {
>>  		if (chan->local == NULL)
>>  			continue;
>> @@ -1317,8 +1385,9 @@ void dma_async_device_unregister(struct dma_device *device)
>>  
>>  	dmaengine_debug_unregister(device);
>>  
>> -	list_for_each_entry_safe(chan, n, &device->channels, device_node)
>> -		__dma_async_device_channel_unregister(device, chan);
>> +	if (!list_empty(&device->channels))
>> +		list_for_each_entry_safe(chan, n, &device->channels, device_node)
>> +			__dma_async_device_channel_unregister(device, chan);
>>  
>>  	mutex_lock(&dma_list_mutex);
>>  	/*
> 
> 
> 

- Péter

Texas Instruments Finland Oy, Porkkalankatu 22, 00180 Helsinki.
Y-tunnus/Business ID: 0615521-4. Kotipaikka/Domicile: Helsinki



* Re: [PATCH v2] dmaengine: check device and channel list for empty
  2020-07-07  6:05 ` Jiri Slaby
  2020-07-07  8:50   ` Peter Ujfalusi
@ 2020-07-07 15:42   ` Dave Jiang
  1 sibling, 0 replies; 6+ messages in thread
From: Dave Jiang @ 2020-07-07 15:42 UTC (permalink / raw)
  To: Jiri Slaby, vkoul; +Cc: Swathi Kovvuri, dmaengine, Linux kernel mailing list



On 7/6/2020 11:05 PM, Jiri Slaby wrote:
> On 26. 06. 20, 20:09, Dave Jiang wrote:
>> Check the dma device list and channel list for empty before iterating, as
>> the iteration functions assume the lists are not empty. With devices and
>> channels now being hot pluggable, this is a condition that needs to be
>> checked. Otherwise it can cause the iterator to spin forever.
> 
> Could you be a little bit more specific about how this can spin forever? I.e.
> can you attach a stacktrace of such behaviour?

I can't seem to find the original splat that led me to the conclusion that it
spins forever. As I recall, the issue produced different splats and was not
always consistently reproducible. Here's a partial splat that was tracked by
the internal bug database. Since dma devices and channels can now be hot added
and removed, the device and channel lists can be empty. Because list_entry()
and friends expect the list to not be empty (according to the header comment),
I added the checks to ensure that isn't the case before using them in
dmaengine. With the fix, we can no longer reproduce any of the splats. So maybe
the above was a bad description of the issue.

[ 4216.048375]  ? dma_channel_rebalance+0x7b/0x250
[ 4216.056360]  dma_async_device_register+0x349/0x3a0
[ 4216.064604]  idxd_register_dma_device+0x90/0xc0 [idxd]
[ 4216.073175]  idxd_config_bus_probe.cold+0x7d/0x1fc [idxd]
[ 4216.081988]  really_probe+0x159/0x3e0
[ 4216.088791]  driver_probe_device+0xbc/0x100
[ 4216.096138]  device_driver_attach+0x5d/0x70
[ 4216.103463]  bind_store+0xd3/0x110
[ 4216.109913]  drv_attr_store+0x24/0x30
[ 4216.116666]  sysfs_kf_write+0x3e/0x50
[ 4216.123390]  kernfs_fop_write+0xda/0x1b0
[ 4216.130354]  __vfs_write+0x1b/0x40
[ 4216.136661]  vfs_write+0xb9/0x1a0
[ 4216.142802]  ksys_write+0x67/0xe0
[ 4216.148844]  __x64_sys_write+0x1a/0x20
[ 4216.155289]  do_syscall_64+0x57/0x1d0
[ 4216.161642]  entry_SYSCALL_64_after_hwframe+0x44/0xa9
[ 4216.169603] RIP: 0033:0x7f4640339b1a
[ 4216.175918] Code: 48 c7 c0 ff ff ff ff eb be
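
For what it's worth, the danger is easier to see with list_entry() /
list_first_entry() than with the iterator itself; on an empty list they
hand back a pointer computed from the head, not a real entry (again a
userspace sketch, not the kernel code):

#include <stdio.h>
#include <stddef.h>

struct list_head { struct list_head *next, *prev; };

#define LIST_HEAD_INIT(name) { &(name), &(name) }
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct dma_chan { int id; struct list_head device_node; };

int main(void)
{
	struct list_head channels = LIST_HEAD_INIT(channels);

	/* The "first entry" of an empty list is just the head's own
	 * address rebased by the member offset -- not a dma_chan.
	 * Reading chan->id here would be garbage. */
	struct dma_chan *chan = container_of(channels.next,
					     struct dma_chan, device_node);

	printf("bogus entry %p vs head %p\n",
	       (void *)chan, (void *)&channels);
	return 0;
}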


> 
> As in the empty case, "&pos->member" is "head" (look into
> list_for_each_entry) and the for loop should loop exactly zero times.
> 
>> Fixes: e81274cd6b52 ("dmaengine: add support to dynamic register/unregister of channels")
>> Reported-by: Swathi Kovvuri <swathi.kovvuri@intel.com>
>> Signed-off-by: Dave Jiang <dave.jiang@intel.com>
>> Tested-by: Swathi Kovvuri <swathi.kovvuri@intel.com>
>> ---
>>
>> Rebased to dmaengine next tree
>>
>>   drivers/dma/dmaengine.c |  119 +++++++++++++++++++++++++++++++++++++----------
>>   1 file changed, 94 insertions(+), 25 deletions(-)
>>
>> diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
>> index 2b06a7a8629d..0d6529eff66f 100644
>> --- a/drivers/dma/dmaengine.c
>> +++ b/drivers/dma/dmaengine.c
>> @@ -85,6 +85,9 @@ static void dmaengine_dbg_summary_show(struct seq_file *s,
>>   {
>>   	struct dma_chan *chan;
>>   
>> +	if (list_empty(&dma_dev->channels))
>> +		return;
>> +
>>   	list_for_each_entry(chan, &dma_dev->channels, device_node) {
>>   		if (chan->client_count) {
>>   			seq_printf(s, " %-13s| %s", dma_chan_name(chan),
>> @@ -104,6 +107,11 @@ static int dmaengine_summary_show(struct seq_file *s, void *data)
>>   	struct dma_device *dma_dev = NULL;
>>   
>>   	mutex_lock(&dma_list_mutex);
>> +	if (list_empty(&dma_device_list)) {
>> +		mutex_unlock(&dma_list_mutex);
>> +		return 0;
>> +	}
>> +
>>   	list_for_each_entry(dma_dev, &dma_device_list, global_node) {
>>   		seq_printf(s, "dma%d (%s): number of channels: %u\n",
>>   			   dma_dev->dev_id, dev_name(dma_dev->dev),
>> @@ -324,10 +332,15 @@ static struct dma_chan *min_chan(enum dma_transaction_type cap, int cpu)
>>   	struct dma_chan *min = NULL;
>>   	struct dma_chan *localmin = NULL;
>>   
>> +	if (list_empty(&dma_device_list))
>> +		return NULL;
>> +
>>   	list_for_each_entry(device, &dma_device_list, global_node) {
>>   		if (!dma_has_cap(cap, device->cap_mask) ||
>>   		    dma_has_cap(DMA_PRIVATE, device->cap_mask))
>>   			continue;
>> +		if (list_empty(&device->channels))
>> +			continue;
>>   		list_for_each_entry(chan, &device->channels, device_node) {
>>   			if (!chan->client_count)
>>   				continue;
>> @@ -365,6 +378,9 @@ static void dma_channel_rebalance(void)
>>   	int cpu;
>>   	int cap;
>>   
>> +	if (list_empty(&dma_device_list))
>> +		return;
>> +
>>   	/* undo the last distribution */
>>   	for_each_dma_cap_mask(cap, dma_cap_mask_all)
>>   		for_each_possible_cpu(cpu)
>> @@ -373,6 +389,8 @@ static void dma_channel_rebalance(void)
>>   	list_for_each_entry(device, &dma_device_list, global_node) {
>>   		if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
>>   			continue;
>> +		if (list_empty(&device->channels))
>> +			continue;
>>   		list_for_each_entry(chan, &device->channels, device_node)
>>   			chan->table_count = 0;
>>   	}
>> @@ -556,6 +574,10 @@ void dma_issue_pending_all(void)
>>   	struct dma_chan *chan;
>>   
>>   	rcu_read_lock();
>> +	if (list_empty(&dma_device_list)) {
>> +		rcu_read_unlock();
>> +		return;
>> +	}
>>   	list_for_each_entry_rcu(device, &dma_device_list, global_node) {
>>   		if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
>>   			continue;
>> @@ -613,6 +635,10 @@ static struct dma_chan *private_candidate(const dma_cap_mask_t *mask,
>>   		dev_dbg(dev->dev, "%s: wrong capabilities\n", __func__);
>>   		return NULL;
>>   	}
>> +
>> +	if (list_empty(&dev->channels))
>> +		return NULL;
>> +
>>   	/* devices with multiple channels need special handling as we need to
>>   	 * ensure that all channels are either private or public.
>>   	 */
>> @@ -749,6 +775,11 @@ struct dma_chan *__dma_request_channel(const dma_cap_mask_t *mask,
>>   
>>   	/* Find a channel */
>>   	mutex_lock(&dma_list_mutex);
>> +	if (list_empty(&dma_device_list)) {
>> +		mutex_unlock(&dma_list_mutex);
>> +		return NULL;
>> +	}
>> +
>>   	list_for_each_entry_safe(device, _d, &dma_device_list, global_node) {
>>   		/* Finds a DMA controller with matching device node */
>>   		if (np && device->dev->of_node && np != device->dev->of_node)
>> @@ -819,6 +850,11 @@ struct dma_chan *dma_request_chan(struct device *dev, const char *name)
>>   
>>   	/* Try to find the channel via the DMA filter map(s) */
>>   	mutex_lock(&dma_list_mutex);
>> +	if (list_empty(&dma_device_list)) {
>> +		mutex_unlock(&dma_list_mutex);
>> +		return NULL;
>> +	}
>> +
>>   	list_for_each_entry_safe(d, _d, &dma_device_list, global_node) {
>>   		dma_cap_mask_t mask;
>>   		const struct dma_slave_map *map = dma_filter_match(d, name, dev);
>> @@ -942,10 +978,17 @@ void dmaengine_get(void)
>>   	mutex_lock(&dma_list_mutex);
>>   	dmaengine_ref_count++;
>>   
>> +	if (list_empty(&dma_device_list)) {
>> +		mutex_unlock(&dma_list_mutex);
>> +		return;
>> +	}
>> +
>>   	/* try to grab channels */
>>   	list_for_each_entry_safe(device, _d, &dma_device_list, global_node) {
>>   		if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
>>   			continue;
>> +		if (list_empty(&device->channels))
>> +			continue;
>>   		list_for_each_entry(chan, &device->channels, device_node) {
>>   			err = dma_chan_get(chan);
>>   			if (err == -ENODEV) {
>> @@ -980,10 +1023,17 @@ void dmaengine_put(void)
>>   	mutex_lock(&dma_list_mutex);
>>   	dmaengine_ref_count--;
>>   	BUG_ON(dmaengine_ref_count < 0);
>> +	if (list_empty(&dma_device_list)) {
>> +		mutex_unlock(&dma_list_mutex);
>> +		return;
>> +	}
>> +
>>   	/* drop channel references */
>>   	list_for_each_entry_safe(device, _d, &dma_device_list, global_node) {
>>   		if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
>>   			continue;
>> +		if (list_empty(&device->channels))
>> +			continue;
>>   		list_for_each_entry(chan, &device->channels, device_node)
>>   			dma_chan_put(chan);
>>   	}
>> @@ -1132,6 +1182,39 @@ void dma_async_device_channel_unregister(struct dma_device *device,
>>   }
>>   EXPORT_SYMBOL_GPL(dma_async_device_channel_unregister);
>>   
>> +static int dma_channel_enumeration(struct dma_device *device)
>> +{
>> +	struct dma_chan *chan;
>> +	int rc;
>> +
>> +	if (list_empty(&device->channels))
>> +		return 0;
>> +
>> +	/* represent channels in sysfs. Probably want devs too */
>> +	list_for_each_entry(chan, &device->channels, device_node) {
>> +		rc = __dma_async_device_channel_register(device, chan);
>> +		if (rc < 0)
>> +			return rc;
>> +	}
>> +
>> +	/* take references on public channels */
>> +	if (dmaengine_ref_count && !dma_has_cap(DMA_PRIVATE, device->cap_mask))
>> +		list_for_each_entry(chan, &device->channels, device_node) {
>> +			/* if clients are already waiting for channels we need
>> +			 * to take references on their behalf
>> +			 */
>> +			if (dma_chan_get(chan) == -ENODEV) {
>> +				/* note we can only get here for the first
>> +				 * channel as the remaining channels are
>> +				 * guaranteed to get a reference
>> +				 */
>> +				return -ENODEV;
>> +			}
>> +		}
>> +
>> +	return 0;
>> +}
>> +
>>   /**
>>    * dma_async_device_register - registers DMA devices found
>>    * @device:	pointer to &struct dma_device
>> @@ -1247,33 +1330,15 @@ int dma_async_device_register(struct dma_device *device)
>>   	if (rc != 0)
>>   		return rc;
>>   
>> +	mutex_lock(&dma_list_mutex);
>>   	mutex_init(&device->chan_mutex);
>>   	ida_init(&device->chan_ida);
>> -
>> -	/* represent channels in sysfs. Probably want devs too */
>> -	list_for_each_entry(chan, &device->channels, device_node) {
>> -		rc = __dma_async_device_channel_register(device, chan);
>> -		if (rc < 0)
>> -			goto err_out;
>> +	rc = dma_channel_enumeration(device);
>> +	if (rc < 0) {
>> +		mutex_unlock(&dma_list_mutex);
>> +		goto err_out;
>>   	}
>>   
>> -	mutex_lock(&dma_list_mutex);
>> -	/* take references on public channels */
>> -	if (dmaengine_ref_count && !dma_has_cap(DMA_PRIVATE, device->cap_mask))
>> -		list_for_each_entry(chan, &device->channels, device_node) {
>> -			/* if clients are already waiting for channels we need
>> -			 * to take references on their behalf
>> -			 */
>> -			if (dma_chan_get(chan) == -ENODEV) {
>> -				/* note we can only get here for the first
>> -				 * channel as the remaining channels are
>> -				 * guaranteed to get a reference
>> -				 */
>> -				rc = -ENODEV;
>> -				mutex_unlock(&dma_list_mutex);
>> -				goto err_out;
>> -			}
>> -		}
>>   	list_add_tail_rcu(&device->global_node, &dma_device_list);
>>   	if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
>>   		device->privatecnt++;	/* Always private */
>> @@ -1291,6 +1356,9 @@ int dma_async_device_register(struct dma_device *device)
>>   		return rc;
>>   	}
>>   
>> +	if (list_empty(&device->channels))
>> +		return rc;
>> +
>>   	list_for_each_entry(chan, &device->channels, device_node) {
>>   		if (chan->local == NULL)
>>   			continue;
>> @@ -1317,8 +1385,9 @@ void dma_async_device_unregister(struct dma_device *device)
>>   
>>   	dmaengine_debug_unregister(device);
>>   
>> -	list_for_each_entry_safe(chan, n, &device->channels, device_node)
>> -		__dma_async_device_channel_unregister(device, chan);
>> +	if (!list_empty(&device->channels))
>> +		list_for_each_entry_safe(chan, n, &device->channels, device_node)
>> +			__dma_async_device_channel_unregister(device, chan);
>>   
>>   	mutex_lock(&dma_list_mutex);
>>   	/*
> 
> 
> 


* Re: [PATCH v2] dmaengine: check device and channel list for empty
  2020-07-07  8:50   ` Peter Ujfalusi
@ 2020-07-07 15:45     ` Dave Jiang
  0 siblings, 0 replies; 6+ messages in thread
From: Dave Jiang @ 2020-07-07 15:45 UTC (permalink / raw)
  To: Peter Ujfalusi, Jiri Slaby, vkoul
  Cc: Swathi Kovvuri, dmaengine, Linux kernel mailing list



On 7/7/2020 1:50 AM, Peter Ujfalusi wrote:
> 
> 
> On 07/07/2020 9.05, Jiri Slaby wrote:
>> On 26. 06. 20, 20:09, Dave Jiang wrote:
>>> Check the dma device list and channel list for empty before iterating, as
>>> the iteration functions assume the lists are not empty. With devices and
>>> channels now being hot pluggable, this is a condition that needs to be
>>> checked. Otherwise it can cause the iterator to spin forever.
>>
>> Could you be a little bit more specific about how this can spin forever? I.e.
>> can you attach a stacktrace of such behaviour?
>>
>> As in the empty case, "&pos->member" is "head" (look into
>> list_for_each_entry) and the for loop should loop exactly zero times.
> 
> This is my understanding as well.
> 
> Isn't it more plausible that you have a race between
> dma_async_device_register() / dma_async_device_unregister() /
> dma_async_device_channel_register() /
> dma_async_device_channel_unregister()?
> 
> It looks like there is unbalanced locking between
> dma_async_device_channel_register() and
> dma_async_device_channel_unregister().
> 
> The latter locks the dma_list_mutex for a short while, while the former
> does not.
> Both device_register/unregister lock the same dma_list_mutex at some point.


It is possible there's a race as well, in addition to the issue that the patch
fixes. I'll take a look and see if an additional fix is needed for the
unbalanced locking. Thanks for checking, Peter.


> 
>>> Fixes: e81274cd6b52 ("dmaengine: add support to dynamic register/unregister of channels")
>>> Reported-by: Swathi Kovvuri <swathi.kovvuri@intel.com>
>>> Signed-off-by: Dave Jiang <dave.jiang@intel.com>
>>> Tested-by: Swathi Kovvuri <swathi.kovvuri@intel.com>
>>> ---
>>>
>>> Rebased to dmaengine next tree
>>>
>>>   drivers/dma/dmaengine.c |  119 +++++++++++++++++++++++++++++++++++++----------
>>>   1 file changed, 94 insertions(+), 25 deletions(-)
>>>
>>> diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
>>> index 2b06a7a8629d..0d6529eff66f 100644
>>> --- a/drivers/dma/dmaengine.c
>>> +++ b/drivers/dma/dmaengine.c
> 
> ...
> 
>>> +static int dma_channel_enumeration(struct dma_device *device)
>>> +{
>>> +	struct dma_chan *chan;
>>> +	int rc;
>>> +
>>> +	if (list_empty(&device->channels))
>>> +		return 0;
>>> +
>>> +	/* represent channels in sysfs. Probably want devs too */
>>> +	list_for_each_entry(chan, &device->channels, device_node) {
>>> +		rc = __dma_async_device_channel_register(device, chan);
>>> +		if (rc < 0)
>>> +			return rc;
>>> +	}
>>> +
>>> +	/* take references on public channels */
>>> +	if (dmaengine_ref_count && !dma_has_cap(DMA_PRIVATE, device->cap_mask))
>>> +		list_for_each_entry(chan, &device->channels, device_node) {
>>> +			/* if clients are already waiting for channels we need
>>> +			 * to take references on their behalf
>>> +			 */
>>> +			if (dma_chan_get(chan) == -ENODEV) {
>>> +				/* note we can only get here for the first
>>> +				 * channel as the remaining channels are
>>> +				 * guaranteed to get a reference
>>> +				 */
>>> +				return -ENODEV;
>>> +			}
>>> +		}
>>> +
>>> +	return 0;
>>> +}
>>> +
>>>   /**
>>>    * dma_async_device_register - registers DMA devices found
>>>    * @device:	pointer to &struct dma_device
>>> @@ -1247,33 +1330,15 @@ int dma_async_device_register(struct dma_device *device)
>>>   	if (rc != 0)
>>>   		return rc;
>>>   
>>> +	mutex_lock(&dma_list_mutex);
>>>   	mutex_init(&device->chan_mutex);
>>>   	ida_init(&device->chan_ida);
>>> -
>>> -	/* represent channels in sysfs. Probably want devs too */
>>> -	list_for_each_entry(chan, &device->channels, device_node) {
>>> -		rc = __dma_async_device_channel_register(device, chan);
>>> -		if (rc < 0)
>>> -			goto err_out;
>>>
>>> +	rc = dma_channel_enumeration(device);
>>> +	if (rc < 0) {
>>> +		mutex_unlock(&dma_list_mutex);
>>> +		goto err_out;
>>>   	}
> 
> Here you effectively moved the __dma_async_device_channel_register()
> under dma_list_mutex.
> 
> 
>>>   
>>> -	mutex_lock(&dma_list_mutex);
>>> -	/* take references on public channels */
>>> -	if (dmaengine_ref_count && !dma_has_cap(DMA_PRIVATE, device->cap_mask))
>>> -		list_for_each_entry(chan, &device->channels, device_node) {
>>> -			/* if clients are already waiting for channels we need
>>> -			 * to take references on their behalf
>>> -			 */
>>> -			if (dma_chan_get(chan) == -ENODEV) {
>>> -				/* note we can only get here for the first
>>> -				 * channel as the remaining channels are
>>> -				 * guaranteed to get a reference
>>> -				 */
>>> -				rc = -ENODEV;
>>> -				mutex_unlock(&dma_list_mutex);
>>> -				goto err_out;
>>> -			}
>>> -		}
>>>   	list_add_tail_rcu(&device->global_node, &dma_device_list);
>>>   	if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
>>>   		device->privatecnt++;	/* Always private */
>>> @@ -1291,6 +1356,9 @@ int dma_async_device_register(struct dma_device *device)
>>>   		return rc;
>>>   	}
>>>   
>>> +	if (list_empty(&device->channels))
>>> +		return rc;
>>> +
>>>   	list_for_each_entry(chan, &device->channels, device_node) {
>>>   		if (chan->local == NULL)
>>>   			continue;
>>> @@ -1317,8 +1385,9 @@ void dma_async_device_unregister(struct dma_device *device)
>>>   
>>>   	dmaengine_debug_unregister(device);
>>>   
>>> -	list_for_each_entry_safe(chan, n, &device->channels, device_node)
>>> -		__dma_async_device_channel_unregister(device, chan);
>>> +	if (!list_empty(&device->channels))
>>> +		list_for_each_entry_safe(chan, n, &device->channels, device_node)
>>> +			__dma_async_device_channel_unregister(device, chan);
>>>   
>>>   	mutex_lock(&dma_list_mutex);
>>>   	/*
>>
>>
>>
> 
> - Péter
> 
> Texas Instruments Finland Oy, Porkkalankatu 22, 00180 Helsinki.
> Y-tunnus/Business ID: 0615521-4. Kotipaikka/Domicile: Helsinki
> 

