From: Dave Jiang <dave.jiang@intel.com>
To: vkoul@kernel.org
Cc: dmaengine@vger.kernel.org, swathi.kovvuri@intel.com
Subject: Re: [PATCH] dmaengine: check device and channel list for empty
Date: Fri, 5 Jun 2020 11:58:59 -0700 [thread overview]
Message-ID: <448ecefc-7977-4fc4-768f-9b9ce3cfd05d@intel.com> (raw)
In-Reply-To: <158957055210.11529.14023177009907426289.stgit@djiang5-desk3.ch.intel.com>
On 5/15/2020 12:22 PM, Dave Jiang wrote:
> Check the dma device list and channel list for emptiness before iterating, as
> the iteration functions assume the lists are not empty. With devices and
> channels now being hot pluggable, this is a condition that needs to be
> checked. Otherwise it can cause the iterator to spin forever.
>
> Fixes: e81274cd6b52 ("dmaengine: add support to dynamic register/unregister of channels")
>
> Reported-by: Swathi Kovvuri <swathi.kovvuri@intel.com>
> Signed-off-by: Dave Jiang <dave.jiang@intel.com>
> Tested-by: Swathi Kovvuri <swathi.kovvuri@intel.com>
Hi Vinod. Ping on this submission.
> ---
> drivers/dma/dmaengine.c | 119 +++++++++++++++++++++++++++++++++++++----------
> 1 file changed, 94 insertions(+), 25 deletions(-)
>
> diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
> index d31076d9ef25..4d29c5f2fcfd 100644
> --- a/drivers/dma/dmaengine.c
> +++ b/drivers/dma/dmaengine.c
> @@ -83,6 +83,9 @@ static void dmaengine_dbg_summary_show(struct seq_file *s,
> {
> struct dma_chan *chan;
>
> + if (list_empty(&dma_dev->channels))
> + return;
> +
> list_for_each_entry(chan, &dma_dev->channels, device_node) {
> if (chan->client_count) {
> seq_printf(s, " %-13s| %s", dma_chan_name(chan),
> @@ -102,6 +105,11 @@ static int dmaengine_summary_show(struct seq_file *s, void *data)
> struct dma_device *dma_dev = NULL;
>
> mutex_lock(&dma_list_mutex);
> + if (list_empty(&dma_device_list)) {
> + mutex_unlock(&dma_list_mutex);
> + return 0;
> + }
> +
> list_for_each_entry(dma_dev, &dma_device_list, global_node) {
> seq_printf(s, "dma%d (%s): number of channels: %u\n",
> dma_dev->dev_id, dev_name(dma_dev->dev),
> @@ -323,10 +331,15 @@ static struct dma_chan *min_chan(enum dma_transaction_type cap, int cpu)
> struct dma_chan *min = NULL;
> struct dma_chan *localmin = NULL;
>
> + if (list_empty(&dma_device_list))
> + return NULL;
> +
> list_for_each_entry(device, &dma_device_list, global_node) {
> if (!dma_has_cap(cap, device->cap_mask) ||
> dma_has_cap(DMA_PRIVATE, device->cap_mask))
> continue;
> + if (list_empty(&device->channels))
> + continue;
> list_for_each_entry(chan, &device->channels, device_node) {
> if (!chan->client_count)
> continue;
> @@ -363,6 +376,9 @@ static void dma_channel_rebalance(void)
> int cpu;
> int cap;
>
> + if (list_empty(&dma_device_list))
> + return;
> +
> /* undo the last distribution */
> for_each_dma_cap_mask(cap, dma_cap_mask_all)
> for_each_possible_cpu(cpu)
> @@ -371,6 +387,8 @@ static void dma_channel_rebalance(void)
> list_for_each_entry(device, &dma_device_list, global_node) {
> if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
> continue;
> + if (list_empty(&device->channels))
> + continue;
> list_for_each_entry(chan, &device->channels, device_node)
> chan->table_count = 0;
> }
> @@ -554,6 +572,10 @@ void dma_issue_pending_all(void)
> struct dma_chan *chan;
>
> rcu_read_lock();
> + if (list_empty(&dma_device_list)) {
> + rcu_read_unlock();
> + return;
> + }
> list_for_each_entry_rcu(device, &dma_device_list, global_node) {
> if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
> continue;
> @@ -611,6 +633,10 @@ static struct dma_chan *private_candidate(const dma_cap_mask_t *mask,
> dev_dbg(dev->dev, "%s: wrong capabilities\n", __func__);
> return NULL;
> }
> +
> + if (list_empty(&dev->channels))
> + return NULL;
> +
> /* devices with multiple channels need special handling as we need to
> * ensure that all channels are either private or public.
> */
> @@ -747,6 +773,11 @@ struct dma_chan *__dma_request_channel(const dma_cap_mask_t *mask,
>
> /* Find a channel */
> mutex_lock(&dma_list_mutex);
> + if (list_empty(&dma_device_list)) {
> + mutex_unlock(&dma_list_mutex);
> + return NULL;
> + }
> +
> list_for_each_entry_safe(device, _d, &dma_device_list, global_node) {
> /* Finds a DMA controller with matching device node */
> if (np && device->dev->of_node && np != device->dev->of_node)
> @@ -817,6 +848,11 @@ struct dma_chan *dma_request_chan(struct device *dev, const char *name)
>
> /* Try to find the channel via the DMA filter map(s) */
> mutex_lock(&dma_list_mutex);
> + if (list_empty(&dma_device_list)) {
> + mutex_unlock(&dma_list_mutex);
> + return NULL;
> + }
> +
> list_for_each_entry_safe(d, _d, &dma_device_list, global_node) {
> dma_cap_mask_t mask;
> const struct dma_slave_map *map = dma_filter_match(d, name, dev);
> @@ -940,10 +976,17 @@ void dmaengine_get(void)
> mutex_lock(&dma_list_mutex);
> dmaengine_ref_count++;
>
> + if (list_empty(&dma_device_list)) {
> + mutex_unlock(&dma_list_mutex);
> + return;
> + }
> +
> /* try to grab channels */
> list_for_each_entry_safe(device, _d, &dma_device_list, global_node) {
> if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
> continue;
> + if (list_empty(&device->channels))
> + continue;
> list_for_each_entry(chan, &device->channels, device_node) {
> err = dma_chan_get(chan);
> if (err == -ENODEV) {
> @@ -978,10 +1021,17 @@ void dmaengine_put(void)
> mutex_lock(&dma_list_mutex);
> dmaengine_ref_count--;
> BUG_ON(dmaengine_ref_count < 0);
> + if (list_empty(&dma_device_list)) {
> + mutex_unlock(&dma_list_mutex);
> + return;
> + }
> +
> /* drop channel references */
> list_for_each_entry_safe(device, _d, &dma_device_list, global_node) {
> if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
> continue;
> + if (list_empty(&device->channels))
> + continue;
> list_for_each_entry(chan, &device->channels, device_node)
> dma_chan_put(chan);
> }
> @@ -1130,6 +1180,39 @@ void dma_async_device_channel_unregister(struct dma_device *device,
> }
> EXPORT_SYMBOL_GPL(dma_async_device_channel_unregister);
>
> +static int dma_channel_enumeration(struct dma_device *device)
> +{
> + struct dma_chan *chan;
> + int rc;
> +
> + if (list_empty(&device->channels))
> + return 0;
> +
> + /* represent channels in sysfs. Probably want devs too */
> + list_for_each_entry(chan, &device->channels, device_node) {
> + rc = __dma_async_device_channel_register(device, chan);
> + if (rc < 0)
> + return rc;
> + }
> +
> + /* take references on public channels */
> + if (dmaengine_ref_count && !dma_has_cap(DMA_PRIVATE, device->cap_mask))
> + list_for_each_entry(chan, &device->channels, device_node) {
> + /* if clients are already waiting for channels we need
> + * to take references on their behalf
> + */
> + if (dma_chan_get(chan) == -ENODEV) {
> + /* note we can only get here for the first
> + * channel as the remaining channels are
> + * guaranteed to get a reference
> + */
> + return -ENODEV;
> + }
> + }
> +
> + return 0;
> +}
> +
> /**
> * dma_async_device_register - registers DMA devices found
> * @device: &dma_device
> @@ -1245,33 +1328,15 @@ int dma_async_device_register(struct dma_device *device)
> if (rc != 0)
> return rc;
>
> + mutex_lock(&dma_list_mutex);
> mutex_init(&device->chan_mutex);
> ida_init(&device->chan_ida);
> -
> - /* represent channels in sysfs. Probably want devs too */
> - list_for_each_entry(chan, &device->channels, device_node) {
> - rc = __dma_async_device_channel_register(device, chan);
> - if (rc < 0)
> - goto err_out;
> + rc = dma_channel_enumeration(device);
> + if (rc < 0) {
> + mutex_unlock(&dma_list_mutex);
> + goto err_out;
> }
>
> - mutex_lock(&dma_list_mutex);
> - /* take references on public channels */
> - if (dmaengine_ref_count && !dma_has_cap(DMA_PRIVATE, device->cap_mask))
> - list_for_each_entry(chan, &device->channels, device_node) {
> - /* if clients are already waiting for channels we need
> - * to take references on their behalf
> - */
> - if (dma_chan_get(chan) == -ENODEV) {
> - /* note we can only get here for the first
> - * channel as the remaining channels are
> - * guaranteed to get a reference
> - */
> - rc = -ENODEV;
> - mutex_unlock(&dma_list_mutex);
> - goto err_out;
> - }
> - }
> list_add_tail_rcu(&device->global_node, &dma_device_list);
> if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
> device->privatecnt++; /* Always private */
> @@ -1289,6 +1354,9 @@ int dma_async_device_register(struct dma_device *device)
> return rc;
> }
>
> + if (list_empty(&device->channels))
> + return rc;
> +
> list_for_each_entry(chan, &device->channels, device_node) {
> if (chan->local == NULL)
> continue;
> @@ -1315,8 +1383,9 @@ void dma_async_device_unregister(struct dma_device *device)
>
> dmaengine_debug_unregister(device);
>
> - list_for_each_entry_safe(chan, n, &device->channels, device_node)
> - __dma_async_device_channel_unregister(device, chan);
> + if (!list_empty(&device->channels))
> + list_for_each_entry_safe(chan, n, &device->channels, device_node)
> + __dma_async_device_channel_unregister(device, chan);
>
> mutex_lock(&dma_list_mutex);
> /*
>
next prev parent reply other threads:[~2020-06-05 18:59 UTC|newest]
Thread overview: 5+ messages / expand[flat|nested] mbox.gz Atom feed top
2020-05-15 19:22 [PATCH] dmaengine: check device and channel list for empty Dave Jiang
2020-06-05 18:58 ` Dave Jiang [this message]
2020-06-24 7:29 ` Vinod Koul
2020-06-25 18:31 ` Dave Jiang
2020-06-26 6:39 ` Vinod Koul
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=448ecefc-7977-4fc4-768f-9b9ce3cfd05d@intel.com \
--to=dave.jiang@intel.com \
--cc=dmaengine@vger.kernel.org \
--cc=swathi.kovvuri@intel.com \
--cc=vkoul@kernel.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).