From: "Tian, Kevin" <kevin.tian@intel.com>
To: Jacob Pan <jacob.jun.pan@linux.intel.com>,
	Joerg Roedel <joro@8bytes.org>,
	 Alex Williamson <alex.williamson@redhat.com>,
	Lu Baolu <baolu.lu@linux.intel.com>,
	"iommu@lists.linux-foundation.org"
	<iommu@lists.linux-foundation.org>,
	LKML <linux-kernel@vger.kernel.org>,
	David Woodhouse <dwmw2@infradead.org>,
	Jean-Philippe Brucker <jean-philippe@linaro.com>
Cc: "Raj, Ashok" <ashok.raj@intel.com>, Jonathan Cameron <jic23@kernel.org>
Subject: RE: [PATCH 07/10] iommu/ioasid: Use mutex instead of spinlock
Date: Fri, 27 Mar 2020 09:55:43 +0000
Message-ID: <AADFC41AFE54684AB9EE6CBC0274A5D19D7ED597@SHSMSX104.ccr.corp.intel.com>
In-Reply-To: <1585158931-1825-8-git-send-email-jacob.jun.pan@linux.intel.com>

> From: Jacob Pan <jacob.jun.pan@linux.intel.com>
> Sent: Thursday, March 26, 2020 1:55 AM
> 
> Each IOASID or set can have multiple users, each with its own HW context
> to maintain. Access to the HW context often requires thread (sleepable)
> context; for example, consumers of IOASIDs can register notifier blocks
> to sync up their state, and an atomic notifier is not suitable for such
> update operations.
> 
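For context: the constraint comes from the notifier type. Callbacks on a
blocking notifier chain are allowed to sleep, so the chain must not be
invoked while holding a spinlock. Below is a minimal sketch of the usage
pattern this enables, with illustrative names that are not part of this
series:

	#include <linux/ioasid.h>
	#include <linux/mutex.h>
	#include <linux/notifier.h>

	static BLOCKING_NOTIFIER_HEAD(demo_ioasid_chain);
	static DEFINE_MUTEX(demo_lock);

	/* A consumer callback on a blocking chain may sleep, e.g. take
	 * its own mutex while syncing per-IOASID HW context.
	 */
	static int demo_ioasid_event(struct notifier_block *nb,
				     unsigned long event, void *data)
	{
		return NOTIFY_OK;
	}

	static struct notifier_block demo_nb = {
		.notifier_call	= demo_ioasid_event,
	};

	static void demo_notify(unsigned long event, ioasid_t id)
	{
		mutex_lock(&demo_lock);
		/* Would risk sleeping in atomic context under a spinlock */
		blocking_notifier_call_chain(&demo_ioasid_chain, event, &id);
		mutex_unlock(&demo_lock);
	}

Consumers would register demo_nb with blocking_notifier_chain_register()
before expecting events.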
> This patch converts the allocator lock from a spinlock to a mutex in
> preparation for the IOASID notifier.
> 
> Signed-off-by: Jacob Pan <jacob.jun.pan@linux.intel.com>
> ---
>  drivers/iommu/ioasid.c | 45 +++++++++++++++++++++++----------------------
>  1 file changed, 23 insertions(+), 22 deletions(-)
> 
> diff --git a/drivers/iommu/ioasid.c b/drivers/iommu/ioasid.c
> index f89a595f6978..8612fe6477dc 100644
> --- a/drivers/iommu/ioasid.c
> +++ b/drivers/iommu/ioasid.c
> @@ -98,7 +98,7 @@ struct ioasid_allocator_data {
>  	struct rcu_head rcu;
>  };
> 
> -static DEFINE_SPINLOCK(ioasid_allocator_lock);
> +static DEFINE_MUTEX(ioasid_allocator_lock);
>  static LIST_HEAD(allocators_list);
> 
>  static ioasid_t default_alloc(ioasid_t min, ioasid_t max, void *opaque);
> @@ -121,7 +121,7 @@ static ioasid_t default_alloc(ioasid_t min, ioasid_t max, void *opaque)
>  {
>  	ioasid_t id;
> 
> -	if (xa_alloc(&default_allocator.xa, &id, opaque, XA_LIMIT(min, max), GFP_ATOMIC)) {
> +	if (xa_alloc(&default_allocator.xa, &id, opaque, XA_LIMIT(min, max), GFP_KERNEL)) {
>  		pr_err("Failed to alloc ioasid from %d to %d\n", min, max);
>  		return INVALID_IOASID;
>  	}
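The GFP_ATOMIC -> GFP_KERNEL switches in this patch follow directly from the
lock conversion: a GFP_KERNEL allocation may sleep to reclaim memory, which
is legal under a mutex but forbidden under a spinlock. A minimal sketch of
that rule, with illustrative names:

	#include <linux/mutex.h>
	#include <linux/xarray.h>

	static DEFINE_MUTEX(demo_alloc_lock);

	static int demo_alloc(struct xarray *xa, void *entry, u32 *id)
	{
		int ret;

		mutex_lock(&demo_alloc_lock);
		/* GFP_KERNEL may sleep; this is only safe because
		 * demo_alloc_lock is a mutex, not a spinlock.
		 */
		ret = xa_alloc(xa, id, entry, xa_limit_31b, GFP_KERNEL);
		mutex_unlock(&demo_alloc_lock);

		return ret;
	}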
> @@ -142,7 +142,7 @@ static struct ioasid_allocator_data *ioasid_alloc_allocator(struct ioasid_alloca
>  {
>  	struct ioasid_allocator_data *ia_data;
> 
> -	ia_data = kzalloc(sizeof(*ia_data), GFP_ATOMIC);
> +	ia_data = kzalloc(sizeof(*ia_data), GFP_KERNEL);
>  	if (!ia_data)
>  		return NULL;
> 
> @@ -184,7 +184,7 @@ int ioasid_register_allocator(struct ioasid_allocator_ops *ops)
>  	struct ioasid_allocator_data *pallocator;
>  	int ret = 0;
> 
> -	spin_lock(&ioasid_allocator_lock);
> +	mutex_lock(&ioasid_allocator_lock);
> 
>  	ia_data = ioasid_alloc_allocator(ops);
>  	if (!ia_data) {
> @@ -228,12 +228,12 @@ int ioasid_register_allocator(struct ioasid_allocator_ops *ops)
>  	}
>  	list_add_tail(&ia_data->list, &allocators_list);
> 
> -	spin_unlock(&ioasid_allocator_lock);
> +	mutex_unlock(&ioasid_allocator_lock);
>  	return 0;
>  out_free:
>  	kfree(ia_data);
>  out_unlock:
> -	spin_unlock(&ioasid_allocator_lock);
> +	mutex_unlock(&ioasid_allocator_lock);
>  	return ret;
>  }
>  EXPORT_SYMBOL_GPL(ioasid_register_allocator);
> @@ -251,7 +251,7 @@ void ioasid_unregister_allocator(struct ioasid_allocator_ops *ops)
>  	struct ioasid_allocator_data *pallocator;
>  	struct ioasid_allocator_ops *sops;
> 
> -	spin_lock(&ioasid_allocator_lock);
> +	mutex_lock(&ioasid_allocator_lock);
>  	if (list_empty(&allocators_list)) {
>  		pr_warn("No custom IOASID allocators active!\n");
>  		goto exit_unlock;
> @@ -296,7 +296,7 @@ void ioasid_unregister_allocator(struct ioasid_allocator_ops *ops)
>  	}
> 
>  exit_unlock:
> -	spin_unlock(&ioasid_allocator_lock);
> +	mutex_unlock(&ioasid_allocator_lock);
>  }
>  EXPORT_SYMBOL_GPL(ioasid_unregister_allocator);
> 
> @@ -313,13 +313,13 @@ int ioasid_attach_data(ioasid_t ioasid, void *data)
>  	struct ioasid_data *ioasid_data;
>  	int ret = 0;
> 
> -	spin_lock(&ioasid_allocator_lock);
> +	mutex_lock(&ioasid_allocator_lock);
>  	ioasid_data = xa_load(&active_allocator->xa, ioasid);
>  	if (ioasid_data)
>  		rcu_assign_pointer(ioasid_data->private, data);
>  	else
>  		ret = -ENOENT;
> -	spin_unlock(&ioasid_allocator_lock);
> +	mutex_unlock(&ioasid_allocator_lock);
> 
>  	/*
>  	 * Wait for readers to stop accessing the old private data, so the
> @@ -374,7 +374,7 @@ ioasid_t ioasid_alloc(int sid, ioasid_t min, ioasid_t max, void *private)
>  	 * Custom allocator needs allocator data to perform platform specific
>  	 * operations.
>  	 */
> -	spin_lock(&ioasid_allocator_lock);
> +	mutex_lock(&ioasid_allocator_lock);
>  	adata = active_allocator->flags & IOASID_ALLOCATOR_CUSTOM ? active_allocator->ops->pdata : data;
>  	id = active_allocator->ops->alloc(min, max, adata);
>  	if (id == INVALID_IOASID) {
> @@ -383,7 +383,7 @@ ioasid_t ioasid_alloc(int sid, ioasid_t min, ioasid_t max, void *private)
>  	}
> 
>  	if ((active_allocator->flags & IOASID_ALLOCATOR_CUSTOM) &&
> -	     xa_alloc(&active_allocator->xa, &id, data, XA_LIMIT(id, id), GFP_ATOMIC)) {
> +	     xa_alloc(&active_allocator->xa, &id, data, XA_LIMIT(id, id), GFP_KERNEL)) {
>  		/* Custom allocator needs framework to store and track allocation results */
>  		pr_err("Failed to alloc ioasid from %d\n", id);
>  		active_allocator->ops->free(id, active_allocator->ops->pdata);
> @@ -394,10 +394,11 @@ ioasid_t ioasid_alloc(int sid, ioasid_t min, ioasid_t max, void *private)
>  	/* Store IOASID in the per set data */
>  	xa_store(&sdata->xa, id, data, GFP_KERNEL);
>  	sdata->nr_ioasids++;
> -	spin_unlock(&ioasid_allocator_lock);
> +	mutex_unlock(&ioasid_allocator_lock);
> +
>  	return id;
>  exit_free:
> -	spin_unlock(&ioasid_allocator_lock);
> +	mutex_unlock(&ioasid_allocator_lock);
>  	kfree(data);
>  	return INVALID_IOASID;
>  }
> @@ -440,9 +441,9 @@ static void ioasid_free_locked(ioasid_t ioasid)
>   */
>  void ioasid_free(ioasid_t ioasid)
>  {
> -	spin_lock(&ioasid_allocator_lock);
> +	mutex_lock(&ioasid_allocator_lock);
>  	ioasid_free_locked(ioasid);
> -	spin_unlock(&ioasid_allocator_lock);
> +	mutex_unlock(&ioasid_allocator_lock);
>  }
>  EXPORT_SYMBOL_GPL(ioasid_free);
> 
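The ioasid_free()/ioasid_free_locked() split above is the usual pattern for
sharing teardown code with paths that already hold the lock. A generic
sketch of the pattern, with hypothetical names:

	#include <linux/lockdep.h>
	#include <linux/mutex.h>

	static DEFINE_MUTEX(demo_lock);

	/* Callers must already hold demo_lock. */
	static void demo_free_locked(u32 id)
	{
		lockdep_assert_held(&demo_lock);
		/* ... tear down the entry for @id ... */
	}

	void demo_free(u32 id)
	{
		mutex_lock(&demo_lock);
		demo_free_locked(id);
		mutex_unlock(&demo_lock);
	}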
> @@ -473,7 +474,7 @@ int ioasid_alloc_set(struct ioasid_set *token, ioasid_t quota, int *sid)
>  	if (!sdata)
>  		return -ENOMEM;
> 
> -	spin_lock(&ioasid_allocator_lock);
> +	mutex_lock(&ioasid_allocator_lock);
> 
>  	ret = xa_alloc(&ioasid_sets, &id, sdata,
>  		       XA_LIMIT(0, ioasid_capacity_avail - quota),
> @@ -497,7 +498,7 @@ int ioasid_alloc_set(struct ioasid_set *token, ioasid_t quota, int *sid)
>  	*sid = id;
> 
>  error:
> -	spin_unlock(&ioasid_allocator_lock);
> +	mutex_unlock(&ioasid_allocator_lock);
> 
>  	return ret;
>  }
> @@ -518,7 +519,7 @@ void ioasid_free_set(int sid, bool destroy_set)
>  	struct ioasid_data *entry;
>  	unsigned long index;
> 
> -	spin_lock(&ioasid_allocator_lock);
> +	mutex_lock(&ioasid_allocator_lock);
>  	sdata = xa_load(&ioasid_sets, sid);
>  	if (!sdata) {
>  		pr_err("No IOASID set found to free %d\n", sid);
> @@ -549,7 +550,7 @@ void ioasid_free_set(int sid, bool destroy_set)
>  	}
> 
>  done_unlock:
> -	spin_unlock(&ioasid_allocator_lock);
> +	mutex_unlock(&ioasid_allocator_lock);
>  }
>  EXPORT_SYMBOL_GPL(ioasid_free_set);
> 
> @@ -613,11 +614,11 @@ int ioasid_find_sid(ioasid_t ioasid)
>  	struct ioasid_data *ioasid_data;
>  	int ret = 0;
> 
> -	spin_lock(&ioasid_allocator_lock);
> +	mutex_lock(&ioasid_allocator_lock);
>  	ioasid_data = xa_load(&active_allocator->xa, ioasid);
>  	ret = (ioasid_data) ? ioasid_data->sdata->sid : -ENOENT;
> 
> -	spin_unlock(&ioasid_allocator_lock);
> +	mutex_unlock(&ioasid_allocator_lock);
> 
>  	return ret;
>  }
> --
> 2.7.4

Reviewed-by: Kevin Tian <kevin.tian@intel.com>

Thread overview: 57+ messages
2020-03-25 17:55 [PATCH 00/10] IOASID extensions for guest SVA Jacob Pan
2020-03-25 17:55 ` [PATCH 01/10] iommu/ioasid: Introduce system-wide capacity Jacob Pan
2020-03-27  8:07   ` Tian, Kevin
2020-03-27 16:08     ` Jacob Pan
2020-04-01 13:45   ` Jean-Philippe Brucker
2020-04-01 22:50     ` Jacob Pan
2020-03-25 17:55 ` [PATCH 02/10] iommu/vt-d: Set IOASID capacity when SVM is enabled Jacob Pan
2020-03-27  8:08   ` Tian, Kevin
2020-03-25 17:55 ` [PATCH 03/10] iommu/ioasid: Introduce per set allocation APIs Jacob Pan
2020-03-26  2:12   ` Lu Baolu
2020-03-26 21:30     ` Jacob Pan
2020-03-27  8:38   ` Tian, Kevin
2020-03-27 16:59     ` Jacob Pan
2020-03-28  6:32       ` Tian, Kevin
2020-04-01 13:47   ` Jean-Philippe Brucker
2020-04-06 20:02     ` Jacob Pan
2020-04-07 11:01       ` Jean-Philippe Brucker
2020-04-21 21:51         ` Jacob Pan
2020-03-25 17:55 ` [PATCH 04/10] iommu/ioasid: Rename ioasid_set_data to avoid confusion with ioasid_set Jacob Pan
2020-03-27  9:35   ` Tian, Kevin
2020-03-25 17:55 ` [PATCH 05/10] iommu/ioasid: Create an IOASID set for host SVA use Jacob Pan
2020-03-27  9:41   ` Tian, Kevin
2020-03-27 17:28     ` Jacob Pan
2020-03-28  6:33       ` Tian, Kevin
2020-04-01 13:53   ` Jean-Philippe Brucker
2020-04-06 15:33     ` Jacob Pan
2020-04-07 11:01       ` Jean-Philippe Brucker
2020-04-13 22:06         ` Jacob Pan
2020-04-15 15:10           ` Jean-Philippe Brucker
2020-03-25 17:55 ` [PATCH 06/10] iommu/ioasid: Convert to set aware allocations Jacob Pan
2020-03-27  9:54   ` Tian, Kevin
2020-03-27 17:41     ` Jacob Pan
2020-03-28  6:40       ` Tian, Kevin
2020-04-06 20:07         ` Jacob Pan
2020-04-01 13:55   ` Jean-Philippe Brucker
2020-04-01 22:45     ` Jacob Pan
2020-03-25 17:55 ` [PATCH 07/10] iommu/ioasid: Use mutex instead of spinlock Jacob Pan
2020-03-27  9:55   ` Tian, Kevin [this message]
2020-04-01 13:58   ` Jean-Philippe Brucker
2020-03-25 17:55 ` [PATCH 08/10] iommu/ioasid: Introduce notifier APIs Jacob Pan
2020-03-27 10:03   ` Tian, Kevin
2020-03-27 18:36     ` Jacob Pan
2020-03-28  6:43       ` Tian, Kevin
2020-03-31 15:13         ` Jacob Pan
2020-04-01 14:00   ` Jean-Philippe Brucker
2020-04-10 15:43     ` Jacob Pan
2020-03-25 17:55 ` [PATCH 09/10] iommu/ioasid: Support ioasid_set quota adjustment Jacob Pan
2020-03-27 10:09   ` Tian, Kevin
2020-03-27 23:30     ` Jacob Pan
2020-03-28  6:44       ` Tian, Kevin
2020-03-25 17:55 ` [PATCH 10/10] iommu/vt-d: Register PASID notifier for status change Jacob Pan
2020-03-27 10:22   ` Tian, Kevin
2020-03-27 23:47     ` Jacob Pan
2020-04-01 14:03 ` [PATCH 00/10] IOASID extensions for guest SVA Jean-Philippe Brucker
2020-04-01 23:38   ` Jacob Pan
2020-04-02 12:26     ` Jean-Philippe Brucker
2020-04-02 16:09       ` Jacob Pan
