From: Lu Baolu <baolu.lu@linux.intel.com>
To: Joerg Roedel, Kevin Tian, Ashok Raj, Christoph Hellwig,
	Jason Gunthorpe
Cc: Will Deacon, Robin Murphy, Liu Yi L, Jacob jun Pan,
	iommu@lists.linux-foundation.org, linux-kernel@vger.kernel.org,
	Lu Baolu <baolu.lu@linux.intel.com>
Subject: [PATCH v2 11/12] iommu/vt-d: Use device_domain_lock accurately
Date: Tue, 14 Jun 2022 10:51:36 +0800
Message-Id: <20220614025137.1632762-12-baolu.lu@linux.intel.com>
X-Mailer: git-send-email 2.25.1
In-Reply-To: <20220614025137.1632762-1-baolu.lu@linux.intel.com>
References: <20220614025137.1632762-1-baolu.lu@linux.intel.com>

The device_domain_lock is used to protect the device tracking list of
a domain. Remove the unnecessary spin_lock/unlock() calls and move the
necessary ones so that they wrap only the list accesses.
Signed-off-by: Lu Baolu <baolu.lu@linux.intel.com>
---
 drivers/iommu/intel/iommu.c | 68 +++++++++++++++----------------------
 1 file changed, 27 insertions(+), 41 deletions(-)

diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c
index 8345e0c0824c..aa3dea1c9f13 100644
--- a/drivers/iommu/intel/iommu.c
+++ b/drivers/iommu/intel/iommu.c
@@ -534,16 +534,10 @@ static int domain_update_device_node(struct dmar_domain *domain)
 {
 	struct device_domain_info *info;
 	int nid = NUMA_NO_NODE;
+	unsigned long flags;
 
-	assert_spin_locked(&device_domain_lock);
-
-	if (list_empty(&domain->devices))
-		return NUMA_NO_NODE;
-
+	spin_lock_irqsave(&device_domain_lock, flags);
 	list_for_each_entry(info, &domain->devices, link) {
-		if (!info->dev)
-			continue;
-
 		/*
 		 * There could possibly be multiple device numa nodes as devices
 		 * within the same domain may sit behind different IOMMUs. There
@@ -554,6 +548,7 @@ static int domain_update_device_node(struct dmar_domain *domain)
 		if (nid != NUMA_NO_NODE)
 			break;
 	}
+	spin_unlock_irqrestore(&device_domain_lock, flags);
 
 	return nid;
 }
@@ -1376,49 +1371,50 @@ static void __iommu_flush_iotlb(struct intel_iommu *iommu, u16 did,
 }
 
 static struct device_domain_info *
-iommu_support_dev_iotlb (struct dmar_domain *domain, struct intel_iommu *iommu,
-			 u8 bus, u8 devfn)
+iommu_support_dev_iotlb(struct dmar_domain *domain, struct intel_iommu *iommu,
+			u8 bus, u8 devfn)
 {
-	struct device_domain_info *info;
-
-	assert_spin_locked(&device_domain_lock);
+	struct device_domain_info *info = NULL, *tmp;
+	unsigned long flags;
 
 	if (!iommu->qi)
 		return NULL;
 
-	list_for_each_entry(info, &domain->devices, link)
-		if (info->iommu == iommu && info->bus == bus &&
-		    info->devfn == devfn) {
-			if (info->ats_supported && info->dev)
-				return info;
+	spin_lock_irqsave(&device_domain_lock, flags);
+	list_for_each_entry(tmp, &domain->devices, link) {
+		if (tmp->iommu == iommu && tmp->bus == bus &&
+		    tmp->devfn == devfn) {
+			if (tmp->ats_supported)
+				info = tmp;
 			break;
 		}
+	}
+	spin_unlock_irqrestore(&device_domain_lock, flags);
 
-	return NULL;
+	return info;
 }
 
 static void domain_update_iotlb(struct dmar_domain *domain)
 {
 	struct device_domain_info *info;
 	bool has_iotlb_device = false;
+	unsigned long flags;
 
-	assert_spin_locked(&device_domain_lock);
-
-	list_for_each_entry(info, &domain->devices, link)
+	spin_lock_irqsave(&device_domain_lock, flags);
+	list_for_each_entry(info, &domain->devices, link) {
 		if (info->ats_enabled) {
 			has_iotlb_device = true;
 			break;
 		}
-
+	}
 	domain->has_iotlb_device = has_iotlb_device;
+	spin_unlock_irqrestore(&device_domain_lock, flags);
 }
 
 static void iommu_enable_dev_iotlb(struct device_domain_info *info)
 {
 	struct pci_dev *pdev;
 
-	assert_spin_locked(&device_domain_lock);
-
 	if (!info || !dev_is_pci(info->dev))
 		return;
 
@@ -1464,8 +1460,6 @@ static void iommu_disable_dev_iotlb(struct device_domain_info *info)
 {
 	struct pci_dev *pdev;
 
-	assert_spin_locked(&device_domain_lock);
-
 	if (!dev_is_pci(info->dev))
 		return;
 
@@ -1908,11 +1902,11 @@ static int domain_context_mapping_one(struct dmar_domain *domain,
 				      struct pasid_table *table,
 				      u8 bus, u8 devfn)
 {
+	struct device_domain_info *info =
+			iommu_support_dev_iotlb(domain, iommu, bus, devfn);
 	u16 did = domain->iommu_did[iommu->seq_id];
 	int translation = CONTEXT_TT_MULTI_LEVEL;
-	struct device_domain_info *info = NULL;
 	struct context_entry *context;
-	unsigned long flags;
 	int ret;
 
 	WARN_ON(did == 0);
@@ -1925,7 +1919,6 @@ static int domain_context_mapping_one(struct dmar_domain *domain,
 
 	BUG_ON(!domain->pgd);
 
-	spin_lock_irqsave(&device_domain_lock, flags);
 	spin_lock(&iommu->lock);
 
 	ret = -ENOMEM;
@@ -1978,7 +1971,6 @@ static int domain_context_mapping_one(struct dmar_domain *domain,
 		 * Setup the Device-TLB enable bit and Page request
 		 * Enable bit:
 		 */
-		info = iommu_support_dev_iotlb(domain, iommu, bus, devfn);
 		if (info && info->ats_supported)
 			context_set_sm_dte(context);
 		if (info && info->pri_supported)
@@ -2001,7 +1993,6 @@ static int domain_context_mapping_one(struct dmar_domain *domain,
 			goto out_unlock;
 		}
 
-		info = iommu_support_dev_iotlb(domain, iommu, bus, devfn);
 		if (info && info->ats_supported)
 			translation = CONTEXT_TT_DEV_IOTLB;
 		else
@@ -2047,7 +2038,6 @@ static int domain_context_mapping_one(struct dmar_domain *domain,
 
 out_unlock:
 	spin_unlock(&iommu->lock);
-	spin_unlock_irqrestore(&device_domain_lock, flags);
 
 	return ret;
 }
@@ -2460,15 +2450,14 @@ static int domain_add_dev_info(struct dmar_domain *domain, struct device *dev)
 	if (!iommu)
 		return -ENODEV;
 
-	spin_lock_irqsave(&device_domain_lock, flags);
-	info->domain = domain;
 	ret = domain_attach_iommu(domain, iommu);
-	if (ret) {
-		spin_unlock_irqrestore(&device_domain_lock, flags);
+	if (ret)
 		return ret;
-	}
+
+	spin_lock_irqsave(&device_domain_lock, flags);
 	list_add(&info->link, &domain->devices);
 	spin_unlock_irqrestore(&device_domain_lock, flags);
+	info->domain = domain;
 
 	/* PASID table is mandatory for a PCI device in scalable mode. */
 	if (sm_supported(iommu) && !dev_is_real_dma_subdevice(dev)) {
@@ -4637,7 +4626,6 @@ int intel_iommu_enable_pasid(struct intel_iommu *iommu, struct device *dev)
 	struct device_domain_info *info = dev_iommu_priv_get(dev);
 	struct context_entry *context;
 	struct dmar_domain *domain;
-	unsigned long flags;
 	u64 ctx_lo;
 	int ret;
 
@@ -4645,7 +4633,6 @@ int intel_iommu_enable_pasid(struct intel_iommu *iommu, struct device *dev)
 	if (!domain)
 		return -EINVAL;
 
-	spin_lock_irqsave(&device_domain_lock, flags);
 	spin_lock(&iommu->lock);
 
 	ret = -EINVAL;
@@ -4677,7 +4664,6 @@ int intel_iommu_enable_pasid(struct intel_iommu *iommu, struct device *dev)
 
 out:
 	spin_unlock(&iommu->lock);
-	spin_unlock_irqrestore(&device_domain_lock, flags);
 
 	return ret;
 }
-- 
2.25.1
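
[Editor's note: the discipline this patch enforces is that device_domain_lock
guards only the domain->devices list itself; device setup, context-table
programming, and NUMA-node computation all happen outside the critical
section. The following is a minimal userspace sketch of that pattern, not
kernel code: it substitutes pthread spinlocks for spin_lock_irqsave(), and
the demo_device/demo_domain types and demo_* functions are hypothetical
stand-ins for the VT-d structures.]

/*
 * Sketch: a lock that protects only a domain's device list.
 * Build with: cc demo.c -o demo -lpthread
 */
#include <pthread.h>
#include <stdio.h>

struct demo_device {
	int node;			/* NUMA node, -1 if unknown */
	struct demo_device *next;	/* singly linked device list */
};

struct demo_domain {
	pthread_spinlock_t lock;	/* protects ->devices only */
	struct demo_device *devices;
};

/* Lock held just for the list walk, as in domain_update_device_node(). */
static int demo_domain_node(struct demo_domain *d)
{
	struct demo_device *dev;
	int node = -1;

	pthread_spin_lock(&d->lock);
	for (dev = d->devices; dev; dev = dev->next) {
		node = dev->node;
		if (node != -1)	/* first known node wins */
			break;
	}
	pthread_spin_unlock(&d->lock);

	return node;
}

/*
 * Prepare everything outside the lock, then take it only around the
 * list insertion, as in the reordered domain_add_dev_info().
 */
static void demo_domain_add(struct demo_domain *d, struct demo_device *dev)
{
	pthread_spin_lock(&d->lock);
	dev->next = d->devices;
	d->devices = dev;
	pthread_spin_unlock(&d->lock);
}

int main(void)
{
	struct demo_domain dom = { .devices = NULL };
	struct demo_device dev = { .node = 1, .next = NULL };

	pthread_spin_init(&dom.lock, PTHREAD_PROCESS_PRIVATE);
	demo_domain_add(&dom, &dev);
	printf("domain node: %d\n", demo_domain_node(&dom));
	pthread_spin_destroy(&dom.lock);
	return 0;
}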