From: Will Davis <wdavis@nvidia.com>
To: Bjorn Helgaas <bhelgaas@google.com>
Cc: Alex Williamson <alex.williamson@redhat.com>,
	Joerg Roedel <joro@8bytes.org>,
	<iommu@lists.linux-foundation.org>, <linux-pci@vger.kernel.org>,
	Konrad Wilk <konrad.wilk@oracle.com>,
	Mark Hounschell <markh@compro.net>,
	"David S. Miller" <davem@davemloft.net>,
	Jonathan Corbet <corbet@lwn.net>,
	Terence Ripperda <tripperda@nvidia.com>,
	John Hubbard <jhubbard@nvidia.com>,
	Jerome Glisse <jglisse@redhat.com>,
	Will Davis <wdavis@nvidia.com>
Subject: [PATCH 16/22] iommu/amd: Implement (un)map_peer_resource
Date: Tue, 15 Sep 2015 12:11:01 -0500
Message-ID: <1442337067-22964-17-git-send-email-wdavis@nvidia.com>
In-Reply-To: <1442337067-22964-1-git-send-email-wdavis@nvidia.com>

Implement 'map_peer_resource' for the AMD IOMMU driver. Generalize the
existing map_page implementation into a helper that operates on a physical
address, and make both map_page and map_peer_resource thin wrappers around
that helper (and similarly, unmap_page and unmap_peer_resource around an
unmap helper).

This allows a device to map another device's MMIO resource (e.g. a PCI
BAR), enabling peer-to-peer transactions.
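
As a rough caller-side illustration (not part of this patch), the flow
looks something like the sketch below. It assumes the
dma_map_peer_resource()/dma_unmap_peer_resource() wrappers proposed
earlier in this series mirror the .map_peer_resource signature added
here; the function name, BAR index, direction, and error handling are
all illustrative:

  /* Hypothetical snippet; needs <linux/pci.h> and <linux/dma-mapping.h>. */
  static int example_map_peer_bar(struct pci_dev *pdev, struct pci_dev *peer,
                                  dma_peer_addr_t *dma_out)
  {
          struct resource *bar = &peer->resource[0];   /* peer's BAR0 */
          dma_peer_addr_t dma;

          /* Assumed to mirror the .map_peer_resource op added below. */
          dma = dma_map_peer_resource(&pdev->dev, &peer->dev, bar, 0,
                                      resource_size(bar), DMA_BIDIRECTIONAL,
                                      NULL);
          if (dma == DMA_ERROR_CODE)
                  return -EIO;

          *dma_out = dma;   /* program 'pdev' to DMA to this bus address */
          return 0;
  }

The unmap path is symmetric: dma_unmap_peer_resource(&pdev->dev, dma,
resource_size(bar), DMA_BIDIRECTIONAL, NULL).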

Add the new callbacks behind CONFIG_HAS_DMA_P2P guards, since the
corresponding dma_map_ops members are guarded by the same option.
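
(For reference, CONFIG_HAS_DMA_P2P is introduced by patch 01 of this
series. The fragment below is only a sketch of how an architecture might
advertise support; the exact Kconfig wiring lives in patches 01 and 22,
and the 'select' mechanism shown here is an assumption:)

  # lib/Kconfig (paraphrased from patch 01): a silent bool, selected by
  # architectures whose dma_map_ops can handle peer-to-peer mappings.
  config HAS_DMA_P2P
          bool

  # arch/x86/Kconfig (assumed shape of patch 22):
  config X86
          def_bool y
          select HAS_DMA_P2P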

Signed-off-by: Will Davis <wdavis@nvidia.com>
Reviewed-by: Terence Ripperda <tripperda@nvidia.com>
Reviewed-by: John Hubbard <jhubbard@nvidia.com>
---
 drivers/iommu/amd_iommu.c | 99 ++++++++++++++++++++++++++++++++++++++++-------
 1 file changed, 86 insertions(+), 13 deletions(-)

diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index a57e9b7..13a47f283 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -471,6 +471,10 @@ DECLARE_STATS_COUNTER(cnt_map_single);
 DECLARE_STATS_COUNTER(cnt_unmap_single);
 DECLARE_STATS_COUNTER(cnt_map_sg);
 DECLARE_STATS_COUNTER(cnt_unmap_sg);
+#ifdef CONFIG_HAS_DMA_P2P
+DECLARE_STATS_COUNTER(cnt_map_peer_resource);
+DECLARE_STATS_COUNTER(cnt_unmap_peer_resource);
+#endif
 DECLARE_STATS_COUNTER(cnt_alloc_coherent);
 DECLARE_STATS_COUNTER(cnt_free_coherent);
 DECLARE_STATS_COUNTER(cross_page);
@@ -509,6 +513,10 @@ static void amd_iommu_stats_init(void)
 	amd_iommu_stats_add(&cnt_unmap_single);
 	amd_iommu_stats_add(&cnt_map_sg);
 	amd_iommu_stats_add(&cnt_unmap_sg);
+#ifdef CONFIG_HAS_DMA_P2P
+	amd_iommu_stats_add(&cnt_map_peer_resource);
+	amd_iommu_stats_add(&cnt_unmap_peer_resource);
+#endif
 	amd_iommu_stats_add(&cnt_alloc_coherent);
 	amd_iommu_stats_add(&cnt_free_coherent);
 	amd_iommu_stats_add(&cross_page);
@@ -2585,20 +2593,16 @@ static void __unmap_single(struct dma_ops_domain *dma_dom,
 }
 
 /*
- * The exported map_single function for dma_ops.
+ * Wrapper function that contains code common to mapping a physical address
+ * range from a page or a resource.
  */
-static dma_addr_t map_page(struct device *dev, struct page *page,
-			   unsigned long offset, size_t size,
-			   enum dma_data_direction dir,
-			   struct dma_attrs *attrs)
+static dma_addr_t __map_phys(struct device *dev, phys_addr_t paddr,
+			     size_t size, enum dma_data_direction dir)
 {
 	unsigned long flags;
 	struct protection_domain *domain;
 	dma_addr_t addr;
 	u64 dma_mask;
-	phys_addr_t paddr = page_to_phys(page) + offset;
-
-	INC_STATS_COUNTER(cnt_map_single);
 
 	domain = get_domain(dev);
 	if (PTR_ERR(domain) == -EINVAL)
@@ -2624,16 +2628,15 @@ out:
 }
 
 /*
- * The exported unmap_single function for dma_ops.
+ * Wrapper function that contains code common to unmapping a physical address
+ * range from a page or a resource.
  */
-static void unmap_page(struct device *dev, dma_addr_t dma_addr, size_t size,
-		       enum dma_data_direction dir, struct dma_attrs *attrs)
+static void __unmap_phys(struct device *dev, dma_addr_t dma_addr, size_t size,
+			 enum dma_data_direction dir)
 {
 	unsigned long flags;
 	struct protection_domain *domain;
 
-	INC_STATS_COUNTER(cnt_unmap_single);
-
 	domain = get_domain(dev);
 	if (IS_ERR(domain))
 		return;
@@ -2707,6 +2710,72 @@ unmap:
 }
 
 /*
+ * The exported map_single function for dma_ops.
+ */
+static dma_addr_t map_page(struct device *dev, struct page *page,
+			   unsigned long offset, size_t size,
+			   enum dma_data_direction dir,
+			   struct dma_attrs *attrs)
+{
+	INC_STATS_COUNTER(cnt_map_single);
+
+	return __map_phys(dev, page_to_phys(page) + offset, size, dir);
+}
+
+/*
+ * The exported unmap_single function for dma_ops.
+ */
+static void unmap_page(struct device *dev, dma_addr_t dma_addr, size_t size,
+		       enum dma_data_direction dir, struct dma_attrs *attrs)
+{
+	INC_STATS_COUNTER(cnt_unmap_single);
+
+	__unmap_phys(dev, dma_addr, size, dir);
+}
+
+#ifdef CONFIG_HAS_DMA_P2P
+/*
+ * The exported map_peer_resource function for dma_ops.
+ */
+static dma_peer_addr_t map_peer_resource(struct device *dev,
+					 struct device *peer,
+					 struct resource *res,
+					 unsigned long offset,
+					 size_t size,
+					 enum dma_data_direction dir,
+					 struct dma_attrs *attrs)
+{
+	struct pci_dev *pdev;
+	struct pci_dev *ppeer;
+
+	INC_STATS_COUNTER(cnt_map_peer_resource);
+
+	if (!dev_is_pci(dev) || !dev_is_pci(peer))
+		return DMA_ERROR_CODE;
+
+	pdev = to_pci_dev(dev);
+	ppeer = to_pci_dev(peer);
+
+	if (!pci_peer_traffic_supported(pdev, ppeer))
+		return DMA_ERROR_CODE;
+
+	return __map_phys(dev, res->start + offset, size, dir);
+}
+
+/*
+ * The exported unmap_peer_resource function for dma_ops.
+ */
+static void unmap_peer_resource(struct device *dev, dma_peer_addr_t dma_addr,
+				size_t size, enum dma_data_direction dir,
+				struct dma_attrs *attrs)
+{
+	INC_STATS_COUNTER(cnt_unmap_peer_resource);
+
+	__unmap_phys(dev, dma_addr, size, dir);
+}
+#endif
+
+/*
  * The exported map_sg function for dma_ops (handles scatter-gather
  * lists).
  */
@@ -2852,6 +2921,10 @@ static struct dma_map_ops amd_iommu_dma_ops = {
 	.unmap_page = unmap_page,
 	.map_sg = map_sg,
 	.unmap_sg = unmap_sg,
+#ifdef CONFIG_HAS_DMA_P2P
+	.map_peer_resource = map_peer_resource,
+	.unmap_peer_resource = unmap_peer_resource,
+#endif
 	.dma_supported = amd_iommu_dma_supported,
 };
 
-- 
2.5.1

