From: Vasant Hegde via iommu <iommu@lists.linux-foundation.org>
To: <iommu@lists.linux-foundation.org>, <joro@8bytes.org>
Cc: Vasant Hegde <vasant.hegde@amd.com>
Subject: [RESEND PATCH v1 29/37] iommu/amd: Remove global amd_iommu_last_bdf
Date: Mon, 4 Apr 2022 15:30:15 +0530
Message-ID: <20220404100023.324645-30-vasant.hegde@amd.com>
In-Reply-To: <20220404100023.324645-1-vasant.hegde@amd.com>

Replace it with the per PCI segment last_bdf variable.
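
For context, here is a minimal stand-alone C sketch (not the kernel code) of the pattern this patch moves to: table sizing and device-id range checks consult the segment's own last_bdf instead of the removed global. The struct, the entry size, and the simplified tbl_size() (no PAGE_SHIFT/get_order rounding here) are illustrative stand-ins, not the real kernel definitions.

/*
 * Minimal stand-alone sketch (not the kernel code): each PCI segment
 * carries its own last_bdf, and sizing/range checks use it instead of
 * a single global.  All types and constants are simplified stand-ins.
 */
#include <stdio.h>
#include <stdbool.h>

#define ENTRY_SIZE 32			/* illustrative bytes per device table entry */

struct pci_segment {
	unsigned int id;		/* PCI segment number */
	unsigned int last_bdf;		/* largest device id seen on this segment */
};

/* Table size is derived from the segment's last_bdf, not a global. */
static unsigned long tbl_size(unsigned int entry_size, unsigned int last_bdf)
{
	return (unsigned long)(last_bdf + 1) * entry_size;
}

/* Range check against the owning segment, mirroring check_device(). */
static bool devid_in_scope(const struct pci_segment *seg, unsigned int devid)
{
	return (devid & 0xffff) <= seg->last_bdf;
}

int main(void)
{
	struct pci_segment seg0 = { .id = 0, .last_bdf = 0x00ff };
	struct pci_segment seg1 = { .id = 1, .last_bdf = 0xffff };

	printf("seg0 table: %lu bytes\n", tbl_size(ENTRY_SIZE, seg0.last_bdf));
	printf("seg1 table: %lu bytes\n", tbl_size(ENTRY_SIZE, seg1.last_bdf));
	printf("devid 0x0120 in scope on seg0: %d\n",
	       devid_in_scope(&seg0, 0x0120));
	return 0;
}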

Co-developed-by: Suravee Suthikulpanit <suravee.suthikulpanit@amd.com>
Signed-off-by: Suravee Suthikulpanit <suravee.suthikulpanit@amd.com>
Signed-off-by: Vasant Hegde <vasant.hegde@amd.com>
---
 drivers/iommu/amd/amd_iommu_types.h |  3 ---
 drivers/iommu/amd/init.c            | 35 ++++++++++-------------------
 drivers/iommu/amd/iommu.c           | 10 ++++++---
 3 files changed, 19 insertions(+), 29 deletions(-)

diff --git a/drivers/iommu/amd/amd_iommu_types.h b/drivers/iommu/amd/amd_iommu_types.h
index 0aa170014b85..1109961e1042 100644
--- a/drivers/iommu/amd/amd_iommu_types.h
+++ b/drivers/iommu/amd/amd_iommu_types.h
@@ -829,9 +829,6 @@ struct unity_map_entry {
 /* size of the dma_ops aperture as power of 2 */
 extern unsigned amd_iommu_aperture_order;
 
-/* largest PCI device id we expect translation requests for */
-extern u16 amd_iommu_last_bdf;
-
 /* allocation bitmap for domain ids */
 extern unsigned long *amd_iommu_pd_alloc_bitmap;
 
diff --git a/drivers/iommu/amd/init.c b/drivers/iommu/amd/init.c
index b3905b1c4bc9..093304d16c85 100644
--- a/drivers/iommu/amd/init.c
+++ b/drivers/iommu/amd/init.c
@@ -161,9 +161,6 @@ static bool amd_iommu_disabled __initdata;
 static bool amd_iommu_force_enable __initdata;
 static int amd_iommu_target_ivhd_type;
 
-u16 amd_iommu_last_bdf;			/* largest PCI device id we have
-					   to handle */
-
 LIST_HEAD(amd_iommu_pci_seg_list);	/* list of all PCI segments */
 LIST_HEAD(amd_iommu_list);		/* list of all AMD IOMMUs in the
 					   system */
@@ -245,16 +242,10 @@ static void init_translation_status(struct amd_iommu *iommu)
 		iommu->flags |= AMD_IOMMU_FLAG_TRANS_PRE_ENABLED;
 }
 
-static inline void update_last_devid(u16 devid)
-{
-	if (devid > amd_iommu_last_bdf)
-		amd_iommu_last_bdf = devid;
-}
-
-static inline unsigned long tbl_size(int entry_size)
+static inline unsigned long tbl_size(int entry_size, int last_bdf)
 {
 	unsigned shift = PAGE_SHIFT +
-			 get_order(((int)amd_iommu_last_bdf + 1) * entry_size);
+			 get_order((last_bdf + 1) * entry_size);
 
 	return 1UL << shift;
 }
@@ -538,7 +529,6 @@ static int __init find_last_devid_from_ivhd(struct ivhd_header *h)
 		switch (dev->type) {
 		case IVHD_DEV_ALL:
 			/* Use maximum BDF value for DEV_ALL */
-			update_last_devid(0xffff);
 			return 0xffff;
 			break;
 		case IVHD_DEV_SELECT:
@@ -546,7 +536,6 @@ static int __init find_last_devid_from_ivhd(struct ivhd_header *h)
 		case IVHD_DEV_ALIAS:
 		case IVHD_DEV_EXT_SELECT:
 			/* all the above subfield types refer to device ids */
-			update_last_devid(dev->devid);
 			if (dev->devid > last_devid)
 				last_devid = dev->devid;
 			break;
@@ -688,7 +677,7 @@ static int __init alloc_alias_table(struct amd_iommu_pci_seg *pci_seg)
 	/*
 	 * let all alias entries point to itself
 	 */
-	for (i = 0; i <= amd_iommu_last_bdf; ++i)
+	for (i = 0; i <= pci_seg->last_bdf; ++i)
 		pci_seg->alias_table[i] = i;
 
 	return 0;
@@ -1054,7 +1043,7 @@ static bool __copy_device_table(struct amd_iommu *iommu)
 		return false;
 	}
 
-	for (devid = 0; devid <= amd_iommu_last_bdf; ++devid) {
+	for (devid = 0; devid <= pci_seg->last_bdf; ++devid) {
 		pci_seg->old_dev_tbl_cpy[devid] = old_devtb[devid];
 		dom_id = old_devtb[devid].data[1] & DEV_DOMID_MASK;
 		dte_v = old_devtb[devid].data[0] & DTE_FLAG_V;
@@ -1315,7 +1304,7 @@ static int __init init_iommu_from_acpi(struct amd_iommu *iommu,
 
 			DUMP_printk("  DEV_ALL\t\t\tflags: %02x\n", e->flags);
 
-			for (dev_i = 0; dev_i <= amd_iommu_last_bdf; ++dev_i)
+			for (dev_i = 0; dev_i <= pci_seg->last_bdf; ++dev_i)
 				set_dev_entry_from_acpi(iommu, dev_i, e->flags, 0);
 			break;
 		case IVHD_DEV_SELECT:
@@ -1560,9 +1549,9 @@ static struct amd_iommu_pci_seg *__init alloc_pci_segment(u16 id,
 
 	pci_seg->last_bdf = last_bdf;
 	DUMP_printk("PCI segment : 0x%0x, last bdf : 0x%04x\n", id, last_bdf);
-	pci_seg->dev_table_size     = tbl_size(DEV_TABLE_ENTRY_SIZE);
-	pci_seg->alias_table_size   = tbl_size(ALIAS_TABLE_ENTRY_SIZE);
-	pci_seg->rlookup_table_size = tbl_size(RLOOKUP_TABLE_ENTRY_SIZE);
+	pci_seg->dev_table_size     = tbl_size(DEV_TABLE_ENTRY_SIZE, last_bdf);
+	pci_seg->alias_table_size   = tbl_size(ALIAS_TABLE_ENTRY_SIZE, last_bdf);
+	pci_seg->rlookup_table_size = tbl_size(RLOOKUP_TABLE_ENTRY_SIZE, last_bdf);
 
 	pci_seg->id = id;
 	init_llist_head(&pci_seg->dev_data_list);
@@ -2443,7 +2432,7 @@ static int __init init_unity_map_range(struct ivmd_header *m,
 	case ACPI_IVMD_TYPE_ALL:
 		s = "IVMD_TYPE_ALL\t\t";
 		e->devid_start = 0;
-		e->devid_end = amd_iommu_last_bdf;
+		e->devid_end = pci_seg->last_bdf;
 		break;
 	case ACPI_IVMD_TYPE_RANGE:
 		s = "IVMD_TYPE_RANGE\t\t";
@@ -2510,7 +2499,7 @@ static void init_device_table_dma(struct amd_iommu_pci_seg *pci_seg)
 	if (dev_table == NULL)
 		return;
 
-	for (devid = 0; devid <= amd_iommu_last_bdf; ++devid) {
+	for (devid = 0; devid <= pci_seg->last_bdf; ++devid) {
 		__set_dev_entry_bit(dev_table, devid, DEV_ENTRY_VALID);
 		__set_dev_entry_bit(dev_table, devid, DEV_ENTRY_TRANSLATION);
 	}
@@ -2524,7 +2513,7 @@ static void __init uninit_device_table_dma(struct amd_iommu_pci_seg *pci_seg)
 	if (dev_table == NULL)
 		return;
 
-	for (devid = 0; devid <= amd_iommu_last_bdf; ++devid) {
+	for (devid = 0; devid <= pci_seg->last_bdf; ++devid) {
 		dev_table[devid].data[0] = 0ULL;
 		dev_table[devid].data[1] = 0ULL;
 	}
@@ -2539,7 +2528,7 @@ static void init_device_table(void)
 		return;
 
 	for_each_pci_segment(pci_seg) {
-		for (devid = 0; devid <= amd_iommu_last_bdf; ++devid)
+		for (devid = 0; devid <= pci_seg->last_bdf; ++devid)
 			__set_dev_entry_bit(pci_seg->dev_table,
 					    devid, DEV_ENTRY_IRQ_TBL_EN);
 	}
diff --git a/drivers/iommu/amd/iommu.c b/drivers/iommu/amd/iommu.c
index 13731fa241f2..69c88e6c9fde 100644
--- a/drivers/iommu/amd/iommu.c
+++ b/drivers/iommu/amd/iommu.c
@@ -357,6 +357,8 @@ static bool pci_iommuv2_capable(struct pci_dev *pdev)
  */
 static bool check_device(struct device *dev)
 {
+	struct amd_iommu_pci_seg *pci_seg;
+	struct amd_iommu *iommu;
 	int devid;
 
 	if (!dev)
@@ -366,11 +368,13 @@ static bool check_device(struct device *dev)
 	if (devid < 0)
 		return false;
 
-	/* Out of our scope? */
-	if (devid > amd_iommu_last_bdf)
+	iommu = rlookup_amd_iommu(dev);
+	if (!iommu)
 		return false;
 
-	if (rlookup_amd_iommu(dev) == NULL)
+	/* Out of our scope? */
+	pci_seg = iommu->pci_seg;
+	if ((devid & 0xffff) > pci_seg->last_bdf)
 		return false;
 
 	return true;
-- 
2.27.0

Thread overview: 39+ messages
2022-04-04  9:59 [RESEND PATCH v1 00/37] iommu/amd: Add multiple PCI segments support Vasant Hegde via iommu
2022-04-04  9:59 ` [RESEND PATCH v1 01/37] iommu/amd: Update struct iommu_dev_data defination Vasant Hegde via iommu
2022-04-04  9:59 ` [RESEND PATCH v1 02/37] iommu/amd: Introduce pci segment structure Vasant Hegde via iommu
2022-04-04  9:59 ` [RESEND PATCH v1 03/37] iommu/amd: Introduce per PCI segment device table Vasant Hegde via iommu
2022-04-04  9:59 ` [RESEND PATCH v1 04/37] iommu/amd: Introduce per PCI segment rlookup table Vasant Hegde via iommu
2022-04-04  9:59 ` [RESEND PATCH v1 05/37] iommu/amd: Introduce per PCI segment irq_lookup_table Vasant Hegde via iommu
2022-04-04  9:59 ` [RESEND PATCH v1 06/37] iommu/amd: Introduce per PCI segment dev_data_list Vasant Hegde via iommu
2022-04-04  9:59 ` [RESEND PATCH v1 07/37] iommu/amd: Introduce per PCI segment old_dev_tbl_cpy Vasant Hegde via iommu
2022-04-04  9:59 ` [RESEND PATCH v1 08/37] iommu/amd: Introduce per PCI segment alias_table Vasant Hegde via iommu
2022-04-04  9:59 ` [RESEND PATCH v1 09/37] iommu/amd: Introduce per PCI segment unity map list Vasant Hegde via iommu
2022-04-04  9:59 ` [RESEND PATCH v1 10/37] iommu/amd: Introduce per PCI segment last_bdf Vasant Hegde via iommu
2022-04-04  9:59 ` [RESEND PATCH v1 11/37] iommu/amd: Introduce per PCI segment device table size Vasant Hegde via iommu
2022-04-04  9:59 ` [RESEND PATCH v1 12/37] iommu/amd: Introduce per PCI segment alias " Vasant Hegde via iommu
2022-04-04  9:59 ` [RESEND PATCH v1 13/37] iommu/amd: Introduce per PCI segment rlookup " Vasant Hegde via iommu
2022-04-04 10:00 ` [RESEND PATCH v1 14/37] iommu/amd: Convert to use per PCI segment irq_lookup_table Vasant Hegde via iommu
2022-04-04 10:00 ` [RESEND PATCH v1 15/37] iommu/amd: Convert to use rlookup_amd_iommu helper function Vasant Hegde via iommu
2022-04-04 10:00 ` [RESEND PATCH v1 16/37] iommu/amd: Update irq_remapping_alloc to use IOMMU lookup " Vasant Hegde via iommu
2022-04-04 10:00 ` [RESEND PATCH v1 17/37] iommu/amd: Introduce struct amd_ir_data.iommu Vasant Hegde via iommu
2022-04-04 10:00 ` [RESEND PATCH v1 18/37] iommu/amd: Update amd_irte_ops functions Vasant Hegde via iommu
2022-04-04 10:00 ` [RESEND PATCH v1 19/37] iommu/amd: Update alloc_irq_table and alloc_irq_index Vasant Hegde via iommu
2022-04-04 10:00 ` [RESEND PATCH v1 20/37] iommu/amd: Convert to use per PCI segment rlookup_table Vasant Hegde via iommu
2022-04-04 10:00 ` [RESEND PATCH v1 21/37] iommu/amd: Update set_dte_entry and clear_dte_entry Vasant Hegde via iommu
2022-04-04 10:00 ` [RESEND PATCH v1 22/37] iommu/amd: Update iommu_ignore_device Vasant Hegde via iommu
2022-04-04 10:00 ` [RESEND PATCH v1 23/37] iommu/amd: Update dump_dte_entry Vasant Hegde via iommu
2022-04-04 10:00 ` [RESEND PATCH v1 24/37] iommu/amd: Update set_dte_irq_entry Vasant Hegde via iommu
2022-04-04 10:00 ` [RESEND PATCH v1 25/37] iommu/amd: Update (un)init_device_table_dma() Vasant Hegde via iommu
2022-04-04 10:00 ` [RESEND PATCH v1 26/37] iommu/amd: Update set_dev_entry_bit() and get_dev_entry_bit() Vasant Hegde via iommu
2022-04-04 10:00 ` [RESEND PATCH v1 27/37] iommu/amd: Remove global amd_iommu_dev_table Vasant Hegde via iommu
2022-04-04 10:00 ` [RESEND PATCH v1 28/37] iommu/amd: Remove global amd_iommu_alias_table Vasant Hegde via iommu
2022-04-04 10:00 ` [RESEND PATCH v1 29/37] iommu/amd: Remove global amd_iommu_last_bdf Vasant Hegde via iommu [this message]
2022-04-04 10:00 ` [RESEND PATCH v1 30/37] iommu/amd: Flush upto last_bdf only Vasant Hegde via iommu
2022-04-04 10:00 ` [RESEND PATCH v1 31/37] iommu/amd: Introduce get_device_sbdf_id() helper function Vasant Hegde via iommu
2022-04-04 10:00 ` [RESEND PATCH v1 32/37] iommu/amd: Include PCI segment ID when initialize IOMMU Vasant Hegde via iommu
2022-04-04 10:00 ` [RESEND PATCH v1 33/37] iommu/amd: Specify PCI segment ID when getting pci device Vasant Hegde via iommu
2022-04-04 10:00 ` [RESEND PATCH v1 34/37] iommu/amd: Add PCI segment support for ivrs_ioapic, ivrs_hpet, ivrs_acpihid commands Vasant Hegde via iommu
2022-04-04 10:00 ` [RESEND PATCH v1 35/37] iommu/amd: Print PCI segment ID in error log messages Vasant Hegde via iommu
2022-04-04 10:00 ` [RESEND PATCH v1 36/37] iommu/amd: Update device_state structure to include PCI seg ID Vasant Hegde via iommu
2022-04-04 10:00 ` [RESEND PATCH v1 37/37] iommu/amd: Update amd_iommu_fault " Vasant Hegde via iommu
2022-04-12 14:57 ` [RESEND PATCH v1 00/37] iommu/amd: Add multiple PCI segments support Vasant Hegde via iommu
