From: Suravee Suthikulpanit <suravee.suthikulpanit@amd.com>
To: linux-kernel@vger.kernel.org, iommu@lists.linux-foundation.org
Cc: joro@8bytes.org, robin.murphy@arm.com,
Suravee Suthikulpanit <suravee.suthikulpanit@amd.com>
Subject: [PATCH v3 08/14] iommu/amd: Remove amd_iommu_domain_get_pgtable
Date: Sun, 4 Oct 2020 01:45:43 +0000 [thread overview]
Message-ID: <20201004014549.16065-9-suravee.suthikulpanit@amd.com> (raw)
In-Reply-To: <20201004014549.16065-1-suravee.suthikulpanit@amd.com>
Since the IO page table root and mode parameters have been moved into
struct amd_io_pgtable, the function amd_iommu_domain_get_pgtable() is no
longer needed. Therefore, remove it along with struct domain_pgtable.
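For reference, a minimal sketch of the caller-side access pattern before and
after this change (illustrative only; the concrete call sites are the ones
updated in the diff below):

    /* Before: decode pt_root into a temporary on every use. */
    struct domain_pgtable pgtable;

    amd_iommu_domain_get_pgtable(domain, &pgtable);
    if (pgtable.mode == PAGE_MODE_NONE)
            return -EINVAL;

    /* After: read the decoded fields cached in struct amd_io_pgtable. */
    if (domain->iop.mode == PAGE_MODE_NONE)
            return -EINVAL;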
Signed-off-by: Suravee Suthikulpanit <suravee.suthikulpanit@amd.com>
---
drivers/iommu/amd/amd_iommu.h | 4 ++--
drivers/iommu/amd/amd_iommu_types.h | 6 -----
drivers/iommu/amd/io_pgtable.c | 36 ++++++++++-------------------
drivers/iommu/amd/iommu.c | 34 ++++-----------------------
4 files changed, 19 insertions(+), 61 deletions(-)
diff --git a/drivers/iommu/amd/amd_iommu.h b/drivers/iommu/amd/amd_iommu.h
index 8dff7d85be79..2059e64fdc53 100644
--- a/drivers/iommu/amd/amd_iommu.h
+++ b/drivers/iommu/amd/amd_iommu.h
@@ -101,6 +101,8 @@ static inline
void amd_iommu_domain_set_pt_root(struct protection_domain *domain, u64 root)
{
atomic64_set(&domain->iop.pt_root, root);
+ domain->iop.root = (u64 *)(root & PAGE_MASK);
+ domain->iop.mode = root & 7; /* lowest 3 bits encode pgtable mode */
}
static inline
@@ -135,8 +137,6 @@ extern unsigned long iommu_unmap_page(struct protection_domain *dom,
extern u64 *fetch_pte(struct protection_domain *domain,
unsigned long address,
unsigned long *page_size);
-extern void amd_iommu_domain_get_pgtable(struct protection_domain *domain,
- struct domain_pgtable *pgtable);
extern void amd_iommu_domain_set_pgtable(struct protection_domain *domain,
u64 *root, int mode);
extern void amd_iommu_free_pgtable(struct amd_io_pgtable *pgtable);
diff --git a/drivers/iommu/amd/amd_iommu_types.h b/drivers/iommu/amd/amd_iommu_types.h
index 80b5c34357ed..de3fe9433080 100644
--- a/drivers/iommu/amd/amd_iommu_types.h
+++ b/drivers/iommu/amd/amd_iommu_types.h
@@ -514,12 +514,6 @@ struct protection_domain {
unsigned dev_iommu[MAX_IOMMUS]; /* per-IOMMU reference count */
};
-/* For decocded pt_root */
-struct domain_pgtable {
- int mode;
- u64 *root;
-};
-
/*
* Structure where we save information about one hardware AMD IOMMU in the
* system.
diff --git a/drivers/iommu/amd/io_pgtable.c b/drivers/iommu/amd/io_pgtable.c
index 23e82da2dea8..6c063d2c8bf0 100644
--- a/drivers/iommu/amd/io_pgtable.c
+++ b/drivers/iommu/amd/io_pgtable.c
@@ -184,30 +184,27 @@ static bool increase_address_space(struct protection_domain *domain,
unsigned long address,
gfp_t gfp)
{
- struct domain_pgtable pgtable;
unsigned long flags;
bool ret = true;
u64 *pte;
spin_lock_irqsave(&domain->lock, flags);
- amd_iommu_domain_get_pgtable(domain, &pgtable);
-
- if (address <= PM_LEVEL_SIZE(pgtable.mode))
+ if (address <= PM_LEVEL_SIZE(domain->iop.mode))
goto out;
ret = false;
- if (WARN_ON_ONCE(pgtable.mode == PAGE_MODE_6_LEVEL))
+ if (WARN_ON_ONCE(domain->iop.mode == PAGE_MODE_6_LEVEL))
goto out;
pte = (void *)get_zeroed_page(gfp);
if (!pte)
goto out;
- *pte = PM_LEVEL_PDE(pgtable.mode, iommu_virt_to_phys(pgtable.root));
+ *pte = PM_LEVEL_PDE(domain->iop.mode, iommu_virt_to_phys(domain->iop.root));
- pgtable.root = pte;
- pgtable.mode += 1;
+ domain->iop.root = pte;
+ domain->iop.mode += 1;
amd_iommu_update_and_flush_device_table(domain);
amd_iommu_domain_flush_complete(domain);
@@ -215,7 +212,7 @@ static bool increase_address_space(struct protection_domain *domain,
* Device Table needs to be updated and flushed before the new root can
* be published.
*/
- amd_iommu_domain_set_pgtable(domain, pte, pgtable.mode);
+ amd_iommu_domain_set_pgtable(domain, pte, domain->iop.mode);
ret = true;
@@ -232,29 +229,23 @@ static u64 *alloc_pte(struct protection_domain *domain,
gfp_t gfp,
bool *updated)
{
- struct domain_pgtable pgtable;
int level, end_lvl;
u64 *pte, *page;
BUG_ON(!is_power_of_2(page_size));
- amd_iommu_domain_get_pgtable(domain, &pgtable);
-
- while (address > PM_LEVEL_SIZE(pgtable.mode)) {
+ while (address > PM_LEVEL_SIZE(domain->iop.mode)) {
/*
* Return an error if there is no memory to update the
* page-table.
*/
if (!increase_address_space(domain, address, gfp))
return NULL;
-
- /* Read new values to check if update was successful */
- amd_iommu_domain_get_pgtable(domain, &pgtable);
}
- level = pgtable.mode - 1;
- pte = &pgtable.root[PM_LEVEL_INDEX(level, address)];
+ level = domain->iop.mode - 1;
+ pte = &domain->iop.root[PM_LEVEL_INDEX(level, address)];
address = PAGE_SIZE_ALIGN(address, page_size);
end_lvl = PAGE_SIZE_LEVEL(page_size);
@@ -330,19 +321,16 @@ u64 *fetch_pte(struct protection_domain *domain,
unsigned long address,
unsigned long *page_size)
{
- struct domain_pgtable pgtable;
int level;
u64 *pte;
*page_size = 0;
- amd_iommu_domain_get_pgtable(domain, &pgtable);
-
- if (address > PM_LEVEL_SIZE(pgtable.mode))
+ if (address > PM_LEVEL_SIZE(domain->iop.mode))
return NULL;
- level = pgtable.mode - 1;
- pte = &pgtable.root[PM_LEVEL_INDEX(level, address)];
+ level = domain->iop.mode - 1;
+ pte = &domain->iop.root[PM_LEVEL_INDEX(level, address)];
*page_size = PTE_LEVEL_PAGE_SIZE(level);
while (level > 0) {
diff --git a/drivers/iommu/amd/iommu.c b/drivers/iommu/amd/iommu.c
index cbbea7b952fb..3f6ede1e572c 100644
--- a/drivers/iommu/amd/iommu.c
+++ b/drivers/iommu/amd/iommu.c
@@ -140,15 +140,6 @@ static struct protection_domain *to_pdomain(struct iommu_domain *dom)
return container_of(dom, struct protection_domain, domain);
}
-void amd_iommu_domain_get_pgtable(struct protection_domain *domain,
- struct domain_pgtable *pgtable)
-{
- u64 pt_root = atomic64_read(&domain->iop.pt_root);
-
- pgtable->root = (u64 *)(pt_root & PAGE_MASK);
- pgtable->mode = pt_root & 7; /* lowest 3 bits encode pgtable mode */
-}
-
static struct iommu_dev_data *alloc_dev_data(u16 devid)
{
struct iommu_dev_data *dev_data;
@@ -1464,7 +1455,6 @@ static void clear_dte_entry(u16 devid)
static void do_attach(struct iommu_dev_data *dev_data,
struct protection_domain *domain)
{
- struct domain_pgtable pgtable;
struct amd_iommu *iommu;
bool ats;
@@ -1480,7 +1470,6 @@ static void do_attach(struct iommu_dev_data *dev_data,
domain->dev_cnt += 1;
/* Update device table */
- amd_iommu_domain_get_pgtable(domain, &pgtable);
set_dte_entry(dev_data->devid, domain,
ats, dev_data->iommu_v2);
clone_aliases(dev_data->pdev);
@@ -1806,10 +1795,7 @@ void amd_iommu_update_and_flush_device_table(struct protection_domain *domain)
void amd_iommu_domain_update(struct protection_domain *domain)
{
- struct domain_pgtable pgtable;
-
/* Update device table */
- amd_iommu_domain_get_pgtable(domain, &pgtable);
amd_iommu_update_and_flush_device_table(domain);
/* Flush domain TLB(s) and wait for completion */
@@ -2058,12 +2044,10 @@ static int amd_iommu_map(struct iommu_domain *dom, unsigned long iova,
gfp_t gfp)
{
struct protection_domain *domain = to_pdomain(dom);
- struct domain_pgtable pgtable;
int prot = 0;
int ret;
- amd_iommu_domain_get_pgtable(domain, &pgtable);
- if (pgtable.mode == PAGE_MODE_NONE)
+ if (domain->iop.mode == PAGE_MODE_NONE)
return -EINVAL;
if (iommu_prot & IOMMU_READ)
@@ -2083,10 +2067,8 @@ static size_t amd_iommu_unmap(struct iommu_domain *dom, unsigned long iova,
struct iommu_iotlb_gather *gather)
{
struct protection_domain *domain = to_pdomain(dom);
- struct domain_pgtable pgtable;
- amd_iommu_domain_get_pgtable(domain, &pgtable);
- if (pgtable.mode == PAGE_MODE_NONE)
+ if (domain->iop.mode == PAGE_MODE_NONE)
return 0;
return iommu_unmap_page(domain, iova, page_size);
@@ -2097,11 +2079,9 @@ static phys_addr_t amd_iommu_iova_to_phys(struct iommu_domain *dom,
{
struct protection_domain *domain = to_pdomain(dom);
unsigned long offset_mask, pte_pgsize;
- struct domain_pgtable pgtable;
u64 *pte, __pte;
- amd_iommu_domain_get_pgtable(domain, &pgtable);
- if (pgtable.mode == PAGE_MODE_NONE)
+ if (domain->iop.mode == PAGE_MODE_NONE)
return iova;
pte = fetch_pte(domain, iova, &pte_pgsize);
@@ -2470,11 +2450,9 @@ static u64 *__get_gcr3_pte(u64 *root, int level, int pasid, bool alloc)
static int __set_gcr3(struct protection_domain *domain, int pasid,
unsigned long cr3)
{
- struct domain_pgtable pgtable;
u64 *pte;
- amd_iommu_domain_get_pgtable(domain, &pgtable);
- if (pgtable.mode != PAGE_MODE_NONE)
+ if (domain->iop.mode != PAGE_MODE_NONE)
return -EINVAL;
pte = __get_gcr3_pte(domain->gcr3_tbl, domain->glx, pasid, true);
@@ -2488,11 +2466,9 @@ static int __set_gcr3(struct protection_domain *domain, int pasid,
static int __clear_gcr3(struct protection_domain *domain, int pasid)
{
- struct domain_pgtable pgtable;
u64 *pte;
- amd_iommu_domain_get_pgtable(domain, &pgtable);
- if (pgtable.mode != PAGE_MODE_NONE)
+ if (domain->iop.mode != PAGE_MODE_NONE)
return -EINVAL;
pte = __get_gcr3_pte(domain->gcr3_tbl, domain->glx, pasid, false);
--
2.17.1