From: "Li, Zhen-Hua" <zhen-hual@hp.com>
To: <dwmw2@infradead.org>, <indou.takao@jp.fujitsu.com>,
	<bhe@redhat.com>, <joro@8bytes.org>, <vgoyal@redhat.com>,
	<dyoung@redhat.com>
Cc: <iommu@lists.linux-foundation.org>,
	<linux-kernel@vger.kernel.org>, <linux-pci@vger.kernel.org>,
	<kexec@lists.infradead.org>, <alex.williamson@redhat.com>,
	<ddutile@redhat.com>, <ishii.hironobu@jp.fujitsu.com>,
	<bhelgaas@google.com>, <doug.hatch@hp.com>,
	<jerry.hoemann@hp.com>, <tom.vaden@hp.com>, <li.zhang6@hp.com>,
	<lisa.mitchell@hp.com>, <billsumnerlinux@gmail.com>,
	<zhen-hual@hp.com>, <rwright@hp.com>
Subject: [PATCH v10 07/10] iommu/vt-d: enable kdump support in iommu module
Date: Fri, 10 Apr 2015 16:42:10 +0800	[thread overview]
Message-ID: <1428655333-19504-8-git-send-email-zhen-hual@hp.com> (raw)
In-Reply-To: <1428655333-19504-1-git-send-email-zhen-hual@hp.com>

Modify the operation of the following functions when they are called during a crash dump (a simplified sketch of the shared pattern follows the list below):
    device_to_context_entry
    free_context_table
    get_domain_for_dev
    init_dmars
    intel_iommu_init

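As a rough illustration (not taken from the patch itself), the kdump handling in these
functions follows one common pattern: check whether the previous kernel left DMA
translation enabled and, if so, reuse its state instead of building new structures.
A minimal sketch, assuming the pre_enabled_trans flag and the helpers introduced
earlier in this series; the wrapper function name here is hypothetical:

/* Sketch only: per-IOMMU setup in init_dmars(), branching on whether the
 * old (panicked) kernel left DMA translation enabled.
 */
static int setup_root_table_sketch(struct intel_iommu *iommu)
{
	int ret;

	iommu_check_pre_te_status(iommu);	/* sets iommu->pre_enabled_trans */

	if (iommu->pre_enabled_trans)
		/* kdump kernel: copy and reuse the old kernel's translation tables */
		ret = intel_iommu_load_translation_tables(iommu);
	else
		/* normal boot: allocate a fresh root entry table */
		ret = iommu_alloc_root_entry(iommu);

	return ret;
}
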
Bill Sumner:
    Original version.

Zhenhua:
    Renamed the new helper functions that are called.
    Do not disable and re-enable TE (translation enable) in the kdump kernel.
    Reuse the did and gaw from the old context entry (see the sketch below).

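To illustrate the last point, a hedged sketch of how the domain id (did) and guest
address width (gaw) recorded by the old kernel could be recovered before attaching
the domain. It uses the helpers from this series; the wrapper function itself is
hypothetical and only shows the lookup, not the attach path:

/* Sketch only: pick up did and gaw left behind by the old kernel. */
static void reuse_old_domain_params_sketch(struct intel_iommu *iommu,
					   u8 bus, u8 devfn, int *did, int *gaw)
{
	struct context_entry *ce;

	ce = device_to_existing_context_entry(iommu, bus, devfn);
	if (!ce)
		return;		/* no old entry: keep the caller's defaults */

	*did = context_domain_id(ce);				/* old domain id */
	*gaw = agaw_to_width(context_address_width(ce));	/* old address width */
}
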
Signed-off-by: Bill Sumner <billsumnerlinux@gmail.com>
Signed-off-by: Li, Zhen-Hua <zhen-hual@hp.com>
---
 drivers/iommu/intel-iommu.c | 94 +++++++++++++++++++++++++++++++++++++++------
 1 file changed, 82 insertions(+), 12 deletions(-)

diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index 78c1d65..3d4ea43 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -399,6 +399,7 @@ static int copy_root_entry_table(struct intel_iommu *iommu);
 static int intel_iommu_load_translation_tables(struct intel_iommu *iommu);
 
 static void iommu_check_pre_te_status(struct intel_iommu *iommu);
+static u8 g_translation_pre_enabled;
 
 /*
  * This domain is a statically identity mapping domain.
@@ -839,6 +840,9 @@ static struct context_entry * device_to_context_entry(struct intel_iommu *iommu,
 		set_root_value(root, phy_addr);
 		set_root_present(root);
 		__iommu_flush_cache(iommu, root, sizeof(*root));
+
+		if (iommu->pre_enabled_trans)
+			__iommu_update_old_root_entry(iommu, bus);
 	}
 	spin_unlock_irqrestore(&iommu->lock, flags);
 	return &context[devfn];
@@ -890,7 +894,8 @@ static void free_context_table(struct intel_iommu *iommu)
 
 	spin_lock_irqsave(&iommu->lock, flags);
 	if (!iommu->root_entry) {
-		goto out;
+		spin_unlock_irqrestore(&iommu->lock, flags);
+		return;
 	}
 	for (i = 0; i < ROOT_ENTRY_NR; i++) {
 		root = &iommu->root_entry[i];
@@ -898,10 +903,23 @@ static void free_context_table(struct intel_iommu *iommu)
 		if (context)
 			free_pgtable_page(context);
 	}
+
+	if (iommu->pre_enabled_trans) {
+		iommu->root_entry_old_phys = 0;
+		root = iommu->root_entry_old_virt;
+		iommu->root_entry_old_virt = NULL;
+	}
+
 	free_pgtable_page(iommu->root_entry);
 	iommu->root_entry = NULL;
-out:
+
 	spin_unlock_irqrestore(&iommu->lock, flags);
+
+	/* iounmap() is called after the spinlock is released because
+	 * it may sleep and must not run with the lock held.
+	 */
+	if (iommu->pre_enabled_trans)
+		iounmap(root);
 }
 
 static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain,
@@ -2319,6 +2337,7 @@ static struct dmar_domain *get_domain_for_dev(struct device *dev, int gaw)
 	unsigned long flags;
 	u8 bus, devfn;
 	int did = -1;   /* Default to "no domain_id supplied" */
+	struct context_entry *ce = NULL;
 
 	domain = find_domain(dev);
 	if (domain)
@@ -2352,6 +2371,20 @@ static struct dmar_domain *get_domain_for_dev(struct device *dev, int gaw)
 	domain = alloc_domain(0);
 	if (!domain)
 		return NULL;
+
+	if (iommu->pre_enabled_trans) {
+		/*
+		 * if this device had a did in the old kernel
+		 * use its values instead of generating new ones
+		 */
+		ce = device_to_existing_context_entry(iommu, bus, devfn);
+
+		if (ce) {
+			did = context_domain_id(ce);
+			gaw = agaw_to_width(context_address_width(ce));
+		}
+	}
+
 	domain->id = iommu_attach_domain_with_id(domain, iommu, did);
 	if (domain->id < 0) {
 		free_domain_mem(domain);
@@ -2879,6 +2912,7 @@ static int __init init_dmars(void)
 		goto free_g_iommus;
 	}
 
+	g_translation_pre_enabled = 0; /* To know whether to skip RMRR */
 	for_each_active_iommu(iommu, drhd) {
 		g_iommus[iommu->seq_id] = iommu;
 
@@ -2886,14 +2920,30 @@ static int __init init_dmars(void)
 		if (ret)
 			goto free_iommu;
 
-		/*
-		 * TBD:
-		 * we could share the same root & context tables
-		 * among all IOMMU's. Need to Split it later.
-		 */
-		ret = iommu_alloc_root_entry(iommu);
-		if (ret)
-			goto free_iommu;
+		iommu_check_pre_te_status(iommu);
+		if (iommu->pre_enabled_trans) {
+			pr_info("IOMMU: Copying translation tables from panicked kernel\n");
+			ret = intel_iommu_load_translation_tables(iommu);
+			if (ret) {
+				pr_err("IOMMU: Copying translation tables failed\n");
+
+				/* Best to stop trying */
+				goto free_iommu;
+			}
+			pr_info("IOMMU: root_cache:0x%12.12llx phys:0x%12.12llx\n",
+				(u64)iommu->root_entry,
+				(u64)iommu->root_entry_old_phys);
+		} else {
+			/*
+			 * TBD:
+			 * we could share the same root & context tables
+			 * among all IOMMU's. Need to Split it later.
+			 */
+			ret = iommu_alloc_root_entry(iommu);
+			if (ret)
+				goto free_iommu;
+		}
+
 		if (!ecap_pass_through(iommu->ecap))
 			hw_pass_through = 0;
 	}
@@ -2911,6 +2961,14 @@ static int __init init_dmars(void)
 	check_tylersburg_isoch();
 
 	/*
+	 * In the second kernel: Skip setting-up new domains for
+	 * si, rmrr, and the isa bus on the expectation that these
+	 * translations were copied from the old kernel.
+	 */
+	if (g_translation_pre_enabled)
+		goto skip_new_domains_for_si_rmrr_isa;
+
+	/*
 	 * If pass through is not set or not enabled, setup context entries for
 	 * identity mappings for rmrr, gfx, and isa and may fall back to static
 	 * identity mapping if iommu_identity_mapping is set.
@@ -2950,6 +3008,8 @@ static int __init init_dmars(void)
 
 	iommu_prepare_isa();
 
+skip_new_domains_for_si_rmrr_isa:;
+
 	/*
 	 * for each drhd
 	 *   enable fault log
@@ -2978,7 +3038,13 @@ static int __init init_dmars(void)
 
 		iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL);
 		iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
-		iommu_enable_translation(iommu);
+
+		if (iommu->pre_enabled_trans) {
+			if (!(iommu->gcmd & DMA_GCMD_TE))
+				iommu_enable_translation(iommu);
+		} else
+			iommu_enable_translation(iommu);
+
 		iommu_disable_protect_mem_regions(iommu);
 	}
 
@@ -4264,11 +4330,14 @@ int __init intel_iommu_init(void)
 	}
 
 	/*
-	 * Disable translation if already enabled prior to OS handover.
+	 * Do not disable translation if it was already enabled prior to
+	 * OS handover, because the old translation tables will be copied.
 	 */
+	/*
 	for_each_active_iommu(iommu, drhd)
 		if (iommu->gcmd & DMA_GCMD_TE)
 			iommu_disable_translation(iommu);
+	*/
 
 	if (dmar_dev_scope_init() < 0) {
 		if (force_on)
@@ -5090,5 +5159,6 @@ static void iommu_check_pre_te_status(struct intel_iommu *iommu)
 	if (sts & DMA_GSTS_TES) {
 		pr_info("Translation is enabled prior to OS.\n");
 		iommu->pre_enabled_trans = 1;
+		g_translation_pre_enabled = 1;
 	}
 }
-- 
2.0.0-rc0

