From: Kai Huang <kai.huang@intel.com>
To: linux-kernel@vger.kernel.org, kvm@vger.kernel.org
Cc: seanjc@google.com, pbonzini@redhat.com, dave.hansen@intel.com,
	len.brown@intel.com, tony.luck@intel.com,
	rafael.j.wysocki@intel.com, reinette.chatre@intel.com,
	dan.j.williams@intel.com, peterz@infradead.org,
	ak@linux.intel.com, kirill.shutemov@linux.intel.com,
	sathyanarayanan.kuppuswamy@linux.intel.com,
	isaku.yamahata@intel.com, kai.huang@intel.com
Subject: [PATCH v4 16/22] x86/virt/tdx: Set up reserved areas for all TDMRs
Date: Wed,  1 Jun 2022 07:39:39 +1200
Message-ID: <a3d395001264293470f10b79e5c74b44e9550fd7.1654025431.git.kai.huang@intel.com>
In-Reply-To: <cover.1654025430.git.kai.huang@intel.com>

As the last step of constructing TDMRs, set up reserved areas for all
TDMRs.  For each TDMR, put all memory holes within the TDMR into its
reserved areas, and for each PAMT that overlaps with the TDMR, put the
overlapping part into the reserved areas as well.
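
As a hypothetical illustration: if a TDMR covers [1G, 2G), the memory
regions within it are [1G, 1.25G) and [1.5G, 2G), and a 16M PAMT sits
at [1G, 1G + 16M), then the TDMR ends up with two reserved areas in
ascending order: [1G, 1G + 16M) covering the PAMT, and [1.25G, 1.5G)
covering the memory hole.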

Signed-off-by: Kai Huang <kai.huang@intel.com>
---
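Illustration only: below is a minimal standalone userspace sketch of the
two key pieces of this patch, the memory-hole walk and the reserved-area
ordering.  It uses qsort(3) in place of the kernel's sort(), and all
struct, helper names and addresses in it are hypothetical stand-ins
rather than kernel API:

  #include <stdio.h>
  #include <stdlib.h>
  #include <stdint.h>

  struct range { uint64_t start, end; };	/* [start, end) */

  #define MAX_RSVD 16

  static struct range rsvd[MAX_RSVD];
  static int rsvd_idx;

  static int add_rsvd(uint64_t start, uint64_t end)
  {
  	if (rsvd_idx >= MAX_RSVD)
  		return -1;	/* mirrors the -E2BIG case */
  	rsvd[rsvd_idx].start = start;
  	rsvd[rsvd_idx].end = end;
  	rsvd_idx++;
  	return 0;
  }

  /* The memory-hole walk: regions must be sorted and non-overlapping */
  static int add_holes(const struct range *mem, int nr,
  		     uint64_t tdmr_start, uint64_t tdmr_end)
  {
  	uint64_t prev_end = tdmr_start;
  	int i;

  	for (i = 0; i < nr; i++) {
  		if (mem[i].start >= tdmr_end)
  			break;		/* region is after the TDMR */
  		if (mem[i].end < tdmr_start)
  			continue;	/* region is before the TDMR */
  		if (mem[i].start <= prev_end) {
  			prev_end = mem[i].end;	/* no hole before region */
  			continue;
  		}
  		if (add_rsvd(prev_end, mem[i].start))
  			return -1;
  		prev_end = mem[i].end;
  	}
  	/* Trailing hole between the last region and the TDMR's end */
  	if (prev_end < tdmr_end)
  		return add_rsvd(prev_end, tdmr_end);
  	return 0;
  }

  /* Same ordering contract as rsvd_area_cmp_func(): inputs never overlap */
  static int rsvd_cmp(const void *a, const void *b)
  {
  	const struct range *r1 = a, *r2 = b;

  	if (r1->end <= r2->start)
  		return -1;
  	if (r1->start >= r2->end)
  		return 1;
  	return 0;	/* overlap: cannot happen for valid input */
  }

  int main(void)
  {
  	/* Two regions inside a hypothetical [1G, 2G) TDMR, hole between */
  	const struct range mem[] = {
  		{ 0x40000000, 0x50000000 },
  		{ 0x60000000, 0x80000000 },
  	};
  	int i;

  	if (add_holes(mem, 2, 0x40000000, 0x80000000))
  		return 1;
  	qsort(rsvd, rsvd_idx, sizeof(rsvd[0]), rsvd_cmp);

  	for (i = 0; i < rsvd_idx; i++)
  		printf("reserved: [%#llx, %#llx)\n",
  		       (unsigned long long)rsvd[i].start,
  		       (unsigned long long)rsvd[i].end);
  	return 0;	/* prints: reserved: [0x50000000, 0x60000000) */
  }
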
 arch/x86/virt/vmx/tdx/tdx.c | 160 +++++++++++++++++++++++++++++++++++-
 1 file changed, 158 insertions(+), 2 deletions(-)

diff --git a/arch/x86/virt/vmx/tdx/tdx.c b/arch/x86/virt/vmx/tdx/tdx.c
index 409b49c02329..8d1a9ff7e7b5 100644
--- a/arch/x86/virt/vmx/tdx/tdx.c
+++ b/arch/x86/virt/vmx/tdx/tdx.c
@@ -19,6 +19,7 @@
 #include <linux/memblock.h>
 #include <linux/gfp.h>
 #include <linux/align.h>
+#include <linux/sort.h>
 #include <asm/cpufeatures.h>
 #include <asm/cpufeature.h>
 #include <asm/msr-index.h>
@@ -754,6 +755,157 @@ static unsigned long tdmrs_get_pamt_pages(struct tdmr_info *tdmr_array,
 	return pamt_npages;
 }
 
+static int tdmr_add_rsvd_area(struct tdmr_info *tdmr, int *p_idx,
+			      u64 addr, u64 size)
+{
+	struct tdmr_reserved_area *rsvd_areas = tdmr->reserved_areas;
+	int idx = *p_idx;
+
+	/* Reserved area must be 4K aligned in offset and size */
+	if (WARN_ON(addr & ~PAGE_MASK || size & ~PAGE_MASK))
+		return -EINVAL;
+
+	/* Cannot exceed maximum reserved areas supported by TDX */
+	if (idx >= tdx_sysinfo.max_reserved_per_tdmr)
+		return -E2BIG;
+
+	rsvd_areas[idx].offset = addr - tdmr->base;
+	rsvd_areas[idx].size = size;
+
+	*p_idx = idx + 1;
+
+	return 0;
+}
+
+/* Compare function called by sort() for TDMR reserved areas */
+static int rsvd_area_cmp_func(const void *a, const void *b)
+{
+	struct tdmr_reserved_area *r1 = (struct tdmr_reserved_area *)a;
+	struct tdmr_reserved_area *r2 = (struct tdmr_reserved_area *)b;
+
+	if (r1->offset + r1->size <= r2->offset)
+		return -1;
+	if (r1->offset >= r2->offset + r2->size)
+		return 1;
+
+	/* Reserved areas must not overlap; the caller must guarantee that. */
+	WARN_ON_ONCE(1);
+	return -1;
+}
+
+/* Set up reserved areas for a TDMR, including memory holes and PAMTs */
+static int tdmr_set_up_rsvd_areas(struct tdmr_info *tdmr,
+				  struct tdmr_info *tdmr_array,
+				  int tdmr_num)
+{
+	unsigned long start_pfn, end_pfn;
+	int rsvd_idx, i, ret = 0;
+	u64 prev_end;
+
+	/* Mark holes between memory regions as reserved */
+	rsvd_idx = 0;
+	prev_end = tdmr_start(tdmr);
+	memblock_for_each_tdx_mem_pfn_range(i, &start_pfn, &end_pfn, NULL) {
+		u64 start, end;
+
+		start = start_pfn << PAGE_SHIFT;
+		end = end_pfn << PAGE_SHIFT;
+
+		/* Break if this region is after the TDMR */
+		if (start >= tdmr_end(tdmr))
+			break;
+
+		/* Exclude regions before this TDMR */
+		if (end < tdmr_start(tdmr))
+			continue;
+
+		/*
+		 * Skip this region if there is no hole before it.  "<="
+		 * is used because one memory region may span two TDMRs:
+		 * when the previous TDMR covers part of this region,
+		 * the region's start address is smaller than this
+		 * TDMR's start address.
+		 *
+		 * Update prev_end to the end of this region, which is
+		 * where a possible memory hole starts.
+		 */
+		if (start <= prev_end) {
+			prev_end = end;
+			continue;
+		}
+
+		/* Add the hole before this region */
+		ret = tdmr_add_rsvd_area(tdmr, &rsvd_idx, prev_end,
+				start - prev_end);
+		if (ret)
+			return ret;
+
+		prev_end = end;
+	}
+
+	/* Add the hole after the last region if it exists. */
+	if (prev_end < tdmr_end(tdmr)) {
+		ret = tdmr_add_rsvd_area(tdmr, &rsvd_idx, prev_end,
+				tdmr_end(tdmr) - prev_end);
+		if (ret)
+			return ret;
+	}
+
+	/*
+	 * If any PAMT overlaps with this TDMR, the overlapping part
+	 * must also be put into the reserved areas.  Walk over all
+	 * TDMRs to find such overlapping PAMTs and add them to the
+	 * reserved areas.
+	 */
+	for (i = 0; i < tdmr_num; i++) {
+		struct tdmr_info *tmp = tdmr_array_entry(tdmr_array, i);
+		u64 pamt_start, pamt_end;
+
+		pamt_start = tmp->pamt_4k_base;
+		pamt_end = pamt_start + tmp->pamt_4k_size +
+			tmp->pamt_2m_size + tmp->pamt_1g_size;
+
+		/* Skip PAMTs outside of the given TDMR */
+		if ((pamt_end <= tdmr_start(tdmr)) ||
+				(pamt_start >= tdmr_end(tdmr)))
+			continue;
+
+		/* Only mark the part within the TDMR as reserved */
+		if (pamt_start < tdmr_start(tdmr))
+			pamt_start = tdmr_start(tdmr);
+		if (pamt_end > tdmr_end(tdmr))
+			pamt_end = tdmr_end(tdmr);
+
+		ret = tdmr_add_rsvd_area(tdmr, &rsvd_idx, pamt_start,
+				pamt_end - pamt_start);
+		if (ret)
+			return ret;
+	}
+
+	/* TDX requires reserved areas to be listed in ascending address order */
+	sort(tdmr->reserved_areas, rsvd_idx, sizeof(struct tdmr_reserved_area),
+			rsvd_area_cmp_func, NULL);
+
+	return 0;
+}
+
+static int tdmrs_set_up_rsvd_areas_all(struct tdmr_info *tdmr_array,
+				      int tdmr_num)
+{
+	int i;
+
+	for (i = 0; i < tdmr_num; i++) {
+		int ret;
+
+		ret = tdmr_set_up_rsvd_areas(tdmr_array_entry(tdmr_array, i),
+				tdmr_array, tdmr_num);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+}
+
 /*
  * Construct an array of TDMRs to cover all memory regions in memblock.
  * This makes sure all pages managed by the page allocator are TDX
@@ -772,8 +924,12 @@ static int construct_tdmrs_memeblock(struct tdmr_info *tdmr_array,
 	if (ret)
 		goto err;
 
-	/* Return -EINVAL until constructing TDMRs is done */
-	ret = -EINVAL;
+	ret = tdmrs_set_up_rsvd_areas_all(tdmr_array, *tdmr_num);
+	if (ret)
+		goto err_free_pamts;
+
+	return 0;
+err_free_pamts:
 	tdmrs_free_pamt_all(tdmr_array, *tdmr_num);
 err:
 	return ret;
-- 
2.35.3


Thread overview: 23+ messages
2022-05-31 19:39 [PATCH v4 00/22] TDX host kernel support Kai Huang
2022-05-31 19:39 ` [PATCH v4 01/22] x86/virt/tdx: Detect TDX during kernel boot Kai Huang
2022-05-31 19:39 ` [PATCH v4 02/22] cc_platform: Add new attribute to prevent ACPI CPU hotplug Kai Huang
2022-05-31 19:39 ` [PATCH v4 03/22] cc_platform: Add new attribute to prevent ACPI memory hotplug Kai Huang
2022-05-31 19:39 ` [PATCH v4 04/22] x86/virt/tdx: Prevent ACPI CPU hotplug and " Kai Huang
2022-05-31 19:39 ` [PATCH v4 05/22] x86/virt/tdx: Prevent hot-add driver managed memory Kai Huang
2022-05-31 19:39 ` [PATCH v4 06/22] x86/virt/tdx: Add skeleton to initialize TDX on demand Kai Huang
2022-05-31 19:39 ` [PATCH v4 07/22] x86/virt/tdx: Implement SEAMCALL function Kai Huang
2022-05-31 19:39 ` [PATCH v4 08/22] x86/virt/tdx: Shut down TDX module in case of error Kai Huang
2022-05-31 19:39 ` [PATCH v4 09/22] x86/virt/tdx: Detect TDX module by doing module global initialization Kai Huang
2022-05-31 19:39 ` [PATCH v4 10/22] x86/virt/tdx: Do logical-cpu scope TDX module initialization Kai Huang
2022-05-31 19:39 ` [PATCH v4 11/22] x86/virt/tdx: Get information about TDX module and TDX-capable memory Kai Huang
2022-05-31 19:39 ` [PATCH v4 12/22] x86/virt/tdx: Convert all memory regions in memblock to TDX memory Kai Huang
2022-05-31 19:39 ` [PATCH v4 13/22] x86/virt/tdx: Add placeholder to construct TDMRs based on memblock Kai Huang
2022-05-31 19:39 ` [PATCH v4 14/22] x86/virt/tdx: Create TDMRs to cover all memblock memory regions Kai Huang
2022-05-31 19:39 ` [PATCH v4 15/22] x86/virt/tdx: Allocate and set up PAMTs for TDMRs Kai Huang
2022-05-31 19:39 ` [PATCH v4 16/22] x86/virt/tdx: Set up reserved areas for all TDMRs Kai Huang [this message]
2022-05-31 19:39 ` [PATCH v4 17/22] x86/virt/tdx: Reserve TDX module global KeyID Kai Huang
2022-05-31 19:39 ` [PATCH v4 18/22] x86/virt/tdx: Configure TDX module with TDMRs and " Kai Huang
2022-05-31 19:39 ` [PATCH v4 19/22] x86/virt/tdx: Configure global KeyID on all packages Kai Huang
2022-05-31 19:39 ` [PATCH v4 20/22] x86/virt/tdx: Initialize all TDMRs Kai Huang
2022-05-31 19:39 ` [PATCH v4 21/22] x86/virt/tdx: Support kexec() Kai Huang
2022-05-31 19:39 ` [PATCH v4 22/22] Documentation/x86: Add documentation for TDX host support Kai Huang
