linux-sgx.vger.kernel.org archive mirror
 help / color / mirror / Atom feed
* [PATCH RFC] x86/sgx: Allocate from local NUMA node first
@ 2020-06-23  4:39 Jarkko Sakkinen
  2020-06-24 23:12 ` Jarkko Sakkinen
  2020-06-24 23:24 ` Dave Hansen
  0 siblings, 2 replies; 7+ messages in thread
From: Jarkko Sakkinen @ 2020-06-23  4:39 UTC (permalink / raw)
  To: linux-sgx
  Cc: kai.svahn, bruce.schlobohm, Jarkko Sakkinen, Dave Hansen,
	Sean Christopherson

Create a pointer array for each NUMA node with the references to the
contained EPC sections. Use this in __sgx_alloc_epc_page() to try the
current NUMA node before the others.

Cc: Dave Hansen <dave.hansen@intel.com>
Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
Signed-off-by: Jarkko Sakkinen <jarkko.sakkinen@linux.intel.com>
---
 arch/x86/kernel/cpu/sgx/main.c | 70 ++++++++++++++++++++++++++++++----
 1 file changed, 63 insertions(+), 7 deletions(-)

diff --git a/arch/x86/kernel/cpu/sgx/main.c b/arch/x86/kernel/cpu/sgx/main.c
index 3594d37d545f..c9b47cf87730 100644
--- a/arch/x86/kernel/cpu/sgx/main.c
+++ b/arch/x86/kernel/cpu/sgx/main.c
@@ -13,6 +13,13 @@
 #include "encl.h"
 #include "encls.h"
 
+struct sgx_numa_node {
+	struct sgx_epc_section *sections[SGX_MAX_EPC_SECTIONS];
+	int nr_sections;
+};
+
+static struct sgx_numa_node sgx_numa_nodes[MAX_NUMNODES];
+static int sgx_nr_numa_nodes;
 struct sgx_epc_section sgx_epc_sections[SGX_MAX_EPC_SECTIONS];
 static int sgx_nr_epc_sections;
 static struct task_struct *ksgxswapd_tsk;
@@ -502,6 +509,27 @@ static struct sgx_epc_page *__sgx_alloc_epc_page_from_section(struct sgx_epc_sec
 	return page;
 }
 
+static struct sgx_epc_page *__sgx_alloc_epc_page_from_node(int nid)
+{
+	struct sgx_numa_node *node = &sgx_numa_nodes[nid];
+	struct sgx_epc_section *section;
+	struct sgx_epc_page *page;
+	int i;
+
+	for (i = 0; i < node->nr_sections; i++) {
+		section = node->sections[i];
+		spin_lock(&section->lock);
+		page = __sgx_alloc_epc_page_from_section(section);
+		spin_unlock(&section->lock);
+
+		if (page)
+			return page;
+	}
+
+	return NULL;
+}
+
+
 /**
  * __sgx_alloc_epc_page() - Allocate an EPC page
  *
@@ -514,16 +542,19 @@ static struct sgx_epc_page *__sgx_alloc_epc_page_from_section(struct sgx_epc_sec
  */
 struct sgx_epc_page *__sgx_alloc_epc_page(void)
 {
-	struct sgx_epc_section *section;
 	struct sgx_epc_page *page;
+	int nid = numa_node_id();
 	int i;
 
-	for (i = 0; i < sgx_nr_epc_sections; i++) {
-		section = &sgx_epc_sections[i];
-		spin_lock(&section->lock);
-		page = __sgx_alloc_epc_page_from_section(section);
-		spin_unlock(&section->lock);
+	page = __sgx_alloc_epc_page_from_node(nid);
+	if (page)
+		return page;
 
+	for (i = 0; i < sgx_nr_numa_nodes; i++) {
+		if (i == nid)
+			continue;
+
+		page = __sgx_alloc_epc_page_from_node(i);
 		if (page)
 			return page;
 	}
@@ -684,11 +715,28 @@ static inline u64 __init sgx_calc_section_metric(u64 low, u64 high)
 	       ((high & GENMASK_ULL(19, 0)) << 32);
 }
 
+static int __init sgx_pfn_to_nid(unsigned long pfn)
+{
+	pg_data_t *pgdat;
+	int nid;
+
+	for (nid = 0; nid < nr_node_ids; nid++) {
+		pgdat = NODE_DATA(nid);
+
+		if (pfn >= pgdat->node_start_pfn &&
+		    pfn < (pgdat->node_start_pfn + pgdat->node_spanned_pages))
+			return nid;
+	}
+
+	return 0;
+}
+
 static bool __init sgx_page_cache_init(void)
 {
 	u32 eax, ebx, ecx, edx, type;
+	struct sgx_numa_node *node;
 	u64 pa, size;
-	int i;
+	int i, nid;
 
 	for (i = 0; i < ARRAY_SIZE(sgx_epc_sections); i++) {
 		cpuid_count(SGX_CPUID, i + SGX_CPUID_FIRST_VARIABLE_SUB_LEAF,
@@ -714,6 +762,14 @@ static bool __init sgx_page_cache_init(void)
 		}
 
 		sgx_nr_epc_sections++;
+
+		nid = sgx_pfn_to_nid(PFN_DOWN(pa));
+		node = &sgx_numa_nodes[nid];
+
+		node->sections[node->nr_sections] = &sgx_epc_sections[i];
+		node->nr_sections++;
+
+		sgx_nr_numa_nodes = max(sgx_nr_numa_nodes, nid + 1);
 	}
 
 	if (!sgx_nr_epc_sections) {
-- 
2.25.1


^ permalink raw reply related	[flat|nested] 7+ messages in thread

end of thread, other threads:[~2020-06-25  2:57 UTC | newest]

Thread overview: 7+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2020-06-23  4:39 [PATCH RFC] x86/sgx: Allocate from local NUMA node first Jarkko Sakkinen
2020-06-24 23:12 ` Jarkko Sakkinen
2020-06-24 23:24 ` Dave Hansen
2020-06-24 23:54   ` Sean Christopherson
2020-06-25  0:25     ` Dave Hansen
2020-06-25  0:57       ` Sean Christopherson
2020-06-25  2:57         ` Jarkko Sakkinen

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).