@@ -1941,6 +1941,7 @@ config X86_SGX
depends on CRYPTO_SHA256=y
select SRCU
select MMU_NOTIFIER
+ select NUMA_KEEP_MEMINFO if NUMA
help
Intel(R) Software Guard eXtensions (SGX) is a set of CPU instructions
that can be used by applications to set aside private regions of code
@@ -18,6 +18,16 @@ static int sgx_nr_epc_sections;
static struct task_struct *ksgxd_tsk;
static DECLARE_WAIT_QUEUE_HEAD(ksgxd_waitq);
+/* Nodes with one or more EPC sections. */
+static nodemask_t sgx_numa_mask;
+
+/*
+ * Array with one list_head for each possible NUMA node. Each
+ * list contains all the sgx_epc_section's which are on that
+ * node.
+ */
+struct list_head *sgx_numa_nodes;
+
/*
* These variables are part of the state of the reclaimer, and must be accessed
* with sgx_reclaimer_lock acquired.
@@ -473,6 +483,26 @@ static struct sgx_epc_page *__sgx_alloc_epc_page_from_section(struct sgx_epc_sec
return page;
}
+static struct sgx_epc_page *__sgx_alloc_epc_page_from_node(int nid)
+{
+ struct sgx_epc_section *section;
+ struct sgx_epc_page *page;
+
+ if (WARN_ON_ONCE(nid < 0 || nid >= num_possible_nodes()))
+ return NULL;
+
+ if (!node_isset(nid, sgx_numa_mask))
+ return NULL;
+
+ list_for_each_entry(section, &sgx_numa_nodes[nid], numa_section_list) {
+ page = __sgx_alloc_epc_page_from_section(section);
+ if (page)
+ return page;
+ }
+
+ return NULL;
+}
+
/**
* __sgx_alloc_epc_page() - Allocate an EPC page
*
@@ -485,13 +515,19 @@ static struct sgx_epc_page *__sgx_alloc_epc_page_from_section(struct sgx_epc_sec
*/
struct sgx_epc_page *__sgx_alloc_epc_page(void)
{
+ int current_nid = numa_node_id();
struct sgx_epc_section *section;
struct sgx_epc_page *page;
int i;
+ /* Try to allocate EPC from the current node, first: */
+ page = __sgx_alloc_epc_page_from_node(current_nid);
+ if (page)
+ return page;
+
+ /* Search all EPC sections, ignoring locality: */
for (i = 0; i < sgx_nr_epc_sections; i++) {
section = &sgx_epc_sections[i];
-
page = __sgx_alloc_epc_page_from_section(section);
if (page)
return page;
@@ -665,8 +701,12 @@ static bool __init sgx_page_cache_init(void)
{
u32 eax, ebx, ecx, edx, type;
u64 pa, size;
+ int nid;
int i;
+ nodes_clear(sgx_numa_mask);
+ sgx_numa_nodes = kmalloc_array(num_possible_nodes(), sizeof(*sgx_numa_nodes), GFP_KERNEL);
+ if (!sgx_numa_nodes)
+ return false;
+
for (i = 0; i < ARRAY_SIZE(sgx_epc_sections); i++) {
cpuid_count(SGX_CPUID, i + SGX_CPUID_EPC, &eax, &ebx, &ecx, &edx);
@@ -690,6 +730,21 @@ static bool __init sgx_page_cache_init(void)
}
sgx_nr_epc_sections++;
+
+ nid = numa_map_to_online_node(phys_to_target_node(pa));
+
+ if (nid == NUMA_NO_NODE) {
+ /* The physical address is already printed above. */
+ pr_warn(FW_BUG "Unable to map EPC section to online node. Fallback to the NUMA node 0.\n");
+ nid = 0;
+ }
+
+ if (!node_isset(nid, sgx_numa_mask)) {
+ INIT_LIST_HEAD(&sgx_numa_nodes[nid]);
+ node_set(nid, sgx_numa_mask);
+ }
+
+ list_add_tail(&sgx_epc_sections[i].numa_section_list, &sgx_numa_nodes[nid]);
}
if (!sgx_nr_epc_sections) {
@@ -45,6 +45,7 @@ struct sgx_epc_section {
spinlock_t lock;
struct list_head page_list;
unsigned long free_cnt;
+ struct list_head numa_section_list;
/*
* Pages which need EREMOVE run on them before they can be

Background
==========

An EPC section is covered by one or more SRAT entries that are associated
with one and only one PXM (NUMA node). The motivation behind this patch is
to provide the basic elements for building an allocation scheme based on
that premise. It does not try to fully address NUMA. For instance, it does
not provide integration with the mempolicy API, but neither does it
introduce any bottlenecks that would make adding that harder later on.
Memory allocation is a complex topic, so it's better to start with baby
steps.

Solution
========

Use phys_to_target_node() to associate each NUMA node with the EPC
sections contained within its range; NUMA_KEEP_MEMINFO is selected in
Kconfig so that the boot-time NUMA metadata needed by
phys_to_target_node() is retained. In sgx_alloc_epc_page(), first try to
allocate from the NUMA node where the CPU is executing. If that fails,
fall back to the legacy allocation, which scans all EPC sections.

Link: https://lore.kernel.org/lkml/158188326978.894464.217282995221175417.stgit@dwillia2-desk3.amr.corp.intel.com/
Signed-off-by: Jarkko Sakkinen <jarkko@kernel.org>
---
v2:
* s/section_list/numa_section_list/
* s/MAX_NUMNODES/num_possible_nodes()/
* Add more verbose inline comments that Dave provided.
* If the NUMA mapping fails, print a warning and a description of the
  fallback. The physical address is already printed by the pr_info() just
  before the mapping happens.

 arch/x86/Kconfig               |  1 +
 arch/x86/kernel/cpu/sgx/main.c | 57 +++++++++++++++++++++++++++++++++-
 arch/x86/kernel/cpu/sgx/sgx.h  |  1 +
 3 files changed, 58 insertions(+), 1 deletion(-)
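For anyone skimming the thread, here is a condensed view of the allocation
order that the two main.c hunks above add up to. It is an illustration
only, not part of the patch: the names follow the diff, the local-node
lookup is inlined, and the trailing ERR_PTR(-ENOMEM) return is assumed to
be the existing tail of the function, which this patch does not change.

        struct sgx_epc_page *__sgx_alloc_epc_page(void)
        {
                struct sgx_epc_page *page;
                int i;

                /* Prefer EPC sections on the node this CPU is running on. */
                page = __sgx_alloc_epc_page_from_node(numa_node_id());
                if (page)
                        return page;

                /* Otherwise scan every EPC section, ignoring locality. */
                for (i = 0; i < sgx_nr_epc_sections; i++) {
                        page = __sgx_alloc_epc_page_from_section(&sgx_epc_sections[i]);
                        if (page)
                                return page;
                }

                /* Assumed unchanged error path of the existing function. */
                return ERR_PTR(-ENOMEM);
        }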
Background ========== EPC section is covered by one or more SRAT entries that are associated with one and only one PXM (NUMA node). The motivation behind this patch is to provide basic elements of building allocation scheme based on this premise. It does not try to fully address NUMA. For instance, it does not provide integration to the mempolicy API, but neither does introduce any bottlenecks to address this later on. Memory allocation is a complex topic, and thus it's better to start with baby steps. Solution ======== Use phys_to_target_node() to associate each NUMA node with the EPC sections contained within its range. In sgx_alloc_epc_page(), first try to allocate from the NUMA node, where the CPU is executing. If that fails, fallback to the legacy allocation. Link: https://lore.kernel.org/lkml/158188326978.894464.217282995221175417.stgit@dwillia2-desk3.amr.corp.intel.com/ Signed-off-by: Jarkko Sakkinen <jarkko@kernel.org> --- v2: * s/section_list/numa_section_list/ * s/MAX_NUMNODES//num_possible_nodes()/ * Add more verbose inline comments that Dave provided. * If NUMA mapping fails, print a warning and description of the fallbck. The physical address is alredy printed in pr_info(), just before the mapping happens. arch/x86/Kconfig | 1 + arch/x86/kernel/cpu/sgx/main.c | 57 +++++++++++++++++++++++++++++++++- arch/x86/kernel/cpu/sgx/sgx.h | 1 + 3 files changed, 58 insertions(+), 1 deletion(-)