@@ -232,6 +232,11 @@ void sgx_eblock(struct sgx_encl *encl, struct sgx_epc_page *epc_page);
void sgx_etrack(struct sgx_encl *encl);
int sgx_init_page(struct sgx_encl *encl, struct sgx_encl_page *entry,
unsigned long addr, unsigned int alloc_flags);
-unsigned long sgx_swap_pages(unsigned long nr_to_scan);
+
+enum sgx_swap_flags {
+ SGX_SWAP_IGNORE_LRU = BIT(0),
+};
+unsigned long sgx_swap_pages(unsigned long nr_to_scan,
+ unsigned int flags);
#endif /* __ARCH_X86_INTEL_SGX_H__ */
@@ -342,7 +342,8 @@ static inline void sgx_lru_putback(struct list_head *src)
spin_unlock(&lru->lock);
}
-unsigned long sgx_swap_pages(unsigned long nr_to_scan)
+unsigned long sgx_swap_pages(unsigned long nr_to_scan,
+ unsigned int flags)
{
struct sgx_epc_page *entry, *tmp;
struct sgx_encl *encl;
@@ -369,7 +370,8 @@ unsigned long sgx_swap_pages(unsigned long nr_to_scan)
down_read(&encl->mm->mmap_sem);
sgx_del_if_dead(encl, &swap, &skip);
- sgx_age_pages(&swap, &skip);
+ if (!(flags & SGX_SWAP_IGNORE_LRU))
+ sgx_age_pages(&swap, &skip);
if (!list_empty(&swap)) {
mutex_lock(&encl->lock);
@@ -399,7 +401,7 @@ int ksgxswapd(void *p)
sgx_nr_free_pages < sgx_nr_high_pages);
if (sgx_nr_free_pages < sgx_nr_high_pages)
- sgx_swap_pages(SGX_NR_SWAP_CLUSTER_MAX);
+ sgx_swap_pages(SGX_NR_SWAP_CLUSTER_MAX, 0);
}
pr_info("%s: done\n", __func__);
@@ -512,7 +514,7 @@ struct sgx_epc_page *sgx_alloc_page(unsigned int flags)
break;
}
- sgx_swap_pages(SGX_NR_SWAP_CLUSTER_MAX);
+ sgx_swap_pages(SGX_NR_SWAP_CLUSTER_MAX, 0);
schedule();
}
Add a flag, SGX_SWAP_IGNORE_LRU, that, when passed to sgx_swap_pages, causes the EPC swap flow to skip LRU checks that might prevent an EPC page from being selected for swapping. Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com> --- drivers/platform/x86/intel_sgx/sgx.h | 7 ++++++- drivers/platform/x86/intel_sgx/sgx_page_cache.c | 10 ++++++---- 2 files changed, 12 insertions(+), 5 deletions(-)