@@ -30,35 +30,33 @@ static DEFINE_SPINLOCK(sgx_reclaimer_lock);
* Reset dirty EPC pages to uninitialized state. Laundry can be left with SECS
* pages whose child pages blocked EREMOVE.
*/
-static void sgx_sanitize_section(struct sgx_epc_section *section)
+static void sgx_sanitize_section(struct list_head *laundry)
{
struct sgx_epc_page *page;
LIST_HEAD(dirty);
int ret;
-	/* init_laundry_list is thread-local, no need for a lock: */
+	/* The laundry list is thread-local, no need for a lock: */
-	while (!list_empty(&section->init_laundry_list)) {
+ while (!list_empty(laundry)) {
if (kthread_should_stop())
return;
- /* needed for access to ->page_list: */
-	spin_lock(&section->lock);
-
-	page = list_first_entry(&section->init_laundry_list,
- struct sgx_epc_page, list);
+ page = list_first_entry(laundry, struct sgx_epc_page, list);
ret = __eremove(sgx_get_epc_virt_addr(page));
- if (!ret)
-	list_move(&page->list, &section->page_list);
- else
+ if (!ret) {
+ /* The page is clean - move to the free list. */
+ list_del(&page->list);
+ sgx_free_epc_page(page);
+ } else {
+ /* The page is not yet clean - move to the dirty list. */
list_move_tail(&page->list, &dirty);
-
-	spin_unlock(&section->lock);
+ }
cond_resched();
}
-	list_splice(&dirty, &section->init_laundry_list);
+ list_splice(&dirty, laundry);
}
static bool sgx_reclaimer_age(struct sgx_epc_page *epc_page)
@@ -405,6 +403,7 @@ static bool sgx_should_reclaim(unsigned long watermark)
static int ksgxd(void *p)
{
+ struct list_head *laundry = p;
-	int i;
set_freezable();
@@ -413,16 +412,13 @@ static int ksgxd(void *p)
* Sanitize pages in order to recover from kexec(). The 2nd pass is
* required for SECS pages, whose child pages blocked EREMOVE.
*/
- for (i = 0; i < sgx_nr_epc_sections; i++)
- sgx_sanitize_section(&sgx_epc_sections[i]);
+ sgx_sanitize_section(laundry);
+ sgx_sanitize_section(laundry);
- for (i = 0; i < sgx_nr_epc_sections; i++) {
- sgx_sanitize_section(&sgx_epc_sections[i]);
+ if (!list_empty(laundry))
+	WARN(1, "EPC pages are left unsanitized.\n");
- /* Should never happen. */
- if (!list_empty(&sgx_epc_sections[i].init_laundry_list))
- WARN(1, "EPC section %d has unsanitized pages.\n", i);
- }
+ kfree(laundry);
while (!kthread_should_stop()) {
if (try_to_freeze())
@@ -441,11 +437,11 @@ static int ksgxd(void *p)
return 0;
}
-static bool __init sgx_page_reclaimer_init(void)
+static bool __init sgx_page_reclaimer_init(struct list_head *laundry)
{
struct task_struct *tsk;
- tsk = kthread_run(ksgxd, NULL, "ksgxd");
+ tsk = kthread_run(ksgxd, laundry, "ksgxd");
if (IS_ERR(tsk))
return false;
@@ -619,7 +615,8 @@ void sgx_free_epc_page(struct sgx_epc_page *page)
static bool __init sgx_setup_epc_section(u64 phys_addr, u64 size,
unsigned long index,
- struct sgx_epc_section *section)
+ struct sgx_epc_section *section,
+ struct list_head *laundry)
{
unsigned long nr_pages = size >> PAGE_SHIFT;
unsigned long i;
@@ -637,13 +634,12 @@ static bool __init sgx_setup_epc_section(u64 phys_addr, u64 size,
section->phys_addr = phys_addr;
	spin_lock_init(&section->lock);
	INIT_LIST_HEAD(&section->page_list);
-	INIT_LIST_HEAD(&section->init_laundry_list);
for (i = 0; i < nr_pages; i++) {
section->pages[i].section = index;
section->pages[i].flags = 0;
section->pages[i].owner = NULL;
-	list_add_tail(&section->pages[i].list, &section->init_laundry_list);
+	list_add_tail(&section->pages[i].list, laundry);
}
section->free_cnt = nr_pages;
@@ -661,7 +657,7 @@ static inline u64 __init sgx_calc_section_metric(u64 low, u64 high)
((high & GENMASK_ULL(19, 0)) << 32);
}
-static bool __init sgx_page_cache_init(void)
+static bool __init sgx_page_cache_init(struct list_head *laundry)
{
u32 eax, ebx, ecx, edx, type;
u64 pa, size;
@@ -684,7 +680,7 @@ static bool __init sgx_page_cache_init(void)
pr_info("EPC section 0x%llx-0x%llx\n", pa, pa + size - 1);
- if (!sgx_setup_epc_section(pa, size, i, &sgx_epc_sections[i])) {
+ if (!sgx_setup_epc_section(pa, size, i, &sgx_epc_sections[i], laundry)) {
pr_err("No free memory for an EPC section\n");
break;
}
@@ -702,18 +698,25 @@ static bool __init sgx_page_cache_init(void)
static int __init sgx_init(void)
{
+ struct list_head *laundry;
int ret;
int i;
if (!cpu_feature_enabled(X86_FEATURE_SGX))
return -ENODEV;
- if (!sgx_page_cache_init()) {
+ laundry = kzalloc(sizeof(*laundry), GFP_KERNEL);
+ if (!laundry)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(laundry);
+
+ if (!sgx_page_cache_init(laundry)) {
ret = -ENOMEM;
goto err_page_cache;
}
- if (!sgx_page_reclaimer_init()) {
+ if (!sgx_page_reclaimer_init(laundry)) {
ret = -ENOMEM;
goto err_page_cache;
}
@@ -733,6 +736,7 @@ static int __init sgx_init(void)
memunmap(sgx_epc_sections[i].virt_addr);
}
+ kfree(laundry);
return ret;
}
--- a/arch/x86/kernel/cpu/sgx/sgx.h
+++ b/arch/x86/kernel/cpu/sgx/sgx.h
@@ -45,13 +45,6 @@ struct sgx_epc_section {
spinlock_t lock;
struct list_head page_list;
unsigned long free_cnt;
-
- /*
- * Pages which need EREMOVE run on them before they can be
- * used. Only safe to be accessed in ksgxd and init code.
- * Not protected by locks.
- */
- struct list_head init_laundry_list;
};
extern struct sgx_epc_section sgx_epc_sections[SGX_MAX_EPC_SECTIONS];
Build a local laundry list in sgx_init(), and transfer its ownership to
ksgxd for sanitization, thus getting rid of the useless member in struct
sgx_epc_section.

Signed-off-by: Jarkko Sakkinen <jarkko@kernel.org>
---
 arch/x86/kernel/cpu/sgx/main.c | 64 ++++++++++++++++++----------------
 arch/x86/kernel/cpu/sgx/sgx.h  |  7 ----
 2 files changed, 34 insertions(+), 37 deletions(-)
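
For illustration only, and not part of the patch: below is a minimal,
self-contained module sketch of the same ownership hand-off, using
hypothetical demo_* names rather than anything in the SGX driver. Init code
allocates and seeds a list head, kthread_run() passes it to the worker
thread, and the worker drains the list and frees the head, mirroring how
sgx_init() and ksgxd() handle the laundry list. If starting the thread
fails, the init code still owns the list and cleans it up itself.

/* Illustrative sketch only; demo_* names are hypothetical, not SGX code. */
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kthread.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/slab.h>

struct demo_page {
	struct list_head list;
	int id;
};

static struct task_struct *demo_tsk;

/* The worker takes ownership of @p: it drains the list and frees its head. */
static int demo_worker(void *p)
{
	struct list_head *laundry = p;
	struct demo_page *page, *tmp;

	list_for_each_entry_safe(page, tmp, laundry, list) {
		pr_info("demo: cleaning page %d\n", page->id);
		list_del(&page->list);
		kfree(page);
	}

	/* The list head was only needed for the hand-off; release it here. */
	kfree(laundry);

	while (!kthread_should_stop())
		msleep_interruptible(100);

	return 0;
}

static int __init demo_init(void)
{
	struct demo_page *page, *tmp;
	struct list_head *laundry;
	struct task_struct *tsk;
	int i;

	/* Build the list locally, as sgx_init() does for the EPC laundry. */
	laundry = kzalloc(sizeof(*laundry), GFP_KERNEL);
	if (!laundry)
		return -ENOMEM;

	INIT_LIST_HEAD(laundry);

	for (i = 0; i < 4; i++) {
		page = kzalloc(sizeof(*page), GFP_KERNEL);
		if (!page)
			break;
		page->id = i;
		list_add_tail(&page->list, laundry);
	}

	/* Transfer ownership; after this point only the worker touches it. */
	tsk = kthread_run(demo_worker, laundry, "demo_worker");
	if (IS_ERR(tsk)) {
		/* Hand-off failed: init code still owns the list, clean it up. */
		list_for_each_entry_safe(page, tmp, laundry, list) {
			list_del(&page->list);
			kfree(page);
		}
		kfree(laundry);
		return PTR_ERR(tsk);
	}

	demo_tsk = tsk;
	return 0;
}

static void __exit demo_exit(void)
{
	kthread_stop(demo_tsk);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");

The point of the pattern is that the laundry list only matters once, at
boot, so it can live on the heap and die with the sanitization pass instead
of occupying a field in struct sgx_epc_section for the lifetime of the
driver.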