@@ -34,6 +34,15 @@ static DEFINE_XARRAY(sgx_epc_address_space);
*/
static struct sgx_epc_lru_lists sgx_global_lru;
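
+/*
+ * Return the LRU lists a given EPC page belongs to. For now all pages
+ * live on a single global LRU.
+ */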
+static inline struct sgx_epc_lru_lists *sgx_lru_lists(struct sgx_epc_page *epc_page)
+{
+ return &sgx_global_lru;
+}
+
static atomic_long_t sgx_nr_free_pages = ATOMIC_LONG_INIT(0);

/* Nodes with one or more EPC sections. */
@@ -286,6 +295,7 @@ static void sgx_reclaimer_write(struct sgx_epc_page *epc_page,
/**
* sgx_reclaim_pages() - Reclaim EPC pages from the consumers
* @nr_to_scan: Number of EPC pages to scan for reclaim
+ * @ignore_age: Reclaim a page even if it is young
*
* Take a fixed number of pages from the head of the active page pool and
* reclaim them to the enclave's private shmem files. Skip pages that have
@@ -299,11 +309,14 @@ static void sgx_reclaimer_write(struct sgx_epc_page *epc_page,
* problematic as it would increase the lock contention too much, which would
* halt forward progress.
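+ *
+ * Return: Number of pages reclaimed.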
*/
-static size_t sgx_reclaim_pages(size_t nr_to_scan)
+static size_t sgx_reclaim_pages(size_t nr_to_scan, bool ignore_age)
{
struct sgx_backing backing[SGX_NR_TO_SCAN_MAX];
struct sgx_epc_page *epc_page, *tmp;
struct sgx_encl_page *encl_page;
+ struct sgx_epc_lru_lists *lru;
pgoff_t page_index;
LIST_HEAD(iso);
size_t ret;
@@ -339,7 +352,9 @@ static size_t sgx_reclaim_pages(size_t nr_to_scan)
list_for_each_entry_safe(epc_page, tmp, &iso, list) {
encl_page = epc_page->encl_page;

- if (i == SGX_NR_TO_SCAN_MAX || !sgx_reclaimer_age(epc_page))
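+ /* Unless told to ignore age, skip pages accessed since the last scan */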
+ if (i == SGX_NR_TO_SCAN_MAX ||
+ (!ignore_age && !sgx_reclaimer_age(epc_page)))
goto skip;

page_index = PFN_DOWN(encl_page->desc - encl_page->encl->base);
@@ -357,10 +372,12 @@ static size_t sgx_reclaim_pages(size_t nr_to_scan)
continue;

skip:
- spin_lock(&sgx_global_lru.lock);
+ lru = sgx_lru_lists(epc_page);
+ spin_lock(&lru->lock);
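+ /* Mark the page reclaimable again and return it to the LRU's tail */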
sgx_epc_page_set_state(epc_page, SGX_EPC_PAGE_RECLAIMABLE);
- list_move_tail(&epc_page->list, &sgx_global_lru.reclaimable);
- spin_unlock(&sgx_global_lru.lock);
+ list_move_tail(&epc_page->list, &lru->reclaimable);
+ spin_unlock(&lru->lock);

kref_put(&encl_page->encl->refcount, sgx_encl_release);
}
@@ -395,7 +412,7 @@ static bool sgx_should_reclaim(unsigned long watermark)
void sgx_reclaim_direct(void)
{
if (sgx_should_reclaim(SGX_NR_LOW_PAGES))
- sgx_reclaim_pages(SGX_NR_TO_SCAN);
+ sgx_reclaim_pages(SGX_NR_TO_SCAN, false);
}

static int ksgxd(void *p)
@@ -418,7 +435,7 @@ static int ksgxd(void *p)
sgx_should_reclaim(SGX_NR_HIGH_PAGES));

if (sgx_should_reclaim(SGX_NR_HIGH_PAGES))
- sgx_reclaim_pages(SGX_NR_TO_SCAN);
+ sgx_reclaim_pages(SGX_NR_TO_SCAN, false);

cond_resched();
}
@@ -514,14 +531,17 @@ struct sgx_epc_page *__sgx_alloc_epc_page(void)
*/
void sgx_record_epc_page(struct sgx_epc_page *page, unsigned long flags)
{
- spin_lock(&sgx_global_lru.lock);
+ struct sgx_epc_lru_lists *lru = sgx_lru_lists(page);
+
+ spin_lock(&lru->lock);
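+ /* The page must not already be marked reclaimable */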
WARN_ON_ONCE(sgx_epc_page_reclaimable(page->flags));
page->flags |= flags;
if (sgx_epc_page_reclaimable(flags))
- list_add_tail(&page->list, &sgx_global_lru.reclaimable);
+ list_add_tail(&page->list, &lru->reclaimable);
else
- list_add_tail(&page->list, &sgx_global_lru.unreclaimable);
- spin_unlock(&sgx_global_lru.lock);
+ list_add_tail(&page->list, &lru->unreclaimable);
+ spin_unlock(&lru->lock);
}

/**
@@ -536,15 +556,17 @@ void sgx_record_epc_page(struct sgx_epc_page *page, unsigned long flags)
*/
int sgx_drop_epc_page(struct sgx_epc_page *page)
{
- spin_lock(&sgx_global_lru.lock);
+ struct sgx_epc_lru_lists *lru = sgx_lru_lists(page);
+
+ spin_lock(&lru->lock);
if (sgx_epc_page_reclaim_in_progress(page->flags)) {
- spin_unlock(&sgx_global_lru.lock);
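+ /* The page is being reclaimed; it cannot be dropped until that completes */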
+ spin_unlock(&lru->lock);
return -EBUSY;
}
-
list_del(&page->list);
sgx_epc_page_reset_state(page);
- spin_unlock(&sgx_global_lru.lock);
+ spin_unlock(&lru->lock);

return 0;
}
@@ -590,7 +612,7 @@ struct sgx_epc_page *sgx_alloc_epc_page(void *owner, bool reclaim)
break;
}

- sgx_reclaim_pages(SGX_NR_TO_SCAN);
+ sgx_reclaim_pages(SGX_NR_TO_SCAN, false);
cond_resched();
}