@@ -19,6 +19,7 @@ struct sgx_add_page_req {
 	struct sgx_secinfo secinfo;
 	unsigned long mrmask;
 	struct list_head list;
+	bool zero_page;
 };
 
 static int sgx_encl_grow(struct sgx_encl *encl)
@@ -76,6 +77,7 @@ static bool sgx_process_add_page_req(struct sgx_add_page_req *req,
 	struct sgx_pageinfo pginfo;
 	struct page *backing;
 	unsigned long addr;
+	void *contents;
 	int ret;
 	int i;
 
@@ -84,9 +86,15 @@ static bool sgx_process_add_page_req(struct sgx_add_page_req *req,
 
 	addr = SGX_ENCL_PAGE_ADDR(encl_page);
 
-	backing = sgx_encl_get_backing_page(encl, page_index);
-	if (IS_ERR(backing))
-		return false;
+	if (!req->zero_page) {
+		backing = sgx_encl_get_backing_page(encl, page_index);
+		if (IS_ERR(backing))
+			return false;
+		contents = kmap_atomic(backing);
+	} else {
+		backing = NULL;
+		contents = __va(page_to_pfn(ZERO_PAGE(0)) << PAGE_SHIFT);
+	}
 
 	/*
 	 * The SECINFO field must be 64-byte aligned, copy it to a local
@@ -99,11 +107,13 @@ static bool sgx_process_add_page_req(struct sgx_add_page_req *req,
 	pginfo.secs = (unsigned long)sgx_epc_addr(encl->secs.epc_page);
 	pginfo.addr = addr;
 	pginfo.metadata = (unsigned long)&secinfo;
-	pginfo.contents = (unsigned long)kmap_atomic(backing);
+	pginfo.contents = (unsigned long)contents;
 	ret = __eadd(&pginfo, sgx_epc_addr(epc_page));
 
-	kunmap_atomic((void *)(unsigned long)pginfo.contents);
-	put_page(backing);
+	if (backing) {
+		kunmap_atomic(contents);
+		put_page(backing);
+	}
 
 	if (ret) {
 		if (encls_failed(ret))
@@ -506,18 +516,20 @@ static int sgx_encl_queue_page(struct sgx_encl *encl,
 	if (!req)
 		return -ENOMEM;
 
-	backing = sgx_encl_get_backing_page(encl, page_index);
-	if (IS_ERR(backing)) {
-		kfree(req);
-		return PTR_ERR(backing);
-	}
+	if (data) {
+		backing = sgx_encl_get_backing_page(encl, page_index);
+		if (IS_ERR(backing)) {
+			kfree(req);
+			return PTR_ERR(backing);
+		}
 
-	backing_ptr = kmap(backing);
-	if (data)
+		backing_ptr = kmap(backing);
 		memcpy(backing_ptr, data, PAGE_SIZE);
-	else
-		memset(backing_ptr, 0, PAGE_SIZE);
-	kunmap(backing);
+		kunmap(backing);
+	} else {
+		backing = NULL;
+		req->zero_page = true;
+	}
 	if (page_type == SGX_SECINFO_TCS)
 		encl_page->desc |= SGX_ENCL_PAGE_TCS;
 	memcpy(&req->secinfo, secinfo, sizeof(*secinfo));
@@ -529,8 +541,10 @@ static int sgx_encl_queue_page(struct sgx_encl *encl,
 	list_add_tail(&req->list, &encl->add_page_reqs);
 	if (empty)
 		queue_work(sgx_encl_wq, &encl->work);
-	set_page_dirty(backing);
-	put_page(backing);
+	if (backing) {
+		set_page_dirty(backing);
+		put_page(backing);
+	}
 
 	return 0;
 }
Using the zero page avoids dirtying the backing page, inserting TLB
entries, the cost of memset, etc...  For some enclaves, e.g. an enclave
with a small code footprint and a large working set, this results in a
20%+ reduction in enclave build time.

Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
---
 arch/x86/kernel/cpu/sgx/driver/ioctl.c | 50 ++++++++++++++++----------
 1 file changed, 32 insertions(+), 18 deletions(-)
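The core of the change is source-pointer selection: ENCLS[EADD] only
reads the page referenced by pginfo.contents, so an all-zero source can
safely alias the kernel's shared zero page instead of a freshly
allocated and zeroed backing page. Below is a minimal sketch of that
pattern, not part of the patch itself; the helper names
map_source_page() and unmap_source_page() are hypothetical and exist
only to isolate the logic the patch open-codes in
sgx_process_add_page_req().

#include <linux/highmem.h>	/* kmap_atomic(), kunmap_atomic() */
#include <linux/mm.h>		/* ZERO_PAGE(), page_to_pfn(), __va() */

/* Hypothetical helper: pick a kernel virtual address to use as the
 * read-only EADD source.
 */
static void *map_source_page(struct page *backing)
{
	/*
	 * No backing page: alias the global zero page. On x86 it sits
	 * in the kernel's direct map, so __va() of its PFN is a valid
	 * address and no kmap is needed.
	 */
	if (!backing)
		return __va(page_to_pfn(ZERO_PAGE(0)) << PAGE_SHIFT);

	return kmap_atomic(backing);
}

/* Hypothetical helper: tear down only what map_source_page() set up. */
static void unmap_source_page(struct page *backing, void *contents)
{
	/* The zero page was never mapped with kmap_atomic(). */
	if (backing)
		kunmap_atomic(contents);
}

Because the zero page is globally shared, this is safe only as long as
nothing writes through the returned pointer; EADD treats the source as
read-only input, which is why the patch can also skip set_page_dirty()
in the zero-page case.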