@@ -22,8 +22,10 @@
#include "main.h"
static const size_t ENCL_HEAP_SIZE_DEFAULT = PAGE_SIZE;
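+/* Timeout (in seconds) used by the long-running EDMM tests below. */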
+static const unsigned long TIMEOUT_DEFAULT = 900;
static const uint64_t MAGIC = 0x1122334455667788ULL;
static const uint64_t MAGIC2 = 0x8877665544332211ULL;
+
vdso_sgx_enter_enclave_t vdso_sgx_enter_enclave;
/*
@@ -387,7 +389,7 @@ TEST_F(enclave, unclobbered_vdso_oversubscribed)
EXPECT_EQ(self->run.user_data, 0);
}
-TEST_F_TIMEOUT(enclave, unclobbered_vdso_oversubscribed_remove, 900)
+TEST_F_TIMEOUT(enclave, unclobbered_vdso_oversubscribed_remove, TIMEOUT_DEFAULT)
{
struct sgx_enclave_remove_pages remove_ioc;
struct sgx_enclave_modify_types modt_ioc;
@@ -1245,6 +1247,114 @@ TEST_F(enclave, augment_via_eaccept)
munmap(addr, PAGE_SIZE);
}
+/*
+ * Test for the addition of a large number of pages to an initialized
+ * enclave via a pre-emptive run of EACCEPT on every page to be added.
+ */
+TEST_F_TIMEOUT(enclave, augment_via_eaccept_long, TIMEOUT_DEFAULT)
+{
+ /*
+ * The dynamic heap size was chosen based on a bug report:
+ * Message-ID:
+ * <DM8PR11MB55912A7F47A84EC9913A6352F6999@DM8PR11MB5591.namprd11.prod.outlook.com>
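+ * It amounts to 0x200000 dynamically added pages, i.e. 8GiB with 4kB pages.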
+ */
+ static const unsigned long DYNAMIC_HEAP_SIZE = 0x200000L * PAGE_SIZE;
+ struct encl_op_get_from_addr get_addr_op;
+ struct encl_op_put_to_addr put_addr_op;
+ struct encl_op_eaccept eaccept_op;
+ size_t total_size = 0;
+ unsigned long i;
+ void *addr;
+
+ if (!sgx2_supported())
+ SKIP(return, "SGX2 not supported");
+
+ ASSERT_TRUE(setup_test_encl_dynamic(ENCL_HEAP_SIZE_DEFAULT, DYNAMIC_HEAP_SIZE,
+ &self->encl, _metadata));
+
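+ /* Enter the enclave through the TCS at the enclave base. */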
+ memset(&self->run, 0, sizeof(self->run));
+ self->run.tcs = self->encl.encl_base;
+
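+ /* Compute the size of the existing (static) enclave image. */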
+ for (i = 0; i < self->encl.nr_segments; i++) {
+ struct encl_segment *seg = &self->encl.segment_tbl[i];
+
+ total_size += seg->size;
+ }
+
+ /*
+ * mmap() the range at the end of the existing enclave that will be
+ * used for EDMM.
+ */
+ addr = mmap((void *)self->encl.encl_base + total_size, DYNAMIC_HEAP_SIZE,
+ PROT_READ | PROT_WRITE | PROT_EXEC, MAP_SHARED | MAP_FIXED,
+ self->encl.fd, 0);
+ EXPECT_NE(addr, MAP_FAILED);
+
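+ /* Reset exception tracking before entering the enclave. */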
+ self->run.exception_vector = 0;
+ self->run.exception_error_code = 0;
+ self->run.exception_addr = 0;
+
+ /*
+ * Run EACCEPT on every page to trigger the #PF->EAUG->EACCEPT(again,
+ * this time without a #PF) flow. All of this should be transparent to
+ * userspace.
+ */
+ eaccept_op.flags = SGX_SECINFO_R | SGX_SECINFO_W | SGX_SECINFO_REG | SGX_SECINFO_PENDING;
+ eaccept_op.ret = 0;
+ eaccept_op.header.type = ENCL_OP_EACCEPT;
+
+ for (i = 0; i < DYNAMIC_HEAP_SIZE; i += PAGE_SIZE) {
+ eaccept_op.epc_addr = (uint64_t)(addr + i);
+
+ EXPECT_EQ(ENCL_CALL(&eaccept_op, &self->run, true), 0);
+ if (self->run.exception_vector == 14 &&
+ self->run.exception_error_code == 4 &&
+ self->run.exception_addr == self->encl.encl_base) {
+ munmap(addr, DYNAMIC_HEAP_SIZE);
+ SKIP(return, "Kernel does not support adding pages to initialized enclave");
+ }
+
+ EXPECT_EQ(self->run.exception_vector, 0);
+ EXPECT_EQ(self->run.exception_error_code, 0);
+ EXPECT_EQ(self->run.exception_addr, 0);
+ ASSERT_EQ(eaccept_op.ret, 0);
+ ASSERT_EQ(self->run.function, EEXIT);
+ }
+
+ /*
+ * The pool of pages was successfully added to the enclave. Perform a
+ * sanity check on the first page of the pool only to ensure data can
+ * be written to and read from a dynamically added enclave page.
+ */
+ put_addr_op.value = MAGIC;
+ put_addr_op.addr = (unsigned long)addr;
+ put_addr_op.header.type = ENCL_OP_PUT_TO_ADDRESS;
+
+ EXPECT_EQ(ENCL_CALL(&put_addr_op, &self->run, true), 0);
+
+ EXPECT_EEXIT(&self->run);
+ EXPECT_EQ(self->run.exception_vector, 0);
+ EXPECT_EQ(self->run.exception_error_code, 0);
+ EXPECT_EQ(self->run.exception_addr, 0);
+
+ /*
+ * Read memory from the newly added page that was just written to,
+ * confirming that the previously written data (MAGIC) is present.
+ */
+ get_addr_op.value = 0;
+ get_addr_op.addr = (unsigned long)addr;
+ get_addr_op.header.type = ENCL_OP_GET_FROM_ADDRESS;
+
+ EXPECT_EQ(ENCL_CALL(&get_addr_op, &self->run, true), 0);
+
+ EXPECT_EQ(get_addr_op.value, MAGIC);
+ EXPECT_EEXIT(&self->run);
+ EXPECT_EQ(self->run.exception_vector, 0);
+ EXPECT_EQ(self->run.exception_error_code, 0);
+ EXPECT_EQ(self->run.exception_addr, 0);
+
+ munmap(addr, DYNAMIC_HEAP_SIZE);
+}
+
/*
* SGX2 page type modification test in two phases:
* Phase 1: