@@ -53,6 +53,19 @@ static struct mem_area areas[MAX_AREAS];
/* Mask of initialized areas */
static unsigned int areas_mask;
+
+#ifndef set_memory_encrypted
+static inline void set_memory_encrypted(unsigned long mem, unsigned long size)
+{
+}
+#endif
+
+#ifndef set_memory_decrypted
+static inline void set_memory_decrypted(unsigned long mem, unsigned long size)
+{
+}
+#endif
+
/* Protects areas and areas mask */
static struct spinlock lock;

bool page_alloc_initialized(void)
@@ -263,7 +276,7 @@ static bool coalesce(struct mem_area *a, u8 order, pfn_t pfn, pfn_t pfn2)
* - no pages in the memory block were already free
* - no pages in the memory block are special
*/
-static void _free_pages(void *mem)
+static void _free_pages(void *mem, u32 flags)
{
pfn_t pfn2, pfn = virt_to_pfn(mem);
struct mem_area *a = NULL;
@@ -281,6 +294,9 @@ static void _free_pages(void *mem)
p = pfn - a->base;
order = a->page_states[p] & ORDER_MASK;

+ if (flags & FLAG_SHARED)
+ set_memory_encrypted((unsigned long)mem, BIT(order) * PAGE_SIZE);
+
/* ensure that the first page is allocated and not special */
assert(IS_ALLOCATED(a->page_states[p]));
/* ensure that the order has a sane value */
@@ -320,7 +336,14 @@ static void _free_pages(void *mem)
void free_pages(void *mem)
{
spin_lock(&lock);
- _free_pages(mem);
+ _free_pages(mem, 0);
+ spin_unlock(&lock);
+}
+
+void free_pages_shared(void *mem)
+{
+ spin_lock(&lock);
+ _free_pages(mem, FLAG_SHARED);
spin_unlock(&lock);
}

@@ -353,7 +376,7 @@ static void _unreserve_one_page(pfn_t pfn)
i = pfn - a->base;
assert(a->page_states[i] == STATUS_SPECIAL);
a->page_states[i] = STATUS_ALLOCATED;
- _free_pages(pfn_to_virt(pfn));
+ _free_pages(pfn_to_virt(pfn), 0);
}

int reserve_pages(phys_addr_t addr, size_t n)
@@ -401,6 +424,10 @@ static void *page_memalign_order_flags(u8 al, u8 ord, u32 flags)
if (area & BIT(i))
res = page_memalign_order(areas + i, al, ord, fresh);
spin_unlock(&lock);
+
+ if (res && (flags & FLAG_SHARED))
+ set_memory_decrypted((unsigned long)res, BIT(ord) * PAGE_SIZE);
+
if (res && !(flags & FLAG_DONTZERO))
memset(res, 0, BIT(ord) * PAGE_SIZE);
return res;
@@ -21,6 +21,7 @@
#define FLAG_DONTZERO 0x10000
#define FLAG_FRESH 0x20000
+#define FLAG_SHARED 0x40000

/* Returns true if the page allocator has been initialized */
bool page_alloc_initialized(void);
@@ -121,4 +122,27 @@ int reserve_pages(phys_addr_t addr, size_t npages);
*/
void unreserve_pages(phys_addr_t addr, size_t npages);

+/* Shared page operations */
+static inline void *alloc_pages_shared(unsigned long order)
+{
+ return alloc_pages_flags(order, FLAG_SHARED);
+}
+
+static inline void *alloc_page_shared(void)
+{
+ return alloc_pages_shared(0);
+}
+
+void free_pages_shared(void *mem);
+
+static inline void free_page_shared(void *page)
+{
+ free_pages_shared(page);
+}
+
+static inline void free_pages_shared_by_order(void *mem, unsigned long order)
+{
+ free_pages_shared(mem);
+}
+
#endif
Add support for allocating "pages" that can be shared with the host;
in other words, decrypted pages. This is achieved by adding hooks for
setting a memory region as "encrypted" or "decrypted", which can be
overridden by the architecture-specific backends.

Also add a new flag, FLAG_SHARED, for allocating shared pages, and
give the page allocation/free routines "_shared_" variants. These
will later be used for Realm support and tests.

Signed-off-by: Joey Gouly <joey.gouly@arm.com>
---
 lib/alloc_page.c | 33 ++++++++++++++++++++++++++++++---
 lib/alloc_page.h | 24 ++++++++++++++++++++++++
 2 files changed, 54 insertions(+), 3 deletions(-)
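For illustration, an architecture backend overrides the no-op stubs by
providing its own implementations and defining the hook names as
macros, so the #ifndef checks in lib/alloc_page.c skip the fallbacks.
A minimal sketch, assuming the backend's header is pulled in by
lib/alloc_page.c before those checks; the arch_* names and the
platform calls they would wrap are hypothetical, not part of this
patch:

  /* Hypothetical arch-specific header. */
  static inline void arch_set_memory_decrypted(unsigned long mem, unsigned long size)
  {
          /* Platform call sharing [mem, mem + size) with the host. */
  }
  #define set_memory_decrypted arch_set_memory_decrypted

  static inline void arch_set_memory_encrypted(unsigned long mem, unsigned long size)
  {
          /* Platform call making the range private (encrypted) again. */
  }
  #define set_memory_encrypted arch_set_memory_encrypted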
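And a sketch of the intended use from a test (the test function is
hypothetical; only the alloc_page.h API added above is assumed):

  #include <alloc_page.h>

  static void example_shared_buffer(void)
  {
          /* Page is decrypted (shared) first, then zeroed by the allocator. */
          void *buf = alloc_page_shared();

          /* ... exchange data with the host through buf ... */

          /* Page is re-encrypted before it returns to the free pool. */
          free_page_shared(buf);
  }

Note the ordering: allocation decrypts the range before the memset(),
so the zeroing writes already target shared memory, and _free_pages()
re-encrypts the range before the pages are coalesced back into the
free lists.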