@@ -21,6 +21,7 @@ int pkvm_create_mappings(void *from, void *to, enum kvm_pgtable_prot prot);
int pkvm_create_mappings_locked(void *from, void *to, enum kvm_pgtable_prot prot);
unsigned long __pkvm_create_private_mapping(phys_addr_t phys, size_t size,
					    enum kvm_pgtable_prot prot);
+unsigned long pkvm_alloc_private_va_range(size_t size);

static inline void hyp_vmemmap_range(phys_addr_t phys, unsigned long size,
				     unsigned long *start, unsigned long *end)
@@ -37,38 +37,54 @@ static int __pkvm_create_mappings(unsigned long start, unsigned long size,
	return err;
}

-unsigned long __pkvm_create_private_mapping(phys_addr_t phys, size_t size,
-					    enum kvm_pgtable_prot prot)
+/**
+ * pkvm_alloc_private_va_range - Allocates a private VA range.
+ * @size: The size of the VA range to reserve.
+ *
+ * The private VA range is allocated above __io_map_base and
+ * aligned based on the order of @size.
+ */
+unsigned long pkvm_alloc_private_va_range(size_t size)
{
-	unsigned long addr;
-	int err;
+	unsigned long base, addr;

	hyp_spin_lock(&pkvm_pgd_lock);

-	size = PAGE_ALIGN(size + offset_in_page(phys));
-	addr = __io_map_base;
-	__io_map_base += size;
+	/* Align the allocation based on the order of its size */
+	addr = ALIGN(__io_map_base, PAGE_SIZE << get_order(size));
+
+	/* The allocated size is always a multiple of PAGE_SIZE */
+	base = addr + PAGE_ALIGN(size);

	/* Are we overflowing on the vmemmap ? */
-	if (__io_map_base > __hyp_vmemmap) {
-		__io_map_base -= size;
+	if (!addr || base > __hyp_vmemmap)
		addr = (unsigned long)ERR_PTR(-ENOMEM);
-		goto out;
-	}
+	else
+		__io_map_base = base;

-	err = kvm_pgtable_hyp_map(&pkvm_pgtable, addr, size, phys, prot);
-	if (err) {
-		addr = (unsigned long)ERR_PTR(err);
-		goto out;
-	}
-
-	addr = addr + offset_in_page(phys);
-out:
	hyp_spin_unlock(&pkvm_pgd_lock);

	return addr;
}

+unsigned long __pkvm_create_private_mapping(phys_addr_t phys, size_t size,
+					    enum kvm_pgtable_prot prot)
+{
+	unsigned long addr;
+	int err;
+
+	size = PAGE_ALIGN(size + offset_in_page(phys));
+	addr = pkvm_alloc_private_va_range(size);
+	if (IS_ERR((void *)addr))
+		return addr;
+
+	err = __pkvm_create_mappings(addr, size, phys, prot);
+	if (err)
+		return (unsigned long)ERR_PTR(err);
+
+	return addr + offset_in_page(phys);
+}
+
int pkvm_create_mappings_locked(void *from, void *to, enum kvm_pgtable_prot prot)
{
	unsigned long start = (unsigned long)from;
@@ -155,7 +171,7 @@ int hyp_map_vectors(void)
	bp_base = (void *)__pkvm_create_private_mapping(phys,
							__BP_HARDEN_HYP_VECS_SZ,
							PAGE_HYP_EXEC);
-	if (IS_ERR_OR_NULL(bp_base))
+	if (IS_ERR(bp_base))
		return PTR_ERR(bp_base);

	__hyp_bp_vect_base = bp_base;
pkvm_alloc_private_va_range() can be used to reserve private VA ranges
in the pKVM nVHE hypervisor. Allocations are aligned based on the order
of the requested size. This will be used to implement stack guard pages
for the pKVM nVHE hypervisor (in a subsequent patch in the series).

Credits to Quentin Perret <qperret@google.com> for the idea of moving
private VA allocation out of __pkvm_create_private_mapping().

Signed-off-by: Kalesh Singh <kaleshsingh@google.com>
---
Changes in v5:
  - Align private allocations based on the order of their size, per Marc

Changes in v4:
  - Handle null ptr in pkvm_alloc_private_va_range() and replace
    IS_ERR_OR_NULL checks in callers with IS_ERR checks, per Fuad
  - Fix kernel-doc comments format, per Fuad
  - Format __pkvm_create_private_mapping() prototype args (< 80 col), per Fuad

Changes in v3:
  - Handle null ptr in IS_ERR_OR_NULL checks, per Mark

Changes in v2:
  - Allow specifying an alignment for the private VA allocations, per Marc

 arch/arm64/kvm/hyp/include/nvhe/mm.h |  1 +
 arch/arm64/kvm/hyp/nvhe/mm.c         | 56 ++++++++++++++++++----------
 2 files changed, 37 insertions(+), 20 deletions(-)
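
For readers unfamiliar with the order-based alignment described above, the
standalone userspace sketch below walks through the arithmetic behind
ALIGN(__io_map_base, PAGE_SIZE << get_order(size)). PAGE_SHIFT, ALIGN() and
get_order() here are simplified stand-ins for the kernel macros (4K pages
assumed), and io_map_base is a made-up address; this illustrates the math
only, it is not the hypervisor code.

#include <stdio.h>
#include <stddef.h>

/* Simplified stand-ins for the kernel macros, assuming 4K pages */
#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define ALIGN(x, a)	(((x) + ((a) - 1)) & ~((a) - 1))

/* Smallest order n such that (PAGE_SIZE << n) >= size */
static unsigned int get_order(size_t size)
{
	unsigned int order = 0;

	while ((PAGE_SIZE << order) < size)
		order++;
	return order;
}

int main(void)
{
	/* Hypothetical current top of the private VA region */
	unsigned long io_map_base = 0xffff000000001000UL;
	size_t sizes[] = { PAGE_SIZE, 3 * PAGE_SIZE, 16 * PAGE_SIZE };

	for (size_t i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++) {
		unsigned long align = PAGE_SIZE << get_order(sizes[i]);
		unsigned long addr = ALIGN(io_map_base, align);

		printf("size=%zu -> align=%#lx base=%#lx\n",
		       sizes[i], align, addr);
	}
	return 0;
}

For instance, a 3-page request rounds up to order 2, so its VA range is
placed on a 16K boundary rather than merely a page boundary.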
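The IS_ERR() checks in the patch rely on the usual kernel convention of
encoding a small negative errno in the top of the address range, which is
why pkvm_alloc_private_va_range() can return either a valid VA or an error
in a single unsigned long, and why the NULL case is folded into -ENOMEM.
A minimal userspace sketch of that encoding follows; ERR_PTR(), PTR_ERR()
and IS_ERR() are simplified stand-ins for <linux/err.h>, and fake_alloc()
is a hypothetical stand-in for the allocator:

#include <stdio.h>

/* Simplified stand-ins for <linux/err.h> */
#define MAX_ERRNO	4095
#define ENOMEM		12

static void *ERR_PTR(long error)
{
	return (void *)error;
}

static long PTR_ERR(const void *ptr)
{
	return (long)ptr;
}

/* Errnos live in the top MAX_ERRNO values of the address space */
static int IS_ERR(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

/* Hypothetical allocator mirroring pkvm_alloc_private_va_range()'s contract */
static unsigned long fake_alloc(int fail)
{
	if (fail)
		return (unsigned long)ERR_PTR(-ENOMEM);
	return 0xffff000000004000UL;	/* made-up VA */
}

int main(void)
{
	unsigned long addr = fake_alloc(1);

	/* Callers cast back to a pointer to test for an encoded errno */
	if (IS_ERR((void *)addr))
		printf("allocation failed: %ld\n", PTR_ERR((void *)addr));
	else
		printf("allocated VA: %#lx\n", addr);
	return 0;
}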