@@ -59,6 +59,8 @@ enum pkvm_component_id {
PKVM_ID_HYP,
};
+extern unsigned long hyp_nr_cpus;
+
int __pkvm_prot_finalize(void);
int __pkvm_host_share_hyp(u64 pfn);
int __pkvm_host_unshare_hyp(u64 pfn);
@@ -13,6 +13,10 @@
extern struct kvm_pgtable pkvm_pgtable;
extern hyp_spinlock_t pkvm_pgd_lock;
+int hyp_create_pcpu_fixmap(void);
+void *hyp_fixmap_map(phys_addr_t phys);
+int hyp_fixmap_unmap(void);
+
int hyp_create_idmap(u32 hyp_va_bits);
int hyp_map_vectors(void);
int hyp_back_vmemmap(phys_addr_t back);
@@ -21,7 +21,6 @@
#define KVM_HOST_S2_FLAGS (KVM_PGTABLE_S2_NOFWB | KVM_PGTABLE_S2_IDMAP)
-extern unsigned long hyp_nr_cpus;
struct host_kvm host_kvm;
static struct hyp_pool host_s2_pool;
@@ -14,6 +14,7 @@
#include <nvhe/early_alloc.h>
#include <nvhe/gfp.h>
#include <nvhe/memory.h>
+#include <nvhe/mem_protect.h>
#include <nvhe/mm.h>
#include <nvhe/spinlock.h>
@@ -24,6 +25,7 @@ struct memblock_region hyp_memory[HYP_MEMBLOCK_REGIONS];
unsigned int hyp_memblock_nr;
static u64 __io_map_base;
+static DEFINE_PER_CPU(void *, hyp_fixmap_base);
static int __pkvm_create_mappings(unsigned long start, unsigned long size,
unsigned long phys, enum kvm_pgtable_prot prot)
@@ -198,6 +200,89 @@ int hyp_map_vectors(void)
return 0;
}
+/*
+ * Map @phys at this CPU's private fixmap slot.
+ *
+ * The slot's VA was reserved and its last-level page-table page pinned
+ * by hyp_create_pcpu_fixmap(), so installing the mapping cannot require
+ * a page-table allocation.  Undone with hyp_fixmap_unmap().
+ *
+ * Returns the hyp VA of the mapping, or NULL if it could not be
+ * installed.
+ */
+void *hyp_fixmap_map(phys_addr_t phys)
+{
+	void *addr = *this_cpu_ptr(&hyp_fixmap_base);
+
+	if (kvm_pgtable_hyp_map(&pkvm_pgtable, (u64)addr, PAGE_SIZE,
+				phys, PAGE_HYP))
+		return NULL;
+
+	return addr;
+}
+
+/*
+ * Tear down the mapping installed at this CPU's fixmap slot by
+ * hyp_fixmap_map().
+ *
+ * Returns 0 on success, -EINVAL if a full page was not unmapped.
+ */
+int hyp_fixmap_unmap(void)
+{
+	void *addr = *this_cpu_ptr(&hyp_fixmap_base);
+	int unmapped;
+
+	unmapped = kvm_pgtable_hyp_unmap(&pkvm_pgtable, (u64)addr, PAGE_SIZE);
+
+	return (unmapped == PAGE_SIZE) ? 0 : -EINVAL;
+}
+
+/*
+ * Leaf walker callback used by hyp_pin_pgtable_pages().
+ *
+ * Expects to land on a valid last-level entry; anything else (an
+ * invalid PTE, or a block mapping at a higher level) is rejected with
+ * -EINVAL.  On success, takes a reference on the page containing the
+ * PTE itself, i.e. the last-level page-table page, so that it is kept
+ * around at all times.
+ */
+static int __pin_pgtable_cb(u64 addr, u64 end, u32 level, kvm_pte_t *ptep,
+			    enum kvm_pgtable_walk_flags flag, void * const arg)
+{
+	if (!kvm_pte_valid(*ptep) || level != KVM_PGTABLE_MAX_LEVELS - 1)
+		return -EINVAL;
+	/* Pin the table page holding this PTE, not the mapped page. */
+	hyp_page_ref_inc(hyp_virt_to_page(ptep));
+
+	return 0;
+}
+
+/*
+ * Walk the single page mapped at @addr and take a reference on its
+ * last-level page-table page (see __pin_pgtable_cb()), preventing that
+ * table page from ever being freed.
+ */
+static int hyp_pin_pgtable_pages(u64 addr)
+{
+	struct kvm_pgtable_walker walker = {
+		.flags	= KVM_PGTABLE_WALK_LEAF,
+		.cb	= __pin_pgtable_cb,
+	};
+
+	return kvm_pgtable_walk(&pkvm_pgtable, addr, PAGE_SIZE, &walker);
+}
+
+/*
+ * Set up a private, single-page fixmap slot for every CPU.
+ *
+ * For each CPU: reserve a page of private VA space, install a dummy
+ * mapping there so that all intermediate page-table pages get
+ * allocated, pin the last-level table page so it is kept around at all
+ * times, then remove the dummy mapping again.  The slot's VA is
+ * recorded in the per-CPU hyp_fixmap_base pointer for later use by
+ * hyp_fixmap_map()/hyp_fixmap_unmap().
+ *
+ * Returns 0 on success, a negative error code otherwise.  On failure,
+ * slots created for earlier CPUs are left in place.
+ */
+int hyp_create_pcpu_fixmap(void)
+{
+	unsigned long cpu;
+	int ret = 0;
+	u64 addr;
+
+	hyp_spin_lock(&pkvm_pgd_lock);
+
+	for (cpu = 0; cpu < hyp_nr_cpus; cpu++) {
+		addr = hyp_alloc_private_va_range(PAGE_SIZE);
+		if (IS_ERR((void *)addr)) {
+			ret = -ENOMEM;
+			goto unlock;
+		}
+
+		/*
+		 * Dummy mapping: any valid PA will do (__hyp_bss_start is
+		 * convenient) -- its only purpose is to force allocation
+		 * of the intermediate page-table pages for this VA.
+		 */
+		ret = kvm_pgtable_hyp_map(&pkvm_pgtable, addr, PAGE_SIZE,
+					  __hyp_pa(__hyp_bss_start), PAGE_HYP);
+		if (ret) {
+			ret = -EINVAL;
+			goto unlock;
+		}
+
+		/* Keep the last-level table page around at all times. */
+		ret = hyp_pin_pgtable_pages(addr);
+		if (ret)
+			goto unlock;
+
+		/* Drop the dummy mapping; the pinned tables remain. */
+		ret = kvm_pgtable_hyp_unmap(&pkvm_pgtable, addr, PAGE_SIZE);
+		if (ret != PAGE_SIZE) {
+			ret = -EINVAL;
+			goto unlock;
+		}
+		ret = 0;
+
+		*per_cpu_ptr(&hyp_fixmap_base, cpu) = (void *)addr;
+	}
+unlock:
+	hyp_spin_unlock(&pkvm_pgd_lock);
+
+	return ret;
+}
+
int hyp_create_idmap(u32 hyp_va_bits)
{
unsigned long start, end;
@@ -296,6 +296,10 @@ void __noreturn __pkvm_init_finalise(void)
if (ret)
goto out;
+ ret = hyp_create_pcpu_fixmap();
+ if (ret)
+ goto out;
+
hyp_shadow_table_init(shadow_table_base);
out:
/*