--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -746,6 +746,7 @@ bool kvm_arm_vcpu_is_finalized(struct kvm_vcpu *vcpu);
extern phys_addr_t hyp_mem_base;
extern phys_addr_t hyp_mem_size;
void __init reserve_kvm_hyp(void);
+void kvm_sort_memblock_regions(void);
#else
static inline void reserve_kvm_hyp(void) { }
#endif
--- a/arch/arm64/kvm/arm.c
+++ b/arch/arm64/kvm/arm.c
@@ -1685,6 +1685,7 @@ static int kvm_hyp_enable_protection(void)
return ret;
kvm_set_hyp_vector();
+	kvm_sort_memblock_regions();
ret = kvm_call_hyp_nvhe(__kvm_hyp_protect, hyp_mem_base, hyp_mem_size,
num_possible_cpus(), kern_hyp_va(per_cpu_base));
if (ret)
--- a/arch/arm64/kvm/hyp/reserved_mem.c
+++ b/arch/arm64/kvm/hyp/reserved_mem.c
@@ -6,6 +6,7 @@
#include <linux/kvm_host.h>
#include <linux/memblock.h>
+#include <linux/sort.h>
#include <asm/kvm_host.h>
@@ -31,6 +32,23 @@ void __init early_init_dt_add_memory_hyp(u64 base, u64 size)
kvm_nvhe_sym(hyp_memblock_nr)++;
}
+static int cmp_hyp_memblock(const void *p1, const void *p2)
+{
+	const struct hyp_memblock_region *r1 = p1;
+	const struct hyp_memblock_region *r2 = p2;
+
+	return r1->start < r2->start ? -1 : (r1->start > r2->start);
+}
+
+void kvm_sort_memblock_regions(void)
+{
+	sort(kvm_nvhe_sym(hyp_memory),
+	     kvm_nvhe_sym(hyp_memblock_nr),
+	     sizeof(struct hyp_memblock_region),
+	     cmp_hyp_memblock,
+	     NULL);
+}
+
extern bool enable_protected_kvm;
void __init reserve_kvm_hyp(void)
{
The hypervisor will need the list of memblock regions sorted by
increasing start address to make look-ups more efficient. Make the
host do the hard work early while it is still trusted to avoid the
need for a sorting library at EL2.

Signed-off-by: Quentin Perret <qperret@google.com>
---
 arch/arm64/include/asm/kvm_host.h |  1 +
 arch/arm64/kvm/arm.c              |  1 +
 arch/arm64/kvm/hyp/reserved_mem.c | 18 ++++++++++++++++++
 3 files changed, 20 insertions(+)
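Note (not part of the patch): the reason EL2 wants the regions sorted by
start address is that a physical address can then be resolved to its
covering memblock region with a binary search instead of a linear scan.
The sketch below illustrates such a look-up under stated assumptions:
the range_contains() name and the 'size' field of struct
hyp_memblock_region are invented for the example (only 'start' is
visible in this diff), and the regions are assumed non-overlapping once
sorted.

/*
 * Illustrative sketch only: binary search over a hyp_memory array that
 * has been sorted by kvm_sort_memblock_regions(). The helper name and
 * the 'size' field are assumptions, not part of this series.
 */
static bool range_contains(const struct hyp_memblock_region *regs, int nr,
			   phys_addr_t addr)
{
	int lo = 0, hi = nr - 1;

	while (lo <= hi) {
		int mid = lo + (hi - lo) / 2;

		if (addr < regs[mid].start)
			hi = mid - 1;
		else if (addr >= regs[mid].start + regs[mid].size)
			lo = mid + 1;
		else
			return true;	/* addr falls inside regs[mid] */
	}

	return false;
}

Doing the sort at EL1, before __kvm_hyp_protect runs, keeps a sorting
library out of the EL2 object entirely, which is the "do the hard work
while the host is still trusted" point made in the commit message.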