@@ -170,6 +170,13 @@ int create_hyp_exec_mappings(phys_addr_t phys_addr, size_t size,
 			     void **haddr);
 void __init free_hyp_pgds(void);
+/*
+ * Hardware DBM-based dirty-logging helpers. Each operates on the stage-2
+ * mappings covering @npages pages starting at @gfn_offset within @slot.
+ * (DBM = Dirty Bit Modifier, the Arm FEAT_HAFDBS hardware dirty-state
+ * mechanism -- see the definitions in mmu.c for the range walkers used.)
+ */
+void kvm_stage2_clear_dbm(struct kvm *kvm, struct kvm_memory_slot *slot,
+			  gfn_t gfn_offset, unsigned long npages);
+void kvm_stage2_set_dbm(struct kvm *kvm, struct kvm_memory_slot *slot,
+			gfn_t gfn_offset, unsigned long npages);
+void kvm_stage2_sync_dirty(struct kvm *kvm, struct kvm_memory_slot *slot,
+			   gfn_t gfn_offset, unsigned long npages);
+
 void stage2_unmap_vm(struct kvm *kvm);
 int kvm_init_stage2_mmu(struct kvm *kvm, struct kvm_s2_mmu *mmu, unsigned long type);
 void kvm_uninit_stage2_mmu(struct kvm *kvm);
@@ -1149,6 +1149,36 @@ void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm,
kvm_mmu_split_huge_pages(kvm, start, end);
}
+/*
+ * kvm_stage2_clear_dbm - clear the DBM bit on the stage-2 mappings of the
+ * @npages pages starting at @gfn_offset within @slot, via
+ * kvm_pgtable_stage2_clear_dbm().
+ *
+ * Uses the resched-friendly range walker (stage2_apply_range_resched()),
+ * presumably because this can span an entire memslot -- TODO confirm
+ * against the callers' lock/latency requirements; the sibling set/sync
+ * helpers deliberately(?) use the non-resched walker.
+ */
+void kvm_stage2_clear_dbm(struct kvm *kvm, struct kvm_memory_slot *slot,
+			  gfn_t gfn_offset, unsigned long npages)
+{
+	phys_addr_t base_gfn = slot->base_gfn + gfn_offset;
+	phys_addr_t addr = base_gfn << PAGE_SHIFT;		/* gfn -> IPA */
+	phys_addr_t end = (base_gfn + npages) << PAGE_SHIFT;	/* exclusive end */
+
+	stage2_apply_range_resched(&kvm->arch.mmu, addr, end, kvm_pgtable_stage2_clear_dbm);
+}
+
+/*
+ * kvm_stage2_set_dbm - set the DBM bit on the stage-2 mappings of the
+ * @npages pages starting at @gfn_offset within @slot, via
+ * kvm_pgtable_stage2_set_dbm().
+ *
+ * NOTE(review): uses the non-resched stage2_apply_range(..., false),
+ * unlike kvm_stage2_clear_dbm() -- confirm this asymmetry is intended
+ * (e.g. caller holds mmu_lock or only operates on small ranges).
+ */
+void kvm_stage2_set_dbm(struct kvm *kvm, struct kvm_memory_slot *slot,
+			gfn_t gfn_offset, unsigned long npages)
+{
+	phys_addr_t base_gfn = slot->base_gfn + gfn_offset;
+	phys_addr_t addr = base_gfn << PAGE_SHIFT;		/* gfn -> IPA */
+	phys_addr_t end = (base_gfn + npages) << PAGE_SHIFT;	/* exclusive end */
+
+	stage2_apply_range(&kvm->arch.mmu, addr, end, kvm_pgtable_stage2_set_dbm, false);
+}
+
+/*
+ * kvm_stage2_sync_dirty - walk the stage-2 mappings of the @npages pages
+ * starting at @gfn_offset within @slot with kvm_pgtable_stage2_sync_dirty().
+ *
+ * Presumably this collects hardware-dirtied (DBM-written) entries into
+ * KVM's dirty bookkeeping -- TODO confirm the pgtable helper's contract.
+ * NOTE(review): non-resched walker (resched=false), same asymmetry vs
+ * kvm_stage2_clear_dbm() as in kvm_stage2_set_dbm(); verify intent.
+ */
+void kvm_stage2_sync_dirty(struct kvm *kvm, struct kvm_memory_slot *slot,
+			   gfn_t gfn_offset, unsigned long npages)
+{
+	phys_addr_t base_gfn = slot->base_gfn + gfn_offset;
+	phys_addr_t addr = base_gfn << PAGE_SHIFT;		/* gfn -> IPA */
+	phys_addr_t end = (base_gfn + npages) << PAGE_SHIFT;	/* exclusive end */
+
+	stage2_apply_range(&kvm->arch.mmu, addr, end, kvm_pgtable_stage2_sync_dirty, false);
+}
+
static void kvm_send_hwpoison_signal(unsigned long address, short lsb)
{
send_sig_mceerr(BUS_MCEERR_AR, (void __user *)address, lsb, current);