@@ -223,18 +223,6 @@ static inline void __cpu_init_hyp_mode(phys_addr_t boot_pgd_ptr,
kvm_call_hyp((void*)hyp_stack_ptr, vector_ptr, pgd_ptr);
}
-/**
- * kvm_flush_remote_tlbs() - flush all VM TLB entries
- * @kvm: pointer to kvm structure.
- *
- * Interface to HYP function to flush all VM TLB entries without address
- * parameter.
- */
-static inline void kvm_flush_remote_tlbs(struct kvm *kvm)
-{
- kvm_call_hyp(__kvm_tlb_flush_vmid, kvm);
-}
-
static inline int kvm_arch_dev_ioctl_check_extension(long ext)
{
return 0;
@@ -740,7 +740,6 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
*/
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
{
-#ifdef CONFIG_ARM
bool is_dirty = false;
int r;
@@ -753,9 +752,6 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
mutex_unlock(&kvm->slots_lock);
return r;
-#else /* arm64 */
- return -EINVAL;
-#endif
}
static int kvm_vm_ioctl_set_device_addr(struct kvm *kvm,
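With the -EINVAL fallback gone, arm and arm64 both service KVM_GET_DIRTY_LOG through the same code path. For orientation, here is a minimal userspace sketch of that ioctl; vm_fd, slot, mem_size and page_size are illustrative placeholders rather than anything defined by this series, and error handling is kept to a minimum.

#include <linux/kvm.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/ioctl.h>

/*
 * Fetch the dirty bitmap for one memslot that was registered with
 * KVM_MEM_LOG_DIRTY_PAGES. The kernel reports one bit per guest page
 * and sizes the bitmap in host-long units, so round up accordingly
 * (64-bit longs assumed here).
 */
static uint64_t *fetch_dirty_log(int vm_fd, uint32_t slot,
				 uint64_t mem_size, uint64_t page_size)
{
	uint64_t npages = mem_size / page_size;
	size_t bitmap_bytes = ((npages + 63) / 64) * sizeof(uint64_t);
	uint64_t *bitmap = calloc(1, bitmap_bytes);
	struct kvm_dirty_log log = { .slot = slot };

	if (!bitmap)
		return NULL;
	log.dirty_bitmap = bitmap;

	if (ioctl(vm_fd, KVM_GET_DIRTY_LOG, &log) < 0) {
		perror("KVM_GET_DIRTY_LOG");
		free(bitmap);
		return NULL;
	}

	/* Set bits mark pages dirtied since the previous call. */
	return bitmap;
}

Because KVM_GENERIC_DIRTYLOG_READ_PROTECT is selected (see the Kconfig hunk at the end), retrieving the log also write-protects the reported pages again, so each call returns only the pages dirtied since the previous one.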
@@ -52,11 +52,18 @@ static phys_addr_t hyp_idmap_vector;
static bool memslot_is_logging(struct kvm_memory_slot *memslot)
{
-#ifdef CONFIG_ARM
return memslot->dirty_bitmap && !(memslot->flags & KVM_MEM_READONLY);
-#else
- return false;
-#endif
+}
+
+/**
+ * kvm_flush_remote_tlbs() - flush all VM TLB entries on ARMv7 and ARMv8
+ * @kvm: pointer to kvm structure.
+ *
+ * Interface to the HYP function that flushes all of the VM's TLB entries.
+ */
+void kvm_flush_remote_tlbs(struct kvm *kvm)
+{
+ kvm_call_hyp(__kvm_tlb_flush_vmid, kvm);
}
static void kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa)
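Note that kvm_flush_remote_tlbs() is now an ordinary (non-static) arch function rather than a static inline in the header. That is what makes the HAVE_KVM_ARCH_TLB_FLUSH_ALL select in the Kconfig hunk below meaningful: the generic implementation in virt/kvm/kvm_main.c is compiled only when the architecture does not provide its own. A from-memory sketch of that generic side, body elided, not part of this patch:

/* virt/kvm/kvm_main.c -- sketch only */
#ifndef CONFIG_HAVE_KVM_ARCH_TLB_FLUSH_ALL
void kvm_flush_remote_tlbs(struct kvm *kvm)
{
	/* Generic fallback: request a TLB flush on every VCPU. */
}
EXPORT_SYMBOL_GPL(kvm_flush_remote_tlbs);
#endif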
@@ -885,7 +892,6 @@ static bool kvm_is_device_pfn(unsigned long pfn)
return !pfn_valid(pfn);
}
-#ifdef CONFIG_ARM
/**
* stage2_wp_ptes - write protect PMD range
* @pmd: pointer to pmd entry
@@ -1030,7 +1036,6 @@ void kvm_arch_mmu_write_protect_pt_masked(struct kvm *kvm,
stage2_wp_range(kvm, start, end);
}
-#endif
static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
struct kvm_memory_slot *memslot, unsigned long hva,
@@ -1446,7 +1451,6 @@ void kvm_arch_commit_memory_region(struct kvm *kvm,
const struct kvm_memory_slot *old,
enum kvm_mr_change change)
{
-#ifdef CONFIG_ARM
/*
* At this point memslot has been committed and there is an
* allocated dirty_bitmap[], dirty pages will be tracked while the
@@ -1454,7 +1458,6 @@ void kvm_arch_commit_memory_region(struct kvm *kvm,
*/
if (change != KVM_MR_DELETE && mem->flags & KVM_MEM_LOG_DIRTY_PAGES)
kvm_mmu_wp_memory_region(kvm, mem->slot);
-#endif
}
int kvm_arch_prepare_memory_region(struct kvm *kvm,
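kvm_mmu_wp_memory_region() (added earlier in this series, not shown in these hunks) performs that initial write-protection pass over the whole slot. Roughly, and reconstructed from memory rather than quoted from the patch, it has this shape:

/* Rough shape only -- see the actual series for the exact code. */
void kvm_mmu_wp_memory_region(struct kvm *kvm, int slot)
{
	struct kvm_memory_slot *memslot = id_to_memslot(kvm_memslots(kvm), slot);
	phys_addr_t start = memslot->base_gfn << PAGE_SHIFT;
	phys_addr_t end = (memslot->base_gfn + memslot->npages) << PAGE_SHIFT;

	/* Write-protect every present stage-2 mapping in the slot ... */
	spin_lock(&kvm->mmu_lock);
	stage2_wp_range(kvm, start, end);
	spin_unlock(&kvm->mmu_lock);

	/* ... then drop stale TLB entries so guest writes start faulting. */
	kvm_flush_remote_tlbs(kvm);
}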
@@ -22,10 +22,12 @@ config KVM
select PREEMPT_NOTIFIERS
select ANON_INODES
select HAVE_KVM_CPU_RELAX_INTERCEPT
+ select HAVE_KVM_ARCH_TLB_FLUSH_ALL
select KVM_MMIO
select KVM_ARM_HOST
select KVM_ARM_VGIC
select KVM_ARM_TIMER
+ select KVM_GENERIC_DIRTYLOG_READ_PROTECT
---help---
Support hosting virtualized guest machines.
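Taken together, the two new selects wire this arch code into the generic dirty-log machinery. Summarized as a comment, with the generic helper name recalled from the companion generic patches (it may differ slightly across kernel versions):

/*
 * KVM_GET_DIRTY_LOG ioctl
 *   -> kvm_vm_ioctl_get_dirty_log()                 (arch, above)
 *     -> kvm_get_dirty_log_protect()                (generic, from
 *                                                    KVM_GENERIC_DIRTYLOG_READ_PROTECT)
 *       -> kvm_arch_mmu_write_protect_pt_masked()   (arch, above)
 *         -> stage2_wp_range()
 *     -> kvm_flush_remote_tlbs()                    (arch version, used because of
 *                                                    HAVE_KVM_ARCH_TLB_FLUSH_ALL)
 */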