@@ -1073,6 +1073,19 @@ static void sanitise_mte_tags(struct kvm *kvm, kvm_pfn_t pfn,
 	}
 }
 
+static bool kvm_vma_mte_allowed(struct vm_area_struct *vma)
+{
+	/*
+	 * VM_SHARED mappings are not allowed with MTE to avoid races
+	 * when updating the PG_mte_tagged page flag, see
+	 * sanitise_mte_tags for more details.
+	 */
+	if (vma->vm_flags & VM_SHARED)
+		return false;
+
+	return vma->vm_flags & VM_MTE_ALLOWED;
+}
+
 static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
 			  struct kvm_memory_slot *memslot, unsigned long hva,
 			  unsigned long fault_status)
@@ -1249,9 +1262,8 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
 	}
 
 	if (fault_status != FSC_PERM && !device && kvm_has_mte(kvm)) {
-		/* Check the VMM hasn't introduced a new VM_SHARED VMA */
-		if ((vma->vm_flags & VM_MTE_ALLOWED) &&
-		    !(vma->vm_flags & VM_SHARED)) {
+		/* Check the VMM hasn't introduced a new disallowed VMA */
+		if (kvm_vma_mte_allowed(vma)) {
 			sanitise_mte_tags(kvm, pfn, vma_pagesize);
 		} else {
 			ret = -EFAULT;
@@ -1695,12 +1707,7 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
 		if (!vma)
 			break;
 
-		/*
-		 * VM_SHARED mappings are not allowed with MTE to avoid races
-		 * when updating the PG_mte_tagged page flag, see
-		 * sanitise_mte_tags for more details.
-		 */
-		if (kvm_has_mte(kvm) && vma->vm_flags & VM_SHARED) {
+		if (kvm_has_mte(kvm) && !kvm_vma_mte_allowed(vma)) {
			ret = -EINVAL;
 			break;
 		}
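
Not part of the patch, just for reviewers: a minimal VMM-side sketch in C of a mapping that satisfies the new helper. The vm_fd parameter, the 2MiB size and the guest physical address are made-up example values; KVM_ENABLE_CAP, KVM_CAP_ARM_MTE and KVM_SET_USER_MEMORY_REGION are existing KVM uAPI, but treat the snippet as an illustration rather than a tested program.

/*
 * Illustrative only: shows which mappings kvm_vma_mte_allowed() accepts.
 * Assumes vm_fd is a VM file descriptor from KVM_CREATE_VM on an
 * MTE-capable arm64 host.
 */
#include <stdint.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <linux/kvm.h>

static int setup_mte_memslot(int vm_fd)
{
	struct kvm_enable_cap cap = { .cap = KVM_CAP_ARM_MTE };
	struct kvm_userspace_memory_region region;
	void *mem;

	/* MTE must be enabled for the VM before any vCPUs are created. */
	if (ioctl(vm_fd, KVM_ENABLE_CAP, &cap))
		return -1;

	/*
	 * MAP_PRIVATE | MAP_ANONYMOUS yields a VMA with VM_MTE_ALLOWED
	 * set and VM_SHARED clear, so kvm_vma_mte_allowed() returns true.
	 * A MAP_SHARED mapping would instead fail memslot registration
	 * with -EINVAL and stage-2 faults with -EFAULT.
	 */
	mem = mmap(NULL, 0x200000, PROT_READ | PROT_WRITE,
		   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (mem == MAP_FAILED)
		return -1;

	region = (struct kvm_userspace_memory_region) {
		.slot = 0,
		.guest_phys_addr = 0x80000000,	/* arbitrary example GPA */
		.memory_size = 0x200000,
		.userspace_addr = (uint64_t)mem,
	};
	return ioctl(vm_fd, KVM_SET_USER_MEMORY_REGION, &region);
}

Centralising the test in kvm_vma_mte_allowed() keeps the memslot-registration check in kvm_arch_prepare_memory_region() and the stage-2 fault path in user_mem_abort() from drifting apart.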