@@ -6979,6 +6979,12 @@ static void kvm_mmu_zap_memslot_leafs(struct kvm *kvm, struct kvm_memory_slot *s
.start = slot->base_gfn,
.end = slot->base_gfn + slot->npages,
.may_block = true,
+
+ /*
+ * All private and shared pages should be zapped on memslot
+ * deletion.
+ */
+ .process = KVM_PROCESS_PRIVATE_AND_SHARED,
};
if (kvm_tdp_mmu_unmap_gfn_range(kvm, &range, false))
@@ -7479,6 +7485,12 @@ bool kvm_arch_pre_set_memory_attributes(struct kvm *kvm,
if (WARN_ON_ONCE(!kvm_arch_has_private_mem(kvm)))
return false;
+ /* Unmap pages mapped with the old attribute. */
+ if (range->arg.attributes & KVM_MEMORY_ATTRIBUTE_PRIVATE)
+ range->process = KVM_PROCESS_SHARED;
+ else
+ range->process = KVM_PROCESS_PRIVATE;
+
return kvm_unmap_gfn_range(kvm, range);
}
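
The direction here is easy to misread: converting a range to private leaves the shared mapping stale, while converting to shared leaves the private mapping stale, so the pre-set hook zaps the mapping of the old attribute. A minimal standalone sketch of that selection, using local stubs rather than the kernel headers:

#include <stdio.h>

/* Local stubs mirroring the kernel definitions. */
#define KVM_MEMORY_ATTRIBUTE_PRIVATE	(1UL << 3)

enum kvm_process {
	KVM_PROCESS_SHARED  = 1 << 0,
	KVM_PROCESS_PRIVATE = 1 << 1,
};

/* The mapping of the *old* attribute is the stale one to zap. */
static enum kvm_process stale_mapping(unsigned long new_attributes)
{
	if (new_attributes & KVM_MEMORY_ATTRIBUTE_PRIVATE)
		return KVM_PROCESS_SHARED;	/* shared -> private */
	return KVM_PROCESS_PRIVATE;		/* private -> shared */
}

int main(void)
{
	printf("to private: zap %s\n",
	       stale_mapping(KVM_MEMORY_ATTRIBUTE_PRIVATE) ==
	       KVM_PROCESS_SHARED ? "shared" : "private");
	printf("to shared:  zap %s\n",
	       stale_mapping(0) ==
	       KVM_PROCESS_PRIVATE ? "private" : "shared");
	return 0;
}
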
@@ -260,11 +260,19 @@ union kvm_mmu_notifier_arg {
unsigned long attributes;
};
+enum kvm_process {
+ /* Zero is reserved to catch ranges that forgot to set the field. */
+ BUGGY_KVM_INVALIDATION = 0,
+ KVM_PROCESS_SHARED = BIT(0),
+ KVM_PROCESS_PRIVATE = BIT(1),
+ KVM_PROCESS_PRIVATE_AND_SHARED = KVM_PROCESS_SHARED | KVM_PROCESS_PRIVATE,
+};
+
struct kvm_gfn_range {
struct kvm_memory_slot *slot;
gfn_t start;
gfn_t end;
union kvm_mmu_notifier_arg arg;
+ enum kvm_process process;
bool may_block;
};
bool kvm_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range);
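
For readers without the tree handy, here is a minimal, userspace-compilable sketch of how a zap path can filter on the new field; root_is_private() and should_process_root() are hypothetical stand-ins for this sketch, not the real TDP MMU walkers:

#include <stdbool.h>
#include <stdio.h>

#define BIT(n) (1UL << (n))

enum kvm_process {
	BUGGY_KVM_INVALIDATION = 0,
	KVM_PROCESS_SHARED = BIT(0),
	KVM_PROCESS_PRIVATE = BIT(1),
	KVM_PROCESS_PRIVATE_AND_SHARED = KVM_PROCESS_SHARED | KVM_PROCESS_PRIVATE,
};

/* Hypothetical stand-in: whether a root maps private (encrypted) GPAs. */
static bool root_is_private(int root)
{
	return root != 0;
}

/*
 * A zap path consults range->process before touching a root.  Note that
 * a zero-initialized range (BUGGY_KVM_INVALIDATION) selects nothing, so
 * a caller that forgets to set the field zaps nothing rather than
 * silently zapping everything.
 */
static bool should_process_root(enum kvm_process process, int root)
{
	if (root_is_private(root))
		return process & KVM_PROCESS_PRIVATE;
	return process & KVM_PROCESS_SHARED;
}

int main(void)
{
	printf("shared-only, shared root:  %d\n",
	       should_process_root(KVM_PROCESS_SHARED, 0));	/* 1 */
	printf("shared-only, private root: %d\n",
	       should_process_root(KVM_PROCESS_SHARED, 1));	/* 0 */
	printf("both, private root:        %d\n",
	       should_process_root(KVM_PROCESS_PRIVATE_AND_SHARED, 1));	/* 1 */
	return 0;
}
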
@@ -109,6 +109,8 @@ static void kvm_gmem_invalidate_begin(struct kvm_gmem *gmem, pgoff_t start,
.end = slot->base_gfn + min(pgoff + slot->npages, end) - pgoff,
.slot = slot,
.may_block = true,
+ /* guest_memfd is relevant only to private mappings. */
+ .process = KVM_PROCESS_PRIVATE,
};
if (!found_memslot) {
@@ -635,6 +635,11 @@ static __always_inline kvm_mn_ret_t __kvm_handle_hva_range(struct kvm *kvm,
*/
gfn_range.arg = range->arg;
gfn_range.may_block = range->may_block;
+ /*
+ * HVA-based notifications aren't relevant to private
+ * mappings, as private memory has no userspace mapping.
+ */
+ gfn_range.process = KVM_PROCESS_SHARED;
/*
* {gfn(page) | page intersects with [hva_start, hva_end)} =
@@ -2453,6 +2458,14 @@ static __always_inline void kvm_handle_gfn_range(struct kvm *kvm,
gfn_range.arg = range->arg;
gfn_range.may_block = range->may_block;
+ /*
+ * If/when KVM supports more attributes beyond private vs. shared, this
+ * _could_ set KVM_PROCESS_{SHARED,PRIVATE} appropriately if the entire target
+ * range already has the desired private vs. shared state (it's unclear
+ * if that is a net win). For now, KVM reaches this point if and only
+ * if the private flag is being toggled, i.e. all mappings are in play.
+ */
+
for (i = 0; i < kvm_arch_nr_memslot_as_ids(kvm); i++) {
slots = __kvm_memslots(kvm, i);
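
Purely to illustrate what the comment above alludes to, a hypothetical sketch of such an optimization; mappings_in_play() is invented for this sketch and is not being proposed:

#include <stdbool.h>
#include <stdio.h>

enum kvm_process {
	KVM_PROCESS_SHARED  = 1 << 0,
	KVM_PROCESS_PRIVATE = 1 << 1,
	KVM_PROCESS_PRIVATE_AND_SHARED = KVM_PROCESS_SHARED | KVM_PROCESS_PRIVATE,
};

/*
 * Hypothetical: if a future attribute changes without flipping the
 * private/shared state of a uniform range, only the mappings that can
 * actually exist would need processing.
 */
static enum kvm_process mappings_in_play(bool all_private, bool all_shared)
{
	if (all_private)
		return KVM_PROCESS_PRIVATE;	/* no shared mappings exist */
	if (all_shared)
		return KVM_PROCESS_SHARED;	/* no private mappings exist */
	return KVM_PROCESS_PRIVATE_AND_SHARED;
}

int main(void)
{
	printf("uniformly private range: %d\n", mappings_in_play(true, false));
	return 0;
}
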
@@ -2509,6 +2522,7 @@ static int kvm_vm_set_mem_attributes(struct kvm *kvm, gfn_t start, gfn_t end,
struct kvm_mmu_notifier_range pre_set_range = {
.start = start,
.end = end,
+ .arg.attributes = attributes,
.handler = kvm_pre_set_memory_attributes,
.on_lock = kvm_mmu_invalidate_begin,
.flush_on_ret = true,
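
The .arg.attributes = attributes line is load-bearing: kvm_arch_pre_set_memory_attributes() reads range->arg.attributes to pick which side to zap, so a zero-initialized arg would make every conversion look like a conversion to shared. A trimmed sketch of that failure mode, with stub types in place of the kernel structs:

#include <stdio.h>

#define KVM_MEMORY_ATTRIBUTE_PRIVATE	(1UL << 3)

struct gfn_range {
	unsigned long attributes;	/* stand-in for range->arg.attributes */
};

static const char *side_to_zap(const struct gfn_range *range)
{
	return (range->attributes & KVM_MEMORY_ATTRIBUTE_PRIVATE) ?
	       "shared" : "private";
}

int main(void)
{
	struct gfn_range forgot = { 0 };	/* arg never populated */
	struct gfn_range correct = { KVM_MEMORY_ATTRIBUTE_PRIVATE };

	/* Converting to private, but the zeroed arg picks the wrong side. */
	printf("zero-initialized: zap %s\n", side_to_zap(&forgot));	/* private */
	printf("populated:        zap %s\n", side_to_zap(&correct));	/* shared */
	return 0;
}
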