@@ -320,15 +320,17 @@ void kvm_iommu_unmap_pages(struct kvm *kvm, struct kvm_memory_slot *slot)
static int kvm_iommu_unmap_memslots(struct kvm *kvm)
{
int idx;
- struct kvm_memslots *slots;
+ struct kvm_memslots *slots, *smm_slots;
struct kvm_memory_slot *memslot;
idx = srcu_read_lock(&kvm->srcu);
slots = kvm_memslots(kvm);
-
kvm_for_each_memslot(memslot, slots)
kvm_iommu_unmap_pages(kvm, memslot);
+ smm_slots = __kvm_memslots(kvm, 1);
+ kvm_for_each_memslot(memslot, smm_slots)
+ kvm_iommu_unmap_pages(kvm, memslot);
srcu_read_unlock(&kvm->srcu, idx);
if (kvm->arch.iommu_noncoherent)
or pages are not unmapped and freed. Signed-off-by: herongguang <herongguang.he@huawei.com> --- arch/x86/kvm/iommu.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) Alternatively, should we change pci-assign to not map SMM slots in the first place, like vfio does?