@@ -514,6 +514,9 @@ struct kvm_irqfd {
struct kvm_userspace_memory_region)
#define KVM_SET_TSS_ADDR _IO(KVMIO, 0x47)
#define KVM_SET_IDENTITY_MAP_ADDR _IOW(KVMIO, 0x48, __u64)
+#define KVM_IOMMU_UNMAP_PAGE _IOW(KVMIO, 0x49, __u64)
+#define KVM_IOMMU_MAP_PAGE _IOW(KVMIO, 0x50, __u64)
+
/* Device model IOC */
#define KVM_CREATE_IRQCHIP _IO(KVMIO, 0x60)
#define KVM_IRQ_LINE _IOW(KVMIO, 0x61, struct kvm_irq_level)
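Both new ioctls take a __u64 guest frame number and, per the _IOW declarations
above, expect a pointer to that value as the ioctl argument. A minimal
userspace sketch of how a VMM's balloon handling might drive them (the vm_fd
variable and helper names are illustrative assumptions, not part of this
patch; KVM_IOMMU_MAP_PAGE and KVM_IOMMU_UNMAP_PAGE are only visible in
<linux/kvm.h> once the patch is applied):

	#include <stdint.h>
	#include <sys/ioctl.h>
	#include <linux/kvm.h>

	/* Tear down the IOMMU mapping for a page the guest ballooned out. */
	static int balloon_iommu_unmap(int vm_fd, uint64_t gfn)
	{
		return ioctl(vm_fd, KVM_IOMMU_UNMAP_PAGE, &gfn);
	}

	/* Re-establish the IOMMU mapping when the page is given back. */
	static int balloon_iommu_map(int vm_fd, uint64_t gfn)
	{
		return ioctl(vm_fd, KVM_IOMMU_MAP_PAGE, &gfn);
	}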
@@ -411,6 +411,10 @@ int kvm_assign_device(struct kvm *kvm,
struct kvm_assigned_dev_kernel *assigned_dev);
int kvm_deassign_device(struct kvm *kvm,
struct kvm_assigned_dev_kernel *assigned_dev);
+void kvm_iommu_unmap_page(struct kvm *kvm,
+ gfn_t base_gfn);
+int kvm_iommu_map_page(struct kvm *kvm,
+ gfn_t base_gfn);
#else /* CONFIG_IOMMU_API */
static inline int kvm_iommu_map_pages(struct kvm *kvm,
gfn_t base_gfn,
@@ -190,23 +190,101 @@ static void kvm_iommu_put_pages(struct kvm *kvm,
gfn_t gfn = base_gfn;
pfn_t pfn;
struct iommu_domain *domain = kvm->arch.iommu_domain;
- unsigned long i;
+ unsigned long i, iommu_pages;
u64 phys;
/* check if iommu exists and in use */
if (!domain)
return;
- for (i = 0; i < npages; i++) {
+ for (i = 0, iommu_pages = 0; i < npages; i++, gfn++) {
phys = iommu_iova_to_phys(domain, gfn_to_gpa(gfn));
+
+		/*
+		 * Ballooning can leave holes in this range: pages the
+		 * guest returned to the host are no longer mapped.  On
+		 * a hole, unmap the run of consecutive pages collected
+		 * so far and start a new run after the hole.
+		 */
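+		/*
+		 * Example: for an 8-page run starting at gfn 100 with
+		 * gfns 103 and 104 ballooned out, we unmap gfns
+		 * 100-102 when the first hole is reached and gfns
+		 * 105-107 after the loop, rather than blindly
+		 * unmapping all 8 pages in one call.
+		 */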
+		if (!phys) {
+			/* Flush the consecutive run collected so far */
+			if (iommu_pages)
+				iommu_unmap_range(domain,
+						  gfn_to_gpa(base_gfn),
+						  PAGE_SIZE * iommu_pages);
+
+			/* Restart the run on the page after the hole */
+			base_gfn = gfn + 1;
+			iommu_pages = 0;
+			continue;
+		}
pfn = phys >> PAGE_SHIFT;
kvm_release_pfn_clean(pfn);
- gfn++;
+ ++iommu_pages;
}
- iommu_unmap_range(domain, gfn_to_gpa(base_gfn), PAGE_SIZE * npages);
+	/* Unmap the final run of consecutive pages, if any */
+ if (iommu_pages != 0)
+ iommu_unmap_range(domain,
+ gfn_to_gpa(base_gfn),
+ PAGE_SIZE * iommu_pages);
+}
+
+/* Map a single guest page into the IOMMU */
+int kvm_iommu_map_page(struct kvm *kvm,
+ gfn_t base_gfn)
+{
+ gfn_t gfn = base_gfn;
+ pfn_t pfn;
+ struct iommu_domain *domain = kvm->arch.iommu_domain;
+ u64 phys;
+ int rc;
+ int flags;
+
+ /* check if iommu exists and in use */
+ if (!domain)
+ return 0;
+ phys = iommu_iova_to_phys(domain, gfn_to_gpa(gfn));
+
+	/* Nothing to do if the address is already mapped */
+ if (phys)
+ return 0;
+ flags = IOMMU_READ | IOMMU_WRITE;
+ if (kvm->arch.iommu_flags & KVM_IOMMU_CACHE_COHERENCY)
+ flags |= IOMMU_CACHE;
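+	/*
+	 * gfn_to_pfn() takes a reference on the page.  It is released
+	 * again when the mapping is torn down (kvm_iommu_unmap_page()
+	 * or kvm_iommu_put_pages()), or below if the map fails.
+	 */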
+ pfn = gfn_to_pfn(kvm, gfn);
+	rc = iommu_map_range(domain,
+			     gfn_to_gpa(gfn),
+			     pfn_to_hpa(pfn),
+			     PAGE_SIZE, flags);
+	/* Drop the page reference taken by gfn_to_pfn() if the map failed */
+	if (rc)
+		kvm_release_pfn_clean(pfn);
+	return rc;
+}
+
+/* Unmap a single guest page from the IOMMU */
+void kvm_iommu_unmap_page(struct kvm *kvm,
+ gfn_t base_gfn)
+{
+ gfn_t gfn = base_gfn;
+ pfn_t pfn;
+ struct iommu_domain *domain = kvm->arch.iommu_domain;
+ u64 phys;
+
+ /* check if iommu exists and in use */
+ if (!domain)
+ return;
+ phys = iommu_iova_to_phys(domain, gfn_to_gpa(gfn));
+
+	/* Nothing to do if the address is not mapped */
+ if (!phys)
+ return;
+ pfn = phys >> PAGE_SHIFT;
+ kvm_release_pfn_clean(pfn);
+ iommu_unmap_range(domain, gfn_to_gpa(gfn), PAGE_SIZE);
}
+
static int kvm_iommu_unmap_memslots(struct kvm *kvm)
{
int i;
@@ -2184,6 +2184,15 @@ static long kvm_vm_ioctl(struct file *filp,
r = -EOPNOTSUPP;
break;
}
+	case KVM_IOMMU_MAP_PAGE: {
+		u64 gfn;
+		r = -EFAULT;
+		if (copy_from_user(&gfn, argp, sizeof(gfn)))
+			goto out;
+		r = kvm_iommu_map_page(kvm, gfn);
+		break;
+	}
+	case KVM_IOMMU_UNMAP_PAGE: {
+		u64 gfn;
+		r = -EFAULT;
+		if (copy_from_user(&gfn, argp, sizeof(gfn)))
+			goto out;
+		kvm_iommu_unmap_page(kvm, gfn);
+		r = 0;
+		break;
+	}
#ifdef KVM_CAP_ASSIGN_DEV_IRQ
case KVM_ASSIGN_DEV_IRQ: {
struct kvm_assigned_irq assigned_irq;