@@ -21,6 +21,7 @@ obj-y += rtc.o
obj-y += save.o
obj-y += stdvga.o
obj-y += vioapic.o
+obj-y += vvtd.o
obj-y += viridian.o
obj-y += vlapic.o
obj-y += vmsi.o
new file mode 100644
@@ -0,0 +1,173 @@
+/*
+ * vvtd.c
+ *
+ * virtualize VT-d for HVM.
+ *
+ * Copyright (C) 2017 Chao Gao, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms and conditions of the GNU General Public
+ * License, version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public
+ * License along with this program; If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <xen/domain_page.h>
+#include <xen/sched.h>
+#include <xen/types.h>
+#include <xen/viommu.h>
+#include <xen/xmalloc.h>
+#include <asm/current.h>
+#include <asm/hvm/domain.h>
+#include <asm/page.h>
+#include <public/viommu.h>
+
+#include "../../../drivers/passthrough/vtd/iommu.h"
+
+struct hvm_hw_vvtd_regs {
+ uint8_t data[1024];
+};
+
+/* Status field of struct vvtd */
+#define VIOMMU_STATUS_IRQ_REMAPPING_ENABLED (1 << 0)
+#define VIOMMU_STATUS_DMA_REMAPPING_ENABLED (1 << 1)
+
+struct vvtd {
+ /* VIOMMU_STATUS_XXX_REMAPPING_ENABLED */
+ int status;
+ /* Base address of remapping hardware register-set */
+ uint64_t base_addr;
+ /* Point back to the owner domain */
+ struct domain *domain;
+ struct hvm_hw_vvtd_regs *regs;
+ struct page_info *regs_page;
+};
+
+static inline void vvtd_set_reg(struct vvtd *vtd, uint32_t reg,
+ uint32_t value)
+{
+ *((uint32_t *)(&vtd->regs->data[reg])) = value;
+}
+
+static inline uint32_t vvtd_get_reg(struct vvtd *vtd, uint32_t reg)
+{
+ return *((uint32_t *)(&vtd->regs->data[reg]));
+}
+
+static inline uint8_t vvtd_get_reg_byte(struct vvtd *vtd, uint32_t reg)
+{
+ return *((uint8_t *)(&vtd->regs->data[reg]));
+}
+
+#define vvtd_get_reg_quad(vvtd, reg, val) do { \
+    (val) = vvtd_get_reg(vvtd, (reg) + 4); \
+    (val) = (val) << 32; \
+    (val) += vvtd_get_reg(vvtd, (reg)); \
+} while(0)
+#define vvtd_set_reg_quad(vvtd, reg, val) do { \
+ vvtd_set_reg(vvtd, reg, (uint32_t)((val) & 0xffffffff)); \
+ vvtd_set_reg(vvtd, (reg) + 4, (uint32_t)((val) >> 32)); \
+} while(0)
+
+static void vvtd_reset(struct vvtd *vvtd, uint64_t capability)
+{
+ uint64_t cap, ecap;
+
+    cap = DMA_CAP_NFR | DMA_CAP_SLLPS | DMA_CAP_FRO |
+ DMA_CAP_MGAW | DMA_CAP_SAGAW | DMA_CAP_ND;
+ ecap = DMA_ECAP_IR | DMA_ECAP_EIM | DMA_ECAP_QI;
+ vvtd_set_reg(vvtd, DMAR_VER_REG, 0x10UL);
+ vvtd_set_reg_quad(vvtd, DMAR_CAP_REG, cap);
+ vvtd_set_reg_quad(vvtd, DMAR_ECAP_REG, ecap);
+ vvtd_set_reg(vvtd, DMAR_GCMD_REG, 0);
+ vvtd_set_reg(vvtd, DMAR_GSTS_REG, 0);
+    vvtd_set_reg_quad(vvtd, DMAR_RTADDR_REG, 0x0ULL);
+ vvtd_set_reg_quad(vvtd, DMAR_CCMD_REG, 0x0ULL);
+ vvtd_set_reg(vvtd, DMAR_FSTS_REG, 0);
+ vvtd_set_reg(vvtd, DMAR_FECTL_REG, 0x80000000UL);
+ vvtd_set_reg(vvtd, DMAR_FEDATA_REG, 0);
+ vvtd_set_reg(vvtd, DMAR_FEADDR_REG, 0);
+ vvtd_set_reg(vvtd, DMAR_FEUADDR_REG, 0);
+ vvtd_set_reg(vvtd, DMAR_PMEN_REG, 0);
+ vvtd_set_reg_quad(vvtd, DMAR_IQH_REG, 0x0ULL);
+ vvtd_set_reg_quad(vvtd, DMAR_IQT_REG, 0x0ULL);
+ vvtd_set_reg_quad(vvtd, DMAR_IQA_REG, 0x0ULL);
+ vvtd_set_reg(vvtd, DMAR_ICS_REG, 0);
+ vvtd_set_reg(vvtd, DMAR_IECTL_REG, 0x80000000UL);
+ vvtd_set_reg(vvtd, DMAR_IEDATA_REG, 0);
+ vvtd_set_reg(vvtd, DMAR_IEADDR_REG, 0);
+ vvtd_set_reg(vvtd, DMAR_IEUADDR_REG, 0);
+    vvtd_set_reg_quad(vvtd, DMAR_IRTA_REG, 0x0ULL);
+}
+
+static struct vvtd *__vvtd_create(struct domain *d,
+ uint64_t base_addr,
+ uint64_t cap)
+{
+ struct vvtd *vvtd;
+
+ if ( !is_hvm_domain(d) )
+        return NULL;
+
+    vvtd = xmalloc(struct vvtd);
+ if ( vvtd == NULL )
+ return NULL;
+
+ vvtd->regs_page = alloc_domheap_page(d, MEMF_no_owner);
+ if ( vvtd->regs_page == NULL )
+ goto out1;
+
+ vvtd->regs = __map_domain_page_global(vvtd->regs_page);
+ if ( vvtd->regs == NULL )
+ goto out2;
+ clear_page(vvtd->regs);
+
+ vvtd_reset(vvtd, cap);
+ vvtd->base_addr = base_addr;
+ vvtd->domain = d;
+ vvtd->status = 0;
+ return vvtd;
+
+out2:
+ free_domheap_page(vvtd->regs_page);
+out1:
+ xfree(vvtd);
+ return NULL;
+}
+
+static void __vvtd_destroy(struct vvtd *vvtd)
+{
+ unmap_domain_page_global(vvtd->regs);
+ free_domheap_page(vvtd->regs_page);
+ xfree(vvtd);
+}
+
+static u64 vvtd_query_caps(struct domain *d)
+{
+ return VIOMMU_CAP_IRQ_REMAPPING;
+}
+
+static int vvtd_create(struct domain *d, struct viommu *viommu)
+{
+ viommu->priv = (void *)__vvtd_create(d, viommu->base_address, viommu->caps);
+ return viommu->priv ? 0 : -ENOMEM;
+}
+
+static int vvtd_destroy(struct viommu *viommu)
+{
+ if ( viommu->priv )
+ __vvtd_destroy(viommu->priv);
+ return 0;
+}
+
+struct viommu_ops vvtd_hvm_vmx_ops = {
+ .query_caps = vvtd_query_caps,
+ .create = vvtd_create,
+ .destroy = vvtd_destroy
+};
@@ -23,31 +23,54 @@
#include <asm/msi.h>
/*
- * Intel IOMMU register specification per version 1.0 public spec.
+ * Intel IOMMU register specification per version 2.4 public spec.
*/
-#define DMAR_VER_REG 0x0 /* Arch version supported by this IOMMU */
-#define DMAR_CAP_REG 0x8 /* Hardware supported capabilities */
-#define DMAR_ECAP_REG 0x10 /* Extended capabilities supported */
-#define DMAR_GCMD_REG 0x18 /* Global command register */
-#define DMAR_GSTS_REG 0x1c /* Global status register */
-#define DMAR_RTADDR_REG 0x20 /* Root entry table */
-#define DMAR_CCMD_REG 0x28 /* Context command reg */
-#define DMAR_FSTS_REG 0x34 /* Fault Status register */
-#define DMAR_FECTL_REG 0x38 /* Fault control register */
-#define DMAR_FEDATA_REG 0x3c /* Fault event interrupt data register */
-#define DMAR_FEADDR_REG 0x40 /* Fault event interrupt addr register */
-#define DMAR_FEUADDR_REG 0x44 /* Upper address register */
-#define DMAR_AFLOG_REG 0x58 /* Advanced Fault control */
-#define DMAR_PMEN_REG 0x64 /* Enable Protected Memory Region */
-#define DMAR_PLMBASE_REG 0x68 /* PMRR Low addr */
-#define DMAR_PLMLIMIT_REG 0x6c /* PMRR low limit */
-#define DMAR_PHMBASE_REG 0x70 /* pmrr high base addr */
-#define DMAR_PHMLIMIT_REG 0x78 /* pmrr high limit */
-#define DMAR_IQH_REG 0x80 /* invalidation queue head */
-#define DMAR_IQT_REG 0x88 /* invalidation queue tail */
-#define DMAR_IQA_REG 0x90 /* invalidation queue addr */
-#define DMAR_IRTA_REG 0xB8 /* intr remap */
+#define DMAR_VER_REG 0x0 /* Arch version supported by this IOMMU */
+#define DMAR_CAP_REG 0x8 /* Hardware supported capabilities */
+#define DMAR_ECAP_REG 0x10 /* Extended capabilities supported */
+#define DMAR_GCMD_REG 0x18 /* Global command register */
+#define DMAR_GSTS_REG 0x1c /* Global status register */
+#define DMAR_RTADDR_REG 0x20 /* Root entry table */
+#define DMAR_CCMD_REG 0x28 /* Context command reg */
+#define DMAR_FSTS_REG 0x34 /* Fault Status register */
+#define DMAR_FECTL_REG 0x38 /* Fault control register */
+#define DMAR_FEDATA_REG 0x3c /* Fault event interrupt data register */
+#define DMAR_FEADDR_REG 0x40 /* Fault event interrupt addr register */
+#define DMAR_FEUADDR_REG 0x44 /* Upper address register */
+#define DMAR_AFLOG_REG 0x58 /* Advanced Fault control */
+#define DMAR_PMEN_REG 0x64 /* Enable Protected Memory Region */
+#define DMAR_PLMBASE_REG 0x68 /* PMRR Low addr */
+#define DMAR_PLMLIMIT_REG 0x6c /* PMRR low limit */
+#define DMAR_PHMBASE_REG 0x70 /* pmrr high base addr */
+#define DMAR_PHMLIMIT_REG 0x78 /* pmrr high limit */
+#define DMAR_IQH_REG 0x80 /* invalidation queue head */
+#define DMAR_IQT_REG 0x88 /* invalidation queue tail */
+#define DMAR_IQT_REG_HI 0x8c
+#define DMAR_IQA_REG 0x90 /* invalidation queue addr */
+#define DMAR_IQA_REG_HI 0x94
+#define DMAR_ICS_REG 0x9c /* Invalidation complete status */
+#define DMAR_IECTL_REG 0xa0 /* Invalidation event control */
+#define DMAR_IEDATA_REG 0xa4 /* Invalidation event data */
+#define DMAR_IEADDR_REG 0xa8 /* Invalidation event address */
+#define DMAR_IEUADDR_REG 0xac /* Invalidation event address */
+#define DMAR_IRTA_REG 0xb8 /* Interrupt remapping table addr */
+#define DMAR_IRTA_REG_HI 0xbc
+#define DMAR_PQH_REG 0xc0 /* Page request queue head */
+#define DMAR_PQH_REG_HI 0xc4
+#define DMAR_PQT_REG            0xc8 /* Page request queue tail */
+#define DMAR_PQT_REG_HI 0xcc
+#define DMAR_PQA_REG 0xd0 /* Page request queue address */
+#define DMAR_PQA_REG_HI 0xd4
+#define DMAR_PRS_REG 0xdc /* Page request status */
+#define DMAR_PECTL_REG 0xe0 /* Page request event control */
+#define DMAR_PEDATA_REG 0xe4 /* Page request event data */
+#define DMAR_PEADDR_REG 0xe8 /* Page request event address */
+#define DMAR_PEUADDR_REG 0xec /* Page event upper address */
+#define DMAR_MTRRCAP_REG 0x100 /* MTRR capability */
+#define DMAR_MTRRCAP_REG_HI 0x104
+#define DMAR_MTRRDEF_REG 0x108 /* MTRR default type */
+#define DMAR_MTRRDEF_REG_HI 0x10c
#define OFFSET_STRIDE (9)
#define dmar_readl(dmar, reg) readl((dmar) + (reg))
@@ -58,6 +81,31 @@
#define VER_MAJOR(v) (((v) & 0xf0) >> 4)
#define VER_MINOR(v) ((v) & 0x0f)
+/* CAP_REG */
+/* (offset >> 4) << 24 */
+#define DMA_DOMAIN_ID_SHIFT 16 /* 16-bit domain id for 64K domains */
+#define DMA_DOMAIN_ID_MASK ((1UL << DMA_DOMAIN_ID_SHIFT) - 1)
+#define DMA_CAP_ND (((DMA_DOMAIN_ID_SHIFT - 4) / 2) & 7ULL)
+#define DMA_MGAW 39 /* Maximum Guest Address Width */
+#define DMA_CAP_MGAW (((DMA_MGAW - 1) & 0x3fULL) << 16)
+#define DMA_MAMV 18ULL
+#define DMA_CAP_MAMV (DMA_MAMV << 48)
+#define DMA_CAP_PSI (1ULL << 39)
+#define DMA_CAP_SLLPS ((1ULL << 34) | (1ULL << 35))
+#define DMAR_FRCD_REG_NR 1ULL
+#define DMA_CAP_FRO_OFFSET 0x220ULL
+#define DMA_CAP_FRO (DMA_CAP_FRO_OFFSET << 20)
+#define DMA_CAP_NFR ((DMAR_FRCD_REG_NR - 1) << 40)
+
+/* Supported Adjusted Guest Address Widths */
+#define DMA_CAP_SAGAW_SHIFT 8
+#define DMA_CAP_SAGAW_MASK (0x1fULL << DMA_CAP_SAGAW_SHIFT)
+ /* 39-bit AGAW, 3-level page-table */
+#define DMA_CAP_SAGAW_39bit (0x2ULL << DMA_CAP_SAGAW_SHIFT)
+ /* 48-bit AGAW, 4-level page-table */
+#define DMA_CAP_SAGAW_48bit (0x4ULL << DMA_CAP_SAGAW_SHIFT)
+#define DMA_CAP_SAGAW DMA_CAP_SAGAW_39bit
+
/*
* Decoding Capability Register
*/
@@ -89,6 +137,14 @@
#define cap_afl(c) (((c) >> 3) & 1)
#define cap_ndoms(c) (1 << (4 + 2 * ((c) & 0x7)))
+/* ECAP_REG */
+/* (offset >> 4) << 8 */
+#define DMA_ECAP_QI (1ULL << 1)
+/* Interrupt Remapping support */
+#define DMA_ECAP_IR (1ULL << 3)
+#define DMA_ECAP_EIM (1ULL << 4)
+#define DMA_ECAP_MHMV (15ULL << 20)
+
/*
* Extended Capability Register
*/
@@ -22,6 +22,9 @@
#include <xen/viommu.h>
#include <asm/types.h>
+#include <asm/processor.h>
+
+extern struct viommu_ops vvtd_hvm_vmx_ops;
struct irq_remapping_info
{
@@ -48,6 +51,12 @@ struct irq_remapping_request
static inline const struct viommu_ops *viommu_get_ops(void)
{
+ /*
+     * This doesn't mean that the vIOMMU depends on the hardware CPU
+     * vendor; the check merely limits its usage to minimize the
+     * impact on existing code.
+ if ( boot_cpu_data.x86_vendor == X86_VENDOR_INTEL )
+ return &vvtd_hvm_vmx_ops;
return NULL;
}