@@ -191,13 +191,19 @@
#define VTD_ECAP_PT (1ULL << 6)
#define VTD_ECAP_SC (1ULL << 7)
#define VTD_ECAP_MHMV (15ULL << 20)
+#define VTD_ECAP_NEST (1ULL << 26)
#define VTD_ECAP_SRS (1ULL << 31)
#define VTD_ECAP_EAFS (1ULL << 34)
+#define VTD_ECAP_PSS(val) (((val) & 0x1fULL) << 35)
#define VTD_ECAP_PASID (1ULL << 40)
#define VTD_ECAP_SMTS (1ULL << 43)
#define VTD_ECAP_SLTS (1ULL << 46)
#define VTD_ECAP_FLTS (1ULL << 47)
+#define VTD_ECAP_MASK (VTD_ECAP_SRS | VTD_ECAP_EAFS)
+#define VTD_GET_PSS(val) (((val) >> 35) & 0x1f)
+#define VTD_ECAP_PSS_MASK (0x1fULL << 35)
+
/* CAP_REG */
/* (offset >> 4) << 24 */
#define VTD_CAP_FRO (DMAR_FRCD_REG_OFFSET << 20)
@@ -214,11 +220,15 @@
#define VTD_CAP_DRAIN_WRITE (1ULL << 54)
#define VTD_CAP_DRAIN_READ (1ULL << 55)
#define VTD_CAP_FL1GP (1ULL << 56)
+#define VTD_CAP_FL5LP (1ULL << 60)
#define VTD_CAP_DRAIN (VTD_CAP_DRAIN_READ | VTD_CAP_DRAIN_WRITE)
#define VTD_CAP_CM (1ULL << 7)
#define VTD_PASID_ID_SHIFT 20
#define VTD_PASID_ID_MASK ((1ULL << VTD_PASID_ID_SHIFT) - 1)
+
+#define VTD_CAP_MASK (VTD_CAP_FL1GP | VTD_CAP_FL5LP)
+
/* Supported Adjusted Guest Address Widths */
#define VTD_CAP_SAGAW_SHIFT 8
#define VTD_CAP_SAGAW_MASK (0x1fULL << VTD_CAP_SAGAW_SHIFT)
@@ -3819,19 +3819,82 @@ VTDAddressSpace *vtd_find_add_as(IntelIOMMUState *s, PCIBus *bus,
return vtd_dev_as;
}
+static bool vtd_check_hw_info(IntelIOMMUState *s, struct iommu_hw_info_vtd *vtd,
+ Error **errp)
+{
+ if (!(vtd->ecap_reg & VTD_ECAP_NEST)) {
+ error_setg(errp, "Need nested translation on host in modern mode");
+ return false;
+ }
+
+ return true;
+}
+
+/* cap/ecap are readonly after vIOMMU finalized */
+static bool vtd_check_hw_info_finalized(IntelIOMMUState *s,
+ struct iommu_hw_info_vtd *vtd,
+ Error **errp)
+{
+ if (s->cap & ~vtd->cap_reg & VTD_CAP_MASK) {
+        error_setg(errp, "vIOMMU cap %" PRIx64 " isn't compatible with host %llx",
+ s->cap, vtd->cap_reg);
+ return false;
+ }
+
+ if (s->ecap & ~vtd->ecap_reg & VTD_ECAP_MASK) {
+        error_setg(errp, "vIOMMU ecap %" PRIx64 " isn't compatible with host %llx",
+ s->ecap, vtd->ecap_reg);
+ return false;
+ }
+
+ if (s->ecap & vtd->ecap_reg & VTD_ECAP_PASID &&
+ VTD_GET_PSS(s->ecap) > VTD_GET_PSS(vtd->ecap_reg)) {
+        error_setg(errp, "vIOMMU pasid bits %" PRIu64 " > host pasid bits %llu",
+ VTD_GET_PSS(s->ecap), VTD_GET_PSS(vtd->ecap_reg));
+ return false;
+ }
+
+ return true;
+}
+
static bool vtd_sync_hw_info(IntelIOMMUState *s, struct iommu_hw_info_vtd *vtd,
Error **errp)
{
- uint64_t addr_width;
+ uint64_t cap, ecap, addr_width, pasid_bits;
- addr_width = (vtd->cap_reg >> 16) & 0x3fULL;
- if (s->aw_bits > addr_width) {
- error_setg(errp, "User aw-bits: %u > host address width: %lu",
- s->aw_bits, addr_width);
+ if (!s->scalable_modern) {
+ addr_width = (vtd->cap_reg >> 16) & 0x3fULL;
+ if (s->aw_bits > addr_width) {
+            error_setg(errp, "User aw-bits: %u > host address width: %" PRIu64,
+ s->aw_bits, addr_width);
+ return false;
+ }
+ return true;
+ }
+
+ if (!vtd_check_hw_info(s, vtd, errp)) {
return false;
}
- /* TODO: check and sync host cap/ecap into vIOMMU cap/ecap */
+ if (s->cap_finalized) {
+ return vtd_check_hw_info_finalized(s, vtd, errp);
+ }
+
+ /* sync host cap/ecap to vIOMMU */
+
+ cap = s->host_cap & vtd->cap_reg & VTD_CAP_MASK;
+ s->host_cap &= ~VTD_CAP_MASK;
+ s->host_cap |= cap;
+ ecap = s->host_ecap & vtd->ecap_reg & VTD_ECAP_MASK;
+ s->host_ecap &= ~VTD_ECAP_MASK;
+ s->host_ecap |= ecap;
+
+ pasid_bits = VTD_GET_PSS(vtd->ecap_reg);
+ if (s->host_ecap & VTD_ECAP_PASID &&
+ VTD_GET_PSS(s->host_ecap) > pasid_bits) {
+ s->host_ecap &= ~VTD_ECAP_PSS_MASK;
+ s->host_ecap |= VTD_ECAP_PSS(pasid_bits);
+ }
return true;
}
@@ -3873,9 +3936,13 @@ static int vtd_dev_set_iommu_device(PCIBus *bus, void *opaque, int32_t devfn,
assert(0 <= devfn && devfn < PCI_DEVFN_MAX);
- /* None IOMMUFD case */
- if (!idev) {
+ if (!s->scalable_modern && !idev) {
+ /* Legacy vIOMMU and non-IOMMUFD backend */
return 0;
+ } else if (!idev) {
+ /* Modern vIOMMU and non-IOMMUFD backend */
+ error_setg(errp, "Need IOMMUFD backend to setup nested page table");
+ return -1;
}
if (!vtd_check_idev(s, idev, errp)) {