@@ -834,6 +834,30 @@ static bool_t __init set_iommu_interrupt
return 1;
}
+int iov_adjust_irq_affinities(void)
+{
+    const struct amd_iommu *iommu;
+
+    if ( !iommu_enabled )
+        return 0;
+
+    for_each_amd_iommu ( iommu )
+    {
+        struct irq_desc *desc = irq_to_desc(iommu->msi.irq);
+        unsigned long flags;
+
+        spin_lock_irqsave(&desc->lock, flags);
+        if ( iommu->ctrl.int_cap_xt_en )
+            set_x2apic_affinity(desc, &cpu_online_map);
+        else
+            set_msi_affinity(desc, &cpu_online_map);
+        spin_unlock_irqrestore(&desc->lock, flags);
+    }
+
+    return 0;
+}
+__initcall(iov_adjust_irq_affinities);
+
/*
* Family15h Model 10h-1fh erratum 746 (IOMMU Logging May Stall Translations)
* Workaround:
@@ -1047,7 +1071,7 @@ static void * __init allocate_ppr_log(st
IOMMU_PPR_LOG_DEFAULT_ENTRIES, "PPR Log");
}
-static int __init amd_iommu_init_one(struct amd_iommu *iommu)
+static int __init amd_iommu_init_one(struct amd_iommu *iommu, bool intr)
{
if ( allocate_cmd_buffer(iommu) == NULL )
goto error_out;
@@ -1058,7 +1082,7 @@ static int __init amd_iommu_init_one(str
if ( iommu->features.flds.ppr_sup && !allocate_ppr_log(iommu) )
goto error_out;
- if ( !set_iommu_interrupt_handler(iommu) )
+ if ( intr && !set_iommu_interrupt_handler(iommu) )
goto error_out;
/* To make sure that device_table.buffer has been successfully allocated */
@@ -1285,7 +1309,7 @@ static int __init amd_iommu_prepare_one(
return 0;
}
-int __init amd_iommu_init(void)
+int __init amd_iommu_prepare(void)
{
struct amd_iommu *iommu;
int rc = -ENODEV;
@@ -1300,9 +1324,14 @@ int __init amd_iommu_init(void)
if ( unlikely(acpi_gbl_FADT.boot_flags & ACPI_FADT_NO_MSI) )
goto error_out;
+    /* Have we been here before? */
+    if ( ivhd_type )
+        return 0;
+
rc = amd_iommu_get_supported_ivhd_type();
if ( rc < 0 )
goto error_out;
+ BUG_ON(!rc);
ivhd_type = rc;
rc = amd_iommu_get_ivrs_dev_entries();
@@ -1321,9 +1350,33 @@ int __init amd_iommu_init(void)
}
rc = amd_iommu_update_ivrs_mapping_acpi();
+
+ error_out:
+    if ( rc )
+    {
+        amd_iommu_init_cleanup();
+        ivhd_type = 0;
+    }
+
+    return rc;
+}
+
+int __init amd_iommu_init(bool xt)
+{
+    struct amd_iommu *iommu;
+    int rc = amd_iommu_prepare();
+
if ( rc )
goto error_out;
+    for_each_amd_iommu ( iommu )
+    {
+        /* NB: There's no need to actually write these out right here. */
+        iommu->ctrl.ga_en |= xt;
+        iommu->ctrl.xt_en = xt;
+        iommu->ctrl.int_cap_xt_en = xt;
+    }
+
/* initialize io-apic interrupt remapping entries */
if ( iommu_intremap )
rc = amd_iommu_setup_ioapic_remapping();
@@ -1346,7 +1399,7 @@ int __init amd_iommu_init(void)
/* per iommu initialization */
for_each_amd_iommu ( iommu )
{
- rc = amd_iommu_init_one(iommu);
+ rc = amd_iommu_init_one(iommu, !xt);
if ( rc )
goto error_out;
}
@@ -1358,6 +1411,40 @@ error_out:
return rc;
}
+int __init amd_iommu_init_interrupt(void)
+{
+    struct amd_iommu *iommu;
+    int rc = 0;
+
+    for_each_amd_iommu ( iommu )
+    {
+        struct irq_desc *desc;
+
+        if ( !set_iommu_interrupt_handler(iommu) )
+        {
+            rc = -EIO;
+            break;
+        }
+
+        desc = irq_to_desc(iommu->msi.irq);
+
+        spin_lock(&desc->lock);
+        ASSERT(iommu->ctrl.int_cap_xt_en);
+        set_x2apic_affinity(desc, &cpu_online_map);
+        spin_unlock(&desc->lock);
+
+        set_iommu_event_log_control(iommu, IOMMU_CONTROL_ENABLED);
+
+        if ( iommu->features.flds.ppr_sup )
+            set_iommu_ppr_log_control(iommu, IOMMU_CONTROL_ENABLED);
+    }
+
+    if ( rc )
+        amd_iommu_init_cleanup();
+
+    return rc;
+}
+
static void invalidate_all_domain_pages(void)
{
struct domain *d;
@@ -796,6 +796,40 @@ void* __init amd_iommu_alloc_intremap_ta
return tb;
}
+bool __init iov_supports_xt(void)
+{
+    unsigned int apic;
+    struct amd_iommu *iommu;
+
+    if ( !iommu_enable || !iommu_intremap || !cpu_has_cx16 )
+        return false;
+
+    if ( amd_iommu_prepare() )
+        return false;
+
+    for_each_amd_iommu ( iommu )
+        if ( !iommu->features.flds.ga_sup || !iommu->features.flds.xt_sup )
+            return false;
+
+    for ( apic = 0; apic < nr_ioapics; apic++ )
+    {
+        unsigned int idx = ioapic_id_to_index(IO_APIC_ID(apic));
+
+        if ( idx == MAX_IO_APICS )
+            return false;
+
+        if ( !find_iommu_for_device(ioapic_sbdf[idx].seg,
+                                    ioapic_sbdf[idx].bdf) )
+        {
+            AMD_IOMMU_DEBUG("No IOMMU for IO-APIC %#x (ID %x)\n",
+                            apic, IO_APIC_ID(apic));
+            return false;
+        }
+    }
+
+    return true;
+}
+
int __init amd_setup_hpet_msi(struct msi_desc *msi_desc)
{
spinlock_t *lock;
@@ -170,7 +170,8 @@ static int __init iov_detect(void)
if ( !iommu_enable && !iommu_intremap )
return 0;
- if ( amd_iommu_init() != 0 )
+ else if ( (init_done ? amd_iommu_init_interrupt()
+ : amd_iommu_init(false)) != 0 )
{
printk("AMD-Vi: Error initialization\n");
return -ENODEV;
@@ -183,6 +184,25 @@ static int __init iov_detect(void)
return scan_pci_devices();
}
+static int iov_enable_xt(void)
+{
+    int rc;
+
+    if ( system_state >= SYS_STATE_active )
+        return 0;
+
+    if ( (rc = amd_iommu_init(true)) != 0 )
+    {
+        printk("AMD-Vi: Error %d initializing for x2APIC mode\n", rc);
+        /* -ENXIO has special meaning to the caller - convert it. */
+        return rc != -ENXIO ? rc : -ENODATA;
+    }
+
+    init_done = true;
+
+    return 0;
+}
+
int amd_iommu_alloc_root(struct domain_iommu *hd)
{
if ( unlikely(!hd->arch.root_table) )
@@ -559,11 +579,13 @@ static const struct iommu_ops __initcons
.free_page_table = deallocate_page_table,
.reassign_device = reassign_device,
.get_device_group_id = amd_iommu_group_id,
+ .enable_x2apic = iov_enable_xt,
.update_ire_from_apic = amd_iommu_ioapic_update_ire,
.update_ire_from_msi = amd_iommu_msi_msg_update_ire,
.read_apic_from_ire = amd_iommu_read_ioapic_from_ire,
.read_msi_from_ire = amd_iommu_read_msi_from_ire,
.setup_hpet_msi = amd_setup_hpet_msi,
+ .adjust_irq_affinities = iov_adjust_irq_affinities,
.suspend = amd_iommu_suspend,
.resume = amd_iommu_resume,
.share_p2m = amd_iommu_share_p2m,
@@ -574,4 +596,5 @@ static const struct iommu_ops __initcons
static const struct iommu_init_ops __initconstrel _iommu_init_ops = {
.ops = &_iommu_ops,
.setup = iov_detect,
+ .supports_x2apic = iov_supports_xt,
};
@@ -48,8 +48,11 @@ int amd_iommu_detect_acpi(void);
void get_iommu_features(struct amd_iommu *iommu);
/* amd-iommu-init functions */
-int amd_iommu_init(void);
+int amd_iommu_prepare(void);
+int amd_iommu_init(bool xt);
+int amd_iommu_init_interrupt(void);
int amd_iommu_update_ivrs_mapping_acpi(void);
+int iov_adjust_irq_affinities(void);
/* mapping functions */
int __must_check amd_iommu_map_page(struct domain *d, dfn_t dfn,
@@ -96,6 +99,7 @@ void amd_iommu_flush_all_caches(struct a
struct amd_iommu *find_iommu_for_device(int seg, int bdf);
/* interrupt remapping */
+bool iov_supports_xt(void);
int amd_iommu_setup_ioapic_remapping(void);
void *amd_iommu_alloc_intremap_table(unsigned long **);
int amd_iommu_free_intremap_table(u16 seg, struct ivrs_mappings *);
In order for the CPUs to use x2APIC mode, the IOMMU(s) first need to be
switched into a suitable state.

The post-AP-bringup IRQ affinity adjustment is also done for the
non-x2APIC case.

Signed-off-by: Jan Beulich <jbeulich@suse.com>
---
TBD: Instead of the system_state check in iov_enable_xt() the function
     could also zap its own hook pointer, at which point it could also
     become __init. This would, however, require that either
     resume_x2apic() be bound to ignore iommu_enable_x2apic() errors
     forever, or that iommu_enable_x2apic() be slightly re-arranged to
     not return -EOPNOTSUPP when finding a NULL hook during resume.
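For illustration only, a minimal sketch of that alternative (not part of the
patch) might look as follows. It assumes the global iommu_ops instance used
on x86 is still writable at this point and that iommu_enable_x2apic() would
be adjusted to tolerate a NULL enable_x2apic hook on the resume path, as the
note above says; neither assumption is established by this patch.

/*
 * Hypothetical variant of iov_enable_xt() per the TBD note: instead of
 * checking system_state, detach the hook after the first invocation so
 * resume_x2apic() can no longer reach the (then __init) function.
 */
static int __init iov_enable_xt(void)
{
    int rc;

    /* One-shot: prevent any further invocation via the hook. */
    iommu_ops.enable_x2apic = NULL;

    if ( (rc = amd_iommu_init(true)) != 0 )
    {
        printk("AMD-Vi: Error %d initializing for x2APIC mode\n", rc);
        /* -ENXIO has special meaning to the caller - convert it. */
        return rc != -ENXIO ? rc : -ENODATA;
    }

    init_done = true;

    return 0;
}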