@@ -20,6 +20,7 @@ config ARM
select HAS_DEVICE_TREE
select HAS_PASSTHROUGH
select HAS_PDX
+ select IOMMU_FORCE_PT_SHARE
config ARCH_DEFCONFIG
string
@@ -13,3 +13,6 @@ config ARM_SMMU
Say Y here if your SoC includes an IOMMU device implementing the
ARM SMMU architecture.
endif
+
+config IOMMU_FORCE_PT_SHARE
+ bool
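
Selecting IOMMU_FORCE_PT_SHARE on Arm is what drives all of the #ifdef/#ifndef logic below: the option has no prompt and no default, so only an architecture-level select (as in the first hunk) can enable it, and it then reaches C code as the usual CONFIG_ preprocessor symbol. A minimal sketch of how that symbol can be consumed; the helper is hypothetical and the snippet assumes the generated config definitions are visible, as they are throughout the Xen build:

    #include <stdbool.h>

    /* Hypothetical helper, not part of the patch: reports whether page-table
     * sharing was forced at build time.  CONFIG_IOMMU_FORCE_PT_SHARE is the
     * preprocessor symbol generated from the Kconfig option above. */
    static inline bool pt_share_is_forced(void)
    {
    #ifdef CONFIG_IOMMU_FORCE_PT_SHARE
        return true;
    #else
        return false;
    #endif
    }
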
@@ -49,7 +49,11 @@ int8_t __hwdom_initdata iommu_hwdom_reserved = -1;
* default until we find a good solution to resolve it.
*/
bool_t __read_mostly iommu_intpost;
-bool_t __read_mostly iommu_hap_pt_share = 1;
+
+#ifndef iommu_hap_pt_share
+bool __read_mostly iommu_hap_pt_share = true;
+#endif
+
bool_t __read_mostly iommu_debug;
bool_t __read_mostly amd_iommu_perdev_intremap = 1;
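
The #ifndef around the definition pairs with the xen/iommu.h hunk further down: when CONFIG_IOMMU_FORCE_PT_SHARE is set, that header turns iommu_hap_pt_share into an object-like macro expanding to true, so the variable must not be defined here at all. A standalone sketch of the pattern, with made-up names:

    #include <stdbool.h>

    /* FORCE_FEATURE / feature_enabled are illustrative stand-ins for
     * CONFIG_IOMMU_FORCE_PT_SHARE / iommu_hap_pt_share. */
    #ifdef FORCE_FEATURE
    #define feature_enabled true   /* what the common header does below */
    #endif

    #ifndef feature_enabled
    bool feature_enabled = true;   /* otherwise: a runtime-tunable variable */
    #endif

Testing the identifier itself with #ifndef, rather than the CONFIG_ symbol, keeps iommu.c indifferent to which architectures force sharing; it only cares whether something has already defined the name.
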
@@ -102,8 +106,10 @@ static int __init parse_iommu_param(const char *s)
iommu_hwdom_passthrough = val;
else if ( (val = parse_boolean("dom0-strict", s, ss)) >= 0 )
iommu_hwdom_strict = val;
+#ifndef iommu_hap_pt_share
else if ( (val = parse_boolean("sharept", s, ss)) >= 0 )
iommu_hap_pt_share = val;
+#endif
else
rc = -EINVAL;
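
The same guard is needed here for correctness, not just tidiness: with the header's #define in effect, the assignment would expand to the ill-formed statement true = val; and the Arm build would fail, so the sharept sub-option instead falls through to the -EINVAL path. A compile-both-ways sketch of the parser side, continuing the made-up names from above:

    #include <stdbool.h>

    #ifdef FORCE_FEATURE
    #define feature_enabled true
    #else
    static bool feature_enabled = true;
    #endif

    /* Only offer the option while the identifier is still an assignable
     * variable; when it is a macro for a literal, reject it instead. */
    static int set_feature(bool val)
    {
    #ifndef feature_enabled
        feature_enabled = val;
        return 0;
    #else
        (void)val;
        return -1;
    #endif
    }
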
@@ -20,9 +20,6 @@ struct arch_iommu
void *priv;
};
-/* Always share P2M Table between the CPU and the IOMMU */
-#define iommu_use_hap_pt(d) is_iommu_enabled(d)
-
const struct iommu_ops *iommu_get_ops(void);
void iommu_set_ops(const struct iommu_ops *ops);
@@ -86,10 +86,6 @@ struct iommu_init_ops {
extern const struct iommu_init_ops *iommu_init_ops;
-/* Are we using the domain P2M table as its IOMMU pagetable? */
-#define iommu_use_hap_pt(d) \
- (hap_enabled(d) && is_iommu_enabled(d) && iommu_hap_pt_share)
-
void iommu_update_ire_from_apic(unsigned int apic, unsigned int reg, unsigned int value);
unsigned int iommu_read_apic_from_ire(unsigned int apic, unsigned int reg);
int iommu_setup_hpet_msi(struct msi_desc *);
@@ -55,7 +55,13 @@ static inline bool_t dfn_eq(dfn_t x, dfn_t y)
extern bool_t iommu_enable, iommu_enabled;
extern bool_t force_iommu, iommu_verbose, iommu_igfx;
extern bool_t iommu_snoop, iommu_qinval, iommu_intremap, iommu_intpost;
-extern bool_t iommu_hap_pt_share;
+
+#ifdef CONFIG_IOMMU_FORCE_PT_SHARE
+#define iommu_hap_pt_share true
+#else
+extern bool iommu_hap_pt_share;
+#endif
+
extern bool_t iommu_debug;
extern bool_t amd_iommu_perdev_intremap;
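
This hunk is the core of the change: where the option is not selected, iommu_hap_pt_share remains a runtime-writable boolean declared here and defined in iommu.c, but where it is selected the name becomes the literal true, so every test of it constant-folds and the non-shared code paths become dead code. A hedged consumer sketch; example_update() and its comments are invented for illustration:

    #include <stdbool.h>

    #ifdef CONFIG_IOMMU_FORCE_PT_SHARE
    #define iommu_hap_pt_share true
    #else
    extern bool iommu_hap_pt_share;     /* defined in iommu.c when not forced */
    #endif

    /* Hypothetical consumer: on Arm the condition is a compile-time constant,
     * so the compiler can drop the else-branch entirely; on x86 it stays a
     * runtime test of the variable. */
    static void example_update(void)
    {
        if ( iommu_hap_pt_share )
        {
            /* CPU and IOMMU walk the same page tables: nothing extra to do. */
        }
        else
        {
            /* A separate IOMMU page table would have to be updated here. */
        }
    }
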
@@ -273,6 +279,17 @@ struct domain_iommu {
#define iommu_set_feature(d, f) set_bit(f, dom_iommu(d)->features)
#define iommu_clear_feature(d, f) clear_bit(f, dom_iommu(d)->features)
+/* Are we using the domain P2M table as its IOMMU pagetable? */
+#define iommu_use_hap_pt(d) \
+ (hap_enabled(d) && is_iommu_enabled(d) && iommu_hap_pt_share)
+
+/* Does the IOMMU pagetable need to be kept synchronized with the P2M */
+#ifdef CONFIG_HAS_PASSTHROUGH
+#define need_iommu_pt_sync(d) (dom_iommu(d)->need_sync)
+#else
+#define need_iommu_pt_sync(d) ({ (void)(d); false; })
+#endif
+
int __must_check iommu_suspend(void);
void iommu_resume(void);
void iommu_crash_shutdown(void);
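
With both predicates now next to each other in xen/iommu.h, callers can cleanly separate "the IOMMU walks the CPU's P2M directly" (iommu_use_hap_pt()) from "separate IOMMU tables exist and must be kept in step with P2M changes" (need_iommu_pt_sync()). A rough caller-side sketch; it assumes the usual Xen types (struct domain, dfn_t, mfn_t) are in scope, and every name other than the two macros is invented:

    /* Hypothetical stub standing in for the real IOMMU mapping primitive. */
    static int mirror_into_iommu(struct domain *d, dfn_t dfn, mfn_t mfn,
                                 unsigned int flags)
    {
        (void)d; (void)dfn; (void)mfn; (void)flags;
        return 0;
    }

    static int p2m_entry_changed(struct domain *d, dfn_t dfn, mfn_t mfn,
                                 unsigned int flags)
    {
        if ( iommu_use_hap_pt(d) )
            return 0;                 /* shared tables: change already visible */

        if ( need_iommu_pt_sync(d) )
            return mirror_into_iommu(d, dfn, mfn, flags);

        return 0;                     /* nothing to synchronise */
    }

The ({ (void)(d); false; }) fallback for !CONFIG_HAS_PASSTHROUGH is a GCC statement expression: it still evaluates the argument exactly once, so callers do not trip unused-variable warnings, while the result is the constant false.
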
@@ -965,12 +965,6 @@ static inline bool is_hwdom_pinned_vcpu(const struct vcpu *v)
cpumask_weight(v->cpu_hard_affinity) == 1);
}
-#ifdef CONFIG_HAS_PASSTHROUGH
-#define need_iommu_pt_sync(d) (dom_iommu(d)->need_sync)
-#else
-#define need_iommu_pt_sync(d) false
-#endif
-
static inline bool is_vcpu_online(const struct vcpu *v)
{
return !test_bit(_VPF_down, &v->pause_flags);