--- a/include/kvm/arm_smmu_v3.h
+++ b/include/kvm/arm_smmu_v3.h
@@ -12,6 +12,9 @@
* Parameters from the trusted host:
* @mmio_addr base address of the SMMU registers
* @mmio_size size of the registers resource
+ * @caches_clean_on_power_on
+ *	true if it is safe to elide cache and TLB invalidation commands
+ *	while the SMMU is OFF
*
* Other members are filled and used at runtime by the SMMU driver.
*/
@@ -20,6 +23,7 @@ struct hyp_arm_smmu_v3_device {
phys_addr_t mmio_addr;
size_t mmio_size;
unsigned long features;
+ bool caches_clean_on_power_on;
void __iomem *base;
u32 cmdq_prod;
--- a/include/kvm/iommu.h
+++ b/include/kvm/iommu.h
@@ -3,6 +3,7 @@
#define __KVM_IOMMU_H
#include <asm/kvm_host.h>
+#include <kvm/power_domain.h>
#include <linux/io-pgtable.h>
/*
@@ -10,6 +11,7 @@
* @pgtable_cfg: page table configuration
* @domains: root domain table
* @nr_domains: max number of domains (exclusive)
+ * @power_domain: power domain information
*
* Other members are filled and used at runtime by the IOMMU driver.
*/
@@ -17,8 +19,10 @@ struct kvm_hyp_iommu {
struct io_pgtable_cfg pgtable_cfg;
void **domains;
size_t nr_domains;
+ struct kvm_power_domain power_domain;
struct io_pgtable_params *pgtable;
+ bool power_is_off;
};
struct kvm_hyp_iommu_memcache {
--- a/arch/arm64/kvm/hyp/nvhe/iommu/arm-smmu-v3.c
+++ b/arch/arm64/kvm/hyp/nvhe/iommu/arm-smmu-v3.c
@@ -83,6 +83,9 @@ static int smmu_add_cmd(struct hyp_arm_smmu_v3_device *smmu,
int idx = Q_IDX(smmu, smmu->cmdq_prod);
u64 *slot = smmu->cmdq_base + idx * CMDQ_ENT_DWORDS;
+ if (smmu->iommu.power_is_off)
+ return -EPIPE;
+
ret = smmu_wait_event(smmu, !smmu_cmdq_full(smmu));
if (ret)
return ret;
@@ -160,6 +163,9 @@ static int smmu_sync_ste(struct hyp_arm_smmu_v3_device *smmu, u32 sid)
.cfgi.leaf = true,
};
+ if (smmu->iommu.power_is_off && smmu->caches_clean_on_power_on)
+ return 0;
+
return smmu_send_cmd(smmu, &cmd);
}
@@ -394,6 +400,9 @@ static void smmu_tlb_flush_all(void *cookie)
.tlbi.vmid = data->domain_id,
};
+ if (smmu->iommu.power_is_off && smmu->caches_clean_on_power_on)
+ return;
+
WARN_ON(smmu_send_cmd(smmu, &cmd));
}
@@ -409,6 +418,9 @@ static void smmu_tlb_inv_range(struct kvm_iommu_tlb_cookie *data,
.tlbi.leaf = leaf,
};
+ if (smmu->iommu.power_is_off && smmu->caches_clean_on_power_on)
+ return;
+
/*
* There are no mappings at high addresses since we don't use TTB1, so
* no overflow possible.
--- a/arch/arm64/kvm/hyp/nvhe/iommu/iommu.c
+++ b/arch/arm64/kvm/hyp/nvhe/iommu/iommu.c
@@ -327,10 +327,46 @@ phys_addr_t kvm_iommu_iova_to_phys(pkvm_handle_t iommu_id,
return phys;
}
+static int iommu_power_on(struct kvm_power_domain *pd)
+{
+ struct kvm_hyp_iommu *iommu = container_of(pd, struct kvm_hyp_iommu,
+ power_domain);
+
+ /*
+ * We currently assume that the device retains its architectural state
+ * across power off, hence no save/restore.
+ */
+ hyp_spin_lock(&iommu_lock);
+ iommu->power_is_off = false;
+ hyp_spin_unlock(&iommu_lock);
+ return 0;
+}
+
+static int iommu_power_off(struct kvm_power_domain *pd)
+{
+ struct kvm_hyp_iommu *iommu = container_of(pd, struct kvm_hyp_iommu,
+ power_domain);
+
+ hyp_spin_lock(&iommu_lock);
+ iommu->power_is_off = true;
+ hyp_spin_unlock(&iommu_lock);
+ return 0;
+}
+
+static const struct kvm_power_domain_ops iommu_power_ops = {
+ .power_on = iommu_power_on,
+ .power_off = iommu_power_off,
+};
+
int kvm_iommu_init_device(struct kvm_hyp_iommu *iommu)
{
+ int ret;
void *domains;
+ ret = pkvm_init_power_domain(&iommu->power_domain, &iommu_power_ops);
+ if (ret)
+ return ret;
+
domains = iommu->domains;
iommu->domains = kern_hyp_va(domains);
return pkvm_create_mappings(iommu->domains, iommu->domains +
Add power domain ops to the hypervisor IOMMU driver. We currently make
these assumptions:

* The register state is retained across power off.
* The TLBs are clean on power on.
* Other privileged software (EL3 or SCP firmware) handles the power
  dependencies between the SMMU and its endpoints.

So we just need to make sure that the CPU does not touch the SMMU
registers while the SMMU is powered off.

Signed-off-by: Jean-Philippe Brucker <jean-philippe@linaro.org>
---
 include/kvm/arm_smmu_v3.h                   |  4 +++
 include/kvm/iommu.h                         |  4 +++
 arch/arm64/kvm/hyp/nvhe/iommu/arm-smmu-v3.c | 12 +++++++
 arch/arm64/kvm/hyp/nvhe/iommu/iommu.c       | 36 +++++++++++++++++++++
 4 files changed, 56 insertions(+)
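
For review purposes, here is a distilled sketch of the guarding pattern
the patch applies. It is not part of the patch: example_send_invalidation()
is a hypothetical helper that condenses the checks added individually to
smmu_add_cmd(), smmu_sync_ste(), smmu_tlb_flush_all() and
smmu_tlb_inv_range(). It assumes, as in this series, that the caller holds
iommu_lock, which serializes these paths against iommu_power_on() and
iommu_power_off():

/*
 * Illustrative only, not part of the patch. Shows how command
 * submission behaves across the SMMU power states.
 */
static int example_send_invalidation(struct hyp_arm_smmu_v3_device *smmu,
				     struct arm_smmu_cmdq_ent *cmd)
{
	if (smmu->iommu.power_is_off) {
		/*
		 * The SMMU powers back on with clean TLBs and
		 * configuration caches, so the invalidation can
		 * simply be skipped.
		 */
		if (smmu->caches_clean_on_power_on)
			return 0;

		/*
		 * Otherwise the command cannot be issued without
		 * touching powered-off registers: fail instead.
		 */
		return -EPIPE;
	}

	return smmu_send_cmd(smmu, cmd);
}

The asymmetry is deliberate: when caches_clean_on_power_on is set,
invalidation commands are elided entirely, whereas any command issued
while the device is off fails with -EPIPE so the caller notices that the
SMMU is unreachable (the TLB callbacks surface this via WARN_ON).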