@@ -181,6 +181,7 @@ ifeq ($(ARCH), arm64)
OBJS += arm/aarch64/arm-cpu.o
OBJS += arm/aarch64/kvm-cpu.o
OBJS += arm/aarch64/kvm.o
+ OBJS += arm/aarch64/spe.o
ARCH_INCLUDE := $(HDRS_ARM_COMMON)
ARCH_INCLUDE += -Iarm/aarch64/include
@@ -1,6 +1,7 @@
#include "kvm/fdt.h"
#include "kvm/kvm.h"
#include "kvm/kvm-cpu.h"
+#include "kvm/spe.h"
#include "kvm/util.h"
#include "arm-common/gic.h"
@@ -17,6 +18,7 @@ static void generate_fdt_nodes(void *fdt, struct kvm *kvm)
gic__generate_fdt_nodes(fdt, kvm->cfg.arch.irqchip);
timer__generate_fdt_nodes(fdt, kvm, timer_interrupts);
pmu__generate_fdt_nodes(fdt, kvm);
+ spe__generate_fdt_nodes(fdt, kvm);
}
static int arm_cpu__vcpu_init(struct kvm_cpu *vcpu)
@@ -6,6 +6,8 @@
"Run AArch32 guest"), \
OPT_BOOLEAN('\0', "pmu", &(cfg)->has_pmuv3, \
"Create PMUv3 device"), \
+ OPT_BOOLEAN('\0', "spe", &(cfg)->has_spe, \
+ "Create SPE device"), \
OPT_U64('\0', "kaslr-seed", &(cfg)->kaslr_seed, \
"Specify random seed for Kernel Address Space " \
"Layout Randomization (KASLR)"),
new file mode 100644
@@ -0,0 +1,7 @@
+#ifndef KVM__KVM_SPE_H
+#define KVM__KVM_SPE_H
+
+/* PPI (INTID 16-31) used for the SPE buffer management interrupt. */
+#define KVM_ARM_SPE_IRQ 21
+
+/*
+ * Forward declaration so this header is self-contained: without it,
+ * including spe.h before kvm/kvm.h would give 'struct kvm' prototype
+ * scope and a conflicting-types warning at the definition.
+ */
+struct kvm;
+
+/* Emit the SPE node into the guest FDT; no-op unless --spe was given. */
+void spe__generate_fdt_nodes(void *fdt, struct kvm *kvm);
+#endif /* KVM__KVM_SPE_H */
@@ -140,6 +140,11 @@ void kvm_cpu__select_features(struct kvm *kvm, struct kvm_vcpu_init *init)
/* Enable SVE if available */
if (kvm__supports_extension(kvm, KVM_CAP_ARM_SVE))
init->features[0] |= 1UL << KVM_ARM_VCPU_SVE;
+
+ /* Enable SPE if requested */
+ if (kvm->cfg.arch.has_spe &&
+ kvm__supports_extension(kvm, KVM_CAP_ARM_SPE))
+ init->features[0] |= 1UL << KVM_ARM_VCPU_SPE;
}
int kvm_cpu__configure_features(struct kvm_cpu *vcpu)
@@ -1,4 +1,5 @@
#include "kvm/kvm.h"
+#include "kvm/spe.h"
#include <asm/image.h>
#include <sys/mman.h>
@@ -49,5 +50,17 @@ fail:
void kvm__arch_delete_ram(struct kvm *kvm)
{
+	/*
+	 * If SPE locked guest memory at init time (see spe__init), ask KVM
+	 * to drop every lock before the backing pages are unmapped below.
+	 */
+	struct kvm_enable_cap unlock_mem = {
+		.cap = KVM_CAP_ARM_LOCK_USER_MEMORY_REGION,
+		.flags = KVM_ARM_LOCK_USER_MEMORY_REGION_FLAGS_UNLOCK,
+		.args[1] = KVM_ARM_UNLOCK_MEM_ALL,
+	};
+	int ret;
+
+	if (kvm->cfg.arch.has_spe) {
+		ret = ioctl(kvm->vm_fd, KVM_ENABLE_CAP, &unlock_mem);
+		if (ret == -1)
+			/* Best effort: report, but still release the RAM. */
+			perror("KVM_CAP_ARM_LOCK_USER_MEMORY_REGION");
+	}
	munmap(kvm->arch.ram_alloc_start, kvm->arch.ram_alloc_size);
}
new file mode 100644
@@ -0,0 +1,139 @@
+#include <stdio.h>
+
+#include <sys/resource.h>
+
+#include <linux/kvm.h>
+#include <linux/list.h>
+
+#include "kvm/fdt.h"
+#include "kvm/kvm.h"
+#include "kvm/kvm-cpu.h"
+#include "kvm/spe.h"
+#include "kvm/util.h"
+
+#include "arm-common/gic.h"
+
+/*
+ * Describe the Statistical Profiling Extension in the guest device tree.
+ * Generates nothing unless the user requested SPE with --spe.
+ */
+void spe__generate_fdt_nodes(void *fdt, struct kvm *kvm)
+{
+	const char compatible[] = "arm,statistical-profiling-extension-v1";
+	int irq = KVM_ARM_SPE_IRQ;
+
+	/*
+	 * Shift 1UL, not 1: a 32-bit '1 << nrcpus' is undefined behaviour
+	 * once nrcpus >= 31. The PPI CPU mask truncates the result anyway.
+	 */
+	u32 cpu_mask = (u32)(((1UL << kvm->nrcpus) - 1) << GIC_FDT_IRQ_PPI_CPU_SHIFT)
+		       & GIC_FDT_IRQ_PPI_CPU_MASK;
+	u32 irq_prop[] = {
+		cpu_to_fdt32(GIC_FDT_IRQ_TYPE_PPI),
+		cpu_to_fdt32(irq - 16),	/* FDT encodes PPIs as INTID - 16 */
+		cpu_to_fdt32(cpu_mask | IRQ_TYPE_LEVEL_HIGH),
+	};
+
+	if (!kvm->cfg.arch.has_spe)
+		return;
+
+	_FDT(fdt_begin_node(fdt, "spe"));
+	_FDT(fdt_property(fdt, "compatible", compatible, sizeof(compatible)));
+	_FDT(fdt_property(fdt, "interrupts", irq_prop, sizeof(irq_prop)));
+	_FDT(fdt_end_node(fdt));
+}
+
+/*
+ * SPE needs guest RAM locked in place; raise RLIMIT_MEMLOCK to at least
+ * the guest RAM size so the lock ioctl in spe__init can succeed. Best
+ * effort: failures are reported but not fatal, the lock ioctl delivers
+ * the definitive error.
+ */
+static void spe_try_increase_mlock_limit(struct kvm *kvm)
+{
+	u64 size = kvm->ram_size;
+	struct rlimit mlock_limit, new_limit;
+
+	if (getrlimit(RLIMIT_MEMLOCK, &mlock_limit)) {
+		perror("getrlimit(RLIMIT_MEMLOCK)");
+		return;
+	}
+
+	if (mlock_limit.rlim_cur > size)
+		return;
+
+	new_limit.rlim_cur = size;
+	new_limit.rlim_max = max((rlim_t)size, mlock_limit.rlim_max);
+	/*
+	 * Raising the hard limit requires CAP_SYS_RESOURCE. Report the
+	 * failure instead of ignoring it, otherwise the eventual
+	 * die_perror() on the lock ioctl is hard to diagnose.
+	 */
+	if (setrlimit(RLIMIT_MEMLOCK, &new_limit))
+		perror("setrlimit(RLIMIT_MEMLOCK)");
+}
+
+/*
+ * Probe for, then set, one SPE device attribute on a VCPU. Returns the
+ * raw ioctl result: 0 on success, -1 (with a perror message) otherwise.
+ */
+static int spe_set_vcpu_attr(struct kvm_cpu *vcpu,
+			     struct kvm_device_attr *attr)
+{
+	int fd = vcpu->vcpu_fd;
+	int ret;
+
+	/* Make sure the kernel knows this attribute before setting it. */
+	ret = ioctl(fd, KVM_HAS_DEVICE_ATTR, attr);
+	if (ret == -1) {
+		perror("SPE VCPU KVM_HAS_DEVICE_ATTR");
+		return ret;
+	}
+
+	ret = ioctl(fd, KVM_SET_DEVICE_ATTR, attr);
+	if (ret == -1)
+		perror("SPE VCPU KVM_SET_DEVICE_ATTR");
+
+	return ret;
+}
+
+/*
+ * One-time SPE setup, registered via last_init so it runs after the
+ * VCPUs and memory banks exist: lock guest RAM through
+ * KVM_CAP_ARM_LOCK_USER_MEMORY_REGION, then configure the SPE IRQ and
+ * init attributes on every VCPU. Returns 0 on success or when SPE was
+ * not requested; -EINVAL if a VCPU attribute cannot be set.
+ */
+static int spe__init(struct kvm *kvm)
+{
+	struct kvm_mem_bank *bank;
+	struct kvm_enable_cap lock_mem = {
+		.cap = KVM_CAP_ARM_LOCK_USER_MEMORY_REGION,
+		.flags = KVM_ARM_LOCK_USER_MEMORY_REGION_FLAGS_LOCK,
+		.args[1] = KVM_ARM_LOCK_MEM_READ | KVM_ARM_LOCK_MEM_WRITE,
+	};
+	int i, irq_num, ret;
+	u64 slot;
+
+	if (!kvm->cfg.arch.has_spe)
+		return 0;
+
+	if (!kvm__supports_extension(kvm, KVM_CAP_ARM_LOCK_USER_MEMORY_REGION))
+		die("KVM_CAP_ARM_LOCK_USER_MEMORY_REGION not supported");
+
+	/* Locate the memslot that backs the main RAM bank. */
+	slot = (u64)-1;
+	list_for_each_entry(bank, &kvm->mem_banks, list) {
+		if (bank->host_addr == kvm->ram_start) {
+			BUG_ON(bank->type != KVM_MEM_TYPE_RAM);
+			slot = bank->slot;
+			break;
+		}
+	}
+
+	if (slot == (u64)-1)
+		die("RAM bank not found");
+
+	/* Best effort; the lock ioctl below reports the real failure. */
+	spe_try_increase_mlock_limit(kvm);
+
+	lock_mem.args[0] = slot;
+	ret = ioctl(kvm->vm_fd, KVM_ENABLE_CAP, &lock_mem);
+	if (ret == -1)
+		die_perror("KVM_CAP_ARM_LOCK_USER_MEMORY_REGION");
+
+	/*
+	 * For each VCPU: set the SPE interrupt first, then finalize with
+	 * SPE_INIT. NOTE(review): this ordering looks required by the KVM
+	 * SPE_CTRL API (IRQ before INIT) — confirm against the kernel docs.
+	 */
+	irq_num = KVM_ARM_SPE_IRQ;
+	for (i = 0; i < kvm->nrcpus; i++) {
+		struct kvm_device_attr spe_attr;
+
+		spe_attr = (struct kvm_device_attr){
+			.group = KVM_ARM_VCPU_SPE_CTRL,
+			/* .addr is a userspace pointer to the IRQ number. */
+			.addr = (u64)(unsigned long)&irq_num,
+			.attr = KVM_ARM_VCPU_SPE_IRQ,
+		};
+
+		ret = spe_set_vcpu_attr(kvm->cpus[i], &spe_attr);
+		if (ret == -1)
+			return -EINVAL;
+
+		spe_attr = (struct kvm_device_attr){
+			.group = KVM_ARM_VCPU_SPE_CTRL,
+			.attr = KVM_ARM_VCPU_SPE_INIT,
+		};
+
+		ret = spe_set_vcpu_attr(kvm->cpus[i], &spe_attr);
+		if (ret == -1)
+			return -EINVAL;
+	}
+
+	return 0;
+}
+last_init(spe__init);
@@ -9,6 +9,7 @@ struct kvm_config_arch {
bool virtio_trans_pci;
bool aarch32_guest;
bool has_pmuv3;
+ bool has_spe;
u64 kaslr_seed;
enum irqchip_type irqchip;
u64 fw_addr;
@@ -54,6 +54,10 @@ struct kvm_cpu *kvm_cpu__arch_init(struct kvm *kvm, unsigned long cpu_id)
!kvm__supports_extension(kvm, KVM_CAP_ARM_PMU_V3))
die("PMUv3 is not supported");
+ if (kvm->cfg.arch.has_spe &&
+ !kvm__supports_extension(kvm, KVM_CAP_ARM_SPE))
+ die("SPE is not supported");
+
vcpu = calloc(1, sizeof(struct kvm_cpu));
if (!vcpu)
return NULL;