@@ -459,6 +459,7 @@ static void arm_gicv3_class_init(ObjectClass *klass, void *data)
ARMGICv3Class *agc = ARM_GICV3_CLASS(klass);
agcc->post_load = arm_gicv3_post_load;
+ agcc->init_cpu_reginfo = gicv3_init_cpu_reginfo;
device_class_set_parent_realize(dc, arm_gic_realize, &agc->parent_realize);
}
@@ -3033,143 +3033,138 @@ static void gicv3_cpuif_el_change_hook(ARMCPU *cpu, void *opaque)
gicv3_cpuif_virt_irq_fiq_update(cs);
}
-void gicv3_init_cpuif(GICv3State *s)
+void gicv3_init_cpu_reginfo(CPUState *cs)
{
- /* Called from the GICv3 realize function; register our system
- * registers with the CPU
- */
- int i;
+ ARMCPU *cpu = ARM_CPU(cs);
+ GICv3CPUState *gcs = icc_cs_from_env(&cpu->env);
- for (i = 0; i < s->num_cpu; i++) {
- ARMCPU *cpu = ARM_CPU(qemu_get_cpu(i));
- GICv3CPUState *cs = &s->cpu[i];
+ /*
+ * If the CPU doesn't define a GICv3 configuration, probably because
+ * in real hardware it doesn't have one, then we use default values
+ * matching the one used by most Arm CPUs. This applies to:
+ * cpu->gic_num_lrs
+ * cpu->gic_vpribits
+ * cpu->gic_vprebits
+ * cpu->gic_pribits
+ */
- /*
- * If the CPU doesn't define a GICv3 configuration, probably because
- * in real hardware it doesn't have one, then we use default values
- * matching the one used by most Arm CPUs. This applies to:
- * cpu->gic_num_lrs
- * cpu->gic_vpribits
- * cpu->gic_vprebits
- * cpu->gic_pribits
- */
+ /*
+ * Note that we can't just use the GICv3CPUState as an opaque pointer
+ * in define_arm_cp_regs_with_opaque(), because when we're called back
+ * it might be with code translated by CPU 0 but run by CPU 1, in
+ * which case we'd get the wrong value.
+ * So instead we define the regs with no ri->opaque info, and
+ * get back to the GICv3CPUState from the CPUARMState.
+ *
+ * These CP regs callbacks can be called from either TCG or HVF code.
+ */
+ define_arm_cp_regs(cpu, gicv3_cpuif_reginfo);
- /* Note that we can't just use the GICv3CPUState as an opaque pointer
- * in define_arm_cp_regs_with_opaque(), because when we're called back
- * it might be with code translated by CPU 0 but run by CPU 1, in
- * which case we'd get the wrong value.
- * So instead we define the regs with no ri->opaque info, and
- * get back to the GICv3CPUState from the CPUARMState.
- *
- * These CP regs callbacks can be called from either TCG or HVF code.
- */
- define_arm_cp_regs(cpu, gicv3_cpuif_reginfo);
+ /*
+ * If the CPU implements FEAT_NMI and FEAT_GICv3 it must also
+ * implement FEAT_GICv3_NMI, which is the CPU interface part
+ * of NMI support. This is distinct from whether the GIC proper
+ * (redistributors and distributor) have NMI support. In QEMU
+ * that is a property of the GIC device in gcs->gic->nmi_support;
+ * gcs->nmi_support indicates the CPU interface's support.
+ */
+ if (cpu_isar_feature(aa64_nmi, cpu)) {
+ gcs->nmi_support = true;
+ define_arm_cp_regs(cpu, gicv3_cpuif_gicv3_nmi_reginfo);
+ }
- /*
- * If the CPU implements FEAT_NMI and FEAT_GICv3 it must also
- * implement FEAT_GICv3_NMI, which is the CPU interface part
- * of NMI support. This is distinct from whether the GIC proper
- * (redistributors and distributor) have NMI support. In QEMU
- * that is a property of the GIC device in s->nmi_support;
- * cs->nmi_support indicates the CPU interface's support.
- */
- if (cpu_isar_feature(aa64_nmi, cpu)) {
- cs->nmi_support = true;
- define_arm_cp_regs(cpu, gicv3_cpuif_gicv3_nmi_reginfo);
- }
+ /*
+ * The CPU implementation specifies the number of supported
+ * bits of physical priority. For backwards compatibility
+ * of migration, we have a compat property that forces use
+ * of 8 priority bits regardless of what the CPU really has.
+ */
+ if (gcs->gic->force_8bit_prio) {
+ gcs->pribits = 8;
+ } else {
+ gcs->pribits = cpu->gic_pribits ?: 5;
+ }
- /*
- * The CPU implementation specifies the number of supported
- * bits of physical priority. For backwards compatibility
- * of migration, we have a compat property that forces use
- * of 8 priority bits regardless of what the CPU really has.
- */
- if (s->force_8bit_prio) {
- cs->pribits = 8;
- } else {
- cs->pribits = cpu->gic_pribits ?: 5;
- }
+ /*
+ * The GICv3 has separate ID register fields for virtual priority
+ * and preemption bit values, but only a single ID register field
+ * for the physical priority bits. The preemption bit count is
+ * always the same as the priority bit count, except that 8 bits
+ * of priority means 7 preemption bits. We precalculate the
+ * preemption bits because it simplifies the code and makes the
+ * parallels between the virtual and physical bits of the GIC
+ * a bit clearer.
+ */
+ gcs->prebits = gcs->pribits;
+ if (gcs->prebits == 8) {
+ gcs->prebits--;
+ }
+ /*
+ * Check that CPU code defining pribits didn't violate
+ * architectural constraints our implementation relies on.
+ */
+ g_assert(gcs->pribits >= 4 && gcs->pribits <= 8);
- /*
- * The GICv3 has separate ID register fields for virtual priority
- * and preemption bit values, but only a single ID register field
- * for the physical priority bits. The preemption bit count is
- * always the same as the priority bit count, except that 8 bits
- * of priority means 7 preemption bits. We precalculate the
- * preemption bits because it simplifies the code and makes the
- * parallels between the virtual and physical bits of the GIC
- * a bit clearer.
- */
- cs->prebits = cs->pribits;
- if (cs->prebits == 8) {
- cs->prebits--;
- }
- /*
- * Check that CPU code defining pribits didn't violate
- * architectural constraints our implementation relies on.
- */
- g_assert(cs->pribits >= 4 && cs->pribits <= 8);
+ /*
+ * gicv3_cpuif_reginfo[] defines ICC_AP*R0_EL1; add definitions
+ * for ICC_AP*R{1,2,3}_EL1 if the prebits value requires them.
+ */
+ if (gcs->prebits >= 6) {
+ define_arm_cp_regs(cpu, gicv3_cpuif_icc_apxr1_reginfo);
+ }
+ if (gcs->prebits == 7) {
+ define_arm_cp_regs(cpu, gicv3_cpuif_icc_apxr23_reginfo);
+ }
- /*
- * gicv3_cpuif_reginfo[] defines ICC_AP*R0_EL1; add definitions
- * for ICC_AP*R{1,2,3}_EL1 if the prebits value requires them.
- */
- if (cs->prebits >= 6) {
- define_arm_cp_regs(cpu, gicv3_cpuif_icc_apxr1_reginfo);
- }
- if (cs->prebits == 7) {
- define_arm_cp_regs(cpu, gicv3_cpuif_icc_apxr23_reginfo);
- }
+ if (arm_feature(&cpu->env, ARM_FEATURE_EL2)) {
+ int j;
- if (arm_feature(&cpu->env, ARM_FEATURE_EL2)) {
- int j;
+ gcs->num_list_regs = cpu->gic_num_lrs ?: 4;
+ gcs->vpribits = cpu->gic_vpribits ?: 5;
+ gcs->vprebits = cpu->gic_vprebits ?: 5;
- cs->num_list_regs = cpu->gic_num_lrs ?: 4;
- cs->vpribits = cpu->gic_vpribits ?: 5;
- cs->vprebits = cpu->gic_vprebits ?: 5;
- /* Check against architectural constraints: getting these
- * wrong would be a bug in the CPU code defining these,
- * and the implementation relies on them holding.
- */
- g_assert(cs->vprebits <= cs->vpribits);
- g_assert(cs->vprebits >= 5 && cs->vprebits <= 7);
- g_assert(cs->vpribits >= 5 && cs->vpribits <= 8);
+ /*
+ * Check against architectural constraints: getting these
+ * wrong would be a bug in the CPU code defining these,
+ * and the implementation relies on them holding.
+ */
+ g_assert(gcs->vprebits <= gcs->vpribits);
+ g_assert(gcs->vprebits >= 5 && gcs->vprebits <= 7);
+ g_assert(gcs->vpribits >= 5 && gcs->vpribits <= 8);
- define_arm_cp_regs(cpu, gicv3_cpuif_hcr_reginfo);
+ define_arm_cp_regs(cpu, gicv3_cpuif_hcr_reginfo);
- for (j = 0; j < cs->num_list_regs; j++) {
- /* Note that the AArch64 LRs are 64-bit; the AArch32 LRs
- * are split into two cp15 regs, LR (the low part, with the
- * same encoding as the AArch64 LR) and LRC (the high part).
- */
- ARMCPRegInfo lr_regset[] = {
- { .name = "ICH_LRn_EL2", .state = ARM_CP_STATE_BOTH,
- .opc0 = 3, .opc1 = 4, .crn = 12,
- .crm = 12 + (j >> 3), .opc2 = j & 7,
- .type = ARM_CP_IO | ARM_CP_NO_RAW,
- .nv2_redirect_offset = 0x400 + 8 * j,
- .access = PL2_RW,
- .readfn = ich_lr_read,
- .writefn = ich_lr_write,
- },
- { .name = "ICH_LRCn_EL2", .state = ARM_CP_STATE_AA32,
- .cp = 15, .opc1 = 4, .crn = 12,
- .crm = 14 + (j >> 3), .opc2 = j & 7,
- .type = ARM_CP_IO | ARM_CP_NO_RAW,
- .access = PL2_RW,
- .readfn = ich_lr_read,
- .writefn = ich_lr_write,
- },
- };
- define_arm_cp_regs(cpu, lr_regset);
- }
- if (cs->vprebits >= 6) {
- define_arm_cp_regs(cpu, gicv3_cpuif_ich_apxr1_reginfo);
- }
- if (cs->vprebits == 7) {
- define_arm_cp_regs(cpu, gicv3_cpuif_ich_apxr23_reginfo);
- }
+ for (j = 0; j < gcs->num_list_regs; j++) {
+ /*
+ * Note that the AArch64 LRs are 64-bit; the AArch32 LRs
+ * are split into two cp15 regs, LR (the low part, with the
+ * same encoding as the AArch64 LR) and LRC (the high part).
+ */
+ ARMCPRegInfo lr_regset[] = {
+ { .name = "ICH_LRn_EL2", .state = ARM_CP_STATE_BOTH,
+ .opc0 = 3, .opc1 = 4, .crn = 12,
+ .crm = 12 + (j >> 3), .opc2 = j & 7,
+ .type = ARM_CP_IO | ARM_CP_NO_RAW,
+ .nv2_redirect_offset = 0x400 + 8 * j,
+ .access = PL2_RW,
+ .readfn = ich_lr_read,
+ .writefn = ich_lr_write,
+ },
+ { .name = "ICH_LRCn_EL2", .state = ARM_CP_STATE_AA32,
+ .cp = 15, .opc1 = 4, .crn = 12,
+ .crm = 14 + (j >> 3), .opc2 = j & 7,
+ .type = ARM_CP_IO | ARM_CP_NO_RAW,
+ .access = PL2_RW,
+ .readfn = ich_lr_read,
+ .writefn = ich_lr_write,
+ },
+ };
+ define_arm_cp_regs(cpu, lr_regset);
+ }
+ if (gcs->vprebits >= 6) {
+ define_arm_cp_regs(cpu, gicv3_cpuif_ich_apxr1_reginfo);
+ }
+ if (gcs->vprebits == 7) {
+ define_arm_cp_regs(cpu, gicv3_cpuif_ich_apxr23_reginfo);
+ }
}
if (tcg_enabled() || qtest_enabled()) {
/*
@@ -3177,7 +3172,7 @@ void gicv3_init_cpuif(GICv3State *s)
* state only changes on EL changes involving EL2 or EL3, so for
* the non-TCG case this is OK, as EL2 and EL3 can't exist.
*/
- arm_register_el_change_hook(cpu, gicv3_cpuif_el_change_hook, cs);
+ arm_register_el_change_hook(cpu, gicv3_cpuif_el_change_hook, gcs);
} else {
assert(!arm_feature(&cpu->env, ARM_FEATURE_EL2));
assert(!arm_feature(&cpu->env, ARM_FEATURE_EL3));
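As a worked illustration of the pribits/prebits relationship established in the hunks above: the CPU interface needs one 32-bit active-priorities register per 32 preemption levels, i.e. 2^(prebits - 5) of them, which is why ICC_AP*R1_EL1 is only defined when prebits >= 6 and ICC_AP*R{2,3}_EL1 only when prebits == 7. A minimal sketch, assuming the ranges asserted above; gicv3_example_num_aprs() is illustrative only and not part of this patch:

    /*
     * pribits = 5 -> prebits = 5 -> 1 APR  (ICC_AP*R0_EL1 only)
     * pribits = 6 -> prebits = 6 -> 2 APRs (adds ICC_AP*R1_EL1)
     * pribits = 8 -> prebits = 7 -> 4 APRs (adds ICC_AP*R{2,3}_EL1)
     */
    static inline int gicv3_example_num_aprs(int prebits)
    {
        /* At least one APR; one doubling for each preemption bit above 5 */
        return prebits <= 5 ? 1 : 1 << (prebits - 5);
    }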
@@ -20,3 +20,16 @@ void gicv3_set_gicv3state(CPUState *cpu, GICv3CPUState *s)
env->gicv3state = (void *)s;
};
+
+void gicv3_init_cpuif(GICv3State *s)
+{
+ ARMGICv3CommonClass *agcc = ARM_GICV3_COMMON_GET_CLASS(s);
+ int i;
+
+ /* Define and register the CPU interface system registers with each accessible vCPU */
+ for (i = 0; i < s->num_cpu; i++) {
+ if (gicv3_cpu_accessible(&s->cpu[i])) {
+ agcc->init_cpu_reginfo(s->cpu[i].cpu);
+ }
+ }
+}
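For context on the "no ri->opaque" scheme described in the cpuif hunk above: gicv3_set_gicv3state() stores the per-vCPU GICv3CPUState pointer in env->gicv3state, and the register callbacks recover it from the CPUARMState they are invoked with. A minimal sketch of that round trip; icc_cs_from_env() is essentially the existing helper in hw/intc/arm_gicv3_cpuif.c, while icc_example_read() is a hypothetical callback shown only for shape:

    static inline GICv3CPUState *icc_cs_from_env(CPUARMState *env)
    {
        return env->gicv3state;
    }

    /*
     * A sysreg readfn gets the per-vCPU state from env rather than from
     * ri->opaque, so it is correct whichever vCPU executes the access.
     */
    static uint64_t icc_example_read(CPUARMState *env, const ARMCPRegInfo *ri)
    {
        GICv3CPUState *gcs = icc_cs_from_env(env);

        return gcs->pribits;  /* placeholder: return some per-vCPU value */
    }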
@@ -796,6 +796,10 @@ static void vm_change_state_handler(void *opaque, bool running,
}
}
+static void kvm_gicv3_init_cpu_reginfo(CPUState *cs)
+{
+ define_arm_cp_regs(ARM_CPU(cs), gicv3_cpuif_reginfo);
+}
static void kvm_arm_gicv3_realize(DeviceState *dev, Error **errp)
{
@@ -831,13 +835,8 @@ static void kvm_arm_gicv3_realize(DeviceState *dev, Error **errp)
gicv3_init_irqs_and_mmio(s, kvm_arm_gicv3_set_irq, NULL);
- for (i = 0; i < s->num_cpu; i++) {
- ARMCPU *cpu = ARM_CPU(qemu_get_cpu(i));
-
- if (gicv3_cpu_accessible(&s->cpu[i])) {
- define_arm_cp_regs(cpu, gicv3_cpuif_reginfo);
- }
- }
+ /* Initialize the vCPU interface registers */
+ gicv3_init_cpuif(s);
/* Try to create the device via the device control API */
s->dev_fd = kvm_create_device(kvm_state, KVM_DEV_TYPE_ARM_VGIC_V3, false);
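The net effect of this hunk, together with the common-code gicv3_init_cpuif() added above, is the following dispatch for the KVM backend (a sketch of the resulting call flow, not new code):

    /*
     * kvm_arm_gicv3_realize()
     *   -> gicv3_init_cpuif(s)                      common: loop over vCPUs,
     *                                               skipping inaccessible ones
     *     -> agcc->init_cpu_reginfo(s->cpu[i].cpu)  ARMGICv3CommonClass hook
     *       -> kvm_gicv3_init_cpu_reginfo(cs)       defines gicv3_cpuif_reginfo only
     */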
@@ -923,6 +922,7 @@ static void kvm_arm_gicv3_class_init(ObjectClass *klass, void *data)
agcc->pre_save = kvm_arm_gicv3_get;
agcc->post_load = kvm_arm_gicv3_put;
+ agcc->init_cpu_reginfo = kvm_gicv3_init_cpu_reginfo;
device_class_set_parent_realize(dc, kvm_arm_gicv3_realize,
&kgc->parent_realize);
resettable_class_set_parent_phases(rc, NULL, kvm_arm_gicv3_reset_hold, NULL,
@@ -722,6 +722,7 @@ void gicv3_redist_vinvall(GICv3CPUState *cs, uint64_t vptaddr);
void gicv3_redist_send_sgi(GICv3CPUState *cs, int grp, int irq, bool ns);
void gicv3_init_cpuif(GICv3State *s);
+void gicv3_init_cpu_reginfo(CPUState *cs);
/**
* gicv3_cpuif_update:
@@ -339,6 +339,7 @@ struct ARMGICv3CommonClass {
void (*pre_save)(GICv3State *s);
void (*post_load)(GICv3State *s);
+ void (*init_cpu_reginfo)(CPUState *cs);
};
void gicv3_init_irqs_and_mmio(GICv3State *s, qemu_irq_handler handler,
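With init_cpu_reginfo in ARMGICv3CommonClass, every GICv3 backend follows the same pattern: implement a per-vCPU reginfo hook and install it in class_init, exactly as the TCG and KVM models do above. A minimal sketch for a hypothetical backend; the foo_* names are illustrative and not part of this patch:

    static void foo_gicv3_init_cpu_reginfo(CPUState *cs)
    {
        /* Define whatever CPU interface sysregs this backend needs for one vCPU */
        define_arm_cp_regs(ARM_CPU(cs), gicv3_cpuif_reginfo);
    }

    static void foo_gicv3_class_init(ObjectClass *klass, void *data)
    {
        ARMGICv3CommonClass *agcc = ARM_GICV3_COMMON_CLASS(klass);

        agcc->init_cpu_reginfo = foo_gicv3_init_cpu_reginfo;
        /* ... pre_save/post_load and parent_realize wiring as usual ... */
    }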