@@ -71,14 +71,14 @@ static inline void vcpu_reset_hcr(struct kvm_vcpu *vcpu)
 	vcpu->arch.hcr_el2 = HCR_GUEST_FLAGS;
 	if (has_vhe() || has_hvhe())
 		vcpu->arch.hcr_el2 |= HCR_E2H;
-	if (cpus_have_const_cap(ARM64_HAS_RAS_EXTN)) {
+	if (cpus_have_final_cap(ARM64_HAS_RAS_EXTN)) {
 		/* route synchronous external abort exceptions to EL2 */
 		vcpu->arch.hcr_el2 |= HCR_TEA;
 		/* trap error record accesses */
 		vcpu->arch.hcr_el2 |= HCR_TERR;
 	}
 
-	if (cpus_have_const_cap(ARM64_HAS_STAGE2_FWB)) {
+	if (cpus_have_final_cap(ARM64_HAS_STAGE2_FWB)) {
 		vcpu->arch.hcr_el2 |= HCR_FWB;
 	} else {
 		/*
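
Note on the conversion pattern used throughout this patch: the two wrappers differ only in what happens before the system-wide cpucaps are finalized. A simplified sketch of the helper being removed here (not a verbatim copy; see <asm/cpufeature.h>), with a hypothetical sketch_ name:

  static __always_inline bool sketch_cpus_have_const_cap(int num)
  {
  	/* After finalization: a code-patched test, effectively free. */
  	if (system_capabilities_finalized())
  		return alternative_has_cap_unlikely(num);
  	/* Before finalization: a silent runtime test of the cpucap bitmap. */
  	return cpus_have_cap(num);
  }

Every call site converted in this patch runs strictly after finalization, so the fallback branch is dead code; cpus_have_final_cap() is functionally equivalent there and turns any too-early caller into a loud failure (see the sketch after the hyp_install_host_vector() hunk below).
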
@@ -1052,7 +1052,7 @@ static inline void kvm_init_host_cpu_context(struct kvm_cpu_context *cpu_ctxt)
 
 static inline bool kvm_system_needs_idmapped_vectors(void)
 {
-	return cpus_have_const_cap(ARM64_SPECTRE_V3A);
+	return cpus_have_final_cap(ARM64_SPECTRE_V3A);
 }
 
 static inline void kvm_arch_sync_events(struct kvm *kvm) {}
@@ -218,7 +218,7 @@ static inline void __clean_dcache_guest_page(void *va, size_t size)
 	 * faulting in pages. Furthermore, FWB implies IDC, so cleaning to
 	 * PoU is not required either in this case.
 	 */
-	if (cpus_have_const_cap(ARM64_HAS_STAGE2_FWB))
+	if (cpus_have_final_cap(ARM64_HAS_STAGE2_FWB))
 		return;
 
 	kvm_flush_dcache_to_poc(va, size);
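
The same reasoning covers the I-cache side: CTR_EL0.DIC means instruction fetch is coherent without explicit invalidation to PoU. A hypothetical sibling helper to illustrate the pattern (icache_inval_pou() is the real arm64 primitive; the function name is made up):

  static inline void sketch_invalidate_icache_guest_page(void *va, size_t size)
  {
  	/* With DIC, I-cache invalidation to PoU is not required. */
  	if (cpus_have_final_cap(ARM64_HAS_CACHE_DIC))
  		return;
  	icache_inval_pou((unsigned long)va, (unsigned long)va + size);
  }
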
@@ -284,7 +284,7 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
 		r = kvm_arm_pvtime_supported();
 		break;
 	case KVM_CAP_ARM_EL1_32BIT:
-		r = cpus_have_const_cap(ARM64_HAS_32BIT_EL1);
+		r = cpus_have_final_cap(ARM64_HAS_32BIT_EL1);
 		break;
 	case KVM_CAP_GUEST_DEBUG_HW_BPS:
 		r = get_num_brps();
@@ -296,7 +296,7 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
 		r = kvm_arm_support_pmu_v3();
 		break;
 	case KVM_CAP_ARM_INJECT_SERROR_ESR:
-		r = cpus_have_const_cap(ARM64_HAS_RAS_EXTN);
+		r = cpus_have_final_cap(ARM64_HAS_RAS_EXTN);
 		break;
 	case KVM_CAP_ARM_VM_IPA_SIZE:
 		r = get_kvm_ipa_limit();
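
These capability results are exactly what userspace sees through KVM_CHECK_EXTENSION. A minimal probe, assuming an arm64 host with /dev/kvm and kernel headers installed:

  #include <fcntl.h>
  #include <stdio.h>
  #include <sys/ioctl.h>
  #include <linux/kvm.h>

  int main(void)
  {
  	int kvm = open("/dev/kvm", O_RDWR);

  	if (kvm < 0)
  		return 1;
  	/* Prints 1 if the host has FEAT_RAS, 0 otherwise. */
  	printf("KVM_CAP_ARM_INJECT_SERROR_ESR: %d\n",
  	       ioctl(kvm, KVM_CHECK_EXTENSION, KVM_CAP_ARM_INJECT_SERROR_ESR));
  	return 0;
  }
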
@@ -1207,7 +1207,7 @@ static int kvm_vcpu_init_check_features(struct kvm_vcpu *vcpu,
 	if (!test_bit(KVM_ARM_VCPU_EL1_32BIT, &features))
 		return 0;
 
-	if (!cpus_have_const_cap(ARM64_HAS_32BIT_EL1))
+	if (!cpus_have_final_cap(ARM64_HAS_32BIT_EL1))
 		return -EINVAL;
 
 	/* MTE is incompatible with AArch32 */
@@ -1777,7 +1777,7 @@ static void hyp_install_host_vector(void)
 	 * Call initialization code, and switch to the full blown HYP code.
 	 * If the cpucaps haven't been finalized yet, something has gone very
 	 * wrong, and hyp will crash and burn when it uses any
-	 * cpus_have_const_cap() wrapper.
+	 * cpus_have_*_cap() wrapper.
 	 */
 	BUG_ON(!system_capabilities_finalized());
 	params = this_cpu_ptr_nvhe_sym(kvm_init_params);
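
This BUG_ON states the contract that makes the whole conversion safe: by the time KVM initializes, the cpucaps are final. cpus_have_final_cap() enforces the same contract per call site, roughly (simplified sketch, not a verbatim copy of the helper):

  static __always_inline bool sketch_cpus_have_final_cap(int num)
  {
  	if (system_capabilities_finalized())
  		return alternative_has_cap_unlikely(num);
  	/* Called too early: fail loudly rather than return a stale answer. */
  	BUG();
  }
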
@@ -2310,7 +2310,7 @@ static int __init init_hyp_mode(void)
 
 	if (is_protected_kvm_enabled()) {
 		if (IS_ENABLED(CONFIG_ARM64_PTR_AUTH_KERNEL) &&
-		    cpus_have_const_cap(ARM64_HAS_ADDRESS_AUTH))
+		    cpus_have_final_cap(ARM64_HAS_ADDRESS_AUTH))
 			pkvm_hyp_init_ptrauth();
 
 		init_cpu_logical_map();
@@ -815,7 +815,7 @@ int __kvm_arm_vcpu_get_events(struct kvm_vcpu *vcpu,
 			      struct kvm_vcpu_events *events)
 {
 	events->exception.serror_pending = !!(vcpu->arch.hcr_el2 & HCR_VSE);
-	events->exception.serror_has_esr = cpus_have_const_cap(ARM64_HAS_RAS_EXTN);
+	events->exception.serror_has_esr = cpus_have_final_cap(ARM64_HAS_RAS_EXTN);
 
 	if (events->exception.serror_pending && events->exception.serror_has_esr)
 		events->exception.serror_esr = vcpu_get_vsesr(vcpu);
@@ -837,7 +837,7 @@ int __kvm_arm_vcpu_set_events(struct kvm_vcpu *vcpu,
 	bool ext_dabt_pending = events->exception.ext_dabt_pending;
 
 	if (serror_pending && has_esr) {
-		if (!cpus_have_const_cap(ARM64_HAS_RAS_EXTN))
+		if (!cpus_have_final_cap(ARM64_HAS_RAS_EXTN))
 			return -EINVAL;
 
 		if (!((events->exception.serror_esr) & ~ESR_ELx_ISS_MASK))
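
For reference, the userspace side of this path injects an SError with a caller-chosen ESR via KVM_SET_VCPU_EVENTS. A hypothetical snippet, assuming an open vcpu fd on a host that reported KVM_CAP_ARM_INJECT_SERROR_ESR:

  struct kvm_vcpu_events ev = { 0 };

  ev.exception.serror_pending = 1;
  ev.exception.serror_has_esr = 1;
  ev.exception.serror_esr = 0x1;	/* must stay within ESR_ELx_ISS_MASK */
  if (ioctl(vcpu_fd, KVM_SET_VCPU_EVENTS, &ev))
  	perror("KVM_SET_VCPU_EVENTS");
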
@@ -664,7 +664,7 @@ u64 kvm_get_vtcr(u64 mmfr0, u64 mmfr1, u32 phys_shift)
 
 static bool stage2_has_fwb(struct kvm_pgtable *pgt)
 {
-	if (!cpus_have_const_cap(ARM64_HAS_STAGE2_FWB))
+	if (!cpus_have_final_cap(ARM64_HAS_STAGE2_FWB))
 		return false;
 
 	return !(pgt->flags & KVM_PGTABLE_S2_NOFWB);
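
FWB (FEAT_S2FWB) lets stage-2 force the final memory attributes to write-back cacheable regardless of the guest's stage-1 settings, which is what allows the page-table code to skip cache maintenance. An illustrative caller-side pattern (the helper name is made up; stage2_has_fwb() and kvm_flush_dcache_to_poc() are the real routines):

  static void sketch_prepare_guest_page(struct kvm_pgtable *pgt, void *va,
  				      size_t size)
  {
  	/* Without FWB, a guest running with caches off could see stale
  	 * data, so clean to the point of coherency before mapping. */
  	if (!stage2_has_fwb(pgt))
  		kvm_flush_dcache_to_poc(va, size);
  }
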
@@ -1578,7 +1578,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
 
 	if (device)
 		prot |= KVM_PGTABLE_PROT_DEVICE;
-	else if (cpus_have_const_cap(ARM64_HAS_CACHE_DIC))
+	else if (cpus_have_final_cap(ARM64_HAS_CACHE_DIC))
 		prot |= KVM_PGTABLE_PROT_X;
 
 	/*
@@ -207,7 +207,7 @@ static bool access_dcsw(struct kvm_vcpu *vcpu,
 	 * CPU left in the system, and certainly not from non-secure
 	 * software).
 	 */
-	if (!cpus_have_const_cap(ARM64_HAS_STAGE2_FWB))
+	if (!cpus_have_final_cap(ARM64_HAS_STAGE2_FWB))
 		kvm_set_way_flush(vcpu);
 
 	return true;
@@ -684,7 +684,7 @@ int vgic_v3_probe(const struct gic_kvm_info *info)
 	if (kvm_vgic_global_state.vcpu_base == 0)
 		kvm_info("disabling GICv2 emulation\n");
 
-	if (cpus_have_const_cap(ARM64_WORKAROUND_CAVIUM_30115)) {
+	if (cpus_have_final_cap(ARM64_WORKAROUND_CAVIUM_30115)) {
 		group0_trap = true;
 		group1_trap = true;
 	}
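
On parts with Cavium erratum 30115 the workaround is to trap the guest's group-0 and group-1 GIC system-register accesses and emulate them. Roughly what these flags feed into when a vcpu's GICv3 interface is set up (simplified sketch; ICH_HCR_TALL0/TALL1 are the real trap bits):

  if (group0_trap)
  	vcpu->arch.vgic_cpu.vgic_v3.vgic_hcr |= ICH_HCR_TALL0;
  if (group1_trap)
  	vcpu->arch.vgic_cpu.vgic_v3.vgic_hcr |= ICH_HCR_TALL1;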