@@ -383,7 +383,7 @@ struct kvm_mmu {
* with PFEC.RSVD replaced by ACC_USER_MASK from the page tables.
* Each domain has 2 bits which are ANDed with AD and WD from PKRU.
*/
- u32 pkru_mask;
+ u32 pkr_mask;
u64 *pae_root;
u64 *lm_root;
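
The struct comment above gives the layout this rename preserves: the mask packs 16 two-bit domains, one per page-fault error-code pattern, with the low bit of each pair gating PKRU.AD and the high bit gating PKRU.WD. A minimal standalone sketch of that indexing (the helper name is illustrative, not kernel API; the index is assumed to be PFEC bits [4:1] with the RSVD slot already replaced by the PTE's U bit):

    #include <stdint.h>

    /* Domain i of the cached mask occupies bits [2*i+1:2*i]. */
    static inline uint32_t pkr_domain_bits(uint32_t pkr_mask, unsigned int index)
    {
            return (pkr_mask >> (index * 2)) & 3; /* bit 0 = AD, bit 1 = WD */
    }
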
@@ -190,8 +190,8 @@ static inline u8 permission_fault(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
u32 errcode = PFERR_PRESENT_MASK;
WARN_ON(pfec & (PFERR_PK_MASK | PFERR_RSVD_MASK));
- if (unlikely(mmu->pkru_mask)) {
- u32 pkru_bits, offset;
+ if (unlikely(mmu->pkr_mask)) {
+ u32 pkr_bits, offset;
/*
* PKRU defines 32 bits, there are 16 domains and 2
@@ -199,15 +199,15 @@ static inline u8 permission_fault(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
* index of the protection domain, so pte_pkey * 2 is
* is the index of the first bit for the domain.
*/
- pkru_bits = (vcpu->arch.pkru >> (pte_pkey * 2)) & 3;
+ pkr_bits = (vcpu->arch.pkru >> (pte_pkey * 2)) & 3;
/* clear present bit, replace PFEC.RSVD with ACC_USER_MASK. */
offset = (pfec & ~1) +
((pte_access & PT_USER_MASK) << (PFERR_RSVD_BIT - PT_USER_SHIFT));
- pkru_bits &= mmu->pkru_mask >> offset;
- errcode |= -pkru_bits & PFERR_PK_MASK;
- fault |= (pkru_bits != 0);
+ pkr_bits &= mmu->pkr_mask >> offset;
+ errcode |= -pkr_bits & PFERR_PK_MASK;
+ fault |= (pkr_bits != 0);
}
return -(u32)fault & errcode;
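
Read on its own, the renamed check above amounts to a standalone predicate. A hedged sketch, with the free-function form and its name invented for illustration and the constants mirroring the x86 PFEC/PTE bit layout used elsewhere in this file:

    #include <stdbool.h>
    #include <stdint.h>

    #define PFERR_PRESENT_MASK (1u << 0)
    #define PFERR_RSVD_BIT     3
    #define PT_USER_SHIFT      2
    #define PT_USER_MASK       (1u << PT_USER_SHIFT)

    static bool pk_fault(uint32_t pkr_mask, uint32_t pkru, uint32_t pfec,
                         uint32_t pte_access, unsigned int pte_pkey)
    {
            /* Two PKRU bits (AD, WD) for this page's protection key. */
            uint32_t pkr_bits = (pkru >> (pte_pkey * 2)) & 3;
            /* Drop PFEC.P and put the PTE's U bit in the RSVD slot. */
            uint32_t offset = (pfec & ~PFERR_PRESENT_MASK) +
                              ((pte_access & PT_USER_MASK) <<
                               (PFERR_RSVD_BIT - PT_USER_SHIFT));

            /* Keep only the PKRU bits the cached mask says apply here. */
            pkr_bits &= pkr_mask >> offset;
            return pkr_bits != 0;
    }

Note the trick in the original: since pkr_bits is at most 3, -pkr_bits is all-ones exactly when pkr_bits is nonzero, so "errcode |= -pkr_bits & PFERR_PK_MASK" sets the PK error-code bit without a branch.
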
@@ -4301,20 +4301,20 @@ static void update_permission_bitmask(struct kvm_vcpu *vcpu,
* away both AD and WD. For all reads or if the last condition holds, WD
* only will be masked away.
*/
-static void update_pkru_bitmask(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
+static void update_pkr_bitmask(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
bool ept)
{
unsigned bit;
bool wp;
if (ept) {
- mmu->pkru_mask = 0;
+ mmu->pkr_mask = 0;
return;
}
/* PKEY is enabled only if CR4.PKE and EFER.LMA are both set. */
if (!kvm_read_cr4_bits(vcpu, X86_CR4_PKE) || !is_long_mode(vcpu)) {
- mmu->pkru_mask = 0;
+ mmu->pkr_mask = 0;
return;
}
@@ -4348,7 +4348,7 @@ static void update_pkru_bitmask(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
/* PKRU.WD stops write access. */
pkey_bits |= (!!check_write) << 1;
- mmu->pkru_mask |= (pkey_bits & 3) << pfec;
+ mmu->pkr_mask |= (pkey_bits & 3) << pfec;
}
}
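
This hunk sits at the tail of the loop that fills the mask, one PFEC pattern at a time. A condensed reconstruction of the whole update as a free function, based on the surrounding comment and the visible loop tail (the standalone form, the wp parameter standing in for the guest's CR0.WP, and the function name are all illustrative, not the kernel's code verbatim):

    #include <stdbool.h>
    #include <stdint.h>

    #define PFERR_WRITE_MASK (1u << 1)
    #define PFERR_USER_MASK  (1u << 2)
    #define PFERR_RSVD_MASK  (1u << 3)
    #define PFERR_FETCH_MASK (1u << 4)

    static uint32_t build_pkr_mask(bool wp)
    {
            uint32_t mask = 0;
            unsigned int bit;

            for (bit = 0; bit < 16; ++bit) {
                    /* PFEC pattern cached by this domain (P bit dropped). */
                    unsigned int pfec = bit << 1;
                    bool ff = pfec & PFERR_FETCH_MASK;  /* instruction fetch */
                    bool uf = pfec & PFERR_USER_MASK;   /* user-mode access  */
                    bool wf = pfec & PFERR_WRITE_MASK;  /* write access      */
                    /* In this index the RSVD slot carries the PTE's U bit. */
                    bool pte_user = pfec & PFERR_RSVD_MASK;

                    /* Keys gate only data accesses to user pages... */
                    bool check_pkey = !ff && pte_user;
                    /* ...and WD also needs a write PKRU can actually stop. */
                    bool check_write = check_pkey && wf && (uf || wp);

                    unsigned int pkey_bits = check_pkey;         /* AD bit */
                    pkey_bits |= (unsigned int)check_write << 1; /* WD bit */

                    mask |= (pkey_bits & 3) << pfec;
            }
            return mask;
    }

The uf || wp term reflects the condition described in the comment above the function: with CR0.WP clear, a supervisor write to a user page ignores write protection, so WD must not be checked in that case.
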
@@ -4370,7 +4370,7 @@ static void paging64_init_context_common(struct kvm_vcpu *vcpu,
reset_rsvds_bits_mask(vcpu, context);
update_permission_bitmask(vcpu, context, false);
- update_pkru_bitmask(vcpu, context, false);
+ update_pkr_bitmask(vcpu, context, false);
update_last_nonleaf_level(vcpu, context);
MMU_WARN_ON(!is_pae(vcpu));
@@ -4400,7 +4400,7 @@ static void paging32_init_context(struct kvm_vcpu *vcpu,
reset_rsvds_bits_mask(vcpu, context);
update_permission_bitmask(vcpu, context, false);
- update_pkru_bitmask(vcpu, context, false);
+ update_pkr_bitmask(vcpu, context, false);
update_last_nonleaf_level(vcpu, context);
context->page_fault = paging32_page_fault;
@@ -4519,7 +4519,7 @@ static void init_kvm_tdp_mmu(struct kvm_vcpu *vcpu)
}
update_permission_bitmask(vcpu, context, false);
- update_pkru_bitmask(vcpu, context, false);
+ update_pkr_bitmask(vcpu, context, false);
update_last_nonleaf_level(vcpu, context);
reset_tdp_shadow_zero_bits_mask(vcpu, context);
}
@@ -4667,7 +4667,7 @@ void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, bool execonly,
context->mmu_role.as_u64 = new_role.as_u64;
update_permission_bitmask(vcpu, context, true);
- update_pkru_bitmask(vcpu, context, true);
+ update_pkr_bitmask(vcpu, context, true);
update_last_nonleaf_level(vcpu, context);
reset_rsvds_bits_mask_ept(vcpu, context, execonly);
reset_ept_shadow_zero_bits_mask(vcpu, context, execonly);
@@ -4738,7 +4738,7 @@ static void init_kvm_nested_mmu(struct kvm_vcpu *vcpu)
}
update_permission_bitmask(vcpu, g_context, false);
- update_pkru_bitmask(vcpu, g_context, false);
+ update_pkr_bitmask(vcpu, g_context, false);
update_last_nonleaf_level(vcpu, g_context);
}