@@ -244,7 +244,7 @@ cpu_init_done:
* Alignment checking enabled,
* MMU translation disabled (for now).
*/
- ldr r0, =(HSCTLR_BASE|SCTLR_A)
+ ldr r0, =(HSCTLR_BASE|SCTLR_Axx_ELx_A)
mcr CP32(r0, HSCTLR)

/*
@@ -369,7 +369,8 @@ virtphys_clash:

ldr r1, =paging /* Explicit vaddr, not RIP-relative */
mrc CP32(r0, HSCTLR)
- orr r0, r0, #(SCTLR_M|SCTLR_C) /* Enable MMU and D-cache */
+ /* Enable MMU and D-cache */
+ orr r0, r0, #(SCTLR_Axx_ELx_M|SCTLR_Axx_ELx_C)
dsb /* Flush PTE writes and finish reads */
mcr CP32(r0, HSCTLR) /* now paging is enabled */
isb /* Now, flush the icache */
@@ -514,8 +514,8 @@ virtphys_clash:

ldr x1, =paging /* Explicit vaddr, not RIP-relative */
mrs x0, SCTLR_EL2
- orr x0, x0, #SCTLR_M /* Enable MMU */
- orr x0, x0, #SCTLR_C /* Enable D-cache */
+ orr x0, x0, #SCTLR_Axx_ELx_M /* Enable MMU */
+ orr x0, x0, #SCTLR_Axx_ELx_C /* Enable D-cache */
dsb sy /* Flush PTE writes and finish reads */
msr SCTLR_EL2, x0 /* now paging is enabled */
isb /* Now, flush the icache */
@@ -612,7 +612,7 @@ bool guest_walk_tables(const struct vcpu *v, vaddr_t gva,
*perms = GV2M_READ;

/* If the MMU is disabled, there is no need to translate the gva. */
- if ( !(sctlr & SCTLR_M) )
+ if ( !(sctlr & SCTLR_Axx_ELx_M) )
{
*ipa = gva;

@@ -609,7 +609,7 @@ void __init remove_early_mappings(void)
*/
static void xen_pt_enforce_wnx(void)
{
- WRITE_SYSREG32(READ_SYSREG32(SCTLR_EL2) | SCTLR_WXN, SCTLR_EL2);
+ WRITE_SYSREG32(READ_SYSREG32(SCTLR_EL2) | SCTLR_Axx_ELx_WXN, SCTLR_EL2);
/*
* The TLBs may cache SCTLR_EL2.WXN. So ensure it is synchronized
* before flushing the TLBs.
@@ -392,9 +392,9 @@ static void cpsr_switch_mode(struct cpu_user_regs *regs, int mode)
regs->cpsr |= PSR_IRQ_MASK;
if ( mode == PSR_MODE_ABT )
regs->cpsr |= PSR_ABT_MASK;
- if ( sctlr & SCTLR_TE )
+ if ( sctlr & SCTLR_A32_ELx_TE )
regs->cpsr |= PSR_THUMB;
- if ( sctlr & SCTLR_EE )
+ if ( sctlr & SCTLR_Axx_ELx_EE )
regs->cpsr |= PSR_BIG_ENDIAN;
}

@@ -402,7 +402,7 @@ static vaddr_t exception_handler32(vaddr_t offset)
{
uint32_t sctlr = READ_SYSREG32(SCTLR_EL1);

- if ( sctlr & SCTLR_V )
+ if ( sctlr & SCTLR_A32_EL1_V )
return 0xffff0000 + offset;
else /* always have security exceptions */
return READ_SYSREG(VBAR_EL1) + offset;
@@ -391,10 +391,12 @@ static inline int set_foreign_p2m_entry(struct domain *d, unsigned long gfn,
*/
static inline bool vcpu_has_cache_enabled(struct vcpu *v)
{
+ const uint32_t mask = SCTLR_Axx_ELx_C | SCTLR_Axx_ELx_M;
+
/* Only works with the current vCPU */
ASSERT(current == v);

- return (READ_SYSREG32(SCTLR_EL1) & (SCTLR_C|SCTLR_M)) == (SCTLR_C|SCTLR_M);
+ return (READ_SYSREG32(SCTLR_EL1) & mask) == mask;
}

#endif /* _XEN_P2M_H */
@@ -112,26 +112,23 @@
#define TTBCR_PD1 (_AC(1,U)<<5)

/* SCTLR System Control Register. */
-/* HSCTLR is a subset of this. */
-#define SCTLR_TE (_AC(1,U)<<30)
-#define SCTLR_AFE (_AC(1,U)<<29)
-#define SCTLR_TRE (_AC(1,U)<<28)
-#define SCTLR_NMFI (_AC(1,U)<<27)
-#define SCTLR_EE (_AC(1,U)<<25)
-#define SCTLR_VE (_AC(1,U)<<24)
-#define SCTLR_U (_AC(1,U)<<22)
-#define SCTLR_FI (_AC(1,U)<<21)
-#define SCTLR_WXN (_AC(1,U)<<19)
-#define SCTLR_HA (_AC(1,U)<<17)
-#define SCTLR_RR (_AC(1,U)<<14)
-#define SCTLR_V (_AC(1,U)<<13)
-#define SCTLR_I (_AC(1,U)<<12)
-#define SCTLR_Z (_AC(1,U)<<11)
-#define SCTLR_SW (_AC(1,U)<<10)
-#define SCTLR_B (_AC(1,U)<<7)
-#define SCTLR_C (_AC(1,U)<<2)
-#define SCTLR_A (_AC(1,U)<<1)
-#define SCTLR_M (_AC(1,U)<<0)
+
+/* Bits specific to SCTLR_EL1 for Arm32 */
+
+#define SCTLR_A32_EL1_V (_AC(1,U)<<13)
+
+/* Common bits for SCTLR_ELx for Arm32 */
+
+#define SCTLR_A32_ELx_TE (_AC(1,U)<<30)
+#define SCTLR_A32_ELx_FI (_AC(1,U)<<21)
+
+/* Common bits for SCTLR_ELx on all architectures */
+#define SCTLR_Axx_ELx_EE (_AC(1,U)<<25)
+#define SCTLR_Axx_ELx_WXN (_AC(1,U)<<19)
+#define SCTLR_Axx_ELx_I (_AC(1,U)<<12)
+#define SCTLR_Axx_ELx_C (_AC(1,U)<<2)
+#define SCTLR_Axx_ELx_A (_AC(1,U)<<1)
+#define SCTLR_Axx_ELx_M (_AC(1,U)<<0)

#define HSCTLR_BASE _AC(0x30c51878,U)
The SCTLR_* defines are currently used for SCTLR/HSCTLR (arm32) and
SCTLR_EL1/SCTLR_EL2 (arm64). The naming scheme is actually quite
confusing because they may only be defined for an architecture (or even
an exception level). So it is not easy for the developer to know which
one to use.

The naming scheme is reworked by adding Axx_ELx in each define:
    * xx is replaced by 32 or 64 if specific to an architecture
    * x is replaced by 2 (hypervisor) or 1 (kernel) if specific to an
      exception level

While doing the renaming, remove the unused defines (or at least the
ones that are unlikely to be used).

Signed-off-by: Julien Grall <julien.grall@arm.com>
---
 xen/arch/arm/arm32/head.S       |  5 +++--
 xen/arch/arm/arm64/head.S       |  4 ++--
 xen/arch/arm/guest_walk.c       |  2 +-
 xen/arch/arm/mm.c               |  2 +-
 xen/arch/arm/traps.c            |  6 +++---
 xen/include/asm-arm/p2m.h       |  4 +++-
 xen/include/asm-arm/processor.h | 37 +++++++++++++++++--------------------
 7 files changed, 30 insertions(+), 30 deletions(-)
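For illustration only, not part of the patch: a minimal, self-contained
C sketch of how the reworked vcpu_has_cache_enabled() check in p2m.h
reads under the new naming scheme. The defines are copied here with
plain shifts instead of _AC(), and the sctlr value is made up, so the
snippet compiles and runs on its own.

    #include <stdint.h>
    #include <stdio.h>

    /* Local copies of the new defines, for illustration; the real ones
     * live in xen/include/asm-arm/processor.h and use _AC() so they can
     * also be consumed by assembly code. */
    #define SCTLR_Axx_ELx_C  (1U << 2)   /* D-cache enable */
    #define SCTLR_Axx_ELx_M  (1U << 0)   /* MMU enable */

    int main(void)
    {
        /* A made-up SCTLR_EL1 value with both the MMU and D-cache on. */
        uint32_t sctlr = SCTLR_Axx_ELx_M | SCTLR_Axx_ELx_C;

        /* Mirrors the reworked check: the cache only counts as enabled
         * when the MMU bit is set as well. */
        const uint32_t mask = SCTLR_Axx_ELx_C | SCTLR_Axx_ELx_M;

        printf("cache enabled: %s\n",
               (sctlr & mask) == mask ? "yes" : "no");

        return 0;
    }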