--- a/docs/misc/arm/silicon-errata.txt
+++ b/docs/misc/arm/silicon-errata.txt
@@ -46,3 +46,4 @@ stable hypervisors.
| ARM | Cortex-A53 | #824069 | ARM64_ERRATUM_824069 |
| ARM | Cortex-A53 | #819472 | ARM64_ERRATUM_819472 |
| ARM | Cortex-A57 | #852523 | N/A |
+| ARM | Cortex-A57 | #832075 | ARM64_ERRATUM_832075 |
--- a/xen/arch/arm/Kconfig
+++ b/xen/arch/arm/Kconfig
@@ -122,6 +122,26 @@ config ARM64_ERRATUM_819472
the kernel if an affected CPU is detected.
If unsure, say Y.
+
+config ARM64_ERRATUM_832075
+ bool "Cortex-A57: 832075: possible deadlock on mixing exclusive memory accesses with device loads"
+ default y
+ depends on ARM_64
+ help
+ This option adds an alternative code sequence to work around ARM
+ erratum 832075 on Cortex-A57 parts up to r1p2.
+
+ Affected Cortex-A57 parts might deadlock when exclusive load/store
+ instructions to Write-Back memory are mixed with Device loads.
+
+ The workaround is to promote device loads to use Load-Acquire
+ semantics.
+ Please note that this does not necessarily enable the workaround,
+ as it depends on the alternative framework, which will only patch
+ the kernel if an affected CPU is detected.
+
+ If unsure, say Y.
+
endmenu
source "common/Kconfig"
--- a/xen/arch/arm/cpuerrata.c
+++ b/xen/arch/arm/cpuerrata.c
@@ -34,6 +34,15 @@ static const struct arm_cpu_capabilities arm_errata[] = {
MIDR_RANGE(MIDR_CORTEX_A53, 0x00, 0x01),
},
#endif
+#ifdef CONFIG_ARM64_ERRATUM_832075
+ {
+ /* Cortex-A57 r0p0 - r1p2 */
+ .desc = "ARM erratum 832075",
+ .capability = ARM64_WORKAROUND_DEVICE_LOAD_ACQUIRE,
+ MIDR_RANGE(MIDR_CORTEX_A57, 0x00,
+ (1 << MIDR_VARIANT_SHIFT) | 2),
+ },
+#endif
{},
};
--- a/xen/include/asm-arm/arm64/io.h
+++ b/xen/include/asm-arm/arm64/io.h
@@ -22,6 +22,7 @@
#include <asm/system.h>
#include <asm/byteorder.h>
+#include <asm/alternative.h>
/*
* Generic IO read/write. These perform native-endian accesses.
@@ -49,28 +50,40 @@ static inline void __raw_writeq(u64 val, volatile void __iomem *addr)
static inline u8 __raw_readb(const volatile void __iomem *addr)
{
u8 val;
- asm volatile("ldrb %w0, [%1]" : "=r" (val) : "r" (addr));
+ asm volatile(ALTERNATIVE("ldrb %w0, [%1]",
+ "ldarb %w0, [%1]",
+ ARM64_WORKAROUND_DEVICE_LOAD_ACQUIRE)
+ : "=r" (val) : "r" (addr));
return val;
}
static inline u16 __raw_readw(const volatile void __iomem *addr)
{
u16 val;
- asm volatile("ldrh %w0, [%1]" : "=r" (val) : "r" (addr));
+ asm volatile(ALTERNATIVE("ldrh %w0, [%1]",
+ "ldarh %w0, [%1]",
+ ARM64_WORKAROUND_DEVICE_LOAD_ACQUIRE)
+ : "=r" (val) : "r" (addr));
return val;
}
static inline u32 __raw_readl(const volatile void __iomem *addr)
{
u32 val;
- asm volatile("ldr %w0, [%1]" : "=r" (val) : "r" (addr));
+ asm volatile(ALTERNATIVE("ldr %w0, [%1]",
+ "ldar %w0, [%1]",
+ ARM64_WORKAROUND_DEVICE_LOAD_ACQUIRE)
+ : "=r" (val) : "r" (addr));
return val;
}
static inline u64 __raw_readq(const volatile void __iomem *addr)
{
u64 val;
- asm volatile("ldr %0, [%1]" : "=r" (val) : "r" (addr));
+ asm volatile(ALTERNATIVE("ldr %0, [%1]",
+ "ldar %0, [%1]",
+ ARM64_WORKAROUND_DEVICE_LOAD_ACQUIRE)
+ : "=r" (val) : "r" (addr));
return val;
}
--- a/xen/include/asm-arm/cpufeature.h
+++ b/xen/include/asm-arm/cpufeature.h
@@ -36,8 +36,9 @@
#define cpu_has_security (boot_cpu_feature32(security) > 0)
#define ARM64_WORKAROUND_CLEAN_CACHE 0
+#define ARM64_WORKAROUND_DEVICE_LOAD_ACQUIRE 1
-#define ARM_NCAPS 1
+#define ARM_NCAPS 2
#ifndef __ASSEMBLY__
--- a/xen/include/asm-arm/processor.h
+++ b/xen/include/asm-arm/processor.h
@@ -47,8 +47,10 @@
#define ARM_CPU_IMP_ARM 0x41
#define ARM_CPU_PART_CORTEX_A53 0xD03
+#define ARM_CPU_PART_CORTEX_A57 0xD07
#define MIDR_CORTEX_A53 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A53)
+#define MIDR_CORTEX_A57 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A57)
/* MPIDR Multiprocessor Affinity Register */
#define _MPIDR_UP (30)
ARM erratum 832075 applies to certain revisions of Cortex-A57. One of
the workarounds is to promote device loads to use load-acquire
semantics. Use the alternative framework to enable the workaround only
on affected cores.

Whilst a guest could trigger the deadlock, it will be broken whenever
the processor receives an interrupt. As the Xen scheduler always sets
up a timer on each processor (firing every 1ms to 300ms depending on
the running time slice), the deadlock would last only a few
milliseconds and would only affect the guest's own time slice.
Therefore a malicious guest could only hurt itself.

Note that all guests should implement/enable the workaround for the
affected cores.

Signed-off-by: Julien Grall <julien.grall@arm.com>

---
    Changes in v2:
        - Update the commit message to explain why it is not
          necessary to take care of a possible deadlock from the
          guest.
---
 docs/misc/arm/silicon-errata.txt |  1 +
 xen/arch/arm/Kconfig             | 20 ++++++++++++++++++++
 xen/arch/arm/cpuerrata.c         |  9 +++++++++
 xen/include/asm-arm/arm64/io.h   | 21 +++++++++++++++++----
 xen/include/asm-arm/cpufeature.h |  3 ++-
 xen/include/asm-arm/processor.h  |  2 ++
 6 files changed, 51 insertions(+), 5 deletions(-)
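
As a side note on the cpuerrata.c hunk above: the r0p0 - r1p2 bound is
expressed through the MIDR variant field (the "rX" digit, bits [23:20])
and revision field (the "pY" digit, bits [3:0]), which is why r1p2 is
written as (1 << MIDR_VARIANT_SHIFT) | 2. The sketch below only
illustrates that encoding; midr_is_affected() and the demo values are
made up for the example and omit the implementer/part-number check that
the real MIDR_RANGE() also performs.

/*
 * Illustrative sketch only (not part of the patch): how the
 * r0p0 - r1p2 range is encoded.  ARM names a core revision rXpY,
 * where X lives in the MIDR variant field (bits [23:20]) and Y in
 * the revision field (bits [3:0]).
 */
#include <stdint.h>
#include <stdio.h>

#define MIDR_REVISION_MASK   0xf
#define MIDR_VARIANT_SHIFT   20
#define MIDR_VARIANT_MASK    (0xfU << MIDR_VARIANT_SHIFT)

/* Keep only the fields relevant to the range comparison. */
#define MIDR_RANGE_BITS(midr) \
    ((midr) & (MIDR_VARIANT_MASK | MIDR_REVISION_MASK))

/* Hypothetical helper: is this variant/revision within [min, max]? */
static int midr_is_affected(uint32_t midr, uint32_t min, uint32_t max)
{
    uint32_t bits = MIDR_RANGE_BITS(midr);

    return bits >= min && bits <= max;
}

int main(void)
{
    /* Upper bound r1p2, written exactly as in the cpuerrata.c hunk. */
    uint32_t max = (1 << MIDR_VARIANT_SHIFT) | 2;

    /* r1p1 falls inside the affected range... */
    printf("%d\n", midr_is_affected((1 << MIDR_VARIANT_SHIFT) | 1, 0x00, max));
    /* ...while r1p3 does not. */
    printf("%d\n", midr_is_affected((1 << MIDR_VARIANT_SHIFT) | 3, 0x00, max));

    return 0;
}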
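
Similarly, the io.h hunk relies on the alternative framework to rewrite
each device load in place at boot, so there is no branch on the fast
path. Purely as an illustration, and with made-up names that do not
exist in Xen (demo_readl, workaround_device_load_acquire), the sketch
below shows the equivalent behaviour of the 32-bit accessor with an
explicit runtime flag instead of instruction patching. It is AArch64
only, as it uses A64 assembly.

/*
 * Illustrative sketch only: the semantic effect of the io.h change for
 * the 32-bit accessor.  The real code patches LDR into LDAR in place on
 * affected cores, so no runtime branch exists; the flag below merely
 * stands in for that boot-time decision.
 */
#include <stdbool.h>
#include <stdint.h>

/* Assumed to be set once at boot after the MIDR check has run. */
static bool workaround_device_load_acquire;

static inline uint32_t demo_readl(const volatile void *addr)
{
    uint32_t val;

    if ( workaround_device_load_acquire )
        /* Load-acquire: later memory accesses are not reordered
         * before this device load. */
        asm volatile("ldar %w0, [%1]" : "=r" (val) : "r" (addr));
    else
        /* Default: plain 32-bit device load, as before the patch. */
        asm volatile("ldr %w0, [%1]" : "=r" (val) : "r" (addr));

    return val;
}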