parisc: Add lightweight spinlock checks

Message ID: ZGtp9PdK1/ANsNLY@p100
State: Accepted, archived
Series: parisc: Add lightweight spinlock checks

Commit Message

Helge Deller May 22, 2023, 1:11 p.m. UTC
Add a lightweight spinlock check which uses only two instructions
per spinlock call. If it detects that a spinlock has been trashed by
memory corruption, it halts the kernel. It will not detect
uninitialized spinlocks; for those, CONFIG_DEBUG_SPINLOCK needs to
be enabled.
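
In C terms, the two added instructions implement roughly the following
predicate (a sketch of the check's semantics only; the real
implementation is the inline assembly in arch_spin_val_check() in the
patch below):

    /* A valid lock word is either 0 (locked) or
     * __ARCH_SPIN_LOCK_UNLOCKED_VAL (unlocked).  Any bit set outside
     * the magic value means the word has been overwritten. */
    if (lock_val & ~__ARCH_SPIN_LOCK_UNLOCKED_VAL)
            /* trap via "break 6,6" into handle_break() */;

Corruption that leaves only bits of the magic value set escapes this
test; that is the trade-off for the two-instruction cost.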

Since the check adds only two instructions, it should not measurably
influence runtime, so it is safe to enable by default.

The __ARCH_SPIN_LOCK_UNLOCKED_VAL constant has been chosen small
enough that it can be loaded with a single LDI assembler instruction.
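
For reference: LDI is a pseudo-instruction for LDO off the zero
register and takes a 14-bit sign-extended immediate, so only values up
to 8191 can be materialized in one instruction; 0x1a46 = 6726 fits. A
sketch (%r28 is an arbitrarily chosen register):

    ldi	0x1a46,%r28	/* __ARCH_SPIN_LOCK_UNLOCKED_VAL in one insn */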

Signed-off-by: Helge Deller <deller@gmx.de>

Patch

diff --git a/arch/parisc/include/asm/spinlock_types.h b/arch/parisc/include/asm/spinlock_types.h
index ca39ee350c3f..d65934079ebd 100644
--- a/arch/parisc/include/asm/spinlock_types.h
+++ b/arch/parisc/include/asm/spinlock_types.h
@@ -2,13 +2,17 @@ 
 #ifndef __ASM_SPINLOCK_TYPES_H
 #define __ASM_SPINLOCK_TYPES_H

+#define __ARCH_SPIN_LOCK_UNLOCKED_VAL	0x1a46
+
 typedef struct {
 #ifdef CONFIG_PA20
 	volatile unsigned int slock;
-# define __ARCH_SPIN_LOCK_UNLOCKED { 1 }
+# define __ARCH_SPIN_LOCK_UNLOCKED { __ARCH_SPIN_LOCK_UNLOCKED_VAL }
 #else
 	volatile unsigned int lock[4];
-# define __ARCH_SPIN_LOCK_UNLOCKED	{ { 1, 1, 1, 1 } }
+# define __ARCH_SPIN_LOCK_UNLOCKED	\
+	{ { __ARCH_SPIN_LOCK_UNLOCKED_VAL, __ARCH_SPIN_LOCK_UNLOCKED_VAL, \
+	    __ARCH_SPIN_LOCK_UNLOCKED_VAL, __ARCH_SPIN_LOCK_UNLOCKED_VAL } }
 #endif
 } arch_spinlock_t;

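A note on the two layouts above: the ldcw instruction in the locking
fast path needs a 16-byte-aligned operand on pre-PA2.0 machines, which
is why the non-PA20 variant carries four lock words; every accessor
below first calls __ldcw_align() to select the aligned one. A sketch of
that alignment step, assuming the four-word layout (the kernel's actual
helper is the __ldcw_align() macro):

    /* Round &lock[0] up to the next 16-byte boundary; one of the
     * four words is guaranteed to land on it. */
    volatile unsigned int *a = (volatile unsigned int *)
            (((unsigned long)&x->lock[0] + 15) & ~15UL);
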
diff --git a/arch/parisc/Kconfig b/arch/parisc/Kconfig
index a98940e64243..f7018a564f1d 100644
--- a/arch/parisc/Kconfig
+++ b/arch/parisc/Kconfig
@@ -308,6 +308,14 @@  config TLB_PTLOCK
 	  updated consistently on SMP machines at the expense of some
 	  loss in performance.

+config LIGHTWEIGHT_SPINLOCK_CHECK
+	bool "Add lightweight spinlock checks"
+	default y
+	help
+	  Add lightweight checks to the spinlock functions to catch
+	  memory overwrites at runtime. Those checks will not detect
+	  uninitialized spinlocks.
+
 config HOTPLUG_CPU
 	bool
 	default y if SMP
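
The option is a pure compile-time switch: with it disabled,
IS_ENABLED(CONFIG_LIGHTWEIGHT_SPINLOCK_CHECK) evaluates to 0 and the
compiler drops the check entirely, so there is no runtime toggle. The
default on parisc is:

    CONFIG_LIGHTWEIGHT_SPINLOCK_CHECK=y
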
diff --git a/arch/parisc/include/asm/spinlock.h b/arch/parisc/include/asm/spinlock.h
index a6e5d66a7656..edfcb9858bcb 100644
--- a/arch/parisc/include/asm/spinlock.h
+++ b/arch/parisc/include/asm/spinlock.h
@@ -7,10 +7,26 @@ 
 #include <asm/processor.h>
 #include <asm/spinlock_types.h>

+#define SPINLOCK_BREAK_INSN	0x0000c006	/* break 6,6 */
+
+static inline void arch_spin_val_check(int lock_val)
+{
+	if (IS_ENABLED(CONFIG_LIGHTWEIGHT_SPINLOCK_CHECK))
+		asm volatile(	"andcm,= %0,%1,%%r0\n"
+				".word %2\n"
+		: : "r" (lock_val), "r" (__ARCH_SPIN_LOCK_UNLOCKED_VAL),
+			"i" (SPINLOCK_BREAK_INSN));
+}
+
 static inline int arch_spin_is_locked(arch_spinlock_t *x)
 {
-	volatile unsigned int *a = __ldcw_align(x);
-	return READ_ONCE(*a) == 0;
+	volatile unsigned int *a;
+	int lock_val;
+
+	a = __ldcw_align(x);
+	lock_val = READ_ONCE(*a);
+	arch_spin_val_check(lock_val);
+	return (lock_val == 0);
 }

 static inline void arch_spin_lock(arch_spinlock_t *x)
@@ -18,9 +34,18 @@  static inline void arch_spin_lock(arch_spinlock_t *x)
 	volatile unsigned int *a;

 	a = __ldcw_align(x);
-	while (__ldcw(a) == 0)
+	do {
+		int lock_val_old;
+
+		lock_val_old = __ldcw(a);
+		arch_spin_val_check(lock_val_old);
+		if (lock_val_old)
+			return;	/* got lock */
+
+		/* wait until we should try to get lock again */
 		while (*a == 0)
 			continue;
+	} while (1);
 }

 static inline void arch_spin_unlock(arch_spinlock_t *x)
@@ -29,15 +54,19 @@  static inline void arch_spin_unlock(arch_spinlock_t *x)

 	a = __ldcw_align(x);
 	/* Release with ordered store. */
-	__asm__ __volatile__("stw,ma %0,0(%1)" : : "r"(1), "r"(a) : "memory");
+	__asm__ __volatile__("stw,ma %0,0(%1)"
+		: : "r"(__ARCH_SPIN_LOCK_UNLOCKED_VAL), "r"(a) : "memory");
 }

 static inline int arch_spin_trylock(arch_spinlock_t *x)
 {
 	volatile unsigned int *a;
+	int lock_val;

 	a = __ldcw_align(x);
-	return __ldcw(a) != 0;
+	lock_val = __ldcw(a);
+	arch_spin_val_check(lock_val);
+	return lock_val != 0;
 }

 /*
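
The andcm,=/break pair in arch_spin_val_check() above is where the two
instructions from the commit message come from. The emitted code looks
roughly like this (a sketch; the actual registers are chosen by the
compiler):

    andcm,=	%r26,%r25,%r0	/* lock_val & ~magic; nullify next insn if 0 */
    break	6,6		/* reached only if stray bits indicate corruption */

The ,= completer nullifies the following instruction when the ANDCM
result is zero, so the break is skipped whenever the lock word is
valid.
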
diff --git a/arch/parisc/kernel/traps.c b/arch/parisc/kernel/traps.c
index f9696fbf646c..861cb19eb12c 100644
--- a/arch/parisc/kernel/traps.c
+++ b/arch/parisc/kernel/traps.c
@@ -308,6 +308,11 @@  static void handle_break(struct pt_regs *regs)
 		return;
 	}
 #endif
+	if (IS_ENABLED(CONFIG_LIGHTWEIGHT_SPINLOCK_CHECK) &&
+	    (iir == SPINLOCK_BREAK_INSN) &&
+	    !user_mode(regs)) {
+		die_if_kernel("Spinlock was trashed", regs, 1);
+	}

 	if (unlikely(iir != GDB_BREAK_INSN))
 		parisc_printk_ratelimited(0, regs,
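
End to end: a corrupted lock word makes the andcm,= leave the break in
place, break 6,6 traps into handle_break() above, and die_if_kernel()
stops the kernel with "Spinlock was trashed". A hypothetical way to
exercise the path (test-only sketch, not part of the patch; assumes an
SMP build with CONFIG_DEBUG_SPINLOCK disabled):

    static DEFINE_SPINLOCK(test_lock);

    /* Deliberately trash the lock word, then try to take the lock;
     * the next __ldcw() should trigger the lightweight check. */
    memset(&test_lock, 0xff, sizeof(test_lock));
    spin_lock(&test_lock);	/* expected: kernel halts here */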