@@ -10,10 +10,16 @@
#define __ASM_GENERIC_RQSPINLOCK_H
#include <linux/types.h>
+#include <vdso/time64.h>
struct qspinlock;
typedef struct qspinlock rqspinlock_t;
extern void resilient_queued_spin_lock_slowpath(rqspinlock_t *lock, u32 val);
+/*
+ * Default timeout for waiting loops is 0.25 seconds
+ */
+#define RES_DEF_TIMEOUT (NSEC_PER_SEC / 4)
+
#endif /* __ASM_GENERIC_RQSPINLOCK_H */
@@ -6,9 +6,11 @@
* (C) Copyright 2013-2014,2018 Red Hat, Inc.
* (C) Copyright 2015 Intel Corp.
* (C) Copyright 2015 Hewlett-Packard Enterprise Development LP
+ * (C) Copyright 2024 Meta Platforms, Inc. and affiliates.
*
* Authors: Waiman Long <longman@redhat.com>
* Peter Zijlstra <peterz@infradead.org>
+ * Kumar Kartikeya Dwivedi <memxor@gmail.com>
*/
#include <linux/smp.h>
@@ -22,6 +24,7 @@
#include <asm/qspinlock.h>
#include <trace/events/lock.h>
#include <asm/rqspinlock.h>
+#include <linux/timekeeping.h>
/*
* Include queued spinlock definitions and statistics code
@@ -68,6 +71,45 @@
#include "mcs_spinlock.h"
+struct rqspinlock_timeout {
+ u64 timeout_end;
+ u64 duration;
+ u16 spin;
+};
+
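+/*
+ * The deadline is computed lazily: the first invocation records
+ * 'now + duration' in timeout_end and succeeds, and subsequent
+ * invocations return -ETIMEDOUT once the NMI-safe monotonic clock
+ * moves past it.
+ */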
+static noinline int check_timeout(struct rqspinlock_timeout *ts)
+{
+ u64 time = ktime_get_mono_fast_ns();
+
+ if (!ts->timeout_end) {
+ ts->timeout_end = time + ts->duration;
+ return 0;
+ }
+
+ if (time > ts->timeout_end)
+ return -ETIMEDOUT;
+
+ return 0;
+}
+
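+/*
+ * Amortize the cost of check_timeout(): the u16 'spin' counter only
+ * wraps to 0 once every 65536 invocations, so the common case costs a
+ * single increment and branch, and the clock is read rarely.
+ */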
+#define RES_CHECK_TIMEOUT(ts, ret) \
+ ({ \
+ if (!(ts).spin++) \
+ (ret) = check_timeout(&(ts)); \
+ (ret); \
+ })
+
+/*
+ * Initialize the 'spin' member.
+ */
+#define RES_INIT_TIMEOUT(ts) ({ (ts).spin = 1; })
+
+/*
+ * We only need to reset 'timeout_end'; 'spin' will just wrap around as necessary.
+ * Duration is defined for each spin attempt, so set it here.
+ */
+#define RES_RESET_TIMEOUT(ts, _duration) ({ (ts).timeout_end = 0; (ts).duration = _duration; })
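+
+/*
+ * Sketch of the intended usage in a waiting loop (illustrative only; the
+ * actual call sites follow in later patches, and 'locked' stands in for
+ * whatever word the waiter polls):
+ *
+ *	struct rqspinlock_timeout ts;
+ *	int ret = 0;
+ *
+ *	RES_INIT_TIMEOUT(ts);
+ *	RES_RESET_TIMEOUT(ts, RES_DEF_TIMEOUT);
+ *	while (smp_load_acquire(&lock->locked) && !RES_CHECK_TIMEOUT(ts, ret))
+ *		cpu_relax();
+ *
+ * On exit, 'ret' is 0 if the lock word cleared in time and -ETIMEDOUT
+ * if the 0.25 second budget expired.
+ */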
+
/*
* Per-CPU queue node structures; we can never have more than 4 nested
* contexts: task, softirq, hardirq, nmi.
@@ -100,11 +142,14 @@ static DEFINE_PER_CPU_ALIGNED(struct qnode, rqnodes[_Q_MAX_NODES]);
void __lockfunc resilient_queued_spin_lock_slowpath(rqspinlock_t *lock, u32 val)
{
struct mcs_spinlock *prev, *next, *node;
+ struct rqspinlock_timeout ts;
u32 old, tail;
int idx;
BUILD_BUG_ON(CONFIG_NR_CPUS >= (1U << _Q_TAIL_CPU_BITS));
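+	/* Arm the timeout state; each waiting loop sets its own duration. */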
+ RES_INIT_TIMEOUT(ts);
+
/*
* Wait for in-progress pending->locked hand-overs with a bounded
* number of spins so that we guarantee forward progress.