diff --git a/include/linux/rcuref_long.h b/include/linux/rcuref_long.h
new file mode 100644
--- /dev/null
+++ b/include/linux/rcuref_long.h
@@ -0,0 +1,201 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+#ifndef _LINUX_RCUREF_LONG_H
+#define _LINUX_RCUREF_LONG_H
+
+#include <linux/atomic.h>
+#include <linux/bug.h>
+#include <linux/limits.h>
+#include <linux/lockdep.h>
+#include <linux/preempt.h>
+#include <linux/rcupdate.h>
+#include <linux/rcuref.h>
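+
+/*
+ * Counterpart of rcuref_t. Note: defined here as an assumption, since
+ * this patch does not add rcuref_long_t to include/linux/types.h.
+ */
+typedef struct {
+	atomic_long_t refcnt;
+} rcuref_long_t;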
+
+#ifdef CONFIG_64BIT
+#define RCUREF_LONG_ONEREF 0x0000000000000000UL
+#define RCUREF_LONG_MAXREF 0x7FFFFFFFFFFFFFFFUL
+#define RCUREF_LONG_SATURATED 0xA000000000000000UL
+#define RCUREF_LONG_RELEASED 0xC000000000000000UL
+#define RCUREF_LONG_DEAD 0xE000000000000000UL
+#define RCUREF_LONG_NOREF 0xFFFFFFFFFFFFFFFFUL
+#else
+#define RCUREF_LONG_ONEREF RCUREF_ONEREF
+#define RCUREF_LONG_MAXREF RCUREF_MAXREF
+#define RCUREF_LONG_SATURATED RCUREF_SATURATED
+#define RCUREF_LONG_RELEASED RCUREF_RELEASED
+#define RCUREF_LONG_DEAD RCUREF_DEAD
+#define RCUREF_LONG_NOREF RCUREF_NOREF
+#endif
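+
+/*
+ * The counter is biased by -1 (rcuref_long_init() stores cnt - 1):
+ * ONEREF..MAXREF is the valid zone, the range above MAXREF and below
+ * RELEASED is the saturation zone (midpoint SATURATED), and the range
+ * from RELEASED on is the dead zone (midpoint DEAD), with NOREF (-1)
+ * denoting a dropped last reference. See the scheme description in
+ * lib/rcuref.c.
+ */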
+
+/**
+ * rcuref_long_init - Initialize a rcuref_long reference count with the given reference count
+ * @ref: Pointer to the reference count
+ * @cnt: The initial reference count, typically '1'
+ */
+static inline void rcuref_long_init(rcuref_long_t *ref, unsigned long cnt)
+{
+ atomic_long_set(&ref->refcnt, cnt - 1);
+}
+
+/**
+ * rcuref_long_read - Read the number of held reference counts of a rcuref_long
+ * @ref: Pointer to the reference count
+ *
+ * Return: The number of held references (0 ... N)
+ */
+static inline unsigned long rcuref_long_read(rcuref_long_t *ref)
+{
+ unsigned long c = atomic_long_read(&ref->refcnt);
+
+ /* Return 0 if within the DEAD zone. */
+ return c >= RCUREF_LONG_RELEASED ? 0 : c + 1;
+}
+
+__must_check bool rcuref_long_get_slowpath(rcuref_long_t *ref);
+
+/**
+ * rcuref_long_get - Acquire one reference on a rcuref_long reference count
+ * @ref: Pointer to the reference count
+ *
+ * Similar to atomic_long_inc_not_zero() but saturates at RCUREF_LONG_MAXREF.
+ *
+ * Provides no memory ordering; it is assumed the caller has guaranteed the
+ * object memory to be stable (RCU, etc.). It does provide a control dependency
+ * and thereby orders future stores. See documentation in lib/rcuref.c.
+ *
+ * Return:
+ * False if the attempt to acquire a reference failed. This happens
+ * when the last reference has been put already
+ *
+ * True if a reference was successfully acquired
+ */
+static inline __must_check bool rcuref_long_get(rcuref_long_t *ref)
+{
+ /*
+ * Unconditionally increase the reference count. The saturation and
+ * dead zones provide enough tolerance for this.
+ */
+ if (likely(!atomic_long_add_negative_relaxed(1, &ref->refcnt)))
+ return true;
+
+ /* Handle the cases inside the saturation and dead zones */
+ return rcuref_long_get_slowpath(ref);
+}
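+
+/*
+ * Usage sketch (hypothetical struct obj; RCU provides the object
+ * stability that rcuref_long_get() requires):
+ *
+ *	rcu_read_lock();
+ *	obj = rcu_dereference(slot);
+ *	if (obj && !rcuref_long_get(&obj->ref))
+ *		obj = NULL;
+ *	rcu_read_unlock();
+ */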
+
+__must_check bool rcuref_long_put_slowpath(rcuref_long_t *ref);
+
+/*
+ * Internal helper. Do not invoke directly.
+ */
+static __always_inline __must_check bool __rcuref_long_put(rcuref_long_t *ref)
+{
+	RCU_LOCKDEP_WARN(!rcu_read_lock_held() && preemptible(),
+			 "suspicious rcuref_long_put_rcusafe() usage");
+ /*
+ * Unconditionally decrease the reference count. The saturation and
+ * dead zones provide enough tolerance for this.
+ */
+ if (likely(!atomic_long_add_negative_release(-1, &ref->refcnt)))
+ return false;
+
+ /*
+ * Handle the last reference drop and cases inside the saturation
+ * and dead zones.
+ */
+ return rcuref_long_put_slowpath(ref);
+}
+
+/**
+ * rcuref_long_put_rcusafe -- Release one reference for a rcuref_long reference count RCU safe
+ * @ref: Pointer to the reference count
+ *
+ * Provides release memory ordering, such that prior loads and stores are done
+ * before, and provides an acquire ordering on success such that free()
+ * must come after.
+ *
+ * Can be invoked from contexts which guarantee that no grace period can
+ * happen which would free the object concurrently if the decrement drops
+ * the last reference and the slowpath races against a concurrent get() and
+ * put() pair. rcu_read_lock()'ed and atomic contexts qualify.
+ *
+ * Return:
+ * True if this was the last reference with no future references
+ * possible. This signals the caller that it can safely release the
+ * object which is protected by the reference counter.
+ *
+ * False if there are still active references or the put() raced
+ * with a concurrent get()/put() pair. Caller is not allowed to
+ * release the protected object.
+ */
+static inline __must_check bool rcuref_long_put_rcusafe(rcuref_long_t *ref)
+{
+ return __rcuref_long_put(ref);
+}
+
+/**
+ * rcuref_long_put -- Release one reference for a rcuref_long reference count
+ * @ref: Pointer to the reference count
+ *
+ * Can be invoked from any context.
+ *
+ * Provides release memory ordering, such that prior loads and stores are done
+ * before, and provides an acquire ordering on success such that free()
+ * must come after.
+ *
+ * Return:
+ *
+ * True if this was the last reference with no future references
+ * possible. This signals the caller that it can safely schedule the
+ * object, which is protected by the reference counter, for
+ * deconstruction.
+ *
+ * False if there are still active references or the put() raced
+ * with a concurrent get()/put() pair. Caller is not allowed to
+ * deconstruct the protected object.
+ */
+static inline __must_check bool rcuref_long_put(rcuref_long_t *ref)
+{
+ bool released;
+
+ preempt_disable();
+ released = __rcuref_long_put(ref);
+ preempt_enable();
+ return released;
+}
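+
+/*
+ * Usage sketch (hypothetical struct obj containing an rcu_head named
+ * 'rcu', so the memory is freed only after a grace period):
+ *
+ *	if (rcuref_long_put(&obj->ref))
+ *		kfree_rcu(obj, rcu);
+ */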
+
+#endif /* _LINUX_RCUREF_LONG_H */
diff --git a/lib/rcuref.c b/lib/rcuref.c
--- a/lib/rcuref.c
+++ b/lib/rcuref.c
@@ -176,6 +176,7 @@
#include <linux/export.h>
#include <linux/rcuref.h>
+#include <linux/rcuref_long.h>
/**
* rcuref_get_slowpath - Slowpath of rcuref_get()
@@ -217,6 +218,46 @@ bool rcuref_get_slowpath(rcuref_t *ref)
}
EXPORT_SYMBOL_GPL(rcuref_get_slowpath);
+/**
+ * rcuref_long_get_slowpath - Slowpath of rcuref_long_get()
+ * @ref: Pointer to the reference count
+ *
+ * Invoked when the reference count is outside of the valid zone.
+ *
+ * Return:
+ * False if the reference count was already marked dead
+ *
+ * True if the reference count is saturated, which prevents the
+ * object from ever being deconstructed.
+ */
+bool rcuref_long_get_slowpath(rcuref_long_t *ref)
+{
+ unsigned long cnt = atomic_long_read(&ref->refcnt);
+
+ /*
+ * If the reference count was already marked dead, undo the
+ * increment so it stays in the middle of the dead zone and return
+ * fail.
+ */
+ if (cnt >= RCUREF_LONG_RELEASED) {
+ atomic_long_set(&ref->refcnt, RCUREF_LONG_DEAD);
+ return false;
+ }
+
+ /*
+ * If it was saturated, warn and mark it so. In case the increment
+ * was already on a saturated value restore the saturation
+ * marker. This keeps it in the middle of the saturation zone and
+ * prevents the reference count from overflowing. This leaks the
+ * object memory, but prevents the obvious reference count overflow
+ * damage.
+ */
+	if (WARN_ONCE(cnt > RCUREF_LONG_MAXREF, "rcuref_long saturated - leaking memory"))
+ atomic_long_set(&ref->refcnt, RCUREF_LONG_SATURATED);
+ return true;
+}
+EXPORT_SYMBOL_GPL(rcuref_long_get_slowpath);
+
/**
* rcuref_put_slowpath - Slowpath of __rcuref_put()
* @ref: Pointer to the reference count
@@ -279,3 +320,66 @@ bool rcuref_put_slowpath(rcuref_t *ref)
return false;
}
EXPORT_SYMBOL_GPL(rcuref_put_slowpath);
+
+/**
+ * rcuref_long_put_slowpath - Slowpath of __rcuref_long_put()
+ * @ref: Pointer to the reference count
+ *
+ * Invoked when the reference count is outside of the valid zone.
+ *
+ * Return:
+ * True if this was the last reference with no future references
+ * possible. This signals the caller that it can safely schedule the
+ * object, which is protected by the reference counter, for
+ * deconstruction.
+ *
+ * False if there are still active references or the put() raced
+ * with a concurrent get()/put() pair. Caller is not allowed to
+ * deconstruct the protected object.
+ */
+bool rcuref_long_put_slowpath(rcuref_long_t *ref)
+{
+ unsigned long cnt = atomic_long_read(&ref->refcnt);
+
+ /* Did this drop the last reference? */
+ if (likely(cnt == RCUREF_LONG_NOREF)) {
+ /*
+ * Carefully try to set the reference count to RCUREF_LONG_DEAD.
+ *
+ * This can fail if a concurrent get() operation has
+ * elevated it again or the corresponding put() even marked
+ * it dead already. Both are valid situations and do not
+ * require a retry. If this fails the caller is not
+ * allowed to deconstruct the object.
+ */
+ if (!atomic_long_try_cmpxchg_release(&ref->refcnt, &cnt, RCUREF_LONG_DEAD))
+ return false;
+
+ /*
+ * The caller can safely schedule the object for
+ * deconstruction. Provide acquire ordering.
+ */
+ smp_acquire__after_ctrl_dep();
+ return true;
+ }
+
+ /*
+ * If the reference count was already in the dead zone, then this
+ * put() operation is imbalanced. Warn, put the reference count back to
+ * DEAD and tell the caller to not deconstruct the object.
+ */
+	if (WARN_ONCE(cnt >= RCUREF_LONG_RELEASED, "rcuref_long - imbalanced put()")) {
+ atomic_long_set(&ref->refcnt, RCUREF_LONG_DEAD);
+ return false;
+ }
+
+ /*
+ * This is a put() operation on a saturated refcount. Restore the
+ * mean saturation value and tell the caller to not deconstruct the
+ * object.
+ */
+ if (cnt > RCUREF_LONG_MAXREF)
+ atomic_long_set(&ref->refcnt, RCUREF_LONG_SATURATED);
+ return false;
+}
+EXPORT_SYMBOL_GPL(rcuref_long_put_slowpath);
Add a variant of the rcuref helpers that operates on atomic_long_t
instead of atomic_t so rcuref can be used for data structures that
require atomic_long_t.

Signed-off-by: Christian Brauner <brauner@kernel.org>
---
 include/linux/rcuref_long.h | 201 ++++++++++++++++++++++++++++++++++++++++++++
 lib/rcuref.c                | 104 ++++++++++++++++++++++++++++
 2 files changed, 305 insertions(+)