@@ -206,7 +206,7 @@ latch_tree_find(void *key, struct latch_
do {
seq = raw_read_seqcount_latch(&root->seq);
node = __lt_find(key, root, seq & 1, ops->comp);
- } while (read_seqcount_latch_retry(&root->seq, seq));
+ } while (raw_read_seqcount_latch_retry(&root->seq, seq));
return node;
}
@@ -671,9 +671,9 @@ typedef struct {
*
* Return: sequence counter raw value. Use the lowest bit as an index for
* picking which data copy to read. The full counter must then be checked
- * with read_seqcount_latch_retry().
+ * with raw_read_seqcount_latch_retry().
*/
-static inline unsigned raw_read_seqcount_latch(const seqcount_latch_t *s)
+static __always_inline unsigned raw_read_seqcount_latch(const seqcount_latch_t *s)
{
/*
* Pairs with the first smp_wmb() in raw_write_seqcount_latch().
@@ -683,16 +683,17 @@ static inline unsigned raw_read_seqcount
}
/**
- * read_seqcount_latch_retry() - end a seqcount_latch_t read section
+ * raw_read_seqcount_latch_retry() - end a seqcount_latch_t read section
* @s: Pointer to seqcount_latch_t
* @start: count, from raw_read_seqcount_latch()
*
* Return: true if a read section retry is required, else false
*/
-static inline int
-read_seqcount_latch_retry(const seqcount_latch_t *s, unsigned start)
+static __always_inline int
+raw_read_seqcount_latch_retry(const seqcount_latch_t *s, unsigned start)
{
- return read_seqcount_retry(&s->seqcount, start);
+ smp_rmb();
+ return unlikely(READ_ONCE(s->seqcount.sequence) != start);
}
/**
@@ -752,7 +753,7 @@ read_seqcount_latch_retry(const seqcount
* entry = data_query(latch->data[idx], ...);
*
* // This includes needed smp_rmb()
- * } while (read_seqcount_latch_retry(&latch->seq, seq));
+ * } while (raw_read_seqcount_latch_retry(&latch->seq, seq));
*
* return entry;
* }
@@ -597,7 +597,7 @@ static u64 latched_seq_read_nolock(struc
seq = raw_read_seqcount_latch(&ls->latch);
idx = seq & 0x1;
val = ls->val[idx];
- } while (read_seqcount_latch_retry(&ls->latch, seq));
+ } while (raw_read_seqcount_latch_retry(&ls->latch, seq));
return val;
}
@@ -77,7 +77,7 @@ notrace struct clock_read_data *sched_cl
notrace int sched_clock_read_retry(unsigned int seq)
{
- return read_seqcount_latch_retry(&cd.seq, seq);
+ return raw_read_seqcount_latch_retry(&cd.seq, seq);
}
unsigned long long notrace sched_clock(void)
@@ -450,7 +450,7 @@ static __always_inline u64 __ktime_get_f
tkr = tkf->base + (seq & 0x01);
now = ktime_to_ns(tkr->base);
now += fast_tk_get_delta_ns(tkr);
- } while (read_seqcount_latch_retry(&tkf->seq, seq));
+ } while (raw_read_seqcount_latch_retry(&tkf->seq, seq));
return now;
}
@@ -566,7 +566,7 @@ static __always_inline u64 __ktime_get_r
basem = ktime_to_ns(tkr->base);
baser = ktime_to_ns(tkr->base_real);
delta = fast_tk_get_delta_ns(tkr);
- } while (read_seqcount_latch_retry(&tkf->seq, seq));
+ } while (raw_read_seqcount_latch_retry(&tkf->seq, seq));
if (mono)
*mono = basem + delta;
The read side of seqcount_latch consists of:

  do {
    seq = raw_read_seqcount_latch(&latch->seq);
    ...
  } while (read_seqcount_latch_retry(&latch->seq, seq));

which is asymmetric in the raw_ department, and sure enough,
read_seqcount_latch_retry() includes (explicit) instrumentation where
raw_read_seqcount_latch() does not. This inconsistency becomes a
problem when trying to use it from noinstr code. As such, fix it by
renaming and re-implementing raw_read_seqcount_latch_retry() without
the instrumentation.

Specifically, the instrumentation in question is kcsan_atomic_next(0)
in do___read_seqcount_retry(). Losing this annotation is not a problem
because raw_read_seqcount_latch() does not pass through
kcsan_atomic_next(KCSAN_SEQLOCK_REGION_MAX).

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
---
 include/linux/rbtree_latch.h |  2 +-
 include/linux/seqlock.h      | 15 ++++++++-------
 kernel/printk/printk.c       |  2 +-
 kernel/time/sched_clock.c    |  2 +-
 kernel/time/timekeeping.c    |  4 ++--
 5 files changed, 13 insertions(+), 12 deletions(-)
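
For reference (not part of the patch), a minimal sketch of the now-symmetric
read side, modelled on the latched_seq reader in kernel/printk/printk.c; the
latched_u64 type, its fields, and latched_u64_read() are hypothetical names
used only to illustrate the pattern:

  #include <linux/seqlock.h>

  struct latched_u64 {
          seqcount_latch_t        latch;
          u64                     val[2];
  };

  static __always_inline u64 latched_u64_read(struct latched_u64 *ls)
  {
          unsigned int seq;
          u64 val;

          do {
                  /* Low bit of the raw count selects the stable copy. */
                  seq = raw_read_seqcount_latch(&ls->latch);
                  val = ls->val[seq & 0x1];
                  /* raw_read_seqcount_latch_retry() provides the smp_rmb() */
          } while (raw_read_seqcount_latch_retry(&ls->latch, seq));

          return val;
  }

Both the initial load and the retry now go through raw_, non-instrumented
accessors, so a loop like this can be used from noinstr code.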