@@ -966,3 +966,86 @@ seq_hlist_next_percpu(void *v, struct hlist_head __percpu *head,
return NULL;
}
EXPORT_SYMBOL(seq_hlist_next_percpu);
+
+/**
+ * seq_hlist_start_percpu_locked - start an iteration of a percpu hlist array
+ * @head: pointer to percpu array of struct hlist_heads
+ * @lock: pointer to percpu spinlock which protects @head
+ * @cpu: pointer to cpu "cursor"
+ * @pos: start position of sequence
+ *
+ * Called at seq_file->op->start(). Returns with the spinlock of the
+ * cpu holding the returned node locked; that lock is released by
+ * seq_hlist_next_percpu_locked() when it moves past the end of the
+ * cpu's list, or by seq_hlist_stop_percpu_locked().
+ */
+struct hlist_node *
+seq_hlist_start_percpu_locked(struct hlist_head __percpu *head,
+ spinlock_t __percpu *lock, int *cpu, loff_t pos)
+{
+ struct hlist_node *node;
+
+ for_each_possible_cpu(*cpu) {
+ spin_lock(per_cpu_ptr(lock, *cpu));
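+ /* a hit returns with this cpu's lock still held for next()/stop() */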
+ hlist_for_each(node, per_cpu_ptr(head, *cpu)) {
+ if (pos-- == 0)
+ return node;
+ }
+ spin_unlock(per_cpu_ptr(lock, *cpu));
+ }
+ return NULL;
+}
+EXPORT_SYMBOL(seq_hlist_start_percpu_locked);
+
+/**
+ * seq_hlist_next_percpu_locked - move to the next position of the percpu hlist array
+ * @v: pointer to current hlist_node
+ * @head: pointer to percpu array of struct hlist_heads
+ * @lock: pointer to percpu spinlock which protects @head
+ * @cpu: pointer to cpu "cursor"
+ * @pos: the current position in the sequence
+ *
+ * Called at seq_file->op->next(). Expects the lock of the current
+ * cpu to be held and returns either with the lock of the next node's
+ * cpu held, or with no lock held once the iteration is finished.
+ */
+struct hlist_node *
+seq_hlist_next_percpu_locked(void *v, struct hlist_head __percpu *head,
+ spinlock_t __percpu *lock,
+ int *cpu, loff_t *pos)
+{
+ struct hlist_node *node = v;
+
+ ++*pos;
+
+ if (node->next)
+ return node->next;
+
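+ /* this cpu's list is exhausted; drop its lock before moving on */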
+ spin_unlock(per_cpu_ptr(lock, *cpu));
+
+ for (*cpu = cpumask_next(*cpu, cpu_possible_mask); *cpu < nr_cpu_ids;
+ *cpu = cpumask_next(*cpu, cpu_possible_mask)) {
+ struct hlist_head *bucket;
+
+ spin_lock(per_cpu_ptr(lock, *cpu));
+ bucket = per_cpu_ptr(head, *cpu);
+
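+ /* hand over to the first node of the next non-empty list, keeping its lock */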
+ if (!hlist_empty(bucket))
+ return bucket->first;
+
+ spin_unlock(per_cpu_ptr(lock, *cpu));
+ }
+ return NULL;
+}
+EXPORT_SYMBOL(seq_hlist_next_percpu_locked);
+
+/**
+ * seq_hlist_stop_percpu_locked - stop iterating over the percpu hlist array
+ * @v: pointer to current hlist_node
+ * @lock: pointer to percpu spinlock which protects the iterated hlist array
+ * @cpu: pointer to cpu "cursor"
+ *
+ * Called at seq_file->op->stop(). Releases the cpu lock still held by
+ * start() or next(), if any.
+ */
+void
+seq_hlist_stop_percpu_locked(void *v, spinlock_t __percpu *lock, int *cpu)
+{
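+ /* v is NULL when the walk already ran off the end; no lock is held then */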
+ if (v)
+ spin_unlock(per_cpu_ptr(lock, *cpu));
+}
+EXPORT_SYMBOL(seq_hlist_stop_percpu_locked);
@@ -184,4 +184,17 @@ extern struct hlist_node *seq_hlist_start_percpu(struct hlist_head __percpu *hea
extern struct hlist_node *seq_hlist_next_percpu(void *v, struct hlist_head __percpu *head, int *cpu, loff_t *pos);
+extern struct hlist_node *seq_hlist_start_percpu_locked(
+ struct hlist_head __percpu *head,
+ spinlock_t __percpu *lock,
+ int *cpu, loff_t pos);
+
+extern struct hlist_node *seq_hlist_next_percpu_locked(
+ void *v, struct hlist_head __percpu *head,
+ spinlock_t __percpu *lock,
+ int *cpu, loff_t *pos);
+
+extern void seq_hlist_stop_percpu_locked(
+ void *v, spinlock_t __percpu *lock, int *cpu);
+
#endif
Introduce a variant of the seq_hlist helpers for iterating over hlists
that are protected by percpu spinlocks.

Signed-off-by: Daniel Wagner <daniel.wagner@bmw-carit.de>
Cc: Jeff Layton <jlayton@poochiereds.net>
Cc: "J. Bruce Fields" <bfields@fieldses.org>
Cc: Alexander Viro <viro@zeniv.linux.org.uk>
---
 fs/seq_file.c            | 83 ++++++++++++++++++++++++++++++++++++++++++++++++
 include/linux/seq_file.h | 13 ++++++++
 2 files changed, 96 insertions(+)
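
As a reviewer aid, not part of the patch: a minimal sketch of how a
caller might wire these helpers into a seq_file. All names here
(demo_*, struct demo_entry) are hypothetical.

#include <linux/list.h>
#include <linux/percpu.h>
#include <linux/seq_file.h>
#include <linux/spinlock.h>

struct demo_entry {
	struct hlist_node node;
	int val;
};

static DEFINE_PER_CPU(struct hlist_head, demo_list);
/* each per-cpu lock still needs spin_lock_init() at init time */
static DEFINE_PER_CPU(spinlock_t, demo_lock);

/* the cpu cursor must survive across the start/next/stop callbacks */
struct demo_iter {
	int cpu;
};

static void *demo_seq_start(struct seq_file *m, loff_t *pos)
{
	struct demo_iter *iter = m->private;

	return seq_hlist_start_percpu_locked(&demo_list, &demo_lock,
					     &iter->cpu, *pos);
}

static void *demo_seq_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct demo_iter *iter = m->private;

	return seq_hlist_next_percpu_locked(v, &demo_list, &demo_lock,
					    &iter->cpu, pos);
}

static void demo_seq_stop(struct seq_file *m, void *v)
{
	struct demo_iter *iter = m->private;

	seq_hlist_stop_percpu_locked(v, &demo_lock, &iter->cpu);
}

static int demo_seq_show(struct seq_file *m, void *v)
{
	struct demo_entry *e = hlist_entry(v, struct demo_entry, node);

	/* runs with the lock of the node's cpu held by start()/next() */
	seq_printf(m, "%d\n", e->val);
	return 0;
}

static const struct seq_operations demo_seq_ops = {
	.start = demo_seq_start,
	.next  = demo_seq_next,
	.stop  = demo_seq_stop,
	.show  = demo_seq_show,
};

The demo_iter would typically be allocated with __seq_open_private()
so that the cpu cursor persists between callbacks.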