@@ -278,8 +278,12 @@ depot_alloc_stack(unsigned long *entries, int size, u32 hash, void **prealloc)
return NULL;
}
- /* Move on to the next pool. */
- pool_index++;
+ /*
+ * Move on to the next pool.
+ * WRITE_ONCE pairs with potential concurrent read in
+ * stack_depot_fetch().
+ */
+ WRITE_ONCE(pool_index, pool_index + 1);
pool_offset = 0;
/*
* If the maximum number of pools is not reached, take note
@@ -502,6 +506,11 @@ unsigned int stack_depot_fetch(depot_stack_handle_t handle,
unsigned long **entries)
{
union handle_parts parts = { .handle = handle };
+ /*
+ * READ_ONCE pairs with potential concurrent write in
+ * depot_alloc_stack().
+ */
+ int pool_index_cached = READ_ONCE(pool_index);
void *pool;
size_t offset = parts.offset << DEPOT_STACK_ALIGN;
struct stack_record *stack;
@@ -510,7 +519,7 @@ unsigned int stack_depot_fetch(depot_stack_handle_t handle,
if (!handle)
return 0;
- if (parts.pool_index > pool_index) {
+ if (parts.pool_index > pool_index_cached) {
WARN(1, "pool index %d out of bounds (%d) for stack id %08x\n",
- parts.pool_index, pool_index, handle);
+ parts.pool_index, pool_index_cached, handle);
return 0;
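
For reference, a minimal user-space sketch of the WRITE_ONCE/READ_ONCE pairing
annotated above. The macro definitions, thread functions, and the standalone
pool_index variable are simplified stand-ins (not the kernel's), shown only to
illustrate the marked-access pattern of one writer publishing the index and one
reader taking a single snapshot of it:

#include <pthread.h>
#include <stdio.h>

/* Simplified stand-ins for the kernel's WRITE_ONCE/READ_ONCE macros. */
#define WRITE_ONCE(x, val)	(*(volatile __typeof__(x) *)&(x) = (val))
#define READ_ONCE(x)		(*(volatile __typeof__(x) *)&(x))

static int pool_index;	/* index of the pool currently in use */

/* Publisher side: the plain increment becomes a marked write. */
static void *writer(void *arg)
{
	WRITE_ONCE(pool_index, pool_index + 1);
	return NULL;
}

/* Consumer side: snapshot the index once with a marked read, then reuse it. */
static void *reader(void *arg)
{
	int pool_index_cached = READ_ONCE(pool_index);

	printf("observed pool_index = %d\n", pool_index_cached);
	return NULL;
}

int main(void)
{
	pthread_t w, r;

	pthread_create(&w, NULL, writer, NULL);
	pthread_create(&r, NULL, reader, NULL);
	pthread_join(w, NULL);
	pthread_join(r, NULL);
	return 0;
}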