@@ -885,6 +885,7 @@ struct fast_pool {
 	unsigned short	reg_idx;
 	unsigned char	count;
 	int		event_entropy_shift;
+	struct queued_entropy	q;
 };
 
 /*
@@ -1655,7 +1656,7 @@ void add_interrupt_randomness(int irq, int irq_flags)
 	__u32			c_high, j_high;
 	__u64			ip;
 	bool			reseed;
-	struct queued_entropy	q = { 0 };
+	struct queued_entropy	*q = &fast_pool->q;
 	unsigned int		nfrac;
 
 	if (cycles == 0)
@@ -1700,9 +1701,9 @@ void add_interrupt_randomness(int irq, int irq_flags)
 		nfrac = fast_pool_entropy(fast_pool->count,
 					  fast_pool->event_entropy_shift);
 	}
-	__queue_entropy(r, &q, nfrac);
+	__queue_entropy(r, q, nfrac);
 	__mix_pool_bytes(r, &fast_pool->pool, sizeof(fast_pool->pool));
-	reseed = __dispatch_queued_entropy_fast(r, &q);
+	reseed = __dispatch_queued_entropy_fast(r, q);
 	spin_unlock(&r->lock);
 
 	fast_pool->last = now;
When health tests are introduced with upcoming patches, it will become
necessary to keep entropy queued across add_interrupt_randomness()
invocations for later dispatch to the global balance. Prepare for this by
adding a struct queued_entropy member to the per-CPU fast_pool and using it
in place of the queue with automatic storage duration previously declared in
add_interrupt_randomness().

Signed-off-by: Nicolai Stange <nstange@suse.de>
---
 drivers/char/random.c | 7 ++++---
 1 file changed, 4 insertions(+), 3 deletions(-)
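
For illustration only, here is a minimal standalone userspace sketch of the
storage change this patch makes; it is not the kernel code, and all names in
it (toy_queued_entropy, toy_fast_pool, handle_irq_event, dispatch_threshold)
are invented. It models a queue that previously lived on the stack of each
handler invocation being moved into per-CPU-style persistent state, so that
entropy queued but not yet dispatched can survive until a later invocation:

/*
 * Toy model of moving a queue from automatic storage duration into the
 * per-CPU fast_pool. Build with: cc -std=c99 -Wall toy.c
 */
#include <stdbool.h>
#include <stdio.h>

struct toy_queued_entropy {
	unsigned int queued_frac;	/* entropy queued, not yet credited */
};

struct toy_fast_pool {
	unsigned int pool;		/* stand-in for the mixing pool */
	struct toy_queued_entropy q;	/* persists across invocations */
};

/* One instance per CPU in the real driver; a single one suffices here. */
static struct toy_fast_pool fast_pool;

static const unsigned int dispatch_threshold = 64;

/* Queue entropy on every event; dispatch it only once enough accumulated. */
static bool handle_irq_event(unsigned int sample, unsigned int nfrac)
{
	struct toy_queued_entropy *q = &fast_pool.q;

	q->queued_frac += nfrac;	/* rough analogue of queueing */
	fast_pool.pool ^= sample;	/* rough analogue of pool mixing */

	if (q->queued_frac < dispatch_threshold)
		return false;		/* stays queued for a later event */

	q->queued_frac = 0;		/* rough analogue of dispatching */
	return true;
}

int main(void)
{
	for (unsigned int i = 1; i <= 8; i++)
		printf("event %u: reseed=%d\n", i,
		       handle_irq_event(i * 0x9e3779b9u, 10));
	return 0;
}

With automatic storage duration, anything still queued when the handler
returns would have to be dispatched or dropped within that same invocation;
embedding the queue in the per-CPU fast_pool lets it carry over to later
interrupts without extra allocation or locking, which is what the upcoming
health-test patches rely on.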