
[RFC,13/41] random: convert try_to_generate_entropy() to queued_entropy API

Message ID: 20200921075857.4424-14-nstange@suse.de (mailing list archive)
State: Not Applicable
Delegated to: Herbert Xu
Series: random: possible ways towards NIST SP800-90B compliance

Commit Message

Nicolai Stange Sept. 21, 2020, 7:58 a.m. UTC
In an effort to drop __credit_entropy_bits_fast() in favor of the new
__queue_entropy()/__dispatch_queued_entropy_fast() API, convert
try_to_generate_entropy() from the former to the latter.

Replace the call to __credit_entropy_bits_fast() in the timer callback,
entropy_timer(), with a queue_entropy() operation. Dispatch the queued
entropy from the loop in try_to_generate_entropy() by invoking
__dispatch_queued_entropy_fast() after the timestamp has been mixed into
the input_pool.

In order to give both the timer callback and try_to_generate_entropy()
access to a common struct queued_entropy instance, move the currently
anonymous struct definition out of the local 'stack' variable declaration
in try_to_generate_entropy() to file scope and name it
"struct try_to_generate_entropy_stack". Make entropy_timer() obtain a
pointer to the corresponding instance by means of container_of() on the
->timer member embedded therein. Extend struct
try_to_generate_entropy_stack with a new member ->q of type struct
queued_entropy.
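
Condensed from the patch below for reference, the producer side then
looks as follows (queue_entropy() and struct queued_entropy being the
primitives introduced earlier in this series):

  struct try_to_generate_entropy_stack {
          unsigned long now;
          struct timer_list timer;
          struct queued_entropy q;        /* shared between timer and loop */
  };

  static void entropy_timer(struct timer_list *t)
  {
          struct try_to_generate_entropy_stack *stack;

          /* Recover the on-stack instance from the embedded ->timer. */
          stack = container_of(t, struct try_to_generate_entropy_stack, timer);
          /* Queue one bit of credit, in ENTROPY_SHIFT fractional units. */
          queue_entropy(&input_pool, &stack->q, 1 << ENTROPY_SHIFT);
  }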

Note that the described scheme alters behaviour a bit: first of all, new
entropy credit now only gets dispatched to the pool after the actual
mixing has completed, rather than in an unsynchronized manner directly
from the timer callback. As the mixing loop in try_to_generate_entropy()
is expected to run at a higher frequency than the timer, this is unlikely
to make any difference in practice.

Furthermore, the pool entropy watermark as tracked over the period from
queuing the entropy in the timer callback to its subsequent dispatch
from try_to_generate_entropy() is now taken into account when calculating
the actual credit at dispatch. In consequence, the amount of new entropy
dispatched to the pool will potentially be lowered if said period happens
to overlap with the pool extraction from an initial crng_reseed() on the
primary_crng. However, as getting the primary_crng seeded is the whole
point of the try_to_generate_entropy() exercise, this won't matter.

Note that instead of calling queue_entropy() from the timer callback,
an alternative would have been to maintain an invocation counter there and
to queue the accumulated amount from try_to_generate_entropy() right
before the mix operation. This would have reduced the described effect of
the pool's entropy watermark and would in fact have matched the intended
queue_entropy() API usage better. However, in this particular case of
try_to_generate_entropy(), jitter is desired, and invoking queue_entropy()
with its buffer locking etc. from the timer callback can potentially
contribute to that.
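
For illustration only, that alternative could have looked roughly like
the sketch below. It is not part of this patch; the ->num_timer_fires
member and the assumption that queue_entropy() accepts an accumulated
multi-bit amount are hypothetical:

  /* Hypothetical variant, not implemented by this patch: */
  static void entropy_timer(struct timer_list *t)
  {
          struct try_to_generate_entropy_stack *stack;

          stack = container_of(t, struct try_to_generate_entropy_stack, timer);
          /* Only count the invocation, don't queue anything from here. */
          atomic_inc(&stack->num_timer_fires);
  }

  /* In the try_to_generate_entropy() loop, right before the mixing: */
  int nfires = atomic_xchg(&stack.num_timer_fires, 0);

  queue_entropy(&input_pool, &stack.q, nfires << ENTROPY_SHIFT);
  spin_lock_irqsave(&input_pool.lock, flags);
  __mix_pool_bytes(&input_pool, &stack.now, sizeof(stack.now));
  reseed = __dispatch_queued_entropy_fast(&input_pool, &stack.q);
  spin_unlock_irqrestore(&input_pool.lock, flags);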

Signed-off-by: Nicolai Stange <nstange@suse.de>
---
 drivers/char/random.c | 42 +++++++++++++++++++++++++++++-------------
 1 file changed, 29 insertions(+), 13 deletions(-)

Patch

diff --git a/drivers/char/random.c b/drivers/char/random.c
index bd3774c6be4b..dfbe49fdbcf1 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -1911,6 +1911,12 @@  void get_random_bytes(void *buf, int nbytes)
 EXPORT_SYMBOL(get_random_bytes);
 
 
+struct try_to_generate_entropy_stack {
+	unsigned long now;
+	struct timer_list timer;
+	struct queued_entropy q;
+};
+
 /*
  * Each time the timer fires, we expect that we got an unpredictable
  * jump in the cycle counter. Even if the timer is running on another
@@ -1926,14 +1932,10 @@  EXPORT_SYMBOL(get_random_bytes);
  */
 static void entropy_timer(struct timer_list *t)
 {
-	bool reseed;
-	unsigned long flags;
+	struct try_to_generate_entropy_stack *stack;
 
-	spin_lock_irqsave(&input_pool.lock, flags);
-	reseed = __credit_entropy_bits_fast(&input_pool, 1);
-	spin_unlock_irqrestore(&input_pool.lock, flags);
-	if (reseed)
-		crng_reseed(&primary_crng, &input_pool);
+	stack = container_of(t, struct try_to_generate_entropy_stack, timer);
+	queue_entropy(&input_pool, &stack->q, 1 << ENTROPY_SHIFT);
 }
 
 /*
@@ -1942,10 +1944,9 @@  static void entropy_timer(struct timer_list *t)
  */
 static void try_to_generate_entropy(void)
 {
-	struct {
-		unsigned long now;
-		struct timer_list timer;
-	} stack;
+	struct try_to_generate_entropy_stack stack = { 0 };
+	unsigned long flags;
+	bool reseed;
 
 	stack.now = random_get_entropy();
 
@@ -1957,14 +1958,29 @@  static void try_to_generate_entropy(void)
 	while (!crng_ready()) {
 		if (!timer_pending(&stack.timer))
 			mod_timer(&stack.timer, jiffies+1);
-		mix_pool_bytes(&input_pool, &stack.now, sizeof(stack.now));
+		spin_lock_irqsave(&input_pool.lock, flags);
+		__mix_pool_bytes(&input_pool, &stack.now, sizeof(stack.now));
+		reseed = __dispatch_queued_entropy_fast(&input_pool, &stack.q);
+		spin_unlock_irqrestore(&input_pool.lock, flags);
+
+		if (reseed)
+			crng_reseed(&primary_crng, &input_pool);
+
 		schedule();
 		stack.now = random_get_entropy();
 	}
 
 	del_timer_sync(&stack.timer);
 	destroy_timer_on_stack(&stack.timer);
-	mix_pool_bytes(&input_pool, &stack.now, sizeof(stack.now));
+	spin_lock_irqsave(&input_pool.lock, flags);
+	__mix_pool_bytes(&input_pool, &stack.now, sizeof(stack.now));
+	/*
+	 * Must be called here once more in order to complete a
+	 * previously unmatched queue_entropy() from entropy_timer(),
+	 * if any.
+	 */
+	__dispatch_queued_entropy_fast(&input_pool, &stack.q);
+	spin_unlock_irqrestore(&input_pool.lock, flags);
 }
 
 /*