@@ -183,25 +183,38 @@ void __meminit __shuffle_free_memory(pg_data_t *pgdat)
shuffle_zone(z);
}
+struct batched_bit_entropy {
+ unsigned long entropy_bool;
+ int position;
+};
+
+static DEFINE_PER_CPU(struct batched_bit_entropy, batched_entropy_bool);
+
void add_to_free_area_random(struct page *page, struct free_area *area,
int migratetype)
{
- static u64 rand;
- static u8 rand_bits;
+ struct batched_bit_entropy *batch;
+ unsigned long entropy;
+ int position;
/*
- * The lack of locking is deliberate. If 2 threads race to
- * update the rand state it just adds to the entropy.
+ * Locking is intentionally omitted: the batch is per-CPU, and the only
+ * caller, __free_one_page(), always runs with the zone lock held —
+ * either from IRQ context or with local IRQs disabled.
*/
- if (rand_bits == 0) {
- rand_bits = 64;
- rand = get_random_u64();
+ batch = raw_cpu_ptr(&batched_entropy_bool);
+ position = batch->position;
+
+ if (--position < 0) {
+ batch->entropy_bool = get_random_long();
+ position = BITS_PER_LONG - 1;
}
- if (rand & 1)
+ batch->position = position;
+ entropy = batch->entropy_bool;
+
+ if (1ul & (entropy >> position))
add_to_free_area(page, area, migratetype);
else
add_to_free_area_tail(page, area, migratetype);
- rand_bits--;
- rand >>= 1;
}