Diffstat (limited to 'drivers/char/random.c')
-rw-r--r--	drivers/char/random.c	51
1 file changed, 46 insertions, 5 deletions
diff --git a/drivers/char/random.c b/drivers/char/random.c
index 0ab024918907..01a260f67437 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -1,6 +1,9 @@
 /*
  * random.c -- A strong random number generator
  *
+ * Copyright (C) 2017 Jason A. Donenfeld <[email protected]>. All
+ * Rights Reserved.
+ *
  * Copyright Matt Mackall <[email protected]>, 2003, 2004, 2005
  *
  * Copyright Theodore Ts'o, 1994, 1995, 1996, 1997, 1998, 1999.  All
@@ -762,6 +765,8 @@ static DECLARE_WAIT_QUEUE_HEAD(crng_init_wait);
 static struct crng_state **crng_node_pool __read_mostly;
 #endif
 
+static void invalidate_batched_entropy(void);
+
 static void crng_initialize(struct crng_state *crng)
 {
 	int		i;
@@ -798,12 +803,13 @@ static int crng_fast_load(const char *cp, size_t len)
 		p[crng_init_cnt % CHACHA20_KEY_SIZE] ^= *cp;
 		cp++; crng_init_cnt++; len--;
 	}
+	spin_unlock_irqrestore(&primary_crng.lock, flags);
 	if (crng_init_cnt >= CRNG_INIT_CNT_THRESH) {
+		invalidate_batched_entropy();
 		crng_init = 1;
 		wake_up_interruptible(&crng_init_wait);
 		pr_notice("random: fast init done\n");
 	}
-	spin_unlock_irqrestore(&primary_crng.lock, flags);
 	return 1;
 }
 
@@ -835,13 +841,14 @@ static void crng_reseed(struct crng_state *crng, struct entropy_store *r)
 	}
 	memzero_explicit(&buf, sizeof(buf));
 	crng->init_time = jiffies;
+	spin_unlock_irqrestore(&primary_crng.lock, flags);
 	if (crng == &primary_crng && crng_init < 2) {
+		invalidate_batched_entropy();
 		crng_init = 2;
 		process_random_ready_list();
 		wake_up_interruptible(&crng_init_wait);
 		pr_notice("random: crng init done\n");
 	}
-	spin_unlock_irqrestore(&primary_crng.lock, flags);
 }
 
 static inline void crng_wait_ready(void)
@@ -1097,12 +1104,16 @@ static void add_interrupt_bench(cycles_t start)
 static __u32 get_reg(struct fast_pool *f, struct pt_regs *regs)
 {
 	__u32 *ptr = (__u32 *) regs;
+	unsigned int idx;
 
 	if (regs == NULL)
 		return 0;
-	if (f->reg_idx >= sizeof(struct pt_regs) / sizeof(__u32))
-		f->reg_idx = 0;
-	return *(ptr + f->reg_idx++);
+	idx = READ_ONCE(f->reg_idx);
+	if (idx >= sizeof(struct pt_regs) / sizeof(__u32))
+		idx = 0;
+	ptr += idx++;
+	WRITE_ONCE(f->reg_idx, idx);
+	return *ptr;
 }
 
 void add_interrupt_randomness(int irq, int irq_flags)
@@ -2019,6 +2030,7 @@ struct batched_entropy {
 	};
 	unsigned int position;
 };
+static rwlock_t batched_entropy_reset_lock = __RW_LOCK_UNLOCKED(batched_entropy_reset_lock);
 
 /*
  * Get a random word for internal kernel use only. The quality of the random
@@ -2029,6 +2041,8 @@ static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_u64);
 u64 get_random_u64(void)
 {
 	u64 ret;
+	bool use_lock = READ_ONCE(crng_init) < 2;
+	unsigned long flags = 0;
 	struct batched_entropy *batch;
 
 #if BITS_PER_LONG == 64
@@ -2041,11 +2055,15 @@ u64 get_random_u64(void)
 #endif
 
 	batch = &get_cpu_var(batched_entropy_u64);
+	if (use_lock)
+		read_lock_irqsave(&batched_entropy_reset_lock, flags);
 	if (batch->position % ARRAY_SIZE(batch->entropy_u64) == 0) {
 		extract_crng((u8 *)batch->entropy_u64);
 		batch->position = 0;
 	}
 	ret = batch->entropy_u64[batch->position++];
+	if (use_lock)
+		read_unlock_irqrestore(&batched_entropy_reset_lock, flags);
 	put_cpu_var(batched_entropy_u64);
 	return ret;
 }
@@ -2055,22 +2073,45 @@ static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_u32);
 u32 get_random_u32(void)
 {
 	u32 ret;
+	bool use_lock = READ_ONCE(crng_init) < 2;
+	unsigned long flags = 0;
 	struct batched_entropy *batch;
 
 	if (arch_get_random_int(&ret))
 		return ret;
 
 	batch = &get_cpu_var(batched_entropy_u32);
+	if (use_lock)
+		read_lock_irqsave(&batched_entropy_reset_lock, flags);
 	if (batch->position % ARRAY_SIZE(batch->entropy_u32) == 0) {
 		extract_crng((u8 *)batch->entropy_u32);
 		batch->position = 0;
 	}
 	ret = batch->entropy_u32[batch->position++];
+	if (use_lock)
+		read_unlock_irqrestore(&batched_entropy_reset_lock, flags);
 	put_cpu_var(batched_entropy_u32);
 	return ret;
 }
 EXPORT_SYMBOL(get_random_u32);
 
+/* It's important to invalidate all potential batched entropy that might
+ * be stored before the crng is initialized, which we can do lazily by
+ * simply resetting the counter to zero so that it's re-extracted on the
+ * next usage. */
+static void invalidate_batched_entropy(void)
+{
+	int cpu;
+	unsigned long flags;
+
+	write_lock_irqsave(&batched_entropy_reset_lock, flags);
+	for_each_possible_cpu (cpu) {
+		per_cpu_ptr(&batched_entropy_u32, cpu)->position = 0;
+		per_cpu_ptr(&batched_entropy_u64, cpu)->position = 0;
+	}
+	write_unlock_irqrestore(&batched_entropy_reset_lock, flags);
+}
+
 /**
  * randomize_page - Generate a random, page aligned address
  * @start:	The smallest acceptable address the caller will take.
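
The central change in this diff: crng_fast_load() and crng_reseed() now raise crng_init outside primary_crng.lock and call invalidate_batched_entropy(), which takes batched_entropy_reset_lock for writing and zeroes every per-CPU batch position, while get_random_u32()/get_random_u64() take the same lock for reading only while crng_init < 2. The effect is that words batched from the not-yet-seeded generator are discarded and re-extracted once real initialization completes. Below is a minimal userspace sketch of that pattern, with pthread rwlocks and C11 atomics standing in for the kernel's rwlock_t and READ_ONCE; the names batch_t, refill_batch, and NBATCHES are hypothetical stand-ins for the kernel's per-CPU structures, not actual kernel identifiers.

/*
 * Userspace sketch of the invalidation scheme above (not kernel code).
 * Each consumer owns one batch of precomputed words; a reset path
 * lazily invalidates every batch by zeroing its position, forcing a
 * refill on next use.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdint.h>
#include <stdlib.h>

#define NBATCHES 4
#define WORDS    16

typedef struct {
	uint64_t entropy[WORDS];
	unsigned int position;	/* position % WORDS == 0 means "refill first" */
} batch_t;

static batch_t batches[NBATCHES];
static pthread_rwlock_t reset_lock = PTHREAD_RWLOCK_INITIALIZER;
static atomic_int generator_ready;	/* stands in for crng_init >= 2 */

static void refill_batch(batch_t *b)
{
	for (unsigned int i = 0; i < WORDS; i++)
		b->entropy[i] = ((uint64_t)rand() << 32) | (uint32_t)rand(); /* placeholder source */
}

uint64_t get_random_word(int id)	/* each thread passes its own batch id */
{
	batch_t *b = &batches[id];
	int use_lock = !atomic_load(&generator_ready);
	uint64_t ret;

	/* The fast path is lock-free once the generator is fully seeded. */
	if (use_lock)
		pthread_rwlock_rdlock(&reset_lock);
	if (b->position % WORDS == 0) {
		refill_batch(b);
		b->position = 0;
	}
	ret = b->entropy[b->position++];
	if (use_lock)
		pthread_rwlock_unlock(&reset_lock);
	return ret;
}

/* Called when the generator becomes properly seeded: take the write
 * side so no consumer is mid-batch, then zero every position so stale
 * pre-seed words are re-extracted rather than handed out. */
void invalidate_all_batches(void)
{
	pthread_rwlock_wrlock(&reset_lock);
	for (int i = 0; i < NBATCHES; i++)
		batches[i].position = 0;
	pthread_rwlock_unlock(&reset_lock);
	atomic_store(&generator_ready, 1);
}

As in the kernel hunks, a consumer that observes the generator as ready skips the lock entirely; that is safe because invalidation only happens at the seeded transition, never afterwards, so readers and the resetter can only overlap while the lock is still being taken.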
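The get_reg() hunk is a smaller fix riding in the same diff: instead of testing and incrementing f->reg_idx through the pointer, where a nested interrupt could observe a torn or out-of-range cursor, the index is read once into a local, bounded, advanced, and published with a single store. A sketch of that pattern follows, using C11 relaxed atomics in place of the kernel's READ_ONCE/WRITE_ONCE; NREGS is a hypothetical stand-in for sizeof(struct pt_regs) / sizeof(__u32).

#include <stdatomic.h>
#include <stdint.h>

#define NREGS 32	/* hypothetical register-file size in 32-bit words */

static uint32_t reg_words[NREGS];
static _Atomic unsigned int reg_idx;

uint32_t get_reg_sketch(void)
{
	/* Load the cursor exactly once; the bounds check and increment
	 * operate on the local copy, so a concurrent caller can never
	 * act on a half-updated index. */
	unsigned int idx = atomic_load_explicit(&reg_idx, memory_order_relaxed);
	uint32_t *ptr = reg_words;

	if (idx >= NREGS)
		idx = 0;
	ptr += idx++;
	/* Publish the advanced cursor with a single store. */
	atomic_store_explicit(&reg_idx, idx, memory_order_relaxed);
	return *ptr;
}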