Diffstat (limited to 'mm/kfence/core.c')
-rw-r--r--	mm/kfence/core.c	16
1 file changed, 3 insertions, 13 deletions
diff --git a/mm/kfence/core.c b/mm/kfence/core.c
index 141788858b70..5349c37a5dac 100644
--- a/mm/kfence/core.c
+++ b/mm/kfence/core.c
@@ -26,7 +26,6 @@
 #include <linux/random.h>
 #include <linux/rcupdate.h>
 #include <linux/sched/clock.h>
-#include <linux/sched/sysctl.h>
 #include <linux/seq_file.h>
 #include <linux/slab.h>
 #include <linux/spinlock.h>
@@ -360,9 +359,9 @@ static void *kfence_guarded_alloc(struct kmem_cache *cache, size_t size, gfp_t g
 	unsigned long flags;
 	struct slab *slab;
 	void *addr;
-	const bool random_right_allocate = prandom_u32_max(2);
+	const bool random_right_allocate = get_random_u32_below(2);
 	const bool random_fault = CONFIG_KFENCE_STRESS_TEST_FAULTS &&
-				  !prandom_u32_max(CONFIG_KFENCE_STRESS_TEST_FAULTS);
+				  !get_random_u32_below(CONFIG_KFENCE_STRESS_TEST_FAULTS);
 
 	/* Try to obtain a free object. */
 	raw_spin_lock_irqsave(&kfence_freelist_lock, flags);
@@ -799,16 +798,7 @@ static void toggle_allocation_gate(struct work_struct *work)
 	/* Enable static key, and await allocation to happen. */
 	static_branch_enable(&kfence_allocation_key);
 
-	if (sysctl_hung_task_timeout_secs) {
-		/*
-		 * During low activity with no allocations we might wait a
-		 * while; let's avoid the hung task warning.
-		 */
-		wait_event_idle_timeout(allocation_wait, atomic_read(&kfence_allocation_gate),
-					sysctl_hung_task_timeout_secs * HZ / 2);
-	} else {
-		wait_event_idle(allocation_wait, atomic_read(&kfence_allocation_gate));
-	}
+	wait_event_idle(allocation_wait, atomic_read(&kfence_allocation_gate));
 
 	/* Disable static key and reset timer. */
 	static_branch_disable(&kfence_allocation_key);
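
For context on the first hunk: get_random_u32_below(ceil) has the same contract as the prandom_u32_max(ceil) call it replaces, returning a uniformly distributed u32 in [0, ceil), so the conversion is a drop-in rename. A minimal illustrative sketch, not part of the commit (the kfence_example_coin_flip helper name is hypothetical):

#include <linux/random.h>

/* Hypothetical helper, for illustration only: mirrors the converted call site. */
static bool kfence_example_coin_flip(void)
{
	/* Formerly spelled prandom_u32_max(2); same semantics: 0 or 1, uniformly. */
	return get_random_u32_below(2);
}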