author     Eric Dumazet <[email protected]>          2015-02-26 07:20:34 -0800
committer  David S. Miller <[email protected]>  2015-02-27 17:55:14 -0500
commit     5beb5c90c1f54d745da040aa05634a5830ba4a4c (patch)
tree       4626f1033a05b15b4cd046d066de1011c7a7f67d
parent     061c1a6e367855a9ed1110ba059bc2e7634fd429 (diff)
rhashtable: use cond_resched()
If a hash table has 128 slots and 16384 elements, expanding to 256 slots
takes more than one second. For larger sets, a soft lockup is detected.

Holding the CPU for that long, even in a work queue, is a show stopper
for non-preemptible kernels.

Add cond_resched() at strategic points to allow the process scheduler
to reschedule us.
Signed-off-by: Eric Dumazet <[email protected]>
Acked-by: Daniel Borkmann <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
-rw-r--r--  lib/rhashtable.c | 4
1 file changed, 4 insertions, 0 deletions
diff --git a/lib/rhashtable.c b/lib/rhashtable.c
index 090641db4c0d..b5344ef4c684 100644
--- a/lib/rhashtable.c
+++ b/lib/rhashtable.c
@@ -17,6 +17,7 @@
 #include <linux/kernel.h>
 #include <linux/init.h>
 #include <linux/log2.h>
+#include <linux/sched.h>
 #include <linux/slab.h>
 #include <linux/vmalloc.h>
 #include <linux/mm.h>
@@ -412,6 +413,7 @@ int rhashtable_expand(struct rhashtable *ht)
 			}
 		}
 		unlock_buckets(new_tbl, old_tbl, new_hash);
+		cond_resched();
 	}

 	/* Unzip interleaved hash chains */
@@ -435,6 +437,7 @@ int rhashtable_expand(struct rhashtable *ht)
 				complete = false;

 			unlock_buckets(new_tbl, old_tbl, old_hash);
+			cond_resched();
 		}
 	}

@@ -493,6 +496,7 @@ int rhashtable_shrink(struct rhashtable *ht)
 				   tbl->buckets[new_hash + new_tbl->size]);

 		unlock_buckets(new_tbl, tbl, new_hash);
+		cond_resched();
 	}

 	/* Publish the new, valid hash table */
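
For readers skimming the diff, the pattern being applied is simple: each rehash loop walks every bucket, and the yield happens once per iteration, after the bucket locks have been released. The sketch below is a minimal, hypothetical illustration of that pattern, not the real rhashtable code; struct toy_table, toy_move_bucket() and toy_rehash() are invented for the example, while cond_resched() and <linux/sched.h> are the actual kernel API this commit adds to lib/rhashtable.c.

/*
 * Illustrative sketch only (not from the patch): a simplified rehash loop
 * showing where the yield goes.  The toy_* names are hypothetical.
 */
#include <linux/sched.h>
#include <linux/spinlock.h>

struct toy_table {
	unsigned int	size;
	spinlock_t	*bucket_locks;	/* one lock per bucket (hypothetical) */
};

/* Hypothetical helper: relink one bucket's entries into the new table. */
static void toy_move_bucket(struct toy_table *old_tbl,
			    struct toy_table *new_tbl, unsigned int hash)
{
	/* chain walking omitted in this sketch */
}

static void toy_rehash(struct toy_table *old_tbl, struct toy_table *new_tbl)
{
	unsigned int hash;

	for (hash = 0; hash < old_tbl->size; hash++) {
		spin_lock(&old_tbl->bucket_locks[hash]);
		toy_move_bucket(old_tbl, new_tbl, hash);
		spin_unlock(&old_tbl->bucket_locks[hash]);

		/*
		 * A resize over thousands of elements can hold the CPU for
		 * more than a second; on non-preemptible kernels that trips
		 * the soft lockup detector.  Yield here, once per bucket and
		 * only after the bucket lock has been dropped, so the
		 * scheduler can run something else.
		 */
		cond_resched();
	}
}

The placement matters: cond_resched() may schedule, which is not allowed while a spinlock is held, so the patch calls it only after unlock_buckets(), at the bottom of each per-bucket iteration.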