| author | Greg Kroah-Hartman <[email protected]> | 2016-05-09 09:34:49 +0200 |
|---|---|---|
| committer | Greg Kroah-Hartman <[email protected]> | 2016-05-09 09:34:49 +0200 |
| commit | 725d0123dfff3d7b666cf57f5d29c50addfc99d3 (patch) | |
| tree | 12ec63b2c3965033f30185b4b2f5f12039a9a242 /kernel/workqueue.c | |
| parent | 8cd1f9d01cbf4f46eace90d9792e65a209c37d31 (diff) | |
| parent | 44549e8f5eea4e0a41b487b63e616cb089922b99 (diff) | |
Merge 4.6-rc7 into char-misc-testing
This resolves a merge issue with drivers/hv/ring_buffer.c
Signed-off-by: Greg Kroah-Hartman <[email protected]>
Diffstat (limited to 'kernel/workqueue.c')
| -rw-r--r-- | kernel/workqueue.c | 29 |
1 file changed, 29 insertions, 0 deletions
```diff
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 2232ae3e3ad6..3bfdff06eea7 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -666,6 +666,35 @@ static void set_work_pool_and_clear_pending(struct work_struct *work,
 	 */
 	smp_wmb();
 	set_work_data(work, (unsigned long)pool_id << WORK_OFFQ_POOL_SHIFT, 0);
+	/*
+	 * The following mb guarantees that the previous clear of a PENDING
+	 * bit will not be reordered with any speculative LOADS or STORES
+	 * from work->current_func, which is executed afterwards.  This
+	 * possible reordering can lead to a missed execution on an attempt
+	 * to queue the same @work.  E.g. consider this case:
+	 *
+	 *   CPU#0                         CPU#1
+	 *   ----------------------------  --------------------------------
+	 *
+	 * 1  STORE event_indicated
+	 * 2  queue_work_on() {
+	 * 3    test_and_set_bit(PENDING)
+	 * 4 }                             set_..._and_clear_pending() {
+	 * 5                                 set_work_data() # clear bit
+	 * 6                                 smp_mb()
+	 * 7                                work->current_func() {
+	 * 8                                  LOAD event_indicated
+	 *                                 }
+	 *
+	 * Without an explicit full barrier the speculative LOAD on line 8
+	 * can be executed before CPU#0 does the STORE on line 1.  If that
+	 * happens, CPU#0 observes that the PENDING bit is still set and does
+	 * not queue a new execution of @work, in the hope that CPU#1 will
+	 * eventually finish the queued @work.  Meanwhile CPU#1 does not see
+	 * that event_indicated is set, because the speculative LOAD was
+	 * executed before the actual STORE.
+	 */
+	smp_mb();
 }
 
 static void clear_work_data(struct work_struct *work)
```
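To make the ordering argument above easier to experiment with outside the kernel, here is a minimal user-space sketch of the same pattern, assuming C11 atomics and pthreads in place of the workqueue primitives. All names (`pending`, `event_indicated`, `producer`, `worker`) are illustrative and do not come from the kernel source; a seq_cst exchange loosely stands in for `test_and_set_bit(PENDING)` on the queueing side, and the explicit fence stands in for the `smp_mb()` added by this patch.

```c
/*
 * Hypothetical user-space analogue of the race described in the comment:
 * 'pending' stands in for the work item's PENDING bit and 'event_indicated'
 * for the data the work function is expected to observe.  This is a sketch,
 * not the real workqueue code.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_bool pending;         /* plays the role of the PENDING bit   */
static atomic_int  event_indicated; /* data published before queueing      */

/* CPU#0 in the diagram: publish the event, then try to (re)queue the work. */
static void *producer(void *arg)
{
	(void)arg;

	atomic_store_explicit(&event_indicated, 1, memory_order_relaxed); /* 1 */

	/* queue_work_on(): only queue if PENDING was previously clear.   2-3 */
	if (atomic_exchange_explicit(&pending, true, memory_order_seq_cst))
		puts("PENDING already set, relying on the running work");
	else
		puts("work queued");

	return NULL;
}

/* CPU#1 in the diagram: clear PENDING, then run the work function. */
static void *worker(void *arg)
{
	(void)arg;

	/* set_..._and_clear_pending(): clear the bit...                    5 */
	atomic_store_explicit(&pending, false, memory_order_relaxed);

	/*
	 * ...and this full fence corresponds to the smp_mb() added by the
	 * patch (6).  Without it, the load of event_indicated below could be
	 * satisfied before the store clearing 'pending' becomes visible, so
	 * the producer could skip queueing while the worker still sees
	 * event_indicated == 0: the update would be lost.
	 */
	atomic_thread_fence(memory_order_seq_cst);

	/* work->current_func():                                           7-8 */
	if (atomic_load_explicit(&event_indicated, memory_order_relaxed))
		puts("event handled");
	else
		puts("event not (yet) visible");

	return NULL;
}

int main(void)
{
	pthread_t p, w;

	pthread_create(&w, NULL, worker, NULL);
	pthread_create(&p, NULL, producer, NULL);
	pthread_join(p, NULL);
	pthread_join(w, NULL);
	return 0;
}
```

Compile with `gcc -pthread` and run. A single run will not reliably reproduce the reordering; the sketch only mirrors the scenario spelled out in the comment, with the fence in `worker()` playing the role of the new barrier.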