path: root/drivers/md/dm-cache-target.c
author    Linus Torvalds <torvalds@linux-foundation.org>  2015-07-29 18:08:48 -0700
committer Linus Torvalds <torvalds@linux-foundation.org>  2015-07-29 18:08:48 -0700
commit    d0db3a397d067a6c542a1a02da004b7921b4570b (patch)
tree      3e0836126e0254688ed0556b669f9380599c0845 /drivers/md/dm-cache-target.c
parent    86ea07ca846a7c352f39dd0b7d81f15f403c7db8 (diff)
parent    795e633a2dc6cbbeac68bc7f6006082150d38bb7 (diff)
Merge tag 'dm-4.2-fixes-3' of git://git.kernel.org/pub/scm/linux/kernel/git/device-mapper/linux-dm
Pull device mapper fixes from Mike Snitzer:

 - fix DM thinp to consistently return -ENOSPC when out of data space

 - fix a logic bug in the DM cache smq policy's creation error path

 - revert a DM cache 4.2-rc3 change that reduced writeback efficiency

 - fix a hang on DM cache device destruction due to improper
   prealloc_used accounting introduced in 4.2-rc3

 - update URL for dm-crypt wiki page

* tag 'dm-4.2-fixes-3' of git://git.kernel.org/pub/scm/linux/kernel/git/device-mapper/linux-dm:
  dm cache: fix device destroy hang due to improper prealloc_used accounting
  Revert "dm cache: do not wake_worker() in free_migration()"
  dm crypt: update wiki page URL
  dm cache policy smq: fix alloc_bitset check that always evaluates as false
  dm thin: return -ENOSPC when erroring retry list due to out of data space
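The prealloc_used hang is easiest to see in the loops changed below: the flag was previously set only after an item had been processed, so a loop that bailed out right after prealloc_data_structs() never reached prealloc_free_structs(), and device destruction could then wait forever on structures still held by the prealloc. The following is a minimal userspace sketch of the corrected accounting, not the driver code itself; the helpers and process_items() are simplified stand-ins for the kernel functions of similar names.

/*
 * Illustrative model of the prealloc_used fix (hypothetical stubs).
 * Setting the flag before the allocation attempt guarantees cleanup
 * runs whenever structures may have been reserved, even on early exit.
 */
#include <stdbool.h>
#include <stdio.h>

struct prealloc { int reserved; };

/* stub: pretend the allocation reserves something and then fails */
static int prealloc_data_structs(struct prealloc *p) { p->reserved = 1; return -1; }
static void prealloc_free_structs(struct prealloc *p) { p->reserved = 0; puts("prealloc released"); }

static void process_items(int nitems)
{
	struct prealloc structs = { 0 };
	bool prealloc_used = false;
	int i;

	for (i = 0; i < nitems; i++) {
		prealloc_used = true;			/* fixed: mark before the allocation attempt */
		if (prealloc_data_structs(&structs))
			break;				/* early exit still reaches the cleanup below */
		/* ... process one deferred item using the preallocated structures ... */
	}

	if (prealloc_used)
		prealloc_free_structs(&structs);	/* no longer skipped when the loop bails out early */
}

int main(void)
{
	process_items(4);
	return 0;
}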
Diffstat (limited to 'drivers/md/dm-cache-target.c')
-rw-r--r--  drivers/md/dm-cache-target.c | 7
1 file changed, 4 insertions(+), 3 deletions(-)
diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c
index b680da5d7b93..1fe93cfea7d3 100644
--- a/drivers/md/dm-cache-target.c
+++ b/drivers/md/dm-cache-target.c
@@ -424,6 +424,7 @@ static void free_migration(struct dm_cache_migration *mg)
wake_up(&cache->migration_wait);
mempool_free(mg, cache->migration_pool);
+ wake_worker(cache);
}
static int prealloc_data_structs(struct cache *cache, struct prealloc *p)
@@ -1966,6 +1967,7 @@ static void process_deferred_bios(struct cache *cache)
* this bio might require one, we pause until there are some
* prepared mappings to process.
*/
+ prealloc_used = true;
if (prealloc_data_structs(cache, &structs)) {
spin_lock_irqsave(&cache->lock, flags);
bio_list_merge(&cache->deferred_bios, &bios);
@@ -1981,7 +1983,6 @@ static void process_deferred_bios(struct cache *cache)
process_discard_bio(cache, &structs, bio);
else
process_bio(cache, &structs, bio);
- prealloc_used = true;
}
if (prealloc_used)
@@ -2010,6 +2011,7 @@ static void process_deferred_cells(struct cache *cache)
* this bio might require one, we pause until there are some
* prepared mappings to process.
*/
+ prealloc_used = true;
if (prealloc_data_structs(cache, &structs)) {
spin_lock_irqsave(&cache->lock, flags);
list_splice(&cells, &cache->deferred_cells);
@@ -2018,7 +2020,6 @@ static void process_deferred_cells(struct cache *cache)
}
process_cell(cache, &structs, cell);
- prealloc_used = true;
}
if (prealloc_used)
@@ -2080,6 +2081,7 @@ static void writeback_some_dirty_blocks(struct cache *cache)
if (policy_writeback_work(cache->policy, &oblock, &cblock, busy))
break; /* no work to do */
+ prealloc_used = true;
if (prealloc_data_structs(cache, &structs) ||
get_cell(cache, oblock, &structs, &old_ocell)) {
policy_set_dirty(cache->policy, oblock);
@@ -2087,7 +2089,6 @@ static void writeback_some_dirty_blocks(struct cache *cache)
}
writeback(cache, &structs, oblock, cblock, old_ocell);
- prealloc_used = true;
}
if (prealloc_used)
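The first hunk above is the revert of "dm cache: do not wake_worker() in free_migration()": once a migration structure goes back to the mempool, the worker is kicked again so deferred work waiting on a free migration slot is retried promptly, which restores the lost writeback efficiency. A minimal userspace sketch of that ordering follows; the struct and the mempool_free()/wake_worker() stubs are hypothetical stand-ins, not the kernel API.

/*
 * Illustrative model of the restored free_migration() ordering:
 * return the slot to the pool first, then re-drive the worker.
 */
#include <stdio.h>

struct cache { int free_migrations; int worker_kicks; };

static void mempool_free(struct cache *c) { c->free_migrations++; }
static void wake_worker(struct cache *c)  { c->worker_kicks++; }

static void free_migration(struct cache *c)
{
	mempool_free(c);	/* migration slot becomes available ... */
	wake_worker(c);		/* ... then wake the worker so deferred bios can use it */
}

int main(void)
{
	struct cache c = { 0, 0 };
	free_migration(&c);
	printf("free=%d kicks=%d\n", c.free_migrations, c.worker_kicks);
	return 0;
}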