Diffstat (limited to 'drivers/md/dm-bufio.c')
-rw-r--r--   drivers/md/dm-bufio.c   71
1 file changed, 50 insertions(+), 21 deletions(-)
diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c
index 5ffa1dcf84cf..09c7ed2650ca 100644
--- a/drivers/md/dm-bufio.c
+++ b/drivers/md/dm-bufio.c
@@ -18,6 +18,7 @@
#include <linux/module.h>
#include <linux/rbtree.h>
#include <linux/stacktrace.h>
+#include <linux/jump_label.h>
#define DM_MSG_PREFIX "bufio"
@@ -81,6 +82,8 @@
*/
struct dm_bufio_client {
struct mutex lock;
+ spinlock_t spinlock;
+ bool no_sleep;
struct list_head lru[LIST_SIZE];
unsigned long n_buffers[LIST_SIZE];
@@ -90,7 +93,6 @@ struct dm_bufio_client {
s8 sectors_per_block_bits;
void (*alloc_callback)(struct dm_buffer *);
void (*write_callback)(struct dm_buffer *);
-
struct kmem_cache *slab_buffer;
struct kmem_cache *slab_cache;
struct dm_io_client *dm_io;
@@ -161,23 +163,34 @@ struct dm_buffer {
#endif
};
+static DEFINE_STATIC_KEY_FALSE(no_sleep_enabled);
+
/*----------------------------------------------------------------*/
#define dm_bufio_in_request() (!!current->bio_list)
static void dm_bufio_lock(struct dm_bufio_client *c)
{
- mutex_lock_nested(&c->lock, dm_bufio_in_request());
+ if (static_branch_unlikely(&no_sleep_enabled) && c->no_sleep)
+ spin_lock_bh(&c->spinlock);
+ else
+ mutex_lock_nested(&c->lock, dm_bufio_in_request());
}
static int dm_bufio_trylock(struct dm_bufio_client *c)
{
- return mutex_trylock(&c->lock);
+ if (static_branch_unlikely(&no_sleep_enabled) && c->no_sleep)
+ return spin_trylock_bh(&c->spinlock);
+ else
+ return mutex_trylock(&c->lock);
}
static void dm_bufio_unlock(struct dm_bufio_client *c)
{
- mutex_unlock(&c->lock);
+ if (static_branch_unlikely(&no_sleep_enabled) && c->no_sleep)
+ spin_unlock_bh(&c->spinlock);
+ else
+ mutex_unlock(&c->lock);
}
/*----------------------------------------------------------------*/
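The locking helpers above pick between the client mutex and a bottom-half-disabling spinlock, gated by a global static key so clients that never request no-sleep behaviour keep a single patched-out branch on the hot path. A minimal self-contained sketch of that jump-label lifecycle, using illustrative names (my_client, fast_path_enabled) rather than the patch's own:

#include <linux/jump_label.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>

static DEFINE_STATIC_KEY_FALSE(fast_path_enabled);

struct my_client {
	struct mutex lock;
	spinlock_t spinlock;
	bool no_sleep;
};

/* Creation: raise the key only when a no-sleep client actually exists. */
static void my_client_init(struct my_client *c, bool no_sleep)
{
	mutex_init(&c->lock);
	spin_lock_init(&c->spinlock);
	c->no_sleep = no_sleep;
	if (no_sleep)
		static_branch_inc(&fast_path_enabled);
}

/* Hot path: the static branch is a no-op until the key has been raised;
 * the unlock helper mirrors this with spin_unlock_bh()/mutex_unlock(). */
static void my_client_lock(struct my_client *c)
{
	if (static_branch_unlikely(&fast_path_enabled) && c->no_sleep)
		spin_lock_bh(&c->spinlock);
	else
		mutex_lock(&c->lock);
}

/* Destruction: drop the key reference so the branch can be patched out. */
static void my_client_destroy(struct my_client *c)
{
	if (c->no_sleep)
		static_branch_dec(&fast_path_enabled);
	mutex_destroy(&c->lock);
}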
@@ -577,13 +590,12 @@ static void dmio_complete(unsigned long error, void *context)
b->end_io(b, unlikely(error != 0) ? BLK_STS_IOERR : 0);
}
-static void use_dmio(struct dm_buffer *b, int rw, sector_t sector,
+static void use_dmio(struct dm_buffer *b, enum req_op op, sector_t sector,
unsigned n_sectors, unsigned offset)
{
int r;
struct dm_io_request io_req = {
- .bi_op = rw,
- .bi_op_flags = 0,
+ .bi_opf = op,
.notify.fn = dmio_complete,
.notify.context = b,
.client = b->c->dm_io,
@@ -616,7 +628,7 @@ static void bio_complete(struct bio *bio)
b->end_io(b, status);
}
-static void use_bio(struct dm_buffer *b, int rw, sector_t sector,
+static void use_bio(struct dm_buffer *b, enum req_op op, sector_t sector,
unsigned n_sectors, unsigned offset)
{
struct bio *bio;
@@ -630,10 +642,10 @@ static void use_bio(struct dm_buffer *b, int rw, sector_t sector,
bio = bio_kmalloc(vec_size, GFP_NOWAIT | __GFP_NORETRY | __GFP_NOWARN);
if (!bio) {
dmio:
- use_dmio(b, rw, sector, n_sectors, offset);
+ use_dmio(b, op, sector, n_sectors, offset);
return;
}
- bio_init(bio, b->c->bdev, bio->bi_inline_vecs, vec_size, rw);
+ bio_init(bio, b->c->bdev, bio->bi_inline_vecs, vec_size, op);
bio->bi_iter.bi_sector = sector;
bio->bi_end_io = bio_complete;
bio->bi_private = b;
@@ -669,7 +681,8 @@ static inline sector_t block_to_sector(struct dm_bufio_client *c, sector_t block
return sector;
}
-static void submit_io(struct dm_buffer *b, int rw, void (*end_io)(struct dm_buffer *, blk_status_t))
+static void submit_io(struct dm_buffer *b, enum req_op op,
+ void (*end_io)(struct dm_buffer *, blk_status_t))
{
unsigned n_sectors;
sector_t sector;
@@ -679,7 +692,7 @@ static void submit_io(struct dm_buffer *b, int rw, void (*end_io)(struct dm_buff
sector = block_to_sector(b->c, b->block);
- if (rw != REQ_OP_WRITE) {
+ if (op != REQ_OP_WRITE) {
n_sectors = b->c->block_size >> SECTOR_SHIFT;
offset = 0;
} else {
@@ -698,9 +711,9 @@ static void submit_io(struct dm_buffer *b, int rw, void (*end_io)(struct dm_buff
}
if (b->data_mode != DATA_MODE_VMALLOC)
- use_bio(b, rw, sector, n_sectors, offset);
+ use_bio(b, op, sector, n_sectors, offset);
else
- use_dmio(b, rw, sector, n_sectors, offset);
+ use_dmio(b, op, sector, n_sectors, offset);
}
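With submit_io() and its helpers taking enum req_op instead of a plain int, the operation argument is now type-distinct from a combined bi_opf value, so tools such as sparse can flag call sites that mix operation and flag bits. Hedged, illustrative call sites (read_endio/write_endio are the file's completion callbacks, not shown in this excerpt):

	submit_io(b, REQ_OP_READ, read_endio);    /* reads cover the whole block */
	submit_io(b, REQ_OP_WRITE, write_endio);  /* writes cover only the dirty range */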
/*----------------------------------------------------------------
@@ -802,6 +815,10 @@ static struct dm_buffer *__get_unclaimed_buffer(struct dm_bufio_client *c)
BUG_ON(test_bit(B_WRITING, &b->state));
BUG_ON(test_bit(B_DIRTY, &b->state));
+ if (static_branch_unlikely(&no_sleep_enabled) && c->no_sleep &&
+ unlikely(test_bit(B_READING, &b->state)))
+ continue;
+
if (!b->hold_count) {
__make_buffer_clean(b);
__unlink_buffer(b);
@@ -810,6 +827,9 @@ static struct dm_buffer *__get_unclaimed_buffer(struct dm_bufio_client *c)
cond_resched();
}
+ if (static_branch_unlikely(&no_sleep_enabled) && c->no_sleep)
+ return NULL;
+
list_for_each_entry_reverse(b, &c->lru[LIST_DIRTY], lru_list) {
BUG_ON(test_bit(B_READING, &b->state));
@@ -1341,8 +1361,7 @@ EXPORT_SYMBOL_GPL(dm_bufio_write_dirty_buffers);
int dm_bufio_issue_flush(struct dm_bufio_client *c)
{
struct dm_io_request io_req = {
- .bi_op = REQ_OP_WRITE,
- .bi_op_flags = REQ_PREFLUSH | REQ_SYNC,
+ .bi_opf = REQ_OP_WRITE | REQ_PREFLUSH | REQ_SYNC,
.mem.type = DM_IO_KMEM,
.mem.ptr.addr = NULL,
.client = c->dm_io,
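The two-field .bi_op/.bi_op_flags pair collapses into a single .bi_opf, where the low REQ_OP_MASK bits select the operation and the upper bits carry modifiers such as REQ_PREFLUSH and REQ_SYNC. A hedged sketch of splitting such a value back apart (the helper names are hypothetical, not part of the patch):

#include <linux/blk_types.h>

/* Hypothetical helpers: recover the op and test a modifier from a combined
 * request-flags value like the one built for the flush above. */
static inline enum req_op opf_to_op(blk_opf_t opf)
{
	return opf & REQ_OP_MASK;
}

static inline bool opf_is_preflush_write(blk_opf_t opf)
{
	return opf_to_op(opf) == REQ_OP_WRITE && (opf & REQ_PREFLUSH);
}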
@@ -1365,8 +1384,7 @@ EXPORT_SYMBOL_GPL(dm_bufio_issue_flush);
int dm_bufio_issue_discard(struct dm_bufio_client *c, sector_t block, sector_t count)
{
struct dm_io_request io_req = {
- .bi_op = REQ_OP_DISCARD,
- .bi_op_flags = REQ_SYNC,
+ .bi_opf = REQ_OP_DISCARD | REQ_SYNC,
.mem.type = DM_IO_KMEM,
.mem.ptr.addr = NULL,
.client = c->dm_io,
@@ -1619,7 +1637,8 @@ static void drop_buffers(struct dm_bufio_client *c)
*/
static bool __try_evict_buffer(struct dm_buffer *b, gfp_t gfp)
{
- if (!(gfp & __GFP_FS)) {
+ if (!(gfp & __GFP_FS) ||
+ (static_branch_unlikely(&no_sleep_enabled) && b->c->no_sleep)) {
if (test_bit(B_READING, &b->state) ||
test_bit(B_WRITING, &b->state) ||
test_bit(B_DIRTY, &b->state))
@@ -1717,7 +1736,8 @@ static unsigned long dm_bufio_shrink_count(struct shrinker *shrink, struct shrin
struct dm_bufio_client *dm_bufio_client_create(struct block_device *bdev, unsigned block_size,
unsigned reserved_buffers, unsigned aux_size,
void (*alloc_callback)(struct dm_buffer *),
- void (*write_callback)(struct dm_buffer *))
+ void (*write_callback)(struct dm_buffer *),
+ unsigned int flags)
{
int r;
struct dm_bufio_client *c;
@@ -1747,12 +1767,18 @@ struct dm_bufio_client *dm_bufio_client_create(struct block_device *bdev, unsign
c->alloc_callback = alloc_callback;
c->write_callback = write_callback;
+ if (flags & DM_BUFIO_CLIENT_NO_SLEEP) {
+ c->no_sleep = true;
+ static_branch_inc(&no_sleep_enabled);
+ }
+
for (i = 0; i < LIST_SIZE; i++) {
INIT_LIST_HEAD(&c->lru[i]);
c->n_buffers[i] = 0;
}
mutex_init(&c->lock);
+ spin_lock_init(&c->spinlock);
INIT_LIST_HEAD(&c->reserved_buffers);
c->need_reserved_buffers = reserved_buffers;
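With the added flags parameter, a target that needs to take bufio locks from atomic context can pass DM_BUFIO_CLIENT_NO_SLEEP at creation time; everyone else passes 0 and keeps the mutex path. A hedged caller sketch (dev, block_size and want_atomic are illustrative, and the error handling is minimal):

	struct dm_bufio_client *bufio;

	bufio = dm_bufio_client_create(dev->bdev, block_size,
				       1 /* reserved_buffers */, 0 /* aux_size */,
				       NULL, NULL,
				       want_atomic ? DM_BUFIO_CLIENT_NO_SLEEP : 0);
	if (IS_ERR(bufio))
		return PTR_ERR(bufio);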
@@ -1806,7 +1832,8 @@ struct dm_bufio_client *dm_bufio_client_create(struct block_device *bdev, unsign
c->shrinker.scan_objects = dm_bufio_shrink_scan;
c->shrinker.seeks = 1;
c->shrinker.batch = 0;
- r = register_shrinker(&c->shrinker);
+ r = register_shrinker(&c->shrinker, "md-%s:(%u:%u)", slab_name,
+ MAJOR(bdev->bd_dev), MINOR(bdev->bd_dev));
if (r)
goto bad;
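register_shrinker() now takes a printf-style name, which here combines the slab name with the major:minor of the underlying device; the name identifies the shrinker in tracing and, on kernels built with shrinker debugfs support, in debugfs. A hedged example of the general form (my_shrinker and instance_name are illustrative):

	r = register_shrinker(&my_shrinker, "my-cache:%s", instance_name);
	if (r)
		return r;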
@@ -1878,6 +1905,8 @@ void dm_bufio_client_destroy(struct dm_bufio_client *c)
kmem_cache_destroy(c->slab_buffer);
dm_io_client_destroy(c->dm_io);
mutex_destroy(&c->lock);
+ if (c->no_sleep)
+ static_branch_dec(&no_sleep_enabled);
kfree(c);
}
EXPORT_SYMBOL_GPL(dm_bufio_client_destroy);