Diffstat (limited to 'drivers/block')
-rw-r--r--  drivers/block/amiflop.c               1
-rw-r--r--  drivers/block/floppy.c                2
-rw-r--r--  drivers/block/loop.c                 46
-rw-r--r--  drivers/block/nbd.c                   8
-rw-r--r--  drivers/block/null_blk/zoned.c       16
-rw-r--r--  drivers/block/rbd.c                 150
-rw-r--r--  drivers/block/rnbd/rnbd-clt-sysfs.c   2
-rw-r--r--  drivers/block/ublk_drv.c             11
-rw-r--r--  drivers/block/virtio_blk.c           34
-rw-r--r--  drivers/block/zram/zram_drv.c        32
10 files changed, 193 insertions, 109 deletions
diff --git a/drivers/block/amiflop.c b/drivers/block/amiflop.c
index e460c9799d9f..2b98114a9fe0 100644
--- a/drivers/block/amiflop.c
+++ b/drivers/block/amiflop.c
@@ -1547,7 +1547,6 @@ static int fd_locked_ioctl(struct block_device *bdev, blk_mode_t mode,
rel_fdc();
return -EBUSY;
}
- fsync_bdev(bdev);
if (fd_motor_on(drive) == 0) {
rel_fdc();
return -ENODEV;
diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
index 2db9b186b977..ea4eb88a2e45 100644
--- a/drivers/block/floppy.c
+++ b/drivers/block/floppy.c
@@ -3255,7 +3255,7 @@ static int set_geometry(unsigned int cmd, struct floppy_struct *g,
if (!disk || ITYPE(drive_state[cnt].fd_device) != type)
continue;
- __invalidate_device(disk->part0, true);
+ disk_force_media_change(disk);
}
mutex_unlock(&open_lock);
} else {
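
This hunk, together with the nbd change below, retires direct __invalidate_device() calls in favor of higher-level block layer helpers. A minimal sketch of the new calling convention (example_eject() is illustrative, not part of the series):

    static void example_eject(struct gendisk *disk)
    {
    	/*
    	 * disk_force_media_change() now takes only the gendisk; the
    	 * DISK_EVENT_MEDIA_CHANGE argument being dropped from the
    	 * loop.c callers below was redundant, since every caller
    	 * passed that same event.
    	 */
    	disk_force_media_change(disk);
    }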
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index 37511d2b2caf..9f2d412fc560 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -603,7 +603,7 @@ static int loop_change_fd(struct loop_device *lo, struct block_device *bdev,
goto out_err;
/* and ... switch */
- disk_force_media_change(lo->lo_disk, DISK_EVENT_MEDIA_CHANGE);
+ disk_force_media_change(lo->lo_disk);
blk_mq_freeze_queue(lo->lo_queue);
mapping_set_gfp_mask(old_file->f_mapping, lo->old_gfp_mask);
lo->lo_backing_file = file;
@@ -1067,7 +1067,7 @@ static int loop_configure(struct loop_device *lo, blk_mode_t mode,
/* suppress uevents while reconfiguring the device */
dev_set_uevent_suppress(disk_to_dev(lo->lo_disk), 1);
- disk_force_media_change(lo->lo_disk, DISK_EVENT_MEDIA_CHANGE);
+ disk_force_media_change(lo->lo_disk);
set_disk_ro(lo->lo_disk, (lo->lo_flags & LO_FLAGS_READ_ONLY) != 0);
lo->use_dio = lo->lo_flags & LO_FLAGS_DIRECT_IO;
@@ -1171,7 +1171,7 @@ static void __loop_clr_fd(struct loop_device *lo, bool release)
if (!release)
blk_mq_unfreeze_queue(lo->lo_queue);
- disk_force_media_change(lo->lo_disk, DISK_EVENT_MEDIA_CHANGE);
+ disk_force_media_change(lo->lo_disk);
if (lo->lo_flags & LO_FLAGS_PARTSCAN) {
int err;
@@ -1775,14 +1775,43 @@ static const struct block_device_operations lo_fops = {
/*
* If max_loop is specified, create that many devices upfront.
* This also becomes a hard limit. If max_loop is not specified,
+ * the default isn't a hard limit (as before commit 85c50197716c
+ * changed the default value from 0 for max_loop=0 reasons), just
* create CONFIG_BLK_DEV_LOOP_MIN_COUNT loop devices at module
* init time. Loop devices can be requested on-demand with the
* /dev/loop-control interface, or be instantiated by accessing
* a 'dead' device node.
*/
static int max_loop = CONFIG_BLK_DEV_LOOP_MIN_COUNT;
-module_param(max_loop, int, 0444);
+
+#ifdef CONFIG_BLOCK_LEGACY_AUTOLOAD
+static bool max_loop_specified;
+
+static int max_loop_param_set_int(const char *val,
+ const struct kernel_param *kp)
+{
+ int ret;
+
+ ret = param_set_int(val, kp);
+ if (ret < 0)
+ return ret;
+
+ max_loop_specified = true;
+ return 0;
+}
+
+static const struct kernel_param_ops max_loop_param_ops = {
+ .set = max_loop_param_set_int,
+ .get = param_get_int,
+};
+
+module_param_cb(max_loop, &max_loop_param_ops, &max_loop, 0444);
MODULE_PARM_DESC(max_loop, "Maximum number of loop devices");
+#else
+module_param(max_loop, int, 0444);
+MODULE_PARM_DESC(max_loop, "Initial number of loop devices");
+#endif
+
module_param(max_part, int, 0444);
MODULE_PARM_DESC(max_part, "Maximum number of partitions per loop device");
@@ -2093,14 +2122,18 @@ static void loop_remove(struct loop_device *lo)
put_disk(lo->lo_disk);
}
+#ifdef CONFIG_BLOCK_LEGACY_AUTOLOAD
static void loop_probe(dev_t dev)
{
int idx = MINOR(dev) >> part_shift;
- if (max_loop && idx >= max_loop)
+ if (max_loop_specified && max_loop && idx >= max_loop)
return;
loop_add(idx);
}
+#else
+#define loop_probe NULL
+#endif /* !CONFIG_BLOCK_LEGACY_AUTOLOAD */
static int loop_control_remove(int idx)
{
@@ -2281,6 +2314,9 @@ module_exit(loop_exit);
static int __init max_loop_setup(char *str)
{
max_loop = simple_strtol(str, NULL, 0);
+#ifdef CONFIG_BLOCK_LEGACY_AUTOLOAD
+ max_loop_specified = true;
+#endif
return 1;
}
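
The module_param_cb() dance above is a small, reusable technique: wrapping param_set_int() in custom kernel_param_ops lets a module distinguish "parameter left at its default" from "parameter explicitly set to the default value". A minimal restatement of the pattern, with my_* names standing in for the loop.c identifiers:

    static bool my_param_specified;
    static int my_param = 8;	/* compiled-in default */

    static int my_param_set(const char *val, const struct kernel_param *kp)
    {
    	/* delegate parsing and storing, then record that the user set it */
    	int ret = param_set_int(val, kp);

    	if (ret < 0)
    		return ret;
    	my_param_specified = true;
    	return 0;
    }

    static const struct kernel_param_ops my_param_ops = {
    	.set	= my_param_set,
    	.get	= param_get_int,
    };

    module_param_cb(my_param, &my_param_ops, &my_param, 0444);

Note that loop.c additionally has to set the flag by hand in max_loop_setup(), because the legacy max_loop= boot parameter bypasses the param ops entirely.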
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index 8576d696c7a2..42e0159bb258 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -1434,12 +1434,10 @@ static int nbd_start_device_ioctl(struct nbd_device *nbd)
return ret;
}
-static void nbd_clear_sock_ioctl(struct nbd_device *nbd,
- struct block_device *bdev)
+static void nbd_clear_sock_ioctl(struct nbd_device *nbd)
{
+ blk_mark_disk_dead(nbd->disk);
nbd_clear_sock(nbd);
- __invalidate_device(bdev, true);
- nbd_bdev_reset(nbd);
if (test_and_clear_bit(NBD_RT_HAS_CONFIG_REF,
&nbd->config->runtime_flags))
nbd_config_put(nbd);
@@ -1465,7 +1463,7 @@ static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd,
case NBD_DISCONNECT:
return nbd_disconnect(nbd);
case NBD_CLEAR_SOCK:
- nbd_clear_sock_ioctl(nbd, bdev);
+ nbd_clear_sock_ioctl(nbd);
return 0;
case NBD_SET_SOCK:
return nbd_add_socket(nbd, arg, false);
diff --git a/drivers/block/null_blk/zoned.c b/drivers/block/null_blk/zoned.c
index 635ce0648133..55c5b48bc276 100644
--- a/drivers/block/null_blk/zoned.c
+++ b/drivers/block/null_blk/zoned.c
@@ -162,21 +162,15 @@ int null_register_zoned_dev(struct nullb *nullb)
disk_set_zoned(nullb->disk, BLK_ZONED_HM);
blk_queue_flag_set(QUEUE_FLAG_ZONE_RESETALL, q);
blk_queue_required_elevator_features(q, ELEVATOR_F_ZBD_SEQ_WRITE);
-
- if (queue_is_mq(q)) {
- int ret = blk_revalidate_disk_zones(nullb->disk, NULL);
-
- if (ret)
- return ret;
- } else {
- blk_queue_chunk_sectors(q, dev->zone_size_sects);
- nullb->disk->nr_zones = bdev_nr_zones(nullb->disk->part0);
- }
-
+ blk_queue_chunk_sectors(q, dev->zone_size_sects);
+ nullb->disk->nr_zones = bdev_nr_zones(nullb->disk->part0);
blk_queue_max_zone_append_sectors(q, dev->zone_size_sects);
disk_set_max_open_zones(nullb->disk, dev->zone_max_open);
disk_set_max_active_zones(nullb->disk, dev->zone_max_active);
+ if (queue_is_mq(q))
+ return blk_revalidate_disk_zones(nullb->disk, NULL);
+
return 0;
}
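
The reshuffle above encodes an ordering requirement: the chunk size, zone count, append limit, and open/active caps are now programmed unconditionally first, and zone revalidation (a blk-mq-only operation that appears to depend on those limits being in place) becomes the final step. The shape of the resulting function, sketched with generic names:

    static int example_register_zoned(struct gendisk *disk, sector_t zone_sectors)
    {
    	struct request_queue *q = disk->queue;

    	/* limits first: the revalidation below depends on them */
    	blk_queue_chunk_sectors(q, zone_sectors);
    	disk->nr_zones = bdev_nr_zones(disk->part0);
    	blk_queue_max_zone_append_sectors(q, zone_sectors);

    	/* revalidation last, and only for blk-mq queues */
    	if (queue_is_mq(q))
    		return blk_revalidate_disk_zones(disk, NULL);
    	return 0;
    }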
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index bd0e075a5d89..2328cc05be36 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -3675,7 +3675,7 @@ static int rbd_lock(struct rbd_device *rbd_dev)
ret = ceph_cls_lock(osdc, &rbd_dev->header_oid, &rbd_dev->header_oloc,
RBD_LOCK_NAME, CEPH_CLS_LOCK_EXCLUSIVE, cookie,
RBD_LOCK_TAG, "", 0);
- if (ret)
+ if (ret && ret != -EEXIST)
return ret;
__rbd_lock(rbd_dev, cookie);
@@ -3849,51 +3849,82 @@ static void wake_lock_waiters(struct rbd_device *rbd_dev, int result)
list_splice_tail_init(&rbd_dev->acquiring_list, &rbd_dev->running_list);
}
-static int get_lock_owner_info(struct rbd_device *rbd_dev,
- struct ceph_locker **lockers, u32 *num_lockers)
+static bool locker_equal(const struct ceph_locker *lhs,
+ const struct ceph_locker *rhs)
+{
+ return lhs->id.name.type == rhs->id.name.type &&
+ lhs->id.name.num == rhs->id.name.num &&
+ !strcmp(lhs->id.cookie, rhs->id.cookie) &&
+ ceph_addr_equal_no_type(&lhs->info.addr, &rhs->info.addr);
+}
+
+static void free_locker(struct ceph_locker *locker)
+{
+ if (locker)
+ ceph_free_lockers(locker, 1);
+}
+
+static struct ceph_locker *get_lock_owner_info(struct rbd_device *rbd_dev)
{
struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
+ struct ceph_locker *lockers;
+ u32 num_lockers;
u8 lock_type;
char *lock_tag;
+ u64 handle;
int ret;
- dout("%s rbd_dev %p\n", __func__, rbd_dev);
-
ret = ceph_cls_lock_info(osdc, &rbd_dev->header_oid,
&rbd_dev->header_oloc, RBD_LOCK_NAME,
- &lock_type, &lock_tag, lockers, num_lockers);
- if (ret)
- return ret;
+ &lock_type, &lock_tag, &lockers, &num_lockers);
+ if (ret) {
+ rbd_warn(rbd_dev, "failed to get header lockers: %d", ret);
+ return ERR_PTR(ret);
+ }
- if (*num_lockers == 0) {
+ if (num_lockers == 0) {
dout("%s rbd_dev %p no lockers detected\n", __func__, rbd_dev);
+ lockers = NULL;
goto out;
}
if (strcmp(lock_tag, RBD_LOCK_TAG)) {
rbd_warn(rbd_dev, "locked by external mechanism, tag %s",
lock_tag);
- ret = -EBUSY;
- goto out;
+ goto err_busy;
}
- if (lock_type == CEPH_CLS_LOCK_SHARED) {
- rbd_warn(rbd_dev, "shared lock type detected");
- ret = -EBUSY;
- goto out;
+ if (lock_type != CEPH_CLS_LOCK_EXCLUSIVE) {
+ rbd_warn(rbd_dev, "incompatible lock type detected");
+ goto err_busy;
}
- if (strncmp((*lockers)[0].id.cookie, RBD_LOCK_COOKIE_PREFIX,
- strlen(RBD_LOCK_COOKIE_PREFIX))) {
+ WARN_ON(num_lockers != 1);
+ ret = sscanf(lockers[0].id.cookie, RBD_LOCK_COOKIE_PREFIX " %llu",
+ &handle);
+ if (ret != 1) {
rbd_warn(rbd_dev, "locked by external mechanism, cookie %s",
- (*lockers)[0].id.cookie);
- ret = -EBUSY;
- goto out;
+ lockers[0].id.cookie);
+ goto err_busy;
+ }
+ if (ceph_addr_is_blank(&lockers[0].info.addr)) {
+ rbd_warn(rbd_dev, "locker has a blank address");
+ goto err_busy;
}
+ dout("%s rbd_dev %p got locker %s%llu@%pISpc/%u handle %llu\n",
+ __func__, rbd_dev, ENTITY_NAME(lockers[0].id.name),
+ &lockers[0].info.addr.in_addr,
+ le32_to_cpu(lockers[0].info.addr.nonce), handle);
+
out:
kfree(lock_tag);
- return ret;
+ return lockers;
+
+err_busy:
+ kfree(lock_tag);
+ ceph_free_lockers(lockers, num_lockers);
+ return ERR_PTR(-EBUSY);
}
static int find_watcher(struct rbd_device *rbd_dev,
@@ -3909,8 +3940,10 @@ static int find_watcher(struct rbd_device *rbd_dev,
ret = ceph_osdc_list_watchers(osdc, &rbd_dev->header_oid,
&rbd_dev->header_oloc, &watchers,
&num_watchers);
- if (ret)
+ if (ret) {
+ rbd_warn(rbd_dev, "failed to get watchers: %d", ret);
return ret;
+ }
sscanf(locker->id.cookie, RBD_LOCK_COOKIE_PREFIX " %llu", &cookie);
for (i = 0; i < num_watchers; i++) {
@@ -3947,51 +3980,72 @@ out:
static int rbd_try_lock(struct rbd_device *rbd_dev)
{
struct ceph_client *client = rbd_dev->rbd_client->client;
- struct ceph_locker *lockers;
- u32 num_lockers;
+ struct ceph_locker *locker, *refreshed_locker;
int ret;
for (;;) {
+ locker = refreshed_locker = NULL;
+
ret = rbd_lock(rbd_dev);
- if (ret != -EBUSY)
- return ret;
+ if (!ret)
+ goto out;
+ if (ret != -EBUSY) {
+ rbd_warn(rbd_dev, "failed to lock header: %d", ret);
+ goto out;
+ }
/* determine if the current lock holder is still alive */
- ret = get_lock_owner_info(rbd_dev, &lockers, &num_lockers);
- if (ret)
- return ret;
-
- if (num_lockers == 0)
+ locker = get_lock_owner_info(rbd_dev);
+ if (IS_ERR(locker)) {
+ ret = PTR_ERR(locker);
+ locker = NULL;
+ goto out;
+ }
+ if (!locker)
goto again;
- ret = find_watcher(rbd_dev, lockers);
+ ret = find_watcher(rbd_dev, locker);
if (ret)
goto out; /* request lock or error */
+ refreshed_locker = get_lock_owner_info(rbd_dev);
+ if (IS_ERR(refreshed_locker)) {
+ ret = PTR_ERR(refreshed_locker);
+ refreshed_locker = NULL;
+ goto out;
+ }
+ if (!refreshed_locker ||
+ !locker_equal(locker, refreshed_locker))
+ goto again;
+
rbd_warn(rbd_dev, "breaking header lock owned by %s%llu",
- ENTITY_NAME(lockers[0].id.name));
+ ENTITY_NAME(locker->id.name));
ret = ceph_monc_blocklist_add(&client->monc,
- &lockers[0].info.addr);
+ &locker->info.addr);
if (ret) {
- rbd_warn(rbd_dev, "blocklist of %s%llu failed: %d",
- ENTITY_NAME(lockers[0].id.name), ret);
+ rbd_warn(rbd_dev, "failed to blocklist %s%llu: %d",
+ ENTITY_NAME(locker->id.name), ret);
goto out;
}
ret = ceph_cls_break_lock(&client->osdc, &rbd_dev->header_oid,
&rbd_dev->header_oloc, RBD_LOCK_NAME,
- lockers[0].id.cookie,
- &lockers[0].id.name);
- if (ret && ret != -ENOENT)
+ locker->id.cookie, &locker->id.name);
+ if (ret && ret != -ENOENT) {
+ rbd_warn(rbd_dev, "failed to break header lock: %d",
+ ret);
goto out;
+ }
again:
- ceph_free_lockers(lockers, num_lockers);
+ free_locker(refreshed_locker);
+ free_locker(locker);
}
out:
- ceph_free_lockers(lockers, num_lockers);
+ free_locker(refreshed_locker);
+ free_locker(locker);
return ret;
}
@@ -4041,11 +4095,8 @@ static int rbd_try_acquire_lock(struct rbd_device *rbd_dev)
ret = rbd_try_lock(rbd_dev);
if (ret < 0) {
- rbd_warn(rbd_dev, "failed to lock header: %d", ret);
- if (ret == -EBLOCKLISTED)
- goto out;
-
- ret = 1; /* request lock anyway */
+ rbd_warn(rbd_dev, "failed to acquire lock: %d", ret);
+ goto out;
}
if (ret > 0) {
up_write(&rbd_dev->lock_rwsem);
@@ -6579,12 +6630,11 @@ static int rbd_add_acquire_lock(struct rbd_device *rbd_dev)
cancel_delayed_work_sync(&rbd_dev->lock_dwork);
if (!ret)
ret = -ETIMEDOUT;
- }
- if (ret) {
- rbd_warn(rbd_dev, "failed to acquire exclusive lock: %ld", ret);
- return ret;
+ rbd_warn(rbd_dev, "failed to acquire lock: %ld", ret);
}
+ if (ret)
+ return ret;
/*
* The lock may have been released by now, unless automatic lock
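
The get_lock_owner_info() rework above replaces two out-parameters with a single return value that encodes three outcomes via the standard ERR_PTR()/IS_ERR()/PTR_ERR() convention: NULL for "no locker", an encoded errno for failure, and a real pointer (which the caller must free) for the single exclusive locker. The caller-side shape, condensed from the rbd_try_lock() hunk:

    struct ceph_locker *locker = get_lock_owner_info(rbd_dev);

    if (IS_ERR(locker)) {
    	ret = PTR_ERR(locker);	/* hard failure, e.g. -EBUSY */
    	locker = NULL;		/* keep the shared free path safe */
    	goto out;
    }
    if (!locker)
    	goto again;		/* lock vanished; retry acquisition */
    /* ... use locker, eventually free_locker(locker) ... */

The second get_lock_owner_info() call, compared with locker_equal(), guards against the lock changing hands between the watcher lookup and the blocklist/break-lock step: if the owner no longer matches, the loop restarts instead of evicting the wrong client.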
diff --git a/drivers/block/rnbd/rnbd-clt-sysfs.c b/drivers/block/rnbd/rnbd-clt-sysfs.c
index c36d8b1ceeed..39887556cf95 100644
--- a/drivers/block/rnbd/rnbd-clt-sysfs.c
+++ b/drivers/block/rnbd/rnbd-clt-sysfs.c
@@ -25,7 +25,7 @@
static struct device *rnbd_dev;
static const struct class rnbd_dev_class = {
- .name = "rnbd_client",
+ .name = "rnbd-client",
};
static struct kobject *rnbd_devs_kobj;
diff --git a/drivers/block/ublk_drv.c b/drivers/block/ublk_drv.c
index 1c823750c95a..21d2e71c5514 100644
--- a/drivers/block/ublk_drv.c
+++ b/drivers/block/ublk_drv.c
@@ -1847,7 +1847,8 @@ static int ublk_ctrl_start_dev(struct ublk_device *ub, struct io_uring_cmd *cmd)
if (ublksrv_pid <= 0)
return -EINVAL;
- wait_for_completion_interruptible(&ub->completion);
+ if (wait_for_completion_interruptible(&ub->completion) != 0)
+ return -EINTR;
schedule_delayed_work(&ub->monitor_work, UBLK_DAEMON_MONITOR_PERIOD);
@@ -2125,8 +2126,8 @@ static int ublk_ctrl_del_dev(struct ublk_device **p_ub)
* - the device number is freed already, we will not find this
* device via ublk_get_device_from_id()
*/
- wait_event_interruptible(ublk_idr_wq, ublk_idr_freed(idx));
-
+ if (wait_event_interruptible(ublk_idr_wq, ublk_idr_freed(idx)))
+ return -EINTR;
return 0;
}
@@ -2323,7 +2324,9 @@ static int ublk_ctrl_end_recovery(struct ublk_device *ub,
pr_devel("%s: Waiting for new ubq_daemons(nr: %d) are ready, dev id %d...\n",
__func__, ub->dev_info.nr_hw_queues, header->dev_id);
/* wait until new ubq_daemon sending all FETCH_REQ */
- wait_for_completion_interruptible(&ub->completion);
+ if (wait_for_completion_interruptible(&ub->completion))
+ return -EINTR;
+
pr_devel("%s: All new ubq_daemons(nr: %d) are ready, dev id %d\n",
__func__, ub->dev_info.nr_hw_queues, header->dev_id);
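
All three ublk hunks fix the same bug class: wait_for_completion_interruptible() and wait_event_interruptible() return -ERESTARTSYS when a signal arrives, and ignoring that return lets the code proceed as if the awaited event had happened. The general shape, as a minimal sketch:

    static int example_wait_ready(struct completion *done)
    {
    	/* a nonzero return means a signal interrupted the wait */
    	if (wait_for_completion_interruptible(done))
    		return -EINTR;
    	return 0;	/* the completion really fired */
    }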
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
index b47358da92a2..1fe011676d07 100644
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
@@ -751,7 +751,6 @@ static int virtblk_probe_zoned_device(struct virtio_device *vdev,
{
u32 v, wg;
u8 model;
- int ret;
virtio_cread(vdev, struct virtio_blk_config,
zoned.model, &model);
@@ -806,6 +805,7 @@ static int virtblk_probe_zoned_device(struct virtio_device *vdev,
vblk->zone_sectors);
return -ENODEV;
}
+ blk_queue_chunk_sectors(q, vblk->zone_sectors);
dev_dbg(&vdev->dev, "zone sectors = %u\n", vblk->zone_sectors);
if (virtio_has_feature(vdev, VIRTIO_BLK_F_DISCARD)) {
@@ -814,26 +814,22 @@ static int virtblk_probe_zoned_device(struct virtio_device *vdev,
blk_queue_max_discard_sectors(q, 0);
}
- ret = blk_revalidate_disk_zones(vblk->disk, NULL);
- if (!ret) {
- virtio_cread(vdev, struct virtio_blk_config,
- zoned.max_append_sectors, &v);
- if (!v) {
- dev_warn(&vdev->dev, "zero max_append_sectors reported\n");
- return -ENODEV;
- }
- if ((v << SECTOR_SHIFT) < wg) {
- dev_err(&vdev->dev,
- "write granularity %u exceeds max_append_sectors %u limit\n",
- wg, v);
- return -ENODEV;
- }
-
- blk_queue_max_zone_append_sectors(q, v);
- dev_dbg(&vdev->dev, "max append sectors = %u\n", v);
+ virtio_cread(vdev, struct virtio_blk_config,
+ zoned.max_append_sectors, &v);
+ if (!v) {
+ dev_warn(&vdev->dev, "zero max_append_sectors reported\n");
+ return -ENODEV;
+ }
+ if ((v << SECTOR_SHIFT) < wg) {
+ dev_err(&vdev->dev,
+ "write granularity %u exceeds max_append_sectors %u limit\n",
+ wg, v);
+ return -ENODEV;
}
+ blk_queue_max_zone_append_sectors(q, v);
+ dev_dbg(&vdev->dev, "max append sectors = %u\n", v);
- return ret;
+ return blk_revalidate_disk_zones(vblk->disk, NULL);
}
#else
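
Besides hoisting blk_revalidate_disk_zones() to the very end (mirroring the null_blk change), the hunk preserves a unit-conversion check that is easy to misread: the device reports max_append_sectors in 512-byte sectors while the write granularity wg is in bytes, so the sector count is shifted by SECTOR_SHIFT before the comparison. As a tiny sketch (append_limit_ok() is illustrative):

    static bool append_limit_ok(u32 max_append_sectors, u32 wg_bytes)
    {
    	/* scale sectors to bytes (SECTOR_SHIFT == 9) before comparing */
    	return ((u64)max_append_sectors << SECTOR_SHIFT) >= wg_bytes;
    }

The u64 widening here is purely defensive; the driver itself compares in u32 on the assumption that the reported value is small.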
diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
index 5676e6dd5b16..06673c6ca255 100644
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
@@ -1870,15 +1870,16 @@ static void zram_bio_discard(struct zram *zram, struct bio *bio)
static void zram_bio_read(struct zram *zram, struct bio *bio)
{
- struct bvec_iter iter;
- struct bio_vec bv;
- unsigned long start_time;
+ unsigned long start_time = bio_start_io_acct(bio);
+ struct bvec_iter iter = bio->bi_iter;
- start_time = bio_start_io_acct(bio);
- bio_for_each_segment(bv, bio, iter) {
+ do {
u32 index = iter.bi_sector >> SECTORS_PER_PAGE_SHIFT;
u32 offset = (iter.bi_sector & (SECTORS_PER_PAGE - 1)) <<
SECTOR_SHIFT;
+ struct bio_vec bv = bio_iter_iovec(bio, iter);
+
+ bv.bv_len = min_t(u32, bv.bv_len, PAGE_SIZE - offset);
if (zram_bvec_read(zram, &bv, index, offset, bio) < 0) {
atomic64_inc(&zram->stats.failed_reads);
@@ -1890,22 +1891,26 @@ static void zram_bio_read(struct zram *zram, struct bio *bio)
zram_slot_lock(zram, index);
zram_accessed(zram, index);
zram_slot_unlock(zram, index);
- }
+
+ bio_advance_iter_single(bio, &iter, bv.bv_len);
+ } while (iter.bi_size);
+
bio_end_io_acct(bio, start_time);
bio_endio(bio);
}
static void zram_bio_write(struct zram *zram, struct bio *bio)
{
- struct bvec_iter iter;
- struct bio_vec bv;
- unsigned long start_time;
+ unsigned long start_time = bio_start_io_acct(bio);
+ struct bvec_iter iter = bio->bi_iter;
- start_time = bio_start_io_acct(bio);
- bio_for_each_segment(bv, bio, iter) {
+ do {
u32 index = iter.bi_sector >> SECTORS_PER_PAGE_SHIFT;
u32 offset = (iter.bi_sector & (SECTORS_PER_PAGE - 1)) <<
SECTOR_SHIFT;
+ struct bio_vec bv = bio_iter_iovec(bio, iter);
+
+ bv.bv_len = min_t(u32, bv.bv_len, PAGE_SIZE - offset);
if (zram_bvec_write(zram, &bv, index, offset, bio) < 0) {
atomic64_inc(&zram->stats.failed_writes);
@@ -1916,7 +1921,10 @@ static void zram_bio_write(struct zram *zram, struct bio *bio)
zram_slot_lock(zram, index);
zram_accessed(zram, index);
zram_slot_unlock(zram, index);
- }
+
+ bio_advance_iter_single(bio, &iter, bv.bv_len);
+ } while (iter.bi_size);
+
bio_end_io_acct(bio, start_time);
bio_endio(bio);
}
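
Both zram loops above trade bio_for_each_segment() for an open-coded iterator because each pass must be clamped to the current zram page: bio_iter_iovec() materializes the bvec at the iterator position, bv_len is capped at PAGE_SIZE - offset, and bio_advance_iter_single() then advances by exactly the amount consumed, so a bvec spanning a page boundary is simply revisited on the next pass. The skeleton common to both functions:

    struct bvec_iter iter = bio->bi_iter;

    do {
    	u32 index = iter.bi_sector >> SECTORS_PER_PAGE_SHIFT;
    	u32 offset = (iter.bi_sector & (SECTORS_PER_PAGE - 1)) <<
    			SECTOR_SHIFT;
    	struct bio_vec bv = bio_iter_iovec(bio, iter);

    	/* never let one step cross a zram page boundary */
    	bv.bv_len = min_t(u32, bv.bv_len, PAGE_SIZE - offset);

    	/* ... read or write the (index, offset, bv) chunk ... */

    	bio_advance_iter_single(bio, &iter, bv.bv_len);
    } while (iter.bi_size);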