From dd22f551ac0ad366f92f601835f6623b83adc331 Mon Sep 17 00:00:00 2001
From: Matthew Wilcox
Date: Wed, 7 Jan 2015 18:05:34 +0200
Subject: block: Change direct_access calling convention

In order to support accesses to larger chunks of memory, pass in a
'size' parameter (counted in bytes), and return the amount available at
that address.

Add a new helper function, bdev_direct_access(), to handle common
functionality including partition handling, checking the length
requested is positive, checking for the sector being page-aligned, and
checking the length of the request does not pass the end of the
partition.

Signed-off-by: Matthew Wilcox
Reviewed-by: Jan Kara
Reviewed-by: Boaz Harrosh
Signed-off-by: Jens Axboe
---
 include/linux/blkdev.h | 6 ++++--
 1 file changed, 4 insertions(+), 2 deletions(-)

diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 92f4b4b288dd..e9086be6d9a0 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -1601,8 +1601,8 @@ struct block_device_operations {
 	int (*rw_page)(struct block_device *, sector_t, struct page *, int rw);
 	int (*ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
 	int (*compat_ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
-	int (*direct_access) (struct block_device *, sector_t,
-						void **, unsigned long *);
+	long (*direct_access)(struct block_device *, sector_t,
+						void **, unsigned long *pfn, long size);
 	unsigned int (*check_events) (struct gendisk *disk,
 				      unsigned int clearing);
 	/* ->media_changed() is DEPRECATED, use ->check_events() instead */
@@ -1620,6 +1620,8 @@ extern int __blkdev_driver_ioctl(struct block_device *, fmode_t, unsigned int,
 extern int bdev_read_page(struct block_device *, sector_t, struct page *);
 extern int bdev_write_page(struct block_device *, sector_t, struct page *,
 						struct writeback_control *);
+extern long bdev_direct_access(struct block_device *, sector_t, void **addr,
+						unsigned long *pfn, long size);
 #else /* CONFIG_BLOCK */
 
 struct block_device;
-- cgit
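To make the new convention concrete, here is a minimal sketch of a driver-side
implementation, not taken from the patch; the driver type, its fields, and the
function name are hypothetical:

    /* Hypothetical driver: 'struct mydrv_dev' and its virt_addr, phys_addr
     * and size fields are illustrative only. */
    static long mydrv_direct_access(struct block_device *bdev, sector_t sector,
                                    void **kaddr, unsigned long *pfn, long size)
    {
            struct mydrv_dev *dev = bdev->bd_disk->private_data;
            size_t offset = (size_t)sector << 9;    /* sectors to bytes */

            *kaddr = dev->virt_addr + offset;
            *pfn = (dev->phys_addr + offset) >> PAGE_SHIFT;

            /* New convention: return the number of bytes available at this
             * address, which may differ from the 'size' requested. */
            return dev->size - offset;
    }

A caller would go through the new helper rather than the method itself, e.g.
long avail = bdev_direct_access(bdev, sector, &addr, &pfn, size), letting the
helper reject negative sizes, unaligned sectors, and requests that run past
the end of the partition.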
From d93ba7a5a97c9f315bacdcdb8de4e5f368e7b396 Mon Sep 17 00:00:00 2001
From: "Martin K. Petersen"
Date: Tue, 20 Jan 2015 20:06:30 -0500
Subject: block: Add discard flag to blkdev_issue_zeroout() function

blkdev_issue_zeroout() will zero a given block range. This is done by
way of explicit writing, thus provisioning or allocating the blocks on
disk.

There are use cases where the desired behavior is to zero the blocks
but unprovision them if possible. The blocks must deterministically
contain zeroes when they are subsequently read back.

This patch adds a flag to blkdev_issue_zeroout() that provides this
variant. If the discard flag is set and a block device guarantees
discard_zeroes_data we will use REQ_DISCARD to clear the block range.
If the device does not support discard_zeroes_data or if the discard
request fails we will fall back to first REQ_WRITE_SAME and then a
regular REQ_WRITE.

Also update the callers of blkdev_issue_zeroout() to reflect the new
flag and make sb_issue_zeroout() prefer the discard approach.

Signed-off-by: Martin K. Petersen
Reviewed-by: Christoph Hellwig
Signed-off-by: Jens Axboe
---
 block/blk-lib.c                    | 30 ++++++++++++++++++++++++++----
 block/ioctl.c                      |  2 +-
 drivers/block/drbd/drbd_receiver.c |  2 +-
 include/linux/blkdev.h             |  4 ++--
 4 files changed, 30 insertions(+), 8 deletions(-)

diff --git a/block/blk-lib.c b/block/blk-lib.c
index 8411be3c19d3..715e948f58a4 100644
--- a/block/blk-lib.c
+++ b/block/blk-lib.c
@@ -283,23 +283,45 @@ static int __blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
  * @sector:	start sector
  * @nr_sects:	number of sectors to write
  * @gfp_mask:	memory allocation flags (for bio_alloc)
+ * @discard:	whether to discard the block range
  *
  * Description:
- *  Generate and issue number of bios with zerofiled pages.
+ *  Zero-fill a block range.  If the discard flag is set and the block
+ *  device guarantees that subsequent READ operations to the block range
+ *  in question will return zeroes, the blocks will be discarded. Should
+ *  the discard request fail, if the discard flag is not set, or if
+ *  discard_zeroes_data is not supported, this function will resort to
+ *  zeroing the blocks manually, thus provisioning (allocating,
+ *  anchoring) them. If the block device supports the WRITE SAME command
+ *  blkdev_issue_zeroout() will use it to optimize the process of
+ *  clearing the block range. Otherwise the zeroing will be performed
+ *  using regular WRITE calls.
  */
 int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
-			 sector_t nr_sects, gfp_t gfp_mask)
+			 sector_t nr_sects, gfp_t gfp_mask, bool discard)
 {
+	struct request_queue *q = bdev_get_queue(bdev);
+	unsigned char bdn[BDEVNAME_SIZE];
+
+	if (discard && blk_queue_discard(q) && q->limits.discard_zeroes_data) {
+
+		if (!blkdev_issue_discard(bdev, sector, nr_sects, gfp_mask, 0))
+			return 0;
+
+		bdevname(bdev, bdn);
+		pr_warn("%s: DISCARD failed. Manually zeroing.\n", bdn);
+	}
+
 	if (bdev_write_same(bdev)) {
-		unsigned char bdn[BDEVNAME_SIZE];
 
 		if (!blkdev_issue_write_same(bdev, sector, nr_sects, gfp_mask,
 					     ZERO_PAGE(0)))
 			return 0;
 
 		bdevname(bdev, bdn);
-		pr_err("%s: WRITE SAME failed. Manually zeroing.\n", bdn);
+		pr_warn("%s: WRITE SAME failed. Manually zeroing.\n", bdn);
 	}
 
 	return __blkdev_issue_zeroout(bdev, sector, nr_sects, gfp_mask);
diff --git a/block/ioctl.c b/block/ioctl.c
index 6c7bf903742f..7d8befde2aca 100644
--- a/block/ioctl.c
+++ b/block/ioctl.c
@@ -198,7 +198,7 @@ static int blk_ioctl_zeroout(struct block_device *bdev, uint64_t start,
 	if (start + len > (i_size_read(bdev->bd_inode) >> 9))
 		return -EINVAL;
 
-	return blkdev_issue_zeroout(bdev, start, len, GFP_KERNEL);
+	return blkdev_issue_zeroout(bdev, start, len, GFP_KERNEL, false);
 }
 
 static int put_ushort(unsigned long arg, unsigned short val)
diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
index d169b4a79267..cee20354ac37 100644
--- a/drivers/block/drbd/drbd_receiver.c
+++ b/drivers/block/drbd/drbd_receiver.c
@@ -1388,7 +1388,7 @@ int drbd_submit_peer_request(struct drbd_device *device,
 		list_add_tail(&peer_req->w.list, &device->active_ee);
 		spin_unlock_irq(&device->resource->req_lock);
 		if (blkdev_issue_zeroout(device->ldev->backing_bdev,
-			sector, data_size >> 9, GFP_NOIO))
+			sector, data_size >> 9, GFP_NOIO, false))
 			peer_req->flags |= EE_WAS_ERROR;
 		drbd_endio_write_sec_final(peer_req);
 		return 0;
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index e9086be6d9a0..4c4b732d7556 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -1162,7 +1162,7 @@ extern int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
 extern int blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
 		sector_t nr_sects, gfp_t gfp_mask, struct page *page);
 extern int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
-		sector_t nr_sects, gfp_t gfp_mask);
+		sector_t nr_sects, gfp_t gfp_mask, bool discard);
 static inline int sb_issue_discard(struct super_block *sb, sector_t block,
 		sector_t nr_blocks, gfp_t gfp_mask, unsigned long flags)
 {
@@ -1176,7 +1176,7 @@ static inline int sb_issue_zeroout(struct super_block *sb, sector_t block,
 	return blkdev_issue_zeroout(sb->s_bdev,
 				    block << (sb->s_blocksize_bits - 9),
 				    nr_blocks << (sb->s_blocksize_bits - 9),
-				    gfp_mask);
+				    gfp_mask, true);
 }
 
 extern int blk_verify_command(unsigned char *cmd, fmode_t has_write_perm);
-- cgit
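As a usage sketch (hypothetical caller; bdev, sector and nr_sects are assumed
to be in scope), the new flag selects between the two behaviors:

    /* Prefer unprovisioning: discard when the device guarantees that the
     * range reads back as zeroes; the function falls back internally to
     * WRITE SAME or regular WRITE otherwise. */
    int err = blkdev_issue_zeroout(bdev, sector, nr_sects, GFP_NOFS, true);

    if (err)
            return err;

Passing false instead keeps the old always-write behavior, which is what the
blk_ioctl_zeroout() and drbd callers above do, while sb_issue_zeroout() now
opts in to the discard-based variant.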
From ee1b6f7aff94019c09e73837054979063f722046 Mon Sep 17 00:00:00 2001
From: Shaohua Li
Date: Thu, 15 Jan 2015 17:32:25 -0800
Subject: block: support different tag allocation policy

The libata tag allocation is using a round-robin policy. Next patch will
make libata use block generic tag allocation, so let's add a policy to
tag allocation.

Currently two policies: FIFO (default) and round-robin.

Cc: Jens Axboe
Cc: Tejun Heo
Cc: Christoph Hellwig
Signed-off-by: Shaohua Li
Signed-off-by: Jens Axboe
---
 block/blk-tag.c          | 33 +++++++++++++++++++++++++--------
 drivers/block/osdblk.c   |  2 +-
 drivers/scsi/scsi_scan.c |  3 ++-
 include/linux/blkdev.h   |  8 ++++++--
 include/scsi/scsi_host.h |  3 +++
 include/scsi/scsi_tcq.h  |  3 ++-
 6 files changed, 39 insertions(+), 13 deletions(-)

diff --git a/block/blk-tag.c b/block/blk-tag.c
index a185b86741e5..f0344e6939d5 100644
--- a/block/blk-tag.c
+++ b/block/blk-tag.c
@@ -119,7 +119,7 @@ fail:
 }
 
 static struct blk_queue_tag *__blk_queue_init_tags(struct request_queue *q,
-						   int depth)
+						   int depth, int alloc_policy)
 {
 	struct blk_queue_tag *tags;
 
@@ -131,6 +131,8 @@ static struct blk_queue_tag *__blk_queue_init_tags(struct request_queue *q,
 		goto fail;
 
 	atomic_set(&tags->refcnt, 1);
+	tags->alloc_policy = alloc_policy;
+	tags->next_tag = 0;
 	return tags;
 fail:
 	kfree(tags);
@@ -140,10 +142,11 @@ fail:
 /**
  * blk_init_tags - initialize the tag info for an external tag map
  * @depth:	the maximum queue depth supported
+ * @alloc_policy: tag allocation policy
 **/
-struct blk_queue_tag *blk_init_tags(int depth)
+struct blk_queue_tag *blk_init_tags(int depth, int alloc_policy)
 {
-	return __blk_queue_init_tags(NULL, depth);
+	return __blk_queue_init_tags(NULL, depth, alloc_policy);
 }
 EXPORT_SYMBOL(blk_init_tags);
 
@@ -152,19 +155,20 @@ EXPORT_SYMBOL(blk_init_tags);
 * @q:  the request queue for the device
 * @depth:  the maximum queue depth supported
 * @tags: the tag to use
+ * @alloc_policy: tag allocation policy
 *
 * Queue lock must be held here if the function is called to resize an
 * existing map.
 **/
 int blk_queue_init_tags(struct request_queue *q, int depth,
-			struct blk_queue_tag *tags)
+			struct blk_queue_tag *tags, int alloc_policy)
 {
 	int rc;
 
 	BUG_ON(tags && q->queue_tags && tags != q->queue_tags);
 
 	if (!tags && !q->queue_tags) {
-		tags = __blk_queue_init_tags(q, depth);
+		tags = __blk_queue_init_tags(q, depth, alloc_policy);
 
 		if (!tags)
 			return -ENOMEM;
@@ -344,9 +348,21 @@ int blk_queue_start_tag(struct request_queue *q, struct request *rq)
 	}
 
 	do {
-		tag = find_first_zero_bit(bqt->tag_map, max_depth);
-		if (tag >= max_depth)
-			return 1;
+		if (bqt->alloc_policy == BLK_TAG_ALLOC_FIFO) {
+			tag = find_first_zero_bit(bqt->tag_map, max_depth);
+			if (tag >= max_depth)
+				return 1;
+		} else {
+			int start = bqt->next_tag;
+			int size = min_t(int, bqt->max_depth, max_depth + start);
+			tag = find_next_zero_bit(bqt->tag_map, size, start);
+			if (tag >= size && start + size > bqt->max_depth) {
+				size = start + size - bqt->max_depth;
+				tag = find_first_zero_bit(bqt->tag_map, size);
+			}
+			if (tag >= size)
+				return 1;
+		}
 	} while (test_and_set_bit_lock(tag, bqt->tag_map));
 	/*
@@ -354,6 +370,7 @@ int blk_queue_start_tag(struct request_queue *q, struct request *rq)
 	 * See blk_queue_end_tag for details.
 	 */
 
+	bqt->next_tag = (tag + 1) % bqt->max_depth;
 	rq->cmd_flags |= REQ_QUEUED;
 	rq->tag = tag;
 	bqt->tag_index[tag] = rq;
diff --git a/drivers/block/osdblk.c b/drivers/block/osdblk.c
index 79aa179305b5..e22942596207 100644
--- a/drivers/block/osdblk.c
+++ b/drivers/block/osdblk.c
@@ -423,7 +423,7 @@ static int osdblk_init_disk(struct osdblk_device *osdev)
 	}
 
 	/* switch queue to TCQ mode; allocate tag map */
-	rc = blk_queue_init_tags(q, OSDBLK_MAX_REQ, NULL);
+	rc = blk_queue_init_tags(q, OSDBLK_MAX_REQ, NULL, BLK_TAG_ALLOC_FIFO);
 	if (rc) {
 		blk_cleanup_queue(q);
 		put_disk(disk);
diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
index 983aed10ff2f..921a8c897eb2 100644
--- a/drivers/scsi/scsi_scan.c
+++ b/drivers/scsi/scsi_scan.c
@@ -290,7 +290,8 @@ static struct scsi_device *scsi_alloc_sdev(struct scsi_target *starget,
 	if (!shost_use_blk_mq(sdev->host) &&
 	    (shost->bqt || shost->hostt->use_blk_tags)) {
 		blk_queue_init_tags(sdev->request_queue,
-				    sdev->host->cmd_per_lun, shost->bqt);
+				    sdev->host->cmd_per_lun, shost->bqt,
+				    shost->hostt->tag_alloc_policy);
 	}
 	scsi_change_queue_depth(sdev, sdev->host->cmd_per_lun);
 
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 4c4b732d7556..6f388fd1c11c 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -272,7 +272,11 @@ struct blk_queue_tag {
 	int max_depth;			/* what we will send to device */
 	int real_max_depth;		/* what the array can hold */
 	atomic_t refcnt;		/* map can be shared */
+	int alloc_policy;		/* tag allocation policy */
+	int next_tag;			/* next tag */
 };
+#define BLK_TAG_ALLOC_FIFO 0 /* allocate starting from 0 */
+#define BLK_TAG_ALLOC_RR 1 /* allocate starting from last allocated tag */
 
 #define BLK_SCSI_MAX_CMDS	(256)
 #define BLK_SCSI_CMD_PER_LONG	(BLK_SCSI_MAX_CMDS / (sizeof(long) * 8))
@@ -1139,11 +1143,11 @@ static inline bool blk_needs_flush_plug(struct task_struct *tsk)
 extern int blk_queue_start_tag(struct request_queue *, struct request *);
 extern struct request *blk_queue_find_tag(struct request_queue *, int);
 extern void blk_queue_end_tag(struct request_queue *, struct request *);
-extern int blk_queue_init_tags(struct request_queue *, int, struct blk_queue_tag *);
+extern int blk_queue_init_tags(struct request_queue *, int, struct blk_queue_tag *, int);
 extern void blk_queue_free_tags(struct request_queue *);
 extern int blk_queue_resize_tags(struct request_queue *, int);
 extern void blk_queue_invalidate_tags(struct request_queue *);
-extern struct blk_queue_tag *blk_init_tags(int);
+extern struct blk_queue_tag *blk_init_tags(int, int);
 extern void blk_free_tags(struct blk_queue_tag *);
 
 static inline struct request *blk_map_queue_find_tag(struct blk_queue_tag *bqt,
diff --git a/include/scsi/scsi_host.h b/include/scsi/scsi_host.h
index 019e66858ce6..e113c757d555 100644
--- a/include/scsi/scsi_host.h
+++ b/include/scsi/scsi_host.h
@@ -402,6 +402,9 @@ struct scsi_host_template {
 	 */
 	unsigned char present;
 
+	/* If use block layer to manage tags, this is tag allocation policy */
+	int tag_alloc_policy;
+
 	/*
 	 * Let the block layer assigns tags to all commands.
 	 */
diff --git a/include/scsi/scsi_tcq.h b/include/scsi/scsi_tcq.h
index 9708b28bd2aa..b27977e8aaed 100644
--- a/include/scsi/scsi_tcq.h
+++ b/include/scsi/scsi_tcq.h
@@ -66,7 +66,8 @@ static inline int scsi_init_shared_tag_map(struct Scsi_Host *shost, int depth)
 	 * devices on the shared host (for libata)
 	 */
 	if (!shost->bqt) {
-		shost->bqt = blk_init_tags(depth);
+		shost->bqt = blk_init_tags(depth,
+			shost->hostt->tag_alloc_policy);
 		if (!shost->bqt)
 			return -ENOMEM;
 	}
-- cgit
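A sketch of how a SCSI low-level driver would opt in to round-robin allocation
(driver name hypothetical; tag_alloc_policy is the field added above, and
use_blk_tags already existed at this point):

    static struct scsi_host_template mydrv_template = {
            .name                   = "mydrv",
            .use_blk_tags           = 1,
            /* Hand out tags round-robin, as libata-style hardware expects.
             * Omitting this keeps the FIFO default, since
             * BLK_TAG_ALLOC_FIFO is 0. */
            .tag_alloc_policy       = BLK_TAG_ALLOC_RR,
    };

Non-SCSI users pass the policy to blk_queue_init_tags() or blk_init_tags()
directly, as the osdblk hunk above shows.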
From ad9cf3bbd18a94806314741ac8092c3422f5aebe Mon Sep 17 00:00:00 2001
From: Mike Snitzer
Date: Tue, 16 Dec 2014 12:54:25 -0500
Subject: block: mark blk-mq devices as stackable

Commit 4ee5eaf4 ("block: add a queue flag for request stacking support")
introduced the concept of "STACKABLE" and blk-mq devices fit the
definition in that they establish q->request_fn.  So establish
QUEUE_FLAG_STACKABLE in QUEUE_FLAG_MQ_DEFAULT.

While not strictly needed (DM _could_ just check for q->mq_ops to assume
the device is request-based), request-based DM support for blk-mq
devices benefits from the ability to consistently check for
QUEUE_FLAG_STACKABLE before allowing a device to be stacked into a
request-based DM table.

Signed-off-by: Mike Snitzer
Signed-off-by: Jens Axboe
---
 include/linux/blkdev.h | 1 +
 1 file changed, 1 insertion(+)

diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 6f388fd1c11c..13e16401a7ce 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -520,6 +520,7 @@ struct request_queue {
 				 (1 << QUEUE_FLAG_ADD_RANDOM))
 
 #define QUEUE_FLAG_MQ_DEFAULT	((1 << QUEUE_FLAG_IO_STAT) |		\
+				 (1 << QUEUE_FLAG_STACKABLE)	|	\
 				 (1 << QUEUE_FLAG_SAME_COMP))
 
 static inline void queue_lockdep_assert_held(struct request_queue *q)
-- cgit
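The consistency argument amounts to something like the following check (a
sketch, not code from DM itself):

    /* With the flag in QUEUE_FLAG_MQ_DEFAULT, a stacking driver can use a
     * single test for both request_fn- and mq-based queues instead of also
     * special-casing q->mq_ops. */
    static bool can_stack_on(struct request_queue *q)
    {
            return blk_queue_stackable(q);
    }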
Bottomley" Signed-off-by: Kent Overstreet [dpark: add more description in commit message] Signed-off-by: Dongsu Park [hch: fixed to do a deep clone of the iov_iter, and to properly use the iov_iter direction] Signed-off-by: Christoph Hellwig Reviewed-by: Ming Lei Signed-off-by: Jens Axboe --- block/bio.c | 146 +++++++++++++++++++++++-------------------------- block/blk-map.c | 38 ++++++------- block/scsi_ioctl.c | 17 ++---- drivers/scsi/sg.c | 15 ++--- include/linux/bio.h | 7 +-- include/linux/blkdev.h | 4 +- 6 files changed, 101 insertions(+), 126 deletions(-) (limited to 'include/linux/blkdev.h') diff --git a/block/bio.c b/block/bio.c index 0895f694f440..7d8c6555e3f3 100644 --- a/block/bio.c +++ b/block/bio.c @@ -28,7 +28,6 @@ #include #include #include -#include /* for struct sg_iovec */ #include @@ -1022,21 +1021,11 @@ void bio_copy_data(struct bio *dst, struct bio *src) EXPORT_SYMBOL(bio_copy_data); struct bio_map_data { - int nr_sgvecs; int is_our_pages; - struct sg_iovec sgvecs[]; + struct iov_iter iter; + struct iovec iov[]; }; -static void bio_set_map_data(struct bio_map_data *bmd, struct bio *bio, - const struct sg_iovec *iov, int iov_count, - int is_our_pages) -{ - memcpy(bmd->sgvecs, iov, sizeof(struct sg_iovec) * iov_count); - bmd->nr_sgvecs = iov_count; - bmd->is_our_pages = is_our_pages; - bio->bi_private = bmd; -} - static struct bio_map_data *bio_alloc_map_data(unsigned int iov_count, gfp_t gfp_mask) { @@ -1044,36 +1033,33 @@ static struct bio_map_data *bio_alloc_map_data(unsigned int iov_count, return NULL; return kmalloc(sizeof(struct bio_map_data) + - sizeof(struct sg_iovec) * iov_count, gfp_mask); + sizeof(struct iovec) * iov_count, gfp_mask); } -static int __bio_copy_iov(struct bio *bio, const struct sg_iovec *iov, int iov_count, - int to_user, int from_user) +static int __bio_copy_iov(struct bio *bio, const struct iov_iter *iter, + int to_user, int from_user) { int ret = 0, i; struct bio_vec *bvec; - int iov_idx = 0; - unsigned int iov_off = 0; + struct iov_iter iov_iter = *iter; bio_for_each_segment_all(bvec, bio, i) { char *bv_addr = page_address(bvec->bv_page); unsigned int bv_len = bvec->bv_len; - while (bv_len && iov_idx < iov_count) { - unsigned int bytes; - char __user *iov_addr; - - bytes = min_t(unsigned int, - iov[iov_idx].iov_len - iov_off, bv_len); - iov_addr = iov[iov_idx].iov_base + iov_off; + while (bv_len && iov_iter.count) { + struct iovec iov = iov_iter_iovec(&iov_iter); + unsigned int bytes = min_t(unsigned int, bv_len, + iov.iov_len); if (!ret) { if (to_user) - ret = copy_to_user(iov_addr, bv_addr, - bytes); + ret = copy_to_user(iov.iov_base, + bv_addr, bytes); if (from_user) - ret = copy_from_user(bv_addr, iov_addr, + ret = copy_from_user(bv_addr, + iov.iov_base, bytes); if (ret) @@ -1082,13 +1068,7 @@ static int __bio_copy_iov(struct bio *bio, const struct sg_iovec *iov, int iov_c bv_len -= bytes; bv_addr += bytes; - iov_addr += bytes; - iov_off += bytes; - - if (iov[iov_idx].iov_len == iov_off) { - iov_idx++; - iov_off = 0; - } + iov_iter_advance(&iov_iter, bytes); } } @@ -1122,7 +1102,7 @@ int bio_uncopy_user(struct bio *bio) * don't copy into a random user address space, just free. 
*/ if (current->mm) - ret = __bio_copy_iov(bio, bmd->sgvecs, bmd->nr_sgvecs, + ret = __bio_copy_iov(bio, &bmd->iter, bio_data_dir(bio) == READ, 0); if (bmd->is_our_pages) bio_free_pages(bio); @@ -1135,12 +1115,10 @@ EXPORT_SYMBOL(bio_uncopy_user); /** * bio_copy_user_iov - copy user data to bio - * @q: destination block queue - * @map_data: pointer to the rq_map_data holding pages (if necessary) - * @iov: the iovec. - * @iov_count: number of elements in the iovec - * @write_to_vm: bool indicating writing to pages or not - * @gfp_mask: memory allocation flags + * @q: destination block queue + * @map_data: pointer to the rq_map_data holding pages (if necessary) + * @iter: iovec iterator + * @gfp_mask: memory allocation flags * * Prepares and returns a bio for indirect user io, bouncing data * to/from kernel pages as necessary. Must be paired with @@ -1148,24 +1126,25 @@ EXPORT_SYMBOL(bio_uncopy_user); */ struct bio *bio_copy_user_iov(struct request_queue *q, struct rq_map_data *map_data, - const struct sg_iovec *iov, int iov_count, - int write_to_vm, gfp_t gfp_mask) + const struct iov_iter *iter, + gfp_t gfp_mask) { struct bio_map_data *bmd; struct page *page; struct bio *bio; int i, ret; int nr_pages = 0; - unsigned int len = 0; + unsigned int len = iter->count; unsigned int offset = map_data ? map_data->offset & ~PAGE_MASK : 0; - for (i = 0; i < iov_count; i++) { + for (i = 0; i < iter->nr_segs; i++) { unsigned long uaddr; unsigned long end; unsigned long start; - uaddr = (unsigned long)iov[i].iov_base; - end = (uaddr + iov[i].iov_len + PAGE_SIZE - 1) >> PAGE_SHIFT; + uaddr = (unsigned long) iter->iov[i].iov_base; + end = (uaddr + iter->iov[i].iov_len + PAGE_SIZE - 1) + >> PAGE_SHIFT; start = uaddr >> PAGE_SHIFT; /* @@ -1175,22 +1154,31 @@ struct bio *bio_copy_user_iov(struct request_queue *q, return ERR_PTR(-EINVAL); nr_pages += end - start; - len += iov[i].iov_len; } if (offset) nr_pages++; - bmd = bio_alloc_map_data(iov_count, gfp_mask); + bmd = bio_alloc_map_data(iter->nr_segs, gfp_mask); if (!bmd) return ERR_PTR(-ENOMEM); + /* + * We need to do a deep copy of the iov_iter including the iovecs. + * The caller provided iov might point to an on-stack or otherwise + * shortlived one. + */ + bmd->is_our_pages = map_data ? 0 : 1; + memcpy(bmd->iov, iter->iov, sizeof(struct iovec) * iter->nr_segs); + iov_iter_init(&bmd->iter, iter->type, bmd->iov, + iter->nr_segs, iter->count); + ret = -ENOMEM; bio = bio_kmalloc(gfp_mask, nr_pages); if (!bio) goto out_bmd; - if (!write_to_vm) + if (iter->type & WRITE) bio->bi_rw |= REQ_WRITE; ret = 0; @@ -1238,14 +1226,14 @@ struct bio *bio_copy_user_iov(struct request_queue *q, /* * success */ - if ((!write_to_vm && (!map_data || !map_data->null_mapped)) || + if (((iter->type & WRITE) && (!map_data || !map_data->null_mapped)) || (map_data && map_data->from_user)) { - ret = __bio_copy_iov(bio, iov, iov_count, 0, 1); + ret = __bio_copy_iov(bio, iter, 0, 1); if (ret) goto cleanup; } - bio_set_map_data(bmd, bio, iov, iov_count, map_data ? 
0 : 1); + bio->bi_private = bmd; return bio; cleanup: if (!map_data) @@ -1258,19 +1246,21 @@ out_bmd: static struct bio *__bio_map_user_iov(struct request_queue *q, struct block_device *bdev, - const struct sg_iovec *iov, int iov_count, - int write_to_vm, gfp_t gfp_mask) + const struct iov_iter *iter, + gfp_t gfp_mask) { - int i, j; + int j; int nr_pages = 0; struct page **pages; struct bio *bio; int cur_page = 0; int ret, offset; + struct iov_iter i; + struct iovec iov; - for (i = 0; i < iov_count; i++) { - unsigned long uaddr = (unsigned long)iov[i].iov_base; - unsigned long len = iov[i].iov_len; + iov_for_each(iov, i, *iter) { + unsigned long uaddr = (unsigned long) iov.iov_base; + unsigned long len = iov.iov_len; unsigned long end = (uaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT; unsigned long start = uaddr >> PAGE_SHIFT; @@ -1300,16 +1290,17 @@ static struct bio *__bio_map_user_iov(struct request_queue *q, if (!pages) goto out; - for (i = 0; i < iov_count; i++) { - unsigned long uaddr = (unsigned long)iov[i].iov_base; - unsigned long len = iov[i].iov_len; + iov_for_each(iov, i, *iter) { + unsigned long uaddr = (unsigned long) iov.iov_base; + unsigned long len = iov.iov_len; unsigned long end = (uaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT; unsigned long start = uaddr >> PAGE_SHIFT; const int local_nr_pages = end - start; const int page_limit = cur_page + local_nr_pages; ret = get_user_pages_fast(uaddr, local_nr_pages, - write_to_vm, &pages[cur_page]); + (iter->type & WRITE) != WRITE, + &pages[cur_page]); if (ret < local_nr_pages) { ret = -EFAULT; goto out_unmap; @@ -1349,7 +1340,7 @@ static struct bio *__bio_map_user_iov(struct request_queue *q, /* * set data direction, and check if mapped pages need bouncing */ - if (!write_to_vm) + if (iter->type & WRITE) bio->bi_rw |= REQ_WRITE; bio->bi_bdev = bdev; @@ -1357,10 +1348,10 @@ static struct bio *__bio_map_user_iov(struct request_queue *q, return bio; out_unmap: - for (i = 0; i < nr_pages; i++) { - if(!pages[i]) + for (j = 0; j < nr_pages; j++) { + if (!pages[j]) break; - page_cache_release(pages[i]); + page_cache_release(pages[j]); } out: kfree(pages); @@ -1369,25 +1360,22 @@ static struct bio *__bio_map_user_iov(struct request_queue *q, } /** - * bio_map_user_iov - map user sg_iovec table into bio - * @q: the struct request_queue for the bio - * @bdev: destination block device - * @iov: the iovec. - * @iov_count: number of elements in the iovec - * @write_to_vm: bool indicating writing to pages or not - * @gfp_mask: memory allocation flags + * bio_map_user_iov - map user iovec into bio + * @q: the struct request_queue for the bio + * @bdev: destination block device + * @iter: iovec iterator + * @gfp_mask: memory allocation flags * * Map the user space address into a bio suitable for io to a block * device. Returns an error pointer in case of error. 
*/ struct bio *bio_map_user_iov(struct request_queue *q, struct block_device *bdev, - const struct sg_iovec *iov, int iov_count, - int write_to_vm, gfp_t gfp_mask) + const struct iov_iter *iter, + gfp_t gfp_mask) { struct bio *bio; - bio = __bio_map_user_iov(q, bdev, iov, iov_count, write_to_vm, - gfp_mask); + bio = __bio_map_user_iov(q, bdev, iter, gfp_mask); if (IS_ERR(bio)) return bio; diff --git a/block/blk-map.c b/block/blk-map.c index 152a5fe5d85e..30e6bb871c5c 100644 --- a/block/blk-map.c +++ b/block/blk-map.c @@ -5,7 +5,7 @@ #include #include #include -#include /* for struct sg_iovec */ +#include #include "blk.h" @@ -44,9 +44,7 @@ static int __blk_rq_unmap_user(struct bio *bio) * @q: request queue where request should be inserted * @rq: request to map data to * @map_data: pointer to the rq_map_data holding pages (if necessary) - * @iov: pointer to the iovec - * @iov_count: number of elements in the iovec - * @len: I/O byte count + * @iter: iovec iterator * @gfp_mask: memory allocation flags * * Description: @@ -63,20 +61,21 @@ static int __blk_rq_unmap_user(struct bio *bio) * unmapping. */ int blk_rq_map_user_iov(struct request_queue *q, struct request *rq, - struct rq_map_data *map_data, const struct sg_iovec *iov, - int iov_count, unsigned int len, gfp_t gfp_mask) + struct rq_map_data *map_data, + const struct iov_iter *iter, gfp_t gfp_mask) { struct bio *bio; - int i, read = rq_data_dir(rq) == READ; int unaligned = 0; + struct iov_iter i; + struct iovec iov; - if (!iov || iov_count <= 0) + if (!iter || !iter->count) return -EINVAL; - for (i = 0; i < iov_count; i++) { - unsigned long uaddr = (unsigned long)iov[i].iov_base; + iov_for_each(iov, i, *iter) { + unsigned long uaddr = (unsigned long) iov.iov_base; - if (!iov[i].iov_len) + if (!iov.iov_len) return -EINVAL; /* @@ -86,16 +85,15 @@ int blk_rq_map_user_iov(struct request_queue *q, struct request *rq, unaligned = 1; } - if (unaligned || (q->dma_pad_mask & len) || map_data) - bio = bio_copy_user_iov(q, map_data, iov, iov_count, read, - gfp_mask); + if (unaligned || (q->dma_pad_mask & iter->count) || map_data) + bio = bio_copy_user_iov(q, map_data, iter, gfp_mask); else - bio = bio_map_user_iov(q, NULL, iov, iov_count, read, gfp_mask); + bio = bio_map_user_iov(q, NULL, iter, gfp_mask); if (IS_ERR(bio)) return PTR_ERR(bio); - if (bio->bi_iter.bi_size != len) { + if (bio->bi_iter.bi_size != iter->count) { /* * Grab an extra reference to this bio, as bio_unmap_user() * expects to be able to drop it twice as it happens on the @@ -121,12 +119,14 @@ int blk_rq_map_user(struct request_queue *q, struct request *rq, struct rq_map_data *map_data, void __user *ubuf, unsigned long len, gfp_t gfp_mask) { - struct sg_iovec iov; + struct iovec iov; + struct iov_iter i; - iov.iov_base = (void __user *)ubuf; + iov.iov_base = ubuf; iov.iov_len = len; + iov_iter_init(&i, rq_data_dir(rq), &iov, 1, len); - return blk_rq_map_user_iov(q, rq, map_data, &iov, 1, len, gfp_mask); + return blk_rq_map_user_iov(q, rq, map_data, &i, gfp_mask); } EXPORT_SYMBOL(blk_rq_map_user); diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c index 28163fad3c5d..e1f71c396193 100644 --- a/block/scsi_ioctl.c +++ b/block/scsi_ioctl.c @@ -332,7 +332,7 @@ static int sg_io(struct request_queue *q, struct gendisk *bd_disk, ret = 0; if (hdr->iovec_count) { - size_t iov_data_len; + struct iov_iter i; struct iovec *iov = NULL; ret = rw_copy_check_uvector(-1, hdr->dxferp, hdr->iovec_count, @@ -342,20 +342,11 @@ static int sg_io(struct request_queue *q, struct gendisk *bd_disk, goto 
out_free_cdb; } - iov_data_len = ret; - ret = 0; - /* SG_IO howto says that the shorter of the two wins */ - if (hdr->dxfer_len < iov_data_len) { - hdr->iovec_count = iov_shorten(iov, - hdr->iovec_count, - hdr->dxfer_len); - iov_data_len = hdr->dxfer_len; - } + iov_iter_init(&i, rq_data_dir(rq), iov, hdr->iovec_count, + min_t(unsigned, ret, hdr->dxfer_len)); - ret = blk_rq_map_user_iov(q, rq, NULL, (struct sg_iovec *) iov, - hdr->iovec_count, - iov_data_len, GFP_KERNEL); + ret = blk_rq_map_user_iov(q, rq, NULL, &i, GFP_KERNEL); kfree(iov); } else if (hdr->dxfer_len) ret = blk_rq_map_user(q, rq, NULL, hdr->dxferp, hdr->dxfer_len, diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c index b14f64cb9724..4052e592818c 100644 --- a/drivers/scsi/sg.c +++ b/drivers/scsi/sg.c @@ -1734,22 +1734,19 @@ sg_start_req(Sg_request *srp, unsigned char *cmd) } if (iov_count) { - int len, size = sizeof(struct sg_iovec) * iov_count; + int size = sizeof(struct iovec) * iov_count; struct iovec *iov; + struct iov_iter i; iov = memdup_user(hp->dxferp, size); if (IS_ERR(iov)) return PTR_ERR(iov); - len = iov_length(iov, iov_count); - if (hp->dxfer_len < len) { - iov_count = iov_shorten(iov, iov_count, hp->dxfer_len); - len = hp->dxfer_len; - } + iov_iter_init(&i, rw, iov, iov_count, + min_t(size_t, hp->dxfer_len, + iov_length(iov, iov_count))); - res = blk_rq_map_user_iov(q, rq, md, (struct sg_iovec *)iov, - iov_count, - len, GFP_ATOMIC); + res = blk_rq_map_user_iov(q, rq, md, &i, GFP_ATOMIC); kfree(iov); } else res = blk_rq_map_user(q, rq, md, hp->dxferp, diff --git a/include/linux/bio.h b/include/linux/bio.h index d0d6735d61da..0d6105b34ffa 100644 --- a/include/linux/bio.h +++ b/include/linux/bio.h @@ -428,11 +428,10 @@ extern int bio_add_page(struct bio *, struct page *, unsigned int,unsigned int); extern int bio_add_pc_page(struct request_queue *, struct bio *, struct page *, unsigned int, unsigned int); extern int bio_get_nr_vecs(struct block_device *); -struct sg_iovec; struct rq_map_data; extern struct bio *bio_map_user_iov(struct request_queue *, struct block_device *, - const struct sg_iovec *, int, int, gfp_t); + const struct iov_iter *, gfp_t); extern void bio_unmap_user(struct bio *); extern struct bio *bio_map_kern(struct request_queue *, void *, unsigned int, gfp_t); @@ -462,8 +461,8 @@ extern int bio_alloc_pages(struct bio *bio, gfp_t gfp); extern struct bio *bio_copy_user_iov(struct request_queue *, struct rq_map_data *, - const struct sg_iovec *, - int, int, gfp_t); + const struct iov_iter *, + gfp_t); extern int bio_uncopy_user(struct bio *); void zero_fill_bio(struct bio *bio); extern struct bio_vec *bvec_alloc(gfp_t, int, unsigned long *, mempool_t *); diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 13e16401a7ce..bf4ef666d191 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -855,8 +855,8 @@ extern int blk_rq_map_user(struct request_queue *, struct request *, extern int blk_rq_unmap_user(struct bio *); extern int blk_rq_map_kern(struct request_queue *, struct request *, void *, unsigned int, gfp_t); extern int blk_rq_map_user_iov(struct request_queue *, struct request *, - struct rq_map_data *, const struct sg_iovec *, - int, unsigned int, gfp_t); + struct rq_map_data *, const struct iov_iter *, + gfp_t); extern int blk_execute_rq(struct request_queue *, struct gendisk *, struct request *, int); extern void blk_execute_rq_nowait(struct request_queue *, struct gendisk *, -- cgit From d427e3c82ef4fc5fbb22c0cef0b040e6767b1028 Mon Sep 17 00:00:00 2001 From: 
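For callers, the conversion pattern is the one blk_rq_map_user() itself now
follows: build an iov_iter over the iovecs and hand it in. A sketch with a
hypothetical two-segment user buffer (ubuf0/ubuf1, len0/len1, q and rq are
assumed to be in scope):

    struct iovec iov[2] = {
            { .iov_base = ubuf0, .iov_len = len0 },     /* user pointers */
            { .iov_base = ubuf1, .iov_len = len1 },
    };
    struct iov_iter i;
    int ret;

    /* Direction comes from the request; the count is the total length in
     * bytes, replacing the old separate 'len' argument. */
    iov_iter_init(&i, rq_data_dir(rq), iov, 2, len0 + len1);
    ret = blk_rq_map_user_iov(q, rq, NULL, &i, GFP_KERNEL);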
From d427e3c82ef4fc5fbb22c0cef0b040e6767b1028 Mon Sep 17 00:00:00 2001
From: Christoph Hellwig
Date: Wed, 11 Feb 2015 14:07:50 +0100
Subject: block: remove unused function blk_bio_map_sg

Signed-off-by: Christoph Hellwig
Signed-off-by: Jens Axboe
---
 block/blk-merge.c      | 29 -----------------------------
 include/linux/blkdev.h |  2 --
 2 files changed, 31 deletions(-)

diff --git a/block/blk-merge.c b/block/blk-merge.c
index 9476b1528ded..fc1ff3b1ea1f 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -283,35 +283,6 @@ int blk_rq_map_sg(struct request_queue *q, struct request *rq,
 }
 EXPORT_SYMBOL(blk_rq_map_sg);
 
-/**
- * blk_bio_map_sg - map a bio to a scatterlist
- * @q: request_queue in question
- * @bio: bio being mapped
- * @sglist: scatterlist being mapped
- *
- * Note:
- *    Caller must make sure sg can hold bio->bi_phys_segments entries
- *
- * Will return the number of sg entries setup
- */
-int blk_bio_map_sg(struct request_queue *q, struct bio *bio,
-		   struct scatterlist *sglist)
-{
-	struct scatterlist *sg = NULL;
-	int nsegs;
-	struct bio *next = bio->bi_next;
-	bio->bi_next = NULL;
-
-	nsegs = __blk_bios_map_sg(q, bio, sglist, &sg);
-	bio->bi_next = next;
-	if (sg)
-		sg_mark_end(sg);
-
-	BUG_ON(bio->bi_phys_segments && nsegs > bio->bi_phys_segments);
-	return nsegs;
-}
-EXPORT_SYMBOL(blk_bio_map_sg);
-
 static inline int ll_new_hw_segment(struct request_queue *q,
 				    struct request *req,
 				    struct bio *bio)
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index bf4ef666d191..7f9a516f24de 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -1049,8 +1049,6 @@ extern void blk_queue_flush_queueable(struct request_queue *q, bool queueable);
 extern struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev);
 extern int blk_rq_map_sg(struct request_queue *, struct request *,
 			 struct scatterlist *);
-extern int blk_bio_map_sg(struct request_queue *q, struct bio *bio,
-			  struct scatterlist *sglist);
 extern void blk_dump_rq_flags(struct request *, char *);
 extern long nr_blockdev_pages(void);
 
-- cgit