Diffstat (limited to 'include/linux/blkdev.h')
-rw-r--r--	include/linux/blkdev.h	86
1 file changed, 46 insertions, 40 deletions
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 3d9cf326574f..adf33079771e 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -90,18 +90,17 @@ struct request {
 	struct list_head queuelist;
 	union {
 		struct call_single_data csd;
-		unsigned long fifo_time;
+		u64 fifo_time;
 	};
 
 	struct request_queue *q;
 	struct blk_mq_ctx *mq_ctx;
 
-	u64 cmd_flags;
+	int cpu;
 	unsigned cmd_type;
+	u64 cmd_flags;
 	unsigned long atomic_flags;
 
-	int cpu;
-
 	/* the following two fields are internal, NEVER access directly */
 	unsigned int __data_len;	/* total data len */
 	sector_t __sector;		/* sector cursor */
@@ -200,6 +199,20 @@ struct request {
 	struct request *next_rq;
 };
 
+#define REQ_OP_SHIFT (8 * sizeof(u64) - REQ_OP_BITS)
+#define req_op(req)  ((req)->cmd_flags >> REQ_OP_SHIFT)
+
+#define req_set_op(req, op) do {				\
+	WARN_ON(op >= (1 << REQ_OP_BITS));			\
+	(req)->cmd_flags &= ((1ULL << REQ_OP_SHIFT) - 1);	\
+	(req)->cmd_flags |= ((u64) (op) << REQ_OP_SHIFT);	\
+} while (0)
+
+#define req_set_op_attrs(req, op, flags) do {	\
+	req_set_op(req, op);			\
+	(req)->cmd_flags |= flags;		\
+} while (0)
+
 static inline unsigned short req_get_ioprio(struct request *req)
 {
 	return req->ioprio;
@@ -483,7 +496,7 @@ struct request_queue {
 #define QUEUE_FLAG_DISCARD     14	/* supports DISCARD */
 #define QUEUE_FLAG_NOXMERGES   15	/* No extended merges */
 #define QUEUE_FLAG_ADD_RANDOM  16	/* Contributes to random pool */
-#define QUEUE_FLAG_SECDISCARD  17	/* supports SECDISCARD */
+#define QUEUE_FLAG_SECERASE    17	/* supports secure erase */
 #define QUEUE_FLAG_SAME_FORCE  18	/* force complete on same CPU */
 #define QUEUE_FLAG_DEAD        19	/* queue tear-down finished */
 #define QUEUE_FLAG_INIT_DONE   20	/* queue is initialized */
@@ -492,6 +505,7 @@ struct request_queue {
 #define QUEUE_FLAG_WC	       23	/* Write back caching */
 #define QUEUE_FLAG_FUA	       24	/* device supports FUA writes */
 #define QUEUE_FLAG_FLUSH_NQ    25	/* flush not queueuable */
+#define QUEUE_FLAG_DAX         26	/* device supports DAX */
 
 #define QUEUE_FLAG_DEFAULT	((1 << QUEUE_FLAG_IO_STAT) |		\
 				 (1 << QUEUE_FLAG_STACKABLE)	|	\
@@ -579,8 +593,9 @@ static inline void queue_flag_clear(unsigned int flag, struct request_queue *q)
 #define blk_queue_stackable(q)	\
 	test_bit(QUEUE_FLAG_STACKABLE, &(q)->queue_flags)
 #define blk_queue_discard(q)	test_bit(QUEUE_FLAG_DISCARD, &(q)->queue_flags)
-#define blk_queue_secdiscard(q)	(blk_queue_discard(q) && \
-	test_bit(QUEUE_FLAG_SECDISCARD, &(q)->queue_flags))
+#define blk_queue_secure_erase(q) \
+	(test_bit(QUEUE_FLAG_SECERASE, &(q)->queue_flags))
+#define blk_queue_dax(q)	test_bit(QUEUE_FLAG_DAX, &(q)->queue_flags)
 
 #define blk_noretry_request(rq) \
 	((rq)->cmd_flags & (REQ_FAILFAST_DEV|REQ_FAILFAST_TRANSPORT| \
@@ -597,7 +612,7 @@ static inline void queue_flag_clear(unsigned int flag, struct request_queue *q)
 
 #define list_entry_rq(ptr)	list_entry((ptr), struct request, queuelist)
 
-#define rq_data_dir(rq)		((int)((rq)->cmd_flags & 1))
+#define rq_data_dir(rq)		(op_is_write(req_op(rq)) ? WRITE : READ)
 
 /*
  * Driver can handle struct request, if it either has an old style
@@ -616,14 +631,14 @@ static inline unsigned int blk_queue_cluster(struct request_queue *q)
 /*
  * We regard a request as sync, if either a read or a sync write
  */
-static inline bool rw_is_sync(unsigned int rw_flags)
+static inline bool rw_is_sync(int op, unsigned int rw_flags)
 {
-	return !(rw_flags & REQ_WRITE) || (rw_flags & REQ_SYNC);
+	return op == REQ_OP_READ || (rw_flags & REQ_SYNC);
 }
 
 static inline bool rq_is_sync(struct request *rq)
 {
-	return rw_is_sync(rq->cmd_flags);
+	return rw_is_sync(req_op(rq), rq->cmd_flags);
 }
 
 static inline bool blk_rl_full(struct request_list *rl, bool sync)
@@ -652,22 +667,10 @@ static inline bool rq_mergeable(struct request *rq)
 	if (rq->cmd_type != REQ_TYPE_FS)
 		return false;
 
-	if (rq->cmd_flags & REQ_NOMERGE_FLAGS)
+	if (req_op(rq) == REQ_OP_FLUSH)
 		return false;
 
-	return true;
-}
-
-static inline bool blk_check_merge_flags(unsigned int flags1,
-					 unsigned int flags2)
-{
-	if ((flags1 & REQ_DISCARD) != (flags2 & REQ_DISCARD))
-		return false;
-
-	if ((flags1 & REQ_SECURE) != (flags2 & REQ_SECURE))
-		return false;
-
-	if ((flags1 & REQ_WRITE_SAME) != (flags2 & REQ_WRITE_SAME))
+	if (rq->cmd_flags & REQ_NOMERGE_FLAGS)
 		return false;
 
 	return true;
@@ -786,8 +789,6 @@ extern void blk_rq_init(struct request_queue *q, struct request *rq);
 extern void blk_put_request(struct request *);
 extern void __blk_put_request(struct request_queue *, struct request *);
 extern struct request *blk_get_request(struct request_queue *, int, gfp_t);
-extern struct request *blk_make_request(struct request_queue *, struct bio *,
-					gfp_t);
 extern void blk_rq_set_block_pc(struct request *);
 extern void blk_requeue_request(struct request_queue *, struct request *);
 extern void blk_add_request_payload(struct request *rq, struct page *page,
@@ -800,6 +801,7 @@ extern int blk_rq_prep_clone(struct request *rq, struct request *rq_src,
 extern void blk_rq_unprep_clone(struct request *rq);
 extern int blk_insert_cloned_request(struct request_queue *q,
 				     struct request *rq);
+extern int blk_rq_append_bio(struct request *rq, struct bio *bio);
 extern void blk_delay_queue(struct request_queue *, unsigned long);
 extern void blk_queue_split(struct request_queue *, struct bio **,
 			    struct bio_set *);
@@ -879,12 +881,12 @@ static inline unsigned int blk_rq_cur_sectors(const struct request *rq)
 }
 
 static inline unsigned int blk_queue_get_max_sectors(struct request_queue *q,
-						     unsigned int cmd_flags)
+						     int op)
 {
-	if (unlikely(cmd_flags & REQ_DISCARD))
+	if (unlikely(op == REQ_OP_DISCARD))
 		return min(q->limits.max_discard_sectors, UINT_MAX >> 9);
 
-	if (unlikely(cmd_flags & REQ_WRITE_SAME))
+	if (unlikely(op == REQ_OP_WRITE_SAME))
 		return q->limits.max_write_same_sectors;
 
 	return q->limits.max_sectors;
@@ -904,18 +906,19 @@ static inline unsigned int blk_max_size_offset(struct request_queue *q,
 			(offset & (q->limits.chunk_sectors - 1));
 }
 
-static inline unsigned int blk_rq_get_max_sectors(struct request *rq)
+static inline unsigned int blk_rq_get_max_sectors(struct request *rq,
+						  sector_t offset)
 {
 	struct request_queue *q = rq->q;
 
 	if (unlikely(rq->cmd_type != REQ_TYPE_FS))
 		return q->limits.max_hw_sectors;
 
-	if (!q->limits.chunk_sectors || (rq->cmd_flags & REQ_DISCARD))
-		return blk_queue_get_max_sectors(q, rq->cmd_flags);
+	if (!q->limits.chunk_sectors || (req_op(rq) == REQ_OP_DISCARD))
+		return blk_queue_get_max_sectors(q, req_op(rq));
 
-	return min(blk_max_size_offset(q, blk_rq_pos(rq)),
-			blk_queue_get_max_sectors(q, rq->cmd_flags));
+	return min(blk_max_size_offset(q, offset),
+			blk_queue_get_max_sectors(q, req_op(rq)));
 }
 
 static inline unsigned int blk_rq_count_bios(struct request *rq)
@@ -1135,13 +1138,16 @@ static inline struct request *blk_map_queue_find_tag(struct blk_queue_tag *bqt,
 	return bqt->tag_index[tag];
 }
 
-#define BLKDEV_DISCARD_SECURE  0x01    /* secure discard */
+
+#define BLKDEV_DISCARD_SECURE	(1 << 0)	/* issue a secure erase */
+#define BLKDEV_DISCARD_ZERO	(1 << 1)	/* must reliably zero data */
 
 extern int blkdev_issue_flush(struct block_device *, gfp_t, sector_t *);
 extern int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
 		sector_t nr_sects, gfp_t gfp_mask, unsigned long flags);
 extern int __blkdev_issue_discard(struct block_device *bdev, sector_t sector,
-		sector_t nr_sects, gfp_t gfp_mask, int type, struct bio **biop);
+		sector_t nr_sects, gfp_t gfp_mask, int flags,
+		struct bio **biop);
 extern int blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
 		sector_t nr_sects, gfp_t gfp_mask, struct page *page);
 extern int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
@@ -1659,7 +1665,7 @@ static inline bool integrity_req_gap_front_merge(struct request *req,
  */
 struct blk_dax_ctl {
 	sector_t sector;
-	void __pmem *addr;
+	void *addr;
 	long size;
 	pfn_t pfn;
 };
@@ -1670,8 +1676,8 @@ struct block_device_operations {
 	int (*rw_page)(struct block_device *, sector_t, struct page *, int rw);
 	int (*ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
 	int (*compat_ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
-	long (*direct_access)(struct block_device *, sector_t, void __pmem **,
-			pfn_t *, long);
+	long (*direct_access)(struct block_device *, sector_t, void **, pfn_t *,
+			long);
 	unsigned int (*check_events) (struct gendisk *disk,
 				      unsigned int clearing);
 	/* ->media_changed() is DEPRECATED, use ->check_events() instead */
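Note: the req_op()/req_set_op()/req_set_op_attrs() helpers added above encode the operation in the top REQ_OP_BITS bits of cmd_flags, leaving the low bits free for request flags. The following is a minimal standalone sketch of that encoding, not the kernel code itself: struct request is reduced to its cmd_flags field, and REQ_OP_BITS == 3, the REQ_OP_* values, and the REQ_SYNC bit are assumptions standing in for the blk_types.h definitions of this series.

/* Standalone mock-up of the op-in-top-bits encoding introduced above.
 * Everything below REQ_OP_SHIFT remains available for request flags. */
#include <stdint.h>
#include <stdio.h>

#define REQ_OP_BITS	3	/* assumed, per blk_types.h of this era */
#define REQ_OP_SHIFT	(8 * sizeof(uint64_t) - REQ_OP_BITS)

enum req_op { REQ_OP_READ, REQ_OP_WRITE, REQ_OP_DISCARD,
	      REQ_OP_SECURE_ERASE, REQ_OP_WRITE_SAME, REQ_OP_FLUSH };

#define REQ_SYNC	(1ULL << 4)	/* placeholder flag bit */

struct request { uint64_t cmd_flags; };	/* reduced for illustration */

#define req_op(req)  ((req)->cmd_flags >> REQ_OP_SHIFT)

#define req_set_op(req, op) do {				\
	(req)->cmd_flags &= ((1ULL << REQ_OP_SHIFT) - 1);	\
	(req)->cmd_flags |= ((uint64_t)(op) << REQ_OP_SHIFT);	\
} while (0)

#define req_set_op_attrs(req, op, flags) do {	\
	req_set_op(req, op);			\
	(req)->cmd_flags |= (flags);		\
} while (0)

int main(void)
{
	struct request rq = { 0 };

	req_set_op_attrs(&rq, REQ_OP_DISCARD, REQ_SYNC);
	/* the op lives in the top bits; the flag keeps its low bit */
	printf("op=%llu sync=%d\n",
	       (unsigned long long)req_op(&rq),
	       !!(rq.cmd_flags & REQ_SYNC));
	return 0;
}

Because req_set_op() first masks cmd_flags down to the low REQ_OP_SHIFT bits before OR-ing in the new op, the operation can be changed without disturbing any flags already set.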
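Similarly, a hedged sketch of how a caller might use the renamed secure-erase interface declared above (blk_queue_secure_erase(), BLKDEV_DISCARD_SECURE): example_secure_erase() and the 1 MiB range are illustrative, not part of this patch.

/* Hypothetical in-kernel caller; assumes the <linux/blkdev.h> from
 * this series. */
#include <linux/blkdev.h>

static int example_secure_erase(struct block_device *bdev)
{
	/* QUEUE_FLAG_SECERASE replaces QUEUE_FLAG_SECDISCARD */
	if (!blk_queue_secure_erase(bdev_get_queue(bdev)))
		return -EOPNOTSUPP;

	/* 2048 sectors of 512 bytes == 1 MiB, starting at sector 0 */
	return blkdev_issue_discard(bdev, 0, 2048, GFP_KERNEL,
				    BLKDEV_DISCARD_SECURE);
}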