diff options
Diffstat (limited to 'include/linux/blk-mq.h')
-rw-r--r--	include/linux/blk-mq.h | 20 +++++++++++++++++---
1 file changed, 17 insertions(+), 3 deletions(-)
diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
index 3fa1fa59f9b2..0bf056de5cc3 100644
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ -140,6 +140,7 @@ typedef int (poll_fn)(struct blk_mq_hw_ctx *);
 typedef int (map_queues_fn)(struct blk_mq_tag_set *set);
 typedef bool (busy_fn)(struct request_queue *);
 typedef void (complete_fn)(struct request *);
+typedef void (cleanup_rq_fn)(struct request *);
 
 struct blk_mq_ops {
 	/*
@@ -201,6 +202,12 @@ struct blk_mq_ops {
 	void (*initialize_rq_fn)(struct request *rq);
 
 	/*
+	 * Called before freeing one request which isn't completed yet,
+	 * and usually for freeing the driver private data
+	 */
+	cleanup_rq_fn		*cleanup_rq;
+
+	/*
 	 * If set, returns whether or not this queue currently is busy
 	 */
 	busy_fn			*busy;
@@ -241,12 +248,12 @@ enum {
 
 struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *);
 struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
-						  struct request_queue *q);
+						  struct request_queue *q,
+						  bool elevator_init);
 struct request_queue *blk_mq_init_sq_queue(struct blk_mq_tag_set *set,
 						const struct blk_mq_ops *ops,
 						unsigned int queue_depth,
 						unsigned int set_flags);
-int blk_mq_register_dev(struct device *, struct request_queue *);
 void blk_mq_unregister_dev(struct device *, struct request_queue *);
 
 int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set);
@@ -296,6 +303,7 @@ static inline u16 blk_mq_unique_tag_to_tag(u32 unique_tag)
 
 
 int blk_mq_request_started(struct request *rq);
+int blk_mq_request_completed(struct request *rq);
 void blk_mq_start_request(struct request *rq);
 void blk_mq_end_request(struct request *rq, blk_status_t error);
 void __blk_mq_end_request(struct request *rq, blk_status_t error);
@@ -304,7 +312,6 @@ void blk_mq_requeue_request(struct request *rq, bool kick_requeue_list);
 void blk_mq_kick_requeue_list(struct request_queue *q);
 void blk_mq_delay_kick_requeue_list(struct request_queue *q, unsigned long msecs);
 bool blk_mq_complete_request(struct request *rq);
-void blk_mq_complete_request_sync(struct request *rq);
 bool blk_mq_bio_list_merge(struct request_queue *q, struct list_head *list,
 			   struct bio *bio, unsigned int nr_segs);
 bool blk_mq_queue_stopped(struct request_queue *q);
@@ -321,6 +328,7 @@ bool blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async);
 void blk_mq_run_hw_queues(struct request_queue *q, bool async);
 void blk_mq_tagset_busy_iter(struct blk_mq_tag_set *tagset,
 		busy_tag_iter_fn *fn, void *priv);
+void blk_mq_tagset_wait_completed_request(struct blk_mq_tag_set *tagset);
 void blk_mq_freeze_queue(struct request_queue *q);
 void blk_mq_unfreeze_queue(struct request_queue *q);
 void blk_freeze_queue_start(struct request_queue *q);
@@ -366,4 +374,10 @@ static inline blk_qc_t request_to_qc_t(struct blk_mq_hw_ctx *hctx,
 			BLK_QC_T_INTERNAL;
 }
 
+static inline void blk_mq_cleanup_rq(struct request *rq)
+{
+	if (rq->q->mq_ops->cleanup_rq)
+		rq->q->mq_ops->cleanup_rq(rq);
+}
+
 #endif