-rw-r--r-- | Documentation/block/biodoc.txt     |   4
-rw-r--r-- | block/bio.c                        |   2
-rw-r--r-- | block/blk-core.c                   |  16
-rw-r--r-- | block/blk-mq-sysfs.c               |  15
-rw-r--r-- | block/blk-mq.c                     |  30
-rw-r--r-- | drivers/block/mtip32xx/mtip32xx.c  |   2
-rw-r--r-- | drivers/block/nbd.c                | 411
-rw-r--r-- | drivers/md/dm-crypt.c              |   2
-rw-r--r-- | fs/befs/linuxvfs.c                 |   2
-rw-r--r-- | fs/block_dev.c                     |  11
-rw-r--r-- | fs/btrfs/inode.c                   |   5
-rw-r--r-- | include/linux/bio.h                |   2
-rw-r--r-- | include/linux/blk-mq.h             |  14
-rw-r--r-- | include/linux/blk_types.h          |  21
-rw-r--r-- | include/linux/blkdev.h             |   4
-rw-r--r-- | include/linux/ioprio.h             |   1
-rw-r--r-- | include/linux/workqueue.h          |   1
-rw-r--r-- | kernel/workqueue.c                 |  40
18 files changed, 268 insertions, 315 deletions
diff --git a/Documentation/block/biodoc.txt b/Documentation/block/biodoc.txt index bcdb2b4c1f12..918e1e0d0e78 100644 --- a/Documentation/block/biodoc.txt +++ b/Documentation/block/biodoc.txt @@ -115,7 +115,7 @@ i. Per-queue limits/values exported to the generic layer by the driver Various parameters that the generic i/o scheduler logic uses are set at a per-queue level (e.g maximum request size, maximum number of segments in -a scatter-gather list, hardsect size) +a scatter-gather list, logical block size) Some parameters that were earlier available as global arrays indexed by major/minor are now directly associated with the queue. Some of these may @@ -156,7 +156,7 @@ Some new queue property settings: blk_queue_max_segment_size(q, max_seg_size) Maximum size of a clustered segment, 64kB default. - blk_queue_hardsect_size(q, hardsect_size) + blk_queue_logical_block_size(q, logical_block_size) Lowest possible sector size that the hardware can operate on, 512 bytes default. diff --git a/block/bio.c b/block/bio.c index aa7354088008..a6d279e1ea9e 100644 --- a/block/bio.c +++ b/block/bio.c @@ -1274,7 +1274,7 @@ struct bio *bio_map_user_iov(struct request_queue *q, nr_pages += end - start; /* - * buffer must be aligned to at least hardsector size for now + * buffer must be aligned to at least logical block size for now */ if (uaddr & queue_dma_alignment(q)) return ERR_PTR(-EINVAL); diff --git a/block/blk-core.c b/block/blk-core.c index 36c7ac328d8c..14d7c0740dc0 100644 --- a/block/blk-core.c +++ b/block/blk-core.c @@ -288,7 +288,7 @@ void blk_sync_queue(struct request_queue *q) int i; queue_for_each_hw_ctx(q, hctx, i) { - cancel_delayed_work_sync(&hctx->run_work); + cancel_work_sync(&hctx->run_work); cancel_delayed_work_sync(&hctx->delay_work); } } else { @@ -3097,6 +3097,12 @@ int kblockd_schedule_work(struct work_struct *work) } EXPORT_SYMBOL(kblockd_schedule_work); +int kblockd_schedule_work_on(int cpu, struct work_struct *work) +{ + return queue_work_on(cpu, kblockd_workqueue, work); +} +EXPORT_SYMBOL(kblockd_schedule_work_on); + int kblockd_schedule_delayed_work(struct delayed_work *dwork, unsigned long delay) { @@ -3301,19 +3307,23 @@ bool blk_poll(struct request_queue *q, blk_qc_t cookie) { struct blk_plug *plug; long state; + unsigned int queue_num; + struct blk_mq_hw_ctx *hctx; if (!q->mq_ops || !q->mq_ops->poll || !blk_qc_t_valid(cookie) || !test_bit(QUEUE_FLAG_POLL, &q->queue_flags)) return false; + queue_num = blk_qc_t_to_queue_num(cookie); + hctx = q->queue_hw_ctx[queue_num]; + hctx->poll_considered++; + plug = current->plug; if (plug) blk_flush_plug_list(plug, false); state = current->state; while (!need_resched()) { - unsigned int queue_num = blk_qc_t_to_queue_num(cookie); - struct blk_mq_hw_ctx *hctx = q->queue_hw_ctx[queue_num]; int ret; hctx->poll_invoked++; diff --git a/block/blk-mq-sysfs.c b/block/blk-mq-sysfs.c index fe822aa5b8e4..ac5160eb6862 100644 --- a/block/blk-mq-sysfs.c +++ b/block/blk-mq-sysfs.c @@ -176,7 +176,17 @@ static ssize_t blk_mq_sysfs_rq_list_show(struct blk_mq_ctx *ctx, char *page) static ssize_t blk_mq_hw_sysfs_poll_show(struct blk_mq_hw_ctx *hctx, char *page) { - return sprintf(page, "invoked=%lu, success=%lu\n", hctx->poll_invoked, hctx->poll_success); + return sprintf(page, "considered=%lu, invoked=%lu, success=%lu\n", + hctx->poll_considered, hctx->poll_invoked, + hctx->poll_success); +} + +static ssize_t blk_mq_hw_sysfs_poll_store(struct blk_mq_hw_ctx *hctx, + const char *page, size_t size) +{ + hctx->poll_considered = hctx->poll_invoked = 
hctx->poll_success = 0; + + return size; } static ssize_t blk_mq_hw_sysfs_queued_show(struct blk_mq_hw_ctx *hctx, @@ -301,8 +311,9 @@ static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_cpus = { .show = blk_mq_hw_sysfs_cpus_show, }; static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_poll = { - .attr = {.name = "io_poll", .mode = S_IRUGO }, + .attr = {.name = "io_poll", .mode = S_IWUSR | S_IRUGO }, .show = blk_mq_hw_sysfs_poll_show, + .store = blk_mq_hw_sysfs_poll_store, }; static struct attribute *default_hw_ctx_attrs[] = { diff --git a/block/blk-mq.c b/block/blk-mq.c index 13f5a6c1de76..7ddc7969fba4 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -22,6 +22,7 @@ #include <linux/sched/sysctl.h> #include <linux/delay.h> #include <linux/crash_dump.h> +#include <linux/prefetch.h> #include <trace/events/block.h> @@ -501,7 +502,7 @@ EXPORT_SYMBOL(blk_mq_requeue_request); static void blk_mq_requeue_work(struct work_struct *work) { struct request_queue *q = - container_of(work, struct request_queue, requeue_work); + container_of(work, struct request_queue, requeue_work.work); LIST_HEAD(rq_list); struct request *rq, *next; unsigned long flags; @@ -556,16 +557,24 @@ EXPORT_SYMBOL(blk_mq_add_to_requeue_list); void blk_mq_cancel_requeue_work(struct request_queue *q) { - cancel_work_sync(&q->requeue_work); + cancel_delayed_work_sync(&q->requeue_work); } EXPORT_SYMBOL_GPL(blk_mq_cancel_requeue_work); void blk_mq_kick_requeue_list(struct request_queue *q) { - kblockd_schedule_work(&q->requeue_work); + kblockd_schedule_delayed_work(&q->requeue_work, 0); } EXPORT_SYMBOL(blk_mq_kick_requeue_list); +void blk_mq_delay_kick_requeue_list(struct request_queue *q, + unsigned long msecs) +{ + kblockd_schedule_delayed_work(&q->requeue_work, + msecs_to_jiffies(msecs)); +} +EXPORT_SYMBOL(blk_mq_delay_kick_requeue_list); + void blk_mq_abort_requeue_list(struct request_queue *q) { unsigned long flags; @@ -588,8 +597,10 @@ EXPORT_SYMBOL(blk_mq_abort_requeue_list); struct request *blk_mq_tag_to_rq(struct blk_mq_tags *tags, unsigned int tag) { - if (tag < tags->nr_tags) + if (tag < tags->nr_tags) { + prefetch(tags->rqs[tag]); return tags->rqs[tag]; + } return NULL; } @@ -936,8 +947,7 @@ void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async) put_cpu(); } - kblockd_schedule_delayed_work_on(blk_mq_hctx_next_cpu(hctx), - &hctx->run_work, 0); + kblockd_schedule_work_on(blk_mq_hctx_next_cpu(hctx), &hctx->run_work); } void blk_mq_run_hw_queues(struct request_queue *q, bool async) @@ -958,7 +968,7 @@ EXPORT_SYMBOL(blk_mq_run_hw_queues); void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx) { - cancel_delayed_work(&hctx->run_work); + cancel_work(&hctx->run_work); cancel_delayed_work(&hctx->delay_work); set_bit(BLK_MQ_S_STOPPED, &hctx->state); } @@ -1011,7 +1021,7 @@ static void blk_mq_run_work_fn(struct work_struct *work) { struct blk_mq_hw_ctx *hctx; - hctx = container_of(work, struct blk_mq_hw_ctx, run_work.work); + hctx = container_of(work, struct blk_mq_hw_ctx, run_work); __blk_mq_run_hw_queue(hctx); } @@ -1722,7 +1732,7 @@ static int blk_mq_init_hctx(struct request_queue *q, if (node == NUMA_NO_NODE) node = hctx->numa_node = set->numa_node; - INIT_DELAYED_WORK(&hctx->run_work, blk_mq_run_work_fn); + INIT_WORK(&hctx->run_work, blk_mq_run_work_fn); INIT_DELAYED_WORK(&hctx->delay_work, blk_mq_delay_work_fn); spin_lock_init(&hctx->lock); INIT_LIST_HEAD(&hctx->dispatch); @@ -2082,7 +2092,7 @@ struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set, q->sg_reserved_size = INT_MAX; - 
INIT_WORK(&q->requeue_work, blk_mq_requeue_work); + INIT_DELAYED_WORK(&q->requeue_work, blk_mq_requeue_work); INIT_LIST_HEAD(&q->requeue_list); spin_lock_init(&q->requeue_lock); diff --git a/drivers/block/mtip32xx/mtip32xx.c b/drivers/block/mtip32xx/mtip32xx.c index 2aca98e8e427..88c46853dbb5 100644 --- a/drivers/block/mtip32xx/mtip32xx.c +++ b/drivers/block/mtip32xx/mtip32xx.c @@ -3686,7 +3686,7 @@ static int mtip_block_open(struct block_device *dev, fmode_t mode) return -ENODEV; } -void mtip_block_release(struct gendisk *disk, fmode_t mode) +static void mtip_block_release(struct gendisk *disk, fmode_t mode) { } diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c index a9e398019f38..4c6dd1a85ead 100644 --- a/drivers/block/nbd.c +++ b/drivers/block/nbd.c @@ -34,33 +34,29 @@ #include <linux/kthread.h> #include <linux/types.h> #include <linux/debugfs.h> +#include <linux/blk-mq.h> #include <asm/uaccess.h> #include <asm/types.h> #include <linux/nbd.h> +#define NBD_TIMEDOUT 0 +#define NBD_DISCONNECT_REQUESTED 1 + struct nbd_device { u32 flags; + unsigned long runtime_flags; struct socket * sock; /* If == NULL, device is not ready, yet */ int magic; - spinlock_t queue_lock; - struct list_head queue_head; /* Requests waiting result */ - struct request *active_req; - wait_queue_head_t active_wq; - struct list_head waiting_queue; /* Requests to be sent */ - wait_queue_head_t waiting_wq; + struct blk_mq_tag_set tag_set; struct mutex tx_lock; struct gendisk *disk; int blksize; loff_t bytesize; - int xmit_timeout; - bool timedout; - bool disconnect; /* a disconnect has been requested by user */ - struct timer_list timeout_timer; /* protects initialization and shutdown of the socket */ spinlock_t sock_lock; struct task_struct *task_recv; @@ -71,6 +67,11 @@ struct nbd_device { #endif }; +struct nbd_cmd { + struct nbd_device *nbd; + struct list_head list; +}; + #if IS_ENABLED(CONFIG_DEBUG_FS) static struct dentry *nbd_dbg_dir; #endif @@ -83,18 +84,6 @@ static unsigned int nbds_max = 16; static struct nbd_device *nbd_dev; static int max_part; -/* - * Use just one lock (or at most 1 per NIC). Two arguments for this: - * 1. Each NIC is essentially a synchronization point for all servers - * accessed through that NIC so there's no need to have more locks - * than NICs anyway. - * 2. More locks lead to more "Dirty cache line bouncing" which will slow - * down each lock to the point where they're actually slower than just - * a single lock. - * Thanks go to Jens Axboe and Al Viro for their LKML emails explaining this! - */ -static DEFINE_SPINLOCK(nbd_lock); - static inline struct device *nbd_to_dev(struct nbd_device *nbd) { return disk_to_dev(nbd->disk); @@ -153,18 +142,16 @@ static int nbd_size_set(struct nbd_device *nbd, struct block_device *bdev, return 0; } -static void nbd_end_request(struct nbd_device *nbd, struct request *req) +static void nbd_end_request(struct nbd_cmd *cmd) { + struct nbd_device *nbd = cmd->nbd; + struct request *req = blk_mq_rq_from_pdu(cmd); int error = req->errors ? -EIO : 0; - struct request_queue *q = req->q; - unsigned long flags; - dev_dbg(nbd_to_dev(nbd), "request %p: %s\n", req, + dev_dbg(nbd_to_dev(nbd), "request %p: %s\n", cmd, error ? 
"failed" : "done"); - spin_lock_irqsave(q->queue_lock, flags); - __blk_end_request_all(req, error); - spin_unlock_irqrestore(q->queue_lock, flags); + blk_mq_complete_request(req, error); } /* @@ -172,40 +159,49 @@ static void nbd_end_request(struct nbd_device *nbd, struct request *req) */ static void sock_shutdown(struct nbd_device *nbd) { - spin_lock_irq(&nbd->sock_lock); + struct socket *sock; + + spin_lock(&nbd->sock_lock); if (!nbd->sock) { spin_unlock_irq(&nbd->sock_lock); return; } + sock = nbd->sock; dev_warn(disk_to_dev(nbd->disk), "shutting down socket\n"); - kernel_sock_shutdown(nbd->sock, SHUT_RDWR); - sockfd_put(nbd->sock); nbd->sock = NULL; - spin_unlock_irq(&nbd->sock_lock); + spin_unlock(&nbd->sock_lock); - del_timer(&nbd->timeout_timer); + kernel_sock_shutdown(sock, SHUT_RDWR); + sockfd_put(sock); } -static void nbd_xmit_timeout(unsigned long arg) +static enum blk_eh_timer_return nbd_xmit_timeout(struct request *req, + bool reserved) { - struct nbd_device *nbd = (struct nbd_device *)arg; - unsigned long flags; - - if (list_empty(&nbd->queue_head)) - return; + struct nbd_cmd *cmd = blk_mq_rq_to_pdu(req); + struct nbd_device *nbd = cmd->nbd; + struct socket *sock = NULL; - spin_lock_irqsave(&nbd->sock_lock, flags); + spin_lock(&nbd->sock_lock); - nbd->timedout = true; + set_bit(NBD_TIMEDOUT, &nbd->runtime_flags); - if (nbd->sock) - kernel_sock_shutdown(nbd->sock, SHUT_RDWR); + if (nbd->sock) { + sock = nbd->sock; + get_file(sock->file); + } - spin_unlock_irqrestore(&nbd->sock_lock, flags); + spin_unlock(&nbd->sock_lock); + if (sock) { + kernel_sock_shutdown(sock, SHUT_RDWR); + sockfd_put(sock); + } + req->errors++; dev_err(nbd_to_dev(nbd), "Connection timed out, shutting down connection\n"); + return BLK_EH_HANDLED; } /* @@ -255,9 +251,6 @@ static int sock_xmit(struct nbd_device *nbd, int send, void *buf, int size, tsk_restore_flags(current, pflags, PF_MEMALLOC); - if (!send && nbd->xmit_timeout) - mod_timer(&nbd->timeout_timer, jiffies + nbd->xmit_timeout); - return result; } @@ -273,8 +266,9 @@ static inline int sock_send_bvec(struct nbd_device *nbd, struct bio_vec *bvec, } /* always call with the tx_lock held */ -static int nbd_send_req(struct nbd_device *nbd, struct request *req) +static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd) { + struct request *req = blk_mq_rq_from_pdu(cmd); int result, flags; struct nbd_request request; unsigned long size = blk_rq_bytes(req); @@ -298,10 +292,10 @@ static int nbd_send_req(struct nbd_device *nbd, struct request *req) request.from = cpu_to_be64((u64)blk_rq_pos(req) << 9); request.len = htonl(size); } - memcpy(request.handle, &req, sizeof(req)); + memcpy(request.handle, &req->tag, sizeof(req->tag)); dev_dbg(nbd_to_dev(nbd), "request %p: sending control (%s@%llu,%uB)\n", - req, nbdcmd_to_ascii(type), + cmd, nbdcmd_to_ascii(type), (unsigned long long)blk_rq_pos(req) << 9, blk_rq_bytes(req)); result = sock_xmit(nbd, 1, &request, sizeof(request), (type == NBD_CMD_WRITE) ? 
MSG_MORE : 0); @@ -323,7 +317,7 @@ static int nbd_send_req(struct nbd_device *nbd, struct request *req) if (!rq_iter_last(bvec, iter)) flags = MSG_MORE; dev_dbg(nbd_to_dev(nbd), "request %p: sending %d bytes data\n", - req, bvec.bv_len); + cmd, bvec.bv_len); result = sock_send_bvec(nbd, &bvec, flags); if (result <= 0) { dev_err(disk_to_dev(nbd->disk), @@ -336,29 +330,6 @@ static int nbd_send_req(struct nbd_device *nbd, struct request *req) return 0; } -static struct request *nbd_find_request(struct nbd_device *nbd, - struct request *xreq) -{ - struct request *req, *tmp; - int err; - - err = wait_event_interruptible(nbd->active_wq, nbd->active_req != xreq); - if (unlikely(err)) - return ERR_PTR(err); - - spin_lock(&nbd->queue_lock); - list_for_each_entry_safe(req, tmp, &nbd->queue_head, queuelist) { - if (req != xreq) - continue; - list_del_init(&req->queuelist); - spin_unlock(&nbd->queue_lock); - return req; - } - spin_unlock(&nbd->queue_lock); - - return ERR_PTR(-ENOENT); -} - static inline int sock_recv_bvec(struct nbd_device *nbd, struct bio_vec *bvec) { int result; @@ -370,11 +341,14 @@ static inline int sock_recv_bvec(struct nbd_device *nbd, struct bio_vec *bvec) } /* NULL returned = something went wrong, inform userspace */ -static struct request *nbd_read_stat(struct nbd_device *nbd) +static struct nbd_cmd *nbd_read_stat(struct nbd_device *nbd) { int result; struct nbd_reply reply; - struct request *req; + struct nbd_cmd *cmd; + struct request *req = NULL; + u16 hwq; + int tag; reply.magic = 0; result = sock_xmit(nbd, 0, &reply, sizeof(reply), MSG_WAITALL); @@ -390,25 +364,27 @@ static struct request *nbd_read_stat(struct nbd_device *nbd) return ERR_PTR(-EPROTO); } - req = nbd_find_request(nbd, *(struct request **)reply.handle); - if (IS_ERR(req)) { - result = PTR_ERR(req); - if (result != -ENOENT) - return ERR_PTR(result); + memcpy(&tag, reply.handle, sizeof(int)); - dev_err(disk_to_dev(nbd->disk), "Unexpected reply (%p)\n", - reply.handle); - return ERR_PTR(-EBADR); + hwq = blk_mq_unique_tag_to_hwq(tag); + if (hwq < nbd->tag_set.nr_hw_queues) + req = blk_mq_tag_to_rq(nbd->tag_set.tags[hwq], + blk_mq_unique_tag_to_tag(tag)); + if (!req || !blk_mq_request_started(req)) { + dev_err(disk_to_dev(nbd->disk), "Unexpected reply (%d) %p\n", + tag, req); + return ERR_PTR(-ENOENT); } + cmd = blk_mq_rq_to_pdu(req); if (ntohl(reply.error)) { dev_err(disk_to_dev(nbd->disk), "Other side returned error (%d)\n", ntohl(reply.error)); req->errors++; - return req; + return cmd; } - dev_dbg(nbd_to_dev(nbd), "request %p: got reply\n", req); + dev_dbg(nbd_to_dev(nbd), "request %p: got reply\n", cmd); if (rq_data_dir(req) != WRITE) { struct req_iterator iter; struct bio_vec bvec; @@ -419,13 +395,13 @@ static struct request *nbd_read_stat(struct nbd_device *nbd) dev_err(disk_to_dev(nbd->disk), "Receive data failed (result %d)\n", result); req->errors++; - return req; + return cmd; } dev_dbg(nbd_to_dev(nbd), "request %p: got %d bytes data\n", - req, bvec.bv_len); + cmd, bvec.bv_len); } } - return req; + return cmd; } static ssize_t pid_show(struct device *dev, @@ -444,7 +420,7 @@ static struct device_attribute pid_attr = { static int nbd_thread_recv(struct nbd_device *nbd, struct block_device *bdev) { - struct request *req; + struct nbd_cmd *cmd; int ret; BUG_ON(nbd->magic != NBD_MAGIC); @@ -460,13 +436,13 @@ static int nbd_thread_recv(struct nbd_device *nbd, struct block_device *bdev) nbd_size_update(nbd, bdev); while (1) { - req = nbd_read_stat(nbd); - if (IS_ERR(req)) { - ret = PTR_ERR(req); + cmd = 
nbd_read_stat(nbd); + if (IS_ERR(cmd)) { + ret = PTR_ERR(cmd); break; } - nbd_end_request(nbd, req); + nbd_end_request(cmd); } nbd_size_clear(nbd, bdev); @@ -475,44 +451,37 @@ static int nbd_thread_recv(struct nbd_device *nbd, struct block_device *bdev) return ret; } -static void nbd_clear_que(struct nbd_device *nbd) +static void nbd_clear_req(struct request *req, void *data, bool reserved) { - struct request *req; + struct nbd_cmd *cmd; + + if (!blk_mq_request_started(req)) + return; + cmd = blk_mq_rq_to_pdu(req); + req->errors++; + nbd_end_request(cmd); +} +static void nbd_clear_que(struct nbd_device *nbd) +{ BUG_ON(nbd->magic != NBD_MAGIC); /* * Because we have set nbd->sock to NULL under the tx_lock, all - * modifications to the list must have completed by now. For - * the same reason, the active_req must be NULL. - * - * As a consequence, we don't need to take the spin lock while - * purging the list here. + * modifications to the list must have completed by now. */ BUG_ON(nbd->sock); - BUG_ON(nbd->active_req); - while (!list_empty(&nbd->queue_head)) { - req = list_entry(nbd->queue_head.next, struct request, - queuelist); - list_del_init(&req->queuelist); - req->errors++; - nbd_end_request(nbd, req); - } - - while (!list_empty(&nbd->waiting_queue)) { - req = list_entry(nbd->waiting_queue.next, struct request, - queuelist); - list_del_init(&req->queuelist); - req->errors++; - nbd_end_request(nbd, req); - } + blk_mq_tagset_busy_iter(&nbd->tag_set, nbd_clear_req, NULL); dev_dbg(disk_to_dev(nbd->disk), "queue cleared\n"); } -static void nbd_handle_req(struct nbd_device *nbd, struct request *req) +static void nbd_handle_cmd(struct nbd_cmd *cmd) { + struct request *req = blk_mq_rq_from_pdu(cmd); + struct nbd_device *nbd = cmd->nbd; + if (req->cmd_type != REQ_TYPE_FS) goto error_out; @@ -526,6 +495,7 @@ static void nbd_handle_req(struct nbd_device *nbd, struct request *req) req->errors = 0; mutex_lock(&nbd->tx_lock); + nbd->task_send = current; if (unlikely(!nbd->sock)) { mutex_unlock(&nbd->tx_lock); dev_err(disk_to_dev(nbd->disk), @@ -533,106 +503,30 @@ static void nbd_handle_req(struct nbd_device *nbd, struct request *req) goto error_out; } - nbd->active_req = req; - - if (nbd->xmit_timeout && list_empty_careful(&nbd->queue_head)) - mod_timer(&nbd->timeout_timer, jiffies + nbd->xmit_timeout); - - if (nbd_send_req(nbd, req) != 0) { + if (nbd_send_cmd(nbd, cmd) != 0) { dev_err(disk_to_dev(nbd->disk), "Request send failed\n"); req->errors++; - nbd_end_request(nbd, req); - } else { - spin_lock(&nbd->queue_lock); - list_add_tail(&req->queuelist, &nbd->queue_head); - spin_unlock(&nbd->queue_lock); + nbd_end_request(cmd); } - nbd->active_req = NULL; + nbd->task_send = NULL; mutex_unlock(&nbd->tx_lock); - wake_up_all(&nbd->active_wq); return; error_out: req->errors++; - nbd_end_request(nbd, req); + nbd_end_request(cmd); } -static int nbd_thread_send(void *data) +static int nbd_queue_rq(struct blk_mq_hw_ctx *hctx, + const struct blk_mq_queue_data *bd) { - struct nbd_device *nbd = data; - struct request *req; - - nbd->task_send = current; - - set_user_nice(current, MIN_NICE); - while (!kthread_should_stop() || !list_empty(&nbd->waiting_queue)) { - /* wait for something to do */ - wait_event_interruptible(nbd->waiting_wq, - kthread_should_stop() || - !list_empty(&nbd->waiting_queue)); - - /* extract request */ - if (list_empty(&nbd->waiting_queue)) - continue; - - spin_lock_irq(&nbd->queue_lock); - req = list_entry(nbd->waiting_queue.next, struct request, - queuelist); - 
list_del_init(&req->queuelist); - spin_unlock_irq(&nbd->queue_lock); - - /* handle request */ - nbd_handle_req(nbd, req); - } + struct nbd_cmd *cmd = blk_mq_rq_to_pdu(bd->rq); - nbd->task_send = NULL; - - return 0; -} - -/* - * We always wait for result of write, for now. It would be nice to make it optional - * in future - * if ((rq_data_dir(req) == WRITE) && (nbd->flags & NBD_WRITE_NOCHK)) - * { printk( "Warning: Ignoring result!\n"); nbd_end_request( req ); } - */ - -static void nbd_request_handler(struct request_queue *q) - __releases(q->queue_lock) __acquires(q->queue_lock) -{ - struct request *req; - - while ((req = blk_fetch_request(q)) != NULL) { - struct nbd_device *nbd; - - spin_unlock_irq(q->queue_lock); - - nbd = req->rq_disk->private_data; - - BUG_ON(nbd->magic != NBD_MAGIC); - - dev_dbg(nbd_to_dev(nbd), "request %p: dequeued (flags=%x)\n", - req, req->cmd_type); - - if (unlikely(!nbd->sock)) { - dev_err_ratelimited(disk_to_dev(nbd->disk), - "Attempted send on closed socket\n"); - req->errors++; - nbd_end_request(nbd, req); - spin_lock_irq(q->queue_lock); - continue; - } - - spin_lock_irq(&nbd->queue_lock); - list_add_tail(&req->queuelist, &nbd->waiting_queue); - spin_unlock_irq(&nbd->queue_lock); - - wake_up(&nbd->waiting_wq); - - spin_lock_irq(q->queue_lock); - } + blk_mq_start_request(bd->rq); + nbd_handle_cmd(cmd); + return BLK_MQ_RQ_QUEUE_OK; } static int nbd_set_socket(struct nbd_device *nbd, struct socket *sock) @@ -657,15 +551,13 @@ out: /* Reset all properties of an NBD device */ static void nbd_reset(struct nbd_device *nbd) { - nbd->disconnect = false; - nbd->timedout = false; + nbd->runtime_flags = 0; nbd->blksize = 1024; nbd->bytesize = 0; set_capacity(nbd->disk, 0); nbd->flags = 0; - nbd->xmit_timeout = 0; + nbd->tag_set.timeout = 0; queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, nbd->disk->queue); - del_timer_sync(&nbd->timeout_timer); } static void nbd_bdev_reset(struct block_device *bdev) @@ -700,33 +592,37 @@ static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd, { switch (cmd) { case NBD_DISCONNECT: { - struct request sreq; + struct request *sreq; dev_info(disk_to_dev(nbd->disk), "NBD_DISCONNECT\n"); if (!nbd->sock) return -EINVAL; + sreq = blk_mq_alloc_request(bdev_get_queue(bdev), WRITE, 0); + if (!sreq) + return -ENOMEM; + mutex_unlock(&nbd->tx_lock); fsync_bdev(bdev); mutex_lock(&nbd->tx_lock); - blk_rq_init(NULL, &sreq); - sreq.cmd_type = REQ_TYPE_DRV_PRIV; + sreq->cmd_type = REQ_TYPE_DRV_PRIV; /* Check again after getting mutex back. 
*/ - if (!nbd->sock) + if (!nbd->sock) { + blk_mq_free_request(sreq); return -EINVAL; + } - nbd->disconnect = true; + set_bit(NBD_DISCONNECT_REQUESTED, &nbd->runtime_flags); - nbd_send_req(nbd, &sreq); + nbd_send_cmd(nbd, blk_mq_rq_to_pdu(sreq)); + blk_mq_free_request(sreq); return 0; } case NBD_CLEAR_SOCK: sock_shutdown(nbd); nbd_clear_que(nbd); - BUG_ON(!list_empty(&nbd->queue_head)); - BUG_ON(!list_empty(&nbd->waiting_queue)); kill_bdev(bdev); return 0; @@ -758,13 +654,7 @@ static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd, return nbd_size_set(nbd, bdev, nbd->blksize, arg); case NBD_SET_TIMEOUT: - nbd->xmit_timeout = arg * HZ; - if (arg) - mod_timer(&nbd->timeout_timer, - jiffies + nbd->xmit_timeout); - else - del_timer_sync(&nbd->timeout_timer); - + nbd->tag_set.timeout = arg * HZ; return 0; case NBD_SET_FLAGS: @@ -772,7 +662,6 @@ static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd, return 0; case NBD_DO_IT: { - struct task_struct *thread; int error; if (nbd->task_recv) @@ -786,18 +675,9 @@ static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd, nbd_parse_flags(nbd, bdev); - thread = kthread_run(nbd_thread_send, nbd, "%s", - nbd_name(nbd)); - if (IS_ERR(thread)) { - mutex_lock(&nbd->tx_lock); - nbd->task_recv = NULL; - return PTR_ERR(thread); - } - nbd_dev_dbg_init(nbd); error = nbd_thread_recv(nbd, bdev); nbd_dev_dbg_close(nbd); - kthread_stop(thread); mutex_lock(&nbd->tx_lock); nbd->task_recv = NULL; @@ -807,9 +687,10 @@ static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd, kill_bdev(bdev); nbd_bdev_reset(bdev); - if (nbd->disconnect) /* user requested, ignore socket errors */ + /* user requested, ignore socket errors */ + if (test_bit(NBD_DISCONNECT_REQUESTED, &nbd->runtime_flags)) error = 0; - if (nbd->timedout) + if (test_bit(NBD_TIMEDOUT, &nbd->runtime_flags)) error = -ETIMEDOUT; nbd_reset(nbd); @@ -825,10 +706,10 @@ static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd, return 0; case NBD_PRINT_DEBUG: - dev_info(disk_to_dev(nbd->disk), - "next = %p, prev = %p, head = %p\n", - nbd->queue_head.next, nbd->queue_head.prev, - &nbd->queue_head); + /* + * For compatibility only, we no longer keep a list of + * outstanding requests. 
+ */ return 0; } return -ENOTTY; @@ -935,7 +816,7 @@ static int nbd_dev_dbg_init(struct nbd_device *nbd) debugfs_create_file("tasks", 0444, dir, nbd, &nbd_dbg_tasks_ops); debugfs_create_u64("size_bytes", 0444, dir, &nbd->bytesize); - debugfs_create_u32("timeout", 0444, dir, &nbd->xmit_timeout); + debugfs_create_u32("timeout", 0444, dir, &nbd->tag_set.timeout); debugfs_create_u32("blocksize", 0444, dir, &nbd->blksize); debugfs_create_file("flags", 0444, dir, nbd, &nbd_dbg_flags_ops); @@ -987,6 +868,24 @@ static void nbd_dbg_close(void) #endif +static int nbd_init_request(void *data, struct request *rq, + unsigned int hctx_idx, unsigned int request_idx, + unsigned int numa_node) +{ + struct nbd_cmd *cmd = blk_mq_rq_to_pdu(rq); + + cmd->nbd = data; + INIT_LIST_HEAD(&cmd->list); + return 0; +} + +static struct blk_mq_ops nbd_mq_ops = { + .queue_rq = nbd_queue_rq, + .map_queue = blk_mq_map_queue, + .init_request = nbd_init_request, + .timeout = nbd_xmit_timeout, +}; + /* * And here should be modules and kernel interface * (Just smiley confuses emacs :-) @@ -1035,16 +934,34 @@ static int __init nbd_init(void) if (!disk) goto out; nbd_dev[i].disk = disk; + + nbd_dev[i].tag_set.ops = &nbd_mq_ops; + nbd_dev[i].tag_set.nr_hw_queues = 1; + nbd_dev[i].tag_set.queue_depth = 128; + nbd_dev[i].tag_set.numa_node = NUMA_NO_NODE; + nbd_dev[i].tag_set.cmd_size = sizeof(struct nbd_cmd); + nbd_dev[i].tag_set.flags = BLK_MQ_F_SHOULD_MERGE | + BLK_MQ_F_SG_MERGE; + nbd_dev[i].tag_set.driver_data = &nbd_dev[i]; + + err = blk_mq_alloc_tag_set(&nbd_dev[i].tag_set); + if (err) { + put_disk(disk); + goto out; + } + /* * The new linux 2.5 block layer implementation requires * every gendisk to have its very own request_queue struct. * These structs are big so we dynamically allocate them. 
*/ - disk->queue = blk_init_queue(nbd_request_handler, &nbd_lock); + disk->queue = blk_mq_init_queue(&nbd_dev[i].tag_set); if (!disk->queue) { + blk_mq_free_tag_set(&nbd_dev[i].tag_set); put_disk(disk); goto out; } + /* * Tell the block layer that we are not a rotational device */ @@ -1069,16 +986,8 @@ static int __init nbd_init(void) for (i = 0; i < nbds_max; i++) { struct gendisk *disk = nbd_dev[i].disk; nbd_dev[i].magic = NBD_MAGIC; - INIT_LIST_HEAD(&nbd_dev[i].waiting_queue); - spin_lock_init(&nbd_dev[i].queue_lock); spin_lock_init(&nbd_dev[i].sock_lock); - INIT_LIST_HEAD(&nbd_dev[i].queue_head); mutex_init(&nbd_dev[i].tx_lock); - init_timer(&nbd_dev[i].timeout_timer); - nbd_dev[i].timeout_timer.function = nbd_xmit_timeout; - nbd_dev[i].timeout_timer.data = (unsigned long)&nbd_dev[i]; - init_waitqueue_head(&nbd_dev[i].active_wq); - init_waitqueue_head(&nbd_dev[i].waiting_wq); disk->major = NBD_MAJOR; disk->first_minor = i << part_shift; disk->fops = &nbd_fops; @@ -1091,6 +1000,7 @@ static int __init nbd_init(void) return 0; out: while (i--) { + blk_mq_free_tag_set(&nbd_dev[i].tag_set); blk_cleanup_queue(nbd_dev[i].disk->queue); put_disk(nbd_dev[i].disk); } @@ -1110,6 +1020,7 @@ static void __exit nbd_cleanup(void) if (disk) { del_gendisk(disk); blk_cleanup_queue(disk->queue); + blk_mq_free_tag_set(&nbd_dev[i].tag_set); put_disk(disk); } } diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c index 874295757caa..0448e7e35c8c 100644 --- a/drivers/md/dm-crypt.c +++ b/drivers/md/dm-crypt.c @@ -1136,7 +1136,7 @@ static void clone_init(struct dm_crypt_io *io, struct bio *clone) clone->bi_private = io; clone->bi_end_io = crypt_endio; clone->bi_bdev = cc->dev->bdev; - bio_set_op_attrs(clone, bio_op(io->base_bio), io->base_bio->bi_opf); + bio_set_op_attrs(clone, bio_op(io->base_bio), bio_flags(io->base_bio)); } static int kcryptd_io_read(struct dm_crypt_io *io, gfp_t gfp) diff --git a/fs/befs/linuxvfs.c b/fs/befs/linuxvfs.c index 7da05b159ade..bfe9f9994935 100644 --- a/fs/befs/linuxvfs.c +++ b/fs/befs/linuxvfs.c @@ -789,7 +789,7 @@ befs_fill_super(struct super_block *sb, void *data, int silent) * Will be set to real fs blocksize later. * * Linux 2.4.10 and later refuse to read blocks smaller than - * the hardsect size for the device. But we also need to read at + * the logical block size for the device. But we also need to read at * least 1k to get the second 512 bytes of the volume. 
* -WD 10-26-01 */ diff --git a/fs/block_dev.c b/fs/block_dev.c index 08ae99343d92..a516568f63b6 100644 --- a/fs/block_dev.c +++ b/fs/block_dev.c @@ -180,9 +180,6 @@ blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter) struct file *file = iocb->ki_filp; struct inode *inode = bdev_file_inode(file); - if (IS_DAX(inode)) - return dax_do_io(iocb, inode, iter, blkdev_get_block, - NULL, DIO_SKIP_DIO_COUNT); return __blockdev_direct_IO(iocb, inode, I_BDEV(inode), iter, blkdev_get_block, NULL, NULL, DIO_SKIP_DIO_COUNT); @@ -1275,7 +1272,6 @@ static int __blkdev_get(struct block_device *bdev, fmode_t mode, int for_part) bdev->bd_disk = disk; bdev->bd_queue = disk->queue; bdev->bd_contains = bdev; - bdev->bd_inode->i_flags = 0; if (!partno) { ret = -ENXIO; @@ -1303,11 +1299,8 @@ static int __blkdev_get(struct block_device *bdev, fmode_t mode, int for_part) } } - if (!ret) { + if (!ret) bd_set_size(bdev,(loff_t)get_capacity(disk)<<9); - if (!bdev_dax_capable(bdev)) - bdev->bd_inode->i_flags &= ~S_DAX; - } /* * If the device is invalidated, rescan partition @@ -1342,8 +1335,6 @@ static int __blkdev_get(struct block_device *bdev, fmode_t mode, int for_part) goto out_clear; } bd_set_size(bdev, (loff_t)bdev->bd_part->nr_sects << 9); - if (!bdev_dax_capable(bdev)) - bdev->bd_inode->i_flags &= ~S_DAX; } } else { if (bdev->bd_contains == bdev) { diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index e6811c42e41e..ca01106795ea 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -8412,7 +8412,7 @@ static int btrfs_submit_direct_hook(struct btrfs_dio_private *dip, if (!bio) return -ENOMEM; - bio_set_op_attrs(bio, bio_op(orig_bio), orig_bio->bi_opf); + bio_set_op_attrs(bio, bio_op(orig_bio), bio_flags(orig_bio)); bio->bi_private = dip; bio->bi_end_io = btrfs_end_dio_bio; btrfs_io_bio(bio)->logical = file_offset; @@ -8450,7 +8450,8 @@ next_block: start_sector, GFP_NOFS); if (!bio) goto out_err; - bio_set_op_attrs(bio, bio_op(orig_bio), orig_bio->bi_opf); + bio_set_op_attrs(bio, bio_op(orig_bio), + bio_flags(orig_bio)); bio->bi_private = dip; bio->bi_end_io = btrfs_end_dio_bio; btrfs_io_bio(bio)->logical = file_offset; diff --git a/include/linux/bio.h b/include/linux/bio.h index 23ddf4b46a9b..e00721a2dce1 100644 --- a/include/linux/bio.h +++ b/include/linux/bio.h @@ -1,6 +1,4 @@ /* - * 2.5 block I/O model - * * Copyright (C) 2001 Jens Axboe <[email protected]> * * This program is free software; you can redistribute it and/or modify diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h index e43bbffb5b7a..60ef14cbcd2d 100644 --- a/include/linux/blk-mq.h +++ b/include/linux/blk-mq.h @@ -22,11 +22,10 @@ struct blk_mq_hw_ctx { struct { spinlock_t lock; struct list_head dispatch; + unsigned long state; /* BLK_MQ_S_* flags */ } ____cacheline_aligned_in_smp; - unsigned long state; /* BLK_MQ_S_* flags */ - struct delayed_work run_work; - struct delayed_work delay_work; + struct work_struct run_work; cpumask_var_t cpumask; int next_cpu; int next_cpu_batch; @@ -40,8 +39,8 @@ struct blk_mq_hw_ctx { struct blk_mq_ctxmap ctx_map; - unsigned int nr_ctx; struct blk_mq_ctx **ctxs; + unsigned int nr_ctx; atomic_t wait_index; @@ -49,7 +48,7 @@ struct blk_mq_hw_ctx { unsigned long queued; unsigned long run; -#define BLK_MQ_MAX_DISPATCH_ORDER 10 +#define BLK_MQ_MAX_DISPATCH_ORDER 7 unsigned long dispatched[BLK_MQ_MAX_DISPATCH_ORDER]; unsigned int numa_node; @@ -57,9 +56,12 @@ struct blk_mq_hw_ctx { atomic_t nr_active; + struct delayed_work delay_work; + struct blk_mq_cpu_notifier cpu_notifier; struct kobject kobj; + 
unsigned long poll_considered; unsigned long poll_invoked; unsigned long poll_success; }; @@ -221,7 +223,6 @@ static inline u16 blk_mq_unique_tag_to_tag(u32 unique_tag) } struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *, const int ctx_index); -struct blk_mq_hw_ctx *blk_mq_alloc_single_hw_queue(struct blk_mq_tag_set *, unsigned int, int); int blk_mq_request_started(struct request *rq); void blk_mq_start_request(struct request *rq); @@ -232,6 +233,7 @@ void blk_mq_requeue_request(struct request *rq); void blk_mq_add_to_requeue_list(struct request *rq, bool at_head); void blk_mq_cancel_requeue_work(struct request_queue *q); void blk_mq_kick_requeue_list(struct request_queue *q); +void blk_mq_delay_kick_requeue_list(struct request_queue *q, unsigned long msecs); void blk_mq_abort_requeue_list(struct request_queue *q); void blk_mq_complete_request(struct request *rq, int error); diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h index 436f43f87da9..cd395ecec99d 100644 --- a/include/linux/blk_types.h +++ b/include/linux/blk_types.h @@ -16,7 +16,6 @@ struct block_device; struct io_context; struct cgroup_subsys_state; typedef void (bio_end_io_t) (struct bio *); -typedef void (bio_destructor_t) (struct bio *); #ifdef CONFIG_BLOCK /* @@ -89,14 +88,22 @@ struct bio { struct bio_vec bi_inline_vecs[0]; }; -#define BIO_OP_SHIFT (8 * sizeof(unsigned int) - REQ_OP_BITS) +#define BIO_OP_SHIFT (8 * FIELD_SIZEOF(struct bio, bi_opf) - REQ_OP_BITS) +#define bio_flags(bio) ((bio)->bi_opf & ((1 << BIO_OP_SHIFT) - 1)) #define bio_op(bio) ((bio)->bi_opf >> BIO_OP_SHIFT) -#define bio_set_op_attrs(bio, op, op_flags) do { \ - WARN_ON(op >= (1 << REQ_OP_BITS)); \ - (bio)->bi_opf &= ((1 << BIO_OP_SHIFT) - 1); \ - (bio)->bi_opf |= ((unsigned int) (op) << BIO_OP_SHIFT); \ - (bio)->bi_opf |= op_flags; \ +#define bio_set_op_attrs(bio, op, op_flags) do { \ + if (__builtin_constant_p(op)) \ + BUILD_BUG_ON((op) + 0U >= (1U << REQ_OP_BITS)); \ + else \ + WARN_ON_ONCE((op) + 0U >= (1U << REQ_OP_BITS)); \ + if (__builtin_constant_p(op_flags)) \ + BUILD_BUG_ON((op_flags) + 0U >= (1U << BIO_OP_SHIFT)); \ + else \ + WARN_ON_ONCE((op_flags) + 0U >= (1U << BIO_OP_SHIFT)); \ + (bio)->bi_opf = bio_flags(bio); \ + (bio)->bi_opf |= (((op) + 0U) << BIO_OP_SHIFT); \ + (bio)->bi_opf |= (op_flags); \ } while (0) #define BIO_RESET_BYTES offsetof(struct bio, bi_max_vecs) diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index e79055c8b577..c47c358ba052 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -449,7 +449,7 @@ struct request_queue { struct list_head requeue_list; spinlock_t requeue_lock; - struct work_struct requeue_work; + struct delayed_work requeue_work; struct mutex sysfs_lock; @@ -1440,8 +1440,8 @@ static inline bool req_gap_front_merge(struct request *req, struct bio *bio) return bio_will_gap(req->q, bio, req->bio); } -struct work_struct; int kblockd_schedule_work(struct work_struct *work); +int kblockd_schedule_work_on(int cpu, struct work_struct *work); int kblockd_schedule_delayed_work(struct delayed_work *dwork, unsigned long delay); int kblockd_schedule_delayed_work_on(int cpu, struct delayed_work *dwork, unsigned long delay); diff --git a/include/linux/ioprio.h b/include/linux/ioprio.h index beb9ce1c2c23..8c1239020d79 100644 --- a/include/linux/ioprio.h +++ b/include/linux/ioprio.h @@ -7,7 +7,6 @@ /* * Gives us 8 prio classes with 13-bits of data for each class */ -#define IOPRIO_BITS (16) #define IOPRIO_CLASS_SHIFT (13) #define IOPRIO_PRIO_MASK ((1UL << 
IOPRIO_CLASS_SHIFT) - 1)
diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h
index 26cc1df280d6..fc6e22186405 100644
--- a/include/linux/workqueue.h
+++ b/include/linux/workqueue.h
@@ -442,6 +442,7 @@ extern int schedule_on_each_cpu(work_func_t func);
 int execute_in_process_context(work_func_t fn, struct execute_work *);
 
 extern bool flush_work(struct work_struct *work);
+extern bool cancel_work(struct work_struct *work);
 extern bool cancel_work_sync(struct work_struct *work);
 
 extern bool flush_delayed_work(struct delayed_work *dwork);
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index ef071ca73fc3..bd81f0390277 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -2974,6 +2974,31 @@ bool flush_delayed_work(struct delayed_work *dwork)
 }
 EXPORT_SYMBOL(flush_delayed_work);
 
+static bool __cancel_work(struct work_struct *work, bool is_dwork)
+{
+	unsigned long flags;
+	int ret;
+
+	do {
+		ret = try_to_grab_pending(work, is_dwork, &flags);
+	} while (unlikely(ret == -EAGAIN));
+
+	if (unlikely(ret < 0))
+		return false;
+
+	set_work_pool_and_clear_pending(work, get_work_pool_id(work));
+	local_irq_restore(flags);
+	return ret;
+}
+
+/*
+ * See cancel_delayed_work()
+ */
+bool cancel_work(struct work_struct *work)
+{
+	return __cancel_work(work, false);
+}
+
 /**
  * cancel_delayed_work - cancel a delayed work
  * @dwork: delayed_work to cancel
@@ -2992,20 +3017,7 @@ EXPORT_SYMBOL(flush_delayed_work);
  */
 bool cancel_delayed_work(struct delayed_work *dwork)
 {
-	unsigned long flags;
-	int ret;
-
-	do {
-		ret = try_to_grab_pending(&dwork->work, true, &flags);
-	} while (unlikely(ret == -EAGAIN));
-
-	if (unlikely(ret < 0))
-		return false;
-
-	set_work_pool_and_clear_pending(&dwork->work,
-					get_work_pool_id(&dwork->work));
-	local_irq_restore(flags);
-	return ret;
+	return __cancel_work(&dwork->work, true);
 }
 EXPORT_SYMBOL(cancel_delayed_work);
 
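For context only (not part of the patch above): a minimal sketch of how a blk-mq driver might use the blk_mq_delay_kick_requeue_list() helper introduced in block/blk-mq.c to retry a request after a transient failure. The driver name, the -EAGAIN check and the 100 ms delay are illustrative assumptions; the blk-mq calls themselves match the prototypes added/kept by this change.

```c
#include <linux/blk-mq.h>

#define MYDRV_RETRY_DELAY_MS	100	/* illustrative value, not from the patch */

/* Hypothetical completion helper in a blk-mq driver. */
static void mydrv_finish_rq(struct request *rq, int error)
{
	if (error == -EAGAIN) {
		/* Park the request on the queue's requeue list... */
		blk_mq_requeue_request(rq);
		/* ...and have the requeue work resubmit it ~100 ms later. */
		blk_mq_delay_kick_requeue_list(rq->q, MYDRV_RETRY_DELAY_MS);
		return;
	}

	/* Otherwise complete the request with the final status. */
	blk_mq_complete_request(rq, error);
}
```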