blk-mq: turn hctx->run_work into a regular work struct
We don't need the larger delayed work struct: run_work is always
queued with a zero delay, so its timer is never armed and a plain
work_struct is sufficient. Switch the cancel paths in blk_sync_queue()
and blk_mq_stop_hw_queue() to the non-delayed helpers accordingly.
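
For reference, the two definitions from include/linux/workqueue.h
(abridged here; the CONFIG_LOCKDEP-only field in work_struct is
omitted) show roughly what each hardware context saves by dropping the
delay machinery:

  struct work_struct {
          atomic_long_t data;
          struct list_head entry;
          work_func_t func;
  };

  struct delayed_work {
          struct work_struct work;
          struct timer_list timer;

          /* target workqueue and CPU ->timer uses to queue ->work */
          struct workqueue_struct *wq;
          int cpu;
  };

The timer, wq, and cpu fields exist only so the timer callback can
queue ->work once the delay expires; with a delay of 0 they are dead
weight.
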
Signed-off-by: Jens Axboe <axboe@fb.com>
diff --git a/block/blk-core.c b/block/blk-core.c
index 2d08597..34ff808 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -288,7 +288,7 @@
 		int i;
 
 		queue_for_each_hw_ctx(q, hctx, i) {
-			cancel_delayed_work_sync(&hctx->run_work);
+			cancel_work_sync(&hctx->run_work);
 			cancel_delayed_work_sync(&hctx->delay_work);
 		}
 	} else {
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 13f5a6c..b68fdcb 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -936,8 +936,7 @@
 		put_cpu();
 	}
 
-	kblockd_schedule_delayed_work_on(blk_mq_hctx_next_cpu(hctx),
-			&hctx->run_work, 0);
+	kblockd_schedule_work_on(blk_mq_hctx_next_cpu(hctx), &hctx->run_work);
 }
 
 void blk_mq_run_hw_queues(struct request_queue *q, bool async)
@@ -958,7 +957,7 @@
 
 void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx)
 {
-	cancel_delayed_work(&hctx->run_work);
+	cancel_work(&hctx->run_work);
 	cancel_delayed_work(&hctx->delay_work);
 	set_bit(BLK_MQ_S_STOPPED, &hctx->state);
 }
@@ -1011,7 +1010,7 @@
 {
 	struct blk_mq_hw_ctx *hctx;
 
-	hctx = container_of(work, struct blk_mq_hw_ctx, run_work.work);
+	hctx = container_of(work, struct blk_mq_hw_ctx, run_work);
 
 	__blk_mq_run_hw_queue(hctx);
 }
@@ -1722,7 +1721,7 @@
 	if (node == NUMA_NO_NODE)
 		node = hctx->numa_node = set->numa_node;
 
-	INIT_DELAYED_WORK(&hctx->run_work, blk_mq_run_work_fn);
+	INIT_WORK(&hctx->run_work, blk_mq_run_work_fn);
 	INIT_DELAYED_WORK(&hctx->delay_work, blk_mq_delay_work_fn);
 	spin_lock_init(&hctx->lock);
 	INIT_LIST_HEAD(&hctx->dispatch);
diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
index e43bbff..d579252 100644
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ -25,7 +25,7 @@
 	} ____cacheline_aligned_in_smp;
 
 	unsigned long		state;		/* BLK_MQ_S_* flags */
-	struct delayed_work	run_work;
+	struct work_struct	run_work;
 	struct delayed_work	delay_work;
 	cpumask_var_t		cpumask;
 	int			next_cpu;