Commit 044f1daa authored by Linus Torvalds

Merge branch 'for-linus' of git://git.kernel.dk/linux-block

Pull block fixes and updates from Jens Axboe:
 "Some fixes and followup features/changes that should go in, in this
  merge window. This contains:

   - Two fixes for lightnvm from Javier, fixing problems in the new code
     merged earlier in this merge window.

   - A fix from Jan for the backing device changes, fixing an issue in
     NFS that causes a failure to mount on certain setups.

   - A change from Christoph, cleaning up the blk-mq init and exit
     request paths.

   - Remove elevator_change(), which is now unused. From Bart.

   - A fix for queue operation invocation on a dead queue, from Bart.

   - A series fixing up mtip32xx for blk-mq scheduling, removing a
     bandaid we previously had in place for this. From me.

   - A regression fix for this series, fixing a case where we wait on
     workqueue flushing from an invalid (non-blocking) context. From me.

   - A fix/optimization from Ming, ensuring that we don't both quiesce
     and freeze a queue at the same time.

   - A fix from Peter on lock ordering for CPU hotplug. Not a real
     problem right now, but will be once the CPU hotplug rework goes in.

   - A series from Omar, cleaning up our blk-mq debugfs support, and
     adding support for exporting info from schedulers in debugfs as
     well. This is really useful in debugging stalls or livelocks"

* 'for-linus' of git://git.kernel.dk/linux-block: (28 commits)
  mq-deadline: add debugfs attributes
  kyber: add debugfs attributes
  blk-mq-debugfs: allow schedulers to register debugfs attributes
  blk-mq: untangle debugfs and sysfs
  blk-mq: move debugfs declarations to a separate header file
  blk-mq: Do not invoke queue operations on a dead queue
  blk-mq-debugfs: get rid of a bunch of boilerplate
  blk-mq-debugfs: rename hw queue directories from <n> to hctx<n>
  blk-mq-debugfs: don't open code strstrip()
  blk-mq-debugfs: error on long write to queue "state" file
  blk-mq-debugfs: clean up flag definitions
  blk-mq-debugfs: separate flags with |
  nfs: Fix bdi handling for cloned superblocks
  block/mq: Cure cpu hotplug lock inversion
  lightnvm: fix bad back free on error path
  lightnvm: create cmd before allocating request
  blk-mq: don't use sync workqueue flushing from drivers
  mtip32xx: convert internal commands to regular block infrastructure
  mtip32xx: cleanup internal tag assumptions
  block: don't call blk_mq_quiesce_queue() after queue is frozen
  ...
@@ -561,13 +561,9 @@ void blk_cleanup_queue(struct request_queue *q)
 	 * prevent that q->request_fn() gets invoked after draining finished.
 	 */
 	blk_freeze_queue(q);
-	if (!q->mq_ops) {
-		spin_lock_irq(lock);
+	spin_lock_irq(lock);
+	if (!q->mq_ops)
 		__blk_drain_queue(q, true);
-	} else {
-		blk_mq_debugfs_unregister_mq(q);
-		spin_lock_irq(lock);
-	}
 	queue_flag_set(QUEUE_FLAG_DEAD, q);
 	spin_unlock_irq(lock);
......
This diff is collapsed.
#ifndef INT_BLK_MQ_DEBUGFS_H
#define INT_BLK_MQ_DEBUGFS_H
#ifdef CONFIG_BLK_DEBUG_FS
#include <linux/seq_file.h>
struct blk_mq_debugfs_attr {
const char *name;
umode_t mode;
int (*show)(void *, struct seq_file *);
ssize_t (*write)(void *, const char __user *, size_t, loff_t *);
/* Set either .show or .seq_ops. */
const struct seq_operations *seq_ops;
};
int __blk_mq_debugfs_rq_show(struct seq_file *m, struct request *rq);
int blk_mq_debugfs_rq_show(struct seq_file *m, void *v);
int blk_mq_debugfs_register(struct request_queue *q);
void blk_mq_debugfs_unregister(struct request_queue *q);
int blk_mq_debugfs_register_hctx(struct request_queue *q,
struct blk_mq_hw_ctx *hctx);
void blk_mq_debugfs_unregister_hctx(struct blk_mq_hw_ctx *hctx);
int blk_mq_debugfs_register_hctxs(struct request_queue *q);
void blk_mq_debugfs_unregister_hctxs(struct request_queue *q);
int blk_mq_debugfs_register_sched(struct request_queue *q);
void blk_mq_debugfs_unregister_sched(struct request_queue *q);
int blk_mq_debugfs_register_sched_hctx(struct request_queue *q,
struct blk_mq_hw_ctx *hctx);
void blk_mq_debugfs_unregister_sched_hctx(struct blk_mq_hw_ctx *hctx);
#else
static inline int blk_mq_debugfs_register(struct request_queue *q)
{
return 0;
}
static inline void blk_mq_debugfs_unregister(struct request_queue *q)
{
}
static inline int blk_mq_debugfs_register_hctx(struct request_queue *q,
struct blk_mq_hw_ctx *hctx)
{
return 0;
}
static inline void blk_mq_debugfs_unregister_hctx(struct blk_mq_hw_ctx *hctx)
{
}
static inline int blk_mq_debugfs_register_hctxs(struct request_queue *q)
{
return 0;
}
static inline void blk_mq_debugfs_unregister_hctxs(struct request_queue *q)
{
}
static inline int blk_mq_debugfs_register_sched(struct request_queue *q)
{
return 0;
}
static inline void blk_mq_debugfs_unregister_sched(struct request_queue *q)
{
}
static inline int blk_mq_debugfs_register_sched_hctx(struct request_queue *q,
struct blk_mq_hw_ctx *hctx)
{
return 0;
}
static inline void blk_mq_debugfs_unregister_sched_hctx(struct blk_mq_hw_ctx *hctx)
{
}
#endif
#endif
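For orientation, here is a minimal sketch of how an I/O scheduler might plug into this interface, in the style of the kyber and mq-deadline changes further down; the example_* names are hypothetical, not part of this series:

/* Hypothetical per-queue state exported by an example scheduler. */
struct example_queue_data {
	unsigned int depth;
};

/*
 * .show callback: "data" is the object the attribute table was
 * registered for (the request_queue for queue-level attributes, as in
 * kyber below), "m" is the seq_file to print into.
 */
static int example_depth_show(void *data, struct seq_file *m)
{
	struct request_queue *q = data;
	struct example_queue_data *eqd = q->elevator->elevator_data;

	seq_printf(m, "%u\n", eqd->depth);
	return 0;
}

/*
 * Hooked up via the elevator_type::queue_debugfs_attrs pointer added
 * in include/linux/elevator.h below; the block core then creates one
 * debugfs file per entry when blk_mq_debugfs_register_sched() runs.
 */
static const struct blk_mq_debugfs_attr example_queue_debugfs_attrs[] = {
	{"depth", 0400, example_depth_show},
	{},
};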
@@ -11,6 +11,7 @@
 #include "blk.h"
 #include "blk-mq.h"
+#include "blk-mq-debugfs.h"
 #include "blk-mq-sched.h"
 #include "blk-mq-tag.h"
 #include "blk-wbt.h"
@@ -82,11 +83,7 @@ struct request *blk_mq_sched_get_request(struct request_queue *q,
 	if (likely(!data->hctx))
 		data->hctx = blk_mq_map_queue(q, data->ctx->cpu);
 
-	/*
-	 * For a reserved tag, allocate a normal request since we might
-	 * have driver dependencies on the value of the internal tag.
-	 */
-	if (e && !(data->flags & BLK_MQ_REQ_RESERVED)) {
+	if (e) {
 		data->flags |= BLK_MQ_REQ_INTERNAL;
 
 		/*
@@ -476,6 +473,8 @@ int blk_mq_sched_init_hctx(struct request_queue *q, struct blk_mq_hw_ctx *hctx,
 		}
 	}
 
+	blk_mq_debugfs_register_sched_hctx(q, hctx);
+
 	return 0;
 }
@@ -487,6 +486,8 @@ void blk_mq_sched_exit_hctx(struct request_queue *q, struct blk_mq_hw_ctx *hctx,
 	if (!e)
 		return;
 
+	blk_mq_debugfs_unregister_sched_hctx(hctx);
+
 	if (e->type->ops.mq.exit_hctx && hctx->sched_data) {
 		e->type->ops.mq.exit_hctx(hctx, hctx_idx);
 		hctx->sched_data = NULL;
@@ -523,8 +524,10 @@ int blk_mq_init_sched(struct request_queue *q, struct elevator_type *e)
 	if (ret)
 		goto err;
 
-	if (e->ops.mq.init_hctx) {
-		queue_for_each_hw_ctx(q, hctx, i) {
+	blk_mq_debugfs_register_sched(q);
+
+	queue_for_each_hw_ctx(q, hctx, i) {
+		if (e->ops.mq.init_hctx) {
 			ret = e->ops.mq.init_hctx(hctx, i);
 			if (ret) {
 				eq = q->elevator;
@@ -533,6 +536,7 @@ int blk_mq_init_sched(struct request_queue *q, struct elevator_type *e)
 				return ret;
 			}
 		}
+		blk_mq_debugfs_register_sched_hctx(q, hctx);
 	}
 
 	return 0;
@@ -548,14 +552,14 @@ void blk_mq_exit_sched(struct request_queue *q, struct elevator_queue *e)
 	struct blk_mq_hw_ctx *hctx;
 	unsigned int i;
 
-	if (e->type->ops.mq.exit_hctx) {
-		queue_for_each_hw_ctx(q, hctx, i) {
-			if (hctx->sched_data) {
-				e->type->ops.mq.exit_hctx(hctx, i);
-				hctx->sched_data = NULL;
-			}
+	queue_for_each_hw_ctx(q, hctx, i) {
+		blk_mq_debugfs_unregister_sched_hctx(hctx);
+		if (e->type->ops.mq.exit_hctx && hctx->sched_data) {
+			e->type->ops.mq.exit_hctx(hctx, i);
+			hctx->sched_data = NULL;
 		}
 	}
+	blk_mq_debugfs_unregister_sched(q);
 	if (e->type->ops.mq.exit_sched)
 		e->type->ops.mq.exit_sched(e);
 	blk_mq_sched_tags_teardown(q);
......
@@ -258,8 +258,6 @@ static void __blk_mq_unregister_dev(struct device *dev, struct request_queue *q)
 	queue_for_each_hw_ctx(q, hctx, i)
 		blk_mq_unregister_hctx(hctx);
 
-	blk_mq_debugfs_unregister_mq(q);
-
 	kobject_uevent(&q->mq_kobj, KOBJ_REMOVE);
 	kobject_del(&q->mq_kobj);
 	kobject_put(&dev->kobj);
@@ -318,8 +316,6 @@ int __blk_mq_register_dev(struct device *dev, struct request_queue *q)
 
 	kobject_uevent(&q->mq_kobj, KOBJ_ADD);
 
-	blk_mq_debugfs_register(q);
-
 	queue_for_each_hw_ctx(q, hctx, i) {
 		ret = blk_mq_register_hctx(hctx);
 		if (ret)
@@ -335,8 +331,6 @@ int __blk_mq_register_dev(struct device *dev, struct request_queue *q)
 	while (--i >= 0)
 		blk_mq_unregister_hctx(q->queue_hw_ctx[i]);
 
-	blk_mq_debugfs_unregister_mq(q);
-
 	kobject_uevent(&q->mq_kobj, KOBJ_REMOVE);
 	kobject_del(&q->mq_kobj);
 	kobject_put(&dev->kobj);
@@ -364,8 +358,6 @@ void blk_mq_sysfs_unregister(struct request_queue *q)
 	if (!q->mq_sysfs_init_done)
 		goto unlock;
 
-	blk_mq_debugfs_unregister_mq(q);
-
 	queue_for_each_hw_ctx(q, hctx, i)
 		blk_mq_unregister_hctx(hctx);
@@ -382,8 +374,6 @@ int blk_mq_sysfs_register(struct request_queue *q)
 	if (!q->mq_sysfs_init_done)
 		goto unlock;
 
-	blk_mq_debugfs_register_mq(q);
-
 	queue_for_each_hw_ctx(q, hctx, i) {
 		ret = blk_mq_register_hctx(hctx);
 		if (ret)
......
@@ -31,6 +31,7 @@
 #include <linux/blk-mq.h>
 #include "blk.h"
 #include "blk-mq.h"
+#include "blk-mq-debugfs.h"
 #include "blk-mq-tag.h"
 #include "blk-stat.h"
 #include "blk-wbt.h"
@@ -41,6 +42,7 @@ static LIST_HEAD(all_q_list);
 
 static void blk_mq_poll_stats_start(struct request_queue *q);
 static void blk_mq_poll_stats_fn(struct blk_stat_callback *cb);
+static void __blk_mq_stop_hw_queues(struct request_queue *q, bool sync);
 
 static int blk_mq_poll_stats_bkt(const struct request *rq)
 {
@@ -166,7 +168,7 @@ void blk_mq_quiesce_queue(struct request_queue *q)
 	unsigned int i;
 	bool rcu = false;
 
-	blk_mq_stop_hw_queues(q);
+	__blk_mq_stop_hw_queues(q, true);
 
 	queue_for_each_hw_ctx(q, hctx, i) {
 		if (hctx->flags & BLK_MQ_F_BLOCKING)
@@ -1218,20 +1220,34 @@ bool blk_mq_queue_stopped(struct request_queue *q)
 }
 EXPORT_SYMBOL(blk_mq_queue_stopped);
 
-void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx)
+static void __blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx, bool sync)
 {
-	cancel_delayed_work_sync(&hctx->run_work);
+	if (sync)
+		cancel_delayed_work_sync(&hctx->run_work);
+	else
+		cancel_delayed_work(&hctx->run_work);
+
 	set_bit(BLK_MQ_S_STOPPED, &hctx->state);
 }
+
+void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx)
+{
+	__blk_mq_stop_hw_queue(hctx, false);
+}
 EXPORT_SYMBOL(blk_mq_stop_hw_queue);
 
-void blk_mq_stop_hw_queues(struct request_queue *q)
+void __blk_mq_stop_hw_queues(struct request_queue *q, bool sync)
 {
 	struct blk_mq_hw_ctx *hctx;
 	int i;
 
 	queue_for_each_hw_ctx(q, hctx, i)
-		blk_mq_stop_hw_queue(hctx);
+		__blk_mq_stop_hw_queue(hctx, sync);
+}
+
+void blk_mq_stop_hw_queues(struct request_queue *q)
+{
+	__blk_mq_stop_hw_queues(q, false);
 }
 EXPORT_SYMBOL(blk_mq_stop_hw_queues);
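The sync/async split above exists because blk_mq_stop_hw_queue() can be called from non-blocking context, where cancel_delayed_work_sync() must not be used since it may sleep; only blk_mq_quiesce_queue(), which may block anyway, requests the synchronous cancel. A minimal sketch of the constraint on the driver side (the mydrv_* names are hypothetical, not from this series):

/*
 * Hypothetical driver fragment. Runs from an IRQ-driven completion
 * path, so it must not wait on workqueue flushing: the non-blocking
 * blk_mq_stop_hw_queues() only cancels pending run_work.
 */
static void mydrv_fatal_error(struct mydrv_ctrl *ctrl)
{
	blk_mq_stop_hw_queues(ctrl->queue);	/* async cancel, no flush */
	schedule_work(&ctrl->reset_work);	/* sleepable recovery later */
}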
@@ -1655,8 +1671,7 @@ void blk_mq_free_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
 			if (!rq)
 				continue;
-			set->ops->exit_request(set->driver_data, rq,
-						hctx_idx, i);
+			set->ops->exit_request(set, rq, hctx_idx);
 			tags->static_rqs[i] = NULL;
 		}
 	}
@@ -1787,8 +1802,7 @@ int blk_mq_alloc_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
 		tags->static_rqs[i] = rq;
 		if (set->ops->init_request) {
-			if (set->ops->init_request(set->driver_data,
-					rq, hctx_idx, i,
+			if (set->ops->init_request(set, rq, hctx_idx,
 					node)) {
 				tags->static_rqs[i] = NULL;
 				goto fail;
@@ -1849,14 +1863,12 @@ static void blk_mq_exit_hctx(struct request_queue *q,
 		struct blk_mq_tag_set *set,
 		struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx)
 {
-	unsigned flush_start_tag = set->queue_depth;
+	blk_mq_debugfs_unregister_hctx(hctx);
 
 	blk_mq_tag_idle(hctx);
 
 	if (set->ops->exit_request)
-		set->ops->exit_request(set->driver_data,
-				       hctx->fq->flush_rq, hctx_idx,
-				       flush_start_tag + hctx_idx);
+		set->ops->exit_request(set, hctx->fq->flush_rq, hctx_idx);
 
 	blk_mq_sched_exit_hctx(q, hctx, hctx_idx);
@@ -1889,7 +1901,6 @@ static int blk_mq_init_hctx(struct request_queue *q,
 		struct blk_mq_hw_ctx *hctx, unsigned hctx_idx)
 {
 	int node;
-	unsigned flush_start_tag = set->queue_depth;
 
 	node = hctx->numa_node;
 	if (node == NUMA_NO_NODE)
@@ -1933,14 +1944,15 @@ static int blk_mq_init_hctx(struct request_queue *q,
 		goto sched_exit_hctx;
 
 	if (set->ops->init_request &&
-	    set->ops->init_request(set->driver_data,
-				   hctx->fq->flush_rq, hctx_idx,
-				   flush_start_tag + hctx_idx, node))
+	    set->ops->init_request(set, hctx->fq->flush_rq, hctx_idx,
+				   node))
 		goto free_fq;
 
 	if (hctx->flags & BLK_MQ_F_BLOCKING)
 		init_srcu_struct(&hctx->queue_rq_srcu);
 
+	blk_mq_debugfs_register_hctx(q, hctx);
+
 	return 0;
 
  free_fq:
@@ -2329,15 +2341,15 @@ struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
 	blk_mq_init_cpu_queues(q, set->nr_hw_queues);
 
-	get_online_cpus();
 	mutex_lock(&all_q_mutex);
+	get_online_cpus();
 
 	list_add_tail(&q->all_q_node, &all_q_list);
 	blk_mq_add_queue_tag_set(set, q);
 	blk_mq_map_swqueue(q, cpu_online_mask);
 
-	mutex_unlock(&all_q_mutex);
 	put_online_cpus();
+	mutex_unlock(&all_q_mutex);
 
 	if (!(set->flags & BLK_MQ_F_NO_SCHED)) {
 		int ret;
@@ -2378,6 +2390,7 @@ static void blk_mq_queue_reinit(struct request_queue *q,
 {
 	WARN_ON_ONCE(!atomic_read(&q->mq_freeze_depth));
 
+	blk_mq_debugfs_unregister_hctxs(q);
 	blk_mq_sysfs_unregister(q);
 
 	/*
@@ -2389,6 +2402,7 @@ static void blk_mq_queue_reinit(struct request_queue *q,
 	blk_mq_map_swqueue(q, online_mask);
 
 	blk_mq_sysfs_register(q);
+	blk_mq_debugfs_register_hctxs(q);
 }
 
 /*
@@ -2617,7 +2631,6 @@ int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr)
 		return -EINVAL;
 
 	blk_mq_freeze_queue(q);
-	blk_mq_quiesce_queue(q);
 
 	ret = 0;
 	queue_for_each_hw_ctx(q, hctx, i) {
@@ -2643,7 +2656,6 @@ int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr)
 		q->nr_requests = nr;
 
 	blk_mq_unfreeze_queue(q);
-	blk_mq_start_stopped_hw_queues(q, true);
 
 	return ret;
 }
......
@@ -83,34 +83,6 @@ extern int blk_mq_sysfs_register(struct request_queue *q);
 extern void blk_mq_sysfs_unregister(struct request_queue *q);
 extern void blk_mq_hctx_kobj_init(struct blk_mq_hw_ctx *hctx);
 
-/*
- * debugfs helpers
- */
-#ifdef CONFIG_BLK_DEBUG_FS
-int blk_mq_debugfs_register(struct request_queue *q);
-void blk_mq_debugfs_unregister(struct request_queue *q);
-int blk_mq_debugfs_register_mq(struct request_queue *q);
-void blk_mq_debugfs_unregister_mq(struct request_queue *q);
-#else
-static inline int blk_mq_debugfs_register(struct request_queue *q)
-{
-	return 0;
-}
-static inline void blk_mq_debugfs_unregister(struct request_queue *q)
-{
-}
-static inline int blk_mq_debugfs_register_mq(struct request_queue *q)
-{
-	return 0;
-}
-static inline void blk_mq_debugfs_unregister_mq(struct request_queue *q)
-{
-}
-#endif
-
 extern void blk_mq_rq_timed_out(struct request *req, bool reserved);
 
 void blk_mq_release(struct request_queue *q);
......
@@ -13,6 +13,7 @@
 
 #include "blk.h"
 #include "blk-mq.h"
+#include "blk-mq-debugfs.h"
 #include "blk-wbt.h"
 
 struct queue_sysfs_entry {
@@ -889,6 +890,8 @@ int blk_register_queue(struct gendisk *disk)
 	if (q->mq_ops)
 		__blk_mq_register_dev(dev, q);
 
+	blk_mq_debugfs_register(q);
+
 	kobject_uevent(&q->kobj, KOBJ_ADD);
 
 	wbt_enable_default(q);
......
@@ -950,7 +950,6 @@ static int elevator_switch_mq(struct request_queue *q,
 	int ret;
 
 	blk_mq_freeze_queue(q);
-	blk_mq_quiesce_queue(q);
 
 	if (q->elevator) {
 		if (q->elevator->registered)
@@ -978,9 +977,7 @@ static int elevator_switch_mq(struct request_queue *q,
 
 out:
 	blk_mq_unfreeze_queue(q);
-	blk_mq_start_stopped_hw_queues(q, true);
-
 	return ret;
 }
 
 /*
@@ -1088,19 +1085,6 @@ static int __elevator_change(struct request_queue *q, const char *name)
 	return elevator_switch(q, e);
 }
 
-int elevator_change(struct request_queue *q, const char *name)
-{
-	int ret;
-
-	/* Protect q->elevator from elevator_init() */
-	mutex_lock(&q->sysfs_lock);
-	ret = __elevator_change(q, name);
-	mutex_unlock(&q->sysfs_lock);
-
-	return ret;
-}
-EXPORT_SYMBOL(elevator_change);
-
 static inline bool elv_support_iosched(struct request_queue *q)
 {
 	if (q->mq_ops && q->tag_set && (q->tag_set->flags &
......
@@ -26,6 +26,7 @@
 
 #include "blk.h"
 #include "blk-mq.h"
+#include "blk-mq-debugfs.h"
 #include "blk-mq-sched.h"
 #include "blk-mq-tag.h"
 #include "blk-stat.h"
@@ -683,6 +684,131 @@ static struct elv_fs_entry kyber_sched_attrs[] = {
};
#undef KYBER_LAT_ATTR
#ifdef CONFIG_BLK_DEBUG_FS
#define KYBER_DEBUGFS_DOMAIN_ATTRS(domain, name) \
static int kyber_##name##_tokens_show(void *data, struct seq_file *m) \
{ \
struct request_queue *q = data; \
struct kyber_queue_data *kqd = q->elevator->elevator_data; \
\
sbitmap_queue_show(&kqd->domain_tokens[domain], m); \
return 0; \
} \
\
static void *kyber_##name##_rqs_start(struct seq_file *m, loff_t *pos) \
__acquires(&khd->lock) \
{ \
struct blk_mq_hw_ctx *hctx = m->private; \
struct kyber_hctx_data *khd = hctx->sched_data; \
\
spin_lock(&khd->lock); \
return seq_list_start(&khd->rqs[domain], *pos); \
} \
\
static void *kyber_##name##_rqs_next(struct seq_file *m, void *v, \
loff_t *pos) \
{ \
struct blk_mq_hw_ctx *hctx = m->private; \
struct kyber_hctx_data *khd = hctx->sched_data; \
\
return seq_list_next(v, &khd->rqs[domain], pos); \
} \
\
static void kyber_##name##_rqs_stop(struct seq_file *m, void *v) \
__releases(&khd->lock) \
{ \
struct blk_mq_hw_ctx *hctx = m->private; \
struct kyber_hctx_data *khd = hctx->sched_data; \
\
spin_unlock(&khd->lock); \
} \
\
static const struct seq_operations kyber_##name##_rqs_seq_ops = { \
.start = kyber_##name##_rqs_start, \
.next = kyber_##name##_rqs_next, \
.stop = kyber_##name##_rqs_stop, \
.show = blk_mq_debugfs_rq_show, \
}; \
\
static int kyber_##name##_waiting_show(void *data, struct seq_file *m) \
{ \
struct blk_mq_hw_ctx *hctx = data; \
struct kyber_hctx_data *khd = hctx->sched_data; \
wait_queue_t *wait = &khd->domain_wait[domain]; \
\
seq_printf(m, "%d\n", !list_empty_careful(&wait->task_list)); \
return 0; \
}
KYBER_DEBUGFS_DOMAIN_ATTRS(KYBER_READ, read)
KYBER_DEBUGFS_DOMAIN_ATTRS(KYBER_SYNC_WRITE, sync_write)
KYBER_DEBUGFS_DOMAIN_ATTRS(KYBER_OTHER, other)
#undef KYBER_DEBUGFS_DOMAIN_ATTRS
static int kyber_async_depth_show(void *data, struct seq_file *m)
{
struct request_queue *q = data;
struct kyber_queue_data *kqd = q->elevator->elevator_data;
seq_printf(m, "%u\n", kqd->async_depth);
return 0;
}
static int kyber_cur_domain_show(void *data, struct seq_file *m)
{
struct blk_mq_hw_ctx *hctx = data;
struct kyber_hctx_data *khd = hctx->sched_data;
switch (khd->cur_domain) {
case KYBER_READ:
seq_puts(m, "READ\n");
break;
case KYBER_SYNC_WRITE:
seq_puts(m, "SYNC_WRITE\n");
break;
case KYBER_OTHER:
seq_puts(m, "OTHER\n");
break;
default:
seq_printf(m, "%u\n", khd->cur_domain);
break;
}
return 0;
}
static int kyber_batching_show(void *data, struct seq_file *m)
{
struct blk_mq_hw_ctx *hctx = data;
struct kyber_hctx_data *khd = hctx->sched_data;
seq_printf(m, "%u\n", khd->batching);
return 0;
}
#define KYBER_QUEUE_DOMAIN_ATTRS(name) \
{#name "_tokens", 0400, kyber_##name##_tokens_show}
static const struct blk_mq_debugfs_attr kyber_queue_debugfs_attrs[] = {
KYBER_QUEUE_DOMAIN_ATTRS(read),
KYBER_QUEUE_DOMAIN_ATTRS(sync_write),
KYBER_QUEUE_DOMAIN_ATTRS(other),
{"async_depth", 0400, kyber_async_depth_show},
{},
};
#undef KYBER_QUEUE_DOMAIN_ATTRS
#define KYBER_HCTX_DOMAIN_ATTRS(name) \
{#name "_rqs", 0400, .seq_ops = &kyber_##name##_rqs_seq_ops}, \
{#name "_waiting", 0400, kyber_##name##_waiting_show}
static const struct blk_mq_debugfs_attr kyber_hctx_debugfs_attrs[] = {
KYBER_HCTX_DOMAIN_ATTRS(read),
KYBER_HCTX_DOMAIN_ATTRS(sync_write),
KYBER_HCTX_DOMAIN_ATTRS(other),
{"cur_domain", 0400, kyber_cur_domain_show},
{"batching", 0400, kyber_batching_show},
{},
};
#undef KYBER_HCTX_DOMAIN_ATTRS
#endif
static struct elevator_type kyber_sched = {
.ops.mq = {
.init_sched = kyber_init_sched,
@@ -696,6 +822,10 @@ static struct elevator_type kyber_sched = {
 		.has_work = kyber_has_work,
 	},
 	.uses_mq = true,
+#ifdef CONFIG_BLK_DEBUG_FS
+	.queue_debugfs_attrs = kyber_queue_debugfs_attrs,
+	.hctx_debugfs_attrs = kyber_hctx_debugfs_attrs,
+#endif
 	.elevator_attrs = kyber_sched_attrs,
 	.elevator_name = "kyber",
 	.elevator_owner = THIS_MODULE,
......
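For context on where these attributes land: per-queue scheduler files are created under the queue's debugfs directory and per-hctx files under each hardware queue's directory, which this series renames to hctx<n>. Assuming a disk named nvme0n1 and the usual block debugfs root, kyber's files would be expected to appear roughly as follows (illustrative paths, not captured output):

	/sys/kernel/debug/block/nvme0n1/sched/read_tokens
	/sys/kernel/debug/block/nvme0n1/sched/async_depth
	/sys/kernel/debug/block/nvme0n1/hctx0/sched/read_rqs
	/sys/kernel/debug/block/nvme0n1/hctx0/sched/cur_domain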
@@ -19,6 +19,7 @@
 
 #include "blk.h"
 #include "blk-mq.h"
+#include "blk-mq-debugfs.h"
 #include "blk-mq-tag.h"
 #include "blk-mq-sched.h"
@@ -517,6 +518,125 @@ static struct elv_fs_entry deadline_attrs[] = {
__ATTR_NULL
};
#ifdef CONFIG_BLK_DEBUG_FS
#define DEADLINE_DEBUGFS_DDIR_ATTRS(ddir, name) \
static void *deadline_##name##_fifo_start(struct seq_file *m, \
loff_t *pos) \
__acquires(&dd->lock) \
{ \
struct request_queue *q = m->private; \
struct deadline_data *dd = q->elevator->elevator_data; \
\
spin_lock(&dd->lock); \
return seq_list_start(&dd->fifo_list[ddir], *pos); \
} \
\
static void *deadline_##name##_fifo_next(struct seq_file *m, void *v, \
loff_t *pos) \
{ \
struct request_queue *q = m->private; \
struct deadline_data *dd = q->elevator->elevator_data; \
\
return seq_list_next(v, &dd->fifo_list[ddir], pos); \
} \
\
static void deadline_##name##_fifo_stop(struct seq_file *m, void *v) \
__releases(&dd->lock) \
{ \
struct request_queue *q = m->private; \
struct deadline_data *dd = q->elevator->elevator_data; \
\
spin_unlock(&dd->lock); \
} \
\
static const struct seq_operations deadline_##name##_fifo_seq_ops = { \
.start = deadline_##name##_fifo_start, \
.next = deadline_##name##_fifo_next, \
.stop = deadline_##name##_fifo_stop, \
.show = blk_mq_debugfs_rq_show, \
}; \
\
static int deadline_##name##_next_rq_show(void *data, \
struct seq_file *m) \
{ \
struct request_queue *q = data; \
struct deadline_data *dd = q->elevator->elevator_data; \
struct request *rq = dd->next_rq[ddir]; \
\
if (rq) \
__blk_mq_debugfs_rq_show(m, rq); \
return 0; \
}
DEADLINE_DEBUGFS_DDIR_ATTRS(READ, read)
DEADLINE_DEBUGFS_DDIR_ATTRS(WRITE, write)
#undef DEADLINE_DEBUGFS_DDIR_ATTRS
static int deadline_batching_show(void *data, struct seq_file *m)
{
struct request_queue *q = data;
struct deadline_data *dd = q->elevator->elevator_data;
seq_printf(m, "%u\n", dd->batching);
return 0;
}
static int deadline_starved_show(void *data, struct seq_file *m)
{
struct request_queue *q = data;
struct deadline_data *dd = q->elevator->elevator_data;
seq_printf(m, "%u\n", dd->starved);
return 0;
}
static void *deadline_dispatch_start(struct seq_file *m, loff_t *pos)
__acquires(&dd->lock)
{
struct request_queue *q = m->private;
struct deadline_data *dd = q->elevator->elevator_data;
spin_lock(&dd->lock);
return seq_list_start(&dd->dispatch, *pos);
}
static void *deadline_dispatch_next(struct seq_file *m, void *v, loff_t *pos)
{
struct request_queue *q = m->private;
struct deadline_data *dd = q->elevator->elevator_data;
return seq_list_next(v, &dd->dispatch, pos);
}
static void deadline_dispatch_stop(struct seq_file *m, void *v)
__releases(&dd->lock)
{
struct request_queue *q = m->private;
struct deadline_data *dd = q->elevator->elevator_data;
spin_unlock(&dd->lock);
}
static const struct seq_operations deadline_dispatch_seq_ops = {
.start = deadline_dispatch_start,
.next = deadline_dispatch_next,
.stop = deadline_dispatch_stop,
.show = blk_mq_debugfs_rq_show,
};
#define DEADLINE_QUEUE_DDIR_ATTRS(name) \
{#name "_fifo_list", 0400, .seq_ops = &deadline_##name##_fifo_seq_ops}, \
{#name "_next_rq", 0400, deadline_##name##_next_rq_show}
static const struct blk_mq_debugfs_attr deadline_queue_debugfs_attrs[] = {
DEADLINE_QUEUE_DDIR_ATTRS(read),
DEADLINE_QUEUE_DDIR_ATTRS(write),
{"batching", 0400, deadline_batching_show},
{"starved", 0400, deadline_starved_show},
{"dispatch", 0400, .seq_ops = &deadline_dispatch_seq_ops},
{},
};
#undef DEADLINE_QUEUE_DDIR_ATTRS
#endif
static struct elevator_type mq_deadline = {
.ops.mq = {
.insert_requests = dd_insert_requests,
@@ -533,6 +653,9 @@ static struct elevator_type mq_deadline = {
 	},
 	.uses_mq = true,
+#ifdef CONFIG_BLK_DEBUG_FS
+	.queue_debugfs_attrs = deadline_queue_debugfs_attrs,
+#endif
 	.elevator_attrs = deadline_attrs,
 	.elevator_name = "mq-deadline",
 	.elevator_owner = THIS_MODULE,
......
@@ -1697,9 +1697,8 @@ static void loop_queue_work(struct kthread_work *work)
 	loop_handle_cmd(cmd);
 }
 
-static int loop_init_request(void *data, struct request *rq,
-		unsigned int hctx_idx, unsigned int request_idx,
-		unsigned int numa_node)
+static int loop_init_request(struct blk_mq_tag_set *set, struct request *rq,
+		unsigned int hctx_idx, unsigned int numa_node)
 {
 	struct loop_cmd *cmd = blk_mq_rq_to_pdu(rq);
......
This diff is collapsed.
@@ -333,16 +333,6 @@ struct mtip_cmd {
 	dma_addr_t command_dma; /* corresponding physical address */
 
 	void *comp_data; /* data passed to completion function comp_func() */
-
-	/*
-	 * Completion function called by the ISR upon completion of
-	 * a command.
-	 */
-	void (*comp_func)(struct mtip_port *port,
-				int tag,
-				struct mtip_cmd *cmd,
-				int status);
-
 	int scatter_ents; /* Number of scatter list entries used */
 
 	int unaligned; /* command is unaligned on 4k boundary */
......
@@ -1396,12 +1396,11 @@ static void nbd_dbg_close(void)
 
 #endif
 
-static int nbd_init_request(void *data, struct request *rq,
-			    unsigned int hctx_idx, unsigned int request_idx,
-			    unsigned int numa_node)
+static int nbd_init_request(struct blk_mq_tag_set *set, struct request *rq,
+			    unsigned int hctx_idx, unsigned int numa_node)
 {
 	struct nbd_cmd *cmd = blk_mq_rq_to_pdu(rq);
-	cmd->nbd = data;
+	cmd->nbd = set->driver_data;
 	return 0;
 }
......
@@ -4307,9 +4307,8 @@ static int rbd_dev_refresh(struct rbd_device *rbd_dev)
 	return ret;
 }
 
-static int rbd_init_request(void *data, struct request *rq,
-		unsigned int hctx_idx, unsigned int request_idx,
-		unsigned int numa_node)
+static int rbd_init_request(struct blk_mq_tag_set *set, struct request *rq,
+		unsigned int hctx_idx, unsigned int numa_node)
 {
 	struct work_struct *work = blk_mq_rq_to_pdu(rq);
......
@@ -573,11 +573,10 @@ static const struct device_attribute dev_attr_cache_type_rw =
 	__ATTR(cache_type, S_IRUGO|S_IWUSR,
 	       virtblk_cache_type_show, virtblk_cache_type_store);
 
-static int virtblk_init_request(void *data, struct request *rq,
-		unsigned int hctx_idx, unsigned int request_idx,
-		unsigned int numa_node)
+static int virtblk_init_request(struct blk_mq_tag_set *set, struct request *rq,
+		unsigned int hctx_idx, unsigned int numa_node)
 {
-	struct virtio_blk *vblk = data;
+	struct virtio_blk *vblk = set->driver_data;
 	struct virtblk_req *vbr = blk_mq_rq_to_pdu(rq);
 
 #ifdef CONFIG_VIRTIO_BLK_SCSI
......
@@ -74,7 +74,7 @@ static int nvm_reserve_luns(struct nvm_dev *dev, int lun_begin, int lun_end)
 	return 0;
 err:
-	while (--i > lun_begin)
+	while (--i >= lun_begin)
 		clear_bit(i, dev->lun_map);
 
 	return -EBUSY;
@@ -211,7 +211,7 @@ static struct nvm_tgt_dev *nvm_create_tgt_dev(struct nvm_dev *dev,
 	return tgt_dev;
 err_ch:
-	while (--i > 0)
+	while (--i >= 0)
 		kfree(dev_map->chnls[i].lun_offs);
 	kfree(luns);
 err_luns:
......
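Both hunks above fix the same off-by-one: with "--i >" the unwind loop stops one short and leaks the first claimed entry. A minimal standalone sketch of the corrected idiom, with hypothetical claim/release helpers rather than lightnvm code:

/* Hypothetical resource table used only to illustrate the unwind idiom. */
static bool claimed[16];

static bool claim(size_t i)
{
	if (claimed[i])
		return false;	/* already taken: caller must roll back */
	claimed[i] = true;
	return true;
}

/* Claim [begin, end]; on failure release everything claimed so far. */
static int claim_range(size_t begin, size_t end)
{
	size_t i;

	for (i = begin; i <= end; i++)
		if (!claim(i))
			goto err;
	return 0;
err:
	/* ">=" semantics matter: the buggy "--i > begin" form would
	 * leave claimed[begin] set forever. */
	while (i-- > begin)
		claimed[i] = false;
	return -1;
}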
@@ -720,11 +720,10 @@ int dm_old_init_request_queue(struct mapped_device *md, struct dm_table *t)
 	return 0;
 }
 
-static int dm_mq_init_request(void *data, struct request *rq,
-		unsigned int hctx_idx, unsigned int request_idx,
-		unsigned int numa_node)
+static int dm_mq_init_request(struct blk_mq_tag_set *set, struct request *rq,
+		unsigned int hctx_idx, unsigned int numa_node)
 {
-	return __dm_rq_init_rq(data, rq);
+	return __dm_rq_init_rq(set->driver_data, rq);
 }
 
 static int dm_mq_queue_rq(struct blk_mq_hw_ctx *hctx,
......
@@ -334,10 +334,9 @@ static int ubiblock_queue_rq(struct blk_mq_hw_ctx *hctx,
 
 }
 
-static int ubiblock_init_request(void *data, struct request *req,
-				 unsigned int hctx_idx,
-				 unsigned int request_idx,
-				 unsigned int numa_node)
+static int ubiblock_init_request(struct blk_mq_tag_set *set,
+		struct request *req, unsigned int hctx_idx,
+		unsigned int numa_node)
 {
 	struct ubiblock_pdu *pdu = blk_mq_rq_to_pdu(req);
......
@@ -1172,12 +1172,12 @@ __nvme_fc_exit_request(struct nvme_fc_ctrl *ctrl,
 }
 
 static void
-nvme_fc_exit_request(void *data, struct request *rq,
-		     unsigned int hctx_idx, unsigned int rq_idx)
+nvme_fc_exit_request(struct blk_mq_tag_set *set, struct request *rq,
+		     unsigned int hctx_idx)
 {
 	struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq);
 
-	return __nvme_fc_exit_request(data, op);
+	return __nvme_fc_exit_request(set->driver_data, op);
 }
 
 static int
@@ -1434,11 +1434,10 @@ __nvme_fc_init_request(struct nvme_fc_ctrl *ctrl,
 }
 
 static int
-nvme_fc_init_request(void *data, struct request *rq,
-		     unsigned int hctx_idx, unsigned int rq_idx,
-		     unsigned int numa_node)
+nvme_fc_init_request(struct blk_mq_tag_set *set, struct request *rq,
+		     unsigned int hctx_idx, unsigned int numa_node)
 {
-	struct nvme_fc_ctrl *ctrl = data;
+	struct nvme_fc_ctrl *ctrl = set->driver_data;
 	struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq);
 	struct nvme_fc_queue *queue = &ctrl->queues[hctx_idx+1];
 
@@ -1446,11 +1445,10 @@ nvme_fc_init_request(void *data, struct request *rq,
 }
 
 static int
-nvme_fc_init_admin_request(void *data, struct request *rq,
-		     unsigned int hctx_idx, unsigned int rq_idx,
-		     unsigned int numa_node)
+nvme_fc_init_admin_request(struct blk_mq_tag_set *set, struct request *rq,
+		     unsigned int hctx_idx, unsigned int numa_node)
 {
-	struct nvme_fc_ctrl *ctrl = data;
+	struct nvme_fc_ctrl *ctrl = set->driver_data;
 	struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq);
 	struct nvme_fc_queue *queue = &ctrl->queues[0];
......
@@ -503,6 +503,8 @@ static int nvme_nvm_submit_io(struct nvm_dev *dev, struct nvm_rq *rqd)
 	if (!cmd)
 		return -ENOMEM;
 
+	nvme_nvm_rqtocmd(rq, rqd, ns, cmd);
+
 	rq = nvme_alloc_request(q, (struct nvme_command *)cmd, 0, NVME_QID_ANY);
 	if (IS_ERR(rq)) {
 		kfree(cmd);
@@ -517,8 +519,6 @@ static int nvme_nvm_submit_io(struct nvm_dev *dev, struct nvm_rq *rqd)
 		rq->__data_len = 0;
 	}
 
-	nvme_nvm_rqtocmd(rq, rqd, ns, cmd);
-
 	rq->end_io_data = rqd;
 
 	blk_execute_rq_nowait(q, NULL, rq, 0, nvme_nvm_end_io);
......
@@ -356,11 +356,11 @@ static void nvme_admin_exit_hctx(struct blk_mq_hw_ctx *hctx, unsigned int hctx_i
 	nvmeq->tags = NULL;
 }
 
-static int nvme_admin_init_request(void *data, struct request *req,
-				unsigned int hctx_idx, unsigned int rq_idx,
-				unsigned int numa_node)
+static int nvme_admin_init_request(struct blk_mq_tag_set *set,
+				struct request *req, unsigned int hctx_idx,
+				unsigned int numa_node)
 {
-	struct nvme_dev *dev = data;
+	struct nvme_dev *dev = set->driver_data;
 	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
 	struct nvme_queue *nvmeq = dev->queues[0];
 
@@ -383,11 +383,10 @@ static int nvme_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
 	return 0;
 }
 
-static int nvme_init_request(void *data, struct request *req,
-				unsigned int hctx_idx, unsigned int rq_idx,
-				unsigned int numa_node)
+static int nvme_init_request(struct blk_mq_tag_set *set, struct request *req,
+				unsigned int hctx_idx, unsigned int numa_node)
 {
-	struct nvme_dev *dev = data;
+	struct nvme_dev *dev = set->driver_data;
 	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
 	struct nvme_queue *nvmeq = dev->queues[hctx_idx + 1];
......
@@ -315,16 +315,16 @@ static void __nvme_rdma_exit_request(struct nvme_rdma_ctrl *ctrl,
 			DMA_TO_DEVICE);
 }
 
-static void nvme_rdma_exit_request(void *data, struct request *rq,
-				unsigned int hctx_idx, unsigned int rq_idx)
+static void nvme_rdma_exit_request(struct blk_mq_tag_set *set,
+		struct request *rq, unsigned int hctx_idx)
 {
-	return __nvme_rdma_exit_request(data, rq, hctx_idx + 1);
+	return __nvme_rdma_exit_request(set->driver_data, rq, hctx_idx + 1);
 }
 
-static void nvme_rdma_exit_admin_request(void *data, struct request *rq,
-				unsigned int hctx_idx, unsigned int rq_idx)
+static void nvme_rdma_exit_admin_request(struct blk_mq_tag_set *set,
+		struct request *rq, unsigned int hctx_idx)
 {
-	return __nvme_rdma_exit_request(data, rq, 0);
+	return __nvme_rdma_exit_request(set->driver_data, rq, 0);
 }
 
 static int __nvme_rdma_init_request(struct nvme_rdma_ctrl *ctrl,
@@ -358,18 +358,18 @@ static int __nvme_rdma_init_request(struct nvme_rdma_ctrl *ctrl,
 	return -ENOMEM;
 }
 
-static int nvme_rdma_init_request(void *data, struct request *rq,
-				unsigned int hctx_idx, unsigned int rq_idx,
-				unsigned int numa_node)
+static int nvme_rdma_init_request(struct blk_mq_tag_set *set,
+		struct request *rq, unsigned int hctx_idx,
+		unsigned int numa_node)
 {
-	return __nvme_rdma_init_request(data, rq, hctx_idx + 1);
+	return __nvme_rdma_init_request(set->driver_data, rq, hctx_idx + 1);
 }
 
-static int nvme_rdma_init_admin_request(void *data, struct request *rq,
-				unsigned int hctx_idx, unsigned int rq_idx,
-				unsigned int numa_node)
+static int nvme_rdma_init_admin_request(struct blk_mq_tag_set *set,
+		struct request *rq, unsigned int hctx_idx,
+		unsigned int numa_node)
 {
-	return __nvme_rdma_init_request(data, rq, 0);
+	return __nvme_rdma_init_request(set->driver_data, rq, 0);
 }
 
 static int nvme_rdma_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
......
@@ -230,18 +230,19 @@ static int nvme_loop_init_iod(struct nvme_loop_ctrl *ctrl,
 	return 0;
 }
 
-static int nvme_loop_init_request(void *data, struct request *req,
-				unsigned int hctx_idx, unsigned int rq_idx,
-				unsigned int numa_node)
+static int nvme_loop_init_request(struct blk_mq_tag_set *set,
+		struct request *req, unsigned int hctx_idx,
+		unsigned int numa_node)
 {
-	return nvme_loop_init_iod(data, blk_mq_rq_to_pdu(req), hctx_idx + 1);
+	return nvme_loop_init_iod(set->driver_data, blk_mq_rq_to_pdu(req),
+			hctx_idx + 1);
 }
 
-static int nvme_loop_init_admin_request(void *data, struct request *req,
-				unsigned int hctx_idx, unsigned int rq_idx,
-				unsigned int numa_node)
+static int nvme_loop_init_admin_request(struct blk_mq_tag_set *set,
+		struct request *req, unsigned int hctx_idx,
+		unsigned int numa_node)
 {
-	return nvme_loop_init_iod(data, blk_mq_rq_to_pdu(req), 0);
+	return nvme_loop_init_iod(set->driver_data, blk_mq_rq_to_pdu(req), 0);
 }
 
 static int nvme_loop_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
......
@@ -1999,11 +1999,10 @@ static enum blk_eh_timer_return scsi_timeout(struct request *req,
 	return scsi_times_out(req);
 }
 
-static int scsi_init_request(void *data, struct request *rq,
-		unsigned int hctx_idx, unsigned int request_idx,
-		unsigned int numa_node)
+static int scsi_init_request(struct blk_mq_tag_set *set, struct request *rq,
+		unsigned int hctx_idx, unsigned int numa_node)
 {
-	struct Scsi_Host *shost = data;
+	struct Scsi_Host *shost = set->driver_data;
 	struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq);
 
 	cmd->sense_buffer =
@@ -2014,10 +2013,10 @@ static int scsi_init_request(void *data, struct request *rq,
 	return 0;
 }
 
-static void scsi_exit_request(void *data, struct request *rq,
-		unsigned int hctx_idx, unsigned int request_idx)
+static void scsi_exit_request(struct blk_mq_tag_set *set, struct request *rq,
+		unsigned int hctx_idx)
 {
-	struct Scsi_Host *shost = data;
+	struct Scsi_Host *shost = set->driver_data;
 	struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq);
 
 	scsi_free_sense_buffer(shost, cmd->sense_buffer);
......
@@ -139,7 +139,7 @@ struct nfs_mount_request {
 };
 
 struct nfs_mount_info {
-	int (*fill_super)(struct super_block *, struct nfs_mount_info *);
+	void (*fill_super)(struct super_block *, struct nfs_mount_info *);
 	int (*set_security)(struct super_block *, struct dentry *, struct nfs_mount_info *);
 	struct nfs_parsed_mount_data *parsed;
 	struct nfs_clone_mount *cloned;
@@ -407,7 +407,7 @@ struct dentry *nfs_fs_mount(struct file_system_type *, int, const char *, void *
 struct dentry * nfs_xdev_mount_common(struct file_system_type *, int,
 		const char *, struct nfs_mount_info *);
 void nfs_kill_super(struct super_block *);
-int nfs_fill_super(struct super_block *, struct nfs_mount_info *);
+void nfs_fill_super(struct super_block *, struct nfs_mount_info *);
 
 extern struct rpc_stat nfs_rpcstat;
 
@@ -458,7 +458,7 @@ extern void nfs_read_prepare(struct rpc_task *task, void *calldata);
 extern void nfs_pageio_reset_read_mds(struct nfs_pageio_descriptor *pgio);
 
 /* super.c */
-int nfs_clone_super(struct super_block *, struct nfs_mount_info *);
+void nfs_clone_super(struct super_block *, struct nfs_mount_info *);
 void nfs_umount_begin(struct super_block *);
 int nfs_statfs(struct dentry *, struct kstatfs *);
 int nfs_show_options(struct seq_file *, struct dentry *);
......
@@ -2321,11 +2321,10 @@ inline void nfs_initialise_sb(struct super_block *sb)
 /*
  * Finish setting up an NFS2/3 superblock
  */
-int nfs_fill_super(struct super_block *sb, struct nfs_mount_info *mount_info)
+void nfs_fill_super(struct super_block *sb, struct nfs_mount_info *mount_info)
 {
 	struct nfs_parsed_mount_data *data = mount_info->parsed;
 	struct nfs_server *server = NFS_SB(sb);
-	int ret;
 
 	sb->s_blocksize_bits = 0;
 	sb->s_blocksize = 0;
@@ -2343,21 +2342,13 @@ int nfs_fill_super(struct super_block *sb, struct nfs_mount_info *mount_info)
 	}
 
 	nfs_initialise_sb(sb);
-
-	ret = super_setup_bdi_name(sb, "%u:%u", MAJOR(server->s_dev),
-				   MINOR(server->s_dev));
-	if (ret)
-		return ret;
-	sb->s_bdi->ra_pages = server->rpages * NFS_MAX_READAHEAD;
-	return 0;
 }
 EXPORT_SYMBOL_GPL(nfs_fill_super);
 
 /*
  * Finish setting up a cloned NFS2/3/4 superblock
  */
-int nfs_clone_super(struct super_block *sb, struct nfs_mount_info *mount_info)
+void nfs_clone_super(struct super_block *sb, struct nfs_mount_info *mount_info)
 {
 	const struct super_block *old_sb = mount_info->cloned->sb;
 	struct nfs_server *server = NFS_SB(sb);
@@ -2377,10 +2368,6 @@ int nfs_clone_super(struct super_block *sb, struct nfs_mount_info *mount_info)
 	}
 
 	nfs_initialise_sb(sb);
-
-	sb->s_bdi = bdi_get(old_sb->s_bdi);
-
-	return 0;
 }
 
 static int nfs_compare_mount_options(const struct super_block *s, const struct nfs_server *b, int flags)
@@ -2600,14 +2587,19 @@ struct dentry *nfs_fs_mount_common(struct nfs_server *server,
 		nfs_free_server(server);
 		server = NULL;
 	} else {
+		error = super_setup_bdi_name(s, "%u:%u", MAJOR(server->s_dev),
+					     MINOR(server->s_dev));
+		if (error) {
+			mntroot = ERR_PTR(error);
+			goto error_splat_super;
+		}
+		s->s_bdi->ra_pages = server->rpages * NFS_MAX_READAHEAD;
 		server->super = s;
 	}
 
 	if (!s->s_root) {
 		/* initial superblock/root creation */
-		error = mount_info->fill_super(s, mount_info);
-		if (error)
-			goto error_splat_super;
+		mount_info->fill_super(s, mount_info);
 		nfs_get_cache_cookie(s, mount_info->parsed, mount_info->cloned);
 	}
......
@@ -57,6 +57,11 @@ struct blk_mq_hw_ctx {
 	unsigned long		poll_considered;
 	unsigned long		poll_invoked;
 	unsigned long		poll_success;
+
+#ifdef CONFIG_BLK_DEBUG_FS
+	struct dentry		*debugfs_dir;
+	struct dentry		*sched_debugfs_dir;
+#endif
 };
 
 struct blk_mq_tag_set {
@@ -86,9 +91,9 @@ typedef int (queue_rq_fn)(struct blk_mq_hw_ctx *, const struct blk_mq_queue_data
 typedef enum blk_eh_timer_return (timeout_fn)(struct request *, bool);
 typedef int (init_hctx_fn)(struct blk_mq_hw_ctx *, void *, unsigned int);
 typedef void (exit_hctx_fn)(struct blk_mq_hw_ctx *, unsigned int);
-typedef int (init_request_fn)(void *, struct request *, unsigned int,
+typedef int (init_request_fn)(struct blk_mq_tag_set *set, struct request *,
 		unsigned int, unsigned int);
-typedef void (exit_request_fn)(void *, struct request *, unsigned int,
-		unsigned int);
+typedef void (exit_request_fn)(struct blk_mq_tag_set *set, struct request *,
+		unsigned int);
 typedef int (reinit_request_fn)(void *, struct request *);
......
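Putting the prototype change together, a minimal sketch of a converted driver-side pair, following the same pattern as the loop/nbd/scsi conversions above (the mydrv_* names are hypothetical, not from this series):

struct mydrv_cmd {
	struct mydrv_ctrl	*ctrl;	/* back-pointer filled at init time */
};

/*
 * Called once per pre-allocated request; the tag_set (rather than an
 * opaque void *) now carries the driver context in set->driver_data.
 */
static int mydrv_init_request(struct blk_mq_tag_set *set, struct request *rq,
			      unsigned int hctx_idx, unsigned int numa_node)
{
	struct mydrv_cmd *cmd = blk_mq_rq_to_pdu(rq);

	cmd->ctrl = set->driver_data;
	return 0;
}

static void mydrv_exit_request(struct blk_mq_tag_set *set, struct request *rq,
			       unsigned int hctx_idx)
{
	struct mydrv_cmd *cmd = blk_mq_rq_to_pdu(rq);

	cmd->ctrl = NULL;
}

Note that the old per-request index parameter (request_idx/rq_idx) is dropped entirely, as the conversions above show; the flush_start_tag arithmetic that depended on it is removed in blk-mq.c in the same series.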
@@ -579,7 +579,7 @@ struct request_queue {
 #ifdef CONFIG_BLK_DEBUG_FS
 	struct dentry		*debugfs_dir;
-	struct dentry		*mq_debugfs_dir;
+	struct dentry		*sched_debugfs_dir;
 #endif
 
 	bool			mq_sysfs_init_done;
......
@@ -8,6 +8,9 @@
 
 struct io_cq;
 struct elevator_type;
+#ifdef CONFIG_BLK_DEBUG_FS
+struct blk_mq_debugfs_attr;
+#endif
 
 /*
  * Return values from elevator merger
@@ -144,6 +147,10 @@ struct elevator_type
 	char elevator_name[ELV_NAME_MAX];
 	struct module *elevator_owner;
 	bool uses_mq;
+#ifdef CONFIG_BLK_DEBUG_FS
+	const struct blk_mq_debugfs_attr *queue_debugfs_attrs;
+	const struct blk_mq_debugfs_attr *hctx_debugfs_attrs;
+#endif
 
 	/* managed by elevator core */
 	char icq_cache_name[ELV_NAME_MAX + 5];	/* elvname + "_io_cq" */
@@ -214,7 +221,6 @@ extern ssize_t elv_iosched_store(struct request_queue *, const char *, size_t);
 extern int elevator_init(struct request_queue *, char *);
 extern void elevator_exit(struct request_queue *, struct elevator_queue *);
-extern int elevator_change(struct request_queue *, const char *);
 extern bool elv_bio_merge_ok(struct request *, struct bio *);
 extern struct elevator_queue *elevator_alloc(struct request_queue *,
 		struct elevator_type *);
......