#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/backing-dev.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/smp.h>

#include <linux/blk-mq.h>
#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-tag.h"

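/*
 * Released when the last per-CPU ctx kobject drops its reference on the
 * parent ctxs kobject: free the per-CPU software context array and the
 * container itself.
 */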
static void blk_mq_sysfs_release(struct kobject *kobj)
{
	struct blk_mq_ctxs *ctxs = container_of(kobj, struct blk_mq_ctxs, kobj);

	free_percpu(ctxs->queue_ctx);
	kfree(ctxs);
}

static void blk_mq_ctx_sysfs_release(struct kobject *kobj)
{
	struct blk_mq_ctx *ctx = container_of(kobj, struct blk_mq_ctx, kobj);

	/* ctx->ctxs won't be released until all ctx are freed */
	kobject_put(&ctx->ctxs->kobj);
}

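/*
 * Released when the hardware context kobject's last reference is dropped:
 * tear down the SRCU state of blocking hctxs, the flush queue, the
 * software-context map and the cpumask, then free the hctx itself.
 */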
static void blk_mq_hw_sysfs_release(struct kobject *kobj)
{
	struct blk_mq_hw_ctx *hctx = container_of(kobj, struct blk_mq_hw_ctx,
						  kobj);

	if (hctx->flags & BLK_MQ_F_BLOCKING)
		cleanup_srcu_struct(hctx->srcu);
	blk_free_flush_queue(hctx->fq);
	sbitmap_free(&hctx->ctx_map);
	free_cpumask_var(hctx->cpumask);
	kfree(hctx->ctxs);
	kfree(hctx);
}

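/*
 * sysfs attribute plumbing: each entry pairs an attribute with typed
 * show/store callbacks.  The generic dispatchers below recover the entry
 * and object via container_of() and run the callback under q->sysfs_lock,
 * returning -ENOENT once the queue is dying.
 */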
struct blk_mq_ctx_sysfs_entry {
	struct attribute attr;
	ssize_t (*show)(struct blk_mq_ctx *, char *);
	ssize_t (*store)(struct blk_mq_ctx *, const char *, size_t);
};

struct blk_mq_hw_ctx_sysfs_entry {
	struct attribute attr;
	ssize_t (*show)(struct blk_mq_hw_ctx *, char *);
	ssize_t (*store)(struct blk_mq_hw_ctx *, const char *, size_t);
};

static ssize_t blk_mq_sysfs_show(struct kobject *kobj, struct attribute *attr,
				 char *page)
{
	struct blk_mq_ctx_sysfs_entry *entry;
	struct blk_mq_ctx *ctx;
	struct request_queue *q;
	ssize_t res;

	entry = container_of(attr, struct blk_mq_ctx_sysfs_entry, attr);
	ctx = container_of(kobj, struct blk_mq_ctx, kobj);
	q = ctx->queue;

	if (!entry->show)
		return -EIO;

	res = -ENOENT;
	mutex_lock(&q->sysfs_lock);
	if (!blk_queue_dying(q))
		res = entry->show(ctx, page);
	mutex_unlock(&q->sysfs_lock);
	return res;
}

static ssize_t blk_mq_sysfs_store(struct kobject *kobj, struct attribute *attr,
				  const char *page, size_t length)
{
	struct blk_mq_ctx_sysfs_entry *entry;
	struct blk_mq_ctx *ctx;
	struct request_queue *q;
	ssize_t res;

	entry = container_of(attr, struct blk_mq_ctx_sysfs_entry, attr);
	ctx = container_of(kobj, struct blk_mq_ctx, kobj);
	q = ctx->queue;

	if (!entry->store)
		return -EIO;

	res = -ENOENT;
	mutex_lock(&q->sysfs_lock);
	if (!blk_queue_dying(q))
		res = entry->store(ctx, page, length);
	mutex_unlock(&q->sysfs_lock);
	return res;
}

static ssize_t blk_mq_hw_sysfs_show(struct kobject *kobj,
				    struct attribute *attr, char *page)
{
	struct blk_mq_hw_ctx_sysfs_entry *entry;
	struct blk_mq_hw_ctx *hctx;
	struct request_queue *q;
	ssize_t res;

	entry = container_of(attr, struct blk_mq_hw_ctx_sysfs_entry, attr);
	hctx = container_of(kobj, struct blk_mq_hw_ctx, kobj);
	q = hctx->queue;

	if (!entry->show)
		return -EIO;

	res = -ENOENT;
	mutex_lock(&q->sysfs_lock);
	if (!blk_queue_dying(q))
		res = entry->show(hctx, page);
	mutex_unlock(&q->sysfs_lock);
	return res;
}

static ssize_t blk_mq_hw_sysfs_store(struct kobject *kobj,
				     struct attribute *attr, const char *page,
				     size_t length)
{
	struct blk_mq_hw_ctx_sysfs_entry *entry;
	struct blk_mq_hw_ctx *hctx;
	struct request_queue *q;
	ssize_t res;

	entry = container_of(attr, struct blk_mq_hw_ctx_sysfs_entry, attr);
	hctx = container_of(kobj, struct blk_mq_hw_ctx, kobj);
	q = hctx->queue;

	if (!entry->store)
		return -EIO;

	res = -ENOENT;
	mutex_lock(&q->sysfs_lock);
	if (!blk_queue_dying(q))
		res = entry->store(hctx, page, length);
	mutex_unlock(&q->sysfs_lock);
	return res;
}

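/* Export the tag-set sizes of this hardware queue. */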
static ssize_t blk_mq_hw_sysfs_nr_tags_show(struct blk_mq_hw_ctx *hctx,
					    char *page)
{
	return sprintf(page, "%u\n", hctx->tags->nr_tags);
}

static ssize_t blk_mq_hw_sysfs_nr_reserved_tags_show(struct blk_mq_hw_ctx *hctx,
						     char *page)
{
	return sprintf(page, "%u\n", hctx->tags->nr_reserved_tags);
}

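/*
 * Print the CPUs mapped to this hardware queue as a comma-separated
 * list, truncated if it would overflow the page.
 */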
static ssize_t blk_mq_hw_sysfs_cpus_show(struct blk_mq_hw_ctx *hctx, char *page)
{
	const size_t size = PAGE_SIZE - 1;
	unsigned int i, first = 1;
	int ret = 0, pos = 0;

	for_each_cpu(i, hctx->cpumask) {
		if (first)
			ret = snprintf(pos + page, size - pos, "%u", i);
		else
			ret = snprintf(pos + page, size - pos, ", %u", i);

		if (ret >= size - pos)
			break;

		first = 0;
		pos += ret;
	}

	ret = snprintf(pos + page, size + 1 - pos, "\n");
	return pos + ret;
}

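/* Expose the dispatch type (enum hctx_type) of this hardware queue. */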
static ssize_t blk_mq_hw_sysfs_type_show(struct blk_mq_hw_ctx *hctx, char *page)
{
	return sprintf(page, "%u\n", hctx->type);
}

static struct attribute *default_ctx_attrs[] = {
	NULL,
};

static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_nr_tags = {
	.attr = {.name = "nr_tags", .mode = 0444 },
	.show = blk_mq_hw_sysfs_nr_tags_show,
};
static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_nr_reserved_tags = {
	.attr = {.name = "nr_reserved_tags", .mode = 0444 },
	.show = blk_mq_hw_sysfs_nr_reserved_tags_show,
};
static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_cpus = {
	.attr = {.name = "cpu_list", .mode = 0444 },
	.show = blk_mq_hw_sysfs_cpus_show,
};
static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_type = {
	.attr = {.name = "type", .mode = 0444 },
	.show = blk_mq_hw_sysfs_type_show,
};

static struct attribute *default_hw_ctx_attrs[] = {
	&blk_mq_hw_sysfs_nr_tags.attr,
	&blk_mq_hw_sysfs_nr_reserved_tags.attr,
	&blk_mq_hw_sysfs_cpus.attr,
	&blk_mq_hw_sysfs_type.attr,
	NULL,
};

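/*
 * The queue-level and per-ctx kobjects share the same show/store
 * dispatchers; hardware contexts get their own ops and attribute set.
 */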
static const struct sysfs_ops blk_mq_sysfs_ops = {
	.show	= blk_mq_sysfs_show,
	.store	= blk_mq_sysfs_store,
};

static const struct sysfs_ops blk_mq_hw_sysfs_ops = {
	.show	= blk_mq_hw_sysfs_show,
	.store	= blk_mq_hw_sysfs_store,
};

static struct kobj_type blk_mq_ktype = {
	.sysfs_ops	= &blk_mq_sysfs_ops,
	.release	= blk_mq_sysfs_release,
};

static struct kobj_type blk_mq_ctx_ktype = {
	.sysfs_ops	= &blk_mq_sysfs_ops,
	.default_attrs	= default_ctx_attrs,
	.release	= blk_mq_ctx_sysfs_release,
};

static struct kobj_type blk_mq_hw_ktype = {
	.sysfs_ops	= &blk_mq_hw_sysfs_ops,
	.default_attrs	= default_hw_ctx_attrs,
	.release	= blk_mq_hw_sysfs_release,
};

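/* Remove the "cpuN" kobjects of all mapped ctxs, then the hctx itself. */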
static void blk_mq_unregister_hctx(struct blk_mq_hw_ctx *hctx)
{
	struct blk_mq_ctx *ctx;
	int i;

	if (!hctx->nr_ctx)
		return;

	hctx_for_each_ctx(hctx, ctx, i)
		kobject_del(&ctx->kobj);

	kobject_del(&hctx->kobj);
}

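/*
 * Add the hctx kobject under mq/ (named after its queue number) and a
 * "cpuN" child for each software context mapped to it.
 */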
static int blk_mq_register_hctx(struct blk_mq_hw_ctx *hctx)
{
	struct request_queue *q = hctx->queue;
	struct blk_mq_ctx *ctx;
	int i, ret;

	if (!hctx->nr_ctx)
		return 0;

	ret = kobject_add(&hctx->kobj, q->mq_kobj, "%u", hctx->queue_num);
	if (ret)
		return ret;

	hctx_for_each_ctx(hctx, ctx, i) {
		ret = kobject_add(&ctx->kobj, &hctx->kobj, "cpu%u", ctx->cpu);
		if (ret)
			break;
	}

	return ret;
}

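/*
 * Caller must hold q->sysfs_lock.  Removes every hctx/ctx kobject and the
 * mq/ directory itself, and drops the device reference taken at
 * registration time.
 */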
void blk_mq_unregister_dev(struct device *dev, struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	int i;

	lockdep_assert_held(&q->sysfs_lock);

	queue_for_each_hw_ctx(q, hctx, i)
		blk_mq_unregister_hctx(hctx);

	kobject_uevent(q->mq_kobj, KOBJ_REMOVE);
	kobject_del(q->mq_kobj);
	kobject_put(&dev->kobj);

	q->mq_sysfs_init_done = false;
}

void blk_mq_hctx_kobj_init(struct blk_mq_hw_ctx *hctx)
{
	kobject_init(&hctx->kobj, &blk_mq_hw_ktype);
}

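/* Undo blk_mq_sysfs_init(): drop each per-CPU ctx kobject and mq_kobj. */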
void blk_mq_sysfs_deinit(struct request_queue *q)
{
	struct blk_mq_ctx *ctx;
	int cpu;

	for_each_possible_cpu(cpu) {
		ctx = per_cpu_ptr(q->queue_ctx, cpu);
		kobject_put(&ctx->kobj);
	}
	kobject_put(q->mq_kobj);
}

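/*
 * Initialise mq_kobj and every per-CPU ctx kobject.  Each ctx takes a
 * reference on mq_kobj so its release runs only after all ctxs are gone.
 */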
void blk_mq_sysfs_init(struct request_queue *q)
{
	struct blk_mq_ctx *ctx;
	int cpu;

	kobject_init(q->mq_kobj, &blk_mq_ktype);

	for_each_possible_cpu(cpu) {
		ctx = per_cpu_ptr(q->queue_ctx, cpu);

		kobject_get(q->mq_kobj);
		kobject_init(&ctx->kobj, &blk_mq_ctx_ktype);
	}
}

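/*
 * Create the mq/ directory under the device kobject and register every
 * hardware context; on failure, roll back the hctxs added so far.
 */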
int __blk_mq_register_dev(struct device *dev, struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	int ret, i;

	WARN_ON_ONCE(!q->kobj.parent);
	lockdep_assert_held(&q->sysfs_lock);

	ret = kobject_add(q->mq_kobj, kobject_get(&dev->kobj), "%s", "mq");
	if (ret < 0)
		goto out;

	kobject_uevent(q->mq_kobj, KOBJ_ADD);

	queue_for_each_hw_ctx(q, hctx, i) {
		ret = blk_mq_register_hctx(hctx);
		if (ret)
			goto unreg;
	}

	q->mq_sysfs_init_done = true;

out:
	return ret;

unreg:
	while (--i >= 0)
		blk_mq_unregister_hctx(q->queue_hw_ctx[i]);

	kobject_uevent(q->mq_kobj, KOBJ_REMOVE);
	kobject_del(q->mq_kobj);
	kobject_put(&dev->kobj);
	return ret;
}

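/* Locked wrapper around __blk_mq_register_dev(). */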
int blk_mq_register_dev(struct device *dev, struct request_queue *q)
{
	int ret;

	mutex_lock(&q->sysfs_lock);
	ret = __blk_mq_register_dev(dev, q);
	mutex_unlock(&q->sysfs_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(blk_mq_register_dev);

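/*
 * Tear down all hctx kobjects without removing the mq/ directory; a no-op
 * until __blk_mq_register_dev() has completed.
 */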
void blk_mq_sysfs_unregister(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	int i;

	mutex_lock(&q->sysfs_lock);
	if (!q->mq_sysfs_init_done)
		goto unlock;

	queue_for_each_hw_ctx(q, hctx, i)
		blk_mq_unregister_hctx(hctx);

unlock:
	mutex_unlock(&q->sysfs_lock);
}

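/* Re-register all hctx kobjects; a no-op until registration has completed. */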
int blk_mq_sysfs_register(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	int i, ret = 0;

	mutex_lock(&q->sysfs_lock);
	if (!q->mq_sysfs_init_done)
		goto unlock;

	queue_for_each_hw_ctx(q, hctx, i) {
		ret = blk_mq_register_hctx(hctx);
		if (ret)
			break;
	}

unlock:
	mutex_unlock(&q->sysfs_lock);

	return ret;
}