#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/backing-dev.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/smp.h>

#include <linux/blk-mq.h>
#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-tag.h"

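/*
 * Release handler for q->mq_kobj: frees the percpu software-context array
 * and the blk_mq_ctxs container that owns it.
 */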
static void blk_mq_sysfs_release(struct kobject *kobj)
{
	struct blk_mq_ctxs *ctxs = container_of(kobj, struct blk_mq_ctxs, kobj);

	free_percpu(ctxs->queue_ctx);
	kfree(ctxs);
}

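/*
 * Release handler for a software context: drop the reference it holds on
 * the shared blk_mq_ctxs container (taken in blk_mq_sysfs_init()).
 */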
static void blk_mq_ctx_sysfs_release(struct kobject *kobj)
{
	struct blk_mq_ctx *ctx = container_of(kobj, struct blk_mq_ctx, kobj);

	/* ctx->ctxs won't be released until all ctx are freed */
	kobject_put(&ctx->ctxs->kobj);
}

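/*
 * Release handler for a hardware context: free everything hanging off the
 * hctx (SRCU state for blocking drivers, flush queue, ctx map, cpumask and
 * the ctx pointer array) before the hctx itself.
 */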
static void blk_mq_hw_sysfs_release(struct kobject *kobj)
{
	struct blk_mq_hw_ctx *hctx = container_of(kobj, struct blk_mq_hw_ctx,
						  kobj);

	if (hctx->flags & BLK_MQ_F_BLOCKING)
		cleanup_srcu_struct(hctx->srcu);
	blk_free_flush_queue(hctx->fq);
	sbitmap_free(&hctx->ctx_map);
	free_cpumask_var(hctx->cpumask);
	kfree(hctx->ctxs);
	kfree(hctx);
}

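/*
 * An entry binds a sysfs attribute to typed show/store handlers for either
 * a software context (ctx) or a hardware context (hctx).
 */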
struct blk_mq_ctx_sysfs_entry {
	struct attribute attr;
	ssize_t (*show)(struct blk_mq_ctx *, char *);
	ssize_t (*store)(struct blk_mq_ctx *, const char *, size_t);
};

struct blk_mq_hw_ctx_sysfs_entry {
	struct attribute attr;
	ssize_t (*show)(struct blk_mq_hw_ctx *, char *);
	ssize_t (*store)(struct blk_mq_hw_ctx *, const char *, size_t);
};

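/*
 * Generic show/store wrappers: recover the typed objects via container_of()
 * and invoke the entry handlers under q->sysfs_lock.  Access to a dying
 * queue fails with -ENOENT, a missing handler with -EIO.
 */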
static ssize_t blk_mq_sysfs_show(struct kobject *kobj, struct attribute *attr,
				 char *page)
{
	struct blk_mq_ctx_sysfs_entry *entry;
	struct blk_mq_ctx *ctx;
	struct request_queue *q;
	ssize_t res;

	entry = container_of(attr, struct blk_mq_ctx_sysfs_entry, attr);
	ctx = container_of(kobj, struct blk_mq_ctx, kobj);
	q = ctx->queue;

	if (!entry->show)
		return -EIO;

	res = -ENOENT;
	mutex_lock(&q->sysfs_lock);
	if (!blk_queue_dying(q))
		res = entry->show(ctx, page);
	mutex_unlock(&q->sysfs_lock);
	return res;
}

static ssize_t blk_mq_sysfs_store(struct kobject *kobj, struct attribute *attr,
				  const char *page, size_t length)
{
	struct blk_mq_ctx_sysfs_entry *entry;
	struct blk_mq_ctx *ctx;
	struct request_queue *q;
	ssize_t res;

	entry = container_of(attr, struct blk_mq_ctx_sysfs_entry, attr);
	ctx = container_of(kobj, struct blk_mq_ctx, kobj);
	q = ctx->queue;

	if (!entry->store)
		return -EIO;

	res = -ENOENT;
	mutex_lock(&q->sysfs_lock);
	if (!blk_queue_dying(q))
		res = entry->store(ctx, page, length);
	mutex_unlock(&q->sysfs_lock);
	return res;
}

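/* Hardware-context counterparts of the ctx wrappers above. */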
static ssize_t blk_mq_hw_sysfs_show(struct kobject *kobj,
				    struct attribute *attr, char *page)
{
	struct blk_mq_hw_ctx_sysfs_entry *entry;
	struct blk_mq_hw_ctx *hctx;
	struct request_queue *q;
	ssize_t res;

	entry = container_of(attr, struct blk_mq_hw_ctx_sysfs_entry, attr);
	hctx = container_of(kobj, struct blk_mq_hw_ctx, kobj);
	q = hctx->queue;

	if (!entry->show)
		return -EIO;

	res = -ENOENT;
	mutex_lock(&q->sysfs_lock);
	if (!blk_queue_dying(q))
		res = entry->show(hctx, page);
	mutex_unlock(&q->sysfs_lock);
	return res;
}

static ssize_t blk_mq_hw_sysfs_store(struct kobject *kobj,
				     struct attribute *attr, const char *page,
				     size_t length)
{
	struct blk_mq_hw_ctx_sysfs_entry *entry;
	struct blk_mq_hw_ctx *hctx;
	struct request_queue *q;
	ssize_t res;

	entry = container_of(attr, struct blk_mq_hw_ctx_sysfs_entry, attr);
	hctx = container_of(kobj, struct blk_mq_hw_ctx, kobj);
	q = hctx->queue;

	if (!entry->store)
		return -EIO;

	res = -ENOENT;
	mutex_lock(&q->sysfs_lock);
	if (!blk_queue_dying(q))
		res = entry->store(hctx, page, length);
	mutex_unlock(&q->sysfs_lock);
	return res;
}

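/* Show handlers for the per-hctx tag counts (total and reserved). */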
static ssize_t blk_mq_hw_sysfs_nr_tags_show(struct blk_mq_hw_ctx *hctx,
					    char *page)
{
	return sprintf(page, "%u\n", hctx->tags->nr_tags);
}

static ssize_t blk_mq_hw_sysfs_nr_reserved_tags_show(struct blk_mq_hw_ctx *hctx,
						     char *page)
{
	return sprintf(page, "%u\n", hctx->tags->nr_reserved_tags);
}

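/*
 * Print the CPUs mapped to this hctx as a comma-separated list.  The loop
 * stops early instead of overflowing the page; the trailing newline always
 * fits because size leaves one byte of PAGE_SIZE spare.
 */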
static ssize_t blk_mq_hw_sysfs_cpus_show(struct blk_mq_hw_ctx *hctx, char *page)
{
	const size_t size = PAGE_SIZE - 1;
	unsigned int i, first = 1;
	int ret = 0, pos = 0;

	for_each_cpu(i, hctx->cpumask) {
		if (first)
			ret = snprintf(pos + page, size - pos, "%u", i);
		else
			ret = snprintf(pos + page, size - pos, ", %u", i);

		if (ret >= size - pos)
			break;

		first = 0;
		pos += ret;
	}

	ret = snprintf(pos + page, size + 1 - pos, "\n");
	return pos + ret;
}

static struct attribute *default_ctx_attrs[] = {
	NULL,
};

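/* Read-only attributes exposed in each hctx directory. */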
static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_nr_tags = {
	.attr = {.name = "nr_tags", .mode = 0444 },
	.show = blk_mq_hw_sysfs_nr_tags_show,
};
static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_nr_reserved_tags = {
	.attr = {.name = "nr_reserved_tags", .mode = 0444 },
	.show = blk_mq_hw_sysfs_nr_reserved_tags_show,
};
static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_cpus = {
	.attr = {.name = "cpu_list", .mode = 0444 },
	.show = blk_mq_hw_sysfs_cpus_show,
};

static struct attribute *default_hw_ctx_attrs[] = {
	&blk_mq_hw_sysfs_nr_tags.attr,
	&blk_mq_hw_sysfs_nr_reserved_tags.attr,
	&blk_mq_hw_sysfs_cpus.attr,
	NULL,
};

static const struct sysfs_ops blk_mq_sysfs_ops = {
	.show	= blk_mq_sysfs_show,
	.store	= blk_mq_sysfs_store,
};

static const struct sysfs_ops blk_mq_hw_sysfs_ops = {
	.show	= blk_mq_hw_sysfs_show,
	.store	= blk_mq_hw_sysfs_store,
};

static struct kobj_type blk_mq_ktype = {
	.sysfs_ops	= &blk_mq_sysfs_ops,
	.release	= blk_mq_sysfs_release,
};

static struct kobj_type blk_mq_ctx_ktype = {
	.sysfs_ops	= &blk_mq_sysfs_ops,
	.default_attrs	= default_ctx_attrs,
	.release	= blk_mq_ctx_sysfs_release,
};

static struct kobj_type blk_mq_hw_ktype = {
	.sysfs_ops	= &blk_mq_hw_sysfs_ops,
	.default_attrs	= default_hw_ctx_attrs,
	.release	= blk_mq_hw_sysfs_release,
};

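/*
 * Delete an hctx directory and its per-CPU children from sysfs.  Hardware
 * contexts with no mapped software contexts were never added, so skip them.
 */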
static void blk_mq_unregister_hctx(struct blk_mq_hw_ctx *hctx)
{
	struct blk_mq_ctx *ctx;
	int i;

	if (!hctx->nr_ctx)
		return;

	hctx_for_each_ctx(hctx, ctx, i)
		kobject_del(&ctx->kobj);

	kobject_del(&hctx->kobj);
}

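/*
 * Add the hctx directory under the queue's "mq" kobject, plus one "cpuN"
 * child per software context mapped to this hctx.
 */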
static int blk_mq_register_hctx(struct blk_mq_hw_ctx *hctx)
{
	struct request_queue *q = hctx->queue;
	struct blk_mq_ctx *ctx;
	int i, ret;

	if (!hctx->nr_ctx)
		return 0;

	ret = kobject_add(&hctx->kobj, q->mq_kobj, "%u", hctx->queue_num);
	if (ret)
		return ret;

	hctx_for_each_ctx(hctx, ctx, i) {
		ret = kobject_add(&ctx->kobj, &hctx->kobj, "cpu%u", ctx->cpu);
		if (ret)
			break;
	}

	return ret;
}

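/*
 * Tear down the queue's entire mq sysfs tree: every hctx directory, then
 * the "mq" directory itself, dropping the device reference taken at
 * registration time.
 */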
void blk_mq_unregister_dev(struct device *dev, struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	int i;

	lockdep_assert_held(&q->sysfs_lock);

	queue_for_each_hw_ctx(q, hctx, i)
		blk_mq_unregister_hctx(hctx);

	kobject_uevent(q->mq_kobj, KOBJ_REMOVE);
	kobject_del(q->mq_kobj);
	kobject_put(&dev->kobj);

	q->mq_sysfs_init_done = false;
}

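/* Initialize a freshly allocated hctx's kobject so it can be added later. */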
void blk_mq_hctx_kobj_init(struct blk_mq_hw_ctx *hctx)
{
	kobject_init(&hctx->kobj, &blk_mq_hw_ktype);
}

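/*
 * Undo blk_mq_sysfs_init(): drop the ctx kobjects for all possible CPUs and
 * the queue's mq kobject; the container is freed once the last reference
 * goes away.
 */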
void blk_mq_sysfs_deinit(struct request_queue *q)
{
	struct blk_mq_ctx *ctx;
	int cpu;

	for_each_possible_cpu(cpu) {
		ctx = per_cpu_ptr(q->queue_ctx, cpu);
		kobject_put(&ctx->kobj);
	}
	kobject_put(q->mq_kobj);
}

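/*
 * Initialize the mq kobject and one ctx kobject per possible CPU; each ctx
 * pins the mq kobject so the container outlives its children.
 */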
void blk_mq_sysfs_init(struct request_queue *q)
{
	struct blk_mq_ctx *ctx;
	int cpu;

	kobject_init(q->mq_kobj, &blk_mq_ktype);

	for_each_possible_cpu(cpu) {
		ctx = per_cpu_ptr(q->queue_ctx, cpu);

		kobject_get(q->mq_kobj);
		kobject_init(&ctx->kobj, &blk_mq_ctx_ktype);
	}
}

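/*
 * Register the "mq" directory under the device and populate it with one
 * directory per hardware context; on failure, unwind the hctxs registered
 * so far and remove the "mq" directory again.  Caller holds q->sysfs_lock.
 */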
int __blk_mq_register_dev(struct device *dev, struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	int ret, i;

	WARN_ON_ONCE(!q->kobj.parent);
	lockdep_assert_held(&q->sysfs_lock);

	ret = kobject_add(q->mq_kobj, kobject_get(&dev->kobj), "%s", "mq");
	if (ret < 0)
		goto out;

	kobject_uevent(q->mq_kobj, KOBJ_ADD);

	queue_for_each_hw_ctx(q, hctx, i) {
		ret = blk_mq_register_hctx(hctx);
		if (ret)
			goto unreg;
	}

	q->mq_sysfs_init_done = true;

out:
	return ret;

unreg:
	while (--i >= 0)
		blk_mq_unregister_hctx(q->queue_hw_ctx[i]);

	kobject_uevent(q->mq_kobj, KOBJ_REMOVE);
	kobject_del(q->mq_kobj);
	kobject_put(&dev->kobj);
	return ret;
}

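/* Locked wrapper around __blk_mq_register_dev(). */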
int blk_mq_register_dev(struct device *dev, struct request_queue *q)
{
	int ret;

	mutex_lock(&q->sysfs_lock);
	ret = __blk_mq_register_dev(dev, q);
	mutex_unlock(&q->sysfs_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(blk_mq_register_dev);

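/*
 * Remove all hctx directories while keeping the "mq" directory in place;
 * queues that never completed sysfs init are left untouched.
 */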
void blk_mq_sysfs_unregister(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	int i;

	mutex_lock(&q->sysfs_lock);
	if (!q->mq_sysfs_init_done)
		goto unlock;

	queue_for_each_hw_ctx(q, hctx, i)
		blk_mq_unregister_hctx(hctx);

unlock:
	mutex_unlock(&q->sysfs_lock);
}

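/*
 * Re-add the hctx directories of an already registered queue, stopping at
 * the first failure.
 */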
int blk_mq_sysfs_register(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	int i, ret = 0;

	mutex_lock(&q->sysfs_lock);
	if (!q->mq_sysfs_init_done)
		goto unlock;

	queue_for_each_hw_ctx(q, hctx, i) {
		ret = blk_mq_register_hctx(hctx);
		if (ret)
			break;
	}

unlock:
	mutex_unlock(&q->sysfs_lock);

	return ret;
}