#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/backing-dev.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/smp.h>

#include <linux/blk-mq.h>
#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-tag.h"

/*
 * No-op kobject ->release: used by blk_mq_ktype and blk_mq_ctx_ktype
 * below, whose backing memory (q->mq_kobj, per-cpu ctx) is freed
 * elsewhere (see blk_mq_sysfs_deinit()).
 */
static void blk_mq_sysfs_release(struct kobject *kobj)
{
}

21 22 23 24
static void blk_mq_hw_sysfs_release(struct kobject *kobj)
{
	struct blk_mq_hw_ctx *hctx = container_of(kobj, struct blk_mq_hw_ctx,
						  kobj);
25 26 27 28 29

	if (hctx->flags & BLK_MQ_F_BLOCKING)
		cleanup_srcu_struct(hctx->srcu);
	blk_free_flush_queue(hctx->fq);
	sbitmap_free(&hctx->ctx_map);
30
	free_cpumask_var(hctx->cpumask);
31 32 33 34
	kfree(hctx->ctxs);
	kfree(hctx);
}

/*
 * sysfs attribute descriptor for a per-CPU software queue (blk_mq_ctx):
 * couples the generic attribute with typed show/store callbacks.
 */
struct blk_mq_ctx_sysfs_entry {
	struct attribute attr;
	ssize_t (*show)(struct blk_mq_ctx *, char *);
	ssize_t (*store)(struct blk_mq_ctx *, const char *, size_t);
};

/*
 * sysfs attribute descriptor for a hardware queue (blk_mq_hw_ctx):
 * couples the generic attribute with typed show/store callbacks.
 */
struct blk_mq_hw_ctx_sysfs_entry {
	struct attribute attr;
	ssize_t (*show)(struct blk_mq_hw_ctx *, char *);
	ssize_t (*store)(struct blk_mq_hw_ctx *, const char *, size_t);
};

/*
 * Generic ->show dispatcher for per-ctx attributes: recover the typed
 * entry and ctx from the embedded attribute/kobject and forward to
 * entry->show.  Runs under q->sysfs_lock; a dying queue yields -ENOENT,
 * an attribute with no show callback yields -EIO.
 */
static ssize_t blk_mq_sysfs_show(struct kobject *kobj, struct attribute *attr,
				 char *page)
{
	struct blk_mq_ctx_sysfs_entry *entry =
		container_of(attr, struct blk_mq_ctx_sysfs_entry, attr);
	struct blk_mq_ctx *ctx = container_of(kobj, struct blk_mq_ctx, kobj);
	struct request_queue *q = ctx->queue;
	ssize_t ret = -ENOENT;

	if (!entry->show)
		return -EIO;

	mutex_lock(&q->sysfs_lock);
	if (!blk_queue_dying(q))
		ret = entry->show(ctx, page);
	mutex_unlock(&q->sysfs_lock);

	return ret;
}

/*
 * Generic ->store dispatcher for per-ctx attributes; mirrors
 * blk_mq_sysfs_show() but forwards to entry->store with the user data.
 */
static ssize_t blk_mq_sysfs_store(struct kobject *kobj, struct attribute *attr,
				  const char *page, size_t length)
{
	struct blk_mq_ctx_sysfs_entry *entry =
		container_of(attr, struct blk_mq_ctx_sysfs_entry, attr);
	struct blk_mq_ctx *ctx = container_of(kobj, struct blk_mq_ctx, kobj);
	struct request_queue *q = ctx->queue;
	ssize_t ret = -ENOENT;

	if (!entry->store)
		return -EIO;

	mutex_lock(&q->sysfs_lock);
	if (!blk_queue_dying(q))
		ret = entry->store(ctx, page, length);
	mutex_unlock(&q->sysfs_lock);

	return ret;
}

/*
 * Generic ->show dispatcher for per-hctx attributes: same locking and
 * error conventions as blk_mq_sysfs_show(), but resolves a hardware
 * queue context instead of a software one.
 */
static ssize_t blk_mq_hw_sysfs_show(struct kobject *kobj,
				    struct attribute *attr, char *page)
{
	struct blk_mq_hw_ctx_sysfs_entry *entry =
		container_of(attr, struct blk_mq_hw_ctx_sysfs_entry, attr);
	struct blk_mq_hw_ctx *hctx = container_of(kobj, struct blk_mq_hw_ctx,
						  kobj);
	struct request_queue *q = hctx->queue;
	ssize_t ret = -ENOENT;

	if (!entry->show)
		return -EIO;

	mutex_lock(&q->sysfs_lock);
	if (!blk_queue_dying(q))
		ret = entry->show(hctx, page);
	mutex_unlock(&q->sysfs_lock);

	return ret;
}

/*
 * Generic ->store dispatcher for per-hctx attributes; mirrors
 * blk_mq_hw_sysfs_show() but forwards to entry->store.
 */
static ssize_t blk_mq_hw_sysfs_store(struct kobject *kobj,
				     struct attribute *attr, const char *page,
				     size_t length)
{
	struct blk_mq_hw_ctx_sysfs_entry *entry =
		container_of(attr, struct blk_mq_hw_ctx_sysfs_entry, attr);
	struct blk_mq_hw_ctx *hctx = container_of(kobj, struct blk_mq_hw_ctx,
						  kobj);
	struct request_queue *q = hctx->queue;
	ssize_t ret = -ENOENT;

	if (!entry->store)
		return -EIO;

	mutex_lock(&q->sysfs_lock);
	if (!blk_queue_dying(q))
		ret = entry->store(hctx, page, length);
	mutex_unlock(&q->sysfs_lock);

	return ret;
}

140 141
static ssize_t blk_mq_hw_sysfs_nr_tags_show(struct blk_mq_hw_ctx *hctx,
					    char *page)
142
{
143
	return sprintf(page, "%u\n", hctx->tags->nr_tags);
144 145
}

146 147
static ssize_t blk_mq_hw_sysfs_nr_reserved_tags_show(struct blk_mq_hw_ctx *hctx,
						     char *page)
148
{
149
	return sprintf(page, "%u\n", hctx->tags->nr_reserved_tags);
150 151
}

152 153
static ssize_t blk_mq_hw_sysfs_cpus_show(struct blk_mq_hw_ctx *hctx, char *page)
{
154
	const size_t size = PAGE_SIZE - 1;
155
	unsigned int i, first = 1;
156
	int ret = 0, pos = 0;
157

158
	for_each_cpu(i, hctx->cpumask) {
159
		if (first)
160
			ret = snprintf(pos + page, size - pos, "%u", i);
161
		else
162 163 164 165
			ret = snprintf(pos + page, size - pos, ", %u", i);

		if (ret >= size - pos)
			break;
166 167

		first = 0;
168
		pos += ret;
169 170
	}

171 172
	ret = snprintf(pos + page, size - pos, "\n");
	return pos + ret;
173 174
}

/* Per-ctx (software queue) directories currently expose no attributes. */
static struct attribute *default_ctx_attrs[] = {
	NULL,
};

179
static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_nr_tags = {
180
	.attr = {.name = "nr_tags", .mode = 0444 },
181 182 183
	.show = blk_mq_hw_sysfs_nr_tags_show,
};
static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_nr_reserved_tags = {
184
	.attr = {.name = "nr_reserved_tags", .mode = 0444 },
185 186
	.show = blk_mq_hw_sysfs_nr_reserved_tags_show,
};
187
static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_cpus = {
188
	.attr = {.name = "cpu_list", .mode = 0444 },
189 190
	.show = blk_mq_hw_sysfs_cpus_show,
};
191 192

static struct attribute *default_hw_ctx_attrs[] = {
193 194
	&blk_mq_hw_sysfs_nr_tags.attr,
	&blk_mq_hw_sysfs_nr_reserved_tags.attr,
195
	&blk_mq_hw_sysfs_cpus.attr,
196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216
	NULL,
};

/* sysfs dispatch table for per-ctx kobjects and the "mq" directory. */
static const struct sysfs_ops blk_mq_sysfs_ops = {
	.show	= blk_mq_sysfs_show,
	.store	= blk_mq_sysfs_store,
};

/* sysfs dispatch table for per-hctx kobjects. */
static const struct sysfs_ops blk_mq_hw_sysfs_ops = {
	.show	= blk_mq_hw_sysfs_show,
	.store	= blk_mq_hw_sysfs_store,
};

/* kobject type for the queue's top-level "mq" directory (q->mq_kobj). */
static struct kobj_type blk_mq_ktype = {
	.sysfs_ops	= &blk_mq_sysfs_ops,
	.release	= blk_mq_sysfs_release,
};

static struct kobj_type blk_mq_ctx_ktype = {
	.sysfs_ops	= &blk_mq_sysfs_ops,
	.default_attrs	= default_ctx_attrs,
217
	.release	= blk_mq_sysfs_release,
218 219 220 221 222
};

static struct kobj_type blk_mq_hw_ktype = {
	.sysfs_ops	= &blk_mq_hw_sysfs_ops,
	.default_attrs	= default_hw_ctx_attrs,
223
	.release	= blk_mq_hw_sysfs_release,
224 225
};

226
static void blk_mq_unregister_hctx(struct blk_mq_hw_ctx *hctx)
227 228 229 230
{
	struct blk_mq_ctx *ctx;
	int i;

231
	if (!hctx->nr_ctx)
232 233 234 235 236 237 238 239
		return;

	hctx_for_each_ctx(hctx, ctx, i)
		kobject_del(&ctx->kobj);

	kobject_del(&hctx->kobj);
}

240
static int blk_mq_register_hctx(struct blk_mq_hw_ctx *hctx)
241 242 243 244 245
{
	struct request_queue *q = hctx->queue;
	struct blk_mq_ctx *ctx;
	int i, ret;

246
	if (!hctx->nr_ctx)
247 248 249 250 251 252 253 254 255 256 257 258 259 260 261
		return 0;

	ret = kobject_add(&hctx->kobj, &q->mq_kobj, "%u", hctx->queue_num);
	if (ret)
		return ret;

	hctx_for_each_ctx(hctx, ctx, i) {
		ret = kobject_add(&ctx->kobj, &hctx->kobj, "cpu%u", ctx->cpu);
		if (ret)
			break;
	}

	return ret;
}

262
void blk_mq_unregister_dev(struct device *dev, struct request_queue *q)
263
{
264
	struct blk_mq_hw_ctx *hctx;
265
	int i;
266

267 268
	lockdep_assert_held(&q->sysfs_lock);

269
	queue_for_each_hw_ctx(q, hctx, i)
270 271
		blk_mq_unregister_hctx(hctx);

272 273
	kobject_uevent(&q->mq_kobj, KOBJ_REMOVE);
	kobject_del(&q->mq_kobj);
274
	kobject_put(&dev->kobj);
275 276

	q->mq_sysfs_init_done = false;
277 278
}

/*
 * Initialize the kobject embedded in a hardware queue so it can later be
 * added by blk_mq_register_hctx() and released via blk_mq_hw_ktype.
 */
void blk_mq_hctx_kobj_init(struct blk_mq_hw_ctx *hctx)
{
	kobject_init(&hctx->kobj, &blk_mq_hw_ktype);
}

void blk_mq_sysfs_deinit(struct request_queue *q)
{
	struct blk_mq_ctx *ctx;
	int cpu;

	for_each_possible_cpu(cpu) {
		ctx = per_cpu_ptr(q->queue_ctx, cpu);
		kobject_put(&ctx->kobj);
	}
	kobject_put(&q->mq_kobj);
}

296
void blk_mq_sysfs_init(struct request_queue *q)
297 298
{
	struct blk_mq_ctx *ctx;
299
	int cpu;
300 301 302

	kobject_init(&q->mq_kobj, &blk_mq_ktype);

303 304
	for_each_possible_cpu(cpu) {
		ctx = per_cpu_ptr(q->queue_ctx, cpu);
305
		kobject_init(&ctx->kobj, &blk_mq_ctx_ktype);
306
	}
307 308
}

309
int __blk_mq_register_dev(struct device *dev, struct request_queue *q)
310 311
{
	struct blk_mq_hw_ctx *hctx;
312
	int ret, i;
313

314 315
	WARN_ON_ONCE(!q->kobj.parent);
	lockdep_assert_held(&q->sysfs_lock);
316

317 318
	ret = kobject_add(&q->mq_kobj, kobject_get(&dev->kobj), "%s", "mq");
	if (ret < 0)
319
		goto out;
320 321 322 323

	kobject_uevent(&q->mq_kobj, KOBJ_ADD);

	queue_for_each_hw_ctx(q, hctx, i) {
324
		ret = blk_mq_register_hctx(hctx);
325
		if (ret)
326
			goto unreg;
327 328
	}

329
	q->mq_sysfs_init_done = true;
330

331
out:
332
	return ret;
333 334 335 336 337 338 339 340 341

unreg:
	while (--i >= 0)
		blk_mq_unregister_hctx(q->queue_hw_ctx[i]);

	kobject_uevent(&q->mq_kobj, KOBJ_REMOVE);
	kobject_del(&q->mq_kobj);
	kobject_put(&dev->kobj);
	return ret;
342 343 344 345 346 347 348 349 350
}

/*
 * Locked wrapper around __blk_mq_register_dev() for callers that do not
 * already hold q->sysfs_lock.
 *
 * Fix: strips web-scrape line-number residue interleaved with the code.
 */
int blk_mq_register_dev(struct device *dev, struct request_queue *q)
{
	int ret;

	mutex_lock(&q->sysfs_lock);
	ret = __blk_mq_register_dev(dev, q);
	mutex_unlock(&q->sysfs_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(blk_mq_register_dev);

void blk_mq_sysfs_unregister(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	int i;

361
	mutex_lock(&q->sysfs_lock);
362
	if (!q->mq_sysfs_init_done)
363
		goto unlock;
364

365 366
	queue_for_each_hw_ctx(q, hctx, i)
		blk_mq_unregister_hctx(hctx);
367 368 369

unlock:
	mutex_unlock(&q->sysfs_lock);
370 371 372 373 374 375 376
}

int blk_mq_sysfs_register(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	int i, ret = 0;

377
	mutex_lock(&q->sysfs_lock);
378
	if (!q->mq_sysfs_init_done)
379
		goto unlock;
380

381 382 383 384 385 386
	queue_for_each_hw_ctx(q, hctx, i) {
		ret = blk_mq_register_hctx(hctx);
		if (ret)
			break;
	}

387 388 389
unlock:
	mutex_unlock(&q->sysfs_lock);

390 391
	return ret;
}